Example usage for java.util HashSet clear

List of usage examples for java.util HashSet clear

Introduction

On this page you can find example usage for java.util.HashSet.clear().

Prototype

public void clear() 

Source Link

Document

Removes all of the elements from this set.
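
As a quick illustration of the behavior described above, here is a minimal, self-contained sketch (the ClearDemo class name is hypothetical and not taken from any of the source files listed under Usage) showing that clear() empties the set in place and leaves the same instance ready for reuse:

import java.util.HashSet;
import java.util.Set;

public class ClearDemo {
    public static void main(String[] args) {
        Set<String> names = new HashSet<String>();
        names.add("alpha");
        names.add("beta");
        System.out.println(names.size()); // 2

        // clear() removes every element but keeps the set instance usable
        names.clear();
        System.out.println(names.isEmpty()); // true

        // the emptied set can be repopulated afterwards
        names.add("gamma");
        System.out.println(names); // [gamma]
    }
}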

Usage

From source file:Main.java

import java.util.Arrays;
import java.util.HashSet;

public static void main(String[] a) {
    String elements[] = { "A", "B", "C", "D", "E" };
    HashSet<String> set = new HashSet<String>(Arrays.asList(elements));

    elements = new String[] { "E", "F" };

    set.addAll(Arrays.asList(elements));

    System.out.println(set);

    set.clear();

    System.out.println(set);
}

From source file:Main.java

import java.util.HashSet;

public static void main(String[] args) {
    HashSet<Integer> hSet = new HashSet<Integer>();

    hSet.add(1);
    hSet.add(2);
    hSet.add(3);

    System.out.println(hSet);

    hSet.clear();

    System.out.println(hSet);

    System.out.println(hSet.isEmpty());

}

From source file:Main.java

import java.util.HashSet;

public static void main(String args[]) {
    HashSet<String> newset = new HashSet<String>();

    // populate hash set
    newset.add("Learning");
    newset.add("from");
    newset.add("java2s.com");

    // checking elements in hash set
    System.out.println("Hash set values: " + newset);

    // clear set values
    newset.clear();

    System.out.println("Hash set values after clear: " + newset);
}

From source file:de.unileipzig.ub.indexer.App.java

public static void main(String[] args) throws IOException {

    // create Options object
    Options options = new Options();

    options.addOption("h", "help", false, "display this help");

    options.addOption("f", "filename", true, "name of the JSON file whose content should be indexed");
    options.addOption("i", "index", true, "the name of the target index");
    options.addOption("d", "doctype", true, "the name of the doctype (title, local, ...)");

    options.addOption("t", "host", true, "elasticsearch hostname (default: 0.0.0.0)");
    options.addOption("p", "port", true, "transport port (that's NOT the http port, default: 9300)");
    options.addOption("c", "cluster", true, "cluster name (default: elasticsearch_mdma)");

    options.addOption("b", "bulksize", true, "number of docs sent in one request (default: 3000)");
    options.addOption("v", "verbose", false, "show processing speed while indexing");
    options.addOption("s", "status", false, "only show status of index for file");

    options.addOption("r", "repair", false, "attempt to repair recoverable inconsistencies on the go");
    options.addOption("e", "debug", false, "set logging level to debug");
    options.addOption("l", "logfile", true, "logfile - in not specified only log to stdout");

    options.addOption("m", "memcached", true, "host and port of memcached (default: localhost:11211)");
    options.addOption("z", "latest-flag-on", true,
            "enable latest flag according to field (within content, e.g. 001)");
    options.addOption("a", "flat", false, "flat-mode: do not check for inconsistencies");

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException ex) {
        logger.error(ex);
        System.exit(1);
    }

    // setup logging
    Properties systemProperties = System.getProperties();
    systemProperties.put("net.spy.log.LoggerImpl", "net.spy.memcached.compat.log.Log4JLogger");
    System.setProperties(systemProperties);
    Logger.getLogger("net.spy.memcached").setLevel(Level.ERROR);

    Properties props = new Properties();
    props.load(props.getClass().getResourceAsStream("/log4j.properties"));

    if (cmd.hasOption("debug")) {
        props.setProperty("log4j.logger.de.unileipzig", "DEBUG");
    }

    if (cmd.hasOption("logfile")) {
        props.setProperty("log4j.rootLogger", "INFO, stdout, F");
        props.setProperty("log4j.appender.F", "org.apache.log4j.FileAppender");
        props.setProperty("log4j.appender.F.File", cmd.getOptionValue("logfile"));
        props.setProperty("log4j.appender.F.layout", "org.apache.log4j.PatternLayout");
        props.setProperty("log4j.appender.F.layout.ConversionPattern", "%5p | %d | %F | %L | %m%n");
    }

    PropertyConfigurator.configure(props);

    InetAddress addr = InetAddress.getLocalHost();
    String memcachedHostAndPort = addr.getHostAddress() + ":11211";
    if (cmd.hasOption("m")) {
        memcachedHostAndPort = cmd.getOptionValue("m");
    }

    // setup caching
    try {
        if (memcachedClient == null) {
            memcachedClient = new MemcachedClient(
                    new ConnectionFactoryBuilder().setFailureMode(FailureMode.Cancel).build(),
                    AddrUtil.getAddresses(memcachedHostAndPort));
            try {
                // give client and server 300ms to establish the connection
                Thread.sleep(300);
            } catch (InterruptedException ex) {
            }

            Collection availableServers = memcachedClient.getAvailableServers();
            logger.info(availableServers);
            if (availableServers.size() == 0) {
                logger.info("no memcached servers found");
                memcachedClient.shutdown();
                memcachedClient = null;
            } else {
                logger.info(availableServers.size() + " memcached server(s) detected, fine.");
            }
        }
    } catch (IOException ex) {
        logger.warn("couldn't create a connection, bailing out: " + ex.getMessage());
    }

    // process options

    if (cmd.hasOption("h")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("indexer", options, true);
        quit(0);
    }

    boolean verbose = false;
    if (cmd.hasOption("verbose")) {
        verbose = true;
    }

    // ES options
    String[] hosts = new String[] { "0.0.0.0" };
    int port = 9300;
    String clusterName = "elasticsearch_mdma";
    int bulkSize = 3000;

    if (cmd.hasOption("host")) {
        hosts = cmd.getOptionValues("host");
    }
    if (cmd.hasOption("port")) {
        port = Integer.parseInt(cmd.getOptionValue("port"));
    }
    if (cmd.hasOption("cluster")) {
        clusterName = cmd.getOptionValue("cluster");
    }
    if (cmd.hasOption("bulksize")) {
        bulkSize = Integer.parseInt(cmd.getOptionValue("bulksize"));
        if (bulkSize < 1 || bulkSize > 100000) {
            logger.error("bulksize must be between 1 and 100,000");
            quit(1);
        }
    }

    // ES Client
    final Settings settings = ImmutableSettings.settingsBuilder().put("cluster.name", clusterName)
            .build();
    final TransportClient client = new TransportClient(settings);
    for (String host : hosts) {
        client.addTransportAddress(new InetSocketTransportAddress(host, port));
    }

    if (cmd.hasOption("filename") && cmd.hasOption("index") && cmd.hasOption("doctype")) {

        final String filename = cmd.getOptionValue("filename");

        final File _file = new File(filename);
        if (_file.length() == 0) {
            logger.info(_file.getAbsolutePath() + " is empty, skipping");
            quit(0); // file is empty
        }

        // for flat mode: leave a stampfile beside the json to 
        // indicate previous successful processing
        File directory = new File(filename).getParentFile();
        File stampfile = new File(directory, DigestUtils.shaHex(filename) + ".indexed");

        long start = System.currentTimeMillis();
        long lineCount = 0;

        final String indexName = cmd.getOptionValue("index");
        final String docType = cmd.getOptionValue("doctype");
        BulkRequestBuilder bulkRequest = client.prepareBulk();

        try {
            if (cmd.hasOption("flat")) {
                // flat mode
                // .........
                if (stampfile.exists()) {
                    logger.info("SKIPPING, since it seems this file has already " + "been imported (found: "
                            + stampfile.getAbsolutePath() + ")");
                    quit(0);
                }
            } else {

                final String srcSHA1 = extractSrcSHA1(filename);

                logger.debug(filename + " srcsha1: " + srcSHA1);

                long docsInIndex = getIndexedRecordCount(client, indexName, srcSHA1);
                logger.debug(filename + " indexed: " + docsInIndex);

                long docsInFile = getLineCount(filename);
                logger.debug(filename + " lines: " + docsInFile);

                // in non-flat-mode, indexing would take care
                // of inconsistencies
                if (docsInIndex == docsInFile) {
                    logger.info("UP-TO DATE: " + filename + " (" + docsInIndex + ", " + srcSHA1 + ")");
                    client.close();
                    quit(0);
                }

                if (docsInIndex > 0) {
                    logger.warn("INCONSISTENCY DETECTED: " + filename + ": indexed:" + docsInIndex + " lines:"
                            + docsInFile);

                    if (!cmd.hasOption("r")) {
                        logger.warn(
                                "Please re-run indexer with --repair flag or delete residues first with: $ curl -XDELETE "
                                        + hosts[0] + ":9200/" + indexName
                                        + "/_query -d ' {\"term\" : { \"meta.srcsha1\" : \"" + srcSHA1
                                        + "\" }}'");
                        client.close();
                        quit(1);
                    } else {
                        logger.info("Attempting to clear residues...");
                        // attempt to repair once
                        DeleteByQueryResponse dbqr = client.prepareDeleteByQuery(indexName)
                                .setQuery(termQuery("meta.srcsha1", srcSHA1)).execute().actionGet();

                        Iterator<IndexDeleteByQueryResponse> it = dbqr.iterator();
                        long deletions = 0;
                        while (it.hasNext()) {
                            IndexDeleteByQueryResponse response = it.next();
                            deletions += 1;
                        }
                        logger.info("Deleted residues of " + filename);
                        logger.info("Refreshing [" + indexName + "]");
                        RefreshResponse refreshResponse = client.admin().indices()
                                .refresh(new RefreshRequest(indexName)).actionGet();

                        long indexedAfterDelete = getIndexedRecordCount(client, indexName, srcSHA1);
                        logger.info(indexedAfterDelete + " docs remained");
                        if (indexedAfterDelete > 0) {
                            logger.warn("Not all residues cleaned. Try to fix this manually: $ curl -XDELETE "
                                    + hosts[0] + ":9200/" + indexName
                                    + "/_query -d ' {\"term\" : { \"meta.srcsha1\" : \"" + srcSHA1 + "\" }}'");
                            quit(1);
                        } else {
                            logger.info("Residues are gone. Now trying to reindex: " + filename);
                        }
                    }
                }
            }

            logger.info("INDEXING-REQUIRED: " + filename);
            if (cmd.hasOption("status")) {
                quit(0);
            }

            HashSet<String> idsInBatch = new HashSet<String>();

            String idField = null;
            if (cmd.hasOption("z")) {
                idField = cmd.getOptionValue("z");
            }

            final FileReader fr = new FileReader(filename);
            final BufferedReader br = new BufferedReader(fr);

            String line;
            // one line is one document
            while ((line = br.readLine()) != null) {

                // "Latest-Flag" machine
                // This gets obsolete with a "flat" index
                if (cmd.hasOption("z")) {
                    // flag that indicates, whether the document
                    // about to be indexed will be the latest
                    boolean willBeLatest = true;

                    // check if there is a previous (lower meta.timestamp) document with 
                    // the same identifier (whatever that may be - queried under "content")
                    final String contentIdentifier = getContentIdentifier(line, idField);
                    idsInBatch.add(contentIdentifier);

                    // assumed in meta.timestamp
                    final Long timestamp = Long.parseLong(getTimestamp(line));

                    logger.debug("Checking whether record is latest (line: " + lineCount + ")");
                    logger.debug(contentIdentifier + ", " + timestamp);

                    // get all docs, which match the contentIdentifier
                    // by filter, which doesn't score
                    final TermFilterBuilder idFilter = new TermFilterBuilder("content." + idField,
                            contentIdentifier);
                    final TermFilterBuilder kindFilter = new TermFilterBuilder("meta.kind", docType);
                    final AndFilterBuilder afb = new AndFilterBuilder();
                    afb.add(idFilter).add(kindFilter);
                    final FilteredQueryBuilder fb = filteredQuery(matchAllQuery(), afb);

                    final SearchResponse searchResponse = client.prepareSearch(indexName)
                            .setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(fb).setFrom(0)
                            .setSize(1200) // 3 years and 105 days assuming daily updates at the most
                            .setExplain(false).execute().actionGet();

                    final SearchHits searchHits = searchResponse.getHits();

                    logger.debug("docs with this id in the index: " + searchHits.getTotalHits());

                    for (final SearchHit hit : searchHits.getHits()) {
                        final String docId = hit.id();
                        final Map<String, Object> source = hit.sourceAsMap();
                        final Map meta = (Map) source.get("meta");
                        final Long docTimestamp = Long.parseLong(meta.get("timestamp").toString());
                        // if the indexed doc timestamp is not newer than the current one,
                        // remove any latest flag
                        if (timestamp >= docTimestamp) {
                            source.remove("latest");
                            final ObjectMapper mapper = new ObjectMapper();
                            // put the updated doc back
                            // IndexResponse response = 
                            client.prepareIndex(indexName, docType).setCreate(false).setId(docId)
                                    .setSource(mapper.writeValueAsBytes(source))
                                    .execute(new ActionListener<IndexResponse>() {
                                        public void onResponse(IndexResponse rspns) {
                                            logger.debug("Removed latest flag from " + contentIdentifier + ", "
                                                    + docTimestamp + ", " + hit.id() + " since (" + timestamp
                                                    + " > " + docTimestamp + ")");
                                        }

                                        public void onFailure(Throwable thrwbl) {
                                            logger.error("Could not remove flag from " + hit.id() + ", "
                                                    + contentIdentifier);
                                        }
                                    });
                            // .execute()
                            //.actionGet();
                        } else {
                            logger.debug("Doc " + hit.id() + " is newer (" + docTimestamp + ")");
                            willBeLatest = false;
                        }
                    }

                    if (willBeLatest) {
                        line = setLatestFlag(line);
                        logger.info("Setting latest flag on " + contentIdentifier + ", " + timestamp);
                    }

                    // end of latest-flag machine
                    // beware - this will be correct as long as there
                    // are no dups within one bulk!
                }

                bulkRequest.add(client.prepareIndex(indexName, docType).setSource(line));
                lineCount++;
                logger.debug("Added line " + lineCount + " to BULK");
                logger.debug(line);

                if (lineCount % bulkSize == 0) {

                    if (idsInBatch.size() != bulkSize && cmd.hasOption("z")) {
                        logger.error(
                                "This batch has duplications in the ID. That's not bad for the index, just makes the latest flag fuzzy");
                        logger.error(
                                "Bulk size was: " + bulkSize + ", but " + idsInBatch.size() + " IDs (only)");
                    }
                    idsInBatch.clear();

                    logger.debug("Issuing BULK request");

                    final long actionCount = bulkRequest.numberOfActions();
                    final BulkResponse bulkResponse = bulkRequest.execute().actionGet();
                    final long tookInMillis = bulkResponse.getTookInMillis();

                    if (bulkResponse.hasFailures()) {
                        logger.fatal("FAILED, bulk not indexed. exiting now.");
                        Iterator<BulkItemResponse> it = bulkResponse.iterator();
                        while (it.hasNext()) {
                            BulkItemResponse bir = it.next();
                            if (bir.isFailed()) {
                                Failure failure = bir.getFailure();
                                logger.fatal("id: " + failure.getId() + ", message: " + failure.getMessage()
                                        + ", type: " + failure.getType() + ", index: " + failure.getIndex());
                            }
                        }
                        quit(1);
                    } else {
                        if (verbose) {
                            final double elapsed = System.currentTimeMillis() - start;
                            final double speed = (lineCount / elapsed * 1000);
                            logger.info("OK (" + filename + ") " + lineCount + " docs indexed (" + actionCount
                                    + "/" + tookInMillis + "ms" + "/" + String.format("%.2f", speed) + "r/s)");
                        }
                    }
                    bulkRequest = client.prepareBulk();
                }
            }

            // handle the remaining items
            final long actionCount = bulkRequest.numberOfActions();
            if (actionCount > 0) {
                final BulkResponse bulkResponse = bulkRequest.execute().actionGet();
                final long tookInMillis = bulkResponse.getTookInMillis();

                if (bulkResponse.hasFailures()) {
                    logger.fatal("FAILED, bulk not indexed. exiting now.");
                    Iterator<BulkItemResponse> it = bulkResponse.iterator();
                    while (it.hasNext()) {
                        BulkItemResponse bir = it.next();
                        if (bir.isFailed()) {
                            Failure failure = bir.getFailure();
                            logger.fatal("id: " + failure.getId() + ", message: " + failure.getMessage()
                                    + ", type: " + failure.getType() + ", index: " + failure.getIndex());
                        }
                    }
                    quit(1);
                } else {

                    // trigger update now
                    RefreshResponse refreshResponse = client.admin().indices()
                            .refresh(new RefreshRequest(indexName)).actionGet();

                    if (verbose) {
                        final double elapsed = System.currentTimeMillis() - start;
                        final double speed = (lineCount / elapsed * 1000);
                        logger.info("OK (" + filename + ") " + lineCount + " docs indexed (" + actionCount + "/"
                                + tookInMillis + "ms" + "/" + String.format("%.2f", speed) + "r/s)");
                    }

                }

            }

            br.close();
            client.close();
            final double elapsed = (System.currentTimeMillis() - start) / 1000.0;
            final double speed = (lineCount / elapsed);
            logger.info("indexing (" + filename + ") " + lineCount + " docs took " + elapsed + "s (speed: "
                    + String.format("%.2f", speed) + "r/s)");
            if (cmd.hasOption("flat")) {
                try {
                    FileUtils.touch(stampfile);
                } catch (IOException ioe) {
                    logger.warn(".indexed files not created. Will reindex everything everytime.");
                }
            }
        } catch (IOException e) {
            client.close();
            logger.error(e);
            quit(1);
        } finally {
            client.close();
        }
    }
    quit(0);
}

From source file:com.googlecode.eyesfree.brailleback.FocusFinder.java

public AccessibilityNodeInfoCompat linear(AccessibilityNodeInfoCompat source, int direction) {
    if (source == null) {
        return null;
    }
    AccessibilityNodeInfoCompat next = NodeFocusFinder.focusSearch(source, direction);

    HashSet<AccessibilityNodeInfoCompat> seenNodes = mTmpNodeHash;
    seenNodes.clear();

    while ((next != null) && !AccessibilityNodeInfoUtils.shouldFocusNode(mContext, next)) {
        if (seenNodes.contains(next)) {
            LogUtils.log(this, Log.ERROR, "Found duplicate node during traversal: %s", next);
            break;
        }

        LogUtils.log(this, Log.VERBOSE, "Search strategy rejected node: %s", next.getInfo());
        seenNodes.add(next);
        next = NodeFocusFinder.focusSearch(next, direction);
    }

    // Recycle the nodes collected during the traversal.
    AccessibilityNodeInfoUtils.recycleNodes(seenNodes);

    if (next == null) {
        LogUtils.log(this, Log.VERBOSE, "Failed to find the next node");
    }
    return next;
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM.java

public void clearNodeSetForAttempt(ApplicationAttemptId attemptId) {
    super.writeLock.lock();
    try {
        HashSet<NodeId> nodeSet = this.appAttemptToNodeKeyMap.get(attemptId);
        if (nodeSet != null) {
            LOG.info("Clear node set for " + attemptId);
            nodeSet.clear();
        }
    } finally {
        super.writeLock.unlock();
    }
}

From source file:org.apache.tika.parser.ner.NamedEntityParserTest.java

@Test
public void testParse() throws Exception {

    //test config is added to resources directory
    TikaConfig config = new TikaConfig(getClass().getResourceAsStream(CONFIG_FILE));
    Tika tika = new Tika(config);

    JSONParser parser = new JSONParser();
    String text = "";

    HashMap<Integer, String> hmap = new HashMap<Integer, String>();
    HashMap<String, HashMap<Integer, String>> outerhmap = new HashMap<String, HashMap<Integer, String>>();

    int index = 0;
    //Input Directory Path
    String inputDirPath = "/Users/AravindMac/Desktop/polardata_json_grobid/application_pdf";
    int count = 0;
    try {

        File root = new File(inputDirPath);
        File[] listDir = root.listFiles();
        for (File filename : listDir) {

            if (!filename.getName().equals(".DS_Store") && count < 3573) {
                count += 1;
                System.out.println(count);

                String absoluteFilename = filename.getAbsolutePath().toString();

                //   System.out.println(absoluteFilename);
                //Read the json file, parse and retrieve the text present in the content field.

                Object obj = parser.parse(new FileReader(absoluteFilename));

                BufferedWriter bw = new BufferedWriter(new FileWriter(new File(absoluteFilename)));

                JSONObject jsonObject = (JSONObject) obj;
                text = (String) jsonObject.get("content");

                Metadata md = new Metadata();
                tika.parse(new ByteArrayInputStream(text.getBytes()), md);

                //Parse the content and retrieve the values tagged as the NER entities
                HashSet<String> set = new HashSet<String>();
                set.addAll(Arrays.asList(md.getValues("X-Parsed-By")));

                // Store values tagged as NER_PERSON
                set.clear();
                set.addAll(Arrays.asList(md.getValues("NER_PERSON")));

                hmap = new HashMap<Integer, String>();
                index = 0;

                for (Iterator<String> i = set.iterator(); i.hasNext();) {
                    String f = i.next();
                    hmap.put(index, f);
                    index++;
                }

                if (!hmap.isEmpty()) {
                    outerhmap.put("PERSON", hmap);
                }

                // Store values tagged as NER_LOCATION
                set.clear();
                set.addAll(Arrays.asList(md.getValues("NER_LOCATION")));
                hmap = new HashMap<Integer, String>();
                index = 0;

                for (Iterator<String> i = set.iterator(); i.hasNext();) {
                    String f = i.next();
                    hmap.put(index, f);
                    index++;
                }

                if (!hmap.isEmpty()) {
                    outerhmap.put("LOCATION", hmap);
                }

                //Store values tagged as NER_ORGANIZATION
                set.clear();
                set.addAll(Arrays.asList(md.getValues("NER_ORGANIZATION")));

                hmap = new HashMap<Integer, String>();
                index = 0;

                for (Iterator<String> i = set.iterator(); i.hasNext();) {
                    String f = i.next();
                    hmap.put(index, f);
                    index++;
                }

                if (!hmap.isEmpty()) {
                    outerhmap.put("ORGANIZATION", hmap);
                }

                // Store values tagged as NER_DATE
                set.clear();
                set.addAll(Arrays.asList(md.getValues("NER_DATE")));

                hmap = new HashMap<Integer, String>();
                index = 0;

                for (Iterator<String> i = set.iterator(); i.hasNext();) {
                    String f = i.next();
                    hmap.put(index, f);
                    index++;
                }

                if (!hmap.isEmpty()) {
                    outerhmap.put("DATE", hmap);
                }

                JSONArray array = new JSONArray();
                array.add(outerhmap);
                if (!outerhmap.isEmpty()) {
                    jsonObject.put("OpenNLP", array); //Add the NER entities to the json under NER key as a JSON array.
                }

                System.out.println(jsonObject);

                bw.write(jsonObject.toJSONString()); //Stringify the JSON and write it back to the file
                bw.close();

            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

}

From source file:com.tamingtext.tagrecommender.TestStackOverflowTagger.java

public void execute() {
    PrintStream out = null;

    try {
        OpenObjectIntHashMap<String> tagCounts = new OpenObjectIntHashMap<String>();
        OpenObjectIntHashMap<String> tagCorrect = new OpenObjectIntHashMap<String>();
        loadTags(tagCounts);

        StackOverflowStream stream = new StackOverflowStream();
        stream.open(inputFile.getAbsolutePath());

        out = new PrintStream(new FileOutputStream(outputFile));

        int correctTagCount = 0;
        int postCount = 0;

        HashSet<String> postTags = new HashSet<String>();
        float postPctCorrect;

        int totalSingleCorrect = 0;
        int totalHalfCorrect = 0;

        for (StackOverflowPost post : stream) {
            correctTagCount = 0;
            postCount++;

            postTags.clear();
            postTags.addAll(post.getTags());
            for (String tag : post.getTags()) {
                if (tagCounts.containsKey(tag)) {
                    tagCounts.adjustOrPutValue(tag, 1, 1);
                }
            }

            ScoreTag[] tags = client.getTags(post.getTitle() + "\n" + post.getBody(), maxTags);

            for (ScoreTag tag : tags) {
                if (postTags.contains(tag.getTag())) {
                    correctTagCount += 1;
                    tagCorrect.adjustOrPutValue(tag.getTag(), 1, 1);
                }
            }

            if (correctTagCount > 0) {
                totalSingleCorrect += 1;
            }

            postPctCorrect = correctTagCount / (float) postTags.size();
            if (postPctCorrect >= 0.50f) {
                totalHalfCorrect += 1;
            }

            if ((postCount % 100) == 0) {
                dumpStats(System.err, postCount, totalSingleCorrect, totalHalfCorrect);
            }

        }

        dumpStats(System.err, postCount, totalSingleCorrect, totalHalfCorrect);
        dumpStats(out, postCount, totalSingleCorrect, totalHalfCorrect);
        dumpTags(out, tagCounts, tagCorrect);
    } catch (Exception ex) {
        throw (RuntimeException) new RuntimeException().initCause(ex);
    } finally {
        if (out != null) {
            out.close();
        }
    }
}

From source file:org.osaf.cosmo.security.aop.SecurityAdviceTest.java

/** */
public void testSecuredApiWithTicket() throws Exception {
    User user1 = testHelper.makeDummyUser("user1", "password");
    User user2 = testHelper.makeDummyUser("user2", "password");
    CollectionItem rootCollection = contentDao.createRootItem(user1);
    CollectionItem collection = testHelper.makeDummyCollection(user1);
    collection.setUid("col");

    // create RO and RW tickets on collection
    Ticket roTicket = testHelper.makeDummyTicket();
    roTicket.setKey("T1");
    roTicket.getPrivileges().add(Ticket.PRIVILEGE_READ);
    collection.getTickets().add(roTicket);
    Ticket rwTicket = testHelper.makeDummyTicket();
    rwTicket.setKey("T2");
    rwTicket.getPrivileges().add(Ticket.PRIVILEGE_WRITE);
    collection.getTickets().add(rwTicket);

    collection = contentDao.createCollection(rootCollection, collection);

    ContentItem dummyContent = new MockNoteItem();
    dummyContent.setName("foo");
    dummyContent.setOwner(user1);
    dummyContent.setUid("1");
    dummyContent = contentDao.createContent(collection, dummyContent);

    // login as RO ticket
    initiateContext(roTicket);

    // view is fine
    proxyService.findItemByUid("1");

    // update should fail
    try {
        proxyService.updateContent(dummyContent);
        Assert.fail("able to update item");
    } catch (ItemSecurityException e) {
        Assert.assertEquals("1", e.getItem().getUid());
        Assert.assertEquals(Permission.WRITE, e.getPermission());
    }

    // login as RW ticket
    initiateContext(rwTicket);

    // view and update should work
    proxyService.findItemByUid("1");
    proxyService.updateContent(dummyContent);

    // login as user2 including rw ticket
    HashSet<Ticket> tickets = new HashSet<Ticket>();
    tickets.add(rwTicket);
    initiateContextWithTickets(user2, tickets);

    // view and update should work
    proxyService.findItemByUid("1");
    proxyService.updateContent(dummyContent);

    // remove tickets
    tickets.clear();

    // view should fail
    try {
        proxyService.findItemByUid("1");
        Assert.fail("able to view item");
    } catch (ItemSecurityException e) {
        Assert.assertEquals("1", e.getItem().getUid());
        Assert.assertEquals(Permission.READ, e.getPermission());
    }
}

From source file:com.esri.squadleader.model.GeoPackageReader.java

/**
 * Reads the tables in a GeoPackage, makes a layer from each table, and returns a list containing
 * those layers.
 *
 * @param gpkgPath       the full path to the .gpkg file.
 * @param sr             the spatial reference to which any raster layers should be projected, typically the
 *                       spatial reference of your map.
 * @param showVectors    if true, this method will include the GeoPackage's vector layers.
 * @param showRasters    if true, this method will include the GeoPackage's raster layer.
 * @param rasterRenderer the renderer to be used for raster layers. One simple option is an RGBRenderer.
 * @param markerRenderer the renderer to be used for point layers.
 * @param lineRenderer   the renderer to be used for polyline layers.
 * @param fillRenderer   the renderer to be used for polygon layers.
 * @return a list of the layers created for all tables in the GeoPackage.
 * @throws IOException if gpkgPath cannot be read. Possible reasons include the file not
 *                     existing, failure to request READ_EXTERNAL_STORAGE or
 *                     WRITE_EXTERNAL_STORAGE permission, or the GeoPackage containing an
 *                     invalid spatial reference.
 */
public List<Layer> readGeoPackageToLayerList(String gpkgPath, SpatialReference sr, boolean showVectors,
        boolean showRasters, RasterRenderer rasterRenderer, Renderer markerRenderer, Renderer lineRenderer,
        Renderer fillRenderer) throws IOException {
    List<Layer> layers = new ArrayList<Layer>();

    if (showRasters) {
        // Check to see if there are any rasters before loading them
        SQLiteDatabase sqliteDb = null;
        Cursor cursor = null;
        try {
            sqliteDb = SQLiteDatabase.openDatabase(gpkgPath, null, SQLiteDatabase.OPEN_READONLY);
            cursor = sqliteDb.rawQuery("SELECT COUNT(*) FROM gpkg_contents WHERE data_type = ?",
                    new String[] { "tiles" });
            if (cursor.moveToNext()) {
                if (0 < cursor.getInt(0)) {
                    cursor.close();
                    sqliteDb.close();
                    FileRasterSource src = new FileRasterSource(gpkgPath);
                    rasterSources.add(src);
                    if (null != sr) {
                        src.project(sr);
                    }
                    RasterLayer rasterLayer = new RasterLayer(src);
                    rasterLayer.setRenderer(rasterRenderer);
                    rasterLayer
                            .setName((gpkgPath.contains("/") ? gpkgPath.substring(gpkgPath.lastIndexOf("/") + 1)
                                    : gpkgPath) + " (raster)");
                    layers.add(rasterLayer);
                }
            }
        } catch (Throwable t) {
            Log.e(TAG, "Could not read raster(s) from GeoPackage", t);
        } finally {
            if (null != cursor) {
                cursor.close();
            }
            if (null != sqliteDb) {
                sqliteDb.close();
            }
        }
    }

    if (showVectors) {
        Geopackage gpkg;
        try {
            gpkg = new Geopackage(gpkgPath);
        } catch (RuntimeException ex) {
            throw new IOException(null != ex.getMessage() && ex.getMessage().contains("unknown wkt")
                    ? "Geopackage " + gpkgPath + " contains an invalid spatial reference."
                    : null, ex);
        }
        geopackages.add(gpkg);
        List<GeopackageFeatureTable> tables = gpkg.getGeopackageFeatureTables();
        if (0 < tables.size()) {
            //First pass: polygons and unknowns
            HashSet<Geometry.Type> types = new HashSet<Geometry.Type>();
            types.add(Geometry.Type.ENVELOPE);
            types.add(Geometry.Type.POLYGON);
            types.add(Geometry.Type.UNKNOWN);
            layers.addAll(getTablesAsLayers(tables, types, fillRenderer));

            //Second pass: lines
            types.clear();
            types.add(Geometry.Type.LINE);
            types.add(Geometry.Type.POLYLINE);
            layers.addAll(getTablesAsLayers(tables, types, lineRenderer));

            //Third pass: points
            types.clear();
            types.add(Geometry.Type.MULTIPOINT);
            types.add(Geometry.Type.POINT);
            layers.addAll(getTablesAsLayers(tables, types, markerRenderer));
        }
    }

    return layers;
}