Example usage for java.util HashSet iterator

List of usage examples for java.util HashSet iterator

Introduction

On this page you can find example usage for java.util HashSet iterator.

Prototype

public Iterator<E> iterator() 

Document

Returns an iterator over the elements in this set.
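
Before the project examples below, here is a minimal, self-contained sketch of the typical iterator() pattern on a HashSet (the class and variable names are illustrative only):

import java.util.HashSet;
import java.util.Iterator;

public class HashSetIteratorExample {
    public static void main(String[] args) {
        HashSet<String> colors = new HashSet<String>();
        colors.add("red");
        colors.add("green");
        colors.add("blue");

        // iterator() returns an Iterator over the set's elements;
        // iteration order of a HashSet is not guaranteed.
        Iterator<String> it = colors.iterator();
        while (it.hasNext()) {
            System.out.println(it.next());
        }

        // Iterator.remove() is the safe way to remove elements while iterating.
        Iterator<String> removing = colors.iterator();
        while (removing.hasNext()) {
            if ("green".equals(removing.next())) {
                removing.remove();
            }
        }
    }
}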

Usage

From source file:org.openremote.android.console.model.PollingHelper.java

/**
 * Instantiates a new polling helper.
 * 
 * @param ids the ids
 * @param context the context
 */
public PollingHelper(HashSet<Integer> ids, final Context context) {
    this.context = context;
    this.serverUrl = AppSettingsModel.getSecuredServer(context);
    readDeviceId(context);

    Iterator<Integer> id = ids.iterator();
    if (id.hasNext()) {
        pollingStatusIds = id.next().toString();
    }
    while (id.hasNext()) {
        pollingStatusIds = pollingStatusIds + "," + id.next();
    }

    handler = new Handler() {
        @Override
        public void handleMessage(Message msg) {
            isPolling = false;
            Log.i(LOG_CATEGORY, "polling failed and canceled." + msg.what);
            // Only switch the controller server on network, server, or request errors;
            // switching on any other error could cause an endless loop of server switches.
            int statusCode = msg.what;
            if (statusCode == NETWORK_ERROR || statusCode == ControllerException.SERVER_ERROR
                    || statusCode == ControllerException.REQUEST_ERROR) {
                ORControllerServerSwitcher.doSwitch(context);
            } else {
                ViewHelper.showAlertViewWithTitle(context, "Polling Error",
                        ControllerException.exceptionMessageOfCode(statusCode));
            }
        }
    };
}

From source file:com.thesmartweb.swebrank.DataManipulation.java

/**
 * Method that clears a List from duplicates and null elements
 * @param wordList It contains the List to be cleared
 * @return a List cleared from duplicates and null elements
 */
public List<String> clearListString(List<String> wordList) {
    //remove all null elements of the wordlist
    wordList.removeAll(Collections.singleton(null));
    //remove the duplicate elements since HashSet does not allow duplicates
    HashSet<String> hashSet_wordList = new HashSet<String>(wordList);
    //create an iterator to the hashset to add the elements back to the wordlist
    Iterator<String> wordList_iterator = hashSet_wordList.iterator();
    //clear the wordlist
    wordList.clear();
    while (wordList_iterator.hasNext()) {
        wordList.add(wordList_iterator.next());
    }
    return wordList;

}
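
For comparison, a small sketch of the same dedup-and-strip-nulls idea that preserves the original element order by using a LinkedHashSet with the same iterator() pattern (the class and method names here are illustrative, not part of the project above):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;

public class DedupExample {
    // Removes nulls and duplicates while preserving first-seen order.
    public static List<String> dedup(List<String> words) {
        words.removeAll(Collections.singleton(null));
        LinkedHashSet<String> set = new LinkedHashSet<String>(words);
        List<String> result = new ArrayList<String>();
        Iterator<String> it = set.iterator();
        while (it.hasNext()) {
            result.add(it.next());
        }
        return result;
    }
}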

From source file:opennlp.tools.parse_thicket.opinion_processor.StopList.java

public String getRandomFirstName() {
    HashSet<String> firstNames = m_stopHash.get("FIRST_NAMES");
    // pick a random index into the set, then advance the iterator to it
    int indexRand = (int) (Math.random() * firstNames.size());
    Iterator<String> iter = firstNames.iterator();
    for (int i = 0; i < indexRand; i++) {
        iter.next();
    }
    return iter.next().toLowerCase();
}

From source file:org.apache.tika.parser.ner.NLTKNERecogniserTest.java

@Test
public void testGetEntityTypes() throws Exception {
    System.setProperty(NamedEntityParser.SYS_PROP_NER_IMPL, NLTKNERecogniser.class.getName());
    Tika tika = new Tika(new TikaConfig(NamedEntityParser.class.getResourceAsStream("tika-config.xml")));

    JSONParser parser = new JSONParser();
    String text = "";

    HashMap<Integer, String> hmap = new HashMap<Integer, String>();
    HashMap<String, HashMap<Integer, String>> outerhmap = new HashMap<String, HashMap<Integer, String>>();

    int index = 0;
    //Input Directory Path
    String inputDirPath = "/Users/AravindMac/Desktop/polardata_json_grobid/application_pdf";
    int count = 0;

    try {

        File root = new File(inputDirPath);
        File[] listDir = root.listFiles();
        for (File filename : listDir) {

            if (!filename.getName().equals(".DS_Store") && count < 3239) {
                count += 1;
                System.out.println(count);

                String absoluteFilename = filename.getAbsolutePath().toString();

                //   System.out.println(absoluteFilename);
                //Read the json file, parse and retrieve the text present in the content field.

                Object obj = parser.parse(new FileReader(absoluteFilename));

                BufferedWriter bw = new BufferedWriter(new FileWriter(new File(absoluteFilename)));

                JSONObject jsonObject = (JSONObject) obj;
                text = (String) jsonObject.get("content");

                Metadata md = new Metadata();
                tika.parse(new ByteArrayInputStream(text.getBytes()), md);

                //Parse the content and retrieve the values tagged as the NER entities
                HashSet<String> set = new HashSet<String>();

                // Store values tagged as NER_NAMES
                set.addAll(Arrays.asList(md.getValues("NER_NAMES")));

                hmap = new HashMap<Integer, String>();
                index = 0;

                for (Iterator<String> i = set.iterator(); i.hasNext();) {
                    String f = i.next();
                    hmap.put(index, f);
                    index++;
                }

                if (!hmap.isEmpty()) {
                    outerhmap.put("NAMES", hmap);
                }

                JSONArray array = new JSONArray();
                array.put(outerhmap);

                if (!outerhmap.isEmpty()) {
                    jsonObject.put("NLTK", array); //Add the NER entities to the json under NER key as a JSON array.
                }

                System.out.println(jsonObject);

                bw.write(jsonObject.toJSONString()); // Stringify the JSON and write it back to the file
                bw.close();

            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

}

From source file:ORG.oclc.os.SRW.Lucene.SRWLuceneDatabase.java

@Override
public TermList getTermList(CQLTermNode cqlTermNode, int position, int maxTerms,
        ScanRequestType scanRequestType) {
    log.debug("in getTermList: cqlTermNode=" + cqlTermNode + ", position=" + position + ", maxTerms=" + maxTerms);
    TermList list = new TermList();
    if (position > 1) {
        log.debug("unsupported responsePosition=" + position);
        list.addDiagnostic(SRWDiagnostic.ResponsePositionOutOfRange, Integer.toString(position));
    } else {
        try {
            int i;
            Query q = translator.makeQuery(cqlTermNode);
            HashSet<Term> terms = new HashSet<Term>();
            q.extractTerms(terms);
            Term t = terms.iterator().next();
            log.debug("scan term=" + t);
            TermEnum te = searcher.getIndexReader().terms(t);
            ArrayList<TermType> v = new ArrayList<TermType>();
            for (i = position; i < 1; i++)
                te.next();
            for (i = 1; i <= maxTerms; i++) {
                v.add(new TermType(te.term().text(), new NonNegativeInteger("0"), null, null, null));
                if (!te.next())
                    break;
            }
            list.setTerms((TermType[]) v.toArray(new TermType[0]));
        } catch (SRWDiagnostic e) {
            list.addDiagnostic(e.getCode(), e.getAddInfo());
        } catch (IOException e) {
            log.error(e, e);
            list.addDiagnostic(SRWDiagnostic.GeneralSystemError, e.getMessage());
        }
    }
    return list;
}

From source file:com.gemstone.gemfire.rest.internal.web.controllers.RestAPIOnRegionFunctionExecutionDUnitTest.java

private void populateRRRegion() {
    Region region = CacheFactory.getAnyInstance().getRegion(REPLICATE_REGION_NAME);
    assertNotNull(region);

    final HashSet testKeys = new HashSet();
    for (int i = 17 * 3; i > 0; i--) {
        testKeys.add("execKey-" + i);
    }
    int j = 0;
    for (Iterator i = testKeys.iterator(); i.hasNext();) {
        Integer val = new Integer(j++);
        region.put(i.next(), val);
    }

}

From source file:gobblin.source.extractor.filebased.FileBasedSource.java

/**
 * This method takes the snapshot seen in the previous run and compares it to the list
 * of files currently in the source. It then decides which files need to be pulled
 * and distributes those files across the workunits. The comparison is done by matching
 * the names of the files currently in the source against the names retrieved from the
 * previous state.
 * @param state is the source state
 * @return a list of workunits for the framework to run
 */
@Override
public List<WorkUnit> getWorkunits(SourceState state) {
    initLogger(state);
    try {
        initFileSystemHelper(state);
    } catch (FileBasedHelperException e) {
        Throwables.propagate(e);
    }

    log.info("Getting work units");
    String nameSpaceName = state.getProp(ConfigurationKeys.EXTRACT_NAMESPACE_NAME_KEY);
    String entityName = state.getProp(ConfigurationKeys.SOURCE_ENTITY);

    // Override extract table name
    String extractTableName = state.getProp(ConfigurationKeys.EXTRACT_TABLE_NAME_KEY);

    // If extract table name is not found then consider entity name as extract table name
    if (Strings.isNullOrEmpty(extractTableName)) {
        extractTableName = entityName;
    }

    TableType tableType = TableType
            .valueOf(state.getProp(ConfigurationKeys.EXTRACT_TABLE_TYPE_KEY).toUpperCase());
    List<WorkUnitState> previousWorkunits = Lists.newArrayList(state.getPreviousWorkUnitStates());
    Set<String> prevFsSnapshot = Sets.newHashSet();

    // Get list of files seen in the previous run
    if (!previousWorkunits.isEmpty()) {
        if (previousWorkunits.get(0).getWorkunit().contains(ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT)) {
            prevFsSnapshot = previousWorkunits.get(0).getWorkunit()
                    .getPropAsSet(ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT);
        } else if (state.getPropAsBoolean(ConfigurationKeys.SOURCE_FILEBASED_FS_PRIOR_SNAPSHOT_REQUIRED,
                ConfigurationKeys.DEFAULT_SOURCE_FILEBASED_FS_PRIOR_SNAPSHOT_REQUIRED)) {
            // If a previous job exists, there should be a snapshot property.  If not, we need to fail so that we
            // don't accidentally read files that have already been processed.
            throw new RuntimeException(String.format("No '%s' found on state of prior job",
                    ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT));
        }
    }

    List<WorkUnit> workUnits = Lists.newArrayList();
    List<WorkUnit> previousWorkUnitsForRetry = this.getPreviousWorkUnitsForRetry(state);
    log.info("Total number of work units from the previous failed runs: " + previousWorkUnitsForRetry.size());
    for (WorkUnit previousWorkUnitForRetry : previousWorkUnitsForRetry) {
        prevFsSnapshot.addAll(
                previousWorkUnitForRetry.getPropAsSet(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL));
        workUnits.add(previousWorkUnitForRetry);
    }

    // Get list of files that need to be pulled
    List<String> currentFsSnapshot = this.getcurrentFsSnapshot(state);
    HashSet<String> filesWithTimeToPull = new HashSet<>(currentFsSnapshot);
    filesWithTimeToPull.removeAll(prevFsSnapshot);
    List<String> filesToPull = new ArrayList<>();
    Iterator<String> it = filesWithTimeToPull.iterator();
    while (it.hasNext()) {
        String[] filesWithoutTimeToPull = it.next().split(this.splitPattern);
        filesToPull.add(filesWithoutTimeToPull[0]);
    }

    if (!filesToPull.isEmpty()) {
        logFilesToPull(filesToPull);

        int numPartitions = state.contains(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS)
                && state.getPropAsInt(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS) <= filesToPull.size()
                        ? state.getPropAsInt(ConfigurationKeys.SOURCE_MAX_NUMBER_OF_PARTITIONS)
                        : filesToPull.size();
        if (numPartitions <= 0) {
            throw new IllegalArgumentException("The number of partitions should be positive");
        }

        int filesPerPartition = filesToPull.size() % numPartitions == 0 ? filesToPull.size() / numPartitions
                : filesToPull.size() / numPartitions + 1;

        // Distribute the files across the workunits
        for (int fileOffset = 0; fileOffset < filesToPull.size(); fileOffset += filesPerPartition) {
            SourceState partitionState = new SourceState();
            partitionState.addAll(state);

            // Eventually these setters should be integrated with framework support for generalized watermark handling
            partitionState.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_SNAPSHOT,
                    StringUtils.join(currentFsSnapshot, ","));

            List<String> partitionFilesToPull = filesToPull.subList(fileOffset,
                    fileOffset + filesPerPartition > filesToPull.size() ? filesToPull.size()
                            : fileOffset + filesPerPartition);
            partitionState.setProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL,
                    StringUtils.join(partitionFilesToPull, ","));
            if (state.getPropAsBoolean(ConfigurationKeys.SOURCE_FILEBASED_PRESERVE_FILE_NAME, false)) {
                if (partitionFilesToPull.size() != 1) {
                    throw new RuntimeException(
                            "Cannot preserve the file name if a workunit is given multiple files");
                }
                partitionState.setProp(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR,
                        partitionState.getProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL));
            }

            // Use extract table name to create extract
            Extract extract = partitionState.createExtract(tableType, nameSpaceName, extractTableName);
            workUnits.add(partitionState.createWorkUnit(extract));
        }

        log.info("Total number of work units for the current run: "
                + (workUnits.size() - previousWorkUnitsForRetry.size()));
    }

    return workUnits;
}

From source file:org.squale.squalix.tools.macker.MackerTask.java

/**
 * Configures Macker with the rules file and the classes it must analyze.
 * 
 * @param pFilesToAnalyse the compiled files to analyze
 * @param pConfigFile the Macker configuration file
 * @throws ClassParseException on error
 * @throws RulesException on error
 * @throws IOException on error
 * @return Macker the configured Macker instance
 */
protected Macker configMacker(HashSet pFilesToAnalyse, File pConfigFile)
        throws RulesException, ClassParseException, IOException {
    Macker macker = new Macker();
    Iterator filesIt = pFilesToAnalyse.iterator();
    while (filesIt.hasNext()) {
        try {
            macker.addClass(new File((String) filesIt.next()));
        } catch (IllegalStateException ise) {
            // We just log a warning. This error can occur when, for example, the
            // user has left stray .class files in the .class output directory.
            LOGGER.warn(ise.getMessage());
            initError(ise.getMessage());
        }
    }
    // Register the configuration file
    macker.addRulesFile(pConfigFile);
    return macker;
}

From source file:org.entrystore.rest.util.jdil.JDIL.java

public JSONObject exportGraphToJDIL(Graph graph, Resource root) {
    try {
        HashMap<Resource, JSONObject> res2Jdil = new HashMap<Resource, JSONObject>();
        HashSet<Resource> notRoots = new HashSet<Resource>();

        for (Statement statement : graph) {
            JSONObject subj = getOrCreateSubject(statement.getSubject(), res2Jdil);
            String predicate = namespaces.abbreviate(statement.getPredicate().stringValue());
            notRoots.add(statement.getPredicate());
            Value value = statement.getObject();

            if (value instanceof Resource) {
                /*
                 * Create a new JDIL value to accumulate to the subject.
                 */
                JSONObject JDILValueObject = getOrCreateObject((Resource) value, res2Jdil);

                subj.accumulate(predicate, JDILValueObject);
                notRoots.add((Resource) value);

            } else {
                Literal lit = (Literal) value;
                String language = lit.getLanguage();
                URI datatype = lit.getDatatype();
                JSONObject object = new JSONObject();
                object.accumulate("@value", value.stringValue());
                if (language != null) {
                    object.accumulate("@language", language);
                } else if (datatype != null) {
                    object.accumulate("@datatype", datatype.stringValue());
                }
                subj.accumulate(predicate, object);
            }
        }
        if (root != null) {
            JSONObject obj = res2Jdil.get(root);
            cutLoops(obj, new HashSet());
            return obj;
        }
        HashSet<Resource> roots = new HashSet<Resource>(res2Jdil.keySet());
        roots.removeAll(notRoots);
        if (roots.size() == 1) {
            JSONObject obj = res2Jdil.get(roots.iterator().next());
            cutLoops(obj, new HashSet());
            return obj;
        }
    } catch (JSONException jse) {
        log.error(jse.getMessage());
    }
    return null;
}

From source file:org.apache.forrest.solr.client.SolrSearchGenerator.java

private PostMethod preparePost() {
    PostMethod filePost = new PostMethod(destination);
    filePost.addRequestHeader("User-Agent", AGENT);
    Iterator keys = map.keySet().iterator();
    HashSet set = new HashSet();
    while (keys.hasNext()) {
        String element = (String) keys.next();
        if (!QUERY_PARAM.equals(element)) {
            String value = (String) map.get(element);
            set.add(new NameValuePair(element, value));
        }
    }
    //make sure we send the query (even if null) to get a response
    set.add(new NameValuePair(QUERY_PARAM, query));
    for (Iterator iter = set.iterator(); iter.hasNext();) {
        filePost.addParameter((NameValuePair) iter.next());
    }
    return filePost;
}