Example usage for java.util Set parallelStream

Introduction

On this page you can find example usages of java.util Set parallelStream.

Prototype

default Stream<E> parallelStream() 

Document

Returns a possibly parallel Stream with this collection as its source.
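
For orientation, here is a minimal, self-contained sketch (not taken from the usage examples below) contrasting stream() and parallelStream() on a Set; the element values are arbitrary placeholders.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

public class ParallelStreamSketch {
    public static void main(String[] args) {
        Set<String> names = new HashSet<>(Arrays.asList("alpha", "beta", "gamma"));

        // Sequential stream: elements are processed on the calling thread.
        long longNames = names.stream().filter(n -> n.length() > 4).count();

        // Possibly parallel stream: the pipeline may be split across the
        // common fork/join pool. The result is the same; only the order in
        // which elements are visited internally may differ.
        String joined = names.parallelStream()
                .map(String::toUpperCase)
                .sorted()
                .collect(Collectors.joining(", "));

        System.out.println(longNames + " long names; joined: " + joined);
    }
}

Note that parallelStream() only returns a possibly parallel stream; for a small set like the one above, the overhead of splitting usually outweighs any gain.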

Usage

From source file:se.idsecurity.ldifcompare.LdifCompare.java

private void getUniqueEntriesUsingDN(Set<Entry> source, Set<Entry> target, LDIFWriter ldifWriterUnique) {
    Runnable r = () -> {
        StopWatch sw = new StopWatch();
        sw.start();
        ConcurrentMap<String, Entry> targetMap = target.parallelStream()
                .collect(Collectors.toConcurrentMap(Entry::getDN, Function.identity()));

        for (Entry e : source) {
            //Get unique entries from the rightLdif file based on DN - no entry with the same DN exists in leftLdif
            String dn = e.getDN();
            if (!targetMap.containsKey(dn)) {
                try {
                    ldifWriterUnique.writeEntry(e);//Entry only exists in rightLdif
                } catch (IOException ex) {
                    logger.error("Error writing to LDIF file", ex);
                }
            }
        }
        fileWriteCdl.countDown();

        sw.stop();
        logger.error("Time taken to process getUniqueEntriesUsingDN(): " + sw.toString());
        sw.reset();
    };

    exec.execute(r);

}
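
The method above builds a concurrent index of the target set in parallel and then scans the source set against it. A reduced, self-contained sketch of that pattern, using plain strings in place of LDAP entries (the DN values below are made up for illustration):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import java.util.stream.Collectors;

public class DnDiffSketch {
    public static void main(String[] args) {
        Set<String> source = new HashSet<>(Arrays.asList("cn=alice", "cn=bob", "cn=carol"));
        Set<String> target = new HashSet<>(Arrays.asList("cn=alice", "cn=carol"));

        // Index the target set in parallel; toConcurrentMap is safe to use
        // with a parallel stream because it merges into one concurrent map.
        ConcurrentMap<String, String> targetMap = target.parallelStream()
                .collect(Collectors.toConcurrentMap(Function.identity(), Function.identity()));

        // Anything in source with no matching key exists only in source.
        for (String dn : source) {
            if (!targetMap.containsKey(dn)) {
                System.out.println("Only in source: " + dn);
            }
        }
    }
}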

From source file:uk.co.flax.biosolr.builders.ChildNodeFacetTreeBuilder.java

@Override
public List<TreeFacetField> processFacetTree(SolrIndexSearcher searcher, Map<String, Integer> facetMap)
        throws IOException {
    // Check that all of the given fields are in the searcher's schema
    checkFieldsInSchema(searcher, docFields);

    // Extract the facet keys to a volatile set
    Set<String> facetKeys = new HashSet<>(facetMap.keySet());

    // Build a map of parent - child node IDs. This should contain the parents
    // of all our starting facet terms.
    Map<String, Set<String>> nodeChildren = findParentEntries(searcher, facetKeys);

    // Find the details for the starting facet terms, if there are any which haven't 
    // been found already.
    facetKeys.removeAll(nodeChildren.keySet());
    nodeChildren.putAll(filterEntriesByField(searcher, facetKeys, getNodeField()));

    // Find the top nodes
    Set<String> topNodes = findTopLevelNodes(nodeChildren);
    LOGGER.debug("Found {} top level nodes", topNodes.size());

    // Convert to a list of TreeFacetFields
    return topNodes.parallelStream().map(node -> buildAccumulatedEntryTree(0, node, nodeChildren, facetMap))
            .collect(Collectors.toList());
}

From source file:uk.co.flax.biosolr.builders.ParentNodeFacetTreeBuilder.java

@Override
public List<TreeFacetField> processFacetTree(SolrIndexSearcher searcher, Map<String, Integer> facetMap)
        throws IOException {
    checkFieldsInSchema(searcher, docFields);

    // Extract the facet keys to a volatile set
    Set<String> facetKeys = new HashSet<>(facetMap.keySet());

    // Build a map of parent - child node IDs. This should contain the parents
    // of all our starting facet terms.
    Map<String, Set<String>> nodeChildren = findParentEntries(searcher, facetKeys);

    // Find the top nodes
    Set<String> topNodes = findTopLevelNodes(nodeChildren);
    LOGGER.debug("Found {} top level nodes", topNodes.size());

    // Convert to a list of TreeFacetFields
    return topNodes.parallelStream().map(node -> buildAccumulatedEntryTree(0, node, nodeChildren, facetMap))
            .collect(Collectors.toList());
}
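
The two builders above end with the same step: map each top-level node to a tree in parallel and collect the results into a list. A trimmed-down sketch of that map-and-collect step, with a trivial mapping function standing in for buildAccumulatedEntryTree:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class TopNodeMappingSketch {
    public static void main(String[] args) {
        Set<String> topNodes = new HashSet<>(Arrays.asList("root_a", "root_b", "root_c"));

        // Each node is mapped independently, so the work can be spread over
        // the common fork/join pool; collect() gathers the results back
        // into a single List.
        List<String> trees = topNodes.parallelStream()
                .map(node -> "tree(" + node + ")") // stands in for buildAccumulatedEntryTree(0, node, ...)
                .collect(Collectors.toList());

        trees.forEach(System.out::println);
    }
}

Parallelising this step only pays off when the per-node mapping is expensive relative to the cost of splitting the set, which is presumably why a parallel stream was used in the builders above.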

From source file:utils.StringManip.java

public static String getMultiwordRegexString(Set<String> stopwords) {
    return StringUtils.join(
            stopwords.parallelStream().map(StringManip::getOnlyWordRegex).collect(Collectors.toList()), "|");
}
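
A hypothetical usage sketch of the same idea: getOnlyWordRegex is not shown above, so the word-boundary helper below is an assumption about what it might produce, and Collectors.joining replaces StringUtils.join to keep the sketch free of the commons-lang dependency.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class StopwordRegexSketch {
    // Hypothetical stand-in for StringManip::getOnlyWordRegex.
    private static String getOnlyWordRegex(String word) {
        return "\\b" + Pattern.quote(word) + "\\b";
    }

    public static String getMultiwordRegexString(Set<String> stopwords) {
        return stopwords.parallelStream()
                .map(StopwordRegexSketch::getOnlyWordRegex)
                .collect(Collectors.joining("|"));
    }

    public static void main(String[] args) {
        Set<String> stopwords = new HashSet<>(Arrays.asList("as well as", "of the"));
        // Prints one alternation per stopword, e.g. \b\Qas well as\E\b|\b\Qof the\E\b
        // (the order depends on the set's iteration order).
        System.out.println(getMultiwordRegexString(stopwords));
    }
}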