Example usage for java.util HashSet addAll

List of usage examples for java.util HashSet addAll

Introduction

On this page you can find usage examples for java.util HashSet addAll.

Prototype

boolean addAll(Collection<? extends E> c);

Document

Adds all of the elements in the specified collection to this set if they're not already present (optional operation).
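
A minimal, self-contained sketch of how addAll behaves (the class and variable names here are illustrative, not taken from the examples below): addAll returns true if the set changed as a result of the call, and elements already present are simply ignored.

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

public class HashSetAddAllDemo {
    public static void main(String[] args) {
        HashSet<String> tags = new HashSet<String>(Arrays.asList("java", "set"));
        List<String> more = Arrays.asList("set", "collection");

        // returns true because "collection" was not already present;
        // the duplicate "set" is ignored
        boolean changed = tags.addAll(more);

        System.out.println(changed); // true
        System.out.println(tags);    // e.g. [collection, java, set] (iteration order is not guaranteed)
    }
}

The usage examples below apply the same pattern to merge elements from several sources into one duplicate-free set.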

Usage

From source file:org.eurekastreams.server.action.execution.stream.GetAllPopularHashTagsFromGroupsJoinedExecution.java

/**
 * Get the popular hashtags for an activity stream.
 *
 * @param inActionContext
 *            the action context
 * @return an ArrayList of the popular hashtags
 * @throws ExecutionException
 *             on error
 */
@Override
public Serializable execute(final PrincipalActionContext inActionContext) throws ExecutionException {
    GetCurrentUserStreamFiltersResponse groupResponse = (GetCurrentUserStreamFiltersResponse) getGroups
            .execute(inActionContext);
    List<StreamPopularHashTagsRequest> requests = new ArrayList<StreamPopularHashTagsRequest>();

    for (StreamFilter filter : groupResponse.getStreamFilters()) {
        StreamPopularHashTagsRequest request = new StreamPopularHashTagsRequest(ScopeType.GROUP,
                ((GroupStreamDTO) filter).getShortName());
        requests.add(request);
    }

    StreamPopularHashTagsRequest personRequest = new StreamPopularHashTagsRequest(ScopeType.PERSON,
            inActionContext.getPrincipal().getAccountId());
    requests.add(personRequest);

    List<StreamPopularHashTagsReportDTO> responses = popularHashTagsMapper.execute(requests);

    HashSet<String> result = new HashSet<String>();

    if (responses.size() != requests.size()) {
        log.warn("response size does not equal request size");
    }

    for (StreamPopularHashTagsReportDTO response : responses) {
        if (response.getPopularHashTags() != null) {
            result.addAll(response.getPopularHashTags());
        }
    }

    return result;
}

From source file:edu.msu.cme.rdp.kmer.cli.KmerCoverage.java

public void printCovereage(OutputStream coverage_out, OutputStream abundance_out) throws IOException {
    adjustCount();
    // print out the weighted kmer coverage
    // we found that mean coverage matched previous biological observations
    PrintStream coverage_outStream = new PrintStream(coverage_out);
    coverage_outStream.println("#total reads: " + totalReads.intValue());
    coverage_outStream.println("#use mean_cov to adjust the contig abundance, not median_cov ");
    coverage_outStream.println("#seqid\tmean_cov\tmedian_cov\ttotal_pos\tcovered_pos\tcovered_ratio");

    for (Contig contig : contigMap.values()) {
        ArrayList<Double> counts = new ArrayList<Double>();
        int coveredPos = 0;
        for (int pos = 0; pos < contig.coverage.length; pos++) {
            if (contig.coverage[pos] > 0) {
                coveredPos++;
            }
            counts.add(contig.coverage[pos]);
        }
        if (coveredPos > 0) {
            coverage_outStream.println(contig.name + "\t" + String.format(dformat, StdevCal.calMean(counts))
                    + "\t" + String.format(dformat, (StdevCal.calMedian(counts))) + "\t" + counts.size() + "\t"
                    + coveredPos + "\t"
                    + String.format(dformat, (double) coveredPos / (double) contig.coverage.length));
        } else { // no coverage
            coverage_outStream.println(
                    contig.name + "\t" + 0 + "\t" + 0 + "\t" + contig.coverage.length + "\t" + 0 + "\t" + 0);
        }
    }
    coverage_outStream.close();

    // print kmer abundance
    HashMap<Integer, Integer> abundanceCountMap = new HashMap<Integer, Integer>(); // the frequency of each kmer abundance value
    PrintStream abundance_outStream = new PrintStream(abundance_out);
    // need to merge the counts from forward and reverse together.
    HashSet<Kmer> kmerSet = new HashSet<Kmer>();
    kmerSet.addAll(kmerMaps[0].keySet());
    for (Kmer kmer : kmerSet) {
        AtomicInteger abundance = kmerMaps[0].get(kmer).count;

        String reverseKmerStr = IUBUtilities.reverseComplement(kmer.decodeLong(kmer.getLongKmers()));
        Kmer reverseKmer = (new NuclKmerGenerator(reverseKmerStr, this.kmerSize)).next();
        KmerAbund kmerAbund = kmerMaps[1].get(reverseKmer);

        if (kmerAbund != null) {
            abundance.addAndGet(kmerAbund.count.get());
        }

        Integer count = abundanceCountMap.get(abundance.get());
        if (count == null) {
            abundanceCountMap.put(abundance.get(), 1);
        } else {
            abundanceCountMap.put(abundance.get(), count + 1);
        }
    }

    abundance_outStream.println("kmer_abundance\tfrequency");
    for (Integer abundance : abundanceCountMap.keySet()) {
        abundance_outStream.println(abundance + "\t" + abundanceCountMap.get(abundance));
    }
    abundance_outStream.close();
}

From source file:de.uni_potsdam.hpi.bpt.promnicat.persistenceApi.orientdbObj.index.IndexIntersection.java

/**
 * Load the intersecting referenced objects from the specified indices.
 * First load the database ids from all indices, intersect them, and load the remaining ids.
 *
 * @return the resulting {@link IndexCollectionElement}s
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public Collection<IndexCollectionElement<V>> load() {

    //load dbIds only and sort them by result set size
    TreeList rawResults = new TreeList(); //no generics possible
    int maxSize = 0;
    for (AbstractIndex index : indices) {
        ResultSet<V> oneResultSet = new ResultSet<V>(index.loadIdsOnly(), index.getName());
        rawResults.add(oneResultSet);
        maxSize = Math.max(maxSize, oneResultSet.getSize());
    }

    // create a list of intersecting dbIds
    // start with the smallest result set, intersect it with the second smallest, then with the third smallest, and so on
    HashSet<String> intersectingDbIds = new HashSet<String>(maxSize);
    for (Object r : rawResults) {
        ResultSet<V> aResult = (ResultSet<V>) r;

        if (intersectingDbIds.isEmpty()) {
            intersectingDbIds.addAll(aResult.getDbIds());
        } else {
            intersectingDbIds.retainAll(aResult.getDbIds());
        }

        if (intersectingDbIds.isEmpty()) {
            break;
        }
    }

    // group IndexElements by referenced dbId; each group is stored in an IndexCollectionElement
    HashMap<String, IndexCollectionElement<V>> finalElements = new HashMap<String, IndexCollectionElement<V>>(
            indices.size());
    for (Object r : rawResults) {
        ResultSet<V> aResult = (ResultSet<V>) r;
        for (IndexElement indexElement : aResult.getList()) {
            String currentString = indexElement.getDbId();
            if (intersectingDbIds.contains(currentString)) {
                if (!finalElements.containsKey(currentString)) {
                    finalElements.put(currentString, new IndexCollectionElement<V>(currentString));
                }
                finalElements.get(currentString).addIndexElements(indexElement);
            }
        }
    }

    //load pojos
    for (IndexCollectionElement<V> collectionElement : finalElements.values()) {
        collectionElement.loadPojo(papi);
    }

    return finalElements.values();
}

From source file:org.hyperic.hq.events.ext.RegisteredTriggers.java

public Collection<RegisterableTriggerInterface> getInterestedTriggers(AbstractEvent event) {
    HashSet<RegisterableTriggerInterface> trigs = new HashSet<RegisterableTriggerInterface>();

    // Can't very well look up a null object
    if (event.getInstanceId() != null) {
        // Get the triggers that are interested in this instance
        trigs.addAll(getInterestedTriggers(event, event.getInstanceId()));
    }
    // Get the triggers that are interested in all instances
    trigs.addAll(getInterestedTriggers(event, KEY_ALL));
    return trigs;
}

From source file:com.datatorrent.lib.io.fs.AbstractFSDirectoryInputOperatorFailureHandlingTest.java

@Test
public void testFailureHandling() throws Exception {
    FileContext.getLocalFSFileContext().delete(new Path(new File(testMeta.dir).getAbsolutePath()), true);
    HashSet<String> allLines = Sets.newHashSet();
    // Create files with 100 records.
    for (int file = 0; file < 10; file++) {
        HashSet<String> lines = Sets.newHashSet();
        for (int line = 0; line < 10; line++) {
            lines.add("f" + file + "l" + line);
        }
        allLines.addAll(lines);
        FileUtils.write(new File(testMeta.dir, "file" + file), StringUtils.join(lines, '\n'));
    }

    Thread.sleep(10);

    TestFSDirectoryInputOperator oper = new TestFSDirectoryInputOperator();

    CollectorTestSink<String> queryResults = new CollectorTestSink<String>();
    @SuppressWarnings({ "unchecked", "rawtypes" })
    CollectorTestSink<Object> sink = (CollectorTestSink) queryResults;
    oper.output.setSink(sink);

    oper.setDirectory(testMeta.dir);
    oper.getScanner().setFilePatternRegexp(".*file[\\d]");

    oper.setup(null);
    for (long wid = 0; wid < 1000; wid++) {
        oper.beginWindow(wid);
        oper.emitTuples();
        oper.endWindow();
    }
    oper.teardown();

    Assert.assertEquals("number tuples", 100, queryResults.collectedTuples.size());
    Assert.assertEquals("lines", allLines, new HashSet<String>(queryResults.collectedTuples));

}

From source file:org.apache.apex.malhar.lib.io.fs.AbstractFileInputOperatorFailureHandlingTest.java

@Test
public void testFailureHandling() throws Exception {
    FileContext.getLocalFSFileContext().delete(new Path(new File(testMeta.getDir()).getAbsolutePath()), true);
    HashSet<String> allLines = Sets.newHashSet();
    // Create files with 100 records.
    for (int file = 0; file < 10; file++) {
        HashSet<String> lines = Sets.newHashSet();
        for (int line = 0; line < 10; line++) {
            lines.add("f" + file + "l" + line);
        }
        allLines.addAll(lines);
        FileUtils.write(new File(testMeta.getDir(), "file" + file), StringUtils.join(lines, '\n'));
    }

    Thread.sleep(10);

    TestFileInputOperator oper = new TestFileInputOperator();

    CollectorTestSink<String> queryResults = new CollectorTestSink<String>();
    @SuppressWarnings({ "unchecked", "rawtypes" })
    CollectorTestSink<Object> sink = (CollectorTestSink) queryResults;
    oper.output.setSink(sink);

    oper.setDirectory(testMeta.getDir());
    oper.getScanner().setFilePatternRegexp(".*file[\\d]");

    oper.setup(mockOperatorContext(1, new Attribute.AttributeMap.DefaultAttributeMap()));
    for (long wid = 0; wid < 1000; wid++) {
        oper.beginWindow(wid);
        oper.emitTuples();
        oper.endWindow();
    }
    oper.teardown();

    Assert.assertEquals("number tuples", 100, queryResults.collectedTuples.size());
    Assert.assertEquals("lines", allLines, new HashSet<String>(queryResults.collectedTuples));
    TestUtils.deleteTargetTestClassFolder(testMeta.desc);

}

From source file:com.datatorrent.lib.io.fs.AbstractFileInputOperatorFailureHandlingTest.java

@Test
public void testFailureHandling() throws Exception {
    FileContext.getLocalFSFileContext().delete(new Path(new File(testMeta.getDir()).getAbsolutePath()), true);
    HashSet<String> allLines = Sets.newHashSet();
    // Create files with 100 records.
    for (int file = 0; file < 10; file++) {
        HashSet<String> lines = Sets.newHashSet();
        for (int line = 0; line < 10; line++) {
            lines.add("f" + file + "l" + line);
        }
        allLines.addAll(lines);
        FileUtils.write(new File(testMeta.getDir(), "file" + file), StringUtils.join(lines, '\n'));
    }

    Thread.sleep(10);

    TestFileInputOperator oper = new TestFileInputOperator();

    CollectorTestSink<String> queryResults = new CollectorTestSink<String>();
    @SuppressWarnings({ "unchecked", "rawtypes" })
    CollectorTestSink<Object> sink = (CollectorTestSink) queryResults;
    oper.output.setSink(sink);

    oper.setDirectory(testMeta.getDir());
    oper.getScanner().setFilePatternRegexp(".*file[\\d]");

    oper.setup(new OperatorContextTestHelper.TestIdOperatorContext(1,
            new Attribute.AttributeMap.DefaultAttributeMap()));
    for (long wid = 0; wid < 1000; wid++) {
        oper.beginWindow(wid);
        oper.emitTuples();
        oper.endWindow();
    }
    oper.teardown();

    Assert.assertEquals("number tuples", 100, queryResults.collectedTuples.size());
    Assert.assertEquals("lines", allLines, new HashSet<String>(queryResults.collectedTuples));
    TestUtils.deleteTargetTestClassFolder(testMeta.desc);

}

From source file:com.redhat.victims.database.VictimsSqlDB.java

public HashSet<String> getVulnerabilities(VictimsRecord vr) throws VictimsException {
    try {
        if (cache.exists(vr.hash)) {
            return cache.get(vr.hash);
        }
        HashSet<String> cves = new HashSet<String>();
        // Match jar sha512
        cves.addAll(getVulnerabilities(vr.hash.trim()));
        // Match any embedded filehashes
        cves.addAll(getEmbeddedVulnerabilities(vr));
        cache.add(vr.hash, cves);
        return cves;
    } catch (Throwable e) {
        throw new VictimsException("Could not determine vulnerabilities for hash: " + vr.hash, e);
    }
}

From source file:org.archive.crawler.spring.SheetOverlaysManager.java

/** 
 * Ensure all sheets are 'primed' after the entire ApplicationContext
 * is assembled. This ensures target HasKeyedProperties beans know
 * any long paths by which their properties are addressed, and 
 * handles (by either PropertyEditor-conversion or a fast-failure)
 * any type-mismatches between overlay values and their target
 * properties.
 * @see org.springframework.context.ApplicationListener#onApplicationEvent(org.springframework.context.ApplicationEvent)
 */
@Override
public void onApplicationEvent(ApplicationEvent event) {
    if (event instanceof ContextRefreshedEvent) {
        for (Sheet s : sheetsByName.values()) {
            s.prime(); // exception if Sheet can't target overridable properties
        }
        // log warning for any sheets named but not present
        HashSet<String> allSheetNames = new HashSet<String>();
        for (DecideRuledSheetAssociation assoc : ruleAssociations) {
            allSheetNames.addAll(assoc.getTargetSheetNames());
        }
        for (List<String> names : sheetNamesBySurt.values()) {
            allSheetNames.addAll(names);
        }
        for (String name : allSheetNames) {
            if (!sheetsByName.containsKey(name)) {
                logger.warning("sheet '" + name + "' referenced but absent");
            }
        }
    }
}

From source file:com.clustercontrol.repository.util.FacilityTreeCache.java

private static HashSet<String> getAuthorizedRoleIdSet(FacilityInfo facilityInfo,
        FacilityTreeItem parentTreeItem, HashMap<String, ArrayList<String>> objectRoleMap) {

    HashSet<String> roleIdSet = new HashSet<String>();
    // the administrator and Hinemos module roles are always authorized
    roleIdSet.add(RoleIdConstant.ADMINISTRATORS);
    roleIdSet.add(RoleIdConstant.HINEMOS_MODULE);

    // if the facility is a scope, its owner role is also authorized
    if (facilityInfo.getFacilityType() == FacilityConstant.TYPE_SCOPE) {
        roleIdSet.add(facilityInfo.getOwnerRoleId());
    }

    // add any roles granted on this facility via object privileges
    ArrayList<String> roleIdList = objectRoleMap.get(facilityInfo.getFacilityId());
    if (roleIdList != null) {
        roleIdSet.addAll(roleIdList);
    }

    // inherit the authorized roles of the parent scope, if any
    if (parentTreeItem != null && parentTreeItem.getData().getFacilityType() == FacilityConstant.TYPE_SCOPE
            && parentTreeItem.getAuthorizedRoleIdSet() != null) {
        roleIdSet.addAll(parentTreeItem.getAuthorizedRoleIdSet());
    }

    return roleIdSet;
}