Example usage for java.util HashSet addAll

List of usage examples for java.util HashSet addAll

Introduction

This page lists example usages of java.util.HashSet.addAll, collected from open-source projects.

Prototype

boolean addAll(Collection<? extends E> c);

Document

Adds all of the elements in the specified collection to this set if they're not already present (optional operation).
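
The call returns true if the set changed as a result. A minimal self-contained sketch (the class and variable names here are illustrative, not taken from the projects below):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;

public class AddAllDemo {
    public static void main(String[] args) {
        HashSet<String> set = new HashSet<String>();
        set.add("a");

        List<String> more = Arrays.asList("a", "b", "c");

        // addAll returns true because at least one element ("b" and "c")
        // was not already present in the set
        boolean changed = set.addAll(more);

        System.out.println(changed);    // true
        System.out.println(set.size()); // 3: the duplicate "a" was ignored
    }
}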

Usage

From source file:de.dfki.km.perspecting.obie.experiments.PhraseExperiment.java

/**
 * Test method for
 * {@link de.dfki.km.perspecting.obie.dixi.service.SimpleScobieService#extractInformationFromURL(java.lang.String, java.lang.String)}
 * .
 */
@Test
public void analyseTokenPhraseFrequencies() {
    final String template = "SELECT * WHERE {?s ?p ?o}";

    try {
        final BufferedWriter bw = new BufferedWriter(
                new FileWriter($SCOOBIE_HOME + "results/token_phrase_frequency_wikipedia.csv"));

        final String randomWikipediaPage = "http://en.wikipedia.org/wiki/Special:Random";

        bw.append("tok in doc\tnp in doc\ttok in nps\tdistinct tok in nps\tdistinct tok in doc");
        bw.newLine(); // end the header line before the data rows
        for (int i = 0; i < 100; i++) {

            Document document = pipeline.createDocument(FileUtils.toFile(new URL(randomWikipediaPage)),
                    new URI(randomWikipediaPage), MediaType.HTML, template, Language.EN);

            for (int step = 0; pipeline.hasNext(step) && step <= 5; step = pipeline.execute(step, document)) {
                System.out.println(step);
            }

            HashSet<String> wordsOfPhrases = new HashSet<String>();
            HashSet<String> wordsOfDocument = new HashSet<String>();

            for (Token token : document.getTokens()) {
                wordsOfDocument.add(token.toString());
            }

            int count = 0;
            for (TokenSequence<String> np : document.getNounPhrases()) {
                String[] words = np.toString().split("[\\s]+");
                count += words.length;
                wordsOfPhrases.addAll(Arrays.asList(words));
            }

            bw.append(document.getTokens().size() + "\t" + document.getNounPhrases().size() + "\t" + count
                    + "\t" + wordsOfPhrases.size() + "\t" + wordsOfDocument.size());
            bw.newLine();

        }
        bw.close();
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    try {
        final BufferedWriter bw = new BufferedWriter(
                new FileWriter($SCOOBIE_HOME + "results/token_phrase_frequency_reuters.csv"));

        final TextCorpus corpus = new TextCorpus(new File("../corpora/reuters/reuters.zip"), MediaType.ZIP,
                MediaType.HTML, Language.EN);

        bw.append("tok in doc\tnp in doc\ttok in nps\tdistinct tok in nps\tdistinct tok in doc");
        bw.newLine(); // end the header line before the data rows

        corpus.forEach(new DocumentProcedure<URI>() {

            @Override
            public URI process(Reader reader, URI uri) throws Exception {

                Document document = pipeline.createDocument(reader, uri, corpus.getMediatype(), template,
                        corpus.getLanguage());

                for (int step = 0; pipeline.hasNext(step)
                        && step <= 5; step = pipeline.execute(step, document)) {
                    System.out.println(step);
                }

                HashSet<String> wordsOfPhrases = new HashSet<String>();
                HashSet<String> wordsOfDocument = new HashSet<String>();

                for (Token token : document.getTokens()) {
                    wordsOfDocument.add(token.toString());
                }

                int count = 0;
                for (TokenSequence<String> np : document.getNounPhrases()) {
                    String[] words = np.toString().split("[\\s]+");
                    count += words.length;
                    wordsOfPhrases.addAll(Arrays.asList(words));
                }

                bw.append(document.getTokens().size() + "\t" + document.getNounPhrases().size() + "\t" + count
                        + "\t" + wordsOfPhrases.size() + "\t" + wordsOfDocument.size());
                bw.newLine();
                return uri;
            }
        });

        bw.close();
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

}

From source file:org.apache.giraph.examples.Giraphx.java

/** Removes duplicate edge indices by copying the combined list through a HashSet. */
private void initAllEdgeIndexList() {
    allEdgeIndexList.addAll(incomingEdgeIndexList);
    allEdgeIndexList.addAll(destEdgeIndexList);
    HashSet<LongWritable> hs = new HashSet<LongWritable>();
    hs.addAll(allEdgeIndexList);

    allEdgeIndexList.clear();
    allEdgeIndexList.addAll(hs);
    incomingEdgeIndexList = null;
}

From source file:com.tremolosecurity.unison.openshiftv3.OpenShiftTarget.java

/** Reconciles the user's group memberships with the groups reported by the server. */
private void syncGroups(User user, boolean addOnly, int approvalID, Workflow workflow, User fromServer,
        String token) throws Exception, IOException {
    HttpCon con = null;

    try {
        //first see if there are groups to add
        HashSet<String> fromServerGroups = new HashSet<String>();
        fromServerGroups.addAll(fromServer.getGroups());
        for (String groupName : user.getGroups()) {
            if (!fromServerGroups.contains(groupName)) {

                if (token == null) {
                    token = this.getAuthToken();
                }

                if (con == null) {
                    con = this.createClient();
                }

                this.addUserToGroup(token, con, user.getUserID(), groupName, approvalID, workflow);
            }
        }

        if (!addOnly) {
            //remove groups no longer present
            HashSet<String> fromUserGroups = new HashSet<String>();
            fromUserGroups.addAll(user.getGroups());

            for (String groupName : fromServer.getGroups()) {
                if (!fromUserGroups.contains(groupName)) {
                    if (token == null) {
                        token = this.getAuthToken();
                    }

                    if (con == null) {
                        con = this.createClient();
                    }

                    this.removeUserFromGroup(token, con, user.getUserID(), groupName, approvalID, workflow);
                }
            }
        }

    } finally {
        if (con != null) {
            con.getBcm().shutdown();
            con.getHttp().close();
        }
    }
}

From source file:org.jactr.core.module.declarative.search.local.DefaultSearchSystem.java

public Collection<IChunk> findFuzzy(ChunkTypeRequest pattern, Comparator<IChunk> sortRule) {

    /*
     * second pass, ditch all those that don't match our chunktype
     */
    HashSet<IChunk> candidates = new HashSet<IChunk>();
    IChunkType chunkType = pattern.getChunkType();

    /*
     * first things first, find all the candidates based on the content of the
     * pattern. this is the same as findExact, but without the retainAll
     */
    for (IConditionalSlot slot : pattern.getConditionalSlots()) {
        Collection<IChunk> containers = find(slot);
        if (chunkType == null)
            candidates.addAll(containers);
        else
            for (IChunk candidate : containers)
                if (candidate.isA(chunkType))
                    candidates.add(candidate);
    }

    if (LOGGER.isDebugEnabled())
        LOGGER.debug("First pass candidates for " + pattern + " chunks: " + candidates);

    if (sortRule != null) {
        /*
         * finally, we sort them
         */
        TreeSet<IChunk> sortedResults = new TreeSet<IChunk>(sortRule);
        sortedResults.addAll(candidates);

        return sortedResults;
    }
    return candidates;
}

From source file:com.aimluck.eip.wiki.WikiSelectData.java

@Override
protected SelectQuery<EipTWiki> buildSelectQueryForFilter(SelectQuery<EipTWiki> query, RunData rundata,
        Context context) {
    if (current_filterMap.containsKey("category")) {
        // filter by category
        List<String> categoryIds = current_filterMap.get("category");
        categoryId = categoryIds.get(0).toString();
        if (null == topWikiList) {
            topWikiList = WikiUtils.loadTopWikiList(rundata);
        }
        boolean existCategory = false;
        if (topWikiList != null && topWikiList.size() > 0) {
            for (WikiResultData category : topWikiList) {
                if (categoryId.equals(category.getId().toString())) {
                    existCategory = true;
                    break;
                }
            }

        }
        if (!existCategory) {
            categoryId = "";
            current_filterMap.remove("category");
        } else {
            Expression exp = ExpressionFactory.matchExp(EipTWiki.PARENT_ID_PROPERTY, categoryId);
            Expression exp2 = ExpressionFactory.matchDbExp(EipTWiki.WIKI_ID_PK_COLUMN, categoryId);
            query.andQualifier(exp.orExp(exp2));
        }

        updateCategoryName();
    }

    super.buildSelectQueryForFilter(query, rundata, context);

    if (current_filterMap.containsKey("post")) {
        // filter by post

        List<String> postIds = current_filterMap.get("post");

        HashSet<Integer> userIds = new HashSet<Integer>();
        for (String post : postIds) {
            List<Integer> userId = ALEipUtils.getUserIds(post);
            userIds.addAll(userId);
        }
        if (userIds.isEmpty()) {
            userIds.add(-1);
        }
        Expression exp = ExpressionFactory.inExp(EipTWiki.UPDATE_USER_ID_PROPERTY, userIds);
        query.andQualifier(exp);

        postId = postIds.get(0).toString();
        updatePostName();
    }

    String search = ALEipUtils.getTemp(rundata, context, LIST_SEARCH_STR);

    if (search != null && !"".equals(search)) {
        current_search = search;
        Expression ex1 = ExpressionFactory.likeExp(EipTWiki.NOTE_PROPERTY, "%" + search + "%");
        Expression ex2 = ExpressionFactory.likeExp(EipTWiki.WIKI_NAME_PROPERTY, "%" + search + "%");
        SelectQuery<EipTWiki> q = Database.query(EipTWiki.class);
        q.andQualifier(ex1.orExp(ex2));
        List<EipTWiki> queryList = q.fetchList();
        List<Integer> resultid = new ArrayList<Integer>();
        for (EipTWiki item : queryList) {
            resultid.add(item.getWikiId());
        }
        if (resultid.size() == 0) {
            // no hits: add -1 so the IN expression matches nothing
            resultid.add(-1);
        }
        Expression ex = ExpressionFactory.inDbExp(EipTWiki.WIKI_ID_PK_COLUMN, resultid);
        query.andQualifier(ex);
    }
    return query;
}

From source file:org.openmrs.module.appointmentscheduling.api.impl.AppointmentServiceImpl.java

/**
 * @see org.openmrs.module.appointmentscheduling.api.AppointmentService#getAllAppointmentTypes()
 */
@Transactional(readOnly = true)
public Set<AppointmentType> getAllAppointmentTypes() {
    HashSet<AppointmentType> set = new HashSet<AppointmentType>();
    set.addAll(getAppointmentTypeDAO().getAll());
    return set;
}

From source file:org.deegree.feature.persistence.shape.ShapeFeatureStore.java

@Override
public FeatureInputStream query(Query query) throws FilterEvaluationException, FeatureStoreException {

    if (query.getTypeNames() == null || query.getTypeNames().length > 1) {
        String msg = "Only queries with exactly one or zero type name(s) are supported.";
        throw new UnsupportedOperationException(msg);
    }

    HashSet<Integer> idFilterNums = null;
    if (query.getFilter() instanceof IdFilter) {
        idFilterNums = new HashSet<Integer>();
        IdFilter f = (IdFilter) query.getFilter();
        List<ResourceId> ids = f.getSelectedIds();
        for (ResourceId id : ids) {
            if (id.getRid().startsWith(fidPrefix)) {
                String[] ss = id.getRid().split("_");
                idFilterNums.add(Integer.valueOf(ss[1]));
            }
        }
    }

    if (query.getTypeNames().length == 0 && !(query.getFilter() instanceof IdFilter)
            || idFilterNums != null && idFilterNums.isEmpty()) {
        return new MemoryFeatureInputStream(new GenericFeatureCollection());
    }

    if (query.getTypeNames().length > 0) {
        QName featureType = query.getTypeNames()[0].getFeatureTypeName();
        if (featureType != null && !featureType.equals(ft.getName())) {
            // or null?
            return new MemoryFeatureInputStream(new GenericFeatureCollection());
        }
    }

    checkForUpdate();

    if (!available) {
        return null;
    }

    Filter filter = query.getFilter();
    Pair<Filter, Envelope> filterPair = splitOffBBoxConstraint(filter);

    List<Pair<Integer, Long>> recNumsAndPos = new LinkedList<Pair<Integer, Long>>();
    Envelope bbox = getTransformedEnvelope(query.getPrefilterBBoxEnvelope());

    if (bbox == null) {
        getEnvelope(null);
    }

    boolean queryIndex = filterPair.first == null || !generateAlphanumericIndexes;
    Pair<Filter, SortProperty[]> p = queryIndex ? null
            : dbfIndex.query(recNumsAndPos, filterPair.first, query.getSortProperties());
    HashSet<Integer> recNums = new HashSet<Integer>(unzipPair(recNumsAndPos).first);
    if (idFilterNums != null) {
        recNums.addAll(idFilterNums);
    }
    recNumsAndPos = shp.query(bbox, filter == null || p == null ? null : recNums);
    LOG.debug("{} records matching after BBOX filtering", recNumsAndPos.size());

    // don't forget about filters if dbf index could not be queried
    if (p == null) {
        p = new Pair<Filter, SortProperty[]>(filterPair.first, query.getSortProperties());
    }

    FeatureInputStream rs = new IteratorFeatureInputStream(new FeatureIterator(recNumsAndPos.iterator()));

    if (p.first != null) {
        LOG.debug("Applying in-memory filtering.");
        rs = new FilteredFeatureInputStream(rs, p.first);
    }

    if (p.second != null && p.second.length > 0) {
        LOG.debug("Applying in-memory sorting.");
        rs = new MemoryFeatureInputStream(Features.sortFc(rs.toCollection(), p.second));
    }

    return rs;
}

From source file:org.testmp.datastore.client.DataStoreClient.java

/**
 * Saves the data in the data store to the specified file as a JSON array.
 *
 * @param filepath
 * @throws DataStoreClientException
 */
public void saveDataToFile(String filepath) throws DataStoreClientException {
    List<Tag> tags = getTags();
    HashSet<Integer> idSet = new HashSet<Integer>();
    for (Tag tag : tags) {
        idSet.addAll(tag.getRelatedDataIds());
    }
    LinkedList<Integer> idList = new LinkedList<Integer>(idSet);
    Collections.sort(idList);
    int maxDataToGetEachTime = 500, i = 0;
    PrintWriter writer = null;
    try {
        writer = new PrintWriter(filepath, "UTF-8");
        writer.print("[");
        boolean isFirst = true;
        while (i < idList.size()) {
            int startId = idList.get(i);
            i = i + maxDataToGetEachTime - 1;
            int endId = (i >= idList.size() ? idList.getLast() : idList.get(i));
            i++;
            List<DataInfo<Object>> dataInfoList = getDataByRange(Object.class, startId, endId);
            for (DataInfo<Object> dataInfo : dataInfoList) {
                if (!isFirst) {
                    writer.println(",");
                } else {
                    isFirst = false;
                }
                writer.print(dataInfo.toString());
            }
        }
        writer.println("]");
    } catch (Exception e) {
        throw new DataStoreClientException("Failed to save data to file", e);
    } finally {
        if (writer != null) {
            writer.close();
        }
    }
}

From source file:com.cburch.logisim.circuit.CircuitWires.java

/** Collects all wires in the wire bundle that contains the given wire's endpoint. */
WireSet getWireSet(Wire start) {
    WireBundle bundle = getWireBundle(start.e0);
    if (bundle == null)
        return WireSet.EMPTY;
    HashSet<Wire> wires = new HashSet<Wire>();
    for (Location loc : bundle.points) {
        wires.addAll(points.getWires(loc));
    }
    return new WireSet(wires);
}

From source file:org.apache.jackrabbit.oak.plugins.segment.SegmentDataStoreBlobGCIT.java

/** Creates blobs under node names containing special characters and returns the ids of their chunks. */
private HashSet<String> addNodeSpecialChars() throws Exception {
    List<String> specialCharSets = Lists.newArrayList("q\\%22afdg\\%22", "a\nbcd", "a\n\rabcd", "012\\efg");
    HashSet<String> set = new HashSet<String>();
    NodeBuilder a = nodeStore.getRoot().builder();
    for (int i = 0; i < specialCharSets.size(); i++) {
        SegmentBlob b = (SegmentBlob) nodeStore.createBlob(randomStream(i, 18432));
        NodeBuilder n = a.child("cspecial");
        n.child(specialCharSets.get(i)).setProperty("x", b);
        Iterator<String> idIter = blobStore.resolveChunks(b.getBlobId());
        set.addAll(Lists.newArrayList(idIter));
    }
    nodeStore.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY);
    return set;
}