Example usage for java.util HashSet isEmpty

List of usage examples for java.util HashSet isEmpty

Introduction

On this page you can find an example usage for java.util HashSet isEmpty.

Prototype

public boolean isEmpty() 

Source Link

Document

Returns true if this set contains no elements.

Usage

From source file:amie.keys.CSAKey.java

/**
 * Computes the power set (the set of all subsets) of the given set.
 *
 * @param originalSet the input set of integers
 * @return a set containing every subset of {@code originalSet}, including
 *         the empty set and the full set (2^n subsets for n elements)
 */
public HashSet<HashSet<Integer>> powerSet(HashSet<Integer> originalSet) {
    // Start from the power set of the empty set: { {} }.
    HashSet<HashSet<Integer>> subsets = new HashSet<HashSet<Integer>>();
    subsets.add(new HashSet<Integer>());
    // For each element, double the collection: every existing subset is
    // kept as-is, and a copy extended with the element is added.
    for (Integer element : originalSet) {
        HashSet<HashSet<Integer>> grown = new HashSet<HashSet<Integer>>();
        for (HashSet<Integer> subset : subsets) {
            grown.add(subset);
            HashSet<Integer> extended = new HashSet<Integer>(subset);
            extended.add(element);
            grown.add(extended);
        }
        subsets = grown;
    }
    return subsets;
}

From source file:org.sipfoundry.sipxconfig.phone.polycom.CodecGroupsTest.java

/**
 * Verifies a single Polycom codec group against the expected test data:
 * the setting must exist, offer exactly the expected codec options (no
 * duplicates, nothing missing, nothing extra), and default-select the
 * expected codecs in the expected preference order.
 *
 * @param codecGroup the codec group under test
 * @param version    device version used to initialize the test phone
 * @throws DocumentException on profile document problems
 */
private void assertCodecGroup(CodecGroupType codecGroup, DeviceVersion version) throws DocumentException {

    // Build a phone whose model supports only the codec group under test.
    PolycomModel testModel = new PolycomModel();
    Set<String> featureSet = new HashSet<String>();
    featureSet.add(String.format("%s_CodecPref", codecGroup));
    testModel.setSupportedFeatures(featureSet);
    PolycomPhone testPhone = new PolycomPhone();
    testPhone.setModel(testModel);
    testPhone.setDeviceVersion(version);
    PhoneTestDriver.supplyTestData(testPhone, new LinkedList<User>());

    // The adaptor setting for the multi-enum codec preference setting.
    Setting adaptorSetting = testPhone.getSettings().getSetting("voice/codecPref/" + codecGroup);
    assertNotNull(String.format("Failed to get the '%s' codec group Setting.", codecGroup), adaptorSetting);

    // The codec options actually offered by the multi-enum setting type.
    MultiEnumSetting enumType = (MultiEnumSetting) adaptorSetting.getType();
    Collection<String> offeredOptions = enumType.getEnums().values();

    // Rule out duplicates first; the later asserts would be misleading otherwise.
    assertEquals(String.format("The '%s' codec group contains one or more duplicate entries.", codecGroup),
            (new HashSet<String>(offeredOptions)).size(), offeredOptions.size());

    // *** 1. Test the set of available codec options. ***

    // Expected options; entries are consumed as they are matched below.
    HashSet<String> remainingExpected = new HashSet<String>(CODECGROUP_OPTION_MAP.get(codecGroup));

    // Offered options that were never expected are collected here.
    HashSet<String> surplusOptions = new HashSet<String>();
    for (String offered : offeredOptions) {
        boolean wasExpected = remainingExpected.remove(offered);
        if (!wasExpected) {
            surplusOptions.add(offered);
        }
    }

    // Anything still in remainingExpected was expected but never offered.
    String failureMessage = String.format(
            "The following '%s' codec group is missing the following options: %s.", codecGroup,
            remainingExpected);
    assertTrue(failureMessage, remainingExpected.isEmpty());

    // Anything in surplusOptions was offered but not expected.
    failureMessage = String.format("The following '%s' codec group options were not expected: %s.", codecGroup,
            surplusOptions);
    assertTrue(failureMessage, surplusOptions.isEmpty());

    // *** 2. Test the set and preference order of default selected codecs. ***
    String separator = "|";
    String actualDefaults = trimToNull(
            join((Collection<String>) adaptorSetting.getTypedValue(), separator));
    String expectedDefaults = trimToNull(join(CODECGROUP_SELECTED_MAP.get(codecGroup), separator));
    assertEquals(String.format("The '%s' codec group's default selected codecs are incorrect.", codecGroup),
            expectedDefaults, actualDefaults);
}

From source file:pathwaynet.PathwayCalculator.java

/**
 * Collects, for every available in-group component, the shortest distance
 * to other components: to the other in-group components when
 * {@code insideGroup} is true, otherwise to the available out-group
 * components. Prints a warning and returns an empty map when the group
 * split leaves too few usable components.
 *
 * @param distancesMap         full pairwise distance map keyed by component
 * @param componentsInGroup    components belonging to the group of interest
 * @param componentsConsidered components eligible for the comparison
 * @param onlyFromSource       forwarded to getShortestDistance (directionality flag — TODO confirm semantics)
 * @param insideGroup          true for in-group vs in-group distances, false for in-group vs out-group
 * @return map from each in-group component to its distances; empty when a warning was printed
 */
private <T> HashMap<T, Map<T, Number>> getDistancesWithGroup(HashMap<T, Map<T, Number>> distancesMap,
        Collection<T> componentsInGroup, Collection<T> componentsConsidered, boolean onlyFromSource,
        boolean insideGroup) {
    // Partition the components present in the distance map into the
    // considered ones outside the group and the considered ones inside it.
    HashSet<T> availableOutside = new HashSet<>(distancesMap.keySet());
    availableOutside.retainAll(componentsConsidered);
    availableOutside.removeAll(componentsInGroup);

    HashSet<T> availableInside = new HashSet<>(distancesMap.keySet());
    availableInside.retainAll(componentsConsidered);
    availableInside.removeAll(availableOutside);

    // obtain distance
    HashMap<T, Map<T, Number>> distancesFromGroup = new HashMap<>();
    if (insideGroup && availableInside.size() < 2) {
        System.err.println("WARNING: Fewer than TWO given components are involved in the pathway.");
    } else if ((!insideGroup) && (availableInside.isEmpty() || availableOutside.isEmpty())) {
        System.err.println(
                "WARNING: There is either no or full overlap between the given components and the ones involving in the pathway.");
    } else {
        for (T component1 : availableInside) {
            HashMap<T, Number> row = new HashMap<>();
            distancesFromGroup.put(component1, row);
            for (T component2 : distancesMap.keySet()) {
                Number minDist = getShortestDistance(distancesMap, component1, component2, onlyFromSource);
                if (minDist == null) {
                    continue;
                }
                // Keep the pair only when component2 is on the requested side
                // (and, in-group, is not the component itself).
                boolean keep;
                if (insideGroup) {
                    keep = availableInside.contains(component2) && !component1.equals(component2);
                } else {
                    keep = availableOutside.contains(component2);
                }
                if (keep) {
                    row.put(component2, minDist);
                }
            }
        }
    }
    return distancesFromGroup;
}

From source file:org.unitime.timetable.test.BatchStudentSectioningLoader.java

/**
 * Loads one last-like course demand for the given student as a sectioning
 * {@link CourseRequest}, reusing cached model objects from the lookup
 * tables. When committed class assignments are supplied, the enrollment
 * covering all of the student's assigned sections is pinned as the
 * request's initial assignment.
 *
 * @param hibSession       Hibernate session (not used inside this method)
 * @param d                the last-like course demand being loaded
 * @param s                the database student record
 * @param courseOfferingId id of the requested course offering
 * @param studentTable     cache of sectioning Student objects keyed by student unique id (updated here)
 * @param courseTable      cache of Course objects keyed by course offering id
 * @param classTable       class lookup table (not used inside this method)
 * @param classAssignments committed class ids per student unique id, or null when not loading assignments
 */
public void loadLastLikeStudent(org.hibernate.Session hibSession, LastLikeCourseDemand d,
        org.unitime.timetable.model.Student s, Long courseOfferingId, Hashtable studentTable,
        Hashtable courseTable, Hashtable classTable, Hashtable classAssignments) {
    sLog.debug("Loading last like demand of student " + s.getUniqueId() + " (id=" + s.getExternalUniqueId()
            + ", name=" + s.getFirstName() + " " + s.getMiddleName() + " " + s.getLastName() + ") for "
            + courseOfferingId);
    // Reuse the cached sectioning student, creating and caching it on first sight.
    Student student = (Student) studentTable.get(s.getUniqueId());
    if (student == null) {
        student = new Student(s.getUniqueId().longValue(), true);
        if (iLoadStudentInfo)
            loadStudentInfo(student, s);
        studentTable.put(s.getUniqueId(), student);
    }
    // The new request is appended at the end of the student's priority list.
    int priority = student.getRequests().size();
    Vector courses = new Vector();
    Course course = (Course) courseTable.get(courseOfferingId);
    if (course == null) {
        // Course is not in the cache: nothing to request.
        sLog.warn("  -- course " + courseOfferingId + " not loaded");
        return;
    }
    courses.addElement(course);
    // NOTE(review): the two boolean constructor args are passed as false —
    // presumably "alternative" and "wait-list" flags; confirm against CourseRequest.
    CourseRequest request = new CourseRequest(d.getUniqueId().longValue(), priority++, false, student, courses,
            false, null);
    sLog.debug("  -- added request " + request);
    if (classAssignments != null && !classAssignments.isEmpty()) {
        // Translate the student's committed class ids into sections of this request.
        HashSet assignedSections = new HashSet();
        HashSet classIds = (HashSet) classAssignments.get(s.getUniqueId());
        if (classIds != null)
            for (Iterator i = classIds.iterator(); i.hasNext();) {
                Long classId = (Long) i.next();
                Section section = (Section) request.getSection(classId.longValue());
                if (section != null)
                    assignedSections.add(section);
            }
        if (!assignedSections.isEmpty()) {
            sLog.debug("    -- committed assignment: " + assignedSections);
            // Pick the first enrollment that contains every committed section
            // and record it as the initial assignment.
            for (Enrollment enrollment : request.values(getAssignment())) {
                if (enrollment.getAssignments().containsAll(assignedSections)) {
                    request.setInitialAssignment(enrollment);
                    sLog.debug("      -- found: " + enrollment);
                    break;
                }
            }
        }
    }
}

From source file:com.android.mail.ui.AnimatedAdapter.java

/**
 * Removes the conversation represented by {@code obj} from the set of
 * still-animating items; once the set drains to empty, runs the pending
 * action and refreshes the adapter.
 *
 * @param obj   candidate view; only {@link ConversationItemView} instances are handled
 * @param items ids of conversations whose animations are still outstanding (mutated here)
 */
private void updateAnimatingConversationItems(Object obj, HashSet<Long> items) {
    if (items.isEmpty()) {
        return;
    }
    if (!(obj instanceof ConversationItemView)) {
        return;
    }
    final long conversationId = ((ConversationItemView) obj).getConversation().id;
    items.remove(conversationId);
    mAnimatingViews.remove(conversationId);
    // Last outstanding animation finished: flush the queued action and redraw.
    if (items.isEmpty()) {
        performAndSetNextAction(null);
        notifyDataSetChanged();
    }
}

From source file:ubic.gemma.core.ontology.GoMetric.java

/**
 * Tailored to handle computing overlap between two gene lists which may contain duplicate genes of the same name
 * but different IDs. If gene lists do not contain duplicates (size = 1) the result will be the same as that of
 * computing simple overlap./* w  w  w.j  a v  a 2  s .  co m*/
 *
 * @param  geneGoMap  gene go map
 * @param  sameGenes1 same genes 1
 * @param  sameGenes2 same genes 2
 * @return            number of overlapping terms between merged sets of GO terms for duplicate gene lists
 */
public Double computeMergedOverlap(List<Gene> sameGenes1, List<Gene> sameGenes2,
        Map<Long, Collection<String>> geneGoMap) {

    HashSet<String> mergedGoTerms1 = new HashSet<>();
    HashSet<String> mergedGoTerms2 = new HashSet<>();
    for (Gene gene1 : sameGenes1) {
        if (geneGoMap.containsKey(gene1.getId())) {
            mergedGoTerms1.addAll(geneGoMap.get(gene1.getId()));
        }
    }
    for (Gene gene2 : sameGenes2) {
        if (geneGoMap.containsKey(gene2.getId())) {
            mergedGoTerms2.addAll(geneGoMap.get(gene2.getId()));
        }
    }

    if (mergedGoTerms1.isEmpty() || mergedGoTerms2.isEmpty())
        return 0.0;

    return this.computeScore(mergedGoTerms1, mergedGoTerms2);
}

From source file:hms.hwestra.interactionrebuttal.InteractionRebuttal.java

/**
 * Prepares expression data for cell-type-specific eQTL mapping: selects the
 * cell-type-specific probes, correlates their expression profiles, runs a
 * PCA over that correlation matrix, orients the first principal component so
 * that it correlates positively with the selected probes, and writes the
 * per-sample PC1 scores to "CellTypeProxyFile.txt" as a cell-count proxy.
 * Intermediate PCA output files are deleted at the end.
 *
 * @param rawExpressionDataset      expression dataset (rows = probes, columns = samples — TODO confirm orientation)
 * @param inexpraw                  path of the raw expression file (stored, but the reload is currently commented out)
 * @param outdirectory              directory receiving the proxy file and temporary PCA output
 * @param correlationThreshold      currently unused in this method — TODO confirm whether filtering was intended
 * @param celltypeSpecificProbeFile text file with one cell-type-specific probe name per line
 * @param mdsComponentFile          currently unused in this method — TODO confirm
 * @param cellCountFile             currently unused in this method — TODO confirm
 * @param gte                       currently unused in this method — TODO confirm
 * @param threads                   currently unused in this method — TODO confirm
 * @throws IOException when the probe list or output files cannot be read or written
 */
public void prepareDataForCelltypeSpecificEQTLMapping(DoubleMatrixDataset<String, String> rawExpressionDataset,
        String inexpraw, String outdirectory, Double correlationThreshold, String celltypeSpecificProbeFile,
        String mdsComponentFile, String cellCountFile, String gte, Integer threads) throws IOException {
    String rawExpressionDataFile = inexpraw;
    // 7. select Cell type specific probes
    System.out.println("Loading list of cell type specific probes from: " + celltypeSpecificProbeFile);
    HashSet<String> cellTypeSpecificProbeSet = new HashSet<String>();
    TextFile cellSpecificProbeTF = new TextFile(celltypeSpecificProbeFile, TextFile.R);
    cellTypeSpecificProbeSet.addAll(cellSpecificProbeTF.readAsArrayList());
    cellSpecificProbeTF.close();

    // An empty probe list makes the whole analysis meaningless: bail out hard.
    if (cellTypeSpecificProbeSet.isEmpty()) {
        System.err.println("Error: " + celltypeSpecificProbeFile + " is empty!");
        System.exit(-1);
    } else {
        System.out.println(cellTypeSpecificProbeSet.size() + " cell type specific probes loaded.");
    }

    // 1. load gene expression data
    System.out.println("Loading gene expression data.");

    double[][] rawExpressionData = rawExpressionDataset.getRawData();

    // determine the number of cell type specific probes in this dataset
    int probeCounter = 0;
    List<String> probes = rawExpressionDataset.rowObjects;
    for (int i = 0; i < probes.size(); i++) {
        if (cellTypeSpecificProbeSet.contains(probes.get(i))) {
            probeCounter++;
        }
    }

    // Without any overlap between probe list and dataset there is nothing to do.
    if (probeCounter == 0) {
        System.err
                .println("Error: none of the cell type specific probes defined in " + celltypeSpecificProbeFile
                        + " are present in expression dataset: " + rawExpressionDataset.fileName);
        System.exit(-1);
    } else {
        System.out.println(probeCounter + " of the cell type specific probes are in your dataset.");
    }

    System.out.println("Now reloading the gene expression data for the samples that passed the QC.");
    // 6. Remove samples with r < 0.9 for PC1
    // reload expression file, include only samples that pass QC...
    // NOTE(review): the reload + renormalization below is commented out; the
    // in-memory dataset is used as-is instead — confirm this is intentional.
    //        rawExpressionDataset = new DoubleMatrixDataset<String, String>(rawExpressionDataFile);
    //        rawExpressionData = rawExpressionDataset.getRawData();

    //        // quantile normalize, log2 transform again, because the number of samples might have been changed..
    //        QuantileNormalization.quantilenormalize(rawExpressionData);
    //        Log2Transform.log2transform(rawExpressionData);
    rawExpressionData = rawExpressionDataset.rawData;

    // collect data for cell type specific probes (rows in probe-list order of appearance)
    double[][] probeData = new double[probeCounter][rawExpressionDataset.colObjects.size()];
    probeCounter = 0;
    ArrayList<String> cellTypeSpecificProbeDatasetRowNames = new ArrayList<String>();
    for (int i = 0; i < probes.size(); i++) {
        if (cellTypeSpecificProbeSet.contains(probes.get(i))) {
            probeData[probeCounter] = rawExpressionData[i];
            cellTypeSpecificProbeDatasetRowNames.add(probes.get(i));
            probeCounter++;
        }
    }

    // initiate cell type specific probe correlation matrix (symmetric, unit diagonal)
    double[][] celltypeSpecificCorrelationMatrix = new double[probeCounter][probeCounter];
    for (int i = 0; i < probeCounter; i++) {
        for (int j = i + 1; j < probeCounter; j++) {
            double r = Correlation.correlate(probeData[i], probeData[j]);
            celltypeSpecificCorrelationMatrix[i][j] = r;
            celltypeSpecificCorrelationMatrix[j][i] = r;
        }
        celltypeSpecificCorrelationMatrix[i][i] = 1;
    }

    // save the correlation matrix (the save call itself is currently commented out)
    DoubleMatrixDataset<String, String> probeCorrelationMatrixOut = new DoubleMatrixDataset<String, String>();
    probeCorrelationMatrixOut.colObjects = cellTypeSpecificProbeDatasetRowNames;
    probeCorrelationMatrixOut.rowObjects = cellTypeSpecificProbeDatasetRowNames;
    probeCorrelationMatrixOut.rawData = celltypeSpecificCorrelationMatrix;
    probeCorrelationMatrixOut.recalculateHashMaps();
    //        probeCorrelationMatrixOut.save(outdirectory + "CelltypeSpecificProbeCorrelationMatrix.txt.gz");

    // 9. PCA over cell specific probe correlation matrix
    DoubleMatrixDataset<String, String> cellTypeSpecificDataset = new DoubleMatrixDataset<String, String>(
            probeData);
    cellTypeSpecificDataset.colObjects = rawExpressionDataset.colObjects;
    cellTypeSpecificDataset.rowObjects = cellTypeSpecificProbeDatasetRowNames;
    //        cellTypeSpecificDataset.save(expressionOutputDirectory + "CellTypeSpecificProbeExpression.txt.gz");
    cellTypeSpecificDataset.transposeDataset();
    Normalizer n = new Normalizer();
    // calculate first Principal Component over the cell type specific probe matrix...
    Pair<DoubleMatrixDataset<String, String>, DoubleMatrixDataset<String, String>> PCAResults = n.calculatePCA(
            cellTypeSpecificDataset, celltypeSpecificCorrelationMatrix,
            outdirectory + "CellTypeSpecificProbePCA", 1);

    // 10. PC1 scores: cell specific proxy -- write to file for future use...
    DoubleMatrixDataset<String, String> cellSpecificPCScores = PCAResults.getLeft();

    // Ensure that the PC scores correlate positively with the set of probes
    // that we used to determine this component (PCA sign is arbitrary).
    double[] pcScoresSamples = new double[cellSpecificPCScores.nrRows];
    for (int i = 0; i < cellSpecificPCScores.nrRows; i++) {
        pcScoresSamples[i] = cellSpecificPCScores.rawData[i][0];
    }
    cellTypeSpecificDataset.transposeDataset();
    // Majority vote over the probe/PC1 correlation signs decides the orientation.
    int nrProbesCorrelatingPositively = 0;
    for (int i = 0; i < cellTypeSpecificDataset.rawData.length; i++) {
        double corr = JSci.maths.ArrayMath.correlation(pcScoresSamples, cellTypeSpecificDataset.rawData[i]);
        if (corr >= 0) {
            nrProbesCorrelatingPositively++;
        } else {
            nrProbesCorrelatingPositively--;
        }
    }
    if (nrProbesCorrelatingPositively < 0) {
        // More probes correlate negatively than positively: flip the component sign.
        for (int i = 0; i < cellSpecificPCScores.nrRows; i++) {
            cellSpecificPCScores.rawData[i][0] = -cellSpecificPCScores.rawData[i][0];
        }
    }

    // Write the per-sample proxy values (sample name TAB PC1 score).
    TextFile tfOutCellSpecific = new TextFile(outdirectory + "CellTypeProxyFile.txt", TextFile.W);
    tfOutCellSpecific.writeln("Sample\tCellCountProxyValue");
    for (int i = 0; i < cellSpecificPCScores.nrRows; i++) {
        tfOutCellSpecific
                .writeln(cellSpecificPCScores.rowObjects.get(i) + "\t" + cellSpecificPCScores.rawData[i][0]);
    }
    tfOutCellSpecific.close();

    // Clean up the intermediate PCA output files.
    File f = new File(outdirectory + "CellTypeSpecificProbePCA.PCAOverSamplesEigenvalues.txt.gz");
    f.delete();
    f = new File(outdirectory + "CellTypeSpecificProbePCA.PCAOverSamplesEigenvectors.txt.gz");
    f.delete();
    f = new File(outdirectory + "CellTypeSpecificProbePCA.PCAOverSamplesEigenvectorsTransposed.txt.gz");
    f.delete();
    f = new File(outdirectory + "CellTypeSpecificProbePCA.PCAOverSamplesPrincipalComponents.txt.gz");
    f.delete();

}

From source file:org.apache.ddlutils.model.ForeignKey.java

/**
 * Compares this foreign key to the given one while ignoring the case of identifiers.
 * /*w  ww.  j  a v a2 s .com*/
 * @param otherFk The other foreign key
 * @return <code>true</code> if this foreign key is equal (ignoring case) to the given one
 */
public boolean equalsIgnoreCase(ForeignKey otherFk) {
    boolean checkName = (_name != null) && (_name.length() > 0) && (otherFk._name != null)
            && (otherFk._name.length() > 0);

    if ((!checkName || _name.equalsIgnoreCase(otherFk._name))
            && _foreignTableName.equalsIgnoreCase(otherFk._foreignTableName)) {
        HashSet otherRefs = new HashSet();

        otherRefs.addAll(otherFk._references);
        for (Iterator it = _references.iterator(); it.hasNext();) {
            Reference curLocalRef = (Reference) it.next();
            boolean found = false;

            for (Iterator otherIt = otherRefs.iterator(); otherIt.hasNext();) {
                Reference curOtherRef = (Reference) otherIt.next();

                if (curLocalRef.equalsIgnoreCase(curOtherRef)) {
                    otherIt.remove();
                    found = true;
                    break;
                }
            }
            if (!found) {
                return false;
            }
        }
        return otherRefs.isEmpty();
    } else {
        return false;
    }
}

From source file:org.jumpmind.db.model.ForeignKey.java

/**
 * Compares this foreign key to the given one while ignoring the case of
 * identifiers.
 *
 * @param otherFk
 *            The other foreign key
 * @return <code>true</code> if this foreign key is equal (ignoring case) to
 *         the given one
 */
@SuppressWarnings("unchecked")
public boolean equalsIgnoreCase(ForeignKey otherFk) {
    boolean compareNames = isCheckName(otherFk);

    // Names (when applicable) and foreign table names must match first.
    if (compareNames && !name.equalsIgnoreCase(otherFk.name)) {
        return false;
    }
    if (!foreignTableName.equalsIgnoreCase(otherFk.foreignTableName)) {
        return false;
    }

    // Match each local reference against a working copy of the other key's
    // references, consuming matches so duplicates must pair up one-to-one.
    HashSet<Reference> unmatched = new HashSet<Reference>();
    unmatched.addAll(otherFk.references);
    for (Iterator<?> localIt = references.iterator(); localIt.hasNext();) {
        Reference localRef = (Reference) localIt.next();
        boolean matched = false;

        for (Iterator<?> otherIt = unmatched.iterator(); otherIt.hasNext();) {
            Reference candidate = (Reference) otherIt.next();
            if (localRef.equalsIgnoreCase(candidate)) {
                otherIt.remove();
                matched = true;
                break;
            }
        }
        if (!matched) {
            return false;
        }
    }
    // Every local reference matched; equal only if nothing is left over.
    return unmatched.isEmpty();
}

From source file:com.aimluck.eip.msgboard.MsgboardTopicSelectData.java

/**
 * Narrows the topic select query according to the active UI filters:
 * category, posting group ("post"), and the free-text search string.
 * Filter selections that are no longer valid are removed from
 * {@code current_filterMap} so stale state does not persist.
 *
 * @param query   the base topic query to add qualifiers to
 * @param rundata current request data
 * @param context current template context
 * @return the query with all applicable filter qualifiers applied
 */
@Override
protected SelectQuery<EipTMsgboardTopic> buildSelectQueryForFilter(SelectQuery<EipTMsgboardTopic> query,
        RunData rundata, Context context) {
    if (current_filterMap.containsKey("category")) {
        // Keep the category filter only when the selected category still exists.
        List<String> categoryIds = current_filterMap.get("category");
        categoryId = categoryIds.get(0).toString();
        List<MsgboardCategoryResultData> categoryList = MsgboardUtils.loadCategoryList(rundata);
        boolean existCategory = false;
        if (categoryList != null && categoryList.size() > 0) {
            for (MsgboardCategoryResultData category : categoryList) {
                if (categoryId.equals(category.getCategoryId().toString())) {
                    existCategory = true;
                    break;
                }
            }

        }
        if (!existCategory) {
            // The selected category no longer exists: clear the filter.
            categoryId = "";
            current_filterMap.remove("category");

        }

        updateCategoryName();
    }

    // Let the superclass apply its own filters first (query is mutated in place;
    // the return value is intentionally ignored here — TODO confirm).
    super.buildSelectQueryForFilter(query, rundata, context);

    if (current_filterMap.containsKey("post")) {
        // Posting-group filter: restrict topics to ones owned by group members.

        List<String> postIds = current_filterMap.get("post");
        boolean existPost = false;
        // NOTE(review): postIds.get(0) is used before the null/empty check below;
        // an empty "post" filter list would throw here — confirm callers never supply one.
        for (int i = 0; i < postList.size(); i++) {
            String pid = postList.get(i).getName().toString();
            if (pid.equals(postIds.get(0).toString())) {
                existPost = true;
                break;
            }
        }
        // Also accept posts known to the global post map.
        Map<Integer, ALEipPost> map = ALEipManager.getInstance().getPostMap();
        if (postIds != null && !postIds.isEmpty()) {
            for (Map.Entry<Integer, ALEipPost> item : map.entrySet()) {
                String pid = item.getValue().getGroupName().toString();
                if (pid.equals(postIds.get(0).toString())) {
                    existPost = true;
                    break;
                }
            }
        }
        if (existPost) {
            // Collect the ids of all users belonging to the selected post(s).
            HashSet<Integer> userIds = new HashSet<Integer>();
            for (String post : postIds) {
                List<Integer> userId = ALEipUtils.getUserIds(post);
                userIds.addAll(userId);
            }
            if (userIds.isEmpty()) {
                // No members: match the impossible owner id -1 so nothing is returned.
                userIds.add(-1);
            }
            Expression exp = ExpressionFactory.inExp(EipTMsgboardTopic.OWNER_ID_PROPERTY, userIds);
            query.andQualifier(exp);

            postId = postIds.get(0).toString();
            updatePostName();
        } else {
            // Unknown post: clear the filter.
            postId = "";
            updatePostName();
            current_filterMap.remove("post");
        }
    }

    String search = ALEipUtils.getTemp(rundata, context, LIST_SEARCH_STR);

    if (search != null && !"".equals(search)) {
        // Free-text search over topic note and name; matching replies are
        // folded into their parent topic id so whole threads are returned.
        current_search = search;
        Expression ex1 = ExpressionFactory.likeExp(EipTMsgboardTopic.NOTE_PROPERTY, "%" + search + "%");
        Expression ex2 = ExpressionFactory.likeExp(EipTMsgboardTopic.TOPIC_NAME_PROPERTY, "%" + search + "%");
        SelectQuery<EipTMsgboardTopic> q = Database.query(EipTMsgboardTopic.class);
        q.andQualifier(ex1.orExp(ex2));
        List<EipTMsgboardTopic> queryList = q.fetchList();
        List<Integer> resultid = new ArrayList<Integer>();
        for (EipTMsgboardTopic item : queryList) {
            if (item.getParentId() != 0 && !resultid.contains(item.getParentId())) {
                resultid.add(item.getParentId());
            } else if (!resultid.contains(item.getTopicId())) {
                resultid.add(item.getTopicId());
            }
        }
        if (resultid.size() == 0) {
            // No hits: match the impossible topic id -1 so an empty list is returned.
            resultid.add(-1);
        }
        Expression ex = ExpressionFactory.inDbExp(EipTMsgboardTopic.TOPIC_ID_PK_COLUMN, resultid);
        query.andQualifier(ex);
    }
    return query;
}