Example usage for java.util.TreeSet.add

List of usage examples for java.util.TreeSet.add

Introduction

This page collects example usages of java.util.TreeSet.add from a variety of source files.

Prototype

public boolean add(E e) 

Document

Adds the specified element to this set if it is not already present.
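
To make the contract concrete, here is a minimal, self-contained sketch (not taken from any of the projects below): add returns true only when the element was not yet present, and the set keeps its elements in sorted order.

import java.util.TreeSet;

public class TreeSetAddDemo {
    public static void main(String[] args) {
        TreeSet<String> set = new TreeSet<>();

        // add returns true when the element was not yet present
        System.out.println(set.add("banana")); // true
        System.out.println(set.add("apple"));  // true

        // adding a duplicate leaves the set unchanged and returns false
        System.out.println(set.add("apple"));  // false

        // elements are kept in natural (sorted) order
        System.out.println(set);               // [apple, banana]
    }
}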

Usage

From source file:com.jaspersoft.jasperserver.war.action.ReportJobEditAction.java

public Event setTriggerRecurrenceCalendar(RequestContext context) throws Exception {
    ReportJob job = getReportJob(context);
    ReportJobCalendarTrigger trigger = new ReportJobCalendarTrigger();
    copyCommonTriggerAttributes(trigger, job.getTrigger());
    trigger.setMinutes("0");
    trigger.setHours("0");
    trigger.setDaysType(ReportJobCalendarTrigger.DAYS_TYPE_ALL);

    TreeSet selectedMonths = new TreeSet();
    for (Iterator it = months.iterator(); it.hasNext();) {
        ByteEnum month = (ByteEnum) it.next();
        selectedMonths.add(new Byte(month.getCode()));
    }
    trigger.setMonths(selectedMonths);

    job.setTrigger(trigger);
    return success();
}

From source file:cooperativegametheory.Partition.java

public Partition(int[][] partition, PlayerSet onPlayerSet) {
    //TODO cleanup
    this.players = onPlayerSet;
    TreeSet<Integer> seen = new TreeSet<Integer>();
    for (int i = 0; i < partition.length; i++) {
        Coalition component = new Coalition(players);
        for (int j = 0; j < partition[i].length; j++) {
            if (!players.contains(partition[i][j])) {
                throw new IllegalArgumentException(
                        "argument 1 is not a proper partition (component members must be from the PlayerSet)");
            } else {
                if (!component.add(partition[i][j])) {
                    throw new IllegalArgumentException(
                            "argument 1 is not a proper partition (components must be a set)");
                }
            }
            if (!seen.add(partition[i][j])) {
                throw new IllegalArgumentException(
                        "argument 1 is not a proper partition (components must be disjoint)");
            }
        }
        if (!this.add(component)) {
            throw new IllegalArgumentException(
                    "argument 1 is not a proper partition (this error should never appear ?!)");
        }
    }
    for (int i = 0; i < players.size(); i++) {
        if (!seen.contains(i)) {
            throw new IllegalArgumentException("agument 1 is not a proper partition (not complete)");
        }
    }
}

From source file:com.clust4j.algo.MeanShiftTests.java

@Test
public void testAutoEstimationWithScale() {
    Array2DRowRealMatrix iris = (Array2DRowRealMatrix) new StandardScaler().fit(data_).transform(data_);
    final double[][] X = iris.getData();

    // MS estimates bw at 1.6041295821313855
    final double bandwidth = 1.6041295821313855;

    assertTrue(Precision.equals(
            MeanShift.autoEstimateBW(iris, 0.3, Distance.EUCLIDEAN, GlobalState.DEFAULT_RANDOM_STATE, false),
            bandwidth, 1e-9));

    assertTrue(Precision.equals(
            MeanShift.autoEstimateBW(iris, 0.3, Distance.EUCLIDEAN, GlobalState.DEFAULT_RANDOM_STATE, true),
            bandwidth, 1e-9));

    // Asserting fit works without breaking things...
    RadiusNeighbors r = new RadiusNeighbors(iris, new RadiusNeighborsParameters(bandwidth)).fit();

    TreeSet<MeanShiftSeed> centers = new TreeSet<>();
    for (double[] seed : X)
        centers.add(MeanShift.singleSeed(seed, r, X, 300));

    assertTrue(centers.size() == 4);

    double[][] expected_dists = new double[][] {
            new double[] { 0.50161528154395962, -0.31685274298813487, 0.65388162422893481,
                    0.65270450741975761 },
            new double[] { 0.52001211065400177, -0.29561728795619946, 0.67106269515983397,
                    0.67390853215763813 },
            new double[] { 0.54861244890482475, -0.25718786696105495, 0.68964559485632182,
                    0.69326664641211422 },
            new double[] { -1.0595457115461515, 0.74408909010240054, -1.2995708885010491,
                    -1.2545442961404225 } };

    int[] expected_centers = new int[] { 82, 80, 77, 45 };

    int idx = 0;
    for (MeanShiftSeed seed : centers) {
        assertTrue(VecUtils.equalsWithTolerance(seed.dists, expected_dists[idx], 1e-1));
        assertTrue(seed.count == expected_centers[idx]);
        idx++;
    }

    ArrayList<EntryPair<double[], Integer>> center_intensity = new ArrayList<>();
    for (MeanShiftSeed seed : centers) {
        if (null != seed) {
            center_intensity.add(seed.getPair());
        }
    }

    final ArrayList<EntryPair<double[], Integer>> sorted_by_intensity = center_intensity;

    // test getting the unique vals
    idx = 0;
    final int m_prime = sorted_by_intensity.size();
    final Array2DRowRealMatrix sorted_centers = new Array2DRowRealMatrix(m_prime, iris.getColumnDimension());
    for (Map.Entry<double[], Integer> e : sorted_by_intensity)
        sorted_centers.setRow(idx++, e.getKey());

    // Create a boolean mask, init true
    final boolean[] unique = new boolean[m_prime];
    for (int i = 0; i < unique.length; i++)
        unique[i] = true;

    // Fit the new neighbors model
    RadiusNeighbors nbrs = new RadiusNeighbors(sorted_centers,
            new RadiusNeighborsParameters(bandwidth).setVerbose(false)).fit();

    // Iterate over sorted centers and query radii
    int[] indcs;
    double[] center;
    for (int i = 0; i < m_prime; i++) {
        if (unique[i]) {
            center = sorted_centers.getRow(i);
            indcs = nbrs.getNeighbors(new double[][] { center }, bandwidth, false).getIndices()[0];

            for (int id : indcs) {
                unique[id] = false;
            }

            unique[i] = true; // Keep this as true
        }
    }

    // Now assign the centroids...
    int redundant_ct = 0;
    final ArrayList<double[]> centroids = new ArrayList<>();
    for (int i = 0; i < unique.length; i++) {
        if (unique[i]) {
            centroids.add(sorted_centers.getRow(i));
        }
    }

    redundant_ct = unique.length - centroids.size();

    assertTrue(redundant_ct == 2);
    assertTrue(centroids.size() == 2);
    assertTrue(VecUtils.equalsWithTolerance(centroids.get(0),
            new double[] { 0.4999404345258691, -0.3157948009929614, 0.6516983739795399, 0.6505251874544873 },
            1e-6));

    assertTrue(VecUtils.equalsExactly(centroids.get(1),
            new double[] { -1.0560079864392702, 0.7416046454700266, -1.295231741534238, -1.2503554887998656 }));

    // also put the centroids into a matrix. We have to
    // wait to perform this op, because we have to know
    // the size of centroids first...
    Array2DRowRealMatrix clust_centers = new Array2DRowRealMatrix(centroids.size(), iris.getColumnDimension());
    for (int i = 0; i < clust_centers.getRowDimension(); i++)
        clust_centers.setRow(i, centroids.get(i));

    // The final nearest neighbors model -- if this works, we are in the clear...
    new NearestNeighbors(clust_centers, new NearestNeighborsParameters(1)).fit();
}

From source file:net.sourceforge.fenixedu.presentationTier.Action.phd.CommonPhdIndividualProgramProcessDA.java

private ActionForward forwardToAlertMessageArchive(ActionMapping mapping, HttpServletRequest request,
        YearMonth yearMonthBean) {
    Integer year = yearMonthBean.getYear();
    if (year == null) {
        year = Integer.valueOf(ExecutionYear.readCurrentExecutionYear().getYear());
    }
    Month month = yearMonthBean.getMonth();

    TreeSet<PhdAlertMessage> orderedMessages = new TreeSet<PhdAlertMessage>(
            Collections.reverseOrder(PhdAlertMessage.COMPARATOR_BY_WHEN_CREATED_AND_ID));
    if (month == null) {
        for (PhdAlertMessage message : getLoggedPerson(request).getPhdAlertMessagesSet()) {
            if (year == message.getWhenCreated().getYear()) {
                orderedMessages.add(message);
            }
        }
    } else {
        for (PhdAlertMessage message : getLoggedPerson(request).getPhdAlertMessagesSet()) {
            if ((year == message.getWhenCreated().getYear())
                    && (month.getNumberOfMonth() == message.getWhenCreated().getMonthOfYear())) {
                orderedMessages.add(message);
            }
        }
    }

    request.setAttribute("yearMonthBean", yearMonthBean);
    request.setAttribute("alertMessages", orderedMessages);
    return mapping.findForward("viewAlertMessageArchive");
}

From source file:net.sourceforge.fenixedu.presentationTier.Action.phd.CommonPhdIndividualProgramProcessDA.java

private ActionForward forwardToProcessAlertMessageArchive(ActionMapping mapping, HttpServletRequest request,
        YearMonth yearMonthBean) throws NumberFormatException {

    Integer year = yearMonthBean.getYear();
    if (year == null) {
        year = Integer.valueOf(ExecutionYear.readCurrentExecutionYear().getYear());
    }
    Month month = yearMonthBean.getMonth();

    TreeSet<PhdAlertMessage> orderedMessages = new TreeSet<PhdAlertMessage>(
            Collections.reverseOrder(PhdAlertMessage.COMPARATOR_BY_WHEN_CREATED_AND_ID));
    if (month == null) {
        for (PhdAlertMessage message : getProcess(request).getAlertMessagesForLoggedPerson()) {
            if (year == message.getWhenCreated().getYear()) {
                orderedMessages.add(message);
            }
        }
    } else {
        for (PhdAlertMessage message : getProcess(request).getAlertMessagesForLoggedPerson()) {
            if ((year == message.getWhenCreated().getYear())
                    && (month.getNumberOfMonth() == message.getWhenCreated().getMonthOfYear())) {
                orderedMessages.add(message);
            }
        }
    }

    request.setAttribute("yearMonthBean", yearMonthBean);
    request.setAttribute("alertMessages", orderedMessages);
    return mapping.findForward("viewProcessAlertMessageArchive");
}

From source file:api.wiki.WikiNameApi2.java

private TreeSet<String> processFile(String name, File file) {
    final TreeSet<String> names = new TreeSet<>();
    try {
        Document doc = documentBuilder.parse(file);
        doc.getDocumentElement().normalize();
        NodeList nodes = doc.getElementsByTagName("pl");
        for (int i = 0; i < nodes.getLength(); i++) {
            Node n = nodes.item(i).getAttributes().getNamedItem("title");
            if (n == null) {
                continue;
            }
            String s = n.getNodeValue();
            if (s.contains(name) && !s.matches(".*\\d.*")) {
                if (s.contains("("))
                    s = s.substring(0, s.indexOf("("));
                names.add(s);
            }
        }
    } catch (SAXException | IOException ex) {
        Logger.getLogger(WikiNameApi2.class.getName()).log(Level.SEVERE, null, ex);
    }
    return names;
}

From source file:biz.netcentric.cq.tools.actool.dumpservice.impl.DumpserviceImpl.java

private void createTransientDumpNode(String dump, Node rootNode)
        throws ItemExistsException, PathNotFoundException, NoSuchNodeTypeException, LockException,
        VersionException, ConstraintViolationException, RepositoryException, ValueFormatException {

    NodeIterator nodeIt = rootNode.getNodes();

    // TreeSet used here since only this type offers the methods first()
    // and last()
    TreeSet<Node> dumpNodes = new TreeSet<Node>(new JcrCreatedComparator());

    Node previousDumpNode = null;

    // get all dump nodes
    while (nodeIt.hasNext()) {
        Node currNode = nodeIt.nextNode();

        if (currNode.getName().startsWith(DUMP_NODE_PREFIX)) {
            dumpNodes.add(currNode);
        }
    }
    // try to get previous dump node
    if (!dumpNodes.isEmpty()) {
        previousDumpNode = dumpNodes.first();
    }
    // is limit of dump nodes to save reached?
    if (dumpNodes.size() > (nrOfSavedDumps - 1)) {
        Node oldestDumpNode = dumpNodes.last();
        oldestDumpNode.remove();
    }
    Node dumpNode = getNewDumpNode(dump, rootNode);

    // order the newest dump node as first child node of ac root node
    if (previousDumpNode != null) {
        rootNode.orderBefore(dumpNode.getName(), previousDumpNode.getName());
    }
}
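
The comment in the example above points at why a TreeSet was chosen: with a custom Comparator, first() and last() return the newest and the oldest element directly. Below is a minimal sketch of that pattern using a hypothetical Dump class with a plain timestamp (the JcrCreatedComparator and JCR nodes of the original are not reproduced here).

import java.util.Comparator;
import java.util.TreeSet;

public class NewestOldestDemo {

    // Hypothetical stand-in for a dump node: just a name and a creation time
    static class Dump {
        final String name;
        final long created;
        Dump(String name, long created) {
            this.name = name;
            this.created = created;
        }
    }

    public static void main(String[] args) {
        // Order by creation time, descending, so the newest element is first().
        // Note: two dumps with the same timestamp would be treated as duplicates.
        TreeSet<Dump> dumps = new TreeSet<>(
                Comparator.comparingLong((Dump d) -> d.created).reversed());

        dumps.add(new Dump("dump_1", 1000L));
        dumps.add(new Dump("dump_2", 2000L));
        dumps.add(new Dump("dump_3", 3000L));

        System.out.println(dumps.first().name); // dump_3 (newest)
        System.out.println(dumps.last().name);  // dump_1 (oldest)
    }
}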

From source file:com.termmed.statistics.Processor.java

private TreeSet<Long> getOrder(IReportDetail file, File completeDetailFile) throws IOException {
    BufferedReader br = FileHelper.getReader(completeDetailFile);
    TreeSet<Long> ret = new TreeSet<Long>();

    Integer priorityIndex = file.getPriorityListColumnIndex();
    if (priorityIndex == null) {
        priorityIndex = 5;
    }
    br.readLine();
    String line;
    String[] spl;
    while ((line = br.readLine()) != null) {
        spl = line.split(",", -1);
        ret.add(Long.parseLong(spl[priorityIndex]));
    }
    br.close();
    return ret;
}

From source file:de.julielab.jcore.ae.jnet.uima.ConsistencyPreservation.java

/**
 * Consistency preservation based on (exact) string matching. If a string was
 * annotated once as an entity, all other occurrences of this string get the
 * same label. For mode: _string_ TODO: more intelligent (voting) mechanism
 * needed to avoid false positives TODO: needs to be checked for performance
 *
 * @param aJCas
 * @param entityMentionClassnames
 * @param confidenceThresholdForConsistencyPreservation
 * @throws AnalysisEngineProcessException
 */
public void stringMatch(final JCas aJCas, final TreeSet<String> entityMentionClassnames,
        double confidenceThresholdForConsistencyPreservation) throws AnalysisEngineProcessException {

    // check whether this mode is enabled
    if ((activeModes == null) || (activeModes.size() == 0)
            || !activeModes.contains(ConsistencyPreservation.MODE_STRING))
        return;

    final String text = aJCas.getDocumentText();

    final TypeSystem ts = aJCas.getTypeSystem();
    // This map stores the EntityMentions that share the same specificType.
    // We want to use the TreeSet to check - for a given specificType - if
    // there is already an annotation overlapping a specific text offset.
    // See the comparator below.
    final Map<String, TreeSet<EntityMention>> overlapIndex = new HashMap<>();
    // This Comparator checks whether two Entities overlap in any way. If
    // so, they are deemed "equal". The idea is to use this Comparator with
    // a TreeSet in which we store all existing entities. Then, we can
    // efficiently check for a specific span if there already exists any
    // overlapping entity.
    Comparator<EntityMention> overlapComparator = new Comparator<EntityMention>() {

        @Override
        public int compare(EntityMention o1, EntityMention o2) {
            int b1 = o1.getBegin();
            int e1 = o1.getEnd();
            int b2 = o2.getBegin();
            int e2 = o2.getEnd();

            if ((b1 <= b2) && (e1 >= e2)) {
                return 0;
            } else if ((b1 >= b2) && (e1 <= e2)) {
                return 0;
            }
            //
            else if ((b1 < e2) && (e1 > e2)) {
                return 0;
            } else if ((b1 < b2) && (e1 > b2)) {
                return 0;
            }
            return b1 - b2;
        }
    };

    for (final String entityMentionClassname : entityMentionClassnames) {
        // we use the index entity-class-wise; we don't want one class to
        // interfere with another
        overlapIndex.clear();
        try {
            // loop over all entity types to be considered
            EntityMention mentionForOffsetComparison = (EntityMention) JCoReAnnotationTools
                    .getAnnotationByClassName(aJCas, entityMentionClassname);

            LOGGER.debug("doStringBased() - checking consistency for type: " + entityMentionClassname);
            final Multimap<String, EntityMention> entityMap = HashMultimap.create();

            // final EntityMention myEntity = (EntityMention)
            // JCoReAnnotationTools
            // .getAnnotationByClassName(aJCas, entityMentionClassname);
            final Type entityType = ts.getType(entityMentionClassname);
            if (null == entityType)
                throw new IllegalArgumentException(
                        "Entity type \"" + entityMentionClassname + "\" was not found in the type system.");

            // loop over all entity annotations in document and put them in
            // hashmap
            LOGGER.debug("doStringBased() - building entity map");
            final Iterator<Annotation> entityIter = aJCas.getAnnotationIndex(entityType).iterator();
            while (entityIter.hasNext()) {
                final EntityMention entity = (EntityMention) entityIter.next();
                entityMap.put(entity.getCoveredText(), entity);
                // additionally, add the entities into the overlap index so
                // we can later quickly lookup whether there is already an
                // entity with the same specific type at a certain location
                String specificType = "<null>";
                if (!StringUtils.isBlank(entity.getSpecificType()))
                    specificType = entity.getSpecificType();
                TreeSet<EntityMention> set = overlapIndex.get(specificType);
                if (null == set) {
                    set = new TreeSet<>(overlapComparator);
                    overlapIndex.put(specificType, set);
                }
                set.add(entity);

            }

            // now search for strings not detected as this kind of entity
            LOGGER.debug("doStringBased() - searching for missed entities...");
            for (final String entityString : entityMap.keySet()) {
                final EntityMention entity = entityMap.get(entityString).iterator().next();
                String specificType = "<null>";
                if (!StringUtils.isBlank(entity.getSpecificType()))
                    specificType = entity.getSpecificType();
                TreeSet<EntityMention> overlapSet = overlapIndex.get(specificType);

                LOGGER.debug("doStringBased() - checking entity string: " + entityString);

                int pos = 0;
                int length = 0;
                List<EntityMention> stringMatchedEntities = new ArrayList<>();
                while ((pos = text.indexOf(entityString, (pos + length))) > -1) {
                    // for each position where we have found this entity
                    // string
                    LOGGER.debug("doStringBased() - found string at pos: " + pos);

                    // check whether there is already an annotation of this
                    // type
                    // this older approach had the issue that only one
                    // overlapping annotation of entityMentionClassname was
                    // returned; but this type could be the wrong one in
                    // that the returned had a different specific type but
                    // another existed with the same specificType as the
                    // sought entity
                    // EntityMention refEntity = (EntityMention)
                    // JCoReAnnotationTools
                    // .getOverlappingAnnotation(aJCas,
                    // entityMentionClassname, pos, pos
                    // + entityString.length());

                    mentionForOffsetComparison.setBegin(pos);
                    mentionForOffsetComparison.setEnd(pos + length);
                    boolean overlappingExists = overlapSet.contains(mentionForOffsetComparison);

                    // if (refEntity == null
                    // || (refEntity.getSpecificType() == null ^
                    // entity.getSpecificType() == null)
                    // || (refEntity.getSpecificType() != null
                    // && entity.getSpecificType() != null && !refEntity
                    // .getSpecificType().equals(entity.getSpecificType())))
                    // {
                    if (!overlappingExists) {
                        // if there is no annotation of same type on this
                        // text span yet...
                        LOGGER.debug("doStringBased() - adding annotation to unlabeled entity mention");
                        EntityMention refEntity = (EntityMention) JCoReAnnotationTools
                                .getAnnotationByClassName(aJCas, entityMentionClassname);
                        // We will not directly just annotate the found
                        // string but extend it to offsets of
                        // overlapped tokens.
                        List<Token> overlappingTokens = JCoReAnnotationTools.getNearestOverlappingAnnotations(
                                aJCas,
                                new Annotation(entity.getCAS().getJCas(), pos, pos + entityString.length()),
                                Token.class);
                        int begin = overlappingTokens.size() > 0 ? overlappingTokens.get(0).getBegin() : pos;
                        int end = overlappingTokens.size() > 0
                                ? overlappingTokens.get(overlappingTokens.size() - 1).getEnd()
                                : pos + entityString.length();
                        // If we would have to adjust the offsets too much,
                        // we have most likely just hit some
                        // substring of a larger token by coincidence.
                        refEntity.setBegin(begin);
                        refEntity.setEnd(end);
                        refEntity.setSpecificType(entity.getSpecificType());
                        refEntity.setResourceEntryList(entity.getResourceEntryList());
                        refEntity.setConfidence(entity.getConfidence());
                        refEntity.setTextualRepresentation(entity.getTextualRepresentation());
                        refEntity.setComponentId(COMPONENT_ID + " String (" + entity.getCoveredText() + ", "
                                + begin + "-" + end + ")");
                        stringMatchedEntities.add(refEntity);

                    } else
                        LOGGER.debug("doStringBased() - there is already an entity!");

                    length = entityString.length();
                }

                // A.R. 30.06.15: this option can now be turned on, just by
                // setting the config parameter
                // confidenceThresholdForConsistencyPreservation to a value
                // greater than 0
                // earlier it has been switched by commenting or
                // un-commenting the following code

                // If confidenceThresholdForConsistencyPreservation is given
                // (value != -1)
                // only add the new entities if there is enough evidence by
                // originally found entities with the same string that
                // this is indeed an entity we would like to find.
                if (confidenceThresholdForConsistencyPreservation > 0) {
                    if (!stringMatchedEntities.isEmpty()) {

                        double meanConfidence = 0;
                        for (EntityMention recognizedEntity : entityMap.get(entityString)) {
                            if (null != entity.getConfidence()) {
                                meanConfidence += Double.parseDouble(recognizedEntity.getConfidence());
                            }
                        }
                        meanConfidence /= entityMap.get(entityString).size();

                        int allMatches = stringMatchedEntities.size() + entityMap.get(entityString).size();
                        if (entityMap.get(entityString).size() >= allMatches / 3d) {
                            if (meanConfidence > confidenceThresholdForConsistencyPreservation) {
                                for (EntityMention refEntity : stringMatchedEntities) {
                                    // we have to add the new entities to
                                    // the overlap-index to avoid duplicates
                                    // by other entities that are a
                                    // substring of the current entity
                                    overlapSet.add(refEntity);
                                    refEntity.addToIndexes();
                                }
                            }
                        }
                    }
                }
                // if confidence score doesn't need to be checked, just add
                // all occurrences
                else {
                    for (EntityMention refEntity : stringMatchedEntities) {
                        // we have to add the new entities to the
                        // overlap-index to avoid duplicates by other
                        // entities that are a substring of the current
                        // entity
                        overlapSet.add(refEntity);
                        refEntity.addToIndexes();
                    }
                }
            }

        } catch (final Exception e) {
            LOGGER.error("doStringBased() - exception occurred: " + e.getMessage());
            throw new AnalysisEngineProcessException();
        }

    }
}
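
The comments in the method above describe the core trick: a Comparator that treats any two overlapping spans as "equal", so that add and contains on a TreeSet double as an efficient overlap check. The sketch below isolates that idea with a hypothetical, simplified Span class in place of EntityMention; it is an illustration of the pattern, not code from the project above.

import java.util.Comparator;
import java.util.TreeSet;

public class OverlapSetDemo {

    // Hypothetical, simplified span with half-open [begin, end) offsets
    static class Span {
        final int begin;
        final int end;
        Span(int begin, int end) {
            this.begin = begin;
            this.end = end;
        }
    }

    public static void main(String[] args) {
        // Spans that overlap in any way compare as "equal" (0);
        // otherwise they are ordered by their begin offset.
        Comparator<Span> overlapComparator = (s1, s2) -> {
            if (s1.begin < s2.end && s2.begin < s1.end) {
                return 0; // overlapping spans are treated as duplicates
            }
            return Integer.compare(s1.begin, s2.begin);
        };

        TreeSet<Span> spans = new TreeSet<>(overlapComparator);

        System.out.println(spans.add(new Span(0, 5)));        // true: first span
        System.out.println(spans.add(new Span(10, 15)));      // true: no overlap
        System.out.println(spans.add(new Span(3, 8)));        // false: overlaps [0, 5)
        System.out.println(spans.contains(new Span(12, 13))); // true: overlaps [10, 15)
    }
}

Strictly speaking, "overlaps" is not a transitive relation, so such a Comparator violates the Comparator contract; it works in this role only because the set is used purely as an overlap filter, which is exactly how the original code uses it.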

From source file:edu.fullerton.ldvw.ImageHistory.java

/**
 * If any images were checked, return their IDs.
 * @return IDs of the selected images
 */
private TreeSet<Integer> getSelected() {
    TreeSet<Integer> ret = new TreeSet<>();

    Map<String, String[]> parameterMap = request.getParameterMap();
    for (String p : parameterMap.keySet()) {
        if (p.toLowerCase().startsWith("sel_")) {
            String sn = p.substring(4);
            if (sn.matches("^\\d+$")) {
                Integer id = Integer.parseInt(sn);
                ret.add(id);
            }
        }
    }
    return ret;
}