Example usage for java.util SortedSet add

List of usage examples for java.util SortedSet add

Introduction

On this page you can find usage examples for java.util.SortedSet.add, collected from real source files.

Prototype

boolean add(E e);

Document

Adds the specified element to this set if it is not already present (optional operation).
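
As a quick, self-contained illustration (not taken from the listings below): add returns true only when the element was actually inserted, and a TreeSet keeps its elements in sorted order.

import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetAddDemo {
    public static void main(String[] args) {
        SortedSet<Integer> numbers = new TreeSet<Integer>();
        System.out.println(numbers.add(3)); // true: 3 was not present
        System.out.println(numbers.add(1)); // true: 1 was not present
        System.out.println(numbers.add(3)); // false: duplicate, set unchanged
        System.out.println(numbers);        // [1, 3] -- iteration follows sorted order
    }
}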

Usage

From source file:controllers.Statistics.java

private static void getElapsedTimeStatistics(long secondInMillis, String target, ObjectNode objectNode) {
    TitanGraph g = Global.getGraph();

    long second = (secondInMillis / 1000) * 1000;

    Iterator<Vertex> it = g.getVertices("second", second).iterator();
    Vertex v = null;
    SortedSet<Integer> numSet = new TreeSet<Integer>();
    int value = 0;
    int sum = 0;
    int count = 0;
    while (it.hasNext()) {
        v = it.next();
        for (Vertex vertex : v.query().labels("include").has("event", target).vertices()) {
            if (vertex == null) {
                continue;
            }
            value = (Integer) vertex.getProperty("elapsedTime");
            count++;
            sum += value;
            numSet.add(value); // TreeSet keeps values sorted, so first()/last() below yield min and max
        }
    }

    objectNode.put("cnt", count);
    if (count > 0) {
        objectNode.put("avg", sum / count);
        objectNode.put("min", numSet.first());
        objectNode.put("max", numSet.last());
    } else {
        objectNode.put("avg", 0);
        objectNode.put("min", 0);
        objectNode.put("max", 0);
    }
}

From source file:org.openmrs.module.hl7output.web.controller.RHEApatientController.java

@RequestMapping(value = "/{ecID}/encounters", method = RequestMethod.GET)
@ResponseBody
public Object getEncounters(@PathVariable("ecID") String enterpriseId,
        @RequestParam(value = "encounterUniqueId", required = false) String encounterUniqueId,
        @RequestParam(value = "dateStart", required = false) String dateStart,
        @RequestParam(value = "dateEnd", required = false) String dateEnd, HttpServletRequest request,
        HttpServletResponse response) throws ResponseException {

    LogEncounterService service = Context.getService(LogEncounterService.class);

    Date fromDate = null;
    Date toDate = null;
    Patient p = null;
    ORU_R01 r01 = null;

    log.info("RHEA Controller call detected...");
    log.info("Enterprise Patient Id is :" + enterpriseId);
    log.info("encounterUniqueId is :" + encounterUniqueId);
    log.info("dateStart is :" + dateStart);

    GetEncounterLog getEncounterLog = new GetEncounterLog();
    getEncounterLog.setEncounterUniqueId(encounterUniqueId);

    // First, we create from and to Date objects out of the String
    // parameters.

    if (enterpriseId == null) {
        log.info("Error : missing enterpriseId");
        getEncounterLog.setResult("Error, missing enterpriseId");
        service.saveGetEncounterLog(getEncounterLog);

        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        return null;
    }

    getEncounterLog.setEnterpriseId(enterpriseId);

    SimpleDateFormat format = new SimpleDateFormat("dd-MM-yyyy");
    try {
        if (dateStart != null)
            fromDate = format.parse(dateStart);
    } catch (ParseException e) {
        log.info("Error : failed to parse specidied start date : " + dateStart);
        getEncounterLog.setResult("Error, incorrectly parsed start date");
        service.saveGetEncounterLog(getEncounterLog);

        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        return null;
    }

    log.info("fromDate is :" + fromDate);

    try {
        if (dateEnd != null)
            toDate = format.parse(dateEnd);
    } catch (ParseException e) {
        log.info("Error : failed to parse specidied end date : " + dateEnd);
        getEncounterLog.setResult("Error, incorrectly parsed start date");
        service.saveGetEncounterLog(getEncounterLog);

        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        return null;
    }

    log.info("toDate is :" + toDate);

    getEncounterLog.setDateEnd(toDate);
    getEncounterLog.setDateStart(fromDate);

    // Next, we try to retrieve the matching patient object
    if (enterpriseId != null) {
        PatientIdentifierType patientIdentifierType = Context.getPatientService()
                .getPatientIdentifierTypeByName("ECID");
        List<PatientIdentifierType> identifierTypeList = new ArrayList<PatientIdentifierType>();
        identifierTypeList.add(patientIdentifierType);

        List<Patient> patients = Context.getPatientService().getPatients(null, enterpriseId, identifierTypeList,
                false);
        //I am not checking the identifier type here. Need to come back and add a check for this
        if (patients.size() == 1) {
            p = patients.get(0);
        }
    }

    // if the patient doesn't exist, we need to return 400-BAD REQUEST
    // because the parameters are malformed
    if (p == null) {
        log.info("Error : failed to retreive patient for the given uuid : " + enterpriseId);
        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);

    } else {
        log.info("Patient id : " + p.getPatientId() + "was retreived...");

        if (p != null) {
            //get all the encounters for this patient
            List<Encounter> encounterList = Context.getEncounterService().getEncountersByPatient(p);
            //if the encounterUniqueId is not null, we can isolate the given encounter

            if (encounterUniqueId != null) {
                Iterator<Encounter> i = encounterList.iterator();
                while (i.hasNext()) {
                    if (!i.next().getUuid().equals(encounterUniqueId))
                        i.remove();
                }
            }

            //If the encounterUniqueId was not given, we will try to filter encounters based on from and to dates
            List<Encounter> filteredEncounterList = new ArrayList<Encounter>();

            if (fromDate != null || toDate != null) {
                for (Encounter encounter : encounterList) {
                    if (fromDate != null && toDate != null) {
                        if ((encounter.getEncounterDatetime().after(fromDate))
                                && (encounter.getEncounterDatetime().before(toDate))) {
                            filteredEncounterList.add(encounter);
                        }

                    } else if (fromDate == null) {
                        if (encounter.getEncounterDatetime().before(toDate)) {
                            filteredEncounterList.add(encounter);
                        }

                    } else {
                        if (encounter.getEncounterDatetime().after(fromDate)) {
                            filteredEncounterList.add(encounter);
                        }

                    }
                }

                log.info("The number of matching encounters are :" + filteredEncounterList.size());
                encounterList = filteredEncounterList;
            }
            log.info("Calling the ORU_R01 parser...");

            SortedSet<MatchingEncounters> encounterSet = new TreeSet<MatchingEncounters>();

            for (Encounter e : encounterList) {
                MatchingEncounters matchingEncounters = new MatchingEncounters();
                matchingEncounters.setGetEncounterLog(getEncounterLog);
                matchingEncounters.setEncounterId(e.getEncounterId());

                encounterSet.add(matchingEncounters);
            }

            getEncounterLog.setLogTime(new Date());
            if (encounterList.size() > 0)
                getEncounterLog.setResult("Results Retrieved");
            else
                getEncounterLog.setResult("No Results Retrieved");

            //Now we will generate the HL7 message

            GenerateORU_R01 R01Util = new GenerateORU_R01();
            try {
                r01 = R01Util.generateORU_R01Message(p, encounterList);
            } catch (Exception e) {
                getEncounterLog.setResult("Error : Processing hl7 message failed");
                service.saveGetEncounterLog(getEncounterLog);
                response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
                return null;
            }

            getEncounterLog.getMatchingEncounters().clear();
            getEncounterLog.setMatchingEncounters(encounterSet);

            service.saveGetEncounterLog(getEncounterLog);

        }

        try {
            // Convert the ORU_R01 object into a byte stream
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            ObjectOutputStream oos = new ObjectOutputStream(bos);
            oos.writeObject(r01);
            oos.flush();
            oos.close();
            bos.close();
            byte[] data = bos.toByteArray();

            // Write the bytestream into the HttpServletResponse
            ServletOutputStream stream = response.getOutputStream();
            stream.write(data);
            stream.flush();

            response.getWriter().flush();
            response.getWriter().close();

            //NOTE : I'm returning the ORU_R01 object as a byte stream AND a session attribute. Why both? Remove one later!
            request.getSession().setAttribute("oru_r01", r01);

            response.setStatus(HttpServletResponse.SC_OK);

        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    // Return null for now
    return null;
}

From source file:com.spotify.heroic.suggest.elasticsearch.SuggestBackendV1.java

@Override
public AsyncFuture<TagValuesSuggest> tagValuesSuggest(final TagValuesSuggest.Request request) {
    return connection.doto((final Connection c) -> {
        final FilterBuilder f = TAG_CTX.filter(request.getFilter());

        final BoolQueryBuilder root = QueryBuilders.boolQuery();
        root.must(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), f));

        for (final String e : request.getExclude()) {
            root.mustNot(QueryBuilders.matchQuery(Utils.TAG_KEY_RAW, e));
        }

        final SearchRequestBuilder builder;

        try {
            builder = c.search(request.getRange(), Utils.TYPE_TAG).setSearchType(SearchType.COUNT)
                    .setQuery(root);
        } catch (NoIndexSelectedException e) {
            return async.failed(e);
        }

        final OptionalLimit limit = request.getLimit();
        final OptionalLimit groupLimit = request.getGroupLimit();

        {
            final TermsBuilder terms = AggregationBuilders.terms("keys").field(Utils.TAG_KEY_RAW);

            limit.asInteger().ifPresent(l -> terms.size(l + 1));

            builder.addAggregation(terms);
            // make value bucket one entry larger than necessary to figure out when limiting
            // is applied.
            final TermsBuilder cardinality = AggregationBuilders.terms("values").field(Utils.TAG_VALUE_RAW);

            groupLimit.asInteger().ifPresent(l -> cardinality.size(l + 1));
            terms.subAggregation(cardinality);
        }

        return bind(builder.execute()).directTransform((SearchResponse response) -> {
            final List<TagValuesSuggest.Suggestion> suggestions = new ArrayList<>();

            final Terms terms = response.getAggregations().get("keys");

            final List<Bucket> suggestionBuckets = terms.getBuckets();

            for (final Terms.Bucket bucket : limit.limitList(suggestionBuckets)) {
                final Terms valueTerms = bucket.getAggregations().get("values");

                final List<Bucket> valueBuckets = valueTerms.getBuckets();

                final SortedSet<String> result = new TreeSet<>();

                for (final Terms.Bucket valueBucket : valueBuckets) {
                    result.add(valueBucket.getKey());
                }

                final boolean limited = groupLimit.isGreater(valueBuckets.size());
                final SortedSet<String> values = groupLimit.limitSortedSet(result);

                suggestions.add(new TagValuesSuggest.Suggestion(bucket.getKey(), values, limited));
            }

            return TagValuesSuggest.of(ImmutableList.copyOf(suggestions),
                    limit.isGreater(suggestionBuckets.size()));
        });
    });
}

From source file:org.stockwatcher.data.cassandra.StockDAOImpl.java

@Override
public SortedSet<Stock> findStocks(StatementOptions options, StockCriteria criteria) {
    validateCriteria(criteria);
    Integer[] industries = criteria.getIndustryIds();
    Set<String> exchanges = new HashSet<String>(Arrays.asList(criteria.getExchangeIds()));
    BigDecimal minPrice = criteria.getMinimumPrice();
    BigDecimal maxPrice = criteria.getMaximumPrice();
    SortedSet<Stock> stocks = new TreeSet<Stock>();
    try {
        Set<String> symbols = getMatchingSymbols(options, industries, exchanges);
        Clause where = in("stock_symbol", symbols.toArray());
        for (Row row : getStockResultSet(options, where)) {
            BigDecimal curPrice = row.getDecimal("current_price");
            if (row.getBool("active") && (minPrice.compareTo(curPrice) <= 0)
                    && (maxPrice.compareTo(curPrice) >= 0)) {
                stocks.add(createStock(row));
            }
        }
    } catch (DriverException e) {
        throw new DAOException(e);
    }
    return stocks;
}

From source file:org.talend.license.LicenseRetriver.java

public Collection<File> updateLicense(final String version, final File file) {
    logger.info("start to update {} license ", version);
    String url = String.format(Configer.getBuildURL() + Configer.getLicenseURL(), version);

    Document doc = connector.getPage(url);
    if (null == doc) {
        logger.error("no {} license page url:{}", version, url);
        return null;
    }
    String regex = String.format(Configer.getLicenseItem(), version);

    Elements eles = doc.getElementsMatchingOwnText(regex);

    if (eles.isEmpty()) {
        logger.error("no {} license page url:{}", version, url);
        return null;
    }

    final Pattern pattern = Pattern.compile(regex);

    // Order build identifiers in descending order so the newest build is tried first.
    SortedSet<String> set = new TreeSet<String>(new Comparator<String>() {

        public int compare(String o1, String o2) {
            String m1;
            String m2;
            Matcher matcher = pattern.matcher(o1);
            if (matcher.find()) {
                m1 = matcher.group(2);
            } else {
                return 1;
            }
            matcher = pattern.matcher(o2);
            if (matcher.find()) {
                m2 = matcher.group(2);
            } else {
                return -1;
            }
            return m2.compareTo(m1);
        }
    });
    logger.info("there are {} license build", eles.size());
    for (Element ele : eles) {
        String text = ele.text();
        set.add(text);
    }
    if (set.isEmpty()) {
        return null;
    }

    for (String target : set) {
        // each candidate URL is the base page URL plus the build identifier
        String buildUrl = url + target;
        logger.info("retrieve from newest build {}", buildUrl);
        Collection<File> fs = checkout(version, file, buildUrl);
        if (!fs.isEmpty()) {
            return fs;
        }
        logger.info("no available license in build");
    }
    logger.error("retrive license failed");
    return null;
}
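
The listing above seeds the TreeSet with a custom Comparator so that iteration visits the newest build first. A minimal sketch of the same pattern, using hypothetical build identifiers and Comparator.reverseOrder() (Java 8+):

import java.util.Comparator;
import java.util.SortedSet;
import java.util.TreeSet;

public class NewestFirstDemo {
    public static void main(String[] args) {
        // Reverse the natural ordering: add() keeps the set sorted, so first() is the newest.
        SortedSet<String> builds = new TreeSet<String>(Comparator.<String>reverseOrder());
        builds.add("build-20140301");
        builds.add("build-20140101");
        builds.add("build-20140201");
        System.out.println(builds.first()); // build-20140301
    }
}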

From source file:module.workingCapital.presentationTier.action.WorkingCapitalAction.java

private SortedSet<WorkingCapitalProcess> getAllProcesses(final WorkingCapitalYear workingCapitalYear) {
    final SortedSet<WorkingCapitalProcess> unitProcesses = new TreeSet<WorkingCapitalProcess>(
            WorkingCapitalProcess.COMPARATOR_BY_UNIT_NAME);
    for (final WorkingCapital workingCapital : workingCapitalYear.getWorkingCapitalsSet()) {
        final WorkingCapitalProcess workingCapitalProcess = workingCapital.getWorkingCapitalProcess();
        if (workingCapitalProcess.isAccessibleToCurrentUser()) {
            unitProcesses.add(workingCapitalProcess);
        }
    }
    return unitProcesses;
}

From source file:com.twitter.hraven.datasource.JobHistoryRawService.java

/**
 * Given a min and max jobId, get a {@link Scan} to go through all the records
 * loaded in the {@link Constants#HISTORY_RAW_TABLE}, get all the rowkeys and
 * create a list of scans with batchSize number of rows in the rawTable.
 * <p>
 * Note that this can be a somewhat slow operation as the
 * {@link Constants#HISTORY_RAW_TABLE} will have to be scanned.
 * 
 * @param cluster
 *          on which the Hadoop jobs ran.
 * @param minJobId
 *          used to start the scan. If null then there is no min limit on
 *          JobId.
 * @param maxJobId
 *          used to end the scan (inclusive). If null then there is no max
 *          limit on jobId.
 * @param reprocess
 *          Reprocess those records that may have been processed already.
 *          Otherwise successfully processed jobs are skipped.
 * @param batchSize
 *          maximum number of rows to include in each returned scan.
 * @return a scan of jobIds between the specified min and max. Retrieves only
 *         one version of each column.
 * @throws IOException
 * @throws RowKeyParseException
 *           when rows returned from the Raw table do not conform to the
 *           expected row key.
 */
public List<Scan> getHistoryRawTableScans(String cluster, String minJobId, String maxJobId, boolean reprocess,
        int batchSize) throws IOException, RowKeyParseException {

    List<Scan> scans = new LinkedList<Scan>();

    // Get all the values in the scan so that we can evenly chop them into
    // batch size chunks.
    // The problem is that processRecords min and max can have vastly
    // overlapping ranges, and in addition, they may have a minJobId of a long
    // running Hadoop job that is processed much later. Many jobIds of
    // shorter jobs that have already been processed will fall between the
    // min and max, but since the scan returns only the records that are not
    // already processed, the returned list may have large gaps.
    Scan scan = getHistoryRawTableScan(cluster, minJobId, maxJobId, reprocess, false);

    SortedSet<JobId> orderedJobIds = new TreeSet<JobId>();

    ResultScanner scanner = null;
    try {
        LOG.info("Scanning " + Constants.HISTORY_RAW_TABLE + " table from " + minJobId + " to " + maxJobId);
        scanner = rawTable.getScanner(scan);
        for (Result result : scanner) {
            JobId qualifiedJobId = getQualifiedJobIdFromResult(result);
            orderedJobIds.add(qualifiedJobId);
        }
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }

    // Now chop the set into chunks.
    List<Range<JobId>> ranges = BatchUtil.getRanges(orderedJobIds, batchSize);
    LOG.info("Dividing " + orderedJobIds.size() + " jobs in " + ranges.size() + " ranges.");

    for (Range<JobId> range : ranges) {
        Scan rawScan = getHistoryRawTableScan(cluster, range.getMin().getJobIdString(),
                range.getMax().getJobIdString(), reprocess, true);
        scans.add(rawScan);
    }

    return scans;
}
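
BatchUtil.getRanges performs the chunking above; as a rough sketch of the idea (not the hRaven implementation), a SortedSet can be chopped into batches by iterating it in order:

import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public class RangeChopDemo {
    // Chop a sorted set into consecutive chunks of at most batchSize elements.
    static <T> List<List<T>> chop(SortedSet<T> ordered, int batchSize) {
        List<List<T>> chunks = new ArrayList<List<T>>();
        List<T> current = new ArrayList<T>(batchSize);
        for (T item : ordered) { // iteration follows the set's sort order
            current.add(item);
            if (current.size() == batchSize) {
                chunks.add(current);
                current = new ArrayList<T>(batchSize);
            }
        }
        if (!current.isEmpty()) {
            chunks.add(current);
        }
        return chunks;
    }

    public static void main(String[] args) {
        SortedSet<Integer> jobIds = new TreeSet<Integer>();
        for (int i = 1; i <= 7; i++) {
            jobIds.add(i);
        }
        System.out.println(chop(jobIds, 3)); // [[1, 2, 3], [4, 5, 6], [7]]
    }
}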

From source file:com.benfante.minimark.blo.QuestionBo.java

/**
 * Update the set of tags of a question.
 *
 * @param question The question to update
 */
private void updateTagSet(Question question) {
    String[] tags = question.getTagList().split(",");
    SortedSet<TagQuestionLink> currentTags = question.getTags();
    SortedSet<TagQuestionLink> oldTags = new TreeSet<TagQuestionLink>(currentTags);
    currentTags.clear();
    for (String stag : tags) {
        if (StringUtils.isNotBlank(stag)) {
            stag = stag.trim().toLowerCase();
            TagQuestionLink tagLink = searchInSet(stag, oldTags);
            if (tagLink == null) {
                // new tag for this question
                Tag tag = tagDao.findByName(stag);
                if (tag == null) {
                    // totally new tag
                    tag = new Tag(stag);
                    tagDao.store(tag);
                }
                tagLink = new TagQuestionLink(tag, question);
            }
            currentTags.add(tagLink);
        }
    }
}

From source file:com.davidsoergel.trees.htpn.AbstractExtendedHierarchicalTypedPropertyNode.java

public SortedSet<Class> getPluginOptions(Incrementor incrementor) {
    V value = getValue();

    if (!(value == null || value instanceof GenericFactory || value instanceof Class)) {
        logger.warn("Can't get plugin options for a value " + value + " of type " + value.getClass());
    }

    // if this is a GenericFactory, then we need the parameter type

    Type thePluginType = type;
    if (TypeUtils.isAssignableFrom(GenericFactory.class, type)) {
        if (type instanceof Class) {
            // it's a type that extends GenericFactory; just leave it alone then
            thePluginType = type;
        } else {
            thePluginType = ((ParameterizedType) type).getActualTypeArguments()[0];
        }
    }

    java.util.SortedSet<Class> result = new TreeSet<Class>(new Comparator<Class>() {
        public int compare(Class o1, Class o2) {
            if (o1 == null) {
                return -1;
            } else if (o2 == null) {
                return 1;
            } else {
                return o1.getSimpleName().compareTo(o2.getSimpleName());
            }
        }
    });
    if (isNullable) {
        // The comparator above orders null first, so this TreeSet tolerates a null entry.
        result.add(null);
    }
    try {
        PluginManager.registerPluginsFromDefaultPackages(thePluginType, incrementor);
    } catch (IOException e) {
        logger.error("Error", e);
        throw new Error(e);
    }

    result.addAll(PluginManager.getPlugins(thePluginType));
    return result;
}

From source file:de.interactive_instruments.ShapeChange.Model.EA.EADocument.java

/**
 * Collect and return all PackageInfo objects tagged as being a schema. If a
 * name is given, only the package with the specified name will be
 * considered.
 */
public SortedSet<PackageInfo> schemas(String name) {
    SortedSet<PackageInfo> res = new TreeSet<PackageInfo>();
    for (PackageInfo pi : fPackageById.values()) {
        if (pi.isSchema()) {
            if (name != null && !name.equals("")) {
                if (pi.name().equals(name))
                    res.add(pi);
            } else {
                res.add(pi);
            }
        }
    }
    return res;
}