Example usage for java.util TreeMap keySet

List of usage examples for java.util TreeMap keySet

Introduction

In this page you can find the example usage for java.util TreeMap keySet.

Prototype

public Set<K> keySet() 

Source Link

Document

Returns a Set view of the keys contained in this map.

Usage

From source file:com.opengamma.analytics.financial.provider.calculator.discounting.CashFlowEquivalentCalculator.java

@Override
public AnnuityPaymentFixed visitSwap(final Swap<?, ?> swap, final MulticurveProviderInterface multicurves) {
    ArgumentChecker.notNull(swap, "Swap");
    ArgumentChecker.notNull(multicurves, "Multicurves provider");
    final Currency ccy = swap.getFirstLeg().getCurrency();
    Validate.isTrue(ccy.equals(swap.getSecondLeg().getCurrency()),
            "Cash flow equivalent available only for single currency swaps.");
    final TreeMap<Double, Double> flow = new TreeMap<>();
    final AnnuityPaymentFixed cfeLeg1 = swap.getFirstLeg().accept(this, multicurves);
    final AnnuityPaymentFixed cfeLeg2 = swap.getSecondLeg().accept(this, multicurves);
    for (final PaymentFixed p : cfeLeg1.getPayments()) {
        flow.put(p.getPaymentTime(), p.getAmount());
    }//from  w  w  w  . j  a va 2s  .c o  m
    for (final PaymentFixed p : cfeLeg2.getPayments()) {
        addcf(flow, p.getPaymentTime(), p.getAmount());
    }
    final PaymentFixed[] agregatedCfe = new PaymentFixed[flow.size()];
    int loopcf = 0;
    for (final double time : flow.keySet()) {
        agregatedCfe[loopcf++] = new PaymentFixed(ccy, time, flow.get(time));
    }
    return new AnnuityPaymentFixed(agregatedCfe);
}

From source file:com.sfs.whichdoctor.analysis.RevenueAnalysisDAOImpl.java

/**
 * Consolidate summary.//  www  .  j  av  a2  s  .co m
 *
 * @param revenueMap the revenue map
 *
 * @return the tree map< revenue bean, collection< revenue bean>>
 */
private RevenueAnalysisBean consolidateSummary(final TreeMap<Object, ArrayList<RevenueBean>> revenueMap) {

    final RevenueAnalysisBean result = new RevenueAnalysisBean();

    final Collection<RevenueBean> summary = new ArrayList<RevenueBean>();

    for (Object key : revenueMap.keySet()) {
        RevenueBean summaryRevenue = new RevenueBean();
        for (RevenueBean revenue : revenueMap.get(key)) {

            summaryRevenue.setBatchReference(revenue.getBatchReference());
            summaryRevenue.setBatchNo(revenue.getBatchNo());
            summaryRevenue.setRevenueType(revenue.getRevenueType());
            summaryRevenue.setRevenueClass(revenue.getRevenueClass());

            final double summaryValue = summaryRevenue.getValue();
            final double summaryNetValue = summaryRevenue.getNetValue();

            /* Update the summary revenue totals for this batch */
            summaryRevenue.setValue(summaryValue + revenue.getValue());
            summaryRevenue.setNetValue(summaryNetValue + revenue.getNetValue());

            for (Double gstRate : revenue.getGSTValues().keySet()) {
                final double gstValue = revenue.getGSTValues().get(gstRate);
                double gstSubtotal = 0;
                if (summaryRevenue.getGSTValues().containsKey(gstRate)) {
                    gstSubtotal = summaryRevenue.getGSTValues().get(gstRate);
                }
                summaryRevenue.setGSTValue(gstRate, gstSubtotal + gstValue);
            }

            /* Add receipts/payments to this revenue batch */
            TreeMap<Integer, ReceiptBean> receipts = summaryRevenue.getReceipts();
            if (receipts == null) {
                receipts = new TreeMap<Integer, ReceiptBean>();
            }
            for (Integer receiptId : revenue.getReceipts().keySet()) {
                ReceiptBean receipt = revenue.getReceipts().get(receiptId);

                if (receipts.containsKey(receiptId)) {
                    ReceiptBean summaryReceipt = receipts.get(receiptId);

                    ArrayList<PaymentBean> payments = (ArrayList<PaymentBean>) summaryReceipt.getPayments();
                    if (payments == null) {
                        payments = new ArrayList<PaymentBean>();
                    }

                    if (receipt.getPayments() != null) {
                        for (PaymentBean payment : receipt.getPayments()) {
                            payments.add(payment);
                        }
                    }
                    summaryReceipt.setPayments(payments);

                    receipts.put(receiptId, summaryReceipt);
                } else {
                    receipts.put(receiptId, receipt);
                }
            }
        }

        if (dataLogger.isDebugEnabled()) {
            dataLogger.debug("Summary value: " + summaryRevenue.getValue());
            dataLogger.debug("Summary net value: " + summaryRevenue.getNetValue());
        }
        summary.add(summaryRevenue);
    }

    // Calculate the totals for the revenue analysis

    for (RevenueBean summaryRevenue : summary) {
        /* Update the overall running revenue totals */
        result.setValue(result.getValue() + summaryRevenue.getValue());
        result.setNetValue(result.getNetValue() + summaryRevenue.getNetValue());

        /* Update the GST totals */
        for (double gstRate : summaryRevenue.getGSTValues().keySet()) {
            final double gstValue = summaryRevenue.getGSTValues().get(gstRate);

            double currentGSTValue = 0;
            if (result.getGSTValues().containsKey(gstRate)) {
                currentGSTValue = result.getGSTValues().get(gstRate);
            }
            result.setGSTValue(gstRate, currentGSTValue + gstValue);
        }
    }
    // Ensure the revenue beans have the same GST fields
    result.setRevenue(processGSTRates(summary));

    if (dataLogger.isDebugEnabled()) {
        dataLogger.debug("Total calculated value: " + result.getValue());
        dataLogger.debug("Total calculated net value: " + result.getNetValue());
    }

    return result;
}

From source file:com.eucalyptus.objectstorage.pipeline.handlers.S3Authentication.java

/**
 * Query params are included in cases of Query-String/Presigned-url auth where they are considered just like headers
 * //from   w w w.  j  a va  2  s  .  c o m
 * @param httpRequest
 * @param includeQueryParams
 * @return
 */
private static String getCanonicalizedAmzHeaders(MappingHttpRequest httpRequest, boolean includeQueryParams) {
    String result = "";
    Set<String> headerNames = httpRequest.getHeaderNames();
    TreeMap<String, String> amzHeaders = new TreeMap<String, String>();
    for (String headerName : headerNames) {
        String headerNameString = headerName.toLowerCase().trim();
        if (headerNameString.startsWith("x-amz-")) {
            String value = httpRequest.getHeader(headerName).trim();
            String[] parts = value.split("\n");
            value = "";
            for (String part : parts) {
                part = part.trim();
                value += part + " ";
            }
            value = value.trim();
            if (amzHeaders.containsKey(headerNameString)) {
                String oldValue = (String) amzHeaders.remove(headerNameString);
                oldValue += "," + value;
                amzHeaders.put(headerNameString, oldValue);
            } else {
                amzHeaders.put(headerNameString, value);
            }
        }
    }

    if (includeQueryParams) {
        // For query-string auth, header values may include 'x-amz-*' that need to be signed
        for (String paramName : httpRequest.getParameters().keySet()) {
            processHeaderValue(paramName, httpRequest.getParameters().get(paramName), amzHeaders);
        }
    }

    // Build the canonical string
    Iterator<String> iterator = amzHeaders.keySet().iterator();
    while (iterator.hasNext()) {
        String key = iterator.next();
        String value = (String) amzHeaders.get(key);
        result += key + ":" + value + "\n";
    }
    return result;
}

From source file:com.sfs.whichdoctor.dao.BulkContactDAOImpl.java

/**
 * Prepare the bulk contact bean.//from  w  w w . j  a v  a2 s  .c  o  m
 *
 * @param contactBean the bulk contact bean
 * @param exportMap the export map
 * @param user the user
 * @return the bulk contact bean
 */
public final BulkContactBean prepare(final BulkContactBean contactBean,
        final TreeMap<String, ItemBean> exportMap, final UserBean user) {

    TreeMap<Integer, Integer> exportGUIDs = new TreeMap<Integer, Integer>();
    Collection<Object> guids = new ArrayList<Object>();

    /** Get a list of the unique GUIDs **/
    for (String key : exportMap.keySet()) {
        try {
            ItemBean details = (ItemBean) exportMap.get(key);
            exportGUIDs.put(details.getObject1GUID(), details.getObject1GUID());
        } catch (Exception e) {
            dataLogger.error("Error casting object to export map: " + e.getMessage());
        }
    }
    for (Integer guid : exportGUIDs.keySet()) {
        guids.add(guid);
    }

    BulkContactBean bulkContact = performSearch(contactBean, guids, user, true);

    return bulkContact;
}

From source file:gda.scan.ConcurrentScanChild.java

/**
 * Moves to the next step unless start is true, then moves to the start of the current (possibly child) scan.
 * @throws Exception//from   w w  w  . ja  va  2 s  .com
 */
protected void acquirePoint(boolean start, boolean collectDetectors) throws Exception {

    TreeMap<Integer, Scannable[]> devicesToMoveByLevel;
    if (collectDetectors) {
        devicesToMoveByLevel = generateDevicesToMoveByLevel(scannableLevels, allDetectors);
    } else {
        devicesToMoveByLevel = scannableLevels;
    }

    for (Integer thisLevel : devicesToMoveByLevel.keySet()) {

        Scannable[] scannablesAtThisLevel = devicesToMoveByLevel.get(thisLevel);

        // If there is a detector at this level then wait for detector readout thread to complete
        for (Scannable scannable : scannablesAtThisLevel) {
            if (scannable instanceof Detector) {
                waitForDetectorReadoutAndPublishCompletion();
                break;
            }
        }
        checkThreadInterrupted();

        // trigger at level start on all Scannables
        for (Scannable scannable : scannablesAtThisLevel) {
            scannable.atLevelStart();
        }

        // trigger at level move start on all Scannables
        for (Scannable scannable : scannablesAtThisLevel) {
            if (isScannableToBeMoved(scannable) != null) {
                if (isScannableToBeMoved(scannable).hasStart()) {
                    scannable.atLevelMoveStart();
                }
            }
        }

        // on detectors (technically scannables) that implement DetectorWithReadout call waitForReadoutComplete
        for (Scannable scannable : scannablesAtThisLevel) {
            if (scannable instanceof DetectorWithReadout) {
                if (!detectorWithReadoutDeprecationWarningGiven) {
                    logger.warn(
                            "The DetectorWithReadout interface is deprecated. Set gda.scan.concurrentScan.readoutConcurrently to true instead (after reading the 8.24 release note");
                    detectorWithReadoutDeprecationWarningGiven = true;
                }
                ((DetectorWithReadout) scannable).waitForReadoutCompletion();
            }
        }

        for (Scannable device : scannablesAtThisLevel) {
            if (!(device instanceof Detector)) {
                // does this scan (is a hierarchy of nested scans) operate this scannable?
                ScanObject scanObject = isScannableToBeMoved(device);
                if (scanObject != null) {
                    if (start) {
                        checkThreadInterrupted();
                        scanObject.moveToStart();
                    } else {
                        checkThreadInterrupted();
                        scanObject.moveStep();
                    }
                }
            } else {
                if (callCollectDataOnDetectors) {
                    checkThreadInterrupted();
                    ((Detector) device).collectData();
                }
            }
        }

        // pause here until all the scannables at this level have finished moving
        for (Entry<Integer, Scannable[]> entriesByLevel : devicesToMoveByLevel.entrySet()) {
            Scannable[] scannablesAtLevel = entriesByLevel.getValue();
            for (int i = 0; i < scannablesAtLevel.length; i++) {
                Scannable scn = scannablesAtLevel[i];
                scn.waitWhileBusy();
            }
        }
        for (Scannable scannable : scannablesAtThisLevel) {
            scannable.atLevelEnd();
        }
    }

}

From source file:com.act.lcms.db.analysis.AnalysisHelper.java

/**
 * This function scores the various metlin ions from different standard ion results, sorts them and picks the
 * best ion. This is done by adding up the indexed positions of the ion in each sorted entry of the list of
 * standard ion results. Since the entries in the standard ion results are sorted, the lower magnitude summation ions
 * are better than the larger magnitude summations. Then, we add another feature, in this case, the normalized SNR/maxSNR
 * but multiplying the positional score with the normalized SNR. The exact calculation is as follows:
 * score = positional_score * (1 - SNR(i)/maxSNR). We have to do the (1 - rel_snr) since we choose the lowest score,
 * so if the rel_snr is huge (ie a good signal), the overall magnitude of score will reduce, which makes that a better
 * ranking for the ion. We then do a post filtering on these scores based on if we have only positive/negative scans
 * from the scan files which exist in the context of the caller.
 * @param standardIonResults The list of standard ion results
 * @param curatedMetlinIons A map from standard ion result to the best curated ion that was manual inputted.
 * @param areOtherPositiveModeScansAvailable This boolean is used to post filter and pick a positive metlin ion if and
 *                                       only if positive ion mode scans are available.
 * @param areOtherNegativeModeScansAvailable This boolean is used to post filter and pick a negative metlin ion if and
 *                                       only if negative ion mode scans are available.
 * @return The best metlin ion or null if none can be found
 *//*from   w w w. jav  a2s. c o m*/
public static String scoreAndReturnBestMetlinIonFromStandardIonResults(
        List<StandardIonResult> standardIonResults, Map<StandardIonResult, String> curatedMetlinIons,
        boolean areOtherPositiveModeScansAvailable, boolean areOtherNegativeModeScansAvailable) {
    if (standardIonResults == null) {
        return null;
    }

    // We find the maximum SNR values for each standard ion result so that we can normalize individual SNR scores
    // during scoring.
    HashMap<StandardIonResult, Double> resultToMaxSNR = new HashMap<>();
    for (StandardIonResult result : standardIonResults) {
        Double maxSNR = 0.0d;
        for (Map.Entry<String, XZ> resultoDoublePair : result.getAnalysisResults().entrySet()) {
            if (resultoDoublePair.getValue().getIntensity() > maxSNR) {
                maxSNR = resultoDoublePair.getValue().getIntensity();
            }
        }
        resultToMaxSNR.put(result, maxSNR);
    }

    Map<String, Double> metlinScore = new HashMap<>();
    Set<String> ions = standardIonResults.get(0).getAnalysisResults().keySet();

    // For each ion, iterate through all the ion results to find the position of that ion in each result set (since the
    // ions are sorted) and then multiply that by a normalized value of the SNR.
    for (String ion : ions) {
        for (StandardIonResult result : standardIonResults) {
            Integer counter = 0;
            for (String localIon : result.getAnalysisResults().keySet()) {
                counter++;
                if (localIon.equals(ion)) {
                    Double ionScore = metlinScore.get(ion);
                    if (ionScore == null) {
                        // Normalize the sample's SNR by dividing it by the maxSNR. Then we multiple a variant of it to the counter
                        // score so that if the total magnitude of the score is lower, the ion is ranked higher.
                        ionScore = (1.0 * counter) * (1 - (result.getAnalysisResults().get(ion).getIntensity()
                                / resultToMaxSNR.get(result)));
                    } else {
                        ionScore += (1.0 * counter) * (1 - (result.getAnalysisResults().get(ion).getIntensity()
                                / resultToMaxSNR.get(result)));
                    }
                    metlinScore.put(ion, ionScore);
                    break;
                }
            }
        }
    }

    for (Map.Entry<StandardIonResult, String> resultToIon : curatedMetlinIons.entrySet()) {
        // Override all the scores of the manually curated standard ion result and set them to the highest rank.
        // Ideally, the user has been consistent for the best metlin ion across similar standard ion results, so
        // tie breakers will not happen. If a tie happen, it is broken arbitrarily.
        metlinScore.put(resultToIon.getValue(), MANUAL_OVERRIDE_BEST_SCORE);
    }

    TreeMap<Double, List<String>> sortedScores = new TreeMap<>();
    for (String ion : metlinScore.keySet()) {
        if (MS1.getIonModeOfIon(ion) != null) {
            if ((MS1.getIonModeOfIon(ion).equals(MS1.IonMode.POS) && areOtherPositiveModeScansAvailable)
                    || (MS1.getIonModeOfIon(ion).equals(MS1.IonMode.NEG)
                            && areOtherNegativeModeScansAvailable)) {
                List<String> ionBucket = sortedScores.get(metlinScore.get(ion));
                if (ionBucket == null) {
                    ionBucket = new ArrayList<>();
                }
                ionBucket.add(ion);
                sortedScores.put(metlinScore.get(ion), ionBucket);
            }
        }
    }

    if (sortedScores.size() == 0) {
        LOGGER.error(
                "Could not find any ions corresponding to the positive and negative scan mode conditionals");
        return null;
    } else {
        List<String> topMetlinIons = sortedScores.get(sortedScores.keySet().iterator().next());
        // In cases of a tie breaker, simply choose the first ion.
        return topMetlinIons.get(0);
    }
}

From source file:org.apache.camel.dataformat.bindy.BindyCsvFactory.java

public String unbind(Map<String, Object> model) throws Exception {

    StringBuilder buffer = new StringBuilder();
    results = new HashMap<Integer, List>();

    // Check if separator exists
    ObjectHelper.notNull(this.separator,
            "The separator has not been instantiated or property not defined in the @CsvRecord annotation");

    char separator = Converter.getCharDelimitor(this.getSeparator());

    if (LOG.isDebugEnabled()) {
        LOG.debug("Separator converted : '0x" + Integer.toHexString(separator) + "', from : "
                + this.getSeparator());
    }//from ww w  .  j a  va  2 s  .  com

    for (Class clazz : models) {

        if (model.containsKey(clazz.getName())) {

            Object obj = model.get(clazz.getName());

            if (LOG.isDebugEnabled()) {
                LOG.debug("Model object : " + obj + ", class : " + obj.getClass().getName());
            }

            if (obj != null) {

                // Generate Csv table
                generateCsvPositionMap(clazz, obj);

            }
        }
    }

    // Transpose result
    List<List> l = new ArrayList<List>();

    if (isOneToMany) {

        l = product(results);

    } else {

        // Convert Map<Integer, List> into List<List>
        TreeMap<Integer, List> sortValues = new TreeMap<Integer, List>(results);
        List<String> temp = new ArrayList<String>();

        for (Integer key : sortValues.keySet()) {

            // Get list of values
            List<String> val = sortValues.get(key);

            // For one to one relation
            // There is only one item in the list
            String value = (String) val.get(0);

            // Add the value to the temp array
            if (value != null) {
                temp.add(value);
            } else {
                temp.add("");
            }
        }

        l.add(temp);
    }

    if (l != null) {

        Iterator it = l.iterator();
        while (it.hasNext()) {

            List<String> tokens = (ArrayList<String>) it.next();
            Iterator itx = tokens.iterator();

            while (itx.hasNext()) {

                String res = (String) itx.next();

                if (res != null) {
                    buffer.append(res);
                } else {
                    buffer.append("");
                }

                if (itx.hasNext()) {
                    buffer.append(separator);
                }

            }

            if (it.hasNext()) {
                buffer.append(Converter.getStringCarriageReturn(getCarriageReturn()));
            }

        }

    }

    return buffer.toString();

}

From source file:cn.teamlab.wg.framework.util.csv.CsvWriter.java

/**
 * Bean?CSV?//  w  w  w .ja  va  2s .  c om
 * Bean@CsvPropAnno(index = ?)
 * @param objList
 * @return
 * @throws NoSuchMethodException 
 * @throws InvocationTargetException 
 * @throws IllegalAccessException 
 */
public static String bean2Csv(List<?> objList) throws Exception {
    if (objList == null || objList.size() == 0) {
        return "";
    }
    TreeMap<Integer, CsvFieldBean> map = new TreeMap<Integer, CsvFieldBean>();
    Object bean0 = objList.get(0);
    Class<?> clazz = bean0.getClass();

    PropertyDescriptor[] arr = org.springframework.beans.BeanUtils.getPropertyDescriptors(clazz);
    for (PropertyDescriptor p : arr) {
        String fieldName = p.getName();
        Field field = FieldUtils.getDeclaredField(clazz, fieldName, true);
        if (field == null) {
            continue;
        }

        boolean isAnno = field.isAnnotationPresent(CsvFieldAnno.class);
        if (isAnno) {
            CsvFieldAnno anno = field.getAnnotation(CsvFieldAnno.class);
            int idx = anno.index();
            map.put(idx, new CsvFieldBean(idx, anno.title(), fieldName));
        }
    }

    // CSVBuffer
    StringBuffer buff = new StringBuffer();

    // ???
    boolean withTitle = clazz.isAnnotationPresent(CsvTitleAnno.class);
    // ??csv
    if (withTitle) {
        StringBuffer titleBuff = new StringBuffer();
        for (int key : map.keySet()) {
            CsvFieldBean fieldBean = map.get(key);
            titleBuff.append(Letters.QUOTE).append(fieldBean.getTitle()).append(Letters.QUOTE);
            titleBuff.append(Letters.COMMA);
        }
        buff.append(StringUtils.chop(titleBuff.toString()));
        buff.append(Letters.LF);
        titleBuff.setLength(0);
    }

    for (Object o : objList) {
        StringBuffer tmpBuff = new StringBuffer();

        for (int key : map.keySet()) {
            CsvFieldBean fieldBean = map.get(key);

            Object val = BeanUtils.getProperty(o, fieldBean.getFieldName());
            if (val != null) {
                tmpBuff.append(Letters.QUOTE).append(val).append(Letters.QUOTE);
            } else {
                tmpBuff.append(StringUtils.EMPTY);
            }
            tmpBuff.append(Letters.COMMA);
        }

        buff.append(StringUtils.chop(tmpBuff.toString()));
        buff.append(Letters.LF);
        tmpBuff.setLength(0);
    }

    return buff.toString();
}

From source file:eu.edisonproject.training.wsd.BabelNet.java

private Set<Term> babelNetDisambiguation(String language, String lemma, Set<String> ngarms) {
    if (ngarms.isEmpty()) {
        return null;
    }//from w w  w  . j av a  2  s  . c  o  m
    if (ngarms.size() == 1 && ngarms.iterator().next().length() <= 1) {
        return null;
    }

    HashMap<CharSequence, Double> idsMap = new HashMap<>();
    Map<CharSequence, Term> termMap = new HashMap<>();
    Set<Term> terms = new HashSet<>();
    int count = 0;
    int breaklimit = 1000;
    int oneElementlimit = 65;
    int difflimit = 60;
    Double persent;
    for (String n : ngarms) {
        if (n.length() <= 1) {
            continue;
        }
        count++;
        if (idsMap.size() == 1 && count > oneElementlimit) {
            //                Double score = idsMap.values().iterator().next();
            //                if (score >= 10) {
            break;
            //                }
        }

        if ((count % 2) == 0 && idsMap.size() >= 2 && count > difflimit) {
            ValueComparator bvc = new ValueComparator(idsMap);
            TreeMap<CharSequence, Double> sorted_map = new TreeMap(bvc);
            sorted_map.putAll(idsMap);
            Iterator<CharSequence> iter = sorted_map.keySet().iterator();
            Double first = idsMap.get(iter.next());
            Double second = idsMap.get(iter.next());

            persent = first / (first + second);
            if (persent > 0.65) {
                break;
            }
        }
        if (count > breaklimit) {
            break;
        }

        String clearNg = n.replaceAll("_", " ");
        if (clearNg == null) {
            continue;
        }
        if (clearNg.startsWith(" ")) {
            clearNg = clearNg.replaceFirst(" ", "");
        }
        if (clearNg.endsWith(" ")) {
            clearNg = clearNg.substring(0, clearNg.length() - 1);
        }

        Pair<Term, Double> termPair = null;
        try {
            termPair = babelNetDisambiguation(language, lemma, clearNg);
        } catch (Exception ex) {
            if (ex.getMessage() != null && ex.getMessage().contains("Your key is not valid")) {
                try {
                    termPair = babelNetDisambiguation(language, lemma, clearNg);
                } catch (Exception ex1) {
                    //                       LOGGER.log(Level.WARNING, ex1, null);
                }
            } else {
                LOGGER.log(Level.WARNING, null, ex);
            }
        }
        if (termPair != null) {
            termMap.put(termPair.first.getUid(), termPair.first);
            Double score;
            if (idsMap.containsKey(termPair.first.getUid())) {
                score = idsMap.get(termPair.first.getUid());
                //                    score++;
                score += termPair.second;
            } else {
                //                    score = 1.0;
                score = termPair.second;
            }
            idsMap.put(termPair.first.getUid(), score);
        }
    }
    if (!idsMap.isEmpty()) {
        ValueComparator bvc = new ValueComparator(idsMap);
        TreeMap<CharSequence, Double> sorted_map = new TreeMap(bvc);
        sorted_map.putAll(idsMap);
        count = 0;
        Double firstScore = idsMap.get(sorted_map.firstKey());
        terms.add(termMap.get(sorted_map.firstKey()));
        idsMap.remove(sorted_map.firstKey());
        for (CharSequence tvID : sorted_map.keySet()) {
            if (count >= 1) {
                Double secondScore = idsMap.get(tvID);
                persent = secondScore / (firstScore + secondScore);
                if (persent > 0.2) {
                    terms.add(termMap.get(tvID));
                }
                if (count >= 2) {
                    break;
                }
            }
            count++;
        }
        return terms;
    }
    return null;
}

From source file:com.sfs.whichdoctor.dao.SupervisorDAOImpl.java

/**
 * Ordered supervisors./* w  w  w.  j a  v  a 2  s.c  o m*/
 *
 * @param supervisors the supervisors
 *
 * @return the collection< supervisor bean>
 */
private Collection<SupervisorBean> orderedSupervisors(final Collection<SupervisorBean> supervisors) {
    final Collection<SupervisorBean> ordered = new ArrayList<SupervisorBean>();

    TreeMap<String, SupervisorBean> orderMap = new TreeMap<String, SupervisorBean>();

    if (supervisors != null) {
        for (SupervisorBean supervisor : supervisors) {
            final String key = supervisor.getOrderId() + "_" + supervisor.getGUID();
            orderMap.put(key, supervisor);
        }
    }

    int orderId = 1;

    for (String key : orderMap.keySet()) {
        SupervisorBean supervisor = orderMap.get(key);

        supervisor.setOrderId(orderId);
        ordered.add(supervisor);

        orderId++;
    }
    return ordered;
}