Example usage for java.util Collections reverseOrder

List of usage examples for java.util Collections reverseOrder

Introduction

On this page you can find example usage for java.util Collections reverseOrder.

Prototype

@SuppressWarnings("unchecked")
public static <T> Comparator<T> reverseOrder() 

Document

Returns a comparator that imposes the reverse of the natural ordering on a collection of objects that implement the Comparable interface.
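
A minimal sketch of the call in isolation (class and variable names here are illustrative, not taken from the examples below): the returned comparator can be passed straight to Collections.sort to order a list descending.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ReverseOrderDemo {
    public static void main(String[] args) {
        List<Integer> numbers = new ArrayList<>(Arrays.asList(3, 1, 2));
        // reverseOrder() inverts the natural (ascending) ordering of Integer
        Collections.sort(numbers, Collections.reverseOrder());
        System.out.println(numbers); // prints [3, 2, 1]
    }
}

Note that the element type must implement Comparable; otherwise the sort fails with a ClassCastException at runtime.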

Usage

From source file:com.hin.hl7messaging.InvoiceSvgService.java

public Element createServiceElement(Document document, Element subG, HashMap<String, String> serviceMap,
        HashMap<String, String> xCoordinatesMap, String y, String style) {
    List<String> sortedKeys = new ArrayList<String>(serviceMap.size());
    sortedKeys.addAll(serviceMap.keySet());
    Collections.sort(sortedKeys, Collections.reverseOrder());

    int xService = 115;
    int xCost = 1150;

    for (String key : sortedKeys) {
        Element text = document.createElement("text");
        text.setAttribute("x", "223.8");
        text.setAttribute("y", "781.76");
        text.setAttribute("id", "legendTitle");
        Element tspan = document.createElement("tspan");
        tspan.setAttribute("id", "optimumTitle");
        tspan.setAttribute("x", String.valueOf(xService));
        tspan.setAttribute("y", y);
        tspan.setAttribute("style", style);
        tspan.setTextContent(key);
        text.appendChild(tspan);
        subG.appendChild(text);

        Element text1 = document.createElement("text");
        text1.setAttribute("x", "223.8");
        text1.setAttribute("y", "781.76");
        text1.setAttribute("id", "legendTitle");
        Element tspan1 = document.createElement("tspan");
        tspan1.setAttribute("id", "optimumTitle");
        tspan1.setAttribute("x", String.valueOf(xCost));
        tspan1.setAttribute("y", y);
        tspan1.setAttribute("style", style);
        tspan1.setTextContent(serviceMap.get(key));
        text1.appendChild(tspan1);
        subG.appendChild(text1);

        int yAxis = Integer.parseInt(y);
        y = String.valueOf(yAxis + 30);
    }

    return subG;
}

From source file:com.javielinux.utils.Utils.java

static public String toExportHTML(Context context, String text) {
    String out = text.replace("<", "&lt;");
    //out = out.replace(">", "&gt;");
    ArrayList<Integer> valStart = new ArrayList<Integer>();
    ArrayList<Integer> valEnd = new ArrayList<Integer>();

    Comparator<Integer> comparator = Collections.reverseOrder();

    // links

    String regex = "\\(?\\b(http://|https://|www[.])[-A-Za-z0-9+&@#/%?=~_()|!:,.;]*[-A-Za-z0-9+&@#/%=~_()|]";
    Pattern p = Pattern.compile(regex);
    Matcher m = p.matcher(out);
    while (m.find()) {
        valStart.add(m.start());
        valEnd.add(m.end());
    }

    // hashtag

    regex = "(#[\\w-]+)";
    p = Pattern.compile(regex);
    m = p.matcher(out);
    while (m.find()) {
        valStart.add(m.start());
        valEnd.add(m.end());
    }

    // Twitter users

    regex = "(@[\\w-]+)";
    p = Pattern.compile(regex);
    m = p.matcher(out);
    while (m.find()) {
        valStart.add(m.start());
        valEnd.add(m.end());
    }

    Collections.sort(valStart, comparator);
    Collections.sort(valEnd, comparator);

    for (int i = 0; i < valStart.size(); i++) {
        int s = valStart.get(i);
        int e = valEnd.get(i);
        String link = out.substring(s, e);

        if (out.substring(s, s + 1).equals("#")) {
            out = out.substring(0, s) + "<a href=\"http://twitter.com/#!/search/" + link
                    + "\" class=\"hashtag\">" + link + "</a>" + out.substring(e, out.length());
        } else if (out.substring(s, s + 1).equals("@")) {
            out = out.substring(0, s) + "<a href=\"http://twitter.com/#!/" + link + "\" class=\"user\">" + link
                    + "</a>" + out.substring(e, out.length());
        } else {
            out = out.substring(0, s) + "<a href=\"" + link + "\" class=\"link\">" + link + "</a>"
                    + out.substring(e, out.length());
        }
    }

    return out;
}

From source file:UI.MainStageController.java

/**
 * collects the data for the barChart
 * data includes:
 * degree distribution
 * hubs
 */
public void displayGraphAnalysis() {
    //Generate Data for the BarChart
    GraphAnalysis analysis = AnalysisData.getAnalysis();
    HashMap<Integer, Double> degreeDistribution = analysis.getDegreeDistribution();
    XYChart.Series<String, Double> degreeSeries = new XYChart.Series<>();

    for (Map.Entry<Integer, Double> entry : degreeDistribution.entrySet()) {
        degreeSeries.getData().add(new XYChart.Data<>(entry.getKey().toString(), entry.getValue()));
    }
    degreeDistributionChart.getData().clear();
    degreeDistributionChart.getData().add(degreeSeries);

    //Generate Graph Statistics to display in the TextArea
    HashMap<TaxonNode, Integer> hubs = analysis.getHubsList();
    graphStatText.setText("List of Hubs:\n\n");

    //Sort hubs by descending values
    Map<TaxonNode, Integer> hubsSorted = hubs.entrySet().stream()
            .sorted(Map.Entry.comparingByValue(Collections.reverseOrder())).collect(Collectors
                    .toMap(Map.Entry::getKey, Map.Entry::getValue, (e1, e2) -> e1, LinkedHashMap::new));

    for (Map.Entry<TaxonNode, Integer> entry : hubsSorted.entrySet()) {
        graphStatText
                .setText(graphStatText.getText() + entry.getKey().getName() + " (" + entry.getValue() + ")\n");
    }
}
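
The stream pipeline above sorts map entries by descending value and preserves that order in a LinkedHashMap. A minimal standalone sketch of the same idiom (map contents are illustrative):

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class SortByValueDemo {
    public static void main(String[] args) {
        Map<String, Integer> counts = Map.of("a", 1, "b", 3, "c", 2);
        Map<String, Integer> descending = counts.entrySet().stream()
                .sorted(Map.Entry.comparingByValue(Collections.reverseOrder()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                        (e1, e2) -> e1, LinkedHashMap::new));
        System.out.println(descending); // prints {b=3, c=2, a=1}
    }
}

The LinkedHashMap supplier is what preserves the sorted order; the default toMap collector returns a HashMap, which would discard it.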

From source file:org.egov.wtms.service.es.WaterChargeElasticSearchService.java

private List<WaterTaxPayerDetails> returnTopResults(final List<WaterTaxPayerDetails> taxPayers, final int size,
        final Boolean order) {
    if (size > 10) {
        if (order)
            Collections.sort(taxPayers);
        else
            Collections.sort(taxPayers, Collections.reverseOrder());

        return taxPayers.subList(0, taxPayers.size() < 10 ? taxPayers.size() : 10);
    }
    return taxPayers;
}

From source file:com.linkedin.pinot.controller.helix.core.realtime.PinotLLCRealtimeSegmentManager.java

protected void completeCommittingSegments(String realtimeTableName) {
    IdealState idealState = getTableIdealState(realtimeTableName);
    Set<String> segmentNamesIS = idealState.getPartitionSet();
    List<ZNRecord> segmentMetadataList = getExistingSegmentMetadata(realtimeTableName);
    if (segmentMetadataList == null || segmentMetadataList.isEmpty()) {
        return;
    }
    final List<LLCSegmentName> segmentNames = new ArrayList<>(segmentMetadataList.size());

    for (ZNRecord segment : segmentMetadataList) {
        if (SegmentName.isLowLevelConsumerSegmentName(segment.getId())) {
            segmentNames.add(new LLCSegmentName(segment.getId()));
        }
    }

    if (segmentNames.isEmpty()) {
        return;
    }

    Collections.sort(segmentNames, Collections.reverseOrder());

    int curPartition = segmentNames.get(0).getPartitionId(); // Current kafka partition we are working on.
    final int nSegments = segmentNames.size();

    /*
     * We only need to look at the most recent segment in the kafka partition. If that segment is also present
     * in the idealstate, we are good.
     * Otherwise, we need to add that segment to the idealstate:
     * - We find the current instance assignment for that partition and update idealstate accordingly.
     * NOTE: It may be that the kafka assignment of instances has changed for this partition. In that case,
     * we need to also modify the numPartitions field in the segment metadata.
     * TODO Modify numPartitions field in segment metadata and re-write it in propertystore.
     * The numPartitions field in the metadata is used by SegmentCompletionManager
     */
    for (int i = 0; i < nSegments; i++) {
        final LLCSegmentName segmentName = segmentNames.get(i);
        if (segmentName.getPartitionId() == curPartition) {
            final String curSegmentNameStr = segmentName.getSegmentName();
            if (!segmentNamesIS.contains(curSegmentNameStr)) {
                LOGGER.info("{}:Repairing segment for partition {}. Segment {} not found in idealstate",
                        realtimeTableName, curPartition, curSegmentNameStr);

                final ZNRecord partitionAssignment = getKafkaPartitionAssignment(realtimeTableName);
                List<String> newInstances = partitionAssignment.getListField(Integer.toString(curPartition));
                LOGGER.info("{}: Assigning segment {} to {}", realtimeTableName, curSegmentNameStr,
                        newInstances);
                // TODO Re-write num-partitions in metadata if needed.

                String prevSegmentNameStr = null;
                // If there was a prev segment in the same partition, then we need to fix it to be ONLINE.
                if (i < nSegments - 1) {
                    LLCSegmentName prevSegmentName = segmentNames.get(i + 1);
                    if (prevSegmentName.getPartitionId() == segmentName.getPartitionId()) {
                        prevSegmentNameStr = prevSegmentName.getSegmentName();
                    }
                }
                updateHelixIdealState(realtimeTableName, newInstances, prevSegmentNameStr, curSegmentNameStr);
                // Skip all other segments in this partition.
            }
            curPartition--;
        }
        if (curPartition < 0) {
            break;
        }
    }
}

From source file:it.polito.tellmefirst.web.rest.clients.ClientEpub.java

public ArrayList<ClassifyOutput> sortByRank(HashMap<ClassifyOutput, Integer> inputList) {

    LOG.debug("[sortByRank] - BEGIN");

    ArrayList<ClassifyOutput> result = new ArrayList<>();
    LinkedMap apacheMap = new LinkedMap(inputList);
    for (int i = 0; i < apacheMap.size() - 1; i++) {
        TreeMap<Float, ClassifyOutput> treeMap = new TreeMap<>(Collections.reverseOrder());
        do {
            i++;
            treeMap.put(Float.valueOf(((ClassifyOutput) apacheMap.get(i - 1)).getScore()),
                    (ClassifyOutput) apacheMap.get(i - 1));
        } while (i < apacheMap.size() && apacheMap.getValue(i) == apacheMap.getValue(i - 1));
        i--;
        for (Float score : treeMap.keySet()) {
            result.add(treeMap.get(score));
        }
    }

    LOG.debug("[sortByRank] - END");
    return result;
}
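
Passing the comparator to the TreeMap constructor, as above, keeps the map's keys permanently sorted in descending order. A minimal sketch (keys and values are illustrative):

import java.util.Collections;
import java.util.TreeMap;

public class ReverseTreeMapDemo {
    public static void main(String[] args) {
        TreeMap<Float, String> byScore = new TreeMap<>(Collections.reverseOrder());
        byScore.put(0.5f, "low");
        byScore.put(0.9f, "high");
        // With the reversed comparator, firstKey() is the largest key
        System.out.println(byScore.firstKey()); // prints 0.9
    }
}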

From source file:de.uni_potsdam.hpi.asg.logictool.mapping.SequenceBasedAndGateDecomposer.java

private Map<Integer, List<Partition>> getPartitions(Set<Signal> signals, int startgatesize) {
    // cost function
    SortedMap<Integer, List<Partition>> retVal = new TreeMap<>(Collections.reverseOrder());
    Set<Set<Set<Signal>>> parts = getTailCombinations(signals);
    if (parts == null) {
        return null;
    }
    for (Set<Set<Signal>> partition : parts) {
        int cost = 0;
        Set<PartitionPart> parts2 = new HashSet<>();
        for (Set<Signal> partpart : partition) {
            parts2.add(new PartitionPart(partpart));
            if (partpart.size() != 1) {
                cost += partpart.size();
            }
        }
        if (partition.size() != 1) {
            cost += partition.size();
        }

        if (!retVal.containsKey(cost)) {
            retVal.put(cost, new ArrayList<Partition>());
        }
        retVal.get(cost).add(new Partition(parts2, cost));
    }

    //      System.out.println("Startgatesize: " + startgatesize);
    // filter too large
    List<Partition> rmPart = new ArrayList<>();
    List<Integer> rmKey = new ArrayList<>();
    for (Entry<Integer, List<Partition>> entry : retVal.entrySet()) {
        //         System.out.println(entry.getKey());
        rmPart.clear();
        for (Partition p : entry.getValue()) {
            //            System.out.println("\t" + p.toString());
            if (p.getPartition().size() >= startgatesize) {
                //               System.out.println("Rm: " + p);
                rmPart.add(p);
                continue;
            }
            for (PartitionPart p2 : p.getPartition()) {
                if (p2.getPart().size() >= startgatesize) {
                    //                  System.out.println("Rm: " + p);
                    rmPart.add(p);
                    continue;
                }
            }
        }
        entry.getValue().removeAll(rmPart);
        if (entry.getValue().isEmpty()) {
            rmKey.add(entry.getKey());
        }
    }
    for (int i : rmKey) {
        retVal.remove(i);
    }

    return retVal;
}

From source file:com.nextep.designer.beng.services.impl.DeliveryService.java

@Override
public List<IDeliveryInfo> getDeliveries(String moduleName, String versionPattern,
        IMatchingStrategy matchingStrategy) {
    // Retrieving module reference
    final Collection<IReference> moduleRefs = deliveryDao.lookupModuleName(moduleName);

    // Iterating over every module: if only one module resolves, then it is ok;
    // otherwise we keep track of matching modules so that we can throw an
    // explicit exception to the user
    final Collection<IReference> matchingModules = new ArrayList<IReference>();
    List<IDeliveryInfo> deliveries = Collections.emptyList();
    for (IReference moduleRef : moduleRefs) {
        // Retrieving deliveries matching for this module
        final List<IDeliveryInfo> moduleDeliveries = getDeliveriesFor(moduleRef, versionPattern,
                matchingStrategy);
        // If found we register this module as matching
        if (!moduleDeliveries.isEmpty()) {
            matchingModules.add(moduleRef);
            deliveries = moduleDeliveries;
        }
    }
    // If we have more than one matching module, we throw an exception
    final IIdentifiableDAO identifiableDao = CorePlugin.getService(IIdentifiableDAO.class);
    if (matchingModules.size() > 1) {
        final List<ContainerInfo> containers = new ArrayList<ContainerInfo>();
        for (IReference r : matchingModules) {
            final List<IVersionInfo> versions = (List<IVersionInfo>) identifiableDao
                    .loadForeignKey(VersionInfo.class, r.getUID(), "reference", true, true);
            // Reverse sort
            Collections.sort(versions, Collections.reverseOrder());
            // Taking last module version
            final UID containerLastId = versions.iterator().next().getUID();
            final ContainerInfo container = (ContainerInfo) identifiableDao.load(ContainerInfo.class,
                    containerLastId);
            containers.add(container);
        }
        throw new UnresolvableDeliveryException(containers);
    } else if (matchingModules.isEmpty()) {
        throw new UnresolvableDeliveryException(null);
    } else {
        return deliveries;
    }

}

From source file:com.act.lcms.db.analysis.WaveformAnalysis.java

/**
 * This function picks the best retention time among the best peaks from the standard wells. The algorithm
 * looks for the following heuristics in standard well peak detection: a) a good peak profile,
 * b) a high peak magnitude, and c) the well is not from MeOH media. It implements this by picking the global
 * 3 best peaks from ALL the standard wells that are not in MeOH media using a peak feature detector. It then
 * compares overlaps between these peaks against the local 3 best peaks of the negative controls and positive samples.
 * If there is an overlap, we have detected a positive signal.
 * @param standardWells The list of standard wells to benchmark from
 * @param representativeMetlinIon The metlin ion used for the analysis; usually it is the best
 *                                metlin ion picked by an algorithm among the standard well scans.
 * @param positiveAndNegativeWells These are positive and negative wells against which the retention times are
 *                                 compared to see for overlaps.
 * @return A map of ScanData to XZ values for those signals where peaks match between the standard and pos/neg runs.
 */
public static Map<ScanData<LCMSWell>, XZ> pickBestRepresentativeRetentionTimeFromStandardWells(
        List<ScanData<StandardWell>> standardWells, String representativeMetlinIon,
        List<ScanData<LCMSWell>> positiveAndNegativeWells) {

    List<XZ> bestStandardPeaks = new ArrayList<>();
    for (ScanData<StandardWell> well : standardWells) {
        if (well.getWell() != null) {
            // For retention times, select standard runs where the media is not MeOH since
            // MeOH has a lot more skew in retention time than other media. Moreover, none
            // of the feeding runs have their media as MeOH.
            if (well.getWell().getMedia() == null || !well.getWell().getMedia().equals("MeOH")) {
                bestStandardPeaks.addAll(detectPeaksInIntensityTimeWaveform(
                        well.getMs1ScanResults().getIonsToSpectra().get(representativeMetlinIon),
                        PEAK_DETECTION_THRESHOLD));
            }
        }
    }

    // Sort in descending order of intensity
    Collections.sort(bestStandardPeaks, new Comparator<XZ>() {
        @Override
        public int compare(XZ o1, XZ o2) {
            return o2.getIntensity().compareTo(o1.getIntensity());
        }
    });

    Map<ScanData<LCMSWell>, XZ> result = new HashMap<>();

    // Select from the top peaks in the standards run
    for (ScanData<LCMSWell> well : positiveAndNegativeWells) {
        List<XZ> topPeaksOfSample = detectPeaksInIntensityTimeWaveform(
                well.getMs1ScanResults().getIonsToSpectra().get(representativeMetlinIon),
                PEAK_DETECTION_THRESHOLD);

        for (XZ topPeak : bestStandardPeaks.subList(0, NUMBER_OF_BEST_PEAKS_TO_SELECTED_FROM - 1)) {
            int count = topPeaksOfSample.size() >= NUMBER_OF_BEST_PEAKS_TO_SELECTED_FROM
                    ? NUMBER_OF_BEST_PEAKS_TO_SELECTED_FROM - 1
                    : topPeaksOfSample.size();

            // Collisions do not matter here since we are just going to pick the highest intensity peak match, so ties
            // are arbitrarily broken based on the access order in the for loop below.
            TreeMap<Double, XZ> intensityToIntensityTimeValue = new TreeMap<>(Collections.reverseOrder());

            for (int i = 0; i < count; i++) {
                if (topPeaksOfSample.get(i).getTime() > topPeak.getTime() - TIME_SKEW_CORRECTION
                        && topPeaksOfSample.get(i).getTime() < topPeak.getTime() + TIME_SKEW_CORRECTION) {
                    // There has been significant overlap in peaks between standard and sample.
                    intensityToIntensityTimeValue.put(topPeaksOfSample.get(i).getIntensity(),
                            topPeaksOfSample.get(i));
                }
            }

            if (intensityToIntensityTimeValue.keySet().size() > 0) {
                // Get the best peak overlap based on the largest magnitude intensity
                result.put(well, intensityToIntensityTimeValue.firstEntry().getValue());
            }
        }
    }

    return result;
}

From source file:de.unijena.bioinf.FragmentationTreeConstruction.computation.FragmentationPatternAnalysis.java

/**
 * Step 7: Peak Scoring
 * Scores each peak. Expects a decomposition list
 */
public ProcessedInput performPeakScoring(ProcessedInput input) {
    final List<ProcessedPeak> processedPeaks = input.getMergedPeaks();
    final ProcessedPeak parentPeak = input.getParentPeak();
    final int n = processedPeaks.size();
    input.getOrCreateAnnotation(Scoring.class).initializeScoring(n);
    // score peak pairs
    final double[][] peakPairScores = input.getAnnotationOrThrow(Scoring.class).getPeakPairScores();
    for (PeakPairScorer scorer : peakPairScorers) {
        scorer.score(processedPeaks, input, peakPairScores);
    }
    // score fragment peaks
    final double[] peakScores = input.getAnnotationOrThrow(Scoring.class).getPeakScores();
    for (PeakScorer scorer : fragmentPeakScorers) {
        scorer.score(processedPeaks, input, peakScores);
    }

    final PeakAnnotation<DecompositionList> decomp = input.getPeakAnnotationOrThrow(DecompositionList.class);

    // don't score the parent peak
    peakScores[peakScores.length - 1] = 0d;

    // score peaks
    {
        final ArrayList<Object> preparations = new ArrayList<Object>(decompositionScorers.size());
        for (DecompositionScorer<?> scorer : decompositionScorers)
            preparations.add(scorer.prepare(input));
        for (int i = 0; i < processedPeaks.size() - 1; ++i) {
            final DecompositionList decomps = decomp.get(processedPeaks.get(i));
            final ArrayList<ScoredMolecularFormula> scored = new ArrayList<ScoredMolecularFormula>(
                    decomps.getDecompositions().size());
            for (MolecularFormula f : decomps.getFormulas()) {
                double score = 0d;
                int k = 0;
                for (DecompositionScorer<?> scorer : decompositionScorers) {
                    score += ((DecompositionScorer<Object>) scorer).score(f, processedPeaks.get(i), input,
                            preparations.get(k++));
                }
                scored.add(new ScoredMolecularFormula(f, score));
            }
            decomp.set(processedPeaks.get(i), new DecompositionList(scored));
        }
    }
    // same with root
    {
        final ArrayList<Object> preparations = new ArrayList<Object>(rootScorers.size());
        for (DecompositionScorer<?> scorer : rootScorers)
            preparations.add(scorer.prepare(input));
        final ArrayList<ScoredMolecularFormula> scored = new ArrayList<ScoredMolecularFormula>(
                decomp.get(parentPeak).getDecompositions());
        for (int j = 0; j < scored.size(); ++j) {
            double score = 0d;
            int k = 0;
            final MolecularFormula f = scored.get(j).getFormula();
            for (DecompositionScorer<?> scorer : rootScorers) {
                score += ((DecompositionScorer<Object>) scorer).score(f, input.getParentPeak(), input,
                        preparations.get(k++));
            }
            scored.set(j, new ScoredMolecularFormula(f, score));

        }
        Collections.sort(scored, Collections.reverseOrder());
        decomp.set(parentPeak, new DecompositionList(scored));
        input.addAnnotation(DecompositionList.class, decomp.get(parentPeak));
    }
    // set peak indices
    for (int i = 0; i < processedPeaks.size(); ++i)
        processedPeaks.get(i).setIndex(i);

    return input;
}