Example usage for java.lang Double compare

List of usage examples for java.lang Double compare

Introduction

On this page you can find example usage for java.lang Double compare.

Prototype

public static int compare(double d1, double d2) 

Source Link

Document

Compares the two specified double values.

Usage

From source file:org.cbioportal.service.impl.GenesetHierarchyServiceImpl.java

/**
 * Computes a representative score and p-value for the given gene set and stores them
 * on {@code geneset} via its setters.
 *
 * <p>When {@code percentile} is {@code null}, the score with the largest absolute
 * value (and its p-value) is used. Otherwise, positive and negative scores are ranked
 * separately, the value at the given percentile is taken from each side, and the one
 * with the larger absolute value wins.
 *
 * @param geneset gene set to update (representative score/p-value are set on it)
 * @param genesetScoresMap scores per gene set id; entries with non-numeric values are skipped
 * @param genesetPvaluesMap p-values per gene set id, parallel to the scores list
 * @param percentile percentile in [0, 100], or {@code null} to use the max-|score| rule
 */
private void calculateAndSetRepresentativeScoreAndPvalue(Geneset geneset,
        Map<String, List<GenesetMolecularData>> genesetScoresMap,
        Map<String, List<GenesetMolecularData>> genesetPvaluesMap, Integer percentile) {

    List<GenesetMolecularData> genesetScoreData = genesetScoresMap.get(geneset.getGenesetId());
    List<GenesetMolecularData> genesetPvalueData = genesetPvaluesMap.get(geneset.getGenesetId());

    //lists to hold the (score, p-value) pairs, split by sign of the score:
    List<ImmutablePair<Double, Double>> positiveScoresAndPvalues = new ArrayList<>();
    List<ImmutablePair<Double, Double>> negativeScoresAndPvalues = new ArrayList<>();

    //track the score with the maximum absolute value (used when percentile is null):
    double max = 0;
    double pvalueOfMax = 1;
    for (int i = 0; i < genesetScoreData.size(); i++) {
        String scoreString = genesetScoreData.get(i).getValue();
        String pvalueString = genesetPvalueData.get(i).getValue();

        //samples without a numeric score carry no information; skip them:
        if (!NumberUtils.isNumber(scoreString))
            continue;

        double score = Double.parseDouble(scoreString);
        //default to p-value 1.0 (no significance) when it cannot be parsed:
        double pvalue = 1.0;
        if (NumberUtils.isNumber(pvalueString))
            pvalue = Double.parseDouble(pvalueString);
        if (score >= 0) {
            positiveScoresAndPvalues.add(new ImmutablePair<>(score, pvalue));
        } else {
            negativeScoresAndPvalues.add(new ImmutablePair<>(score, pvalue));
        }

        //keep track of max, in case percentile is null
        if (Math.abs(score) > Math.abs(max)) {
            max = score; //here no abs, since we want to get the raw score (could be negative)
            pvalueOfMax = pvalue;
        }
    }

    if (percentile == null) {
        geneset.setRepresentativeScore(max);
        geneset.setRepresentativePvalue(pvalueOfMax);
    } else {
        //sort by score (.getLeft() is the score, .getRight() the p-value of each pair):
        Comparator<ImmutablePair<Double, Double>> byScore = Comparator.comparing(ImmutablePair::getLeft);
        //positive scores ascending:
        positiveScoresAndPvalues.sort(byScore);
        //negative scores descending (i.e. ascending by absolute value):
        negativeScoresAndPvalues.sort(byScore.reversed());

        //use percentile:
        ImmutablePair<Double, Double> representativePositiveScoreAndPvalue = new ImmutablePair<>(0.0, 1.0);
        ImmutablePair<Double, Double> representativeNegativeScoreAndPvalue = new ImmutablePair<>(0.0, 1.0);
        if (positiveScoresAndPvalues.size() > 0) {
            int idxPositiveScores = (int) Math.round(percentile * positiveScoresAndPvalues.size() / 100.0);
            if (idxPositiveScores == 0) { //(can happen when positiveScoresAndPvalues.size() is small)
                idxPositiveScores = 1;
            }
            representativePositiveScoreAndPvalue = positiveScoresAndPvalues.get(idxPositiveScores - 1);
        }
        if (negativeScoresAndPvalues.size() > 0) {
            int idxNegativeScores = (int) Math.round(percentile * negativeScoresAndPvalues.size() / 100.0);
            if (idxNegativeScores == 0) { //(can happen when negativeScoresAndPvalues.size() is small)
                idxNegativeScores = 1;
            }
            representativeNegativeScoreAndPvalue = negativeScoresAndPvalues.get(idxNegativeScores - 1);
        }

        //set the side with the larger absolute representative score:
        if (Math.abs(representativePositiveScoreAndPvalue.getLeft()) > Math
                .abs(representativeNegativeScoreAndPvalue.getLeft())) {
            geneset.setRepresentativeScore(representativePositiveScoreAndPvalue.getLeft());
            geneset.setRepresentativePvalue(representativePositiveScoreAndPvalue.getRight());
        } else {
            geneset.setRepresentativeScore(representativeNegativeScoreAndPvalue.getLeft());
            geneset.setRepresentativePvalue(representativeNegativeScoreAndPvalue.getRight());
        }
    }
}

From source file:de.upb.timok.models.PDTTA.java

/**
 * Samples one timed event sequence by walking the automaton from the start state,
 * choosing transitions proportionally to their probabilities (roulette-wheel
 * selection), until a stop transition is chosen or MAX_SEQUENCE_LENGTH is exceeded.
 *
 * @return the sampled sequence, labeled ANOMALY if any abnormal transition was taken
 * @throws IllegalStateException if conflicting anomaly types are mixed, or no time
 *         distribution exists for a chosen transition
 */
public TimedSequence sampleSequence() {
    int currentState = START_STATE;

    final TIntList eventList = new TIntArrayList();
    final TDoubleList timeList = new TDoubleArrayList();
    boolean choseFinalState = false;
    AnomalyInsertionType anomalyType = AnomalyInsertionType.NONE;
    while (!choseFinalState) {
        final List<Transition> possibleTransitions = getTransitions(currentState, true);
        //ascending by probability; equivalent to the original -compare(t2, t1)
        //double negation, just readable:
        Collections.sort(possibleTransitions,
                (t1, t2) -> Double.compare(t1.getProbability(), t2.getProbability()));
        //roulette-wheel selection: pick the first transition whose cumulative
        //probability exceeds the random draw
        final double random = r.nextDouble();
        double summedProbs = 0;
        int index = -1;
        for (int i = 0; i < possibleTransitions.size(); i++) {
            summedProbs += possibleTransitions.get(i).getProbability();
            if (random < summedProbs) {
                index = i;
                break;
            }
        }
        //guard against floating-point rounding: if the probabilities sum to slightly
        //less than 1 and no bucket matched, fall back to the last (most probable)
        //transition instead of crashing with get(-1)
        if (index < 0) {
            index = possibleTransitions.size() - 1;
        }

        final Transition chosenTransition = possibleTransitions.get(index);
        // XXX What happens for sequence based anomalies if we first choose an abnormal transition and then a normal one? Should we enforce choosing the
        // abnormal transitions labeled with type 2 and 4 when the first of those anomalies was chosen? The problem are sequence based anomalies!
        if (chosenTransition.isAbnormal()) {
            if (anomalyType != AnomalyInsertionType.NONE
                    && anomalyType != chosenTransition.getAnomalyInsertionType()) {
                // This is a conflict because the anomalyType was already set to anomaly
                throw new IllegalStateException("Two anomalies are mixed in this special case");
            }
            anomalyType = chosenTransition.getAnomalyInsertionType();
            // XXX what happens if one transition was normal and then the other one was abnormal or from another type? 0,1,2,0,0,5? What about the label for
            // the sequence? Is the label for the sequence really needed?
        }
        if (chosenTransition.isStopTraversingTransition() || eventList.size() > MAX_SEQUENCE_LENGTH) {
            choseFinalState = true;
            // TODO what happens if an abnormal stopping transiton (type 5) was chosen?
        } else {
            currentState = chosenTransition.getToState();
            final Distribution d = transitionDistributions.get(chosenTransition.toZeroProbTransition());
            if (d == null) {
                // XXX maybe this happens because the automaton is more general than the data. So not every possible path in the automaton is represented in
                // the training data.
                throw new IllegalStateException("This should never happen for transition " + chosenTransition);
            }
            //sample a time value for the transition, possibly distorted for anomalies
            double timeValue = d.sample(1, r)[0];
            if (anomalyType == AnomalyInsertionType.TYPE_THREE) {
                timeValue = changeTimeValue(timeValue, ANOMALY_3_CHANGE_RATE);
            } else if (anomalyType == AnomalyInsertionType.TYPE_FOUR) {
                timeValue = changeTimeValue(timeValue, ANOMALY_4_CHANGE_RATE);
            }
            eventList.add(chosenTransition.getSymbol());
            timeList.add(timeValue);
        }
    }
    if (anomalyType != AnomalyInsertionType.NONE) {
        return new TimedSequence(eventList, timeList, ClassLabel.ANOMALY);
    } else {
        return new TimedSequence(eventList, timeList, ClassLabel.NORMAL);
    }
}

From source file:ml.shifu.shifu.core.dtrain.wdl.WDLWorker.java

/**
 * Add to training set or validation set according to validation rate.
 * /* ww w .ja v  a  2 s.  c  o m*/
 * @param hashcode
 *            the hash code of the data
 * @param data
 *            data instance
 * @param attachment
 *            if it is validation
 * @return if in training, training is true, others are false.
 */
protected boolean addDataPairToDataSet(long hashcode, Data data, Object attachment) {
    // if validation data from configured validation data set
    boolean isValidation = (attachment != null && attachment instanceof Boolean) ? (Boolean) attachment : false;

    if (this.isKFoldCV) {
        int k = this.modelConfig.getTrain().getNumKFold();
        if (hashcode % k == this.trainerId) {
            this.validationData.append(data);
            if (isPositive(data.label)) {
                this.positiveValidationCount += 1L;
            } else {
                this.negativeValidationCount += 1L;
            }
            return false;
        } else {
            this.trainingData.append(data);
            if (isPositive(data.label)) {
                this.positiveTrainCount += 1L;
            } else {
                this.negativeTrainCount += 1L;
            }
            return true;
        }
    }

    if (this.isManualValidation) {
        if (isValidation) {
            this.validationData.append(data);
            if (isPositive(data.label)) {
                this.positiveValidationCount += 1L;
            } else {
                this.negativeValidationCount += 1L;
            }
            return false;
        } else {
            this.trainingData.append(data);
            if (isPositive(data.label)) {
                this.positiveTrainCount += 1L;
            } else {
                this.negativeTrainCount += 1L;
            }
            return true;
        }
    } else {
        if (Double.compare(this.modelConfig.getValidSetRate(), 0d) != 0) {
            int classValue = (int) (data.label + 0.01f);
            Random random = null;
            if (this.isStratifiedSampling) {
                // each class use one random instance
                random = validationRandomMap.get(classValue);
                if (random == null) {
                    random = new Random();
                    this.validationRandomMap.put(classValue, random);
                }
            } else {
                // all data use one random instance
                random = validationRandomMap.get(0);
                if (random == null) {
                    random = new Random();
                    this.validationRandomMap.put(0, random);
                }
            }

            if (this.modelConfig.isFixInitialInput()) {
                // for fix initial input, if hashcode%100 is in [start-hashcode, end-hashcode), validation,
                // otherwise training. start hashcode in different job is different to make sure bagging jobs have
                // different data. if end-hashcode is over 100, then check if hashcode is in [start-hashcode, 100]
                // or [0, end-hashcode]
                int startHashCode = (100 / this.modelConfig.getBaggingNum()) * this.trainerId;
                int endHashCode = startHashCode
                        + Double.valueOf(this.modelConfig.getValidSetRate() * 100).intValue();
                if (isInRange(hashcode, startHashCode, endHashCode)) {
                    this.validationData.append(data);
                    if (isPositive(data.label)) {
                        this.positiveValidationCount += 1L;
                    } else {
                        this.negativeValidationCount += 1L;
                    }
                    return false;
                } else {
                    this.trainingData.append(data);
                    if (isPositive(data.label)) {
                        this.positiveTrainCount += 1L;
                    } else {
                        this.negativeTrainCount += 1L;
                    }
                    return true;
                }
            } else {
                // not fixed initial input, if random value >= validRate, training, otherwise validation.
                if (random.nextDouble() >= this.modelConfig.getValidSetRate()) {
                    this.trainingData.append(data);
                    if (isPositive(data.label)) {
                        this.positiveTrainCount += 1L;
                    } else {
                        this.negativeTrainCount += 1L;
                    }
                    return true;
                } else {
                    this.validationData.append(data);
                    if (isPositive(data.label)) {
                        this.positiveValidationCount += 1L;
                    } else {
                        this.negativeValidationCount += 1L;
                    }
                    return false;
                }
            }
        } else {
            this.trainingData.append(data);
            if (isPositive(data.label)) {
                this.positiveTrainCount += 1L;
            } else {
                this.negativeTrainCount += 1L;
            }
            return true;
        }
    }
}

From source file:edu.txstate.dmlab.clusteringwiki.rest.ClusterController.java

/**
 * Query transfer - get a similar query if the current query has 
 * not already been executed/*from ww w.  ja va2s  .  c  om*/
 * @param query  Executed query
 * @param analyzedQuery  Analyzed executed query string terms
 * @param userId  User id for logged in user
 * @param allUserId  User id for "all" user
 * @param loggedIn  Whether user is logged in
 * @param search  Search results collection
 * @param service  Service used to execute search
 * @param numResults  Number of results retrieved
 * @param clusteringAlgo  Clustering algorithm used to cluster results
 * @return
 */
protected Query transfer(String query, String analyzedQuery, Integer userId, Integer allUserId,
        boolean loggedIn, ICWSearchResultCol search, String service, Integer numResults,
        Integer clusteringAlgo) {

    //query for logged in user
    Query q = queryDao.selectExistingUserQuery(userId, service, numResults, query);

    if (q != null)
        return q;

    List<Query> matches = queryDao.selectUserQueryMatchingSearch(query, analyzedQuery, allUserId,
            ApplicationSettings.getTermSimQueryResultsLimit());

    //find query with largest similarity
    double sim = 0.0D;
    Query qPrime = null;
    ISimilarityCalculator calc = new JaccardSimilarityCalculator();
    for (Query a : matches) {
        double currentSim = calc.computeSimilarity(analyzedQuery, a.getParsedText());
        if (Double.compare(currentSim, sim) > 0) {
            qPrime = a;
            sim = currentSim;
        }
    }
    //make sure it is similar enough
    if (Double.compare(sim, ApplicationSettings.getTermSimThreshold()) < 0 || qPrime == null)
        return null;

    //check the result similarity between the top k results received and the query found
    List<String> responseUrls = search.getTopKResponseUrls(ApplicationSettings.getTopKQueryUrls());
    Set<String> responseUrlsSet = new HashSet<String>(responseUrls);
    Set<String> queryUrlsSet = qPrime.retrieveTopKQueryResponseUrlsSet();
    Set<String> intersection = new HashSet<String>(responseUrlsSet);
    intersection.removeAll(queryUrlsSet);
    Set<String> union = responseUrlsSet;
    union.addAll(queryUrlsSet);
    sim = intersection.size() / (double) union.size();

    //make sure it is similar enough
    if (Double.compare(sim, ApplicationSettings.getResultSimThreshold()) < 0 || qPrime == null)
        return null;

    //found q' that is similar enough to q
    //save q and copy preferences from q' to q
    if (loggedIn) {
        List<String> urls = search.getTopKResponseUrls(ApplicationSettings.getTopKQueryUrls());
        //save new queries
        q = new Query(userId, service, numResults, query, null, urls);
        q.setParsedText(analyzedQuery);
        queryDao.saveQuery(q);
        Query qAll = new Query(allUserId, service, numResults, query, null, urls);
        qAll.setParsedText(analyzedQuery);
        queryDao.saveQuery(qAll);
        //associate new edits
        Integer queryId = q.getId();
        List<ClusterEdit> edits = clusterEditDao.selectClusterEditsForUserQuery(qPrime.getId(), clusteringAlgo,
                qPrime.getUserId().equals(allUserId));
        for (ClusterEdit ePrime : edits) {
            ClusterEdit e = new ClusterEdit();
            e.setCardinality(ePrime.getCardinality());
            e.setClusteringAlgo(ePrime.getClusteringAlgo());
            e.setQueryId(queryId);
            e.setPath1(ePrime.getPath1());
            e.setPath2(ePrime.getPath2());
            e.setPath3(ePrime.getPath3());
            e.setPath4(ePrime.getPath4());
            e.setPath5(ePrime.getPath5());
            clusterEditDao.saveClusterEdit(e);
        }
    } else {
        q = qPrime;
    }
    return q;
}

From source file:hu.ppke.itk.nlpg.purepos.decoder.AbstractDecoder.java

/**
 * Prunes the guessed tag distribution: keeps only tags whose value is within
 * {@code sufTheta} of the best tag's value, capped at {@code maxGuessedTags}
 * entries.
 *
 * @param guessedTags tag id to value map produced by the suffix guesser
 * @return surviving entries, ordered by descending value (ties by ascending tag id)
 */
protected Set<Entry<Integer, Double>> pruneGuessedTags(Map<Integer, Double> guessedTags) {
    //descending by value, ties broken by ascending tag id; ints are exactly
    //representable as doubles, so this matches the original Double.compare tie-break
    TreeSet<Entry<Integer, Double>> set = new TreeSet<Map.Entry<Integer, Double>>(
            Comparator.comparing((Entry<Integer, Double> e) -> e.getValue())
                    .reversed()
                    .thenComparing(Entry::getKey));

    int maxTag = SuffixGuesser.getMaxProbabilityTag(guessedTags);
    double maxVal = guessedTags.get(maxTag);
    double minVal = maxVal - sufTheta;
    //keep every tag whose value is close enough to the best one
    for (Entry<Integer, Double> entry : guessedTags.entrySet()) {
        if (entry.getValue() > minVal) {
            set.add(entry);
        }
    }
    //enforce the size cap by dropping the lowest-ranked entries from the tail
    if (set.size() > maxGuessedTags) {
        Iterator<Entry<Integer, Double>> it = set.descendingIterator();
        while (set.size() > maxGuessedTags) {
            it.next();
            it.remove();
        }
    }

    return set;
}

From source file:mase.app.soccer.ProgSoccerAgent.java

/**
 * Ranks the given agents by their distance to {@code target}, closest first.
 *
 * @param agents agents to rank
 * @param target reference point that distances are measured to
 * @return (agent, distance) pairs ordered by increasing distance
 */
private List<Pair<SoccerAgent, Double>> sortByProximity(Collection<SoccerAgent> agents, Double2D target) {
    //presize: one pair per agent
    List<Pair<SoccerAgent, Double>> closer = new ArrayList<>(agents.size());
    for (SoccerAgent a : agents) {
        closer.add(Pair.of(a, a.distanceTo(target)));
    }
    //List.sort with a lambda replaces the Collections.sort + anonymous Comparator boilerplate
    closer.sort((o1, o2) -> Double.compare(o1.getValue(), o2.getValue()));
    return closer;
}

From source file:qa.experiment.SRLToAligner.java

/**
 * Builds a new ProcessFrame from {@code frame}, filling each semantic-role slot
 * (undergoer/enabler/trigger/result) with the highest-scoring span for that role.
 *
 * @param frame source frame whose name and text are copied
 * @param roleRoleSpanPair candidate spans per role label (A0, A1, T, A2)
 * @return the populated frame
 */
private ProcessFrame constructProcessFrame(ProcessFrame frame,
        HashMap<String, ArrayList<RoleSpan>> roleRoleSpanPair) {
    //order spans by role score so max() yields the best-scoring span
    final Comparator<RoleSpan> comp = (r1, r2) -> Double.compare(r1.getRoleScore(), r2.getRoleScore());
    ProcessFrame res = new ProcessFrame();
    res.setProcessName(frame.getProcessName());
    res.setTokenizedText(frame.getTokenizedText());
    res.setRawText(frame.getRawText());

    for (String argLabel : GlobalV.labels) {
        //single map lookup instead of the previous get-twice
        ArrayList<RoleSpan> spans = roleRoleSpanPair.get(argLabel);
        //guard against empty lists so Optional.get() below cannot throw
        if (spans == null || spans.isEmpty()) {
            continue;
        }
        RoleSpan maxSpan = spans.stream().max(comp).get();
        //labels are distinct strings, so an else-if chain is equivalent to the
        //original independent ifs
        if (argLabel.equalsIgnoreCase("A0")) {
            res.setUnderGoer(maxSpan.getTextSpan());
            res.setScores(0, maxSpan.getScores());
        } else if (argLabel.equalsIgnoreCase("A1")) {
            res.setEnabler(maxSpan.getTextSpan());
            res.setScores(1, maxSpan.getScores());
        } else if (argLabel.equalsIgnoreCase("T")) {
            res.setTrigger(maxSpan.getTextSpan());
            res.setScores(2, maxSpan.getScores());
        } else if (argLabel.equalsIgnoreCase("A2")) {
            res.setResult(maxSpan.getTextSpan());
            res.setScores(3, maxSpan.getScores());
        }
    }
    return res;
}

From source file:org.briljantframework.data.vector.Vectors.java

/**
 * Returns the indexes of {@code vector} sorted in increasing order by value.
 *
 * @param vector the vector
 * @return the indexes of {@code vector} sorted in increasing order by value
 */
public static int[] indexSort(Vector vector) {
    // delegates to the comparator-based overload, comparing elements as doubles
    return indexSort(vector,
            (o1, o2) -> Double.compare(vector.loc().getAsDouble(o1), vector.loc().getAsDouble(o2)));
}

From source file:com.opengamma.analytics.financial.instrument.payment.CouponONSpreadDefinition.java

/**
 * Equality check: delegates to the superclass first, requires the exact same class,
 * then compares the spread, the fixing-period arrays (deep) and the index.
 */
@Override
public boolean equals(final Object obj) {
    if (this == obj) {
        return true;
    }
    if (!super.equals(obj) || getClass() != obj.getClass()) {
        return false;
    }
    final CouponONSpreadDefinition other = (CouponONSpreadDefinition) obj;
    //same short-circuit order as the original cascade of early returns
    return Double.compare(_spread, other._spread) == 0
            && Arrays.deepEquals(_fixingPeriodAccrualFactor, other._fixingPeriodAccrualFactor)
            && Arrays.deepEquals(_fixingPeriodDate, other._fixingPeriodDate)
            && ObjectUtils.equals(_index, other._index);
}

From source file:relationalFramework.RelationalRule.java

@Override
public int compareTo(RelationalRule o) {
    if (o == null)
        return -1;
    int result = Double.compare(ruleHash_, o.ruleHash_);
    if (result != 0)
        return result;

    return toString().compareTo(o.toString());
}