Example usage for java.lang Double compare

Introduction

On this page you can find example usages of java.lang Double.compare.

Prototype

public static int compare(double d1, double d2) 

Document

Compares the two specified double values.
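
Unlike the == operator, Double.compare imposes a total ordering with two corner cases worth knowing: NaN compares greater than every other value, and 0.0d compares greater than -0.0d. A short demonstration of the contract:

System.out.println(Double.compare(1.5, 2.5));  // negative: 1.5 < 2.5
System.out.println(Double.compare(2.5, 2.5));  // zero: the values are equal
System.out.println(Double.compare(Double.NaN, Double.POSITIVE_INFINITY)); // positive: NaN sorts last
System.out.println(Double.compare(0.0, -0.0)); // positive: 0.0 > -0.0
System.out.println(0.0 == -0.0);               // true: == treats both zeros as equal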

Usage

From source file:csci310.parkhere.controller.ClientController.java

public SearchResults sortSearchResultBySpotRating() {
    if (searchResults != null) {
        Collections.sort(searchResults.searchResultList, new Comparator<ParkingSpot>() {
            public int compare(ParkingSpot p1, ParkingSpot p2) {
                return Double.compare(p1.review, p2.review);
            }
        });
    }
    return searchResults;
}
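
Since Java 8 the same ascending-by-rating sort can be written more compactly with Comparator.comparingDouble, which delegates to Double.compare internally. A minimal sketch, assuming the ParkingSpot field shown above:

// equivalent ascending sort; chain .reversed() to put the highest-rated spots first
searchResults.searchResultList.sort(
        Comparator.comparingDouble((ParkingSpot p) -> p.review));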

From source file:com.epam.catgenome.manager.vcf.reader.VcfFileReader.java

/**
 * Translates HTSJDK's {@code VariantContext} object into our {@code Variation} entity
 *
 * @param context     a {@code VariantContext} object that represents a variation from a parsed VCF file.
 * @param header      a {@code VCFHeader} object that represents the header of the parsed VCF file.
 * @param sampleIndex {@code Integer} the index of the sample.
 * @return a {@code Variation} object representing the desired variation.
 */
public Variation createVariation(VariantContext context, VCFHeader header, Integer sampleIndex) {
    String ref = context.getReference().getDisplayString();
    List<String> alt = context.getAlternateAlleles().stream().map(Allele::getDisplayString)
            .collect(Collectors.toList());
    Genotype genotype = sampleIndex != null ? context.getGenotype(sampleIndex) : null;

    // First, determine OrganismType
    GenotypeData genotypeData = getGenotypeData(context, genotype);

    Variation variation = new Variation(context.getStart(), context.getEnd(), ref, alt);
    variation.setGenotypeData(genotypeData);

    variation.setFailedFilters(context.getFilters().stream().map(f -> {
        VCFHeaderLine vcfHeaderLine = header.getFilterHeaderLine(f);
        return new Filter(f, vcfHeaderLine != null ? vcfHeaderLine.getValue() : null);
    }).collect(Collectors.toList()));

    variation.setIdentifier(context.getID());

    Double qual = context.getPhredScaledQual();
    variation.setQuality(Double.compare(qual, HTSJDK_WRONG_QUALITY) != 0 ? qual : 0);

    determineVariationType(context, sampleIndex, variation);

    return variation;
}
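
The quality check above compares against a sentinel constant instead of using ==. A minimal sketch of the same pattern with a hypothetical sentinel value (the actual HTSJDK_WRONG_QUALITY constant is defined elsewhere in the project):

// hypothetical sentinel meaning "quality not provided"
static final double MISSING_QUALITY = -10.0;

static double qualityOrZero(double qual) {
    // Double.compare handles NaN deterministically (NaN compares greater than
    // any other value), whereas == is always false when either operand is NaN
    return Double.compare(qual, MISSING_QUALITY) != 0 ? qual : 0;
}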

From source file:org.briljantframework.data.Collectors.java

/**
 * @return an aggregator that computes the median.
 */
public static Collector<Number, ?, Double> median() {
    return Collector.of(ArrayList::new, ArrayList::add, (left, right) -> {
        left.addAll(right);
        return left;
    }, (ArrayList<Number> list) -> {
        int size = list.size();
        if (size == 0) {
            return Na.of(Double.class);
        } else if (size == 1) {
            return list.get(0).doubleValue();
        } else if (size == 2) {
            return (list.get(0).doubleValue() + list.get(1).doubleValue()) / 2;
        } else {
            list.sort((a, b) -> Double.compare(a.doubleValue(), b.doubleValue()));
            int index = (size - 1) / 2;
            if (size % 2 == 0) {
                return (list.get(index).doubleValue() + list.get(index + 1).doubleValue()) / 2;
            } else {
                return list.get(index).doubleValue();
            }
        }
    });
}
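
A brief usage sketch; note that Collectors below refers to org.briljantframework.data.Collectors rather than java.util.stream.Collectors:

double m = Stream.of(3, 1, 4, 1, 5)
        .collect(Collectors.median()); // the list is sorted via Double.compare; the median is 3.0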

From source file:de.upb.timok.models.PDTTA.java

@Deprecated
public TimedSequence createAbnormalEventSequence(Random mutation) {
    // choose very unlikely sequences
    final TIntList eventList = new TIntArrayList();
    final TDoubleList timeList = new TDoubleArrayList();
    boolean choseFinalState = false;
    int currentState = 0;
    while (!choseFinalState) {
        final List<Transition> possibleTransitions = getTransitions(currentState, true);
        possibleTransitions.sort((o1, o2) -> Double.compare(o1.getProbability(), o2.getProbability()));
        final int listIndex = Math.min(3, possibleTransitions.size() - 1);
        final List<Transition> topThree = possibleTransitions.subList(0, listIndex);
        final double randomValue = mutation.nextDouble();
        int chosenTransitionIndex = -1;
        if (randomValue <= ANOMALY_TYPE_TWO_P_1) {
            chosenTransitionIndex = 0;
        } else if (randomValue > ANOMALY_TYPE_TWO_P_1 && randomValue < ANOMALY_TYPE_TWO_P_2) {
            chosenTransitionIndex = 1;
        } else {
            chosenTransitionIndex = 2;
        }
        final int indexToTake = Math.min(chosenTransitionIndex, topThree.size() - 1);
        final Transition chosenTransition = topThree.get(indexToTake);
        if (chosenTransition.isStopTraversingTransition() || eventList.size() > MAX_SEQUENCE_LENGTH) {
            choseFinalState = true;
        } else {
            currentState = chosenTransition.getToState();
            final Distribution d = transitionDistributions.get(chosenTransition.toZeroProbTransition());
            if (d == null) {
                // just do it again with other random sampling
                return createAbnormalEventSequence(mutation);
            }
            final double timeValue = d.sample(1, mutation)[0];
            eventList.add(chosenTransition.getSymbol());
            timeList.add(timeValue);
        }
    }
    return new TimedSequence(eventList, timeList, ClassLabel.ANOMALY);
}
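
Sorting in ascending order of probability means topThree holds the least likely transitions, which is what an abnormal-sequence generator wants. Swapping the arguments to Double.compare would produce the descending order instead:

// descending by probability: most likely transitions first
possibleTransitions.sort((o1, o2) -> Double.compare(o2.getProbability(), o1.getProbability()));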

From source file:csci310.parkhere.controller.ClientController.java

public SearchResults sortSearchResultByProviderRating() {
    if (searchResults != null) {
        Collections.sort(searchResults.searchResultList, new Comparator<ParkingSpot>() {
            public int compare(ParkingSpot p1, ParkingSpot p2) {
                return Double.compare(p1.providerReview, p2.providerReview);
            }
        });
    }
    return searchResults;
}

From source file:cc.redberry.core.number.Rational.java

@Override
public int compareTo(Real o) {
    NumberUtils.checkNotNull(o);
    if (o instanceof Numeric)
        return Double.compare(doubleValue(), o.doubleValue());
    return fraction.compareTo(((Rational) o).fraction);
}
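
Double.compare is the right tool here; the tempting subtraction shortcut (int) (a - b) truncates toward zero and misreports any difference smaller than 1.0:

int broken = (int) (0.5 - 0.25);         // 0: wrongly reports the values as equal
int correct = Double.compare(0.5, 0.25); // 1: correctly reports 0.5 > 0.25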

From source file:org.esa.nest.util.MathUtils.java

/**
 * The sinc function.
 * @param x The input variable.
 * @return The sinc function value.
 */
private static double sinc(final double x) {

    if (Double.compare(x, 0.0) == 0) {
        return 1.0;
    } else {
        return FastMath.sin(x * Math.PI) / (x * Math.PI);
    }
}
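
One subtlety: Double.compare orders -0.0 strictly below 0.0, so a negative-zero input slips past this guard and the division then evaluates -0.0 / -0.0, which is NaN. If -0.0 can occur, an == test catches both zeros; a sketch using Math.sin in place of FastMath to stay self-contained:

private static double sincSafe(final double x) {
    if (x == 0.0) { // true for both 0.0 and -0.0
        return 1.0;
    }
    return Math.sin(x * Math.PI) / (x * Math.PI);
}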

From source file:ml.shifu.shifu.core.dtrain.nn.NNWorker.java

@Override
public void load(GuaguaWritableAdapter<LongWritable> currentKey, GuaguaWritableAdapter<Text> currentValue,
        WorkerContext<NNParams, NNParams> workerContext) {
    super.count += 1;
    if ((super.count) % 5000 == 0) {
        LOG.info("Read {} records.", super.count);
    }

    float[] inputs = new float[super.featureInputsCnt];
    float[] ideal = new float[super.outputNodeCount];

    if (super.isDry) {
        // dry train, use empty data.
        addDataPairToDataSet(0,
                new BasicFloatMLDataPair(new BasicFloatMLData(inputs), new BasicFloatMLData(ideal)));
        return;
    }

    long hashcode = 0;
    float significance = 1f;
    // use guava Splitter so the line is iterated only once
    // NNConstants.NN_DEFAULT_COLUMN_SEPARATOR is used instead of getModelConfig().getDataSetDelimiter(),
    // following the same convention as the akka-mode implementation.
    int index = 0, inputsIndex = 0, outputIndex = 0;

    String[] fields = Lists.newArrayList(this.splitter.split(currentValue.getWritable().toString()))
            .toArray(new String[0]);
    int pos = 0;

    for (pos = 0; pos < fields.length;) {
        String input = fields[pos];
        // check the length first to avoid the cost of a failing NumberFormatUtils.getFloat(input, 0f)
        float floatValue = input.length() == 0 ? 0f : NumberFormatUtils.getFloat(input, 0f);
        // NaN occasionally appears in the input data; treat it as a missing value and
        // default to 0f (TODO: handle according to the norm type)
        floatValue = (Float.isNaN(floatValue) || Double.isNaN(floatValue)) ? 0f : floatValue;

        if (pos == fields.length - 1) {
            // if no weight column is configured, significance defaults to 1f
            // (open question: how to handle data that starts unweighted and later gains a weight column?)
            if (StringUtils.isBlank(modelConfig.getWeightColumnName())) {
                significance = 1f;
                // no weight column configured, so stop at the last column
                break;
            }

            // check the length first to avoid the cost of a failing NumberFormatUtils.getFloat(input, 1f)
            significance = input.length() == 0 ? 1f : NumberFormatUtils.getFloat(input, 1f);
            // if the weight is invalid, reset it to 1f and log a warning
            if (Float.compare(significance, 0f) < 0) {
                LOG.warn(
                        "The {} record in current worker weight {} is less than 0f, it is invalid, set it to 1.",
                        count, significance);
                significance = 1f;
            }
            // the last field is significance, break here
            break;
        } else {
            ColumnConfig columnConfig = super.columnConfigList.get(index);
            if (columnConfig != null && columnConfig.isTarget()) {
                if (isLinearTarget || modelConfig.isRegression()) {
                    ideal[outputIndex++] = floatValue;
                } else {
                    if (modelConfig.getTrain().isOneVsAll()) {
                        // for one-vs-all, set the target value according to trainerId: in the trainer
                        // with id 0 a target of 0 is mapped to 1 and all others to 0. Target values are
                        // the indices of the tags, e.g. [0, 1, 2, 3] for tags ["a", "b", "c", "d"]
                        ideal[outputIndex++] = Float.compare(floatValue, trainerId) == 0 ? 1f : 0f;
                    } else {
                        if (modelConfig.getTags().size() == 2) {
                            // with only 2 classes there is a single output node; a target of 0 is the
                            // index of the positive class, so set the output to 1 for positive and 0 for negative
                            int ideaIndex = (int) floatValue;
                            ideal[0] = ideaIndex == 0 ? 1f : 0f;
                        } else {
                            // for multiple classification
                            int ideaIndex = (int) floatValue;
                            ideal[ideaIndex] = 1f;
                        }
                    }
                }
                pos++;
            } else {
                if (subFeatureSet.contains(index)) {
                    if (columnConfig.isMeta() || columnConfig.isForceRemove()) {
                        // it shouldn't happen here
                        pos += 1;
                    } else if (columnConfig != null && columnConfig.isNumerical()
                            && modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ONEHOT)) {
                        for (int k = 0; k < columnConfig.getBinBoundary().size() + 1; k++) {
                            String tval = fields[pos];
                            // check the length first to avoid the cost of a failing NumberFormatUtils.getFloat(tval, 0f)
                            float fval = tval.length() == 0 ? 0f : NumberFormatUtils.getFloat(tval, 0f);
                            // NaN occasionally appears in the input data; treat it as a missing value
                            // and default to 0f (TODO: handle according to the norm type)
                            fval = (Float.isNaN(fval) || Double.isNaN(fval)) ? 0f : fval;
                            inputs[inputsIndex++] = fval;
                            pos++;
                        }
                    } else if (columnConfig != null && columnConfig.isCategorical()
                            && (modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ZSCALE_ONEHOT)
                                    || modelConfig.getNormalizeType()
                                            .equals(ModelNormalizeConf.NormType.ONEHOT))) {
                        for (int k = 0; k < columnConfig.getBinCategory().size() + 1; k++) {
                            String tval = fields[pos];
                            // check the length first to avoid the cost of a failing NumberFormatUtils.getFloat(tval, 0f)
                            float fval = tval.length() == 0 ? 0f : NumberFormatUtils.getFloat(tval, 0f);
                            // NaN occasionally appears in the input data; treat it as a missing value
                            // and default to 0f (TODO: handle according to the norm type)
                            fval = (Float.isNaN(fval) || Double.isNaN(fval)) ? 0f : fval;
                            inputs[inputsIndex++] = fval;
                            pos++;
                        }
                    } else {
                        inputs[inputsIndex++] = floatValue;
                        pos++;
                    }
                    hashcode = hashcode * 31 + Double.valueOf(floatValue).hashCode();
                } else {
                    if (!CommonUtils.isToNormVariable(columnConfig, hasCandidates,
                            modelConfig.isRegression())) {
                        pos += 1;
                    } else if (columnConfig.isNumerical()
                            && modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ONEHOT)
                            && columnConfig.getBinBoundary() != null
                            && columnConfig.getBinBoundary().size() > 0) {
                        pos += (columnConfig.getBinBoundary().size() + 1);
                    } else if (columnConfig.isCategorical()
                            && (modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ZSCALE_ONEHOT)
                                    || modelConfig.getNormalizeType()
                                            .equals(ModelNormalizeConf.NormType.ONEHOT))
                            && columnConfig.getBinCategory().size() > 0) {
                        pos += (columnConfig.getBinCategory().size() + 1);
                    } else {
                        pos += 1;
                    }
                }
            }
        }
        index += 1;
    }

    if (index != this.columnConfigList.size() || pos != fields.length - 1) {
        throw new RuntimeException("Wrong data indexing. ColumnConfig index = " + index
                + ", while it should be " + columnConfigList.size() + ". " + "Data Pos = " + pos
                + ", while it should be " + (fields.length - 1));
    }

    // the output delimiter of the norm step is user-configurable; if the user changes it after
    // normalization, this exception helps to quickly surface the mismatch.
    if (inputsIndex != inputs.length) {
        String delimiter = workerContext.getProps().getProperty(Constants.SHIFU_OUTPUT_DATA_DELIMITER,
                Constants.DEFAULT_DELIMITER);
        throw new RuntimeException("Input length is inconsistent with parsing size. Input original size: "
                + inputs.length + ", parsing size:" + inputsIndex + ", delimiter:" + delimiter + ".");
    }

    // sample negative only logic here
    if (modelConfig.getTrain().getSampleNegOnly()) {
        if (this.modelConfig.isFixInitialInput()) {
            // with fixInitialInput, filter out negative records whose hashcode falls into the
            // (1 - sampleRate) range
            int startHashCode = (100 / this.modelConfig.getBaggingNum()) * this.trainerId;
            // BaggingSampleRate is the fraction of data used for training and validation; if it is
            // 0.8, the remaining 1 - 0.8 determines endHashCode
            int endHashCode = startHashCode
                    + Double.valueOf((1d - this.modelConfig.getBaggingSampleRate()) * 100).intValue();
            if ((modelConfig.isRegression()
                    || (modelConfig.isClassification() && modelConfig.getTrain().isOneVsAll())) // regression or
                    // onevsall
                    && (int) (ideal[0] + 0.01d) == 0 // negative record
                    && isInRange(hashcode, startHashCode, endHashCode)) {
                return;
            }
        } else {
            // without fixed initial input, for regression or one-vs-all multi-class classification,
            // randomly sample out negative records
            if ((modelConfig.isRegression()
                    || (modelConfig.isClassification() && modelConfig.getTrain().isOneVsAll())) // regression or
                    // onevsall
                    && (int) (ideal[0] + 0.01d) == 0 // negative record
                    && Double.compare(super.sampelNegOnlyRandom.nextDouble(),
                            this.modelConfig.getBaggingSampleRate()) >= 0) {
                return;
            }
        }
    }

    FloatMLDataPair pair = new BasicFloatMLDataPair(new BasicFloatMLData(inputs), new BasicFloatMLData(ideal));

    // up-sampling logic: add more weight to positive records while the bagging sampling rate stays unchanged
    if (modelConfig.isRegression() && isUpSampleEnabled() && Double.compare(ideal[0], 1d) == 0) {
        // Double.compare(ideal[0], 1d) == 0 identifies a positive tag; add 1 so the sample count is never 0
        pair.setSignificance(significance * (super.upSampleRng.sample() + 1));
    } else {
        pair.setSignificance(significance);
    }

    boolean isValidation = false;
    if (workerContext.getAttachment() != null && workerContext.getAttachment() instanceof Boolean) {
        isValidation = (Boolean) workerContext.getAttachment();
    }

    boolean isInTraining = addDataPairToDataSet(hashcode, pair, isValidation);

    // do bagging sampling only for training data
    if (isInTraining) {
        float subsampleWeights = sampleWeights(pair.getIdealArray()[0]);
        if (isPositive(pair.getIdealArray()[0])) {
            this.positiveSelectedTrainCount += subsampleWeights * 1L;
        } else {
            this.negativeSelectedTrainCount += subsampleWeights * 1L;
        }
        // fold the subsample weight into the significance; a weight of 0 zeroes the significance,
        // which is exactly bagging sampling
        pair.setSignificance(pair.getSignificance() * subsampleWeights);
    } else {
        // validation data is only used to compute the validation error, so although the bagging
        // logic could also sample it, skipping real sampling here is fine
    }

}
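
Note that Float.compare(significance, 0f) < 0 in the weight check above is not quite the same test as significance < 0f: Float.compare orders -0.0f below 0.0f, so a weight of -0.0f is flagged invalid, while the < operator would let it through:

System.out.println(Float.compare(-0.0f, 0.0f) < 0); // true
System.out.println(-0.0f < 0.0f);                   // false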

From source file:com.github.lindenb.jvarkit.tools.redon.CopyNumber01.java

private void normalizeCoverage() {
    final Median medianOp = new Median();
    final Mean meanOp = new Mean();

    if (medianOp.evaluate(new double[] { 20, 1000, 19 }) != 20) {
        throw new RuntimeException("boum");
    }

    int autosome_count = 0;
    Collections.sort(this.interval2row, CopyNumber01.sortOnXY);

    for (int j = 0; j < this.interval2row.size(); ++j) {
        GCAndDepth r = this.interval2row.get(j);
        if (isSexualChrom(r.getChrom()))
            continue;
        autosome_count++;
    }

    double x[] = new double[autosome_count];
    double y[] = new double[autosome_count];

    int i = 0;
    for (int j = 0; j < this.interval2row.size(); ++j) {
        GCAndDepth r = this.interval2row.get(j);
        if (isSexualChrom(r.getChrom()))
            continue;
        x[i] = r.getX();
        y[i] = r.getY();
        ++i;
    }

    final double min_x = x[0];
    final double max_x = x[x.length - 1];

    /* merge adjacent x entries that have the same value */
    i = 0;
    int k = 0;
    while (i < x.length) {
        int j = i + 1;

        while (j < x.length && Double.compare(x[i], x[j]) == 0) {
            ++j;
        }
        x[k] = x[i];
        y[k] = meanOp.evaluate(y, i, j - i);
        ++k;
        i = j;
    }

    /* reduce the size of x and y */
    if (k != x.length) {
        info("Compacting X from " + x.length + " to " + k);
        x = Arrays.copyOf(x, k);
        y = Arrays.copyOf(y, k);
    }

    // min depth calculation
    double min_depth = Double.MAX_VALUE;

    UnivariateInterpolator interpolator = createInterpolator();
    UnivariateFunction spline = interpolator.interpolate(x, y);
    int points_removed = 0;
    i = 0;
    while (i < this.interval2row.size()) {
        GCAndDepth r = this.interval2row.get(i);
        if (r.getX() < min_x || r.getX() > max_x) {
            this.interval2row.remove(i);
            ++points_removed;
        } else {
            double norm = spline.value(r.getX());
            if (Double.isNaN(norm) || Double.isInfinite(norm)) {
                info("NAN " + r);
                this.interval2row.remove(i);
                ++points_removed;
                continue;
            }
            r.depth -= norm;
            min_depth = Math.min(min_depth, r.depth);
            ++i;
        }
    }
    info("Removed " + points_removed + " because GC% is too small (Sexual chrom)");
    spline = null;

    // shift to the minimum, and refill y for the median calculation
    info("min:" + min_depth);

    y = new double[this.interval2row.size()];
    for (i = 0; i < this.interval2row.size(); ++i) {
        GCAndDepth gc = this.interval2row.get(i);
        gc.depth -= min_depth;
        y[i] = gc.depth;
    }

    //normalize on median
    double median_depth = medianOp.evaluate(y, 0, y.length);
    info("median:" + median_depth);
    for (i = 0; i < this.interval2row.size(); ++i) {
        GCAndDepth gc = this.interval2row.get(i);
        gc.depth /= median_depth;
    }

    //restore genomic order
    Collections.sort(this.interval2row, CopyNumber01.sortOnPosition);

    /* smooth values with their neighbours */
    final int SMOOTH_WINDOW = 5;
    y = new double[this.interval2row.size()];
    for (i = 0; i < this.interval2row.size(); ++i) {
        y[i] = this.interval2row.get(i).getY();
    }
    for (i = 0; i < this.interval2row.size(); ++i) {
        GCAndDepth gc = this.interval2row.get(i);
        int left = i;
        int right = i;
        while (left > 0 && i - left < SMOOTH_WINDOW && this.interval2row.get(left - 1).tid == gc.tid) {
            left--;
        }
        while (right + 1 < this.interval2row.size() && right - i < SMOOTH_WINDOW
                && this.interval2row.get(right + 1).tid == gc.tid) {
            right++;
        }
        gc.depth = medianOp.evaluate(y, left, (right - left) + 1);
    }

}

From source file:com.ebay.erl.mobius.core.model.TupleColumnComparator.java

private static int compare(double v1, double v2) {
    return Double.compare(v1, v2);
}