Example usage for java.lang Double isInfinite

List of usage examples for java.lang Double isInfinite

Introduction

On this page you can find example usage for java.lang Double isInfinite.

Prototype

public static boolean isInfinite(double v) 

Document

Returns true if the specified number is infinitely large in magnitude, false otherwise.
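
A quick, self-contained sketch (not taken from the examples below) of what the method reports:

public class IsInfiniteDemo {
    public static void main(String[] args) {
        System.out.println(Double.isInfinite(1.0 / 0.0));                // true: division by zero yields POSITIVE_INFINITY
        System.out.println(Double.isInfinite(Double.NEGATIVE_INFINITY)); // true
        System.out.println(Double.isInfinite(Double.NaN));               // false: NaN is not infinite
        System.out.println(Double.isInfinite(Double.MAX_VALUE));         // false: very large but still finite
    }
}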

Usage

From source file:ml.shifu.shifu.core.Normalizer.java

/**
 * Parse raw value based on ColumnConfig.
 *
 * @param config
 *            ColumnConfig info
 * @param raw
 *            input column value
 * @param categoryMissingNormType
 *            missing categorical value norm type
 * @return parsed raw value. For categorical type, return BinPosRate. For numerical type, return
 *         corresponding double value. For missing data, return default value using
 *         {@link Normalizer#defaultMissingValue}.
 */
private static double parseRawValue(ColumnConfig config, Object raw,
        CategoryMissingNormType categoryMissingNormType) {
    if (categoryMissingNormType == null) {
        categoryMissingNormType = CategoryMissingNormType.POSRATE;
    }
    double value = 0.0;
    if (raw == null || StringUtils.isBlank(raw.toString())) {
        log.debug("Not decimal format but null, using default!");
        if (config.isCategorical()) {
            value = fillDefaultValue(config, categoryMissingNormType);
        } else {
            value = defaultMissingValue(config);
        }
        return value;
    }

    if (config.isCategorical()) {
        // for categorical variables there is no need to convert to double;
        // double values are treated as Strings in categorical variables
        int index = BinUtils.getBinNum(config, raw);
        if (index == -1) {
            value = fillDefaultValue(config, categoryMissingNormType);
        } else {
            Double binPosRate = config.getBinPosRate().get(index);
            if (binPosRate != null) {
                value = binPosRate.doubleValue();
            } else {
                value = fillDefaultValue(config, categoryMissingNormType);
            }
        }
    } else {
        // for numerical values, Double/Integer/Float instances need no further parsing.
        if (raw instanceof Double) {
            value = (Double) raw;
        } else if (raw instanceof Integer) {
            value = ((Integer) raw).doubleValue();
        } else if (raw instanceof Float) {
            value = ((Float) raw).doubleValue();
        } else {
            try {
                // if raw is "NaN", parsing won't throw an exception; the value will simply be Double.NaN
                value = Double.parseDouble(raw.toString());
            } catch (Exception e) {
                log.debug("Not decimal format " + raw + ", using default!");
                value = defaultMissingValue(config);
            }
        }
        if (Double.isInfinite(value) || Double.isNaN(value)) {
            // if the value is Infinite or NaN, treat it as missing value
            // should treat Infinite as missing value?
            value = defaultMissingValue(config);
        }
    }

    return value;
}
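
A note on the final guard in parseRawValue: Double.parseDouble accepts the strings "Infinity", "-Infinity" and "NaN" without throwing, so the catch block alone would not reject such input; it is the explicit Double.isInfinite/Double.isNaN check that routes it to the missing-value default. A minimal illustration (runnable inside any main method):

double v = Double.parseDouble("Infinity");                    // no exception is thrown
System.out.println(Double.isInfinite(v));                     // true -> treated as missing
System.out.println(Double.isNaN(Double.parseDouble("NaN")));  // true -> treated as missing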

From source file:org.broadinstitute.sting.gatk.walkers.variantrecalibration.VariantDataManager.java

private static double decodeAnnotation(final String annotationKey, final VariantContext vc,
        final boolean jitter) {
    double value;

    try {
        value = vc.getAttributeAsDouble(annotationKey, Double.NaN);
        if (Double.isInfinite(value)) {
            value = Double.NaN;
        }
        if (jitter && annotationKey.equalsIgnoreCase("HaplotypeScore")
                && MathUtils.compareDoubles(value, 0.0, 0.01) == 0) {
            value += 0.01 * GenomeAnalysisEngine.getRandomGenerator().nextGaussian();
        }
        if (jitter && annotationKey.equalsIgnoreCase("FS") && MathUtils.compareDoubles(value, 0.0, 0.01) == 0) {
            value += 0.01 * GenomeAnalysisEngine.getRandomGenerator().nextGaussian();
        }
        if (jitter && annotationKey.equalsIgnoreCase("InbreedingCoeff")
                && MathUtils.compareDoubles(value, 0.0, 0.01) == 0) {
            value += 0.01 * GenomeAnalysisEngine.getRandomGenerator().nextGaussian();
        }
    } catch (Exception e) {
        value = Double.NaN; // The VQSR works with missing data by marginalizing over the missing dimension when evaluating the Gaussian mixture model
    }

    return value;
}

From source file:edu.jhuapl.bsp.detector.OpenMath.java

public static double percentile(double[] x, double p) {
    ArrayList<Double> intermediate = new ArrayList<Double>(x.length);
    for (int i = 0; i < x.length; i++) {
        if (!Double.isInfinite(x[i]) && !Double.isNaN(x[i])) {
            intermediate.add(x[i]);
        }
    }
    Iterator<Double> iter = intermediate.iterator();
    double[] xcopy = new double[intermediate.size()];
    int index = 0;
    while (iter.hasNext()) {
        xcopy[index] = iter.next();
        index++;
    }
    Arrays.sort(xcopy);
    double minprctile = 100 * ((.5) / xcopy.length), maxprctile = 100 * ((xcopy.length - .5) / xcopy.length);
    if (p >= maxprctile) {
        return xcopy[xcopy.length - 1];
    } else if (p <= minprctile) {
        return xcopy[0];
    }
    double[] xprctile = new double[xcopy.length];
    for (int i = 0; i < xprctile.length; i++) {
        xprctile[i] = 100 * (((xprctile.length - (xprctile.length - i - 1)) - .5) / xprctile.length);
    }
    LinearInterpolation LI = new LinearInterpolation(xprctile, xcopy);
    return LI.interpolatedValue(p);
}
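
On Java 8+ the filtering loop above (drop infinities and NaNs, then copy and sort) can be expressed with streams; a sketch, not part of the original class, where x is the method's double[] argument:

double[] finiteOnly = java.util.Arrays.stream(x)
        .filter(Double::isFinite)   // same test as !Double.isInfinite(v) && !Double.isNaN(v)
        .sorted()
        .toArray();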

From source file:org.ebayopensource.twin.ElementImpl.java

/** 
 * Internal impl behind {get,waitFor}{Closest,}{Child,Children,Descendant,Descendants} methods 
 * @param subpath what to append to getPath(), e.g. "/children" or "/descendants"
 * @param criteria the criteria to apply (process-ID matching is added by the server)
 * @param count if 0, return all results. Else BFS layer-by-layer until we have at least count
 * @param timeout if 0, return results immediately. Else don't return an empty result set until this timeout has elapsed
 * @param shouldThrow should throw a TwinNoSuchElementException on empty list
 */
@SuppressWarnings("unchecked")
private <T extends Element> List<T> getElements(String subpath, Criteria criteria, int count, double timeout,
        boolean shouldThrow) throws TwinException {
    Map<String, Object> data = new HashMap<String, Object>();
    if (criteria != null)
        data.put("criteria", criteria);
    if (count > 0)
        data.put("count", count);
    if (timeout > 0) {
        if (Double.isInfinite(timeout))
            data.put("waitForResults", true);
        else
            data.put("waitForResults", timeout);
    }
    List<Object> searchResults = session.requestArray("GET", getPath() + "/" + subpath, data);
    List<T> result = new ArrayList<T>();
    for (Object remote : searchResults)
        result.add((T) ElementImpl.create((RemoteObject) remote));
    if (shouldThrow && result.isEmpty()) {
        String message = "Found no " + subpath + " of " + this;
        if (criteria != null)
            message += " matching " + criteria;
        if (timeout > 0)
            message += " after waiting " + timeout;
        throw TwinError.NoSuchElement.create(message);
    }
    return result;
}

From source file:net.demilich.metastone.bahaviour.ModifiedMCTS.MCTSCritique.java

private double getIdealTestingError(double[][] labels, ArrayList<GameContext> games,
        ArrayList<Double> testValues) {
    double sum = 0;

    for (int i = 0; i < labels.length; i++) {
        if (testValues.size() != labels.length) {
            System.err.println("MISMATCH!");
            System.exit(0);
        }

        double actualResult = testValues.get(i);

        sum += ((labels[i][0] - actualResult) * (labels[i][0] - actualResult)) / ((double) labels.length);
        if (Double.isInfinite((labels[i][0] - actualResult) * (labels[i][0] - actualResult))) {
            System.err.println("HEY FUCK UP!! " + labels[i][0] + " " + actualResult);
        }
        System.err.println("test value was " + labels[i][0] + " actual value " + actualResult + " "
                + labels.length + " " + testValues.size());

    }
    return sum;

}

From source file:org.eclipse.january.metadata.internal.StatisticsMetadataImpl.java

/**
 * Calculate summary statistics for a dataset along an axis
 * @param ignoreNaNs if true, ignore NaNs
 * @param ignoreInfs if true, ignore infinities
 * @param axis
 */
@SuppressWarnings("deprecation")
private Dataset[] createAxisStats(final int axis, final boolean ignoreNaNs, final boolean ignoreInfs) {
    int rank = dataset.getRank();

    int[] oshape = dataset.getShape();
    int alen = oshape[axis];
    oshape[axis] = 1;

    int[] nshape = new int[rank - 1];
    for (int i = 0; i < axis; i++) {
        nshape[i] = oshape[i];
    }
    for (int i = axis + 1; i < rank; i++) {
        nshape[i - 1] = oshape[i];
    }

    Dataset max;
    Dataset min;
    IntegerDataset maxIndex;
    IntegerDataset minIndex;
    LongDataset count = DatasetFactory.zeros(LongDataset.class, nshape);
    Dataset sum;
    Dataset mean;
    Dataset var;

    if (isize == 1) {
        max = DatasetFactory.zeros(nshape, dtype);
        min = DatasetFactory.zeros(nshape, dtype);
        maxIndex = DatasetFactory.zeros(IntegerDataset.class, nshape);
        minIndex = DatasetFactory.zeros(IntegerDataset.class, nshape);
        sum = DatasetFactory.zeros(nshape, DTypeUtils.getLargestDType(dtype));
        mean = DatasetFactory.zeros(DoubleDataset.class, nshape);
        var = DatasetFactory.zeros(DoubleDataset.class, nshape);
    } else {
        max = null;
        min = null;
        maxIndex = null;
        minIndex = null;
        sum = DatasetFactory.zeros(isize, nshape, DTypeUtils.getLargestDType(dtype));
        mean = DatasetFactory.zeros(isize, CompoundDoubleDataset.class, nshape);
        var = DatasetFactory.zeros(isize, CompoundDoubleDataset.class, nshape);
    }

    IndexIterator qiter = count.getIterator(true);
    int[] qpos = qiter.getPos();
    int[] spos = oshape.clone();

    if (isize == 1) {
        DoubleDataset lmean = (DoubleDataset) mean;
        DoubleDataset lvar = (DoubleDataset) var;

        final SummaryStatistics stats = new SummaryStatistics();
        while (qiter.hasNext()) {
            int i = 0;
            for (; i < axis; i++) {
                spos[i] = qpos[i];
            }
            spos[i++] = 0;
            for (; i < rank; i++) {
                spos[i] = qpos[i - 1];
            }

            stats.clear();
            // sum of logs is slow and we don't use it, so block its calculation here
            stats.setSumLogImpl(new NullStorelessUnivariateStatistic());

            double amax = Double.NEGATIVE_INFINITY;
            double amin = Double.POSITIVE_INFINITY;
            boolean hasNaNs = false;
            if (ignoreNaNs) {
                for (int j = 0; j < alen; j++) {
                    spos[axis] = j;
                    final double val = dataset.getDouble(spos);

                    if (Double.isNaN(val)) {
                        hasNaNs = true;
                        continue;
                    } else if (ignoreInfs && Double.isInfinite(val)) {
                        continue;
                    }

                    if (val > amax) {
                        amax = val;
                    }
                    if (val < amin) {
                        amin = val;
                    }

                    stats.addValue(val);
                }
            } else {
                for (int j = 0; j < alen; j++) {
                    spos[axis] = j;
                    final double val = dataset.getDouble(spos);

                    if (hasNaNs) {
                        if (!Double.isNaN(val))
                            stats.addValue(0);
                        continue;
                    }

                    if (Double.isNaN(val)) {
                        amax = Double.NaN;
                        amin = Double.NaN;
                        hasNaNs = true;
                    } else if (ignoreInfs && Double.isInfinite(val)) {
                        continue;
                    } else {
                        if (val > amax) {
                            amax = val;
                        }
                        if (val < amin) {
                            amin = val;
                        }
                    }
                    stats.addValue(val);
                }
            }

            count.setAbs(qiter.index, stats.getN());

            max.set(amax, qpos);
            min.set(amin, qpos);
            boolean fmax = false;
            boolean fmin = false;
            if (hasNaNs) {
                if (ignoreNaNs) {
                    for (int j = 0; j < alen; j++) {
                        spos[axis] = j;
                        final double val = dataset.getDouble(spos);
                        if (Double.isNaN(val))
                            continue;

                        if (!fmax && val == amax) { // FIXME qiter.index is wrong!!!
                            maxIndex.setAbs(qiter.index, j);
                            fmax = true;
                            if (fmin)
                                break;
                        }
                        if (!fmin && val == amin) {
                            minIndex.setAbs(qiter.index, j);
                            fmin = true;
                            if (fmax)
                                break;
                        }
                    }
                } else {
                    for (int j = 0; j < alen; j++) {
                        spos[axis] = j;
                        final double val = dataset.getDouble(spos);
                        if (Double.isNaN(val)) {
                            maxIndex.setAbs(qiter.index, j);
                            minIndex.setAbs(qiter.index, j);
                            break;
                        }
                    }
                }
            } else {
                for (int j = 0; j < alen; j++) {
                    spos[axis] = j;
                    final double val = dataset.getDouble(spos);
                    if (!fmax && val == amax) {
                        maxIndex.setAbs(qiter.index, j);
                        fmax = true;
                        if (fmin)
                            break;
                    }
                    if (!fmin && val == amin) {
                        minIndex.setAbs(qiter.index, j);
                        fmin = true;
                        if (fmax)
                            break;
                    }
                }
            }
            sum.setObjectAbs(qiter.index, stats.getSum());
            lmean.setAbs(qiter.index, stats.getMean());
            lvar.setAbs(qiter.index, stats.getVariance());
        }
    } else {
        CompoundDataset ldataset = (CompoundDataset) dataset;
        CompoundDoubleDataset lmean = (CompoundDoubleDataset) mean;
        CompoundDoubleDataset lvar = (CompoundDoubleDataset) var;
        double[] darray = new double[isize];

        while (qiter.hasNext()) {
            int i = 0;
            for (; i < axis; i++) {
                spos[i] = qpos[i];
            }
            spos[i++] = 0;
            for (; i < rank; i++) {
                spos[i] = qpos[i - 1];
            }

            final SummaryStatistics[] stats = new SummaryStatistics[isize];
            for (int k = 0; k < isize; k++) {
                stats[k] = new SummaryStatistics();
            }
            for (int j = 0; j < alen; j++) {
                spos[axis] = j;
                ldataset.getDoubleArray(darray, spos);
                boolean skip = false;
                for (int k = 0; k < isize; k++) {
                    double v = darray[k];
                    if (ignoreNaNs && Double.isNaN(v)) {
                        skip = true;
                        break;
                    }
                    if (ignoreInfs && Double.isInfinite(v)) {
                        skip = true;
                        break;
                    }
                }
                if (!skip)
                    for (int k = 0; k < isize; k++) {
                        stats[k].addValue(darray[k]);
                    }
            }

            count.setAbs(qiter.index, (int) stats[0].getN());

            for (int k = 0; k < isize; k++) {
                darray[k] = stats[k].getSum();
            }
            sum.set(darray, qpos);
            for (int k = 0; k < isize; k++) {
                darray[k] = stats[k].getMean();
            }
            lmean.setItem(darray, qpos);
            for (int k = 0; k < isize; k++) {
                darray[k] = stats[k].getVariance();
            }
            lvar.setItem(darray, qpos);
        }
    }

    return new Dataset[] { max, min, maxIndex, minIndex, count, mean, sum, var };
}

From source file:de.biomedical_imaging.traj.math.TrajectorySplineFit.java

/**
 * Please see
 * https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line#Another_formula
 * and
 * http://mathworld.wolfram.com/Two-PointForm.html
 * @param p1 First point on line
 * @param p2 Second point on line
 * @param p Point for which the distance should be calculated.
 * @return distance of p to the line defined by p1 and p2
 */
public double distancePointLine(Point2D.Double p1, Point2D.Double p2, Point2D.Double p) {
    double d;
    double m = (p2.y - p1.y) / (p2.x - p1.x);

    if (Double.isInfinite(m)) {
        d = Math.abs(p2.x - p.x);
    } else {
        double k = -1 * m * p1.x + p1.y;
        d = Math.sqrt(Math.pow((p.x + m * p.y - m * k) / (m * m + 1) - p.x, 2)
                + Math.pow(m * ((p.x + m * p.y - m * k) / (m * m + 1)) + k - p.y, 2));
    }
    return d;
}
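
The Double.isInfinite branch covers vertical lines, where the two-point slope formula divides by zero; a small check with hypothetical coordinates (assuming java.awt.geom.Point2D is imported):

Point2D.Double p1 = new Point2D.Double(2.0, 1.0);
Point2D.Double p2 = new Point2D.Double(2.0, 5.0);              // same x -> vertical line
double m = (p2.y - p1.y) / (p2.x - p1.x);                      // 4.0 / 0.0
System.out.println(Double.isInfinite(m));                      // true, so the method falls back to |p2.x - p.x|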

From source file:org.um.feri.ears.algorithms.moo.dbea.DBEA.java

/**
 * Updates the ideal point and intercepts given the new solution.
 *
 * @param solution the new solution
 */
void updateIdealPointAndIntercepts(MOSolutionBase<Type> solution) {
    if (!solution.violatesConstraints()) {
        // update the ideal point
        for (int j = 0; j < num_obj; j++) {
            idealPoint[j] = Math.min(idealPoint[j], solution.getObjective(j));
            intercepts[j] = Math.max(intercepts[j], solution.getObjective(j));
        }

        // compute the axis intercepts
        ParetoSolution<Type> feasibleSolutions = getFeasibleSolutions(population);
        feasibleSolutions.add(solution);

        ParetoSolution<Type> nondominatedSolutions = getNondominatedFront(feasibleSolutions);

        if (!nondominatedSolutions.isEmpty()) {
            // find the points with the largest value in each objective
            ParetoSolution<Type> extremePoints = new ParetoSolution<Type>();

            for (int i = 0; i < num_obj; i++) {
                extremePoints.add(largestObjectiveValue(i, nondominatedSolutions));
            }

            if (numberOfUniqueSolutions(extremePoints) != num_obj) {
                for (int i = 0; i < num_obj; i++) {
                    intercepts[i] = extremePoints.get(i).getObjective(i);
                }
            } else {
                try {
                    RealMatrix b = new Array2DRowRealMatrix(num_obj, 1);
                    RealMatrix A = new Array2DRowRealMatrix(num_obj, num_obj);

                    for (int i = 0; i < num_obj; i++) {
                        b.setEntry(i, 0, 1.0);

                        for (int j = 0; j < num_obj; j++) {
                            A.setEntry(i, j, extremePoints.get(i).getObjective(j));
                        }
                    }

                    double numerator = new LUDecomposition(A).getDeterminant();
                    b.scalarMultiply(numerator);
                    RealMatrix normal = MatrixUtils.inverse(A).multiply(b);

                    for (int i = 0; i < num_obj; i++) {
                        intercepts[i] = numerator / normal.getEntry(i, 0);

                        if (intercepts[i] <= 0 || Double.isNaN(intercepts[i])
                                || Double.isInfinite(intercepts[i])) {
                            intercepts[i] = extremePoints.get(i).getObjective(i);
                        }
                    }
                } catch (RuntimeException e) {
                    for (int i = 0; i < num_obj; i++) {
                        intercepts[i] = extremePoints.get(i).getObjective(i);
                    }
                }
            }
        }
    }
}
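
The Double.isInfinite test on the intercepts guards the division by the plane-normal component: with a degenerate extreme-point matrix that component can be zero, and floating-point division by zero in Java produces an infinity rather than throwing. A tiny illustration with made-up numbers:

double numerator = 3.5;
double normalComponent = 0.0;                          // degenerate geometry
double intercept = numerator / normalComponent;        // POSITIVE_INFINITY, no exception
System.out.println(Double.isInfinite(intercept));      // true -> fall back to the extreme point's objective value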

From source file:org.renjin.parser.NumericLiterals.java

/**
 * Finds the closest double-precision floating point number to the given decimal string, parsed by
 * {@link #parseDoubleDecimal(CharSequence, int, int, int, char)} above.
 *
 * <p>This implementation is based on OpenJDK's {@code com.sun.misc.FloatingDecimal.ASCIIToBinaryBuffer.doubleValue()},
 * but included here nearly verbatim to avoid a dependency on an internal SDK class. The original code
 * is copyright 1996, 2013, Oracle and/or its affiliates and licensed under the GPL v2.</p>
 *
 * @param isNegative true if the parsed number is negative
 * @param decExponent the decimal exponent of the parsed number
 * @param digits the decimal digit characters of the parsed number
 * @param nDigits the number of valid entries in {@code digits}
 * @return the closest {@code double} value to the number described by the arguments
 */
public static double doubleValue(boolean isNegative, int decExponent, char[] digits, int nDigits) {
    int kDigits = Math.min(nDigits, MAX_DECIMAL_DIGITS + 1);
    //
    // convert the lead kDigits to a long integer.
    //
    // (special performance hack: start to do it using int)
    int iValue = (int) digits[0] - (int) '0';
    int iDigits = Math.min(kDigits, INT_DECIMAL_DIGITS);
    for (int i = 1; i < iDigits; i++) {
        iValue = iValue * 10 + (int) digits[i] - (int) '0';
    }
    long lValue = (long) iValue;
    for (int i = iDigits; i < kDigits; i++) {
        lValue = lValue * 10L + (long) ((int) digits[i] - (int) '0');
    }
    double dValue = (double) lValue;
    int exp = decExponent - kDigits;
    //
    // lValue now contains a long integer with the value of
    // the first kDigits digits of the number.
    // dValue contains the (double) of the same.
    //

    if (nDigits <= MAX_DECIMAL_DIGITS) {
        //
        // possibly an easy case.
        // We know that the digits can be represented
        // exactly. And if the exponent isn't too outrageous,
        // the whole thing can be done with one operation,
        // thus one rounding error.
        // Note that all our constructors trim all leading and
        // trailing zeros, so simple values (including zero)
        // will always end up here
        //
        if (exp == 0 || dValue == 0.0) {
            return (isNegative) ? -dValue : dValue; // small floating integer
        } else if (exp >= 0) {
            if (exp <= MAX_SMALL_TEN) {
                //
                // Can get the answer with one operation,
                // thus one roundoff.
                //
                double rValue = dValue * SMALL_10_POW[exp];
                return (isNegative) ? -rValue : rValue;
            }
            int slop = MAX_DECIMAL_DIGITS - kDigits;
            if (exp <= MAX_SMALL_TEN + slop) {
                //
                // We can multiply dValue by 10^(slop)
                // and it is still "small" and exact.
                // Then we can multiply by 10^(exp-slop)
                // with one rounding.
                //
                dValue *= SMALL_10_POW[slop];
                double rValue = dValue * SMALL_10_POW[exp - slop];
                return (isNegative) ? -rValue : rValue;
            }
            //
            // Else we have a hard case with a positive exp.
            //
        } else {
            if (exp >= -MAX_SMALL_TEN) {
                //
                // Can get the answer in one division.
                //
                double rValue = dValue / SMALL_10_POW[-exp];
                return (isNegative) ? -rValue : rValue;
            }
            //
            // Else we have a hard case with a negative exp.
            //
        }
    }

    //
    // Harder cases:
    // The sum of digits plus exponent is greater than
    // what we think we can do with one error.
    //
    // Start by approximating the right answer by,
    // naively, scaling by powers of 10.
    //
    if (exp > 0) {
        if (decExponent > MAX_DECIMAL_EXPONENT + 1) {
            //
            // Lets face it. This is going to be
            // Infinity. Cut to the chase.
            //
            return (isNegative) ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
        }
        if ((exp & 15) != 0) {
            dValue *= SMALL_10_POW[exp & 15];
        }
        if ((exp >>= 4) != 0) {
            int j;
            for (j = 0; exp > 1; j++, exp >>= 1) {
                if ((exp & 1) != 0) {
                    dValue *= BIG_10_POW[j];
                }
            }
            //
            // The reason for the weird exp > 1 condition
            // in the above loop was so that the last multiply
            // would get unrolled. We handle it here.
            // It could overflow.
            //
            double t = dValue * BIG_10_POW[j];
            if (Double.isInfinite(t)) {
                //
                // It did overflow.
                // Look more closely at the result.
                // If the exponent is just one too large,
                // then use the maximum finite as our estimate
                // value. Else call the result infinity
                // and punt it.
                // ( I presume this could happen because
                // rounding forces the result here to be
                // an ULP or two larger than
                // Double.MAX_VALUE ).
                //
                t = dValue / 2.0;
                t *= BIG_10_POW[j];
                if (Double.isInfinite(t)) {
                    return (isNegative) ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
                }
                t = Double.MAX_VALUE;
            }
            dValue = t;
        }
    } else if (exp < 0) {
        exp = -exp;
        if (decExponent < MIN_DECIMAL_EXPONENT - 1) {
            //
            // Lets face it. This is going to be
            // zero. Cut to the chase.
            //
            return (isNegative) ? -0.0 : 0.0;
        }
        if ((exp & 15) != 0) {
            dValue /= SMALL_10_POW[exp & 15];
        }
        if ((exp >>= 4) != 0) {
            int j;
            for (j = 0; exp > 1; j++, exp >>= 1) {
                if ((exp & 1) != 0) {
                    dValue *= TINY_10_POW[j];
                }
            }
            //
            // The reason for the weird exp > 1 condition
            // in the above loop was so that the last multiply
            // would get unrolled. We handle it here.
            // It could underflow.
            //
            double t = dValue * TINY_10_POW[j];
            if (t == 0.0) {
                //
                // It did underflow.
                // Look more closely at the result.
                // If the exponent is just one too small,
                // then use the minimum finite as our estimate
                // value. Else call the result 0.0
                // and punt it.
                // ( I presume this could happen because
                // rounding forces the result here to be
                // an ULP or two less than
                // Double.MIN_VALUE ).
                //
                t = dValue * 2.0;
                t *= TINY_10_POW[j];
                if (t == 0.0) {
                    return (isNegative) ? -0.0 : 0.0;
                }
                t = Double.MIN_VALUE;
            }
            dValue = t;
        }
    }

    //
    // dValue is now approximately the result.
    // The hard part is adjusting it, by comparison
    // with FDBigInteger arithmetic.
    // Formulate the EXACT big-number result as
    // bigD0 * 10^exp
    //
    if (nDigits > MAX_NDIGITS) {
        nDigits = MAX_NDIGITS + 1;
        digits[MAX_NDIGITS] = '1';
    }
    FDBigInteger bigD0 = new FDBigInteger(lValue, digits, kDigits, nDigits);
    exp = decExponent - nDigits;

    long ieeeBits = Double.doubleToRawLongBits(dValue); // IEEE-754 bits of double candidate
    final int B5 = Math.max(0, -exp); // powers of 5 in bigB, value is not modified inside correctionLoop
    final int D5 = Math.max(0, exp); // powers of 5 in bigD, value is not modified inside correctionLoop
    bigD0 = bigD0.multByPow52(D5, 0);
    bigD0.makeImmutable(); // prevent bigD0 modification inside correctionLoop
    FDBigInteger bigD = null;
    int prevD2 = 0;

    correctionLoop: while (true) {
        // here ieeeBits can't be NaN, Infinity or zero
        int binexp = (int) (ieeeBits >>> EXP_SHIFT);
        long bigBbits = ieeeBits & SIGNIF_BIT_MASK;
        if (binexp > 0) {
            bigBbits |= FRACT_HOB;
        } else { // Normalize denormalized numbers.
            assert bigBbits != 0L : bigBbits; // doubleToBigInt(0.0)
            int leadingZeros = Long.numberOfLeadingZeros(bigBbits);
            int shift = leadingZeros - (63 - EXP_SHIFT);
            bigBbits <<= shift;
            binexp = 1 - shift;
        }
        binexp -= EXP_BIAS;
        int lowOrderZeros = Long.numberOfTrailingZeros(bigBbits);
        bigBbits >>>= lowOrderZeros;
        final int bigIntExp = binexp - EXP_SHIFT + lowOrderZeros;
        final int bigIntNBits = EXP_SHIFT + 1 - lowOrderZeros;

        //
        // Scale bigD, bigB appropriately for
        // big-integer operations.
        // Naively, we multiply by powers of ten
        // and powers of two. What we actually do
        // is keep track of the powers of 5 and
        // powers of 2 we would use, then factor out
        // common divisors before doing the work.
        //
        int B2 = B5; // powers of 2 in bigB
        int D2 = D5; // powers of 2 in bigD
        int Ulp2; // powers of 2 in halfUlp.
        if (bigIntExp >= 0) {
            B2 += bigIntExp;
        } else {
            D2 -= bigIntExp;
        }
        Ulp2 = B2;
        // shift bigB and bigD left by a number s. t.
        // halfUlp is still an integer.
        int hulpbias;
        if (binexp <= -EXP_BIAS) {
            // This is going to be a denormalized number
            // (if not actually zero).
            // half an ULP is at 2^-(DoubleConsts.EXP_BIAS+EXP_SHIFT+1)
            hulpbias = binexp + lowOrderZeros + EXP_BIAS;
        } else {
            hulpbias = 1 + lowOrderZeros;
        }
        B2 += hulpbias;
        D2 += hulpbias;
        // if there are common factors of 2, we might just as well
        // factor them out, as they add nothing useful.
        int common2 = Math.min(B2, Math.min(D2, Ulp2));
        B2 -= common2;
        D2 -= common2;
        Ulp2 -= common2;
        // do multiplications by powers of 5 and 2
        FDBigInteger bigB = FDBigInteger.valueOfMulPow52(bigBbits, B5, B2);
        if (bigD == null || prevD2 != D2) {
            bigD = bigD0.leftShift(D2);
            prevD2 = D2;
        }
        //
        // to recap:
        // bigB is the scaled-big-int version of our floating-point
        // candidate.
        // bigD is the scaled-big-int version of the exact value
        // as we understand it.
        // halfUlp is 1/2 an ulp of bigB, except for special cases
        // of exact powers of 2
        //
        // the plan is to compare bigB with bigD, and if the difference
        // is less than halfUlp, then we're satisfied. Otherwise,
        // use the ratio of difference to halfUlp to calculate a fudge
        // factor to add to the floating value, then go 'round again.
        //
        FDBigInteger diff;
        int cmpResult;
        boolean overvalue;
        if ((cmpResult = bigB.cmp(bigD)) > 0) {
            overvalue = true; // our candidate is too big.
            diff = bigB.leftInplaceSub(bigD); // bigB is not used further - reuse
            if ((bigIntNBits == 1) && (bigIntExp > -EXP_BIAS + 1)) {
                // candidate is a normalized exact power of 2 and
                // is too big (larger than Double.MIN_NORMAL). We will be subtracting.
                // For our purposes, ulp is the ulp of the
                // next smaller range.
                Ulp2 -= 1;
                if (Ulp2 < 0) {
                    // rats. Cannot de-scale ulp this far.
                    // must scale diff in other direction.
                    Ulp2 = 0;
                    diff = diff.leftShift(1);
                }
            }
        } else if (cmpResult < 0) {
            overvalue = false; // our candidate is too small.
            diff = bigD.rightInplaceSub(bigB); // bigB is not used further - reuse
        } else {
            // the candidate is exactly right!
            // this happens with surprising frequency
            break correctionLoop;
        }
        cmpResult = diff.cmpPow52(B5, Ulp2);
        if ((cmpResult) < 0) {
            // difference is small.
            // this is close enough
            break correctionLoop;
        } else if (cmpResult == 0) {
            // difference is exactly half an ULP
            // round to some other value maybe, then finish
            if ((ieeeBits & 1) != 0) { // half ties to even
                ieeeBits += overvalue ? -1 : 1; // nextDown or nextUp
            }
            break correctionLoop;
        } else {
            // difference is non-trivial.
            // could scale addend by ratio of difference to
            // halfUlp here, if we bothered to compute that difference.
            // Most of the time ( I hope ) it is about 1 anyway.
            ieeeBits += overvalue ? -1 : 1; // nextDown or nextUp
            if (ieeeBits == 0 || ieeeBits == EXP_BIT_MASK) { // 0.0 or Double.POSITIVE_INFINITY
                break correctionLoop; // oops. Fell off end of range.
            }
            continue; // try again.
        }

    }
    if (isNegative) {
        ieeeBits |= SIGN_BIT_MASK;
    }
    return Double.longBitsToDouble(ieeeBits);
}
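
The overflow handling around BIG_10_POW relies on the same property used throughout these examples: multiplying a finite double past Double.MAX_VALUE silently saturates to an infinity, which Double.isInfinite then detects. For instance:

double t = 1e308 * 10.0;                                        // exceeds Double.MAX_VALUE (about 1.8e308)
System.out.println(Double.isInfinite(t));                       // true: the product overflowed
System.out.println(Double.isInfinite(Double.MAX_VALUE * 0.5));  // false: still finite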

From source file:org.broadinstitute.gatk.tools.walkers.variantrecalibration.VariantDataManager.java

private static double decodeAnnotation(final String annotationKey, final VariantContext vc,
        final boolean jitter, final VariantRecalibratorArgumentCollection vrac, final VariantDatum datum) {
    double value;

    final double LOG_OF_TWO = 0.6931472;

    try {
        //if we're in allele-specific mode and an allele-specific annotation has been requested, parse the appropriate value from the list
        if (vrac.useASannotations && annotationKey.startsWith(GATKVCFConstants.ALLELE_SPECIFIC_PREFIX)) {
            final List<Object> valueList = vc.getAttributeAsList(annotationKey);
            if (vc.hasAllele(datum.alternateAllele)) {
                final int altIndex = vc.getAlleleIndex(datum.alternateAllele) - 1; //-1 is to convert the index from all alleles (including reference) to just alternate alleles
                value = Double.parseDouble((String) valueList.get(altIndex));
            }
            //if somehow our alleles got mixed up
            else
                throw new IllegalStateException("VariantDatum allele " + datum.alternateAllele
                        + " is not contained in the input VariantContext.");
        } else
            value = vc.getAttributeAsDouble(annotationKey, Double.NaN);
        if (Double.isInfinite(value)) {
            value = Double.NaN;
        }
        if (jitter && annotationKey.equalsIgnoreCase(GATKVCFConstants.HAPLOTYPE_SCORE_KEY)
                && MathUtils.compareDoubles(value, 0.0, PRECISION) == 0) {
            value += 0.01 * Utils.getRandomGenerator().nextGaussian();
        }
        if (jitter
                && (annotationKey.equalsIgnoreCase(GATKVCFConstants.FISHER_STRAND_KEY)
                        || annotationKey.equalsIgnoreCase(GATKVCFConstants.AS_FILTER_STATUS_KEY))
                && MathUtils.compareDoubles(value, 0.0, PRECISION) == 0) {
            value += 0.01 * Utils.getRandomGenerator().nextGaussian();
        }
        if (jitter && annotationKey.equalsIgnoreCase(GATKVCFConstants.INBREEDING_COEFFICIENT_KEY)
                && MathUtils.compareDoubles(value, 0.0, PRECISION) == 0) {
            value += 0.01 * Utils.getRandomGenerator().nextGaussian();
        }
        if (jitter
                && (annotationKey.equalsIgnoreCase(GATKVCFConstants.STRAND_ODDS_RATIO_KEY)
                        || annotationKey.equalsIgnoreCase(GATKVCFConstants.AS_STRAND_ODDS_RATIO_KEY))
                && MathUtils.compareDoubles(value, LOG_OF_TWO, PRECISION) == 0) {
            value += 0.01 * Utils.getRandomGenerator().nextGaussian();
        } //min SOR is 2.0, then we take ln
        if (jitter && (annotationKey.equalsIgnoreCase(VCFConstants.RMS_MAPPING_QUALITY_KEY)
                || annotationKey.equalsIgnoreCase(GATKVCFConstants.AS_RMS_MAPPING_QUALITY_KEY))) {
            if (vrac.MQ_CAP > 0) {
                value = logitTransform(value, -SAFETY_OFFSET, vrac.MQ_CAP + SAFETY_OFFSET);
                if (MathUtils.compareDoubles(value,
                        logitTransform(vrac.MQ_CAP, -SAFETY_OFFSET, vrac.MQ_CAP + SAFETY_OFFSET),
                        PRECISION) == 0) {
                    value += vrac.MQ_JITTER * Utils.getRandomGenerator().nextGaussian();
                }
            } else if (MathUtils.compareDoubles(value, vrac.MQ_CAP, PRECISION) == 0) {
                value += vrac.MQ_JITTER * Utils.getRandomGenerator().nextGaussian();
            }
        }
    } catch (Exception e) {
        value = Double.NaN; // The VQSR works with missing data by marginalizing over the missing dimension when evaluating the Gaussian mixture model
    }

    return value;
}