List of usage examples for java.lang.Double.isInfinite
public static boolean isInfinite(double v)
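Before the examples, a minimal sketch of what isInfinite reports for a few representative values; this follows the standard java.lang.Double behaviour (note in particular that NaN is not considered infinite).

public class IsInfiniteBasics {
    public static void main(String[] args) {
        System.out.println(Double.isInfinite(1.0 / 0.0));                // true  (division by zero gives positive infinity)
        System.out.println(Double.isInfinite(Double.NEGATIVE_INFINITY)); // true
        System.out.println(Double.isInfinite(Double.MAX_VALUE * 2));     // true  (overflow produces infinity)
        System.out.println(Double.isInfinite(Double.NaN));               // false (NaN is not infinite)
        System.out.println(Double.isInfinite(42.0));                     // false (ordinary finite value)
    }
}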
From source file:MathFunc.java
/**
 * Converts rectangular coordinates (x, y) to polar (r, <i>theta</i>). This method
 * computes the phase <i>theta</i> by computing an arc tangent of y/x in the range
 * of <i>-pi</i> to <i>pi</i>. Special cases:
 * <ul>
 * <li>If either argument is <code>NaN</code>, then the result is <code>NaN</code>.
 * <li>If the first argument is positive zero and the second argument is positive,
 * or the first argument is positive and finite and the second argument is positive
 * infinity, then the result is positive zero.
 * <li>If the first argument is negative zero and the second argument is positive,
 * or the first argument is negative and finite and the second argument is positive
 * infinity, then the result is negative zero.
 * <li>If the first argument is positive zero and the second argument is negative,
 * or the first argument is positive and finite and the second argument is negative
 * infinity, then the result is the <code>double</code> value closest to <i>pi</i>.
 * <li>If the first argument is negative zero and the second argument is negative,
 * or the first argument is negative and finite and the second argument is negative
 * infinity, then the result is the <code>double</code> value closest to <i>-pi</i>.
 * <li>If the first argument is positive and the second argument is positive zero
 * or negative zero, or the first argument is positive infinity and the second
 * argument is finite, then the result is the <code>double</code> value closest to <i>pi</i>/2.
 * <li>If the first argument is negative and the second argument is positive zero
 * or negative zero, or the first argument is negative infinity and the second
 * argument is finite, then the result is the <code>double</code> value closest to <i>-pi</i>/2.
 * <li>If both arguments are positive infinity, then the result is the double value
 * closest to <i>pi</i>/4.
 * <li>If the first argument is positive infinity and the second argument is
 * negative infinity, then the result is the double value closest to 3*<i>pi</i>/4.
 * <li>If the first argument is negative infinity and the second argument is
 * positive infinity, then the result is the double value closest to -<i>pi</i>/4.
 * <li>If both arguments are negative infinity, then the result is the double value
 * closest to -3*<i>pi</i>/4.
 * </ul>
 * <p>
 * A result must be within 2 ulps of the correctly rounded result. Results must be
 * semi-monotonic.
 *
 * @param y the ordinate coordinate
 * @param x the abscissa coordinate
 * @return the <i>theta</i> component of the point (r, <i>theta</i>) in polar
 *         coordinates that corresponds to the point (x, y) in Cartesian coordinates.
 */
public static double atan2(double y, double x) {
    // Special cases.
    if (Double.isNaN(y) || Double.isNaN(x)) {
        return Double.NaN;
    } else if (Double.isInfinite(y)) {
        if (y > 0.0) { // Positive infinity
            if (Double.isInfinite(x)) {
                if (x > 0.0) {
                    return PIover4;
                } else {
                    return 3.0 * PIover4;
                }
            } else if (x != 0.0) {
                return PIover2;
            }
        } else { // Negative infinity
            if (Double.isInfinite(x)) {
                if (x > 0.0) {
                    return -PIover4;
                } else {
                    return -3.0 * PIover4;
                }
            } else if (x != 0.0) {
                return -PIover2;
            }
        }
    } else if (y == 0.0) {
        if (x > 0.0) {
            return y;
        } else if (x < 0.0) {
            return Math.PI;
        }
    } else if (Double.isInfinite(x)) {
        if (x > 0.0) { // Positive infinity
            if (y > 0.0) {
                return 0.0;
            } else if (y < 0.0) {
                return -0.0;
            }
        } else { // Negative infinity
            if (y > 0.0) {
                return Math.PI;
            } else if (y < 0.0) {
                return -Math.PI;
            }
        }
    } else if (x == 0.0) {
        if (y > 0.0) {
            return PIover2;
        } else if (y < 0.0) {
            return -PIover2;
        }
    }

    // Implementation: a simple version ported from a PASCAL implementation:
    // http://everything2.com/index.pl?node_id=1008481
    double arcTangent;

    // Use arctan() avoiding division by zero.
    if (Math.abs(x) > Math.abs(y)) {
        arcTangent = atan(y / x);
    } else {
        arcTangent = atan(x / y); // -PI/4 <= a <= PI/4
        if (arcTangent < 0) {
            arcTangent = -PIover2 - arcTangent; // a is negative, so we're adding
        } else {
            arcTangent = PIover2 - arcTangent;
        }
    }

    // Adjust result to be from [-PI, PI]
    if (x < 0) {
        if (y < 0) {
            arcTangent = arcTangent - Math.PI;
        } else {
            arcTangent = arcTangent + Math.PI;
        }
    }

    return arcTangent;
}
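The infinite-argument branches above mirror the special cases documented for Math.atan2. A small, hypothetical test harness (the class and helper names below are assumptions, not part of MathFunc.java) that exercises those cases against the JDK implementation:

public class Atan2SpecialCases {
    public static void main(String[] args) {
        double inf = Double.POSITIVE_INFINITY;
        // For two infinite arguments the result is pinned to a multiple of pi/4.
        check(inf, inf, Math.PI / 4);
        check(inf, -inf, 3 * Math.PI / 4);
        check(-inf, inf, -Math.PI / 4);
        check(-inf, -inf, -3 * Math.PI / 4);
        // An infinite ordinate with a finite abscissa collapses to +-pi/2.
        check(inf, 123.0, Math.PI / 2);
        check(-inf, 123.0, -Math.PI / 2);
    }

    private static void check(double y, double x, double expected) {
        double actual = Math.atan2(y, x);
        System.out.printf("atan2(%s, %s) = %s (expected %s)%n", y, x, actual, expected);
    }
}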
From source file:org.deeplearning4j.optimize.VectorizedBackTrackLineSearch.java
public double optimize(DoubleMatrix line, double initialStep) {
    DoubleMatrix g, x, oldParameters;
    double slope, test, alamin, alam, alam2, tmplam;
    double rhs1, rhs2, a, b, disc, oldAlam;
    double f, fold, f2;

    g = function.getValueGradient(); // gradient
    x = function.getParameters(); // parameters
    oldParameters = x.dup();

    alam2 = tmplam = 0.0;
    f2 = fold = function.getValue();
    if (logger.isDebugEnabled()) {
        logger.trace("ENTERING BACKTRACK\n");
        logger.trace("Entering BackTrackLnSrch, value=" + fold + ",\ndirection.oneNorm:" + line.norm1()
                + " direction.infNorm:"
                + FastMath.max(Double.NEGATIVE_INFINITY, MatrixFunctions.abs(line).max()));
    }

    assert (!MatrixUtil.isNaN(g));

    double sum = line.norm2();
    if (sum > stpmax) {
        logger.warn("attempted step too big. scaling: sum= " + sum + ", stpmax= " + stpmax);
        line.muli(stpmax / sum);
    }

    // dot product
    slope = SimpleBlas.dot(g, line);
    logger.debug("slope = " + slope);

    if (slope < 0) {
        throw new InvalidOptimizableException("Slope = " + slope + " is negative");
    }
    if (slope == 0)
        throw new InvalidOptimizableException("Slope = " + slope + " is zero");

    // find maximum lambda
    // converge when (delta x) / x < REL_TOLX for all coordinates.
    // the largest step size that triggers this threshold is
    // precomputed and saved in alamin
    DoubleMatrix maxOldParams = new DoubleMatrix(oldParameters.length);
    for (int i = 0; i < oldParameters.length; i++)
        maxOldParams.put(i, Math.max(Math.abs(oldParameters.get(i)), 1.0));

    DoubleMatrix testMatrix = MatrixFunctions.abs(line).div(maxOldParams);
    test = testMatrix.max();

    alamin = relTolx / test;

    alam = 1.0;
    oldAlam = 0.0;
    int iteration = 0;
    // look for step size in direction given by "line"
    for (iteration = 0; iteration < maxIterations; iteration++) {
        // x = oldParameters + alam*line
        // initially, alam = 1.0, i.e. take full Newton step
        logger.trace("BackTrack loop iteration " + iteration + " : alam=" + alam + " oldAlam=" + oldAlam);
        logger.trace("before step, x.1norm: " + x.norm1() + "\nalam: " + alam + "\noldAlam: " + oldAlam);
        assert (alam != oldAlam) : "alam == oldAlam";

        x.addi(line.mul(alam - oldAlam)); // step

        logger.debug("after step, x.1norm: " + x.norm1());

        // check for convergence
        // convergence on delta x
        if ((alam < alamin) || smallAbsDiff(oldParameters, x)) {
            // if ((alam < alamin)) {
            function.setParameters(oldParameters);
            f = function.getValue();
            logger.trace("EXITING BACKTRACK: Jump too small (alamin=" + alamin
                    + "). Exiting and using xold. Value=" + f);
            return 0.0;
        }

        function.setParameters(x);
        oldAlam = alam;
        f = function.getValue();

        logger.debug("value = " + f);

        // sufficient function increase (Wolfe condition)
        if (f >= fold + ALF * alam * slope) {
            logger.debug("EXITING BACKTRACK: value=" + f);
            if (f < fold)
                throw new IllegalStateException("Function did not increase: f=" + f + " < " + fold + "=fold");
            return alam;
        }
        // if value is infinite, i.e. we've
        // jumped to unstable territory, then scale down jump
        else if (Double.isInfinite(f) || Double.isInfinite(f2)) {
            logger.warn("Value is infinite after jump " + oldAlam + ". f=" + f + ", f2=" + f2
                    + ". Scaling back step size...");
            tmplam = .2 * alam;
            if (alam < alamin) { // convergence on delta x
                function.setParameters(oldParameters);
                f = function.getValue();
                logger.warn("EXITING BACKTRACK: Jump too small. Exiting and using xold. Value=" + f);
                return 0.0;
            }
        } else { // backtrack
            if (alam == 1.0) // first time through
                tmplam = -slope / (2.0 * (f - fold - slope));
            else {
                rhs1 = f - fold - alam * slope;
                rhs2 = f2 - fold - alam2 * slope;
                assert ((alam - alam2) != 0) : "FAILURE: dividing by alam-alam2. alam=" + alam;
                a = (rhs1 / (FastMath.pow(alam, 2)) - rhs2 / (FastMath.pow(alam2, 2))) / (alam - alam2);
                b = (-alam2 * rhs1 / (alam * alam) + alam * rhs2 / (alam2 * alam2)) / (alam - alam2);
                if (a == 0.0)
                    tmplam = -slope / (2.0 * b);
                else {
                    disc = b * b - 3.0 * a * slope;
                    if (disc < 0.0) {
                        tmplam = .5 * alam;
                    } else if (b <= 0.0)
                        tmplam = (-b + FastMath.sqrt(disc)) / (3.0 * a);
                    else
                        tmplam = -slope / (b + FastMath.sqrt(disc));
                }
                if (tmplam > .5 * alam)
                    tmplam = .5 * alam; // lambda <= .5 lambda_1
            }
        }
        alam2 = alam;
        f2 = f;
        logger.debug("tmplam:" + tmplam);
        alam = Math.max(tmplam, .1 * alam); // lambda >= .1*Lambda_1
    }
    if (iteration >= maxIterations)
        throw new IllegalStateException("Too many iterations.");
    return 0.0;
}
From source file:net.sf.mzmine.modules.peaklistmethods.dataanalysis.heatmaps.HeatMapTask.java
public void run() {
    setStatus(TaskStatus.PROCESSING);
    logger.info("Heat map plot");

    if (plegend) {
        newPeakList = groupingDataset(selectedParameter, referenceGroup.toString());
    } else {
        newPeakList = modifySimpleDataset(selectedParameter, referenceGroup.toString());
    }

    if (newPeakList.length == 0 || newPeakList[0].length == 0) {
        setStatus(TaskStatus.ERROR);
        errorMessage = "The data for heat map is empty.";
        return;
    }

    Rengine rEngine = null;
    try {
        rEngine = RUtilities.getREngine();
    } catch (Throwable t) {
        setStatus(TaskStatus.ERROR);
        errorMessage = "Heat map requires R but it could not be loaded (" + t.getMessage() + ')';
        return;
    }

    finishedPercentage = 0.3f;

    synchronized (RUtilities.R_SEMAPHORE) {

        // Load gplots library
        if (rEngine.eval("require(gplots)").asBool().isFALSE()) {
            setStatus(TaskStatus.ERROR);
            errorMessage = "Heat map plot requires the \"gplots\" R package, which could not be loaded. Please add it to your R installation.";
        }

        try {
            if (outputType.contains("png")) {
                if (height < 500 || width < 500) {
                    setStatus(TaskStatus.ERROR);
                    errorMessage = "Figure height or width is too small. Minimum height and width is 500.";
                    return;
                }
            }

            rEngine.eval("dataset<- matrix(\"\",nrow =" + newPeakList[0].length + ",ncol=" + newPeakList.length + ")");

            if (plegend) {
                rEngine.eval("stars<- matrix(\"\",nrow =" + newPeakList[0].length + ",ncol=" + newPeakList.length + ")");
            }

            // assign the values to the matrix
            for (int row = 0; row < newPeakList[0].length; row++) {
                for (int column = 0; column < newPeakList.length; column++) {
                    int r = row + 1;
                    int c = column + 1;
                    double value = newPeakList[column][row];
                    if (plegend) {
                        String pValue = pValueMatrix[column][row];
                        rEngine.eval("stars[" + r + "," + c + "] = \"" + pValue + "\"");
                    }
                    if (!Double.isInfinite(value) && !Double.isNaN(value)) {
                        rEngine.eval("dataset[" + r + "," + c + "] = " + value);
                    } else {
                        rEngine.eval("dataset[" + r + "," + c + "] = NA");
                    }
                }
            }
            finishedPercentage = 0.4f;

            rEngine.eval("dataset <- apply(dataset, 2, as.numeric)");

            // Assign row names to the data set
            long rows = rEngine.rniPutStringArray(rowNames);
            rEngine.rniAssign("rowNames", rows, 0);
            rEngine.eval("rownames(dataset)<-rowNames");

            // Assign column names to the data set
            long columns = rEngine.rniPutStringArray(colNames);
            rEngine.rniAssign("colNames", columns, 0);
            rEngine.eval("colnames(dataset)<-colNames");

            finishedPercentage = 0.5f;

            // Remove the rows with too many NA's. The distances between
            // rows can't be calculated if the rows don't have
            // at least one sample in common.
            rEngine.eval(" d <- as.matrix(dist(dataset))");
            rEngine.eval("d[upper.tri(d)] <- 0");
            rEngine.eval("dataset <- dataset[-na.action(na.omit(d)),]");

            finishedPercentage = 0.8f;

            String marginParameter = "margins = c(" + columnMargin + "," + rowMargin + ")";
            rEngine.eval(
                    "br<-c(seq(from=min(dataset,na.rm=T),to=0,length.out=256),seq(from=0,to=max(dataset,na.rm=T),length.out=256))",
                    false);

            // Possible output file types
            if (outputType.contains("pdf")) {
                rEngine.eval("pdf(\"" + outputFile + "\", height=" + height + ", width=" + width + ")");
            } else if (outputType.contains("fig")) {
                rEngine.eval("xfig(\"" + outputFile + "\", height=" + height + ", width=" + width
                        + ", horizontal = FALSE, pointsize = 12)");
            } else if (outputType.contains("svg")) {
                // Load RSvgDevice library
                if (rEngine.eval("require(RSvgDevice)").asBool().isFALSE()) {
                    throw new IllegalStateException(
                            "The \"RSvgDevice\" R package couldn't be loaded - is it installed in R?");
                }
                rEngine.eval("devSVG(\"" + outputFile + "\", height=" + height + ", width=" + width + ")");
            } else if (outputType.contains("png")) {
                rEngine.eval("png(\"" + outputFile + "\", height=" + height + ", width=" + width + ")");
            }

            if (plegend) {
                rEngine.eval("heatmap.2(dataset," + marginParameter
                        + ", trace=\"none\", col=bluered(length(br)-1), breaks=br, cellnote=stars, notecol=\"black\", notecex="
                        + starSize + ", na.color=\"grey\")", false);
            } else {
                rEngine.eval("heatmap.2(dataset," + marginParameter
                        + ", trace=\"none\", col=bluered(length(br)-1), breaks=br, na.color=\"grey\")", false);
            }

            rEngine.eval("dev.off()", false);
            finishedPercentage = 1.0f;
        } catch (Throwable t) {
            throw new IllegalStateException("R error during the heat map creation", t);
        }
    }
    setStatus(TaskStatus.FINISHED);
}
From source file:org.deeplearning4j.optimize.solvers.VectorizedBackTrackLineSearchMinimum.java
public double optimize(INDArray line, int lineSearchIteration, INDArray g, INDArray params) {
    INDArray x, oldParameters;
    double slope, test, alamin, alam, alam2, tmplam;
    double rhs1, rhs2, a, b, disc, oldAlam;
    double f, fold, f2;

    oldParameters = params.dup();

    alam2 = tmplam = 0.0;
    f2 = fold = function.getValue();
    if (logger.isDebugEnabled()) {
        logger.trace("ENTERING BACKTRACK\n");
        logger.trace("Entering BackTrackLnSrch, value=" + fold + ",\ndirection.oneNorm:"
                + line.norm1(Integer.MAX_VALUE) + " direction.infNorm:" + FastMath.max(Double.NEGATIVE_INFINITY,
                        (double) Transforms.abs(line).max(Integer.MAX_VALUE).element()));
    }

    LinAlgExceptions.assertValidNum(g);
    double sum = (double) line.norm2(Integer.MAX_VALUE).element();
    // if(sum > stpmax) {
    //     logger.warn("attempted step too big. scaling: sum= " + sum + ", stpmax= " + stpmax);
    //     line.muli(stpmax / sum);
    // }

    // dot product
    slope = Nd4j.getBlasWrapper().dot(g, line);
    logger.debug("slope = " + slope);

    // find maximum lambda
    // converge when (delta x) / x < REL_TOLX for all coordinates.
    // the largest step size that triggers this threshold is
    // precomputed and saved in alamin
    INDArray maxOldParams = Nd4j.create(line.length());
    for (int i = 0; i < line.length(); i++) {
        maxOldParams.putScalar(i, Math.max(Math.abs((double) oldParameters.getScalar(i).element()), 1.0));
    }

    INDArray testMatrix = Transforms.abs(line).div(maxOldParams);
    test = (double) testMatrix.max(Integer.MAX_VALUE).element();

    alamin = relTolx / test;

    alam = 1.0;
    oldAlam = 0.0;
    int iteration = 0;
    // look for step size in direction given by "line"
    for (iteration = 0; iteration < maxIterations; iteration++) {
        function.setCurrentIteration(lineSearchIteration);
        // x = oldParameters + alam*line
        // initially, alam = 1.0, i.e. take full Newton step
        logger.trace("BackTrack loop iteration " + iteration + " : alam=" + alam + " oldAlam=" + oldAlam);
        logger.trace("before step, x.1norm: " + params.norm1(Integer.MAX_VALUE) + "\nalam: " + alam
                + "\noldAlam: " + oldAlam);
        assert (alam != oldAlam) : "alam == oldAlam";

        params.addi(line.mul(alam - oldAlam)); // step

        logger.debug("after step, x.1norm: " + params.norm1(Integer.MAX_VALUE));

        // check for convergence
        // convergence on delta x
        if ((alam < alamin) || smallAbsDiff(oldParameters, params)) {
            // if ((alam < alamin)) {
            function.setParameters(oldParameters);
            f = function.getValue();
            logger.trace("EXITING BACKTRACK: Jump too small (alamin=" + alamin
                    + "). Exiting and using xold. Value=" + f);
            return 0.0;
        }

        function.setParameters(params);
        oldAlam = alam;
        f = function.getValue();

        logger.debug("value = " + f);

        // sufficient function increase (Wolfe condition)
        if (f >= fold + ALF * alam * slope) {
            logger.debug("EXITING BACKTRACK: value=" + f);
            if (f < fold)
                throw new IllegalStateException("Function did not increase: f=" + f + " < " + fold + "=fold");
            return alam;
        }
        // if value is infinite, i.e. we've
        // jumped to unstable territory, then scale down jump
        else if (Double.isInfinite(f) || Double.isInfinite(f2)) {
            logger.warn("Value is infinite after jump " + oldAlam + ". f=" + f + ", f2=" + f2
                    + ". Scaling back step size...");
            tmplam = .2 * alam;
            if (alam < alamin) { // convergence on delta x
                function.setParameters(oldParameters);
                f = function.getValue();
                logger.warn("EXITING BACKTRACK: Jump too small. Exiting and using xold. Value=" + f);
                return 0.0;
            }
        } else { // backtrack
            if (alam == 1.0) // first time through
                tmplam = -slope / (2.0 * (f - fold - slope));
            else {
                rhs1 = f - fold - alam * slope;
                rhs2 = f2 - fold - alam2 * slope;
                assert ((alam - alam2) != 0) : "FAILURE: dividing by alam-alam2. alam=" + alam;
                a = (rhs1 / (FastMath.pow(alam, 2)) - rhs2 / (FastMath.pow(alam2, 2))) / (alam - alam2);
                b = (-alam2 * rhs1 / (alam * alam) + alam * rhs2 / (alam2 * alam2)) / (alam - alam2);
                if (a == 0.0)
                    tmplam = -slope / (2.0 * b);
                else {
                    disc = b * b - 3.0 * a * slope;
                    if (disc < 0.0) {
                        tmplam = .5 * alam;
                    } else if (b <= 0.0)
                        tmplam = (-b + FastMath.sqrt(disc)) / (3.0 * a);
                    else
                        tmplam = -slope / (b + FastMath.sqrt(disc));
                }
                if (tmplam > .5 * alam)
                    tmplam = .5 * alam; // lambda <= .5 lambda_1
            }
        }
        alam2 = alam;
        f2 = f;
        logger.debug("tmplam:" + tmplam);
        alam = Math.max(tmplam, .1 * alam); // lambda >= .1*Lambda_1
    }
    if (iteration >= maxIterations)
        throw new IllegalStateException("Too many iterations.");
    return 0.0;
}
From source file:projects.hip.exec.HrDiagram.java
/**
 * Update the {@link HrDiagram#hrDiagPanel}.
 */
private void updateChart() {

    XYSeries series = new XYSeries("Hipparcos HR diagram");

    // Build a histogram of distance

    // Number of bins
    int bins = (int) Math.ceil((d_max - d_min) / d_step);

    // Histogram data
    double[] d_hist = new double[bins];

    // Count the stars
    int n = 0;

    for (HipStar hipStar : hipStars) {

        // Get the Hipparcos magnitude and colour indices
        double h = hipStar.Hpmag;
        double bv = hipStar.bv;
        // double vi = hipStar.vi;

        // Use the parallax to correct the apparent magnitude to absolute magnitude.

        // Extract the parallax and error to use
        double p = hipStar.Plx;
        double sigma_p = hipStar.e_Plx;

        // Filter on the fractional parallax error
        double f = sigma_p / Math.abs(p);
        if (f > fMax) {
            continue;
        }

        // Filter out objects with no B-V index
        if (bv == 0.0) {
            continue;
        }

        // Correct to arcseconds
        p /= 1000;
        sigma_p /= 1000;

        // Get the distance
        double d = DistanceFromParallax.getDistance(p, sigma_p, method);

        // Filter & convert to absolute magnitude
        if (d > 0 && !Double.isInfinite(d)) {
            n++;
            double H = MagnitudeUtils.getAbsoluteMagnitude(d, h);
            series.add(bv, H);

            // Add to distance histogram
            if (d > d_min && d < d_max) {
                int bin = (int) Math.floor((d - d_min) / d_step);
                d_hist[bin]++;
            }
        }
    }

    logger.log(Level.INFO, "Plotting " + n + " Hipparcos stars.");

    JFreeChart hrChart = getHrChart(series);
    if (hrDiagPanel == null) {
        // Branch is used on initialisation
        hrDiagPanel = new ChartPanel(hrChart);
    } else {
        hrDiagPanel.setChart(hrChart);
    }

    JFreeChart dChart = getDistanceChart(d_hist);
    if (dDistPanel == null) {
        // Branch is used on initialisation
        dDistPanel = new ChartPanel(dChart);
    } else {
        dDistPanel.setChart(dChart);
    }
}
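A reduced sketch of why the d > 0 && !Double.isInfinite(d) guard matters: a naive inverse-parallax distance blows up to +Infinity when the measured parallax is zero, and goes negative for negative parallaxes. The helper below is hypothetical, not the DistanceFromParallax class used above:

public class ParallaxSketch {
    /** Naive distance estimate in parsecs from a parallax in arcseconds. */
    static double naiveDistance(double parallaxArcsec) {
        return 1.0 / parallaxArcsec; // a parallax of 0 yields +Infinity, a negative parallax yields d < 0
    }

    public static void main(String[] args) {
        double[] parallaxes = {0.1, 0.0, -0.02};
        for (double p : parallaxes) {
            double d = naiveDistance(p);
            boolean usable = d > 0 && !Double.isInfinite(d);
            System.out.println("plx=" + p + " -> d=" + d + " usable=" + usable);
        }
    }
}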
From source file:org.onebusaway.nyc.vehicle_tracking.impl.inference.CategoricalDist.java
@SuppressWarnings("unchecked")
public Multiset<T> sample(int samples) {

    Preconditions.checkArgument(samples > 0);
    Preconditions.checkState(!_entriesToLogProbs.isEmpty(), "No entries in the CDF");
    Preconditions.checkState(!Double.isInfinite(_logCumulativeProb), "No cumulative probability in CDF");

    final Multiset<T> sampled = HashMultiset.create(samples);
    if (_entriesToLogProbs.size() == 1) {
        sampled.add(Iterables.getOnlyElement(_entriesToLogProbs.keySet()), samples);
    } else {
        if (_emd == null) {
            initializeDistribution();
        }

        _emd.setNumTrials(samples);
        final Vector sampleRes = _emd.sample(threadLocalRng.get());
        int i = 0;
        for (final VectorEntry ventry : sampleRes) {
            if (ventry.getValue() > 0.0)
                sampled.add((T) _entries[_objIdx.get(i)], (int) ventry.getValue());
            i++;
        }
    }

    return sampled;
}
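The precondition on _logCumulativeProb works because log-space accumulators conventionally start at Double.NEGATIVE_INFINITY (log of zero probability) and stay infinite until an entry is added. A minimal sketch of that convention; the logAdd helper below is an assumption for illustration, not the OneBusAway code:

public class LogSumSketch {
    /** Numerically stable log(exp(a) + exp(b)). */
    static double logAdd(double a, double b) {
        if (Double.isInfinite(a) && a < 0) return b; // adding to log(0)
        if (Double.isInfinite(b) && b < 0) return a;
        double max = Math.max(a, b);
        return max + Math.log(Math.exp(a - max) + Math.exp(b - max));
    }

    public static void main(String[] args) {
        double logCumulative = Double.NEGATIVE_INFINITY;            // empty distribution
        System.out.println(Double.isInfinite(logCumulative));       // true -> sampling would be rejected
        logCumulative = logAdd(logCumulative, Math.log(0.25));
        logCumulative = logAdd(logCumulative, Math.log(0.75));
        System.out.println(Double.isInfinite(logCumulative));       // false, cumulative probability is now ~1.0
    }
}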
From source file:org.sonar.plugins.buildstability.BuildStabilitySensor.java
private double normalize(double value) { return Double.isInfinite(value) ? 0 : value; }
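A hedged usage sketch of the same idea: metrics derived from ratios can become infinite when the denominator is zero, and this normalization maps them back to 0 before reporting. The surrounding class and the build-ratio scenario are illustrative assumptions, not part of the Sonar plugin:

public class NormalizeSketch {
    static double normalize(double value) {
        return Double.isInfinite(value) ? 0 : value;
    }

    public static void main(String[] args) {
        int successfulBuilds = 7;
        int failedBuilds = 0;
        double ratio = (double) successfulBuilds / failedBuilds; // +Infinity
        System.out.println(normalize(ratio)); // reported as 0.0 instead of Infinity
        System.out.println(normalize(2.5));   // finite values pass through unchanged
    }
}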
From source file:com.opengamma.analytics.math.minimization.QuasiNewtonVectorMinimizer.java
private void bisectBacktrack(final DoubleMatrix1D p, final Function1D<DoubleMatrix1D, Double> function,
        final DataBundle data) {
    do {
        data.setLambda0(data.getLambda0() * 0.1);
        updatePosition(p, function, data);
    } while (Double.isNaN(data.getG1()) || Double.isInfinite(data.getG1()) || Double.isNaN(data.getG2())
            || Double.isInfinite(data.getG2()));
}
From source file:com.clust4j.metrics.pairwise.TestDistanceEnums.java
@Test
public void testInfP() {
    assertTrue(Distance.CHEBYSHEV.getP() == Double.POSITIVE_INFINITY);
    assertTrue(Double.isInfinite(Distance.CHEBYSHEV.getP()));

    for (Distance d : Distance.values()) {
        if (d.equals(Distance.CHEBYSHEV))
            continue;
        assertFalse(d.getP() == Double.POSITIVE_INFINITY);
        assertFalse(Double.isInfinite(d.getP()));
    }
}
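The test asserts that Chebyshev distance reports p = +Infinity; a small hedged sketch of why that is the natural encoding: as p grows, the Minkowski p-norm of the difference vector converges to the maximum absolute coordinate difference. The method names below are illustrative, not the clust4j API:

public class ChebyshevSketch {
    static double minkowski(double[] a, double[] b, double p) {
        double sum = 0.0;
        for (int i = 0; i < a.length; i++) sum += Math.pow(Math.abs(a[i] - b[i]), p);
        return Math.pow(sum, 1.0 / p);
    }

    static double chebyshev(double[] a, double[] b) {
        double max = 0.0;
        for (int i = 0; i < a.length; i++) max = Math.max(max, Math.abs(a[i] - b[i]));
        return max;
    }

    public static void main(String[] args) {
        double[] a = {0, 0}, b = {3, 4};
        System.out.println(minkowski(a, b, 2));  // 5.0 (Euclidean)
        System.out.println(minkowski(a, b, 50)); // ~4.0, approaching the largest component
        System.out.println(chebyshev(a, b));     // 4.0, the p -> Infinity limit
    }
}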
From source file:com.opengamma.analytics.math.interpolation.MonotonicityPreservingQuinticSplineInterpolator.java
@Override
public PiecewisePolynomialResult interpolate(final double[] xValues, final double[][] yValuesMatrix) {
    ArgumentChecker.notNull(xValues, "xValues");
    ArgumentChecker.notNull(yValuesMatrix, "yValuesMatrix");

    ArgumentChecker.isTrue(
            xValues.length == yValuesMatrix[0].length | xValues.length + 2 == yValuesMatrix[0].length,
            "(xValues length = yValuesMatrix's row vector length) or (xValues length + 2 = yValuesMatrix's row vector length)");
    ArgumentChecker.isTrue(xValues.length > 2, "Data points should be more than 2");

    final int nDataPts = xValues.length;
    final int yValuesLen = yValuesMatrix[0].length;
    final int dim = yValuesMatrix.length;

    for (int i = 0; i < nDataPts; ++i) {
        ArgumentChecker.isFalse(Double.isNaN(xValues[i]), "xValues containing NaN");
        ArgumentChecker.isFalse(Double.isInfinite(xValues[i]), "xValues containing Infinity");
    }
    for (int i = 0; i < yValuesLen; ++i) {
        for (int j = 0; j < dim; ++j) {
            ArgumentChecker.isFalse(Double.isNaN(yValuesMatrix[j][i]), "yValuesMatrix containing NaN");
            ArgumentChecker.isFalse(Double.isInfinite(yValuesMatrix[j][i]), "yValuesMatrix containing Infinity");
        }
    }
    for (int i = 0; i < nDataPts; ++i) {
        for (int j = i + 1; j < nDataPts; ++j) {
            ArgumentChecker.isFalse(xValues[i] == xValues[j], "xValues should be distinct");
        }
    }

    double[] xValuesSrt = new double[nDataPts];
    DoubleMatrix2D[] coefMatrix = new DoubleMatrix2D[dim];

    for (int i = 0; i < dim; ++i) {
        xValuesSrt = Arrays.copyOf(xValues, nDataPts);
        double[] yValuesSrt = new double[nDataPts];
        if (nDataPts == yValuesLen) {
            yValuesSrt = Arrays.copyOf(yValuesMatrix[i], nDataPts);
        } else {
            yValuesSrt = Arrays.copyOfRange(yValuesMatrix[i], 1, nDataPts + 1);
        }
        ParallelArrayBinarySort.parallelBinarySort(xValuesSrt, yValuesSrt);

        final double[] intervals = _solver.intervalsCalculator(xValuesSrt);
        final double[] slopes = _solver.slopesCalculator(yValuesSrt, intervals);
        final PiecewisePolynomialResult result = _method.interpolate(xValues, yValuesMatrix[i]);

        ArgumentChecker.isTrue(result.getOrder() >= 3, "Primary interpolant should be degree >= 2");

        final double[] initialFirst = _function.differentiate(result, xValuesSrt).getData()[0];
        final double[] initialSecond = _function.differentiateTwice(result, xValuesSrt).getData()[0];
        final double[] first = firstDerivativeCalculator(yValuesSrt, intervals, slopes, initialFirst);

        boolean modFirst = false;
        int k;
        double[] aValues = aValuesCalculator(slopes, first);
        double[] bValues = bValuesCalculator(slopes, first);
        double[][] intervalsA = getIntervalsA(intervals, slopes, first, bValues);
        double[][] intervalsB = getIntervalsB(intervals, slopes, first, aValues);
        while (modFirst == false) {
            k = 0;
            for (int j = 0; j < nDataPts - 2; ++j) {
                if (first[j + 1] > 0.) {
                    if (intervalsA[j + 1][1] + Math.abs(intervalsA[j + 1][1]) * ERROR < intervalsB[j][0]
                            - Math.abs(intervalsB[j][0]) * ERROR
                            | intervalsA[j + 1][0] - Math.abs(intervalsA[j + 1][0]) * ERROR > intervalsB[j][1]
                                    + Math.abs(intervalsB[j][1]) * ERROR) {
                        ++k;
                        first[j + 1] = firstDerivativesRecalculator(intervals, slopes, aValues, bValues, j + 1);
                    }
                }
            }
            if (k == 0) {
                modFirst = true;
            }
            aValues = aValuesCalculator(slopes, first);
            bValues = bValuesCalculator(slopes, first);
            intervalsA = getIntervalsA(intervals, slopes, first, bValues);
            intervalsB = getIntervalsB(intervals, slopes, first, aValues);
        }
        final double[] second = secondDerivativeCalculator(initialSecond, intervalsA, intervalsB);

        coefMatrix[i] = new DoubleMatrix2D(_solver.solve(yValuesSrt, intervals, slopes, first, second));
    }

    final int nIntervals = coefMatrix[0].getNumberOfRows();
    final int nCoefs = coefMatrix[0].getNumberOfColumns();
    double[][] resMatrix = new double[dim * nIntervals][nCoefs];

    for (int i = 0; i < nIntervals; ++i) {
        for (int j = 0; j < dim; ++j) {
            resMatrix[dim * i + j] = coefMatrix[j].getRowVector(i).getData();
        }
    }

    for (int i = 0; i < (nIntervals * dim); ++i) {
        for (int j = 0; j < nCoefs; ++j) {
            ArgumentChecker.isFalse(Double.isNaN(resMatrix[i][j]), "Too large input");
            ArgumentChecker.isFalse(Double.isInfinite(resMatrix[i][j]), "Too large input");
        }
    }

    return new PiecewisePolynomialResult(new DoubleMatrix1D(xValuesSrt), new DoubleMatrix2D(resMatrix), nCoefs, dim);
}
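The NaN/Infinity checks that bracket this interpolator (on the inputs before fitting and on the coefficient matrix afterwards) can be collected into a small validation helper. The sketch below only illustrates that pattern; it is an assumption about style, not the OpenGamma ArgumentChecker API:

public class FiniteInputCheck {
    /** Rejects any NaN or infinite entry before numerical work begins. */
    static void requireFinite(double[] values, String name) {
        for (double v : values) {
            if (Double.isNaN(v)) throw new IllegalArgumentException(name + " contains NaN");
            if (Double.isInfinite(v)) throw new IllegalArgumentException(name + " contains Infinity");
        }
    }

    public static void main(String[] args) {
        requireFinite(new double[] {1.0, 2.0, 3.0}, "xValues"); // passes silently
        try {
            requireFinite(new double[] {1.0, Double.POSITIVE_INFINITY}, "yValues");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // yValues contains Infinity
        }
    }
}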