List of usage examples for java.lang.Double.isNaN
public static boolean isNaN(double v)
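The method returns true if and only if its argument is NaN. A minimal, standalone sketch (not taken from any of the projects listed below) of why the call is needed: NaN is the only double value that compares unequal to itself, so an ordinary == comparison against Double.NaN can never succeed.

public class IsNaNDemo {
    public static void main(String[] args) {
        double nan = 0.0 / 0.0;                       // dividing zero by zero yields NaN
        System.out.println(nan == Double.NaN);        // false: NaN compares unequal to everything, itself included
        System.out.println(nan != nan);               // true: the self-inequality property that isNaN relies on
        System.out.println(Double.isNaN(nan));        // true: the reliable test
        System.out.println(Double.isNaN(1.0 / 0.0));  // false: positive infinity is not NaN
    }
}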
From source file: edu.cornell.med.icb.goby.R.TestFisherExact.java

/**
 * An example of an R x C table from Agresti (2002, p. 57), Job Satisfaction.
 */
@Test
public void agrestiJobSatisfaction() {
    // rows: satisfaction (VeryD, LittleD, ModerateS, VeryS); columns: income (<15k, 15-25k, 25-40k, >40k)
    final int[] inputTable = {
            /* VeryD     */  1,  2,  1,  0,
            /* LittleD   */  3,  3,  6,  1,
            /* ModerateS */ 10, 10, 14,  9,
            /* VeryS     */  6,  7, 12, 11
    };

    final FisherExact.Result result = FisherExact.fexact(inputTable, 4, 4);
    assertEquals("pValue does not match", 0.7826849389656096, result.getPValue(), EPSILON);

    // everything else should be invalid since the input was not a 2x2 matrix
    assertNotNull("Confidence interval should not be null", result.getConfidenceInterval());
    assertTrue("Confidence interval should be an empty array",
            ArrayUtils.isEmpty(result.getConfidenceInterval()));
    assertTrue("Estimate should be NaN", Double.isNaN(result.getEstimate()));
    assertTrue("Odds ratio should be NaN", Double.isNaN(result.getOddsRatio()));
    assertEquals("Wrong Hypothesis for result", FisherExact.AlternativeHypothesis.twosided,
            result.getAlternativeHypothesis());
}
From source file: net.sourceforge.processdash.ui.web.reports.RadarChart.java

private void maybeScaleDataAxes() {
    for (int i = 0; i < data.numCols(); i++) {
        int n = i + 1;
        String target = getParameter("t" + n);
        if (!StringUtils.hasValue(target))
            continue;

        double targetVal = 0;
        try {
            targetVal = FormatUtil.parseNumber(target);
        } catch (Exception e) {
            SaveableData val = getDataRepository().getInheritableValue(getPrefix(), target);
            if (val != null) {
                SimpleData sVal = val.getSimpleValue();
                if (sVal instanceof NumberData)
                    targetVal = ((NumberData) sVal).getDouble();
            }
        }
        if (targetVal == 0)
            continue;

        boolean reverse = parameters.containsKey("r" + n);

        SimpleData d = data.getData(1, n);
        if (d instanceof NumberData) {
            NumberData num = (NumberData) d;
            double val = num.getDouble();
            if (Double.isInfinite(val) || Double.isNaN(val))
                val = 1.0;
            else if (reverse)
                val = 2.0 / (1.0 + (val / targetVal));
            else
                val = val / targetVal;
            data.setData(1, n, new DoubleData(val));
        }
    }
}
From source file: etomica.models.co2.PNGCPM.java

public double energy(IMoleculeList atoms) {
    double sum = 0;
    if (component != Component.INDUCTION) {
        for (int i = 0; i < atoms.getMoleculeCount() - 1; i++) {
            pair.atom0 = atoms.getMolecule(i);
            for (int j = i + 1; j < atoms.getMoleculeCount(); j++) {
                pair.atom1 = atoms.getMolecule(j);
                sum += getNonPolarizationEnergy(pair);
                if (Double.isInfinite(sum)) {
                    return sum;
                }
            }
        }
    }
    if (component != Component.TWO_BODY) {
        sum += getPolarizationEnergy(atoms);
    }
    if (!oops && Double.isNaN(sum)) {
        oops = true;
        energy(atoms);
        throw new RuntimeException("oops NaN");
    }
    return sum;
}
From source file: org.tsho.dmc2.core.chart.CowebRenderer.java

public void render(final Graphics2D g2, final Rectangle2D dataArea, final PlotRenderingInfo info) {
    state = STATE_RUNNING;

    if (plot.isAlpha()) {
        g2.setComposite(AlphaComposite.SrcOver);
    }

    Stepper.Point2D result = stepper.getCurrentPoint2D();

    int transX, transY;

    double start = (int) dataArea.getMinX();
    double end = (int) dataArea.getMaxX();

    double[] value = new double[1];

    int prevY = 0;
    boolean flagOld = false;
    boolean flagNew = false;

    label: for (double i = start; i <= end; i += 1) {
        value[0] = this.domainAxis.java2DToValue(i, dataArea, RectangleEdge.BOTTOM);

        stepper.setInitialValue(value);
        stepper.initialize();

        for (int j = 0; j < power; j++) {
            stepper.step();
        }

        result = stepper.getCurrentPoint2D();

        transX = (int) i;
        transY = (int) rangeAxis.valueToJava2D(result.getX(), dataArea, RectangleEdge.LEFT);
        flagNew = Double.isNaN(result.getX());

        if (bigDots) {
            g2.fillRect(transX - 1, transY - 1, 3, 3);
        } else {
            g2.fillRect(transX, transY, 1, 1);
        }

        if (connectWithLines) {
            if (i > start) {
                if (!flagOld && !flagNew)
                    g2.drawLine(transX, transY, transX - 1, prevY);
            }
            prevY = transY;
            flagOld = flagNew;
        }

        if (stopped) {
            state = STATE_STOPPED;
            return;
        }
    }

    if (animate) {
        animateCowebPlot(g2, dataArea);
    }

    state = STATE_FINISHED;
}
From source file: geogebra.kernel.AlgoRootNewton.java

final double calcRoot(Function fun, double start) {
    double root = Double.NaN;

    if (rootFinderBrent == null)
        rootFinderBrent = new BrentSolver(Kernel.STANDARD_PRECISION);

    // try Brent method with borders close to start value
    try {
        double step = (kernel.getXmax() - kernel.getXmin()) / 10;
        root = rootFinderBrent.solve(MAX_ITERATIONS, new RealRootAdapter(fun), start - step, start + step, start);
        if (checkRoot(fun, root)) {
            //System.out.println("1. Brent worked: " + root);
            return root;
        }
    } catch (Exception e) {
        root = Double.NaN;
    }

    // try Brent method on valid interval around start
    double[] borders = getDomain(fun, start);
    try {
        root = rootFinderBrent.solve(MAX_ITERATIONS, new RealRootAdapter(fun), borders[0], borders[1], start);
        if (checkRoot(fun, root)) {
            //System.out.println("2. Brent worked: " + root);
            return root;
        }
    } catch (Exception e) {
        root = Double.NaN;
    }

    // try Newton's method
    RealRootDerivFunction derivFun = fun.getRealRootDerivFunction();
    if (derivFun != null) {
        // check if fun(start) is defined
        double eval = fun.evaluate(start);
        if (Double.isNaN(eval) || Double.isInfinite(eval)) {
            // shift left border slightly right
            borders[0] = 0.9 * borders[0] + 0.1 * borders[1];
            start = (borders[0] + borders[1]) / 2;
        }

        if (rootFinderNewton == null) {
            rootFinderNewton = new NewtonSolver();
        }

        try {
            root = rootFinderNewton.solve(MAX_ITERATIONS, new RealRootDerivAdapter(derivFun),
                    borders[0], borders[1], start);
            if (checkRoot(fun, root)) {
                //System.out.println("Newton worked: " + root);
                return root;
            }
        } catch (Exception e) {
            root = Double.NaN;
        }
    }

    // neither Brent nor Newton worked
    return Double.NaN;
}
From source file: ml.shifu.shifu.core.dtrain.nn.NNWorker.java

@Override
public void load(GuaguaWritableAdapter<LongWritable> currentKey, GuaguaWritableAdapter<Text> currentValue,
        WorkerContext<NNParams, NNParams> workerContext) {
    super.count += 1;
    if ((super.count) % 5000 == 0) {
        LOG.info("Read {} records.", super.count);
    }

    float[] inputs = new float[super.featureInputsCnt];
    float[] ideal = new float[super.outputNodeCount];

    if (super.isDry) {
        // dry train, use empty data.
        addDataPairToDataSet(0,
                new BasicFloatMLDataPair(new BasicFloatMLData(inputs), new BasicFloatMLData(ideal)));
        return;
    }

    long hashcode = 0;
    float significance = 1f;

    // use guava Splitter to iterate only once
    // use NNConstants.NN_DEFAULT_COLUMN_SEPARATOR to replace getModelConfig().getDataSetDelimiter(), super follows
    // the function in akka mode.
    int index = 0, inputsIndex = 0, outputIndex = 0;

    String[] fields = Lists.newArrayList(this.splitter.split(currentValue.getWritable().toString()))
            .toArray(new String[0]);

    int pos = 0;
    for (pos = 0; pos < fields.length;) {
        String input = fields[pos];
        // check here to avoid bad performance in failed NumberFormatUtils.getFloat(input, 0f)
        float floatValue = input.length() == 0 ? 0f : NumberFormatUtils.getFloat(input, 0f);
        // no idea about why NaN in input data, we should process it as missing value TODO, according to norm type
        floatValue = (Float.isNaN(floatValue) || Double.isNaN(floatValue)) ? 0f : floatValue;

        if (pos == fields.length - 1) {
            // do we need to check if not weighted directly set to 1f; if such logic non-weight at first, then
            // weight, how to process???
            if (StringUtils.isBlank(modelConfig.getWeightColumnName())) {
                significance = 1f;
                // break here if we reach weight column which is last column
                break;
            }
            // check here to avoid bad performance in failed NumberFormatUtils.getFloat(input, 1f)
            significance = input.length() == 0 ? 1f : NumberFormatUtils.getFloat(input, 1f);
            // if invalid weight, set it to 1f and warning in log
            if (Float.compare(significance, 0f) < 0) {
                LOG.warn("The {} record in current worker weight {} is less than 0f, it is invalid, set it to 1.",
                        count, significance);
                significance = 1f;
            }
            // the last field is significance, break here
            break;
        } else {
            ColumnConfig columnConfig = super.columnConfigList.get(index);
            if (columnConfig != null && columnConfig.isTarget()) {
                if (isLinearTarget || modelConfig.isRegression()) {
                    ideal[outputIndex++] = floatValue;
                } else {
                    if (modelConfig.getTrain().isOneVsAll()) {
                        // if one vs all, set correlated idea value according to trainerId which means in trainer
                        // with id 0, target 0 is treated with 1, other are 0. Such target value are set to index of
                        // tags like [0, 1, 2, 3] compared with ["a", "b", "c", "d"]
                        ideal[outputIndex++] = Float.compare(floatValue, trainerId) == 0 ? 1f : 0f;
                    } else {
                        if (modelConfig.getTags().size() == 2) {
                            // if only 2 classes, output node is 1 node. if target = 0 means 0 is the index for
                            // positive prediction, set positive to 1 and negative to 0
                            int ideaIndex = (int) floatValue;
                            ideal[0] = ideaIndex == 0 ? 1f : 0f;
                        } else {
                            // for multiple classification
                            int ideaIndex = (int) floatValue;
                            ideal[ideaIndex] = 1f;
                        }
                    }
                }
                pos++;
            } else {
                if (subFeatureSet.contains(index)) {
                    if (columnConfig.isMeta() || columnConfig.isForceRemove()) {
                        // it shouldn't happen here
                        pos += 1;
                    } else if (columnConfig != null && columnConfig.isNumerical()
                            && modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ONEHOT)) {
                        for (int k = 0; k < columnConfig.getBinBoundary().size() + 1; k++) {
                            String tval = fields[pos];
                            // check here to avoid bad performance in failed NumberFormatUtils.getFloat(input, 0f)
                            float fval = tval.length() == 0 ? 0f : NumberFormatUtils.getFloat(tval, 0f);
                            // no idea about why NaN in input data, we should process it as missing value TODO,
                            // according to norm type
                            fval = (Float.isNaN(fval) || Double.isNaN(fval)) ? 0f : fval;
                            inputs[inputsIndex++] = fval;
                            pos++;
                        }
                    } else if (columnConfig != null && columnConfig.isCategorical()
                            && (modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ZSCALE_ONEHOT)
                                    || modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ONEHOT))) {
                        for (int k = 0; k < columnConfig.getBinCategory().size() + 1; k++) {
                            String tval = fields[pos];
                            // check here to avoid bad performance in failed NumberFormatUtils.getFloat(input, 0f)
                            float fval = tval.length() == 0 ? 0f : NumberFormatUtils.getFloat(tval, 0f);
                            // no idea about why NaN in input data, we should process it as missing value TODO,
                            // according to norm type
                            fval = (Float.isNaN(fval) || Double.isNaN(fval)) ? 0f : fval;
                            inputs[inputsIndex++] = fval;
                            pos++;
                        }
                    } else {
                        inputs[inputsIndex++] = floatValue;
                        pos++;
                    }
                    hashcode = hashcode * 31 + Double.valueOf(floatValue).hashCode();
                } else {
                    if (!CommonUtils.isToNormVariable(columnConfig, hasCandidates, modelConfig.isRegression())) {
                        pos += 1;
                    } else if (columnConfig.isNumerical()
                            && modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ONEHOT)
                            && columnConfig.getBinBoundary() != null && columnConfig.getBinBoundary().size() > 0) {
                        pos += (columnConfig.getBinBoundary().size() + 1);
                    } else if (columnConfig.isCategorical()
                            && (modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ZSCALE_ONEHOT)
                                    || modelConfig.getNormalizeType().equals(ModelNormalizeConf.NormType.ONEHOT))
                            && columnConfig.getBinCategory().size() > 0) {
                        pos += (columnConfig.getBinCategory().size() + 1);
                    } else {
                        pos += 1;
                    }
                }
            }
        }
        index += 1;
    }

    if (index != this.columnConfigList.size() || pos != fields.length - 1) {
        throw new RuntimeException("Wrong data indexing. ColumnConfig index = " + index + ", while it should be "
                + columnConfigList.size() + ". Data Pos = " + pos + ", while it should be " + (fields.length - 1));
    }

    // output delimiter in norm can be set by user now and if user set a special one later changed, this exception
    // is helped to quick find such issue.
    if (inputsIndex != inputs.length) {
        String delimiter = workerContext.getProps().getProperty(Constants.SHIFU_OUTPUT_DATA_DELIMITER,
                Constants.DEFAULT_DELIMITER);
        throw new RuntimeException("Input length is inconsistent with parsing size. Input original size: "
                + inputs.length + ", parsing size:" + inputsIndex + ", delimiter:" + delimiter + ".");
    }

    // sample negative only logic here
    if (modelConfig.getTrain().getSampleNegOnly()) {
        if (this.modelConfig.isFixInitialInput()) {
            // if fixInitialInput, sample hashcode in 1-sampleRate range out if negative records
            int startHashCode = (100 / this.modelConfig.getBaggingNum()) * this.trainerId;
            // here BaggingSampleRate means how many data will be used in training and validation, if it is 0.8, we
            // should take 1-0.8 to check endHashCode
            int endHashCode = startHashCode
                    + Double.valueOf((1d - this.modelConfig.getBaggingSampleRate()) * 100).intValue();
            if ((modelConfig.isRegression()
                    || (modelConfig.isClassification() && modelConfig.getTrain().isOneVsAll())) // regression or onevsall
                    && (int) (ideal[0] + 0.01d) == 0 // negative record
                    && isInRange(hashcode, startHashCode, endHashCode)) {
                return;
            }
        } else {
            // if not fixed initial input, and for regression or onevsall multiple classification (regression also).
            // if negative record
            if ((modelConfig.isRegression()
                    || (modelConfig.isClassification() && modelConfig.getTrain().isOneVsAll())) // regression or onevsall
                    && (int) (ideal[0] + 0.01d) == 0 // negative record
                    && Double.compare(super.sampelNegOnlyRandom.nextDouble(),
                            this.modelConfig.getBaggingSampleRate()) >= 0) {
                return;
            }
        }
    }

    FloatMLDataPair pair = new BasicFloatMLDataPair(new BasicFloatMLData(inputs), new BasicFloatMLData(ideal));

    // up sampling logic, just add more weights while bagging sampling rate is still not changed
    if (modelConfig.isRegression() && isUpSampleEnabled() && Double.compare(ideal[0], 1d) == 0) {
        // Double.compare(ideal[0], 1d) == 0 means positive tags; sample + 1 to avoid sample count to 0
        pair.setSignificance(significance * (super.upSampleRng.sample() + 1));
    } else {
        pair.setSignificance(significance);
    }

    boolean isValidation = false;
    if (workerContext.getAttachment() != null && workerContext.getAttachment() instanceof Boolean) {
        isValidation = (Boolean) workerContext.getAttachment();
    }

    boolean isInTraining = addDataPairToDataSet(hashcode, pair, isValidation);

    // do bagging sampling only for training data
    if (isInTraining) {
        float subsampleWeights = sampleWeights(pair.getIdealArray()[0]);
        if (isPositive(pair.getIdealArray()[0])) {
            this.positiveSelectedTrainCount += subsampleWeights * 1L;
        } else {
            this.negativeSelectedTrainCount += subsampleWeights * 1L;
        }
        // set weights to significance, if 0, significance will be 0, that is bagging sampling
        pair.setSignificance(pair.getSignificance() * subsampleWeights);
    } else {
        // for validation data, according bagging sampling logic, we may need to sampling validation data set, while
        // validation data set are only used to compute validation error, not to do real sampling is ok.
    }
}
From source file: com.sun.japex.report.ChartGenerator.java

/**
 * Create a chart for a single mean across all drivers.
 */
public JFreeChart createTrendChart(MeanMode mean) {
    DefaultCategoryDataset dataset = new DefaultCategoryDataset();

    final int size = _reports.size();
    for (int i = 0; i < size; i++) {
        TestSuiteReport report = _reports.get(i);
        SimpleDateFormat formatter = _dateFormatter;

        // If previous or next are on the same day, include time
        if (i > 0 && onSameDate(report, _reports.get(i - 1))) {
            formatter = _dateTimeFormatter;
        }
        if (i + 1 < size && onSameDate(report, _reports.get(i + 1))) {
            formatter = _dateTimeFormatter;
        }

        List<TestSuiteReport.Driver> drivers = report.getDrivers();
        for (TestSuiteReport.Driver driver : drivers) {
            double value = driver.getResult(mean);
            if (!Double.isNaN(value)) {
                dataset.addValue(driver.getResult(MeanMode.ARITHMETIC), driver.getName(),
                        formatter.format(report.getDate().getTime()));
            }
        }
    }

    JFreeChart chart = ChartFactory.createLineChart(mean.toString(), "", "", dataset, PlotOrientation.VERTICAL,
            true, true, false);
    configureLineChart(chart);
    chart.setAntiAlias(true);
    return chart;
}
From source file: com.ning.metrics.collector.util.Stats.java

/**
 * 90th percentile
 *
 * @return 90th percentile
 */
@Managed
@SuppressWarnings("unused")
public double getMillisTP90() {
    double percentile = millisStats.getPercentile(90);
    return Double.isNaN(percentile) ? 0.0 : percentile;
}
From source file: marytts.tools.voiceimport.HalfPhoneUnitLabelComputer.java

@Override
protected List<Double> getMidTimes(List<String> labels, List<Double> endTimes) {
    assert labels.size() == endTimes.size();

    List<Double> midTimes = new ArrayList<Double>(endTimes.size());
    double startTime = 0;
    for (int i = 0; i < labels.size(); i++) {
        String label = labels.get(i);
        double endTime = endTimes.get(i);

        boolean isTransient = false;
        double peakTime = Double.NaN;
        if (energyBasedTransientSplitting) {
            try {
                Allophone allophone = db.getAllophoneSet().getAllophone(label);
                isTransient = allophone.isPlosive() || allophone.isAffricate();
                if (isTransient) {
                    peakTime = getEnergyPeak(startTime, endTime);
                }
            } catch (NullPointerException e) {
                // ignore for now
            } catch (IOException e) {
                // ignore for now
            }
        }

        double midTime;
        if (isTransient && !Double.isNaN(peakTime)) {
            midTime = peakTime;
        } else {
            midTime = (startTime + endTime) / 2;
        }
        midTimes.add(midTime);

        startTime = endTime;
    }
    return midTimes;
}
From source file: com.cloudera.oryx.rdf.common.tree.DecisionTree.java

private static TreeNode build(ExampleSet examples, int buildAtDepth, int minNodeSize, double minInfoGainNats,
        int featuresToTry, int numFeatures, int suggestedMaxSplitCandidates, int maxDepth,
        RandomGenerator random) {
    if (buildAtDepth >= maxDepth - 1 || examples.getExamples().size() < minNodeSize) {
        return new TerminalNode(Prediction.buildPrediction(examples));
    }

    double bestGain = Double.NEGATIVE_INFINITY;
    Decision bestDecision = null;

    for (int featureNumber : randomFeatures(examples, featuresToTry, numFeatures, random)) {
        Iterable<Decision> decisions = Decision.decisionsFromExamples(examples, featureNumber,
                suggestedMaxSplitCandidates);
        Pair<Decision, Double> decisionAndGain = Information.bestGain(decisions, examples);
        if (decisionAndGain != null) {
            double gain = decisionAndGain.getSecond();
            if (gain > bestGain) {
                bestGain = gain;
                bestDecision = decisionAndGain.getFirst();
            }
        }
    }

    if (Double.isNaN(bestGain) || bestGain < minInfoGainNats) {
        return new TerminalNode(Prediction.buildPrediction(examples));
    }

    bestDecision.setInformationGain(bestGain);

    ExampleSet[] negPosSplit = examples.split(bestDecision);
    examples = null; // For GC?

    TreeNode left = build(negPosSplit[0], buildAtDepth + 1, minNodeSize, minInfoGainNats, featuresToTry,
            numFeatures, suggestedMaxSplitCandidates, maxDepth, random);
    TreeNode right = build(negPosSplit[1], buildAtDepth + 1, minNodeSize, minInfoGainNats, featuresToTry,
            numFeatures, suggestedMaxSplitCandidates, maxDepth, random);

    return new DecisionNode(bestDecision, left, right);
}