List of usage examples for java.lang.Double.NaN
public static final double NaN
A constant holding a Not-a-Number (NaN) value of type double.
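NaN has unusual comparison semantics that the examples below all rely on: it is the only double value that is not equal to itself, so code must test with Double.isNaN rather than ==. A minimal self-contained sketch (not taken from any of the source files below):

public class NaNDemo {
    public static void main(String[] args) {
        double nan = Double.NaN;

        // NaN is never ==-equal to anything, including itself
        System.out.println(nan == nan);   // false
        System.out.println(nan != nan);   // true

        // Double.isNaN is the reliable test; Double.compare treats
        // NaN as equal to itself, which sorted collections rely on
        System.out.println(Double.isNaN(nan));              // true
        System.out.println(Double.compare(nan, nan) == 0);  // true

        // NaN propagates through floating-point arithmetic
        System.out.println(Double.isNaN(nan + 1.0));        // true
    }
}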
From source file:com.rapidminer.operator.validation.significance.TTestSignificanceTestOperator.java
@Override
public SignificanceTestResult performSignificanceTest(PerformanceVector[] allVectors, double alpha) {
    double[][] resultMatrix = new double[allVectors.length][allVectors.length];
    for (int i = 0; i < allVectors.length; i++) {
        // fill lower triangle with NaN --> empty in result string
        for (int j = 0; j < i + 1; j++) {
            resultMatrix[i][j] = Double.NaN;
        }
        for (int j = i + 1; j < allVectors.length; j++) {
            resultMatrix[i][j] = getProbability(allVectors[i].getMainCriterion(), allVectors[j].getMainCriterion());
        }
    }
    return new TTestSignificanceTestResult(allVectors, resultMatrix, alpha);
}
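The snippet uses NaN as a sentinel for "no value": the lower triangle (including the diagonal) of the pairwise t-test matrix carries no probability, and NaN cells render as empty in the result string. A hypothetical consumer of such a matrix (formatMatrix is made up for illustration) would skip the NaN cells the same way:

// Prints only the filled upper triangle; NaN-marked cells stay blank.
static String formatMatrix(double[][] m) {
    StringBuilder sb = new StringBuilder();
    for (double[] row : m) {
        for (double v : row) {
            sb.append(Double.isNaN(v) ? "        " : String.format("%8.4f", v));
        }
        sb.append('\n');
    }
    return sb.toString();
}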
From source file:com.itemanalysis.psychometrics.irt.estimation.ItemFitG2.java
public void compute() {
    int validRowCount = 0;
    initializeExpectedFrequencies();
    condenseTable();

    double r = 0;
    G2 = 0;
    double expectedFrequency = 0;
    for (int i = 0; i < numberOfBins; i++) {
        if (validRow[i]) {
            for (int j = 0; j < numberOfCategories; j++) {
                r = table[i][j] + .001; // add small amount to avoid taking log of zero
                expectedFrequency = expectedValues[i][j] + .001; // add small amount to avoid taking log of zero
                G2 += r * Math.log(r / expectedFrequency);
            }
            validRowCount++;
        }
    }
    G2 = G2 * 2.0;
    dfG2 = ((double) validRowCount) * ((double) numberOfCategories - 1.0);

    try {
        ChiSquaredDistribution chiSquaredDistribution = new ChiSquaredDistribution(dfG2);
        pvalueG2 = 1.0 - chiSquaredDistribution.cumulativeProbability(G2);
    } catch (Exception ex) {
        pvalueG2 = Double.NaN;
    }
}
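Here NaN marks a p-value that could not be computed: Commons Math's ChiSquaredDistribution constructor rejects a non-positive degrees-of-freedom value, and the catch block converts that failure into Double.NaN instead of propagating the exception. A caller would then guard on the result; a minimal sketch, where getPValue() is a hypothetical accessor for the pvalueG2 field:

double p = itemFit.getPValue(); // hypothetical accessor, not shown in the class above
if (Double.isNaN(p)) {
    System.out.println("G2 item fit: p-value unavailable (bad degrees of freedom)");
} else {
    System.out.printf("G2 item fit: p = %.4f%n", p);
}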
From source file:com.itemanalysis.psychometrics.polycor.PolychoricLogLikelihoodML.java
public void chiSquare(double fmin) {
    df = getDf();
    if (df <= 0.0) {
        probChiSquare = Double.NaN;
    } else {
        ChiSquaredDistribution cs = new ChiSquaredDistribution(df);
        double n = 0.0;
        double sum = 0.0;
        for (int i = 0; i < nrow; i++) {
            for (int j = 0; j < ncol; j++) {
                n += data[i][j];
            }
        }
        for (int i = 0; i < nrow; i++) {
            for (int j = 0; j < ncol; j++) {
                sum += Math.log((data[i][j] + 1e-6) / n) * data[i][j];
            }
        }
        chiSquare = 2.0 * (fmin + sum);
        probChiSquare = 1.0 - cs.cumulativeProbability(chiSquare);
    }
}
From source file:com.frank.search.solr.core.schema.SolrSchemaWriter.java
Double retrieveSchemaVersion(String collectionName) {
    try {
        SolrJsonResponse response = SolrSchemaRequest.version().process(factory.getSolrClient(collectionName));
        JsonNode node = response.getNode("version");
        return node != null ? node.asDouble() : Double.NaN;
    } catch (SolrServerException e) {
        EXCEPTION_TRANSLATOR.translateExceptionIfPossible(new RuntimeException(e));
    } catch (IOException e) {
        EXCEPTION_TRANSLATOR.translateExceptionIfPossible(new RuntimeException(e));
    } catch (SolrException e) {
        EXCEPTION_TRANSLATOR.translateExceptionIfPossible(new RuntimeException(e));
    }
    return Double.NaN;
}
From source file:org.jfree.data.RangeTest.java
/**
 * Tests the constrain() method for various values.
 */
@Test
public void testConstrain() {
    Range r1 = new Range(0.0, 1.0);
    double d = r1.constrain(0.5);
    assertEquals(0.5, d, 0.0000001);
    d = r1.constrain(0.0);
    assertEquals(0.0, d, 0.0000001);
    d = r1.constrain(1.0);
    assertEquals(1.0, d, 0.0000001);
    d = r1.constrain(-1.0);
    assertEquals(0.0, d, 0.0000001);
    d = r1.constrain(2.0);
    assertEquals(1.0, d, 0.0000001);
    d = r1.constrain(Double.POSITIVE_INFINITY);
    assertEquals(1.0, d, 0.0000001);
    d = r1.constrain(Double.NEGATIVE_INFINITY);
    assertEquals(0.0, d, 0.0000001);
    d = r1.constrain(Double.NaN);
    assertTrue(Double.isNaN(d));
}
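The final assertion uses Double.isNaN(d) rather than an equality check because NaN == NaN is false. An assertEquals-based alternative also works in current JUnit 4 releases, since the double overload compares via Double.compare, which treats NaN as equal to itself, but isNaN states the intent more directly:

// Equivalent NaN check (JUnit 4 assumed, matching the @Test above);
// Double.compare(NaN, NaN) == 0, so the delta overload accepts it.
assertEquals(Double.NaN, r1.constrain(Double.NaN), 0.0);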
From source file:dr.inference.distribution.GammaDistributionModel.java
public double quantile(double y) {
    try {
        return (new GammaDistributionImpl(getShape(), getScale())).inverseCumulativeProbability(y) + offset;
    } catch (MathException e) {
        return Double.NaN;
    }
}
From source file:org.kalypso.ogc.sensor.diagview.jfreechart.CurveDataset.java
/**
 * @see org.jfree.data.xy.XYDataset#getXValue(int, int)
 */
@Override
public double getXValue(final int series, final int item) {
    final Number x = getX(series, item);
    return x == null ? Double.NaN : x.doubleValue();
}
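Mapping a missing Number to NaN spares every caller a null check, and JFreeChart renderers generally treat NaN as a missing point. A hypothetical consumer (meanX is made up for illustration) can apply the same isNaN filter when aggregating a series:

// Averages the x-values of one series, skipping items the
// dataset reported as NaN (i.e. missing).
static double meanX(org.jfree.data.xy.XYDataset dataset, int series) {
    double sum = 0.0;
    int n = 0;
    for (int item = 0; item < dataset.getItemCount(series); item++) {
        double x = dataset.getXValue(series, item);
        if (!Double.isNaN(x)) {
            sum += x;
            n++;
        }
    }
    return n == 0 ? Double.NaN : sum / n;
}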
From source file:org.springframework.data.solr.core.schema.SolrSchemaWriter.java
Double retrieveSchemaVersion(String collectionName) {
    try {
        SolrJsonResponse response = SolrSchemaRequest.version().process(factory.getSolrServer(collectionName));
        JsonNode node = response.getNode("version");
        return node != null ? node.asDouble() : Double.NaN;
    } catch (SolrServerException e) {
        EXCEPTION_TRANSLATOR.translateExceptionIfPossible(new RuntimeException(e));
    } catch (IOException e) {
        EXCEPTION_TRANSLATOR.translateExceptionIfPossible(new RuntimeException(e));
    } catch (SolrException e) {
        EXCEPTION_TRANSLATOR.translateExceptionIfPossible(new RuntimeException(e));
    }
    return Double.NaN;
}
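This is the same pattern as the com.frank.search variant above: any failure to read the schema version collapses to Double.NaN after the exception translator has had a chance to run. A hypothetical caller (the collection name is made up) would branch on that sentinel:

Double version = retrieveSchemaVersion("collection1"); // hypothetical collection name
if (version.isNaN()) {
    // lookup failed or the response carried no "version" node
    throw new IllegalStateException("could not determine Solr schema version");
}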
From source file:com.clust4j.algo.KMedoids.java
@Override
protected KMedoids fit() {
    synchronized (fitLock) {
        if (null != labels) // already fit
            return this;

        final LogTimer timer = new LogTimer();
        final double[][] X = data.getData();
        final double nan = Double.NaN;

        // Corner case: K = 1 or all singular
        if (1 == k) {
            labelFromSingularK(X);
            fitSummary.add(new Object[] { iter, converged,
                    tss, // tss
                    tss, // avg per cluster
                    tss, // wss
                    nan, // bss (none)
                    timer.wallTime() });
            sayBye(timer);
            return this;
        }

        // We do this in KMedoids and not KMeans, because KMedoids uses
        // real points as medoids and not means for centroids, thus
        // the recomputation of distances is unnecessary with the dist mat
        dist_mat = Pairwise.getDistance(X, getSeparabilityMetric(), true, false);
        info("distance matrix computed in " + timer.toString());

        // Initialize labels
        medoid_indices = init_centroid_indices;

        ClusterAssignments clusterAssignments;
        MedoidReassignmentHandler rassn;
        int[] newMedoids = medoid_indices;

        // Cost vars
        double bestCost = Double.POSITIVE_INFINITY, maxCost = Double.NEGATIVE_INFINITY,
                avgCost = Double.NaN, wss_sum = nan;

        // Iterate while the cost decreases:
        boolean convergedFromCost = false; // from cost or system changes?
        boolean configurationChanged = true;
        while (configurationChanged && iter < maxIter) {

            /*
             * 1. In each cluster, make the point that minimizes
             * the sum of distances within the cluster the medoid
             */
            try {
                clusterAssignments = assignClosestMedoid(newMedoids);
            } catch (IllegalClusterStateException ouch) {
                exitOnBadDistanceMetric(X, timer);
                return this;
            }

            /*
             * 1.5 The entries are not 100% equal, so we can (re)assign medoids...
             */
            try {
                rassn = new MedoidReassignmentHandler(clusterAssignments);
            } catch (IllegalClusterStateException ouch) {
                exitOnBadDistanceMetric(X, timer);
                return this;
            }

            /*
             * 1.75 This happens in the case of bad kernels that cause
             * infinities to propagate... we can't segment the input
             * space and need to just return a single cluster.
             */
            if (rassn.new_clusters.size() == 1) {
                this.k = 1;
                warn("(dis)similarity metric cannot partition space without propagating Infs. Returning one cluster");

                labelFromSingularK(X);
                fitSummary.add(new Object[] { iter, converged,
                        tss, // tss
                        tss, // avg per cluster
                        tss, // wss
                        nan, // bss (none)
                        timer.wallTime() });
                sayBye(timer);
                return this;
            }

            /*
             * 2. Reassign each point to the cluster defined by the
             * closest medoid determined in the previous step.
             */
            newMedoids = rassn.reassignedMedoidIdcs;

            /*
             * 2.5 Determine whether configuration changed
             */
            boolean lastIteration = VecUtils.equalsExactly(newMedoids, medoid_indices);

            /*
             * 3. Update the costs
             */
            converged = lastIteration || (convergedFromCost = FastMath.abs(wss_sum - bestCost) < tolerance);
            double tmp_wss_sum = rassn.new_clusters.total_cst;
            double tmp_bss = tss - tmp_wss_sum;

            // Check whether greater than max
            if (tmp_wss_sum > maxCost)
                maxCost = tmp_wss_sum;

            if (tmp_wss_sum < bestCost) {
                bestCost = wss_sum = tmp_wss_sum;
                labels = rassn.new_clusters.assn; // will be medoid idcs until encoded at end
                med_to_wss = rassn.new_clusters.costs;
                centroids = rassn.centers;
                medoid_indices = newMedoids;
                bss = tmp_bss;

                // get avg cost
                avgCost = wss_sum / (double) k;
            }

            if (converged) {
                reorderLabelsAndCentroids();
            }

            /*
             * 3.5 If this is the last one, it'll show the wss and bss
             */
            fitSummary.add(new Object[] { iter, converged, tss, avgCost, wss_sum, bss, timer.wallTime() });

            iter++;
            configurationChanged = !converged;
        }

        if (!converged)
            warn("algorithm did not converge");
        else
            info("algorithm converged due to " + (convergedFromCost ? "cost minimization" : "harmonious state"));

        // wrap things up, create summary..
        sayBye(timer);
        return this;
    }
}
From source file:com.ironiacorp.statistics.r.type.LinearModelSummary.java
/**
 * @return the p-value for the intercept term, or Double.NaN if it is not available
 */
public Double getInterceptP() {
    if (coefficients != null) {
        if (coefficients.hasRow(INTERCEPT_COEFFICIENT_NAME_IN_R)) {
            return coefficients.getByKeys(INTERCEPT_COEFFICIENT_NAME_IN_R, "Pr(>|t|)");
        } else if (coefficients.rows() == 1) {
            /*
             * This is a bit of a kludge. When we use lm.fit instead of lm, we end up with a somewhat screwy
             * coefficient matrix in the case of a one-sample t-test, and R puts in x1 (I think it starts as 1 and it
             * prepends the x).
             */
            assert coefficients.getRowName(0).equals("x1");
            return coefficients.getByKeys(coefficients.getRowName(0), "Pr(>|t|)");
        }
    }
    return Double.NaN;
}
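Because a missing intercept is reported as NaN rather than null, callers must test with Double.isNaN before comparing against a significance threshold; a plain p < 0.05 check on NaN is simply false, which silently conflates "not significant" with "not available". A minimal sketch, with the summary variable assumed:

Double p = summary.getInterceptP(); // summary is a hypothetical LinearModelSummary instance
if (Double.isNaN(p)) {
    System.out.println("intercept p-value not available");
} else {
    System.out.println("intercept significant: " + (p < 0.05));
}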