List of usage examples for java.lang.Double.isInfinite
public static boolean isInfinite(double v)
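A minimal standalone sketch (not taken from any of the projects below) of what isInfinite reports for the special double values; note that it returns false for NaN, which is why the examples below usually pair it with Double.isNaN:

public class IsInfiniteDemo {
    public static void main(String[] args) {
        System.out.println(Double.isInfinite(1.0 / 0.0));                // true: POSITIVE_INFINITY
        System.out.println(Double.isInfinite(Double.NEGATIVE_INFINITY)); // true
        System.out.println(Double.isInfinite(0.0 / 0.0));                // false: NaN is not infinite
        System.out.println(Double.isInfinite(Double.MAX_VALUE));         // false: finite, just large
    }
}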
From source file:org.wso2.carbon.ml.core.spark.algorithms.SupervisedSparkModelBuilder.java
private boolean isValidWeights(Vector weights) {
    for (int i = 0; i < weights.size(); i++) {
        double d = weights.apply(i);
        if (Double.isNaN(d) || Double.isInfinite(d)) {
            return false;
        }
    }
    return true;
}
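On Java 8 and later the paired NaN/infinity test above can be collapsed into one call; a hypothetical variant of the same check (not from the WSO2 source):

private boolean isValidWeights(Vector weights) {
    for (int i = 0; i < weights.size(); i++) {
        // Double.isFinite(d) is true exactly when d is neither NaN nor infinite (Java 8+).
        if (!Double.isFinite(weights.apply(i))) {
            return false;
        }
    }
    return true;
}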
From source file:edu.cmu.tetrad.data.DataUtils.java
/**
 * Returns a simulation from the given covariance matrix, zero means.
 *
 * @param cov The variables and covariance matrix over the variables.
 * @return The simulated data.
 */
public static DataSet choleskySimulation(CovarianceMatrix cov) {
    System.out.println(cov);
    int sampleSize = cov.getSampleSize();
    List<Node> variables = cov.getVariables();
    DataSet dataSet = new ColtDataSet(sampleSize, variables);
    TetradMatrix _cov = cov.getMatrix().copy();
    TetradMatrix cholesky = MatrixUtils.choleskyC(_cov);
    System.out.println("Cholesky decomposition" + cholesky);

    // Simulate the data by repeatedly calling the Cholesky.exogenousData
    // method. Store only the data for the measured variables.
    for (int row = 0; row < sampleSize; row++) {
        // Step 1. Generate normal samples.
        double exoData[] = new double[cholesky.rows()];
        for (int i = 0; i < exoData.length; i++) {
            exoData[i] = RandomUtil.getInstance().nextNormal(0, 1);
        }

        // Step 2. Multiply by cholesky to get correct covariance.
        double point[] = new double[exoData.length];
        for (int i = 0; i < exoData.length; i++) {
            double sum = 0.0;
            for (int j = 0; j <= i; j++) {
                sum += cholesky.get(i, j) * exoData[j];
            }
            point[i] = sum;
        }

        double rowData[] = point;
        for (int col = 0; col < variables.size(); col++) {
            int index = variables.indexOf(variables.get(col));
            double value = rowData[index];
            if (Double.isNaN(value) || Double.isInfinite(value)) {
                System.out.println("Value out of range: " + value);
            }
            dataSet.setDouble(row, col, value);
        }
    }
    return dataSet;
}
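The inner loop above is just a lower-triangular matrix-vector product, point = L * z, where L is the Cholesky factor of the covariance matrix and z holds independent standard-normal draws; a minimal standalone sketch of that step with hypothetical values:

public class CholeskyStepDemo {
    public static void main(String[] args) {
        double[][] cholesky = { { 2.0, 0.0 }, { 0.6, 1.0 } }; // hypothetical lower-triangular factor
        double[] exoData = { 0.3, -1.2 };                     // hypothetical standard-normal draws
        double[] point = new double[exoData.length];
        for (int i = 0; i < point.length; i++) {
            double sum = 0.0;
            for (int j = 0; j <= i; j++) {
                sum += cholesky[i][j] * exoData[j];
            }
            point[i] = sum;                                   // one simulated row with covariance L * L^T
        }
        System.out.println(java.util.Arrays.toString(point)); // [0.6, -1.02]
    }
}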
From source file:org.eclipse.january.dataset.MathsTest.java
@Test
public void testDivision() {
    Dataset a, b, c = null, d = null;
    Complex zv = new Complex(-3.5, 0);
    final double dv = zv.getReal();
    long start;
    int n;
    int eCount = 0;

    for (String dn : classes.keySet()) {
        final int dtype = classes.get(dn);
        Random.seed(12735L);

        for (String en : classes.keySet()) {
            final int etype = classes.get(en);
            TestUtils.verbosePrintf("%s by %s, ", dn, en);

            n = 32;
            for (int i = 0; i < SITER; i++) {
                if (dtype < Dataset.ARRAYINT8) {
                    a = Random.randn(n).imultiply(100);
                    a = a.cast(dtype);
                } else {
                    Dataset[] aa = new Dataset[ISIZEA];
                    for (int j = 0; j < ISIZEA; j++) {
                        aa[j] = Random.randn(n).imultiply(100);
                    }
                    a = DatasetUtils.cast(aa, dtype);
                }
                if (etype < Dataset.ARRAYINT8) {
                    b = Random.randn(n).imultiply(100);
                    b = b.cast(etype);
                } else {
                    Dataset[] ab = new Dataset[ISIZEB];
                    for (int j = 0; j < ISIZEB; j++) {
                        ab[j] = Random.randn(n).imultiply(100);
                    }
                    b = DatasetUtils.cast(ab, etype);
                }

                start = -System.nanoTime();
                try {
                    c = Maths.divide(a, b);
                } catch (IllegalArgumentException e) {
                    TestUtils.verbosePrintf("Could not perform this operation: %s\n", e.getMessage());
                    eCount++;
                    continue;
                }
                start += System.nanoTime();
                double ntime = ((double) start) / c.getSize();

                d = DatasetFactory.zeros(c);
                start = -System.nanoTime();
                IndexIterator ita = a.getIterator();
                IndexIterator itb = b.getIterator();
                int j = 0;
                if ((dtype == Dataset.COMPLEX64 || dtype == Dataset.COMPLEX128)
                        && (etype == Dataset.COMPLEX64 || etype == Dataset.COMPLEX128)) {
                    final int is = d.getElementsPerItem();
                    while (ita.hasNext() && itb.hasNext()) {
                        d.setObjectAbs(j, ((Complex) a.getObjectAbs(ita.index))
                                .divide((Complex) b.getObjectAbs(itb.index)));
                        j += is;
                    }
                } else if ((dtype == Dataset.COMPLEX64 || dtype == Dataset.COMPLEX128)
                        && !(etype == Dataset.COMPLEX64 || etype == Dataset.COMPLEX128)) {
                    final int is = d.getElementsPerItem();
                    while (ita.hasNext() && itb.hasNext()) {
                        Complex z = (Complex) a.getObjectAbs(ita.index);
                        double br = b.getElementDoubleAbs(itb.index);
                        Complex zr = z.divide(br);
                        if (br == 0) { // CM's implementation is different to NumPy's
                            zr = new Complex(z.getReal() != 0 ? z.getReal() / br : zr.getReal(),
                                    z.getImaginary() != 0 ? z.getImaginary() / br : zr.getImaginary());
                        }
                        d.setObjectAbs(j, zr);
                        j += is;
                    }
                } else if (!(dtype == Dataset.COMPLEX64 || dtype == Dataset.COMPLEX128)
                        && (etype == Dataset.COMPLEX64 || etype == Dataset.COMPLEX128)) {
                    final int is = d.getElementsPerItem();
                    while (ita.hasNext() && itb.hasNext()) {
                        d.setObjectAbs(j, new Complex(a.getElementDoubleAbs(ita.index), 0)
                                .divide((Complex) b.getObjectAbs(itb.index)));
                        j += is;
                    }
                } else {
                    if (dtype < Dataset.ARRAYINT8 && etype < Dataset.ARRAYINT8) {
                        if (d.hasFloatingPointElements()) {
                            while (ita.hasNext() && itb.hasNext()) {
                                d.setObjectAbs(j++, ((Number) a.getObjectAbs(ita.index)).doubleValue()
                                        / ((Number) b.getObjectAbs(itb.index)).doubleValue());
                            }
                        } else {
                            while (ita.hasNext() && itb.hasNext()) {
                                double bv = ((Number) b.getObjectAbs(itb.index)).doubleValue();
                                d.setObjectAbs(j++,
                                        bv == 0 ? 0 : ((Number) a.getObjectAbs(ita.index)).doubleValue() / bv);
                            }
                        }
                    } else {
                        final double[] answer = new double[MAXISIZE];
                        final int is = d.getElementsPerItem();
                        if (a.getElementsPerItem() < is) {
                            while (ita.hasNext() && itb.hasNext()) {
                                final double xa = a.getElementDoubleAbs(ita.index);
                                if (d.hasFloatingPointElements()) {
                                    for (int k = 0; k < ISIZEB; k++) {
                                        answer[k] = xa / b.getElementDoubleAbs(itb.index + k);
                                    }
                                } else {
                                    for (int k = 0; k < ISIZEB; k++) {
                                        final double v = xa / b.getElementDoubleAbs(itb.index + k);
                                        answer[k] = Double.isInfinite(v) || Double.isNaN(v) ? 0 : v;
                                    }
                                }
                                d.setObjectAbs(j, answer);
                                j += is;
                            }
                        } else if (b.getElementsPerItem() < is) {
                            while (ita.hasNext() && itb.hasNext()) {
                                final double xb = b.getElementDoubleAbs(itb.index);
                                if (d.hasFloatingPointElements()) {
                                    for (int k = 0; k < ISIZEA; k++) {
                                        answer[k] = a.getElementDoubleAbs(ita.index + k) / xb;
                                    }
                                } else {
                                    if (xb == 0) {
                                        for (int k = 0; k < ISIZEA; k++) {
                                            answer[k] = 0;
                                        }
                                    } else {
                                        for (int k = 0; k < ISIZEA; k++) {
                                            answer[k] = a.getElementDoubleAbs(ita.index + k) / xb;
                                        }
                                    }
                                }
                                d.setObjectAbs(j, answer);
                                j += is;
                            }
                        } else {
                            while (ita.hasNext() && itb.hasNext()) {
                                if (d.hasFloatingPointElements()) {
                                    double v;
                                    for (int k = 0; k < is; k++) {
                                        v = a.getElementDoubleAbs(ita.index + k) / b.getElementDoubleAbs(itb.index + k);
                                        answer[k] = Double.isInfinite(v) || Double.isNaN(v) ? 0 : v;
                                    }
                                } else {
                                    double v;
                                    for (int k = 0; k < is; k++) {
                                        v = a.getElementDoubleAbs(ita.index + k) / b.getElementDoubleAbs(itb.index + k);
                                        answer[k] = Double.isInfinite(v) || Double.isNaN(v) ? 0 : v;
                                    }
                                }
                                d.setObjectAbs(j, answer);
                                j += is;
                            }
                        }
                    }
                }
                start += System.nanoTime();
                double otime = ((double) start) / d.getSize();

                TestUtils.verbosePrintf("Time taken by div for %s: %s; %s (%.1f%%)\n", n, otime, ntime,
                        100. * (otime - ntime) / otime);
                checkDatasets(a, b, c, d);

                n *= SSTEP;
            }
        }

        Random.seed(12735L);
        n = 32;
        TestUtils.verbosePrintf("%s by constant, ", dn);
        for (int i = 0; i < SITER; i++) {
            if (dtype < Dataset.ARRAYINT8) {
                a = Random.randn(n);
                a.imultiply(100);
                a = a.cast(dtype);
            } else {
                Dataset[] aa = new Dataset[ISIZEA];
                for (int j = 0; j < ISIZEA; j++) {
                    aa[j] = Random.randn(n).imultiply(100);
                }
                a = DatasetUtils.cast(aa, dtype);
            }

            start = -System.nanoTime();
            try {
                c = Maths.divide(a, dv);
            } catch (IllegalArgumentException e) {
                TestUtils.verbosePrintf("Could not perform this operation: %s\n", e.getMessage());
                eCount++;
                continue;
            }
            start += System.nanoTime();
            double ntime = ((double) start) / c.getSize();

            d = DatasetFactory.zeros(c);
            start = -System.nanoTime();
            IndexIterator ita = a.getIterator();
            int j = 0;
            if (dtype == Dataset.COMPLEX64 || dtype == Dataset.COMPLEX128) {
                final int is = d.getElementsPerItem();
                while (ita.hasNext()) {
                    d.setObjectAbs(j, ((Complex) a.getObjectAbs(ita.index)).divide(zv));
                    j += is;
                }
            } else {
                if (dtype < Dataset.ARRAYINT8) {
                    while (ita.hasNext()) {
                        d.setObjectAbs(j++, ((Number) a.getObjectAbs(ita.index)).doubleValue() / dv);
                    }
                } else {
                    final double[] answer = new double[ISIZEA];
                    while (ita.hasNext()) {
                        for (int k = 0; k < ISIZEA; k++) {
                            answer[k] = a.getElementDoubleAbs(ita.index + k) / dv;
                        }
                        d.setObjectAbs(j, answer);
                        j += ISIZEA;
                    }
                }
            }
            if (d == null)
                break;
            start += System.nanoTime();
            double otime = ((double) start) / d.getSize();

            TestUtils.verbosePrintf("Time taken by div for %s: %s; %s (%.1f%%)\n", n, otime, ntime,
                    100. * (otime - ntime) / otime);
            checkDatasets(a, dv, c, d);

            n *= SSTEP;
        }

        Random.seed(12735L);
        n = 32;
        TestUtils.verbosePrintf("constant by %s, ", dn);
        for (int i = 0; i < SITER; i++) {
            if (dtype < Dataset.ARRAYINT8) {
                a = Random.randn(n);
                a.imultiply(100);
                a = a.cast(dtype);
            } else {
                Dataset[] aa = new Dataset[ISIZEA];
                for (int j = 0; j < ISIZEA; j++) {
                    aa[j] = Random.randn(n).imultiply(100);
                }
                a = DatasetUtils.cast(aa, dtype);
            }

            start = -System.nanoTime();
            try {
                c = Maths.divide(dv, a);
            } catch (IllegalArgumentException e) {
                TestUtils.verbosePrintf("Could not perform this operation: %s\n", e.getMessage());
                eCount++;
                continue;
            }
            start += System.nanoTime();
            double ntime = ((double) start) / c.getSize();

            d = DatasetFactory.zeros(c);
            start = -System.nanoTime();
            IndexIterator ita = a.getIterator();
            int j = 0;
            if (dtype == Dataset.COMPLEX64 || dtype == Dataset.COMPLEX128) {
                final int is = d.getElementsPerItem();
                while (ita.hasNext()) {
                    d.setObjectAbs(j, zv.divide((Complex) a.getObjectAbs(ita.index)));
                    j += is;
                }
            } else {
                if (dtype < Dataset.ARRAYINT8) {
                    while (ita.hasNext()) {
                        d.setObjectAbs(j++, dv / ((Number) a.getObjectAbs(ita.index)).doubleValue());
                    }
                } else {
                    final double[] answer = new double[ISIZEA];
                    while (ita.hasNext()) {
                        for (int k = 0; k < ISIZEA; k++) {
                            answer[k] = dv / a.getElementDoubleAbs(ita.index + k);
                        }
                        d.setObjectAbs(j, answer);
                        j += ISIZEA;
                    }
                }
            }
            if (d == null)
                break;
            start += System.nanoTime();
            double otime = ((double) start) / d.getSize();

            TestUtils.verbosePrintf("Time taken by div for %s: %s; %s (%.1f%%)\n", n, otime, ntime,
                    100. * (otime - ntime) / otime);
            checkDatasets(dv, a, c, d);

            n *= SSTEP;
        }
    }
    if (eCount > 0) {
        TestUtils.verbosePrintf("Number of exceptions caught: %d\n", eCount);
    }
}
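The reference loops above encode one convention repeatedly: when the output dataset has integer elements, a quotient that comes out infinite or NaN is stored as 0 instead of being propagated. A minimal standalone sketch of that rule (the helper name is hypothetical, not part of the January API):

// Hypothetical helper mirroring the convention of the reference loops above.
static long safeIntegerQuotient(double a, double b) {
    double v = a / b;
    return Double.isInfinite(v) || Double.isNaN(v) ? 0 : (long) v;
}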
From source file:org.egov.works.web.actions.tender.TenderNegotiationPDFGenerator.java
public double calculatPercentage(final double amt1, final double am2, final int type) {
    double percentage = 0.0;
    try {
        if (type == 1)
            percentage = (amt1 - am2) / amt1 * 100;
        else if (type == 2)
            percentage = (amt1 - am2) / am2 * 100;
        if (Double.isInfinite(percentage))
            percentage = 0.00;
    } catch (final Exception e) {
        percentage = 0.00;
        LOGGER.info("Exception while calculating totTenderNegMarketPer" + e);
    }
    return Math.abs(percentage);
}
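A zero denominator is what produces the infinity that the guard above resets, because dividing a non-zero double by 0.0 yields an infinity rather than throwing; a minimal sketch with hypothetical values:

public class PercentageGuardDemo {
    public static void main(String[] args) {
        double amt1 = 0.0, am2 = 150.0;
        double percentage = (amt1 - am2) / amt1 * 100;     // -Infinity: non-zero / 0.0
        System.out.println(Double.isInfinite(percentage)); // true
        if (Double.isInfinite(percentage))
            percentage = 0.00;
        System.out.println(Math.abs(percentage));          // 0.0
    }
}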
From source file:org.eclipse.january.dataset.Stats.java
/**
 * @param a dataset
 * @param axis
 * @param ignoreInvalids see {@link Dataset#max(int, boolean...)}
 * @return cumulative product of items along axis in dataset
 * @since 2.0
 */
public static Dataset cumulativeProduct(final Dataset a, int axis, final boolean... ignoreInvalids) {
    axis = a.checkAxis(axis);
    int dtype = a.getDType();
    int[] oshape = a.getShape();
    int alen = oshape[axis];
    oshape[axis] = 1;

    final boolean ignoreNaNs;
    final boolean ignoreInfs;
    if (a.hasFloatingPointElements()) {
        ignoreNaNs = ignoreInvalids != null && ignoreInvalids.length > 0 ? ignoreInvalids[0] : false;
        ignoreInfs = ignoreInvalids != null && ignoreInvalids.length > 1 ? ignoreInvalids[1] : ignoreNaNs;
    } else {
        ignoreNaNs = false;
        ignoreInfs = false;
    }

    Dataset result = DatasetFactory.zeros(a);
    PositionIterator pi = result.getPositionIterator(axis);
    int[] pos = pi.getPos();

    while (pi.hasNext()) {
        if (a.isComplex()) {
            double rv = 1, iv = 0;
            switch (dtype) {
            case Dataset.COMPLEX64:
                ComplexFloatDataset af = (ComplexFloatDataset) a;
                ComplexFloatDataset rf = (ComplexFloatDataset) result;
                for (int j = 0; j < alen; j++) {
                    if (!Double.isNaN(rv) || !Double.isNaN(iv)) {
                        pos[axis] = j;
                        final float r1 = af.getReal(pos);
                        final float i1 = af.getImag(pos);
                        if (ignoreNaNs && (Float.isNaN(r1) || Float.isNaN(i1))) {
                            continue;
                        }
                        if (ignoreInfs && (Float.isInfinite(r1) || Float.isInfinite(i1))) {
                            continue;
                        }
                        final double tv = r1 * rv - i1 * iv;
                        iv = r1 * iv + i1 * rv;
                        rv = tv;
                    }
                    rf.set((float) rv, (float) iv, pos);
                }
                break;
            case Dataset.COMPLEX128:
                ComplexDoubleDataset ad = (ComplexDoubleDataset) a;
                ComplexDoubleDataset rd = (ComplexDoubleDataset) result;
                for (int j = 0; j < alen; j++) {
                    if (!Double.isNaN(rv) || !Double.isNaN(iv)) {
                        pos[axis] = j;
                        final double r1 = ad.getReal(pos);
                        final double i1 = ad.getImag(pos);
                        if (ignoreNaNs && (Double.isNaN(r1) || Double.isNaN(i1))) {
                            continue;
                        }
                        if (ignoreInfs && (Double.isInfinite(r1) || Double.isInfinite(i1))) {
                            continue;
                        }
                        final double tv = r1 * rv - i1 * iv;
                        iv = r1 * iv + i1 * rv;
                        rv = tv;
                    }
                    rd.set(rv, iv, pos);
                }
                break;
            }
        } else {
            final int is;
            final long[] lresults;
            final double[] dresults;
            switch (dtype) {
            case Dataset.BOOL:
            case Dataset.INT8:
            case Dataset.INT16:
            case Dataset.INT32:
            case Dataset.INT64:
                long lresult = 1;
                for (int j = 0; j < alen; j++) {
                    pos[axis] = j;
                    lresult *= a.getInt(pos);
                    result.set(lresult, pos);
                }
                break;
            case Dataset.ARRAYINT8:
                is = a.getElementsPerItem();
                lresults = new long[is];
                for (int k = 0; k < is; k++) {
                    lresults[k] = 1;
                }
                for (int j = 0; j < alen; j++) {
                    pos[axis] = j;
                    final byte[] va = (byte[]) a.getObject(pos);
                    for (int k = 0; k < is; k++) {
                        lresults[k] *= va[k];
                    }
                    result.set(lresults, pos);
                }
                break;
            case Dataset.ARRAYINT16:
                is = a.getElementsPerItem();
                lresults = new long[is];
                for (int k = 0; k < is; k++) {
                    lresults[k] = 1;
                }
                for (int j = 0; j < alen; j++) {
                    pos[axis] = j;
                    final short[] va = (short[]) a.getObject(pos);
                    for (int k = 0; k < is; k++) {
                        lresults[k] *= va[k];
                    }
                    result.set(lresults, pos);
                }
                break;
            case Dataset.ARRAYINT32:
                is = a.getElementsPerItem();
                lresults = new long[is];
                for (int k = 0; k < is; k++) {
                    lresults[k] = 1;
                }
                for (int j = 0; j < alen; j++) {
                    pos[axis] = j;
                    final int[] va = (int[]) a.getObject(pos);
                    for (int k = 0; k < is; k++) {
                        lresults[k] *= va[k];
                    }
                    result.set(lresults, pos);
                }
                break;
            case Dataset.ARRAYINT64:
                is = a.getElementsPerItem();
                lresults = new long[is];
                for (int k = 0; k < is; k++) {
                    lresults[k] = 1;
                }
                for (int j = 0; j < alen; j++) {
                    pos[axis] = j;
                    final long[] va = (long[]) a.getObject(pos);
                    for (int k = 0; k < is; k++) {
                        lresults[k] *= va[k];
                    }
                    result.set(lresults, pos);
                }
                break;
            case Dataset.FLOAT32:
            case Dataset.FLOAT64:
                double dresult = 1.;
                for (int j = 0; j < alen; j++) {
                    if (!Double.isNaN(dresult)) {
                        pos[axis] = j;
                        final double x = a.getDouble(pos);
                        if (ignoreNaNs && Double.isNaN(x)) {
                            continue;
                        }
                        if (ignoreInfs && Double.isInfinite(x)) {
                            continue;
                        }
                        dresult *= x;
                    }
                    result.set(dresult, pos);
                }
                break;
            case Dataset.ARRAYFLOAT32:
            case Dataset.ARRAYFLOAT64:
                is = a.getElementsPerItem();
                CompoundDataset da = (CompoundDataset) a;
                double[] dvalues = new double[is];
                dresults = new double[is];
                for (int k = 0; k < is; k++) {
                    dresults[k] = 1.;
                }
                for (int j = 0; j < alen; j++) {
                    pos[axis] = j;
                    da.getDoubleArray(dvalues, pos);
                    boolean okay = true;
                    for (int k = 0; k < is; k++) {
                        final double val = dvalues[k];
                        if (ignoreNaNs && Double.isNaN(val)) {
                            okay = false;
                            break;
                        }
                        if (ignoreInfs && Double.isInfinite(val)) {
                            okay = false;
                            break;
                        }
                    }
                    if (okay) {
                        for (int k = 0; k < is; k++) {
                            dresults[k] *= dvalues[k];
                        }
                    }
                    result.set(dresults, pos);
                }
                break;
            }
        }
    }

    return result;
}
From source file:org.broadinstitute.gatk.utils.MathUtils.java
/**
 * Checks that the result is a well-formed probability
 *
 * @param result a supposedly well-formed probability value
 * @return true if result is really well formed
 */
public static boolean goodProbability(final double result) {
    return result >= 0.0 && result <= 1.0 && !Double.isInfinite(result) && !Double.isNaN(result);
}
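Strictly, the range check alone already rejects NaN (every comparison with NaN is false) and both infinities (which lie outside [0, 1]), so the explicit isInfinite/isNaN calls are defensive rather than required; a quick standalone check of that claim:

public class GoodProbabilityDemo {
    public static void main(String[] args) {
        double[] candidates = { 0.5, Double.NaN, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, -0.1, 1.1 };
        for (double p : candidates) {
            boolean rangeOnly = p >= 0.0 && p <= 1.0; // false for NaN and for both infinities too
            System.out.println(p + " -> " + rangeOnly);
        }
    }
}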
From source file:msi.gaml.operators.Maths.java
@operator(value = "is_finite", can_be_const = true, category = { IOperatorCategory.ARITHMETIC }) @doc(value = "Returns whether the argument is a finite number or not", examples = { @example(value = "is_finite(4.66)", equals = "true"), @example(value = "is_finite(#infinity)", equals = "false") }) public static Boolean is_finite(final Double d) { return !Double.isInfinite(d); }
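Note that !Double.isInfinite(d) is also true for NaN, so this operator reports NaN as finite; if NaN should count as non-finite, Java 8's Double.isFinite is the stricter test. A hypothetical variant, not part of the GAMA source:

// Hypothetical stricter operator: Double.isFinite is false for NaN as well as for both infinities.
public static Boolean is_finite_strict(final Double d) {
    return Double.isFinite(d);
}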
From source file:org.broadinstitute.gatk.utils.MathUtilsUnitTest.java
@Test
public void testDirichletMultinomial() {
    List<double[]> testAlleles = Arrays.asList(new double[] { 80, 240 }, new double[] { 1, 10000 },
            new double[] { 0, 500 }, new double[] { 5140, 20480 }, new double[] { 5000, 800, 200 },
            new double[] { 6, 3, 1000 }, new double[] { 100, 400, 300, 800 },
            new double[] { 8000, 100, 20, 80, 2 }, new double[] { 90, 20000, 400, 20, 4, 1280, 720, 1 });

    Assert.assertTrue(
            !Double.isInfinite(MathUtils.log10Gamma(1e-3)) && !Double.isNaN(MathUtils.log10Gamma(1e-3)));

    int[] numAlleleSampled = new int[] { 2, 5, 10, 20, 25 };

    for (double[] alleles : testAlleles) {
        for (int count : numAlleleSampled) {
            // test that everything sums to one. Generate all multinomial draws
            List<Double> likelihoods = new ArrayList<>(100000);
            NextCounts generator = new NextCounts(alleles.length, count);
            double maxLog = Double.MIN_VALUE;
            //List<String> countLog = new ArrayList<String>(200);
            while (generator.hasNext()) {
                int[] thisCount = generator.next();
                //countLog.add(Arrays.toString(thisCount));
                Double likelihood = MathUtils.dirichletMultinomial(addEpsilon(alleles), thisCount);
                Assert.assertTrue(!Double.isNaN(likelihood) && !Double.isInfinite(likelihood),
                        String.format("Likelihood for counts %s and nAlleles %d was %s",
                                Arrays.toString(thisCount), alleles.length, Double.toString(likelihood)));
                if (likelihood > maxLog)
                    maxLog = likelihood;
                likelihoods.add(likelihood);
            }
            //System.out.printf("%d likelihoods and max is (probability) %e\n",likelihoods.size(),Math.pow(10,maxLog));
            Assert.assertEquals(MathUtils.sumLog10(unwrap(likelihoods)), 1.0, 1e-7,
                    String.format("Counts %d and alleles %d have nLikelihoods %d. \n Counts: %s", count,
                            alleles.length, likelihoods.size(), "NODEBUG"/*,countLog*/));
        }
    }
}
From source file:org.apache.sysml.hops.codegen.opt.PlanSelectionFuseCostBasedV2.java
private double rGetPlanCosts(CPlanMemoTable memo, final Hop current, HashSet<VisitMarkCost> visited,
        PlanPartition part, InterestingPoint[] matPoints, boolean[] plan, HashMap<Long, Double> computeCosts,
        CostVector costsCurrent, TemplateType currentType, final double costBound) {
    final long currentHopId = current.getHopID();
    //memoization per hop id and cost vector to account for redundant
    //computation without double counting materialized results or compute
    //costs of complex operation DAGs within a single fused operator
    if (!visited.add(new VisitMarkCost(currentHopId,
            (costsCurrent == null || currentType == TemplateType.MAGG) ? -1 : costsCurrent.ID)))
        return 0; //already existing

    //open template if necessary, including memoization
    //under awareness of current plan choice
    MemoTableEntry best = null;
    boolean opened = (currentType == null);
    if (memo.contains(currentHopId)) {
        //note: this is the inner loop of plan enumeration and hence, we do not
        //use streams, lambda expressions, etc to avoid unnecessary overhead
        if (currentType == null) {
            for (MemoTableEntry me : memo.get(currentHopId))
                best = me.isValid() && hasNoRefToMatPoint(currentHopId, me, matPoints, plan)
                        && BasicPlanComparator.icompare(me, best) < 0 ? me : best;
            opened = true;
        } else {
            for (MemoTableEntry me : memo.get(currentHopId))
                best = (me.type == currentType || me.type == TemplateType.CELL)
                        && hasNoRefToMatPoint(currentHopId, me, matPoints, plan)
                        && TypedPlanComparator.icompare(me, best, currentType) < 0 ? me : best;
        }
    }

    //create new cost vector if opened, initialized with write costs
    CostVector costVect = !opened ? costsCurrent : new CostVector(getSize(current));
    double costs = 0;

    //add other roots for multi-agg template to account for shared costs
    if (opened && best != null && best.type == TemplateType.MAGG) {
        //account costs to first multi-agg root
        if (best.input1 == currentHopId)
            for (int i = 1; i < 3; i++) {
                if (!best.isPlanRef(i))
                    continue;
                costs += rGetPlanCosts(memo, memo.getHopRefs().get(best.input(i)), visited, part, matPoints,
                        plan, computeCosts, costVect, TemplateType.MAGG, costBound - costs);
                if (costs >= costBound)
                    return Double.POSITIVE_INFINITY;
            }
        //skip other multi-agg roots
        else
            return 0;
    }

    //add compute costs of current operator to costs vector
    costVect.computeCosts += computeCosts.get(currentHopId);

    //process children recursively
    for (int i = 0; i < current.getInput().size(); i++) {
        Hop c = current.getInput().get(i);
        if (best != null && best.isPlanRef(i))
            costs += rGetPlanCosts(memo, c, visited, part, matPoints, plan, computeCosts, costVect, best.type,
                    costBound - costs);
        else if (best != null && isImplicitlyFused(current, i, best.type))
            costVect.addInputSize(c.getInput().get(0).getHopID(), getSize(c));
        else { //include children and I/O costs
            if (part.getPartition().contains(c.getHopID()))
                costs += rGetPlanCosts(memo, c, visited, part, matPoints, plan, computeCosts, null, null,
                        costBound - costs);
            if (costVect != null && c.getDataType().isMatrix())
                costVect.addInputSize(c.getHopID(), getSize(c));
        }
        if (costs >= costBound)
            return Double.POSITIVE_INFINITY;
    }

    //add costs for opened fused operator
    if (opened) {
        double memInputs = sumInputMemoryEstimates(memo, costVect);
        double tmpCosts = costVect.outSize * 8 / WRITE_BANDWIDTH_MEM
                + Math.max(memInputs / READ_BANDWIDTH_MEM, costVect.computeCosts / COMPUTE_BANDWIDTH);
        //read correction for distributed computation
        if (memInputs > OptimizerUtils.getLocalMemBudget())
            tmpCosts += costVect.getSideInputSize() * 8 / READ_BANDWIDTH_BROADCAST;
        //sparsity correction for outer-product template (and sparse-safe cell)
        Hop driver = memo.getHopRefs().get(costVect.getMaxInputSizeHopID());
        if (best != null && best.type == TemplateType.OUTER)
            tmpCosts *= driver.dimsKnown(true) ? driver.getSparsity() : SPARSE_SAFE_SPARSITY_EST;
        //write correction for known evictions in CP
        else if (memInputs <= OptimizerUtils.getLocalMemBudget()
                && sumTmpInputOutputSize(memo, costVect) * 8 > LazyWriteBuffer.getWriteBufferLimit())
            tmpCosts += costVect.outSize * 8 / WRITE_BANDWIDTH_IO;
        costs += tmpCosts;
        if (LOG.isTraceEnabled()) {
            String type = (best != null) ? best.type.name() : "HOP";
            LOG.trace("Cost vector (" + type + " " + currentHopId + "): " + costVect + " -> " + tmpCosts);
        }
    }
    //add costs for non-partition read in the middle of fused operator
    else if (part.getExtConsumed().contains(current.getHopID())) {
        costs += rGetPlanCosts(memo, current, visited, part, matPoints, plan, computeCosts, null, null,
                costBound - costs);
    }

    //sanity check non-negative costs
    if (costs < 0 || Double.isNaN(costs) || Double.isInfinite(costs))
        throw new RuntimeException("Wrong cost estimate: " + costs);
    return costs;
}
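The closing sanity check is a small reusable pattern for validating an accumulated estimate before it propagates further; a minimal standalone version (the helper name is hypothetical, not part of the SystemML source):

// Hypothetical helper: reject negative, NaN or infinite cost estimates early.
static double requireValidCost(double costs) {
    if (costs < 0 || Double.isNaN(costs) || Double.isInfinite(costs))
        throw new RuntimeException("Wrong cost estimate: " + costs);
    return costs;
}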
From source file:org.gitools.analysis.groupcomparison.format.math33Preview.MathArrays.java
/**
 * Normalizes an array to make it sum to a specified value.
 * Returns the result of the transformation
 * <pre>
 *    x |-> x * normalizedSum / sum
 * </pre>
 * applied to each non-NaN element x of the input array, where sum is the
 * sum of the non-NaN entries in the input array.
 * <p/>
 * Throws IllegalArgumentException if {@code normalizedSum} is infinite
 * or NaN and ArithmeticException if the input array contains any infinite elements
 * or sums to 0.
 * <p/>
 * Ignores (i.e., copies unchanged to the output array) NaNs in the input array.
 *
 * @param values Input array to be normalized
 * @param normalizedSum Target sum for the normalized array
 * @return the normalized array.
 * @throws MathArithmeticException if the input array contains infinite
 * elements or sums to zero.
 * @throws MathIllegalArgumentException if the target sum is infinite or {@code NaN}.
 * @since 2.1
 */
public static double[] normalizeArray(double[] values, double normalizedSum)
        throws MathIllegalArgumentException, MathArithmeticException {
    if (Double.isInfinite(normalizedSum)) {
        throw new MathIllegalArgumentException(LocalizedFormats.NORMALIZE_INFINITE);
    }
    if (Double.isNaN(normalizedSum)) {
        throw new MathIllegalArgumentException(LocalizedFormats.NORMALIZE_NAN);
    }
    double sum = 0d;
    final int len = values.length;
    double[] out = new double[len];
    for (int i = 0; i < len; i++) {
        if (Double.isInfinite(values[i])) {
            throw new MathIllegalArgumentException(LocalizedFormats.INFINITE_ARRAY_ELEMENT, values[i], i);
        }
        if (!Double.isNaN(values[i])) {
            sum += values[i];
        }
    }
    if (sum == 0) {
        throw new MathArithmeticException(LocalizedFormats.ARRAY_SUMS_TO_ZERO);
    }
    for (int i = 0; i < len; i++) {
        if (Double.isNaN(values[i])) {
            out[i] = Double.NaN;
        } else {
            out[i] = values[i] * normalizedSum / sum;
        }
    }
    return out;
}
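A small usage sketch of the contract documented above: NaN entries are copied through unchanged, an infinite entry is rejected up front, and the remaining values are scaled so the non-NaN entries sum to the target (the array values are hypothetical):

import org.gitools.analysis.groupcomparison.format.math33Preview.MathArrays;

public class NormalizeArrayDemo {
    public static void main(String[] args) {
        double[] values = { 1.0, 3.0, Double.NaN, 4.0 };    // non-NaN entries sum to 8
        double[] out = MathArrays.normalizeArray(values, 1.0);
        System.out.println(java.util.Arrays.toString(out)); // [0.125, 0.375, NaN, 0.5]
        // MathArrays.normalizeArray(new double[] { 1.0, Double.POSITIVE_INFINITY }, 1.0)
        // would throw, since the loop above rejects infinite array elements.
    }
}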