List of usage examples for java.lang.Math.log10
@HotSpotIntrinsicCandidate public static double log10(double a)
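Before the project examples below, a minimal sketch of the method's contract (these values follow from the Math.log10 Javadoc: integer powers of ten are exact, zero maps to negative infinity, and negative arguments map to NaN):

public class Log10Demo {
    public static void main(String[] args) {
        System.out.println(Math.log10(100.0));  // 2.0 (exact for integer powers of 10)
        System.out.println(Math.log10(0.0));    // -Infinity
        System.out.println(Math.log10(-1.0));   // NaN
        // A common idiom: count the decimal digits of a positive integer.
        System.out.println((int) Math.floor(Math.log10(12345)) + 1); // 5
    }
}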
From source file: org.broadinstitute.sting.utils.MathUtils.java

public static double log10sumLog10(final double[] log10p, final int start, final int finish) {
    double sum = 0.0;
    // Factor out the largest term so the pow(10, x) calls stay in a safe range;
    // this is the log-sum-exp trick in base 10 and avoids underflow when all
    // log10 values are strongly negative.
    double maxValue = arrayMax(log10p, finish);
    if (maxValue == Double.NEGATIVE_INFINITY)
        return maxValue; // all probabilities are zero
    for (int i = start; i < finish; i++) {
        if (Double.isNaN(log10p[i]) || log10p[i] == Double.POSITIVE_INFINITY) {
            throw new IllegalArgumentException("log10p: Values must be non-infinite and non-NAN");
        }
        sum += Math.pow(10.0, log10p[i] - maxValue);
    }
    return Math.log10(sum) + maxValue;
}
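A minimal, hypothetical driver for the method above (arrayMax here is a stand-in for the MathUtils helper of the same name), checking that the routine matches a direct computation:

// Hypothetical stand-in for MathUtils.arrayMax(array, finish).
static double arrayMax(final double[] a, final int finish) {
    double max = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < finish; i++)
        max = Math.max(max, a[i]);
    return max;
}

public static void main(String[] args) {
    final double[] log10p = { Math.log10(0.1), Math.log10(0.01) };
    System.out.println(log10sumLog10(log10p, 0, log10p.length)); // ~ -0.9586
    System.out.println(Math.log10(0.1 + 0.01));                  // same value, computed directly
}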
From source file: hulo.localization.models.obs.GaussianProcessLDPLMeanMulti.java

public void optimizeLDPLMultiHyperParams() {
    final GaussianProcessLDPLMeanMulti gpLDPLMulti = this;
    MultivariateFunction costFunc = new MultivariateFunction() {
        @Override
        public double value(double[] point) {
            double[] params = gpLDPLMulti.getParams();
            params[0] = point[0];
            params[1] = point[1];
            gpLDPLMulti.setParams(params);
            // The stabilization parameters are optimized in log10 space and
            // mapped back with pow(10, .), which keeps them positive.
            double[] lmdRegu = gpLDPLMulti.getStabilizeParameter();
            lmdRegu[0] = Math.pow(10, point[2]);
            lmdRegu[1] = Math.pow(10, point[3]);
            gpLDPLMulti.setStabilizeParameter(lmdRegu);
            // Average leave-one-out MSE over all samples.
            int ns = X.length;
            double aveLOOMSE = 0.0;
            for (int k = 0; k < ns; k++) {
                gpLDPLMulti.fit(X, Y);
                double looMSE = gpLDPLMulti.looErrorLDPLMultiPart(k);
                aveLOOMSE += looMSE / ns;
            }
            final StringBuilder sb = new StringBuilder();
            sb.setLength(0);
            sb.append("optimizeLDPLMultiHyperParams: ");
            sb.append("aveLOOMSE=" + aveLOOMSE + ", ");
            sb.append("n=" + params[0] + ",A=" + params[1] + ", lmdn=" + lmdRegu[0] + ",lmdA=" + lmdRegu[1]);
            sb.append(", ns=" + ns);
            System.out.println(sb.toString());
            return aveLOOMSE;
        }
    };
    // Initial point: the params and lambdas fields, with the lambdas moved into log10 space
    // (the locals of the same names are declared only after this line, so these refer to fields).
    double[] pointInit = { params[0], params[1], Math.log10(lambdas[0]), Math.log10(lambdas[1]) };
    double[] dPointInit = { 0.1, 1.0, 0.1, 0.1 };
    PointValuePair pair = GaussianProcessLDPLMean.minimize(costFunc, pointInit, dPointInit);
    double[] point = pair.getPoint();
    double[] params = gpLDPLMulti.getParams();
    params[0] = point[0];
    params[1] = point[1];
    gpLDPLMulti.setParams(params);
    double[] lambdas = gpLDPLMulti.getStabilizeParameter();
    lambdas[0] = Math.pow(10, point[2]);
    lambdas[1] = Math.pow(10, point[3]);
    gpLDPLMulti.setStabilizeParameter(lambdas);
}
From source file: edu.cornell.med.icb.goby.modes.CompactAlignmentToTranscriptCountsMode.java

private void processTranscriptAlignment(final String basename) throws IOException {
    final AlignmentReaderImpl reader = new AlignmentReaderImpl(basename);
    PrintWriter outputWriter = null;
    try {
        WeightsInfo weights = null;
        if (useWeights) {
            weights = CompactAlignmentToAnnotationCountsMode.loadWeights(basename, useWeights, weightId);
            if (weights != null) {
                System.err.println(
                        "Weights have been provided and loaded and will be used to reweight transcript counts.");
            }
        }
        outputWriter = new PrintWriter(new FileWriter(outputFile));
        // outputWriter.write("# One line per reference id. Count indicates the number of times a query \n" +
        //         "# partially overlaps a target, given the various quality filters used to create the alignment.\n");
        outputWriter.write("sampleId\treferenceId\tcount\tlog10(count+1)\tcumulativeBasesAligned\n");
        reader.readHeader();
        final int numberOfReferences = reader.getNumberOfTargets();
        // The following is the raw count per transcript, or reweighted count per transcript when use-weights==true
        final double[] numberOfReadsPerReference = new double[numberOfReferences];
        final int[] cumulativeBasesPerReference = new int[numberOfReferences];
        System.out.printf("Scanning alignment %s%n", basename);
        for (final Alignments.AlignmentEntry alignmentEntry : reader) {
            final int referenceIndex = alignmentEntry.getTargetIndex();
            numberOfReadsPerReference[referenceIndex] += (weights != null
                    ? weights.getWeight(alignmentEntry.getQueryIndex()) : 1);
            cumulativeBasesPerReference[referenceIndex] += Math.min(alignmentEntry.getQueryAlignedLength(),
                    alignmentEntry.getTargetAlignedLength());
        }
        final IndexedIdentifier targetIds = reader.getTargetIdentifiers();
        final DoubleIndexedIdentifier targetIdBackward = new DoubleIndexedIdentifier(targetIds);
        final String sampleId = FilenameUtils.getBaseName(basename);
        deCalculator.reserve(numberOfReferences, inputFiles.length);
        int numAlignedReadsInSample = 0;
        // define elements that will be tested for differential expression:
        for (int referenceIndex = 0; referenceIndex < numberOfReferences; ++referenceIndex) {
            final String transcriptId = targetIdBackward.getId(referenceIndex).toString();
            final int index = deCalculator.defineElement(transcriptId,
                    DifferentialExpressionCalculator.ElementType.TRANSCRIPT);
            deCalculator.defineElementLength(index, reader.getTargetLength(referenceIndex));
        }
        // observe elements:
        for (int referenceIndex = 0; referenceIndex < numberOfReferences; ++referenceIndex) {
            outputWriter.printf("%s\t%s\t%g\t%g\t%d%n", basename, targetIdBackward.getId(referenceIndex),
                    numberOfReadsPerReference[referenceIndex],
                    Math.log10(numberOfReadsPerReference[referenceIndex] + 1),
                    cumulativeBasesPerReference[referenceIndex]);
            final String transcriptId = targetIdBackward.getId(referenceIndex).toString();
            deCalculator.observe(sampleId, transcriptId, numberOfReadsPerReference[referenceIndex]);
            numAlignedReadsInSample += numberOfReadsPerReference[referenceIndex];
        }
        deCalculator.setNumAlignedInSample(sampleId, numAlignedReadsInSample);
        outputWriter.flush();
    } finally {
        IOUtils.closeQuietly(outputWriter);
        reader.close();
    }
}
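The +1 inside the log10(count+1) column keeps transcripts with zero reads representable, since Math.log10(0) is negative infinity. A two-line check of the transform with hypothetical counts:

double[] counts = { 0.0, 9.0, 999.0 };
for (double c : counts) {
    // log10(1) = 0, log10(10) = 1, log10(1000) = 3
    System.out.printf("%g -> %g%n", c, Math.log10(c + 1));
}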
From source file: org.broadinstitute.gatk.engine.recalibration.RecalDatumNode.java

/**
 * Calculate the phred-scaled p-value for a chi^2 test of independence among the subnodes of this node.
 *
 * The chi^2 value indicates the degree of independence of the implied error rates among the
 * immediate subnodes.
 *
 * @return the phred-scaled p-value for the chi^2 penalty, or 0.0 if it cannot be calculated
 */
private double calcPenalty() {
    if (isLeaf() || freeToMerge())
        return 0.0;
    else if (subnodes.size() == 1)
        // only one value, so it's free to merge away
        return 0.0;
    else {
        final long[][] counts = new long[subnodes.size()][2];
        int i = 0;
        for (final RecalDatumNode<T> subnode : subnodes) {
            // use the Yates correction to help avoid all zeros => NaN
            counts[i][0] = Math.round(subnode.getRecalDatum().getNumMismatches()) + 1L;
            counts[i][1] = subnode.getRecalDatum().getNumObservations() + 2L;
            i++;
        }
        try {
            final double chi2PValue = new ChiSquareTestImpl().chiSquareTest(counts);
            // Phred scale: penalty = -10 * log10(p), with p clamped away from zero.
            final double penalty = -10.0 * Math.log10(Math.max(chi2PValue, SMALLEST_CHI2_PVALUE));
            // make sure things are reasonable and fail early if not
            if (Double.isInfinite(penalty) || Double.isNaN(penalty))
                throw new ReviewedGATKException("chi2 value is " + chi2PValue + " at " + getRecalDatum());
            return penalty;
        } catch (MathException e) {
            throw new ReviewedGATKException("Failed in calculating chi2 value", e);
        }
    }
}
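The phred scaling used above is the standard -10 * log10(p) transform; a quick sanity check of the arithmetic, with no GATK types needed:

double phred20 = -10.0 * Math.log10(0.01);  // 20.0: each factor of 10 in p adds 10 phred
double phredCoin = -10.0 * Math.log10(0.5); // ~3.01 for a coin-flip p-value
System.out.println(phred20 + " " + phredCoin);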
From source file: com.hurence.logisland.botsearch.Trace.java

/**
 * In the next step, we compute the Power Spectral Density (PSD) of the Fast
 * Fourier Transformation over our sampled trace and extract the most
 * significant frequency. The FFT peaks are correlated with time
 * periodicities and resistant against irregular large gaps in the trace. We
 * observed the introduction of gaps in the wild for bots in which
 * communication with the C&C server is periodic and then pauses for a
 * while. When malware authors randomly vary the C&C connection frequency
 * within a certain window, the random variation lowers the FFT peak.
 * However, the peak remains detectable and at the same frequency, enabling
 * the detection of the malware communication.
 */
double[] computePowerSpectralDensity(double[] samples) {
    // compute FFT
    FastFourierTransformer fft = new FastFourierTransformer(DftNormalization.STANDARD);
    Complex[] frequencies = fft.transform(samples, TransformType.FORWARD);
    // take the highest magnitude of power spectral density
    double[] magnitudes = new double[frequencies.length / 2];
    for (int i = 0; i < magnitudes.length; i++) {
        // convert to dB
        magnitudes[i] = 10 * Math.log10(frequencies[i].abs());
    }
    // apply a low pass filter to smooth high frequency magnitudes
    smoothArray(magnitudes, 2.0);
    return magnitudes;
}
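A hypothetical driver for the method above: sample a signal with a known period and confirm the PSD peaks at the matching frequency bin. Commons-math3's FastFourierTransformer requires a power-of-two sample count, and the smoothing pass may spread the peak slightly across neighboring bins:

double[] samples = new double[256];
for (int i = 0; i < samples.length; i++) {
    // one cycle every 16 samples -> expected peak near bin 256 / 16 = 16
    samples[i] = Math.sin(2 * Math.PI * i / 16.0);
}
double[] psd = trace.computePowerSpectralDensity(samples); // trace: a hypothetical Trace instance
int peak = 1; // skip the DC bin
for (int i = 1; i < psd.length; i++) {
    if (psd[i] > psd[peak]) peak = i;
}
System.out.println("peak bin = " + peak); // ~16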
From source file: edu.ucuenca.authorsrelatedness.Distance.java

private double NGD(String a, String b) throws IOException, SQLException {
    a = a.trim();
    b = b.trim();
    if (a.compareToIgnoreCase(b) == 0) {
        return 0;
    }
    //double n0 = getResultsCount(""+a+"");
    //double n1 = getResultsCount(""+b+"");
    //String c = ""+a+" "+b+"";
    // Proximity queries: each term's words within 10 positions, the pair within 50.
    String _a = "\"" + a + "\"~10";
    String _b = "\"" + b + "\"~10";
    String c = "\"" + a + " " + b + "\"~50";
    if (Cache.getInstance().config.get("relaxMode").getAsBoolean().value()) {
        _a = "" + a;
        _b = "" + b;
        c = a + " " + b;
    }
    double n0 = getResultsCount(_a);
    double n1 = getResultsCount(_b);
    double n2 = 0;
    if (n0 == 0 || n1 == 0) {
        n2 = 0;
    } else {
        n2 = getResultsCount(c);
    }
    //double m = 5026040.0 * 590;
    // Corpus-size estimate: the hit count of a maximally common word.
    double m = getResultsCount("the");
    double distance = 0;
    int Measure = 0;
    double l1 = Math.max(Math.log10(n0), Math.log10(n1)) - Math.log10(n2);
    double l2 = Math.log10(m) - Math.min(Math.log10(n0), Math.log10(n1));
    if (Measure == 0) {
        // Normalized Google Distance
        distance = l1 / l2;
    }
    if (Measure == 1) {
        distance = 1 - (Math.log10(n2) / Math.log10(n0 + n1 - n2));
    }
    if (n0 == 0 || n1 == 0 || n2 == 0) {
        distance = 1;
    }
    //System.out.println("n0="+n0);
    //System.out.println("n1="+n1);
    //System.out.println("n2="+n2);
    //System.out.println(a + "," + b + "=" + distance2);
    return distance;
}
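The Measure == 0 branch is the classic Normalized Google Distance, NGD(a, b) = (max(log n0, log n1) - log n2) / (log m - min(log n0, log n1)). With hypothetical hit counts the arithmetic looks like this:

double n0 = 1000, n1 = 500, n2 = 100, m = 1_000_000; // hypothetical result counts
double ngd = (Math.max(Math.log10(n0), Math.log10(n1)) - Math.log10(n2))
        / (Math.log10(m) - Math.min(Math.log10(n0), Math.log10(n1)));
System.out.println(ngd); // (3 - 2) / (6 - log10(500)) ~ 0.30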
From source file: org.apache.hadoop.hive.serde2.io.TimestampWritable.java

public static int getNanos(byte[] bytes, int offset) {
    LazyBinaryUtils.readVInt(bytes, offset, vInt);
    int val = vInt.value;
    // Count the decimal digits of the stored value.
    int len = (int) Math.floor(Math.log10(val)) + 1;
    // The nanos field is serialized with its decimal digits reversed; reverse it back.
    int tmp = 0;
    while (val != 0) {
        tmp *= 10;
        tmp += val % 10;
        val /= 10;
    }
    val = tmp;
    // Pad with trailing zeros up to the full 9 nanosecond digits.
    if (len < 9) {
        val *= Math.pow(10, 9 - len);
    }
    return val;
}
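The log10 call is used only for digit counting; a standalone trace of the reverse-and-pad logic with a hypothetical reversed value:

int val = 521; // reversed digits of 125, as decoded from the VInt
int len = (int) Math.floor(Math.log10(val)) + 1; // 3 decimal digits
int tmp = 0;
while (val != 0) { // un-reverse: 521 -> 125
    tmp = tmp * 10 + val % 10;
    val /= 10;
}
val = tmp;
if (len < 9) {
    val *= Math.pow(10, 9 - len); // pad to 9 digits: 125000000 nanoseconds
}
System.out.println(val);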
From source file: net.naonedbus.activity.impl.OldSettingsActivity.java

/**
 * Format the size.
 *
 * @param size
 * @return a size readable by ordinary humans
 */
private String readableFileSize(final long size) {
    if (size <= 0)
        return getString(R.string.msg_vide);
    final String[] units = new String[] { "o", "Ko", "Mo", "Go", "To" };
    // log10(size) / log10(1024) is log base 1024 of size; its integer part selects the unit.
    final int digitGroups = (int) (Math.log10(size) / Math.log10(1024));
    return new DecimalFormat("#,##0.#").format(size / Math.pow(1024, digitGroups)) + " " + units[digitGroups];
}
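A quick check of the unit-bucket arithmetic outside of Android (the units array uses French octet abbreviations, and DecimalFormat applies the default locale's separators):

long size = 123456789L;
// 8.09 / 3.01 -> 2, i.e. the 1024^2 bucket, "Mo"
int digitGroups = (int) (Math.log10(size) / Math.log10(1024));
System.out.println(new java.text.DecimalFormat("#,##0.#")
        .format(size / Math.pow(1024, digitGroups)) + " Mo"); // ~117.7 Mo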
From source file: org.easyrec.plugin.arm.impl.AssocRuleMiningServiceImpl.java

/**
 * @param tuples        candidate item pairs with their joint support
 * @param L1            map from a single item to its support count
 * @param configuration the ARM configuration (selects the metric type, among other things)
 * @param stats         run statistics, including the number of baskets
 * @param minConfidence minimum confidence for a rule to be kept
 * @return the generated association rules
 */
@Override
public List<ItemAssocVO<Integer, Integer>> createRules(List<TupleVO> tuples,
        TObjectIntHashMap<ItemVO<Integer, Integer>> L1, ARMConfigurationInt configuration, ARMStatistics stats,
        Double minConfidence) {
    // Integer h1, h2;
    Double dh1, dh2;
    Integer sup1, sup2;
    Double dsup1, dsup2, assocValue1, assocValue2;
    Double baskets = new Double(stats.getNrBaskets());
    stats.setMetricType(configuration.getMetricType());
    ArrayList<ItemAssocVO<Integer, Integer>> ret = new ArrayList<>();
    for (TupleVO tuple : tuples) {
        sup1 = L1.get(tuple.getItem1());
        dsup1 = new Double(sup1);
        sup2 = L1.get(tuple.getItem2());
        dsup2 = new Double(sup2);
        // Note: TObjectIntHashMap.get returns a primitive int (0 when absent), so these
        // boxed values are never null and this guard never fires as written.
        if (sup1 == null || sup2 == null) {
            continue;
        }
        // confidence (in percent)
        // h1 = (tuple.getSupport() * 100) / sup1;
        // h2 = (tuple.getSupport() * 100) / sup2;
        dh1 = (tuple.getSupport() * 100) / dsup1;
        dh2 = (tuple.getSupport() * 100) / dsup2;
        // lift
        Double lift = tuple.getSupport() / (dsup1 * dsup2);
        // conviction
        Double conviction1 = (1 - (dsup2 / baskets)) / (100 - dh1);
        Double conviction2 = (1 - (dsup1 / baskets)) / (100 - dh2);
        // ltc (long-tail contribution)
        Double ltc1 = dsup1 * Math.log10(dsup1 / dsup2);
        Double ltc2 = dsup2 * Math.log10(dsup2 / dsup1);
        switch (configuration.getMetricType()) {
        case CONFIDENCE:
            assocValue1 = dh1;
            assocValue2 = dh2;
            break;
        case CONVICTION:
            assocValue1 = conviction1;
            assocValue2 = conviction2;
            break;
        case LIFT:
            assocValue1 = lift;
            assocValue2 = lift;
            break;
        case LONGTAIL:
            assocValue1 = ltc1;
            assocValue2 = ltc2;
            break;
        default:
            assocValue1 = dh1;
            assocValue2 = dh2;
            break;
        }
        // public ItemAssocVO(T tenant, ItemVO<T, I, IT> itemFrom, AT assocType,
        //         Double assocValue, ItemVO<T, I, IT> itemTo, ST sourceType,
        //         String sourceInfo, VT viewType, Boolean active)
        // TODO: confidence is always used as the quality gate here! maybe better to use assocValue
        if (dh1 >= (minConfidence)) {
            String comment1 = null;
            if (configuration.getStoreAlternativeMetrics()) {
                comment1 = new StringBuilder("conf=").append(String.format("%04f", dh1)).append(" lift=")
                        .append(String.format("%04f", lift)).append(" convic=")
                        .append(String.format("%04f", conviction1)).append(" ltc=")
                        .append(String.format("%04f", ltc1)).append(" sup1=")
                        .append(String.format("%04f", dsup1)).append(" sup2=")
                        .append(String.format("%04f", dsup2)).append(" tsup=").append(tuple.getSupport())
                        .toString();
            }
            ItemAssocVO<Integer, Integer> rule = new ItemAssocVO<>(configuration.getTenantId(),
                    tuple.getItem1(), configuration.getAssocType(), assocValue1/*new Double(h1)*/,
                    tuple.getItem2(),
                    typeMappingService.getIdOfSourceType(configuration.getTenantId(),
                            ARMGenerator.ID.toString() + "/" + ARMGenerator.VERSION),
                    comment1,
                    typeMappingService.getIdOfViewType(configuration.getTenantId(),
                            TypeMappingService.VIEW_TYPE_COMMUNITY),
                    true, stats.getStartDate());
            ret.add(rule);
        }
        if (dh2 >= (minConfidence)) {
            String comment2 = null;
            if (configuration.getStoreAlternativeMetrics()) {
                comment2 = new StringBuilder("conf=").append(String.format("%04f", dh2)).append(" lift=")
                        .append(String.format("%04f", lift)).append(" convic=")
                        .append(String.format("%04f", conviction2)).append(" ltc=")
                        .append(String.format("%04f", ltc2)).append(" sup2=")
                        .append(String.format("%04f", dsup2)).append(" sup1=")
                        .append(String.format("%04f", dsup1)).append(" tsup=").append(tuple.getSupport())
                        .toString();
            }
            ItemAssocVO<Integer, Integer> rule = new ItemAssocVO<>(configuration.getTenantId(),
                    tuple.getItem2(), configuration.getAssocType(), assocValue2/*new Double(h2)*/,
                    tuple.getItem1(),
                    typeMappingService.getIdOfSourceType(configuration.getTenantId(),
                            ARMGenerator.ID.toString() + "/" + ARMGenerator.VERSION),
                    comment2,
                    typeMappingService.getIdOfViewType(configuration.getTenantId(),
                            TypeMappingService.VIEW_TYPE_COMMUNITY),
                    true, stats.getStartDate());
            ret.add(rule);
        }
    }
    return ret;
}
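To make the metrics concrete, a worked example with hypothetical counts, 1000 baskets, item1 in 100 of them, item2 in 60, and 30 baskets containing both (the values mirror the formulas in the method above, which differ slightly from textbook definitions):

double baskets = 1000, dsup1 = 100, dsup2 = 60, tsup = 30;
double dh1 = tsup * 100 / dsup1;                          // confidence: 30% of item1 baskets contain item2
double lift = tsup / (dsup1 * dsup2);                     // 0.005
double conviction1 = (1 - dsup2 / baskets) / (100 - dh1); // ~0.0134
double ltc1 = dsup1 * Math.log10(dsup1 / dsup2);          // 100 * log10(100/60) ~ 22.2
System.out.printf("conf=%g lift=%g convic=%g ltc=%g%n", dh1, lift, conviction1, ltc1);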
From source file: org.broadinstitute.sting.gatk.walkers.genotyper.UnifiedGenotyperEngineUnitTest.java

@DataProvider(name = "ReferenceQualityCalculation")
public Object[][] makeReferenceQualityCalculation() {
    List<Object[]> tests = new ArrayList<Object[]>();
    // this functionality can be adapted to provide input data for whatever you might want in your data
    final double p = Math.log10(0.5);
    for (final double theta : Arrays.asList(0.1, 0.01, 0.001)) {
        for (final int depth : Arrays.asList(0, 1, 2, 10, 100, 1000, 10000)) {
            final double log10PofNonRef = Math.log10(theta / 2.0)
                    + MathUtils.log10BinomialProbability(depth, 0, p);
            final double log10POfRef = MathUtils.log10OneMinusX(Math.pow(10.0, log10PofNonRef));
            tests.add(new Object[] { depth, theta, log10POfRef });
        }
    }
    return tests.toArray(new Object[][] {});
}