List of usage examples for java.lang.StrictMath.log
public static native double log(double a);
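Per the Java SE specification, StrictMath.log(a) returns the natural logarithm of a: NaN when a is NaN or less than zero, negative infinity when a is positive or negative zero, and positive infinity when a is positive infinity; an argument of 1.0 yields 0.0. Unlike Math.log, which may be replaced by a faster platform intrinsic, StrictMath.log is defined to reproduce the fdlibm reference implementation bit for bit, so results are identical across JVMs.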
From source file:Main.java
public static void main(String[] args) {
    double d1 = 10, d2 = 0.0, d3 = (1.0 / 0.0), d4 = 1.0;
    System.out.println("Log value of " + d1 + " = " + StrictMath.log(d1));
    System.out.println("Log value of " + d2 + " = " + StrictMath.log(d2));
    System.out.println("Log value of " + d3 + " = " + StrictMath.log(d3));
    System.out.println("Log value of " + d4 + " = " + StrictMath.log(d4));
}
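This exercises one ordinary argument and the three special cases from the contract above, printing approximately:

    Log value of 10.0 = 2.302585092994046
    Log value of 0.0 = -Infinity
    Log value of Infinity = Infinity
    Log value of 1.0 = 0.0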
From source file:net.nicoulaj.benchmarks.math.DoubleLog.java
@GenerateMicroBenchmark
public void strictmath(BlackHole hole) {
    for (int i = 0; i < data.length - 1; i++)
        hole.consume(StrictMath.log(data[i]));
}
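Note that the loop stops at data.length - 1, so the last array element is never measured. @GenerateMicroBenchmark and BlackHole come from an early JMH release; in current JMH the annotation is @Benchmark and the sink class is org.openjdk.jmh.infra.Blackhole. A minimal self-contained sketch against the current API (the class name, data size, and seed are illustrative assumptions, not taken from the original project):

import java.util.Random;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;

@State(Scope.Thread)
public class DoubleLogSketch {
    private double[] data;

    @Setup
    public void setup() {
        // arbitrary positive inputs; seeded for repeatability
        data = new Random(42).doubles(10_000, 1.0, 1e6).toArray();
    }

    @Benchmark
    public void strictmath(Blackhole hole) {
        for (double d : data)
            hole.consume(StrictMath.log(d)); // Blackhole prevents dead-code elimination
    }
}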
From source file:edu.umd.gorden2.RunPersonalizedPageRankBasic.java
private float[] phase1(int i, int j, String basePath, int numNodes, String m) throws Exception {
    Job job = Job.getInstance(getConf());
    job.setJobName("PageRank:Basic:iteration" + j + ":Phase1");
    job.setJarByClass(RunPersonalizedPageRankBasic.class);

    String in = basePath + "/iter" + formatter.format(i);
    String out = basePath + "/iter" + formatter.format(j) + "t";
    String outm = out + "-mass";

    // We need to actually count the number of part files to get the number of partitions
    // (because the directory might contain _log).
    int numPartitions = 0;
    for (FileStatus s : FileSystem.get(getConf()).listStatus(new Path(in))) {
        if (s.getPath().getName().contains("part-"))
            numPartitions++;
    }

    LOG.info("PageRank: iteration " + j + ": Phase1");
    LOG.info(" - input: " + in);
    LOG.info(" - output: " + out);
    LOG.info(" - nodeCnt: " + numNodes);
    LOG.info("computed number of partitions: " + numPartitions);
    LOG.info("- sources: " + m);

    int numReduceTasks = numPartitions;

    job.getConfiguration().setInt("NodeCount", numNodes);
    job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false);
    job.getConfiguration().setBoolean("mapred.reduce.tasks.speculative.execution", false);
    //job.getConfiguration().set("mapred.child.java.opts", "-Xmx2048m");
    job.getConfiguration().set("PageRankMassPath", outm);
    job.getConfiguration().setStrings("sources", m);

    job.setNumReduceTasks(numReduceTasks);

    FileInputFormat.setInputPaths(job, new Path(in));
    FileOutputFormat.setOutputPath(job, new Path(out));

    job.setInputFormatClass(NonSplitableSequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(PageRankNode.class);
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(PageRankNode.class);

    job.setMapperClass(MapClass.class);
    job.setCombinerClass(CombineClass.class);
    job.setReducerClass(ReduceClass.class);

    FileSystem.get(getConf()).delete(new Path(out), true);
    FileSystem.get(getConf()).delete(new Path(outm), true);

    long startTime = System.currentTimeMillis();
    job.waitForCompletion(true);
    System.out.println("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    String[] mm = m.split(",");
    float[] mass = new float[mm.length];
    for (int x = 0; x < mm.length; x++) {
        mass[x] = (float) StrictMath.log(0);
    }

    FileSystem fs = FileSystem.get(getConf());
    for (FileStatus f : fs.listStatus(new Path(outm))) {
        FSDataInputStream fin = fs.open(f.getPath());
        float[] tMass = new float[mm.length];
        for (int y = 0; y < mm.length; y++)
            tMass[y] = fin.readFloat();
        mass = sumListLogProbs(mass, tMass);
        fin.close();
    }

    LOG.info("- total Mass: " + Arrays.toString(mass));
    return mass;
}
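The helper sumListLogProbs(...) is not shown in this excerpt. Initializing every slot with (float) StrictMath.log(0) sets it to negative infinity, the additive identity in log space, which suggests the helper performs element-wise log-space addition of probabilities. A plausible sketch, purely an assumption about the missing method:

private static float[] sumListLogProbs(float[] x, float[] y) {
    float[] sum = new float[x.length];
    for (int i = 0; i < x.length; i++) {
        // ln(p + q) = a + ln(1 + e^(b - a)) with a = max(ln p, ln q), for numerical stability
        float a = Math.max(x[i], y[i]);
        float b = Math.min(x[i], y[i]);
        if (a == Float.NEGATIVE_INFINITY) {
            sum[i] = Float.NEGATIVE_INFINITY; // both probabilities are zero
        } else {
            sum[i] = a + (float) StrictMath.log1p(StrictMath.exp(b - a));
        }
    }
    return sum;
}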
From source file:MersenneTwisterFast.java
public final double nextGaussian() {
    if (__haveNextNextGaussian) {
        __haveNextNextGaussian = false;
        return __nextNextGaussian;
    } else {
        double v1, v2, s;
        do {
            int y;
            int z;
            int a;
            int b;

            if (mti >= N) // generate N words at one time
            {
                int kk;
                final int[] mt = this.mt; // locals are slightly faster
                final int[] mag01 = this.mag01; // locals are slightly faster

                for (kk = 0; kk < N - M; kk++) {
                    y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
                    mt[kk] = mt[kk + M] ^ (y >>> 1) ^ mag01[y & 0x1];
                }
                for (; kk < N - 1; kk++) {
                    y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
                    mt[kk] = mt[kk + (M - N)] ^ (y >>> 1) ^ mag01[y & 0x1];
                }
                y = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
                mt[N - 1] = mt[M - 1] ^ (y >>> 1) ^ mag01[y & 0x1];

                mti = 0;
            }

            y = mt[mti++];
            y ^= y >>> 11; // TEMPERING_SHIFT_U(y)
            y ^= (y << 7) & TEMPERING_MASK_B; // TEMPERING_SHIFT_S(y)
            y ^= (y << 15) & TEMPERING_MASK_C; // TEMPERING_SHIFT_T(y)
            y ^= (y >>> 18); // TEMPERING_SHIFT_L(y)

            if (mti >= N) // generate N words at one time
            {
                int kk;
                final int[] mt = this.mt; // locals are slightly faster
                final int[] mag01 = this.mag01; // locals are slightly faster

                for (kk = 0; kk < N - M; kk++) {
                    z = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
                    mt[kk] = mt[kk + M] ^ (z >>> 1) ^ mag01[z & 0x1];
                }
                for (; kk < N - 1; kk++) {
                    z = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
                    mt[kk] = mt[kk + (M - N)] ^ (z >>> 1) ^ mag01[z & 0x1];
                }
                z = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
                mt[N - 1] = mt[M - 1] ^ (z >>> 1) ^ mag01[z & 0x1];

                mti = 0;
            }

            z = mt[mti++];
            z ^= z >>> 11; // TEMPERING_SHIFT_U(z)
            z ^= (z << 7) & TEMPERING_MASK_B; // TEMPERING_SHIFT_S(z)
            z ^= (z << 15) & TEMPERING_MASK_C; // TEMPERING_SHIFT_T(z)
            z ^= (z >>> 18); // TEMPERING_SHIFT_L(z)

            if (mti >= N) // generate N words at one time
            {
                int kk;
                final int[] mt = this.mt; // locals are slightly faster
                final int[] mag01 = this.mag01; // locals are slightly faster

                for (kk = 0; kk < N - M; kk++) {
                    a = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
                    mt[kk] = mt[kk + M] ^ (a >>> 1) ^ mag01[a & 0x1];
                }
                for (; kk < N - 1; kk++) {
                    a = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
                    mt[kk] = mt[kk + (M - N)] ^ (a >>> 1) ^ mag01[a & 0x1];
                }
                a = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
                mt[N - 1] = mt[M - 1] ^ (a >>> 1) ^ mag01[a & 0x1];

                mti = 0;
            }

            a = mt[mti++];
            a ^= a >>> 11; // TEMPERING_SHIFT_U(a)
            a ^= (a << 7) & TEMPERING_MASK_B; // TEMPERING_SHIFT_S(a)
            a ^= (a << 15) & TEMPERING_MASK_C; // TEMPERING_SHIFT_T(a)
            a ^= (a >>> 18); // TEMPERING_SHIFT_L(a)

            if (mti >= N) // generate N words at one time
            {
                int kk;
                final int[] mt = this.mt; // locals are slightly faster
                final int[] mag01 = this.mag01; // locals are slightly faster

                for (kk = 0; kk < N - M; kk++) {
                    b = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
                    mt[kk] = mt[kk + M] ^ (b >>> 1) ^ mag01[b & 0x1];
                }
                for (; kk < N - 1; kk++) {
                    b = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
                    mt[kk] = mt[kk + (M - N)] ^ (b >>> 1) ^ mag01[b & 0x1];
                }
                b = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
                mt[N - 1] = mt[M - 1] ^ (b >>> 1) ^ mag01[b & 0x1];

                mti = 0;
            }

            b = mt[mti++];
            b ^= b >>> 11; // TEMPERING_SHIFT_U(b)
            b ^= (b << 7) & TEMPERING_MASK_B; // TEMPERING_SHIFT_S(b)
            b ^= (b << 15) & TEMPERING_MASK_C; // TEMPERING_SHIFT_T(b)
            b ^= (b >>> 18); // TEMPERING_SHIFT_L(b)

            /* derived from nextDouble documentation in jdk 1.2 docs, see top */
            v1 = 2 * (((((long) (y >>> 6)) << 27) + (z >>> 5)) / (double) (1L << 53)) - 1;
            v2 = 2 * (((((long) (a >>> 6)) << 27) + (b >>> 5)) / (double) (1L << 53)) - 1;
            s = v1 * v1 + v2 * v2;
        } while (s >= 1 || s == 0);

        double multiplier = StrictMath.sqrt(-2 * StrictMath.log(s) / s);
        __nextNextGaussian = v2 * multiplier;
        __haveNextNextGaussian = true;
        return v1 * multiplier;
    }
}
From source file:org.deidentifier.arx.risk.Gamma.java
/**
 * Approximates the digamma function. Java port of
 * "The Lightspeed Matlab toolbox" version 2.7 by Tom Minka, see:
 * http://research.microsoft.com/en-us/um/people/minka/software/lightspeed/
 *
 * @param x input value
 * @return approximation of digamma for x
 */
static double digamma(double x) {
    /* Illegal arguments */
    if (Double.isInfinite(x) || Double.isNaN(x)) {
        return Double.NaN;
    }

    /* Singularities */
    if (x == 0.0d) {
        return Double.NEGATIVE_INFINITY;
    }

    /* Negative values */
    /*
     * Use the reflection formula (Jeffrey 11.1.6):
     * digamma(-x) = digamma(x+1) + pi*cot(pi*x)
     *
     * This is related to the identity
     * digamma(-x) = digamma(x+1) - digamma(z) + digamma(1-z)
     * where z is the fractional part of x. For example:
     * digamma(-3.1) = 1/3.1 + 1/2.1 + 1/1.1 + 1/0.1 + digamma(1-0.1)
     *               = digamma(4.1) - digamma(0.1) + digamma(1-0.1)
     * Then we use digamma(1-z) - digamma(z) = pi*cot(pi*z)
     */
    if (x < 0.0d) {
        return digamma(1.0d - x) + (StrictMath.PI / StrictMath.tan(-StrictMath.PI * x));
    }

    /* Use Taylor series if argument <= small */
    if (x <= SMALL_DIGAMMA) {
        return (DIGAMMA_1 - (1.0d / x)) + (TRIGAMMA_1 * x);
    }

    double result = 0.0d;
    /* Reduce to digamma(X + N) where (X + N) >= large */
    while (x < LARGE_DIGAMMA) {
        result -= 1.0d / x;
        x++;
    }

    /* Use de Moivre's expansion if argument >= C */
    /* This expansion can be computed in Maple via asympt(Psi(x),x) */
    if (x >= LARGE_DIGAMMA) {
        double r = 1.0d / x;
        result += StrictMath.log(x) - (0.5d * r);
        r *= r;
        result -= r * (S3 - (r * (S4 - (r * (S5 - (r * (S6 - (r * S7))))))));
    }
    return result;
}
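A quick way to sanity-check the port is against known values: digamma(1) equals the negated Euler-Mascheroni constant, and digamma(1/2) = -gamma - 2 ln 2. A hypothetical test harness, not part of the Gamma class:

public static void main(String[] args) {
    System.out.println(digamma(1.0)); // expect roughly -0.5772156649015329
    System.out.println(digamma(0.5)); // expect roughly -1.9635100260214235
}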
From source file:org.esa.beam.util.math.FastMathPerformance.java
public void testLog() {
    System.gc();
    double x = 0;
    long time = System.nanoTime();
    for (int i = 0; i < RUNS; i++)
        x += StrictMath.log(Math.PI + i /* 1.0 + i/1e9 */);
    long strictMath = System.nanoTime() - time;

    System.gc();
    double y = 0;
    time = System.nanoTime();
    for (int i = 0; i < RUNS; i++)
        y += FastMath.log(Math.PI + i /* 1.0 + i/1e9 */);
    long fastTime = System.nanoTime() - time;

    System.gc();
    double z = 0;
    time = System.nanoTime();
    for (int i = 0; i < RUNS; i++)
        z += Math.log(Math.PI + i /* 1.0 + i/1e9 */);
    long mathTime = System.nanoTime() - time;

    report("log", x + y + z, strictMath, fastTime, mathTime);
}
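Accumulating into x, y, and z and passing x + y + z to report(...) gives each loop an observable result, which keeps the JIT from eliminating the log calls as dead code. Even so, a hand-rolled System.nanoTime harness like this remains sensitive to warm-up and compilation effects that a dedicated harness such as JMH is designed to control for.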
From source file:org.mmadsen.sim.transmissionlab.analysis.OverallStatisticsRecorder.java
@Override
public void process() {
    this.log.debug("OverallStatisticsRecorder running process()");
    DoubleArrayList turnoverHistory = (DoubleArrayList) this.model
            .retrieveSharedObject(TraitFrequencyAnalyzer.TURNOVER_HISTORY_KEY);
    DoubleArrayList traitCountHistory = (DoubleArrayList) this.model
            .retrieveSharedObject(TraitFrequencyAnalyzer.TRAIT_COUNT_HISTORY_KEY);
    DoubleArrayList agentsTopNHistory = (DoubleArrayList) this.model
            .retrieveSharedObject(TraitFrequencyAnalyzer.AGENT_TRAIT_TOPN_KEY);
    Map<Integer, TraitCount> traitResidenceMap = (Map<Integer, TraitCount>) this.model
            .retrieveSharedObject(TraitFrequencyAnalyzer.TRAIT_RESIDENCE_TIME_KEY);
    Map<Integer, ArrayList<Integer>> cumTraitTopNResidenceTimes = (Map<Integer, ArrayList<Integer>>) this.model
            .retrieveSharedObject(TraitFrequencyAnalyzer.TRAIT_TOPN_RESIDENCE_MAP_KEY);
    Map<Integer, Map<Integer, Integer>> sharedClusterTraitCountsByTick = (Map<Integer, Map<Integer, Integer>>) this.model
            .retrieveSharedObject(ClusterTraitFrequencyFileSnapshot.TRAITS_SHARED_ACROSS_CLUSTER_COUNTS);

    // calculate turnover statistics
    this.meanTurnover = Descriptive.mean(turnoverHistory);
    double varianceTurnover = Descriptive.sampleVariance(turnoverHistory, this.meanTurnover);
    this.stdevTurnover = Descriptive.standardDeviation(varianceTurnover);
    this.log.info("Mean turnover: " + this.meanTurnover + " stdev: " + this.stdevTurnover);

    // calculate total variation statistics
    this.meanTraitCount = Descriptive.mean(traitCountHistory);
    double varianceTraitCount = Descriptive.sampleVariance(traitCountHistory, this.meanTraitCount);
    this.stdevTraitCount = Descriptive.standardDeviation(varianceTraitCount);
    this.log.info("Mean num traits in population: " + this.meanTraitCount + " stdev: " + this.stdevTraitCount);

    // calculate stats for the number of agents with traits in the top N
    this.meanAgentCount = Descriptive.mean(agentsTopNHistory);
    double varianceAgentCount = Descriptive.sampleVariance(agentsTopNHistory, this.meanAgentCount);
    this.stdevAgentCount = Descriptive.standardDeviation(varianceAgentCount);
    this.log.info("Mean num agents with traits in top N: " + this.meanAgentCount + " stdev: "
            + this.stdevAgentCount);

    // calculate stats for the "residence" time of traits - basically this is just the values from the
    // residenceTimeMap
    // 11/10/2007 - calculate ln(residenceTime) since it's a highly skewed distribution
    DoubleArrayList residenceTimeList = new DoubleArrayList();
    Map<Integer, Integer> residenceTimesFreq = new HashMap<Integer, Integer>();
    for (TraitCount tc : traitResidenceMap.values()) {
        // First we track the frequency of traits that last N ticks.
        // This data comes from the TraitCount objects contained in the traitResidenceMap.
        // We're not interested in the trait ID here, just the count of ticks the trait lasted
        // before becoming extinct. Thus, we grab the count from each TraitCount object,
        // and hash into residenceTimesFreq and increment that "time slot" -- i.e., if trait
        // 1001 had lasted 5 ticks, we'd look at key "5" and increment it. If key "5" hadn't
        // existed before, we'd establish it. Pretty typical frequency counting stuff, other than
        // the fact that we're ignoring the exact trait ID.
        if (residenceTimesFreq.containsKey(tc.getCount())) {
            Integer numTraitsWithCount = residenceTimesFreq.get(tc.getCount());
            numTraitsWithCount++;
            residenceTimesFreq.put(tc.getCount(), numTraitsWithCount);
        } else {
            residenceTimesFreq.put(tc.getCount(), (Integer) 1);
        }
        // Now, let's add the ln(tc.getCount) to the list of residence times we'll use to calc the
        // log-mean of residence times for the final stats summary.
        residenceTimeList.add(StrictMath.log((double) tc.getCount()));
    }
    this.meanResidenceTime = Descriptive.mean(residenceTimeList);
    double varianceResidenceTime = Descriptive.sampleVariance(residenceTimeList, this.meanResidenceTime);
    this.stdevResidenceTime = Descriptive.standardDeviation(varianceResidenceTime);
    this.log.info("Mean log trait sojourn time: " + this.meanResidenceTime + " stdev: "
            + this.stdevResidenceTime);

    this.traitsAcrossClustersHistory = this.extractCountTraitsAcrossClusters(sharedClusterTraitCountsByTick);
    this.meanNumberClustersPerTrait = Descriptive.mean(this.traitsAcrossClustersHistory);
    double varianceNumClustersPerTrait = Descriptive.sampleVariance(this.traitsAcrossClustersHistory,
            this.meanNumberClustersPerTrait);
    this.stdevNumberClustersPerTrait = Descriptive.standardDeviation(varianceNumClustersPerTrait);
    this.log.info("Mean number of clusters per trait: " + this.meanNumberClustersPerTrait + " stdev: "
            + this.stdevNumberClustersPerTrait);

    // record the population structure graph to a Pajek file for display and external analysis
    IAgentPopulation population = this.model.getPopulation();
    FileWriter socialGraphWriter = this.model.getFileWriterForPerRunOutput(pajekGraphOutputFile);
    population.saveGraphToFile(socialGraphWriter, IStructuredPopulationWriter.WriterType.Pajek);

    // HACK
    this.calculateGraphStatistics();
    this.log.info("Characteristic length of graph: " + this.meanDistanceBetweenVertices);
    this.log.info("Clustering coefficient of graph: " + this.clusteringCoefficient);

    // record overall stats to a file
    this.recordStats();
    this.recordResidenceMatrix(cumTraitTopNResidenceTimes);
    this.recordResidenceTimeFrequencies(residenceTimesFreq);
    if (population.isPopulationClustered()) {
        this.recordTraitsSharedAcrossClusters(sharedClusterTraitCountsByTick);
    }
}
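Averaging StrictMath.log of each sojourn time rather than the raw counts means meanResidenceTime is the mean of the logs, i.e. the natural log of the geometric mean. For a heavily right-skewed distribution this is a more robust central-tendency summary than the arithmetic mean, which a few long-lived traits would dominate.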
From source file:ubic.gemma.core.ontology.GoMetric.java
/**
 * @param pMin  min p
 * @param probC prob C
 * @param probM prob M
 * @return Jiang semantic similarity measure between two terms
 */
private Double calcJiang(Double pMin, Double probM, Double probC) {
    return 1 / ((-1 * StrictMath.log(probM)) + (-1 * StrictMath.log(probC))
            - (-2 * StrictMath.log(pMin)) + 1);
}
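With information content defined as IC(c) = -ln p(c), the denominator is IC(probM) + IC(probC) - 2 IC(pMin) + 1, i.e. the Jiang-Conrath semantic distance between the two terms (pMin being the probability of their most informative common ancestor) plus one; taking the reciprocal turns the distance into a similarity in (0, 1].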
From source file:ubic.gemma.core.ontology.GoMetric.java
/**
 * @param pMin  min p
 * @param probC prob C
 * @param probM prob M
 * @return Lin semantic similarity measure between two terms
 */
private Double calcLin(Double pMin, Double probM, Double probC) {
    return (2 * (StrictMath.log(pMin))) / ((StrictMath.log(probM)) + (StrictMath.log(probC)));
}
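This is Lin's measure: sim = 2 ln p(lcs) / (ln p(c1) + ln p(c2)), equivalently 2 IC(lcs) / (IC(c1) + IC(c2)). Since all three probabilities are at most 1, both numerator and denominator are non-positive, so the ratio is a non-negative similarity.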
From source file:ubic.gemma.core.ontology.GoMetric.java
/**
 * @param pMin min p
 * @return Resnik semantic similarity measure between two terms
 */
private Double calcResnik(Double pMin) {
    return -1 * (StrictMath.log(pMin));
}
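Resnik's measure is simply the information content -ln p of the most informative common ancestor of the two terms: the less probable (more specific) the shared ancestor, the more similar the terms are judged to be.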