List of usage examples for java.lang.Math.log10
@HotSpotIntrinsicCandidate public static double log10(double a)
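Math.log10 returns the base-10 logarithm of its argument: it is exact for exact powers of ten, returns negative infinity for zero, and NaN for negative or NaN arguments. A minimal, self-contained sketch of that contract (not taken from any of the source files below; the class name is illustrative):

public class Log10Basics {
    public static void main(String[] args) {
        System.out.println(Math.log10(1000.0)); // 3.0 (exact for powers of ten)
        System.out.println(Math.log10(0.0));    // -Infinity
        System.out.println(Math.log10(-1.0));   // NaN

        // Common idiom: count the decimal digits of a positive integer
        long n = 98765L;
        int digits = (int) Math.log10(n) + 1;   // 5
        System.out.println(digits);
    }
}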
From source file:io.github.msdk.features.ransacaligner.RANSAC.java
/**
 * Calculate k (number of trials).
 *
 * @return number of trials "k" required to select a subset of n good data points.
 */
private double getK() {
    double w = 0.1;
    double b = Math.pow(w, n);
    return Math.log10(1 - 0.99) / Math.log10(1 - b) + (Math.sqrt(1 - b) / b);
}
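The base term of this expression is the standard RANSAC trial count k = log(1 - p) / log(1 - w^n), where p is the desired success probability (0.99 above) and w the assumed inlier ratio; the square-root term adds the standard deviation of k for extra confidence. A hedged worked check with illustrative values (w, n, and p here are local stand-ins, not the fields of the RANSAC class):

double w = 0.5, p = 0.99;
int n = 2;
double b = Math.pow(w, n);                          // 0.25
double k = Math.log10(1 - p) / Math.log10(1 - b);   // ~16.0 trials before the correction
double kCorrected = k + Math.sqrt(1 - b) / b;       // ~19.5 trials in total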
From source file:org.dkpro.similarity.experiments.sts2013.util.WordIdfValuesGenerator.java
public static void computeIdfScores(Mode mode, Dataset dataset) throws Exception {
    URL inputUrl = ResourceUtils.resolveLocation(
            DATASET_DIR + "/" + mode.toString().toLowerCase() + "/STS.input." + dataset.toString() + ".txt");
    List<String> lines = IOUtils.readLines(inputUrl.openStream(), "utf-8");

    Map<String, Double> idfValues = new HashMap<String, Double>();

    File outputFile = new File(
            UTILS_DIR + "/word-idf/" + mode.toString().toLowerCase() + "/" + dataset.toString() + ".txt");

    System.out.println("Computing word idf values");

    if (outputFile.exists()) {
        System.out.println(" - skipping, already exists");
    } else {
        System.out.println(" - this may take a while...");

        // Build up token representations of texts
        Set<List<String>> docs = new HashSet<List<String>>();

        for (String line : lines) {
            List<String> doc = new ArrayList<String>();

            Collection<Lemma> lemmas = getLemmas(line);
            for (Lemma lemma : lemmas) {
                try {
                    String token = lemma.getValue().toLowerCase();
                    doc.add(token);
                } catch (NullPointerException e) {
                    System.err.println(" - unparsable token: " + lemma.getCoveredText());
                }
            }

            docs.add(doc);
        }

        // Get the shared token list
        Set<String> tokens = new HashSet<String>();
        for (List<String> doc : docs) {
            tokens.addAll(doc);
        }

        // Get the document frequency for each token
        for (String token : tokens) {
            double count = 0;
            for (List<String> doc : docs) {
                if (doc.contains(token)) {
                    count++;
                }
            }
            idfValues.put(token, count);
        }

        // Compute the idf
        for (String lemma : idfValues.keySet()) {
            double idf = Math.log10(lines.size() / idfValues.get(lemma));
            idfValues.put(lemma, idf);
        }

        // Store persistently
        StringBuilder sb = new StringBuilder();
        for (String key : idfValues.keySet()) {
            sb.append(key + "\t" + idfValues.get(key) + LF);
        }
        FileUtils.writeStringToFile(outputFile, sb.toString());

        System.out.println(" - done");
    }
}
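The idf value computed above is log10(N / df), with N the number of input lines and df the number of documents containing the token. A small hedged check with made-up counts (the variable names are illustrative only):

int numDocs = 12;      // hypothetical number of lines/documents
double docFreq = 3;    // hypothetical number of documents containing the token
double idf = Math.log10(numDocs / docFreq);  // log10(4) ~ 0.602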
From source file:org.apache.gossip.accrual.FailureDetector.java
public synchronized Double computePhiMeasure(long now) {
    if (latestHeartbeatMs == -1 || descriptiveStatistics.getN() < minimumSamples) {
        return null;
    }
    long delta = now - latestHeartbeatMs;
    try {
        double probability;
        if (distribution.equals("normal")) {
            double standardDeviation = descriptiveStatistics.getStandardDeviation();
            standardDeviation = standardDeviation < 0.1 ? 0.1 : standardDeviation;
            probability = new NormalDistributionImpl(descriptiveStatistics.getMean(), standardDeviation)
                    .cumulativeProbability(delta);
        } else {
            probability = new ExponentialDistributionImpl(descriptiveStatistics.getMean())
                    .cumulativeProbability(delta);
        }
        final double eps = 1e-12;
        if (1 - probability < eps) {
            probability = 1.0;
        }
        return -1.0d * Math.log10(1.0d - probability);
    } catch (MathException | IllegalArgumentException e) {
        LOGGER.debug(e);
        return null;
    }
}
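The returned phi is -log10 of the probability that a heartbeat would still arrive this late, so each unit of phi corresponds to one more "nine" of confidence that the peer has failed. An illustrative check (not part of FailureDetector):

double probability = 0.99;                           // CDF value for the observed delta
double phi = -1.0d * Math.log10(1.0d - probability); // 2.0, i.e. 99% confidence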
From source file:com.slownet5.pgprootexplorer.utils.FileUtils.java
public static String getReadableSize(long size) {
    if (size <= 0)
        return "0B";
    final String[] units = new String[] { "B", "KB", "MB", "GB", "TB" };
    int digitGroups = (int) (Math.log10(size) / Math.log10(1024));
    return new DecimalFormat("#,##0.#").format(size / Math.pow(1024, digitGroups)) + " " + units[digitGroups];
}
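A hypothetical usage of the helper above (assuming a US-style default locale for the decimal separator): 1536 bytes falls into digit group 1 and is rendered in kilobytes.

System.out.println(FileUtils.getReadableSize(1536L));             // "1.5 KB"
System.out.println(FileUtils.getReadableSize(5L * 1024 * 1024));  // "5 MB"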
From source file:org.um.feri.ears.problems.Task.java
/**
 * @param stop the stopping criteria
 * @param eval the maximum number of evaluations allowed
 * @param allowedTime the maximum CPU time allowed in milliseconds
 * @param maxIterations the maximum number of iterations allowed
 * @param epsilon the epsilon value for global optimum
 * @param p the problem
 */
public Task(EnumStopCriteria stop, int eval, long allowedTime, int maxIterations, double epsilon, Problem p) {
    this(stop, eval, allowedTime, maxIterations, epsilon, p, (int) Math.log10((1. / epsilon) + 1));
}
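The last constructor argument derives a decimal precision from epsilon; for example, epsilon = 0.001 yields three decimal places. A hedged check of just that expression:

double epsilon = 0.001;
int precision = (int) Math.log10((1. / epsilon) + 1);  // log10(~1001) ~ 3.0004 -> 3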
From source file:de.tudarmstadt.ukp.similarity.experiments.coling2012.util.CharacterNGramIdfValuesGenerator.java
@SuppressWarnings("unchecked")
public static void computeIdfScores(Dataset dataset, int n) throws Exception {
    File outputFile = new File(UTILS_DIR + "/character-ngrams-idf/" + n + "/" + dataset.toString() + ".txt");

    System.out.println("Computing character " + n + "-grams");

    if (outputFile.exists()) {
        System.out.println(" - skipping, already exists");
    } else {
        System.out.println(" - this may take a while...");

        CollectionReader reader = ColingUtils.getCollectionReader(dataset);

        // Tokenization
        AnalysisEngineDescription seg = createPrimitiveDescription(BreakIteratorSegmenter.class);
        AggregateBuilder builder = new AggregateBuilder();
        builder.add(seg, CombinationReader.INITIAL_VIEW, CombinationReader.VIEW_1);
        builder.add(seg, CombinationReader.INITIAL_VIEW, CombinationReader.VIEW_2);
        AnalysisEngine aggr_seg = builder.createAggregate();

        // Output Writer
        AnalysisEngine writer = createPrimitive(CharacterNGramIdfValuesGeneratorWriter.class,
                CharacterNGramIdfValuesGeneratorWriter.PARAM_OUTPUT_FILE, outputFile.getAbsolutePath());

        SimplePipeline.runPipeline(reader, aggr_seg, writer);

        // We now have plain text format
        List<String> lines = FileUtils.readLines(outputFile);

        Map<String, Double> idfValues = new HashMap<String, Double>();

        CharacterNGramMeasure measure = new CharacterNGramMeasure(n, new HashMap<String, Double>());

        // Get n-gram representations of texts
        List<Set<String>> docs = new ArrayList<Set<String>>();

        for (String line : lines) {
            Set<String> ngrams = measure.getNGrams(line);
            docs.add(ngrams);
        }

        // Get all ngrams
        Set<String> allNGrams = new HashSet<String>();
        for (Set<String> doc : docs)
            allNGrams.addAll(doc);

        // Compute idf values
        for (String ngram : allNGrams) {
            double count = 0;
            for (Set<String> doc : docs) {
                if (doc.contains(ngram))
                    count++;
            }
            idfValues.put(ngram, count);
        }

        // Compute the idf
        for (String lemma : idfValues.keySet()) {
            double idf = Math.log10(lines.size() / idfValues.get(lemma));
            idfValues.put(lemma, idf);
        }

        // Store persistently
        StringBuilder sb = new StringBuilder();
        for (String key : idfValues.keySet()) {
            sb.append(key + "\t" + idfValues.get(key) + LF);
        }
        FileUtils.writeStringToFile(outputFile, sb.toString());

        System.out.println(" - done");
    }
}
From source file:org.lanes.text.mining.UnithoodAnalyser.java
public boolean isUnit(String left, String connector, String right) {
    long timestart = System.currentTimeMillis();

    double N = simobj.getTotalDocCount();
    //double ns = simobj.getTitleCount(left + " " + connector + " " + right);
    //double ps = ns/N;

    boolean isunitornot = false;
    //if(ps > 0){
    double nx = simobj.getDocCount(left);
    double ny = simobj.getDocCount(right);
    double px = nx / N;
    double py = ny / N;
    double nxy = simobj.getDocCount(left, right);
    double pxy = nxy / N;

    double pmi = Math.log10(pxy / (px * py));
    double normalisedpmi = pmi / -Math.log10(pxy);

    double idrleft = (nx - nxy) / nx;
    double idrrght = (ny - nxy) / ny;
    double idr = idrleft * idrrght;

    if ((pmi >= 1.1)
            || ((0.5 <= pmi && pmi < 0.7) && (idr < 0.6))
            || ((0.7 <= pmi && pmi < 0.9) && (0.6 <= idr && idr < 0.75))
            || ((0.9 <= pmi && pmi < 1.1) && (0.75 <= idr && idr < 0.95))) {
        isunitornot = true;
    }
    //}

    return isunitornot;
}
From source file:edu.scripps.fl.curves.plot.GCurvePlot.java
public void addCurve(Curve curve, FitFunction function) {
    double[] yValues = (double[]) ConvertUtils.convert(curve.getResponses(), double[].class);
    double curveMinY = NumberUtils.min(yValues);
    double curveMaxY = NumberUtils.max(yValues);
    // Track the overall plot range across curves
    this.minY = Math.min(minY, curveMinY);
    this.maxY = Math.max(maxY, curveMaxY);
    Data yData = DataUtil.scaleWithinRange(curveMinY, curveMaxY, yValues);

    // Plot concentrations on a log10 scale
    double[] xValues = (double[]) ConvertUtils.convert(curve.getConcentrations(), double[].class);
    for (int ii = 0; ii < xValues.length; ii++) {
        double x = Math.log10(xValues[ii]);
        xValues[ii] = x;
    }
    double curveMinX = NumberUtils.min(xValues);
    double curveMaxX = NumberUtils.max(xValues);
    this.minX = Math.min(minX, curveMinX);
    this.maxX = Math.max(maxX, curveMaxX);
    Data xData = DataUtil.scaleWithinRange(NumberUtils.min(xValues), NumberUtils.max(xValues), xValues);

    String hexColor = Integer
            .toHexString(((java.awt.Color) drawingSupplier.getNextPaint()).getRGB() & 0x00ffffff);
    StringBuffer sb = new StringBuffer();
    sb.append(hexColor);
    while (sb.length() < 6)
        sb.insert(0, "0");
    Color color = Color.newColor(sb.toString());

    XYLine line1 = Plots.newXYLine(xData, yData, getBackgroundColor(), "");
    // line1.setLineStyle(LineStyle.newLineStyle(3, 1, 0));
    line1.addShapeMarkers(Shape.CIRCLE, color, 5);

    XYLine fittedLine = sampleFunctionToLine(curve, function, curveMinX, curveMaxX, 100);
    // fittedLine.setLineStyle(LineStyle.newLineStyle(3, 1, 0));
    fittedLine.setColor(color);

    lines.add(line1);
    lines.add(fittedLine);
}
From source file:com.hviper.codec.uodecode.PeakSignalNoiseRatioTest.java
private double computePsnrPcm16(String uoResource, String referenceResource) throws Exception {
    // Read the UO file and the reference encoder's decode output into byte arrays
    byte uoFile[] = IOUtils.toByteArray(this.getClass().getResourceAsStream(uoResource));
    byte referenceWavFile[] = IOUtils.toByteArray(this.getClass().getResourceAsStream(referenceResource));

    // Decode the UO file ourselves into a byte array
    ByteArrayOutputStream decodedWavOutputStream = new ByteArrayOutputStream(referenceWavFile.length);
    UODecode.uoToPcm16Wav(uoFile, decodedWavOutputStream);
    byte decodedWavFile[] = decodedWavOutputStream.toByteArray();

    // Find the start of the sample data; this will be after a 'data' header and four bytes of
    // content length.
    int dataStart = -1;
    for (int i = 0; i < decodedWavFile.length - 4; ++i) {
        if ((decodedWavFile[i] == 'd') && (decodedWavFile[i + 1] == 'a') && (decodedWavFile[i + 2] == 't')
                && (decodedWavFile[i + 3] == 'a')) {
            dataStart = i + 8; // 8 = length of header + chunk length
            break;
        }
    }
    assertFalse("No 'data' header in decoded output", dataStart < 0);

    // Headers must be equal. Compare as hex strings for better assert failures here.
    String refHeaders = DatatypeConverter.printHexBinary(Arrays.copyOfRange(referenceWavFile, 0, dataStart));
    String ourHeaders = DatatypeConverter.printHexBinary(Arrays.copyOfRange(decodedWavFile, 0, dataStart));
    assertEquals("WAV headers do not match", refHeaders, ourHeaders);
    assertEquals("File lengths do not match", referenceWavFile.length, decodedWavFile.length);

    // Compute total squared error over the little-endian 16-bit samples
    int cursor = dataStart;
    long totalSqError = 0;
    int sampleCount = 0;
    for (; (cursor + 1) < referenceWavFile.length; cursor += 2) {
        short refSample = (short) ((referenceWavFile[cursor] & 0xff)
                | ((referenceWavFile[cursor + 1] & 0xff) << 8));
        short ourSample = (short) ((decodedWavFile[cursor] & 0xff) | ((decodedWavFile[cursor + 1] & 0xff) << 8));
        int absDiff = Math.abs(ourSample - refSample);
        long sqError = ((long) absDiff) * ((long) absDiff);
        totalSqError += sqError;
        ++sampleCount;
    }
    assertNotEquals("No samples read!", 0, sampleCount);

    // Compute the PSNR in decibels; higher the better
    double psnr;
    if (totalSqError > 0) {
        double sqrtMeanSquaredError = Math.sqrt((double) (totalSqError) / (double) (sampleCount));
        double maxValue = 65535.0;
        psnr = 20.0 * Math.log10(maxValue / sqrtMeanSquaredError);
    } else {
        // Identical! Pick a large PSNR result
        psnr = 1000.0;
    }
    return psnr;
}
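A hedged sanity check of the PSNR formula used above: an RMS error of one quantisation step against the 16-bit peak-to-peak range of 65535 gives roughly 96.3 dB, the figure commonly quoted for 16-bit PCM.

double maxValue = 65535.0;
double rmsError = 1.0;
double psnr = 20.0 * Math.log10(maxValue / rmsError);  // ~96.33 dB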
From source file:de.uniba.wiai.lspi.chord.data.ID.java
private static double log2(double x) {
    return Math.log10(x) / Math.log10(2);
}
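This is the usual change-of-base identity log_b(x) = log10(x) / log10(b); a quick inline check with base 2 (the result may differ from the exact value by floating-point rounding):

double x = 1024.0;
double log2x = Math.log10(x) / Math.log10(2);  // ~10.0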