Example usage for java.lang Math log1p

Introduction

On this page you can find usage examples for java.lang Math log1p, collected from open-source projects.

Prototype

public static double log1p(double x) 

Document

Returns the natural logarithm of the sum of the argument and 1. Note that for small values x, the result of log1p(x) is much closer to the true result of ln(1 + x) than the floating-point evaluation of log(1.0 + x).
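
A minimal demonstration of that accuracy difference (a standalone sketch, not taken from any of the projects below):

double x = 1e-17;
// 1.0 + x rounds to exactly 1.0 in double precision, so the naive form returns 0.0
System.out.println(Math.log(1.0 + x)); // 0.0
// log1p evaluates ln(1 + x) directly and preserves the small term
System.out.println(Math.log1p(x));     // 1.0E-17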

Usage

From source file:eu.amidst.core.inference.ImportanceSamplingRobust.java

private double robustSumOfLogarithms(double log_x1, double log_x2) {
    double result;
    if (log_x1 != 0 && log_x2 != 0) {

        double aux_max = Math.max(log_x1, log_x2);
        double aux_min = Math.min(log_x1, log_x2);

        double tail;
        double aux = Math.exp(aux_min - aux_max);
        if (aux < 0.5) {
            // log1p is more accurate than log(1 + aux) when aux is small
            tail = Math.log1p(aux);
        } else {
            tail = Math.log(1 + aux);
        }
        result = aux_max + (Double.isFinite(tail) ? tail : 0);
    } else if (log_x1 == 0) {
        result = log_x2;
    } else {
        result = log_x1;
    }
    return result;
}
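
This helper is the two-term log-sum-exp trick: log(x1 + x2) = max + log(1 + exp(min - max)), where max and min are the larger and smaller of the two logarithms. A minimal sketch of the identity with hypothetical values, outside the AMIDST codebase:

double logX1 = Math.log(3.0);
double logX2 = Math.log(5.0);
double max = Math.max(logX1, logX2);
double min = Math.min(logX1, logX2);
// exp(min - max) <= 1, so the exponential cannot overflow
double logSum = max + Math.log1p(Math.exp(min - max));
System.out.println(logSum);        // 2.0794... = log(3.0 + 5.0)
System.out.println(Math.log(8.0)); // same value, computed directly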

From source file:edu.cornell.med.icb.goby.modes.CompactAlignmentToAnnotationCountsMode.java

/**
 * Calculate the log2 of x+1.
 *
 * @param x the input value
 * @return log2(x + 1) = Math.log1p(x) / Math.log(2)
 */
private double log2(final double x) {
    return Math.log1p(x) / LOG_2;
}
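
The LOG_2 constant is not shown in the excerpt; presumably it holds Math.log(2). A self-contained sketch under that assumption:

private static final double LOG_2 = Math.log(2); // assumed definition, not shown above

private double log2(final double x) {
    return Math.log1p(x) / LOG_2;
}

// log2(7.0) returns 3.0, since log2(7 + 1) = log2(8) = 3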

From source file:org.apache.hadoop.mapred.CreditScheduler.java

public double getJobWeight(JobInProgress job, TaskType taskType) {
    if (!isRunnable(job)) {
        // Job won't launch tasks, but don't return 0 to avoid division errors
        return 1.0;
    } else {
        double weight = 1.0;
        if (sizeBasedWeight) {
            // Set weight based on runnable tasks
            JobInfo info = infos.get(job);
            int runnableTasks = (taskType == TaskType.MAP) ? info.mapSchedulable.getDemand()
                    : info.reduceSchedulable.getDemand();
            weight = Math.log1p(runnableTasks) / Math.log(2);
        }
        weight *= getPriorityFactor(job.getPriority());
        if (weightAdjuster != null) {
            // Run weight through the user-supplied weightAdjuster
            weight = weightAdjuster.adjustWeight(job, taskType, weight);
        }
        return weight;
    }
}
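
With sizeBasedWeight enabled, the weight is log2(1 + runnableTasks): an empty job adds nothing, and each doubling of demand adds roughly one unit of weight. The same transform reappears in the Yunti3Scheduler and FairScheduler examples below. A quick illustration with hypothetical task counts:

for (int tasks : new int[] { 0, 1, 3, 7, 1023 }) {
    double weight = Math.log1p(tasks) / Math.log(2); // log2(1 + tasks)
    System.out.printf("%4d tasks -> weight %.1f%n", tasks, weight);
}
// prints weights 0.0, 1.0, 2.0, 3.0 and 10.0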

From source file:org.apache.hadoop.mapred.Yunti3Scheduler.java

private double calculateWeight(JobInProgress job, YTTaskType taskType, JobInfo jobInfo, YTPool pool) {
    if (!jobInfo.runnable) {
        return 0;
    } else {
        double weight = 1.0;
        if (sizeBasedWeight) {
            // Set weight based on runnable tasks
            weight = Math.log1p(pool.runnableTasks(job, taskType)) / Math.log(2);
        }
        weight *= getPriorityFactor(job.getPriority());
        if (weightAdjuster != null) {
            // Run weight through the user-supplied weightAdjuster
            weight = weightAdjuster.adjustWeight(job, taskType, weight);
        }
        return weight;
    }
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.java

public synchronized ResourceWeights getAppWeight(FSAppAttempt app) {
    double weight = 1.0;
    if (sizeBasedWeight) {
        // Set weight based on current memory demand
        weight = Math.log1p(app.getDemand().getMemorySize()) / Math.log(2);
    }
    weight *= app.getPriority().getPriority();
    if (weightAdjuster != null) {
        // Run weight through the user-supplied weightAdjuster
        weight = weightAdjuster.adjustWeight(app, weight);
    }
    ResourceWeights resourceWeights = app.getResourceWeights();
    resourceWeights.setWeight((float) weight);
    return resourceWeights;
}

From source file:org.apache.mahout.classifier.bayes.mapreduce.common.BayesFeatureMapper.java

/**
 * Counts the number of times a term has been seen with a given label and outputs that count. But this
 * Mapper does more than just output counts. First, it performs weight normalisation. Second, for each
 * unique word in a document it outputs the value 1, to be summed up as the term document frequency,
 * which is later used to calculate the IDF. Third, for each label it outputs the number of times a
 * document was seen (also used in the IDF calculation).
 * 
 * @param key
 *          The label
 * @param value
 *          the features (all unique) associated with this label in StringTuple format
 * @param output
 *          The OutputCollector to write the results to
 * @param reporter
 *          Not used
 */
@Override
public void map(Text key, Text value, final OutputCollector<StringTuple, DoubleWritable> output,
        Reporter reporter) throws IOException {
    final String label = key.toString();
    String[] tokens = SPACE_TAB.split(value.toString());
    OpenObjectIntHashMap<String> wordList = new OpenObjectIntHashMap<String>(tokens.length * gramSize);

    if (gramSize > 1) {
        ShingleFilter sf = new ShingleFilter(new IteratorTokenStream(Iterators.forArray(tokens)), gramSize);
        do {
            String term = sf.getAttribute(CharTermAttribute.class).toString();
            if (!term.isEmpty()) {
                if (wordList.containsKey(term)) {
                    wordList.put(term, 1 + wordList.get(term));
                } else {
                    wordList.put(term, 1);
                }
            }
        } while (sf.incrementToken());
    } else {
        for (String term : tokens) {
            if (wordList.containsKey(term)) {
                wordList.put(term, 1 + wordList.get(term));
            } else {
                wordList.put(term, 1);
            }
        }
    }
    final MutableDouble lengthNormalisationMut = new MutableDouble(0.0);
    wordList.forEachPair(new ObjectIntProcedure<String>() {
        @Override
        public boolean apply(String word, int dKJ) {
            long squared = (long) dKJ * (long) dKJ;
            lengthNormalisationMut.add(squared);
            return true;
        }
    });

    final double lengthNormalisation = Math.sqrt(lengthNormalisationMut.doubleValue());

    // Output Length Normalized + TF Transformed Frequency per Word per Class
    // Log(1 + D_ij) / SQRT( SIGMA_k (D_kj^2) )
    wordList.forEachPair(new ObjectIntProcedure<String>() {
        @Override
        public boolean apply(String token, int dKJ) {
            try {
                StringTuple tuple = new StringTuple();
                tuple.add(BayesConstants.WEIGHT);
                tuple.add(label);
                tuple.add(token);
                DoubleWritable f = new DoubleWritable(Math.log1p(dKJ) / lengthNormalisation);
                output.collect(tuple, f);
            } catch (IOException e) {
                throw new IllegalStateException(e);
            }
            return true;
        }
    });
    reporter.setStatus("Bayes Feature Mapper: Document Label: " + label);

    // Output Document Frequency per Word per Class
    // Corpus Document Frequency (FEATURE_COUNT)
    // Corpus Term Frequency (FEATURE_TF)
    wordList.forEachPair(new ObjectIntProcedure<String>() {
        @Override
        public boolean apply(String token, int dKJ) {
            try {
                StringTuple dfTuple = new StringTuple();
                dfTuple.add(BayesConstants.DOCUMENT_FREQUENCY);
                dfTuple.add(label);
                dfTuple.add(token);
                output.collect(dfTuple, ONE);

                StringTuple tokenCountTuple = new StringTuple();
                tokenCountTuple.add(BayesConstants.FEATURE_COUNT);
                tokenCountTuple.add(token);
                output.collect(tokenCountTuple, ONE);

                StringTuple tokenTfTuple = new StringTuple();
                tokenTfTuple.add(BayesConstants.FEATURE_TF);
                tokenTfTuple.add(token);
                output.collect(tokenTfTuple, new DoubleWritable(dKJ));
            } catch (IOException e) {
                throw new IllegalStateException(e);
            }
            return true;
        }
    });

    // Output that we have seen the label, to calculate the count of documents per class
    StringTuple labelCountTuple = new StringTuple();
    labelCountTuple.add(BayesConstants.LABEL_COUNT);
    labelCountTuple.add(label);
    output.collect(labelCountTuple, ONE);
}
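
The weight emitted per (label, token) pair is the log-damped count divided by the Euclidean length of the document's raw count vector. A compact sketch of that transform with hypothetical counts:

int[] counts = { 3, 4 };                      // term counts d_kj for one document
double norm = Math.sqrt(3 * 3 + 4 * 4);       // SQRT(SIGMA_k (D_kj^2)) = 5.0
for (int d : counts) {
    // log(1 + d) damps large counts before length normalisation
    System.out.println(Math.log1p(d) / norm); // 0.2772..., 0.3218...
}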

From source file:org.apache.mahout.classifier.NewsgroupHelper.java

public Vector encodeFeatureVector(File file, int actual, int leakType, Multiset<String> overallCounts)
        throws IOException {
    long date = (long) (1000 * (DATE_REFERENCE + actual * MONTH + 1 * WEEK * rand.nextDouble()));
    Multiset<String> words = ConcurrentHashMultiset.create();

    try (BufferedReader reader = Files.newReader(file, Charsets.UTF_8)) {
        String line = reader.readLine();
        Reader dateString = new StringReader(DATE_FORMATS[leakType % 3].format(new Date(date)));
        countWords(analyzer, words, dateString, overallCounts);
        while (line != null && !line.isEmpty()) {
            boolean countHeader = (line.startsWith("From:") || line.startsWith("Subject:")
                    || line.startsWith("Keywords:") || line.startsWith("Summary:")) && leakType < 6;
            do {
                Reader in = new StringReader(line);
                if (countHeader) {
                    countWords(analyzer, words, in, overallCounts);
                }
                line = reader.readLine();
            } while (line != null && line.startsWith(" "));
        }
        if (leakType < 3) {
            countWords(analyzer, words, reader, overallCounts);
        }
    }

    Vector v = new RandomAccessSparseVector(FEATURES);
    bias.addToVector("", 1, v);
    for (String word : words.elementSet()) {
        encoder.addToVector(word, Math.log1p(words.count(word)), v);
    }

    return v;
}
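
Each word's count is damped with log1p before being added to the feature vector, so heavy repetition grows the feature weight only logarithmically. For instance (hypothetical counts):

System.out.println(Math.log1p(1));   // 0.6931...
System.out.println(Math.log1p(100)); // 4.6151..., so 100 occurrences weigh about 6.7x one occurrence, not 100x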

From source file:org.apache.mahout.classifier.sgd.TPrior.java

@Override
public double logP(double betaIJ) {
    return Gamma.logGamma((df + 1.0) / 2.0) - Math.log(df * Math.PI) - Gamma.logGamma(df / 2.0)
            - (df + 1.0) / 2.0 * Math.log1p(betaIJ * betaIJ);
}
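
In formula form, writing df for the degrees of freedom and beta for betaIJ, the method evaluates

logP(beta) = logGamma((df + 1)/2) - log(df * PI) - logGamma(df/2) - ((df + 1)/2) * log(1 + beta^2)

a Student-t-style log prior in which Math.log1p supplies the final log(1 + beta^2) term accurately even when beta is close to zero.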

From source file:org.apache.mahout.clustering.dirichlet.UncommonDistributions.java

/**
 * Returns an integer sampled according to this distribution. Takes time proportional to np + 1. (Reference:
 * Non-Uniform Random Variate Generation, Devroye, http://cgm.cs.mcgill.ca/~luc/rnbookindex.html) Second
 * waiting-time algorithm.
 */
public static int rBinomial(int n, double p) {
    if (p >= 1.0) {
        return n; // needed to avoid infinite loops and negative results
    }
    double q = -Math.log1p(-p);
    double sum = 0.0;
    int x = 0;
    while (sum <= q) {
        double u = RANDOM.nextDouble();
        double e = -Math.log(u);
        sum += e / (n - x);
        x++;
    }
    if (x == 0) {
        return 0;
    }
    return x - 1;
}
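
Note the q = -Math.log1p(-p) form: it computes -log(1 - p) without cancellation when p is tiny. A quick sanity check of the sampler (a hypothetical usage, assuming rBinomial and RANDOM from the class above) is that the sample mean over many draws approaches n * p:

int n = 100;
double p = 0.3;
int trials = 100_000;
long sum = 0;
for (int i = 0; i < trials; i++) {
    sum += rBinomial(n, p);
}
System.out.println((double) sum / trials); // close to 30.0 = n * p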

From source file:org.broadinstitute.gatk.utils.MathUtils.java

/**
 * Calculates {@code log(1-exp(a))} without losing precision.
 *
 * <p>
 *     This is based on the approach described in:
 *
 * </p>
 * <p>
 *     Maechler M., Accurately Computing log(1-exp(-|a|)) Assessed by the Rmpfr package, 2012. <br/>
 *     <a href="http://cran.r-project.org/web/packages/Rmpfr/vignettes/log1mexp-note.pdf">Online document</a>.
 *
 * </p>
 *
 * @param a the input exponent.
 * @return {@link Double#NaN NaN} if {@code a > 0}, otherwise the corresponding value.
 */
public static double log1mexp(final double a) {
    if (a > 0)
        return Double.NaN;
    if (a == 0)
        return Double.NEGATIVE_INFINITY;

    return (a < LOG1MEXP_THRESHOLD) ? Math.log1p(-Math.exp(a)) : Math.log(-Math.expm1(a));
}