Example usage for java.lang Double compare

List of usage examples for java.lang Double compare

Introduction

On this page you can find example usage for java.lang.Double.compare.

Prototype

public static int compare(double d1, double d2) 

Document

Compares the two specified double values.
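
Note that Double.compare returns a negative value, zero, or a positive value as d1 is numerically less than, equal to, or greater than d2, and unlike the == operator it gives a total ordering over NaN and negative zero. A minimal standalone sketch of those edge cases (not taken from any of the files below):

public class DoubleCompareDemo {
    public static void main(String[] args) {
        System.out.println(Double.compare(1.0, 2.0) < 0); // true: 1.0 is less than 2.0
        System.out.println(Double.compare(2.0, 2.0));     // 0: numerically equal

        // NaN: == is always false, but compare treats NaN as equal to itself
        // and greater than every other value, including positive infinity.
        System.out.println(Double.NaN == Double.NaN);                             // false
        System.out.println(Double.compare(Double.NaN, Double.NaN));               // 0
        System.out.println(Double.compare(Double.NaN, Double.POSITIVE_INFINITY)); // positive

        // Negative zero: == says equal, compare orders -0.0 before 0.0.
        System.out.println(-0.0 == 0.0);               // true
        System.out.println(Double.compare(-0.0, 0.0)); // negative
    }
}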

Usage

From source file:org.sakaiproject.tool.assessment.services.GradingService.java

public void updateItemScore(ItemGradingData gdata, double scoreDifference, PublishedAssessmentIfc pub) {
    try {
        AssessmentGradingData adata = load(gdata.getAssessmentGradingId().toString());
        adata.setItemGradingSet(getItemGradingSet(adata.getAssessmentGradingId().toString()));

        Set itemGradingSet = adata.getItemGradingSet();
        Iterator iter = itemGradingSet.iterator();
        double totalAutoScore = 0;
        double totalOverrideScore = adata.getTotalOverrideScore().doubleValue();
        while (iter.hasNext()) {
            ItemGradingData i = (ItemGradingData) iter.next();
            if (i.getItemGradingId().equals(gdata.getItemGradingId())) {
                i.setAutoScore(gdata.getAutoScore());
                i.setComments(gdata.getComments());
                i.setGradedBy(AgentFacade.getAgentString());
                i.setGradedDate(new Date());
            }
            if (i.getAutoScore() != null)
                totalAutoScore += i.getAutoScore().doubleValue();
        }

        adata.setTotalAutoScore(Double.valueOf(totalAutoScore));
        if (Double.compare(totalAutoScore + totalOverrideScore, 0d) < 0) {
            adata.setFinalScore(Double.valueOf(0d));
        } else {
            adata.setFinalScore(Double.valueOf(totalAutoScore + totalOverrideScore));
        }
        saveOrUpdateAssessmentGrading(adata);
        if (scoreDifference != 0) {
            notifyGradebookByScoringType(adata, pub);
        }
    } catch (GradebookServiceException ge) {
        ge.printStackTrace();
        throw ge;
    } catch (Exception e) {
        e.printStackTrace();
        throw new RuntimeException(e);
    }
}
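
The snippet above uses Double.compare to clamp the recomputed final score at zero. For ordinary (non-NaN) primitive values, Double.compare(x, 0d) < 0 behaves like x < 0; a minimal sketch of the same clamp in isolation (the class and method names are illustrative, not from the Sakai source):

public class ScoreClampSketch {
    // Illustrative: clamp a combined score at zero, as updateItemScore does
    // before persisting the final score.
    static double clampedFinalScore(double totalAutoScore, double totalOverrideScore) {
        double total = totalAutoScore + totalOverrideScore;
        // For non-NaN values this is equivalent to total < 0; Double.compare
        // additionally orders -0.0 below 0.0 and NaN above everything.
        return Double.compare(total, 0d) < 0 ? 0d : total;
    }
}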

From source file:cs.man.ac.uk.stats.ComputeANOVAStats.java

/**
 * Performs an ANOVA analysis on the data read in.
 *
 * @param outputPath the path to output details of the ANOVA analysis to.
 */
private static void ANOVAAnalysis(String outputPath) {
    /**
     * OUTPUT FILE PREPARATION
     */

    // Clear up output path.
    Common.fileDelete(outputPath);

    String tukeyOutputPath = outputPath.replace(".csv", "_HSD.csv");
    Common.fileDelete(tukeyOutputPath);
    Writer.append(tukeyOutputPath,
            "Result 1,Result 2,Test,Metric,MSwg,DFwg,n,alpha,HSD,H0 (1=rejected & 0=accepted),Outcome\n");

    // Write header information to output path
    String[] headers = header.split(",");
    Writer.append(outputPath, headers[0] + ",");

    for (int i = 1; i < headers.length; i++)
        Writer.append(outputPath,
                headers[i] + ",F-ratio,P-value,Fb,Fw,H0 (1=rejected & 0=accepted), alpha=" + alpha + ",");

    Writer.append(outputPath, "\n");

    /**
     * PERFORM ANOVA
     */

    for (Map.Entry<String, Vector<ANOVA>> entry : anovaObjects.entrySet()) {
        String key = entry.getKey();
        Vector<ANOVA> vector = entry.getValue();

        /**
         * OK, it's crucial to understand what is going on here. We have a number of files
         * containing results of algorithm tests. Each file will contain the results of
         * a number of different tests using different parameters. Note that each file contains
         * results for a specific algorithm only.
         * 
         * Now if we want to perform ANOVA analysis on results from multiple algorithms, we
         * must analyze the results in these files together rather than in isolation. So we have
         * the following situation: n files, containing results of m tests, occurring in the same order in each
         * file. These are directly comparable results, for instance:
         * 
         *    FILE 1               FILE 2               FILE N
         *  Test 1               Test 1               Test 1   -|
         *  Test 1               Test 1               Test 1   |
         *  Test 1               Test 1               Test 1   |---> Test Block 1 (i.e. multiple runs of same test)
         *  Test 1               Test 1               Test 1   |
         *  Test 1               Test 1               Test 1   -|
         *  
         *  Test 2               Test 2               Test 2
         *  Test 2               Test 2               Test 2
         *  Test 2               Test 2               Test 2
         *  Test 2               Test 2               Test 2
         *  Test 2               Test 2               Test 2
         *  
         *  Test n               Test n               Test n   -|
         *  Test n               Test n               Test n    |
         *  Test n               Test n               Test n    |---> Test Block n
         *  Test n               Test n               Test n    |
         *  Test n               Test n               Test n   -|
         *  
         *  ** Note each test result is made up of a number of recorded metrics. For instance Test Block 1 in file 1
         *  would look something like this (with four metrics recorded during testing, let's say TP, TN, FP, FN):
         *  
         *  120   ,   6   ,   5   ,   3   -|---> Run 1 --->
         *  118   ,   7   ,   6   ,   4    |            |
         *  122   ,   8   ,   7   ,   5    |            |---> Test Block 1.
         *  130   ,   12  ,   5   ,   13   |            |
         *  100   ,   2   ,   5   ,   7   -|---> Run 5 --->
         *  
         *  The results of each test are actually described in terms of k variables (typically k=16). These variables
         *  include the true positives (TP), false positives (FP), accuracy, f-score etc. Thus to compare the results
         *  we need to do ANOVA analysis using data occurring in each of the three files. However we can only compare
         *  like with like. So in the example above we can only perform ANOVA on comparable test blocks. In
         *  which case ANOVA would be performed on Test 1 data in files 1, 2 and 3, then Test 2 data and so on.
         *  At no point would Test 1 data in any of the files be compared with Test 2 data for example.
         *  
         *  The code below does this. Using arrays we perform ANOVA on each metric
         *  in the test blocks. Clearly this makes the code below somewhat complicated to understand!
         *  I'm genuinely sorry for that; the main reason is that I may have to perform this type of
         *  analysis many thousands of times. But I'll try to explain how it works.
         *  
         *  For each Test block, in each file, an ANOVA object is generated in the code above (in the process() method).
         *  Each ANOVA object essentially contains a matrix of the data collected in a test block. These ANOVA objects
         *  have methods that enable them to calculate the mean and sum of the values in their matrix. For instance,
         *  Test 1 involves ten runs of the same test. For each test, let's say we collect 4 pieces of data, the number of
         *  true positives, true negatives, false positives and false negatives. An ANOVA object for Test 1 for File 1
         *  will contain a matrix of this information, and calculate the means/sums of these four variables storing them in:
         *  
         *  private double sums[];
         *  private double means[];
         *
         *  So then,
         *  
         *  sums[0] contains the sum of true positives.
         *  sums[1] contains the sum of true negatives.
         *  sums[2] contains the sum of false positives.
         *  sums[3] contains the sum of false negatives.  
         *  
         *  And likewise for the means.
         *  
         *  When the process() method terminates we have a number of ANOVA objects stored in a TreeMap structure,
         *  which groups comparable ANOVA objects by storing them in the same vector. 
         *  
         *  Here then we begin iterating through this tree map, and calculate the F-ratio for comparable ANOVA objects.
         *  This way we can calculate all the ANOVA results automatically, for every variable we have.
         */

        /*
         * ANOVA WORKED EXAMPLE (credit Wikipedia!).
         * 
         * Consider an experiment to study the effect of three different levels of a factor
         * on a response (e.g. three levels of a fertilizer on plant growth). If we had 6 observations
         * for each level, we could write the outcome of the experiment in a table like this, 
         * where a1, a2, and a3 are the three levels of the factor being studied. 
         * 
         * a1   a2   a3
         *  6    8   13
         *  8   12    9
         *  4    9   11
         *  5   11    8
         *  3    6    7
         *  4    8   12
         * 
         * The null hypothesis, denoted H0, for the overall F-test for this experiment would be that
         * all three levels of the factor produce the same response, on average. To calculate the F-ratio:
         * 
         * Step 1: Calculate the mean within each group:
         * 
         *  Y1 = ( 6 + 8 + 4 + 5 + 3 + 4 ) / 6    = 5
         *  Y2 = ( 8 + 12 + 9 + 11 + 6 + 8 ) / 6  = 9
         *  Y3 = ( 13 + 9 + 11 + 8 + 7 + 12 ) / 6 = 10
         * 
         * Step 2: Calculate the overall mean, Y:
         * 
         *    Y = (Y1 + Y2 + Y3) / 3 = 8.
         * 
         * Step 3: Calculate the "between-group" sum of squares:
         * 
         *  "between-group" sum of squares = n(Y1-Y)^2 + n(Y2-Y)^2 + n(Y3-Y)^2
         *                          = 6(5-8)^2 + 6(9-8)^2 + 6(9-8)^2
         *                          = 84
         * 
         * Step 4: The between-group degrees of freedom is one less than the number of groups.
         * 
         *   between-group degrees of freedom = a - 1
         *                             = 3-1
         *                             = 2
         *   
         * Step 5: The between-group mean square value is
         * 
         *  between-group mean square value = "between-group" sum of squares / between-group degrees of freedom
         *                          = 84/2
         *                          = 42
         *  
         * Step 6: Calculate the "within-group" sum of squares. Begin by centering the data in each group
         * 
         *        a1            a2             a3
         * 6 - 5 =  1    8 - 9 = -1   13 - 10 =  3
         * 8 - 5 =  3   12 - 9 =  3    9 - 10 = -1
         * 4 - 5 = -1    9 - 9 =  0   11 - 10 =  1
         * 5 - 5 =  0   11 - 9 =  2    8 - 10 = -2
         * 3 - 5 = -2    6 - 9 = -3    7 - 10 = -3
         * 4 - 5 = -1    8 - 9 = -1   12 - 10 =  2
         * 
         *    within-group sum of squares = 1^2 + 3^2 + (-1)^2 + 0^2 + (-2)^2 + (-1)^2 +
         *                           (-1)^2 + 3^2 + 0^2 + 2^2 + (-3)^2 + (-1)^2 +
         *                           3^2 + (-1)^2 + 1^2 + (-2)^2 + (-3)^2 + 2^2
         * 
         *                         = 1 + 9 + 1 + 0 + 4 + 1 + 1 + 9 + 0 + 4 + 9 + 1 + 9 + 1 + 1 + 4 + 9 + 4
         *                         = 68
         * 
         * Step 7: The within-group degrees of freedom is 
         *  
         *  within-group degrees of freedom = a(n-1)
         *                          = 3(6-1)
         *                          = 15
         * 
         * Step 8: Thus the within-group mean square value is,
         * 
         *  within-group mean square value = within-group sum of squares / within-group degrees of freedom
         *                          = 68 / 15
         *                          ≈ 4.5
         * Step 9: The F-ratio is
         * 
         *  F-ratio = between-group mean square value / within-group mean square value
         *        = 42 / 4.5
         *        ≈ 9.3
         *  
         *  The critical value is the number that the test statistic must exceed to reject the test.
         *  In this case, Fcrit(2,15) = 3.68 at alpha = 0.05. Since F = 9.3 > 3.68, the results are
         *  significant at the 5% significance level. One would reject the null hypothesis, concluding
         *  that there is strong evidence that the expected values in the three groups differ. 
         *  The p-value for this test is 0.002.
         */

        /**
         * ANOVA Variables:
         * 
         * a         =   Number of distinct test groups (corresponds to number of input files).
         * 
         * n         =   Number of data items per test group (corresponds to data items in a test block).
         * 
         * overallMeans   =   An array which stores the means for each metric recorded in a test block.
         * 
         * sumSquaresBetweenGroup   =   The "between-group" sum of squares.
         * 
         * freedomBetweenGroup      =   The between-group degrees of freedom (one less than the number of groups).
         * 
         * meanSquareBetweenGroup   =   Stores the between-group mean square values.
         * 
         * sumSquaresWithinGroup    =   The within-group sum of squares (the sum of the squared, group-centred values).
         * 
         * freedomWithinGroup       =   The within-group degrees of freedom, a(n-1).
         * 
         * meanSquareWithinGroup    =   Stores the within-group mean square values.
         * 
         * F_Ratios                 =   The F-ratios, one per recorded metric.
         */

        int a = vector.size();// Number of groups.
        int n = vector.elementAt(0).getRows();// Number of data values per group.

        // Number of recorded metrics per test (number of variables).
        int metrics = vector.elementAt(0).getColumns();

        double[] overallMeans = new double[metrics];
        double[] sumSquaresBetweenGroup = new double[metrics];
        double[] meanSquareBetweenGroup = new double[metrics];
        double[] sumSquaresWithinGroup = new double[metrics];
        double[] meanSquareWithinGroup = new double[metrics];
        double[] F_Ratios = new double[metrics];

        //STEP 1. Calculate the overall means.
        for (int i = 0; i < vector.size(); i++)
            for (int j = 0; j < vector.elementAt(0).getColumns(); j++)
                overallMeans[j] += vector.elementAt(i).getMean(j);

        //STEP 2. Divide the overall means by the number of groups.
        for (int j = 0; j < overallMeans.length; j++)
            overallMeans[j] = overallMeans[j] / (double) vector.size();

        //STEP 3.  Calculate the "between-group" sum of squares:
        for (int i = 0; i < vector.size(); i++)
            for (int j = 0; j < vector.elementAt(0).getColumns(); j++)
                sumSquaresBetweenGroup[j] += (double) n
                        * (Math.pow((vector.elementAt(i).getMean(j) - overallMeans[j]), 2));

        //STEP 4: The between-group degrees of freedom
        double freedomBetweenGroup = a - 1;

        //STEP 5. between-group mean square value
        for (int i = 0; i < meanSquareBetweenGroup.length; i++)
            meanSquareBetweenGroup[i] = sumSquaresBetweenGroup[i] / freedomBetweenGroup;

        //STEP 6. Sum of centred squares (partly already calculated by the ANOVA objects).
        for (int i = 0; i < vector.size(); i++)
            for (int j = 0; j < vector.elementAt(0).getColumns(); j++)
                sumSquaresWithinGroup[j] += vector.elementAt(i).getSumCentredSquares(j);

        //STEP 7. The within-group degrees of freedom.
        double freedomWithinGroup = (double) a * (n - 1);

        //STEP 8. The within-group mean square value is...
        for (int i = 0; i < meanSquareWithinGroup.length; i++)
            meanSquareWithinGroup[i] = sumSquaresWithinGroup[i] / freedomWithinGroup;

        // STEP 9. The final F-ratios are...
        for (int i = 0; i < F_Ratios.length; i++)
            F_Ratios[i] = meanSquareBetweenGroup[i] / meanSquareWithinGroup[i];

        Writer.append(outputPath, key + ",");

        for (int i = 0; i < F_Ratios.length; i++) {
            // The p-value is the probability of obtaining a test statistic,
            // at least as extreme as the one that was actually observed, 
            // assuming that the null hypothesis is true.
            FDistribution fdist = new FDistribution(freedomBetweenGroup, freedomWithinGroup);

            double pValue = (1.0 - fdist.cumulativeProbability(F_Ratios[i]));

            // headers[i]+",F-ratio,P-value,Fb,Fw,H0 (1=rejected & 0=accepted), alpha="+alpha+","
            if (pValue < alpha)
                Writer.append(outputPath, "," + F_Ratios[i] + "," + pValue + "," + freedomBetweenGroup + ","
                        + freedomWithinGroup + "," + "1,,");
            else
                Writer.append(outputPath, "," + F_Ratios[i] + "," + pValue + "," + freedomBetweenGroup + ","
                        + freedomWithinGroup + "," + "0,,");
        }

        Writer.append(outputPath, "\n");

        /**
         * TUKEY TEST
         * 
         * Now we have established the ANOVA results, that is, we know the significance of the variance
         * between the individual test results. But knowing that there is a significant difference is not
         * enough. We need to know which test results were better and which were worse in order to determine
         * which algorithm performed better. To do this we need to perform the Tukey test. It performs a
         * pairwise comparison of the results so that they can be ranked.
         * 
         * The Studentized range statistic can then be calculated for any particular pair as:
         * 
         *    Q = ( ML - MS ) / sqrt( meanSquareWithinGroup / values per sample )
         * 
         *  where ML is the largest mean for a group and MS is the smallest mean for a group.
         */

        // PAIRWISE COMPARISON
        for (int i = 0; i < vector.size(); i++) {
            for (int j = i + 1; j < vector.size(); j++) {
                // Here the comparison is performed. Remember we must do the Tukey test
                // on each metric. So we will calculate the HSD (Honestly Significant Difference)
                // multiple times.

                // For each metric
                for (int k = 0; k < vector.elementAt(i).getColumns(); k++) {
                    double mean_one = vector.elementAt(i).getMean(k);
                    double mean_two = vector.elementAt(j).getMean(k);
                    double meanSquaredWithinGroup = meanSquareWithinGroup[k];
                    double valuesPerSample = vector.elementAt(i).getRows();// All objects have same number of rows here.

                    double Q = 0;

                    // This is a string used to summarize the outcome of the test.
                    String outcome = vector.elementAt(i).getFileName() + " - "
                            + vector.elementAt(j).getFileName() + " +";

                    if (Double.compare(mean_one, mean_two) < 0) // mean_one < mean_two
                    {
                        Q = (mean_two - mean_one) / Math.sqrt(meanSquaredWithinGroup / valuesPerSample);
                        outcome = outcome.replace("-", " < ");
                    } else if (Double.compare(mean_one, mean_two) > 0) // mean_one > mean_two
                    {
                        Q = (mean_one - mean_two) / Math.sqrt(meanSquaredWithinGroup / valuesPerSample);
                        outcome = outcome.replace("-", " > ");
                    }

                    String H0Result = ""; // 1=rejected & 0=accepted
                    double QDist = getQDist(freedomWithinGroup, a, alpha);

                    if (Double.compare(Q, QDist) < 0) {
                        H0Result = "0";
                        outcome = outcome.replace("+", "H0 Accepted");
                    } else if (Double.compare(Q, QDist) > 0) {
                        H0Result = "1";
                        outcome = outcome.replace("+", "H0 Rejected");
                    } else {
                        H0Result = "-1";
                        outcome = outcome.replace("+", "H0 Accepted");
                    }

                    Writer.append(tukeyOutputPath,
                            vector.elementAt(i).getFileName() + "," + vector.elementAt(j).getFileName() + ","
                                    + key + "," + headers[k + 1] + "," + meanSquaredWithinGroup + ","
                                    + freedomWithinGroup + "," + valuesPerSample + "," + alpha + "," + Q + ","
                                    + H0Result + "," + outcome + "\n");
                }

                Writer.append(tukeyOutputPath, ",,,,\n");
            }

            Writer.append(tukeyOutputPath, ",,,,\n");
        }

        //System.out.println("\n\n");
    }
}
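
The worked example embedded in the comments can be verified with a few lines of plain Java. This sketch reproduces Steps 1-9 for the three fertilizer groups and assumes nothing beyond the numbers given above:

public class AnovaWorkedExample {
    public static void main(String[] args) {
        double[][] groups = {
                { 6, 8, 4, 5, 3, 4 },   // a1
                { 8, 12, 9, 11, 6, 8 }, // a2
                { 13, 9, 11, 8, 7, 12 } // a3
        };
        int a = groups.length;    // number of groups
        int n = groups[0].length; // observations per group

        // Steps 1-2: group means and overall mean.
        double[] means = new double[a];
        double overallMean = 0;
        for (int i = 0; i < a; i++) {
            for (double v : groups[i]) {
                means[i] += v;
            }
            means[i] /= n;
            overallMean += means[i];
        }
        overallMean /= a; // (5 + 9 + 10) / 3 = 8

        // Steps 3-5: between-group sum of squares and mean square.
        double ssBetween = 0;
        for (double m : means) {
            ssBetween += n * Math.pow(m - overallMean, 2);
        }
        double msBetween = ssBetween / (a - 1); // 84 / 2 = 42

        // Steps 6-8: within-group sum of squares and mean square.
        double ssWithin = 0;
        for (int i = 0; i < a; i++) {
            for (double v : groups[i]) {
                ssWithin += Math.pow(v - means[i], 2);
            }
        }
        double msWithin = ssWithin / (a * (n - 1)); // 68 / 15, about 4.53

        // Step 9: the F-ratio, about 9.26 (9.3 with the rounding used above).
        System.out.println(msBetween / msWithin);
    }
}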

From source file:edu.harvard.iq.safe.lockss.impl.LOCKSSDaemonStatusTableTO.java

@Override
public Peers tabulateAgreedPeers(long pollCutoffTime) {

    String timeZone = this.getTimezoneOffset();
    //String rawlastPollTime = "03:16:33 04/14/12";

    logger.log(Level.FINE, "timeZone={0}", timeZone);

    logger.log(Level.FINE, "Poll cutoff Time={0}", pollCutoffTime);

    // use map
    List<Map<String, String>> tblh = this.getTableData();

    Peers result = new Peers();

    result.setAuId(this.tableKey);
    result.setAuName(this.tableTitle);

    Set<PeerRepairBox> ipSet100pct = new LinkedHashSet<PeerRepairBox>();
    Set<PeerRepairBox> ipSetNon100pct = new LinkedHashSet<PeerRepairBox>();
    long newestPollDate = 0;
    for (int i = 0; i < tblh.size(); i++) {
        // 1st filter: deal only with consensus-reached peers
        if (!tblh.get(i).get("Last").equals("Yes")) {
            continue;
        }

        // 2nd filter: exclude old-poll peers
        String pcnt = null;
        if (StringUtils.isNotBlank(tblh.get(i).get("LastPercentAgreement"))) {
            pcnt = tblh.get(i).get("LastPercentAgreement").replace("%", "");
            logger.log(Level.FINE, "pcnt is not null:{0}", pcnt);
        } else {
            logger.log(Level.FINE, "pcnt is null or empty:{0}", pcnt);
        }

        String ipAddressFromTable = DaemonStatusDataUtil.getPeerIpAddress(tblh.get(i).get("Box"));
        logger.log(Level.FINE, "{0}-th ip={1} pcnt={2}", new Object[] { i, ipAddressFromTable, pcnt });

        long pollEndTime = DaemonStatusDataUtil.getEpocTimeFromString(tblh.get(i).get("LastAgree"), timeZone);
        logger.log(Level.FINE, "H:pollEndTime is    {0}", pollEndTime);

        if (pollEndTime < pollCutoffTime) {
            logger.log(Level.FINE, "H: {0}: pollEndTime is beyond the cutoff point", ipAddressFromTable);
            continue;
        }
        logger.log(Level.FINE, "H: {0}: pollEndTime is within the cutoff range", ipAddressFromTable);

        if (pollEndTime > newestPollDate) {
            newestPollDate = pollEndTime;
        }
        // older daemon 1.53.3 used "100%" instead of "100.00"

        Double parsedPcnt = 0.0d;

        if (StringUtils.isNotBlank(pcnt)) {
            try {
                parsedPcnt = Double.parseDouble(pcnt);
            } catch (NumberFormatException e) {
                logger.log(Level.WARNING, "percent value(={1}) cannot be parsed for {0}-th auId",
                        new Object[] { i, pcnt });
            } finally {
                logger.log(Level.FINE, "double value(={1}) for {0}-th auId", new Object[] { i, parsedPcnt });

                if (Double.compare(parsedPcnt, 100d) == 0) {
                    // 100% case
                    ipSet100pct.add(new PeerRepairBox(ipAddressFromTable, pollEndTime));
                    logger.log(Level.FINER, "100%: double value(={1}) for {0}-th auId",
                            new Object[] { i, parsedPcnt });
                } else {
                    // less than 100% cases
                    ipSetNon100pct.add(new PeerRepairBox(ipAddressFromTable, pollEndTime, parsedPcnt));
                    logger.log(Level.FINER, "not 100%: double value(={1}) for {0}-th auId",
                            new Object[] { i, parsedPcnt });
                }

            }
        } else {
            logger.log(Level.FINE, "null %: double value(={1}) for {0}-th auId",
                    new Object[] { i, parsedPcnt });
            ipSetNon100pct.add(new PeerRepairBox(ipAddressFromTable, pollEndTime, parsedPcnt));
        }

    }
    logger.log(Level.FINE, "The latest Poll time of this AU={0}", newestPollDate);
    result.setLastAgree(newestPollDate);
    result.setFullyAgreedPeerSet(ipSet100pct);
    result.setNonfullyAgreedPeerSet(ipSetNon100pct);
    result.setPoller(this.ipAddress);

    logger.log(Level.FINE, "H:number of 100% boxes={0}", ipSet100pct.size());
    logger.log(Level.FINE, "H:number of non-100% boxes={0}", ipSetNon100pct.size());

    return result;
}
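
Here Double.compare(parsedPcnt, 100d) == 0 splits peers into fully-agreed (exactly 100%) and partially-agreed sets; the Double wrapper parsedPcnt is auto-unboxed for the call. The exact-equality test is safe because both "100" and "100.00" parse to the same double. A minimal sketch of the check (the class and method names are illustrative):

public class AgreementCheckSketch {
    // Illustrative: decide whether a raw agreement value such as "100.00%"
    // or "97.5%" represents full agreement, as tabulateAgreedPeers does.
    static boolean isFullAgreement(String rawPcnt) {
        double pcnt = Double.parseDouble(rawPcnt.replace("%", ""));
        return Double.compare(pcnt, 100d) == 0;
    }
}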

From source file:ml.shifu.shifu.core.dtrain.nn.AbstractNNWorker.java

/**
 * Add to training set or validation set according to validation rate.
 *
 * @param hashcode
 *            the hash code of the data
 * @param pair
 *            data instance
 * @param isValidation
 *            if it is validation
 * @return true if the pair was added to the training set, false otherwise.
 */
protected boolean addDataPairToDataSet(long hashcode, FloatMLDataPair pair, boolean isValidation) {
    if (this.isKFoldCV) {
        int k = this.modelConfig.getTrain().getNumKFold();
        if (hashcode % k == this.trainerId) {
            this.validationData.add(pair);
            if (isPositive(pair.getIdealArray()[0])) {
                this.positiveValidationCount += 1L;
            } else {
                this.negativeValidationCount += 1L;
            }
            return false;
        } else {
            this.trainingData.add(pair);
            if (isPositive(pair.getIdealArray()[0])) {
                this.positiveTrainCount += 1L;
            } else {
                this.negativeTrainCount += 1L;
            }
            return true;
        }
    }

    if (this.isSpecificValidation) {
        if (isValidation) {
            this.validationData.add(pair);
            if (isPositive(pair.getIdealArray()[0])) {
                this.positiveValidationCount += 1L;
            } else {
                this.negativeValidationCount += 1L;
            }
            return false;
        } else {
            this.trainingData.add(pair);
            if (isPositive(pair.getIdealArray()[0])) {
                this.positiveTrainCount += 1L;
            } else {
                this.negativeTrainCount += 1L;
            }
            return true;
        }
    } else {
        if (Double.compare(this.modelConfig.getValidSetRate(), 0d) != 0) {
            int classValue = (int) (pair.getIdealArray()[0] + 0.01f);
            Random random = null;
            if (this.isStratifiedSampling) {
                // each class use one random instance
                random = validationRandomMap.get(classValue);
                if (random == null) {
                    random = new Random();
                    this.validationRandomMap.put(classValue, random);
                }
            } else {
                // all data use one random instance
                random = validationRandomMap.get(0);
                if (random == null) {
                    random = new Random();
                    this.validationRandomMap.put(0, random);
                }
            }

            if (this.modelConfig.isFixInitialInput()) {
                // For fixed initial input: if hashcode % 100 is in [start-hashcode, end-hashcode), the record goes
                // to validation, otherwise to training. The start hashcode differs between jobs to make sure bagging
                // jobs have different data. If end-hashcode is over 100, then check whether hashcode is in
                // [start-hashcode, 100] or [0, end-hashcode].
                int startHashCode = (100 / this.modelConfig.getBaggingNum()) * this.trainerId;
                int endHashCode = startHashCode
                        + Double.valueOf(this.modelConfig.getValidSetRate() * 100).intValue();
                if (isInRange(hashcode, startHashCode, endHashCode)) {
                    this.validationData.add(pair);
                    if (isPositive(pair.getIdealArray()[0])) {
                        this.positiveValidationCount += 1L;
                    } else {
                        this.negativeValidationCount += 1L;
                    }
                    return false;
                } else {
                    this.trainingData.add(pair);
                    if (isPositive(pair.getIdealArray()[0])) {
                        this.positiveTrainCount += 1L;
                    } else {
                        this.negativeTrainCount += 1L;
                    }
                    return true;
                }
            } else {
                // not fixed initial input, if random value >= validRate, training, otherwise validation.
                if (random.nextDouble() >= this.modelConfig.getValidSetRate()) {
                    this.trainingData.add(pair);
                    if (isPositive(pair.getIdealArray()[0])) {
                        this.positiveTrainCount += 1L;
                    } else {
                        this.negativeTrainCount += 1L;
                    }
                    return true;
                } else {
                    this.validationData.add(pair);
                    if (isPositive(pair.getIdealArray()[0])) {
                        this.positiveValidationCount += 1L;
                    } else {
                        this.negativeValidationCount += 1L;
                    }
                    return false;
                }
            }
        } else {
            this.trainingData.add(pair);
            if (isPositive(pair.getIdealArray()[0])) {
                this.positiveTrainCount += 1L;
            } else {
                this.negativeTrainCount += 1L;
            }
            return true;
        }
    }
}
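
The guard Double.compare(this.modelConfig.getValidSetRate(), 0d) != 0 asks whether any validation split is configured at all before reaching for a Random. A compact sketch of the non-stratified, non-fixed-input branch of that decision (class, method, and parameter names are illustrative):

import java.util.Random;

public class ValidationSplitSketch {
    // Illustrative: a zero valid-set rate sends every record to training;
    // otherwise the random draw decides, as in addDataPairToDataSet above.
    static boolean goesToTraining(double validSetRate, Random random) {
        if (Double.compare(validSetRate, 0d) == 0) {
            return true; // no validation set configured
        }
        // random value >= validRate means training, otherwise validation
        return random.nextDouble() >= validSetRate;
    }
}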

From source file:gov.opm.scrd.batchprocessing.jobs.BatchProcessingJob.java

/**
 * Send file import email.
 *
 * @param procMessage The process message. Used to build the mail message.
 * @param importStatus The file import status
 */
private void sendFileImportEmail(StringBuilder procMessage, ImportStatus importStatus) {

    // Determine the file import mail subject
    String subjectLine = "Service Credit Import: ";
    if (importStatus.getSuccessful() && importStatus.getPaymentsGood() && importStatus.getChangeRecordsGood()) {
        subjectLine += "OK ";
    } else {
        if (!importStatus.getSuccessful()) {
            subjectLine += "BATCH IMPORT ERROR ";
        }
        if (importStatus.getNumberGoodSummaryRecords() > 0) {
            if (Double.compare(importStatus.getFileSummaryTotal().doubleValue(),
                    importStatus.getTotalCheckPayments().add(importStatus.getTotalACHPayments())
                            .add(importStatus.getTotalDupePayments())
                            .add(importStatus.getTotalSkippedPayments()).doubleValue()) != 0) {
                subjectLine += "OUT OF BALANCE! ";
            }
        }
        if (importStatus.getNumberDupeCheckPayments() + importStatus.getNumberDupeAchPayments() > 0) {
            subjectLine += "DUPLICATE PAYMENTS ";
        }
        if (!importStatus.getPaymentsGood()) {
            subjectLine += "PAYMENT ERROR ";
        }
        if (!importStatus.getChangeRecordsGood()) {
            if (importStatus.getSuccessful() && importStatus.getPaymentsGood()) {
                subjectLine += "OK except for ";
            }
            subjectLine += "Change record rejected. ";
        }
    }

    // Notify file import email
    notifyByEmail(procMessage.append(makeImportMailMessage(importStatus)).toString(), subjectLine,
            "FileImport");
    // Audit error
    auditError(subjectLine, makeImportMailMessage(importStatus));
}
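
The out-of-balance test above converts BigDecimal totals to double before handing them to Double.compare, which can hide tiny discrepancies once the amounts exceed the roughly 15-16 significant decimal digits a double can hold. A hedged alternative sketch that keeps the comparison exact (it assumes the getters return BigDecimal, as their use of add(...) and doubleValue() suggests; names are illustrative):

import java.math.BigDecimal;

public class BalanceCheckSketch {
    // Illustrative: exact out-of-balance test on BigDecimal totals,
    // avoiding the lossy double conversion in the original check.
    static boolean isOutOfBalance(BigDecimal fileSummaryTotal, BigDecimal checkPayments,
            BigDecimal achPayments, BigDecimal dupePayments, BigDecimal skippedPayments) {
        BigDecimal accountedFor = checkPayments.add(achPayments).add(dupePayments).add(skippedPayments);
        // compareTo, like Double.compare, returns <0, 0 or >0, and it ignores
        // scale differences such as 2.0 versus 2.00.
        return fileSummaryTotal.compareTo(accountedFor) != 0;
    }
}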

From source file:ml.shifu.shifu.core.dtrain.lr.LogisticRegressionWorker.java

/**
 * Add to training set or validation set according to validation rate.
 *
 * @param hashcode
 *            the hash code of the data
 * @param data
 *            data instance
 * @param isValidation
 *            if it is validation
 * @return true if the data was added to the training set, false otherwise.
 */
protected boolean addDataPairToDataSet(long hashcode, Data data, boolean isValidation) {
    if (this.isKFoldCV) {
        int k = this.modelConfig.getTrain().getNumKFold();
        if (hashcode % k == this.trainerId) {
            this.validationData.append(data);
            if (isPositive(data.outputs[0])) {
                this.positiveValidationCount += 1L;
            } else {
                this.negativeValidationCount += 1L;
            }
            return false;
        } else {
            this.trainingData.append(data);
            if (isPositive(data.outputs[0])) {
                this.positiveTrainCount += 1L;
            } else {
                this.negativeTrainCount += 1L;
            }
            return true;
        }
    }

    if (this.isSpecificValidation) {
        if (isValidation) {
            this.validationData.append(data);
            if (isPositive(data.outputs[0])) {
                this.positiveValidationCount += 1L;
            } else {
                this.negativeValidationCount += 1L;
            }
            return false;
        } else {
            this.trainingData.append(data);
            if (isPositive(data.outputs[0])) {
                this.positiveTrainCount += 1L;
            } else {
                this.negativeTrainCount += 1L;
            }
            return true;
        }
    } else {
        if (Double.compare(this.modelConfig.getValidSetRate(), 0d) != 0) {
            int classValue = (int) (data.outputs[0] + 0.01f);
            Random random = null;
            if (this.isStratifiedSampling) {
                // each class use one random instance
                random = validationRandomMap.get(classValue);
                if (random == null) {
                    random = new Random();
                    this.validationRandomMap.put(classValue, random);
                }
            } else {
                // all data use one random instance
                random = validationRandomMap.get(0);
                if (random == null) {
                    random = new Random();
                    this.validationRandomMap.put(0, random);
                }
            }

            if (this.modelConfig.isFixInitialInput()) {
                // For fixed initial input: if hashcode % 100 is in [start-hashcode, end-hashcode), the record goes
                // to validation, otherwise to training. The start hashcode differs between jobs to make sure bagging
                // jobs have different data. If end-hashcode is over 100, then check whether hashcode is in
                // [start-hashcode, 100] or [0, end-hashcode]. (A plausible sketch of isInRange follows this listing.)
                int startHashCode = (100 / this.modelConfig.getBaggingNum()) * this.trainerId;
                int endHashCode = startHashCode
                        + Double.valueOf(this.modelConfig.getValidSetRate() * 100).intValue();
                if (isInRange(hashcode, startHashCode, endHashCode)) {
                    this.validationData.append(data);
                    if (isPositive(data.outputs[0])) {
                        this.positiveValidationCount += 1L;
                    } else {
                        this.negativeValidationCount += 1L;
                    }
                    return false;
                } else {
                    this.trainingData.append(data);
                    if (isPositive(data.outputs[0])) {
                        this.positiveTrainCount += 1L;
                    } else {
                        this.negativeTrainCount += 1L;
                    }
                    return true;
                }
            } else {
                // not fixed initial input, if random value >= validRate, training, otherwise validation.
                if (random.nextDouble() >= this.modelConfig.getValidSetRate()) {
                    this.trainingData.append(data);
                    if (isPositive(data.outputs[0])) {
                        this.positiveTrainCount += 1L;
                    } else {
                        this.negativeTrainCount += 1L;
                    }
                    return true;
                } else {
                    this.validationData.append(data);
                    if (isPositive(data.outputs[0])) {
                        this.positiveValidationCount += 1L;
                    } else {
                        this.negativeValidationCount += 1L;
                    }
                    return false;
                }
            }
        } else {
            this.trainingData.append(data);
            if (isPositive(data.outputs[0])) {
                this.positiveTrainCount += 1L;
            } else {
                this.negativeTrainCount += 1L;
            }
            return true;
        }
    }
}
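
isInRange itself is not shown in this listing; a plausible sketch consistent with the wrap-around behaviour described in the comment above (the actual Shifu implementation may differ):

public class HashRangeSketch {
    // Plausible sketch, assuming a non-negative hashcode: hashcode % 100
    // falls in [start, end), where an end beyond 100 wraps the window to
    // [start, 100) plus [0, end - 100).
    static boolean isInRange(long hashcode, int startHashCode, int endHashCode) {
        int bucket = (int) (hashcode % 100);
        if (endHashCode <= 100) {
            return bucket >= startHashCode && bucket < endHashCode;
        }
        return bucket >= startHashCode || bucket < endHashCode - 100;
    }
}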

From source file:org.apache.bookkeeper.bookie.CreateNewLogTest.java

@Test
public void testEntryLogManagerMetrics() throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    TestStatsProvider statsProvider = new TestStatsProvider();
    TestStatsLogger statsLogger = statsProvider.getStatsLogger(BookKeeperServerStats.ENTRYLOGGER_SCOPE);
    int maximumNumberOfActiveEntryLogs = 3;
    int entryLogPerLedgerCounterLimitsMultFactor = 2;

    // Creating a new configuration with a number of ledger directories.
    conf.setLedgerDirNames(ledgerDirs);
    // pre-allocation is enabled
    conf.setEntryLogFilePreAllocationEnabled(true);
    conf.setEntryLogPerLedgerEnabled(true);
    conf.setMaximumNumberOfActiveEntryLogs(maximumNumberOfActiveEntryLogs);
    conf.setEntryLogPerLedgerCounterLimitsMultFactor(entryLogPerLedgerCounterLimitsMultFactor);
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    EntryLogger entryLogger = new EntryLogger(conf, ledgerDirsManager, null, statsLogger,
            UnpooledByteBufAllocator.DEFAULT);
    EntryLogManagerForEntryLogPerLedger entrylogManager = (EntryLogManagerForEntryLogPerLedger) entryLogger
            .getEntryLogManager();
    // set same thread executor for entryLoggerAllocator's allocatorExecutor
    setSameThreadExecutorForEntryLoggerAllocator(entryLogger.getEntryLoggerAllocator());

    Counter numOfWriteActiveLedgers = statsLogger.getCounter(BookKeeperServerStats.NUM_OF_WRITE_ACTIVE_LEDGERS);
    Counter numOfWriteLedgersRemovedCacheExpiry = statsLogger
            .getCounter(BookKeeperServerStats.NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY);
    Counter numOfWriteLedgersRemovedCacheMaxSize = statsLogger
            .getCounter(BookKeeperServerStats.NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE);
    Counter numLedgersHavingMultipleEntrylogs = statsLogger
            .getCounter(BookKeeperServerStats.NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS);
    TestOpStatsLogger entryLogsPerLedger = (TestOpStatsLogger) statsLogger
            .getOpStatsLogger(BookKeeperServerStats.ENTRYLOGS_PER_LEDGER);
    // initially all the counters should be 0
    Assert.assertEquals("NUM_OF_WRITE_ACTIVE_LEDGERS", 0, numOfWriteActiveLedgers.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY", 0,
            numOfWriteLedgersRemovedCacheExpiry.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 0,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS", 0,
            numLedgersHavingMultipleEntrylogs.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 0, entryLogsPerLedger.getSuccessCount());

    // lid-1 : 3 entrylogs, lid-2 : 2 entrylogs, lid-3 : 1 entrylog
    int numOfEntrylogsForLedger1 = 3;
    createNewLogs(entrylogManager, 1L, numOfEntrylogsForLedger1);
    int numOfEntrylogsForLedger2 = 2;
    createNewLogs(entrylogManager, 2L, numOfEntrylogsForLedger2);
    createNewLogs(entrylogManager, 3L, 1);

    Assert.assertEquals("NUM_OF_WRITE_ACTIVE_LEDGERS", 3, numOfWriteActiveLedgers.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY", 0,
            numOfWriteLedgersRemovedCacheExpiry.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 0,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS", 2,
            numLedgersHavingMultipleEntrylogs.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 0, entryLogsPerLedger.getSuccessCount());

    /*
     * since entrylog for lid-4 is created and entrylogmap cachesize is 3,
     * lid-1 will be removed from entrylogmap cache
     */
    createNewLogs(entrylogManager, 4L, 1);
    Assert.assertEquals("NUM_OF_WRITE_ACTIVE_LEDGERS", maximumNumberOfActiveEntryLogs,
            numOfWriteActiveLedgers.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 1,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 0, entryLogsPerLedger.getSuccessCount());

    /*
     * entrylog for lid-5, lid-6, lid-7 are created. Since
     * maximumNumberOfActiveEntryLogs = 3 and
     * entryLogPerLedgerCounterLimitsMultFactor = 2, when the entrylog for
     * lid-7 is created, count of lid-1 should be removed from countermap.
     */
    createNewLogs(entrylogManager, 5L, 1);
    createNewLogs(entrylogManager, 6L, 1);
    createNewLogs(entrylogManager, 7L, 1);
    Assert.assertEquals("NUM_OF_WRITE_ACTIVE_LEDGERS", maximumNumberOfActiveEntryLogs,
            numOfWriteActiveLedgers.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 4,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 1, entryLogsPerLedger.getSuccessCount());
    Assert.assertTrue("ENTRYLOGS_PER_LEDGER average value",
            Double.compare(numOfEntrylogsForLedger1, entryLogsPerLedger.getSuccessAverage()) == 0);

    /*
     * entrylog for new lid-8 is created so one more entry from countermap
     * should be removed.
     */
    createNewLogs(entrylogManager, 8L, 4);
    Assert.assertEquals("NUM_OF_WRITE_ACTIVE_LEDGERS", maximumNumberOfActiveEntryLogs,
            numOfWriteActiveLedgers.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 5,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS", 3,
            numLedgersHavingMultipleEntrylogs.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 2, entryLogsPerLedger.getSuccessCount());
    Assert.assertTrue("ENTRYLOGS_PER_LEDGER average value",
            Double.compare((numOfEntrylogsForLedger1 + numOfEntrylogsForLedger2) / 2.0,
                    entryLogsPerLedger.getSuccessAverage()) == 0);

    /*
     * lid-3 is still in countermap. So when new entrylogs are created for
     * lid-3, no new entry from counter should be removed. so
     * entryLogsPerLedger.getSuccessCount() should be still old value. Also,
     * since lid-3 is still in countermap, these new 4 entrylogs should be
     * added to previous value 1 and hence the EntryLogsPerLedger for ledger
     * - 3l should be updated to 5.
     */
    createNewLogs(entrylogManager, 3L, 4);
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 6,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS", 4,
            numLedgersHavingMultipleEntrylogs.get().intValue());
    Assert.assertEquals("Numofentrylogs for ledger: 3l", 5,
            entrylogManager.entryLogsPerLedgerCounter.getCounterMap().get(3L).intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 2, entryLogsPerLedger.getSuccessCount());
}
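
The assertTrue calls above demand bit-exact equality via Double.compare, which holds here because the expected averages (3.0 and (3 + 2) / 2.0 = 2.5) are exactly representable as doubles. When a computed average may not be exactly representable, the usual JUnit idiom is assertEquals with a delta; a small sketch of both styles (the class and method names are illustrative):

import org.junit.Assert;

public class AverageAssertionSketch {
    // Illustrative: exact comparison is safe only when the expected value
    // is exactly representable; the delta form tolerates rounding error.
    static void assertAverage(double expected, double actual) {
        Assert.assertTrue("average value (exact)", Double.compare(expected, actual) == 0);
        Assert.assertEquals("average value (tolerant)", expected, actual, 1e-9);
    }
}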

From source file:org.ow2.proactive_grid_cloud_portal.rm.RMRest.java

@Override
@GET
@GZIP
@Path("stathistory")
@Produces("application/json")
public String getStatHistory(@HeaderParam("sessionid") String sessionId, @QueryParam("range") String range)
        throws InstanceNotFoundException, IntrospectionException, ReflectionException, IOException,
        MalformedObjectNameException, NullPointerException, InterruptedException, NotConnectedException {

    RMProxyUserInterface rm = checkAccess(sessionId);

    // if range String is too large, shorten it
    // to make it recognizable by StatHistoryCaching
    if (range.length() > dataSources.length) {
        range = range.substring(0, dataSources.length);
    }
    // complete range if too short
    while (range.length() < dataSources.length) {
        range += 'a';
    }

    StatHistoryCacheEntry cache = StatHistoryCaching.getInstance().getEntry(range);
    // found unexpired cache entry matching the parameters: return it immediately
    if (cache != null) {
        return cache.getValue();
    }

    long l1 = System.currentTimeMillis();

    ObjectName on = new ObjectName(RMJMXBeans.RUNTIMEDATA_MBEAN_NAME);
    AttributeList attrs = rm.getMBeanAttributes(on, new String[] { "StatisticHistory" });
    Attribute attr = (Attribute) attrs.get(0);
    // content of the RRD4J database backing file
    byte[] rrd4j = (byte[]) attr.getValue();

    File rrd4jDb = File.createTempFile("database", "rr4dj");
    rrd4jDb.deleteOnExit();

    try (OutputStream out = new FileOutputStream(rrd4jDb)) {
        out.write(rrd4j);
    }

    // create RRD4J DB, should be identical to the one held by the RM
    RrdDb db = new RrdDb(rrd4jDb.getAbsolutePath(), true);

    long timeEnd = db.getLastUpdateTime();
    // force float separator for JSON parsing
    DecimalFormatSymbols otherSymbols = new DecimalFormatSymbols(Locale.US);
    otherSymbols.setDecimalSeparator('.');
    // formatting will greatly reduce response size
    DecimalFormat formatter = new DecimalFormat("###.###", otherSymbols);

    // construct the JSON response directly in a String
    StringBuilder result = new StringBuilder();
    result.append("{");

    for (int i = 0; i < dataSources.length; i++) {
        String dataSource = dataSources[i];
        char zone = range.charAt(i);
        long timeStart;

        switch (zone) {
        default:
        case 'a': // 1 minute
            timeStart = timeEnd - 60;
            break;
        case 'm': // 10 minutes
            timeStart = timeEnd - 60 * 10;
            break;
        case 'h': // 1 hour
            timeStart = timeEnd - 60 * 60;
            break;
        case 'H': // 8 hours
            timeStart = timeEnd - 60 * 60 * 8;
            break;
        case 'd': // 1 day
            timeStart = timeEnd - 60 * 60 * 24;
            break;
        case 'w': // 1 week
            timeStart = timeEnd - 60 * 60 * 24 * 7;
            break;
        case 'M': // 1 month
            timeStart = timeEnd - 60 * 60 * 24 * 28;
            break;
        case 'y': // 1 year
            timeStart = timeEnd - 60 * 60 * 24 * 365;
            break;
        }

        FetchRequest req = db.createFetchRequest(ConsolFun.AVERAGE, timeStart, timeEnd);
        req.setFilter(dataSource);
        FetchData fetchData = req.fetchData();
        result.append("\"").append(dataSource).append("\":[");

        double[] values = fetchData.getValues(dataSource);
        for (int j = 0; j < values.length; j++) {
            if (Double.compare(Double.NaN, values[j]) == 0) {
                result.append("null");
            } else {
                result.append(formatter.format(values[j]));
            }
            if (j < values.length - 1)
                result.append(',');
        }
        result.append(']');
        if (i < dataSources.length - 1)
            result.append(',');
    }
    result.append("}");

    db.close();
    rrd4jDb.delete();

    String ret = result.toString();

    StatHistoryCaching.getInstance().addEntry(range, l1, ret);

    return ret;
}
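
Double.compare(Double.NaN, values[j]) == 0 works as a missing-value test because compare treats NaN as equal to itself, something Double.isNaN would also detect. A one-method sketch showing the equivalence (the class and method names are illustrative):

public class NanCheckSketch {
    // Illustrative: both expressions below agree for every double value;
    // Double.isNaN simply states the intent more directly.
    static boolean isMissing(double value) {
        assert (Double.compare(Double.NaN, value) == 0) == Double.isNaN(value);
        return Double.isNaN(value);
    }
}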

From source file:org.esa.nest.gpf.filtering.SpeckleFilterOp.java

/**
 * Get the Lee filtered pixel intensity for pixels in a given rectangular region.
 *
 * @param neighborValues Array holding the pixel values.
 * @param numSamples     The number of samples.
 * @param noDataValue    Place holder for no data value.
 * @return The Lee filtered value.
 * @throws org.esa.beam.framework.gpf.OperatorException If an error occurs in computation of the Lee filtered value.
 */
private double getLeeValue(final double[] neighborValues, final int numSamples, final double noDataValue,
        final double cu, final double cu2) {

    final double mean = getMeanValue(neighborValues, numSamples, noDataValue);
    if (Double.compare(mean, Double.MIN_VALUE) <= 0) {
        return mean;
    }

    final double var = getVarianceValue(neighborValues, numSamples, mean, noDataValue);
    if (Double.compare(var, Double.MIN_VALUE) <= 0) {
        return mean;
    }

    final double ci = Math.sqrt(var) / mean;
    if (ci < cu) {
        return mean;
    }

    final double cp = neighborValues[neighborValues.length / 2];
    final double w = 1 - cu2 / (ci * ci);

    return cp * w + mean * (1 - w);
}
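
Note that Double.MIN_VALUE is the smallest positive double (about 4.9e-324), not the most negative value, so Double.compare(mean, Double.MIN_VALUE) <= 0 accepts zero and every negative mean as well as the sentinel itself. A small sketch of what the guard actually admits (the class and method names are illustrative):

public class LeeGuardSketch {
    // Illustrative: true for any mean <= ~4.9e-324, i.e. zero, negatives and
    // subnormal sentinels alike, not just an exact Double.MIN_VALUE match.
    static boolean belowLeeThreshold(double mean) {
        return Double.compare(mean, Double.MIN_VALUE) <= 0;
    }
}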

From source file:sadl.models.TauPTA.java

@Override
public TimedWord sampleSequence() {
    if (getAnomalyType() == AnomalyInsertionType.NONE) {
        return super.sampleSequence();
    }
    // this TauPTA should sample anomalies of the one specified type
    int currentState = START_STATE;

    final List<String> eventList = new ArrayList<>();
    final TIntList timeList = new TIntArrayList();
    boolean choseFinalState = false;
    @SuppressWarnings("hiding")
    AnomalyInsertionType anomalyType = AnomalyInsertionType.NONE;
    int timedAnomalyCounter = 0;
    while (!choseFinalState) {
        List<Transition> possibleTransitions = getOutTransitions(currentState, true);
        double random = r.nextDouble();
        double newProbSum = -1;
        if (getAnomalyType() == AnomalyInsertionType.TYPE_TWO
                || getAnomalyType() == AnomalyInsertionType.TYPE_FOUR) {
            // Keep only transitions that belong to this TauPTA's anomaly type, plus stopping transitions.
            // The TauPTA has a field containing its anomaly type, so if the TauPTA is of anomaly type 2, then
            // only transitions with anomaly type 2 (or stop transitions) are allowed to be chosen.
            possibleTransitions = possibleTransitions.stream().filter(
                    t -> (t.getAnomalyInsertionType() == getAnomalyType() || t.isStopTraversingTransition()))
                    .collect(Collectors.toList());
            // after that normalize s.t. the remaining transition probs sum up to one (or make the random value smaller)
            newProbSum = possibleTransitions.stream().mapToDouble(t -> t.getProbability()).sum();
            if (!Precision.equals(newProbSum, 1)) {
                logger.debug("New ProbSum={}, so decreasing random value from {} to {}", newProbSum, random,
                        random * newProbSum);
                random *= newProbSum;
            }
        }
        // Sort so that the most probable transition (highest probability) is at index 0.
        Collections.sort(possibleTransitions,
                (t1, t2) -> -Double.compare(t1.getProbability(), t2.getProbability()));
        if (possibleTransitions.size() <= 0) {
            logger.error(
                    "There are no transitions for state {} with newProbSum={} and randomValue={}. This is not possible.",
                    currentState, newProbSum, random);
        }
        double summedProbs = 0;
        int index = -1;
        for (int i = 0; i < possibleTransitions.size(); i++) {
            summedProbs += possibleTransitions.get(i).getProbability();
            if (random < summedProbs) {
                index = i;
                break;
            }
        }
        if (index == -1) {
            logger.error("Found no possible transition from {}", possibleTransitions);
        }
        final Transition chosenTransition = possibleTransitions.get(index);
        if (chosenTransition.isAbnormal()) {
            if (getAnomalyType() != chosenTransition.getAnomalyInsertionType()) {
                // This is a conflict because the anomalyType was already set to anomaly. This should never happen!
                throw new IllegalStateException(
                        "Two anomalies are mixed in this special case. This should never happen.");
            }
            anomalyType = chosenTransition.getAnomalyInsertionType();
        }
        if (chosenTransition.isStopTraversingTransition()) {
            choseFinalState = true;
            // What happens if an abnormal stopping transition (type 5) was chosen?
            // Nothing special: we simply label the sequence as a type 5 anomaly.
        } else if (eventList.size() > MAX_SEQUENCE_LENGTH) {
            throw new IllegalStateException(
                    "A sequence longer than " + MAX_SEQUENCE_LENGTH + " events should have been generated");
        } else {
            currentState = chosenTransition.getToState();
            final Distribution d = transitionDistributions.get(chosenTransition.toZeroProbTransition());
            if (d == null) {
                // maybe this happens because the automaton is more general than the data. So not every possible path in the automaton is represented in
                // the training data.
                throw new IllegalStateException("This should never happen for transition " + chosenTransition);
            }
            int timeValue = (int) d.sample(1, r)[0];
            if (anomalyType == AnomalyInsertionType.TYPE_THREE) {
                if (chosenTransition.isAbnormal()) {
                    timeValue = changeTimeValue(timeValue, ANOMALY_3_CHANGE_RATE);
                    timedAnomalyCounter++;
                }
            } else if (anomalyType == AnomalyInsertionType.TYPE_FOUR) {
                if (chosenTransition.isAbnormal()) {
                    timedAnomalyCounter++;
                    timeValue = changeTimeValue(timeValue, ANOMALY_4_CHANGE_RATE);
                }
            }
            eventList.add(chosenTransition.getSymbol());
            timeList.add(timeValue);
        }
    }
    if (anomalyType == AnomalyInsertionType.TYPE_THREE || anomalyType == AnomalyInsertionType.TYPE_FOUR) {
        logger.debug("{} out of {} transitions are marked with anomaly {}", timedAnomalyCounter,
                eventList.size(), anomalyType);
    }
    if (anomalyType != AnomalyInsertionType.NONE) {
        return new TimedWord(eventList, timeList, ClassLabel.ANOMALY);
    } else {
        return new TimedWord(eventList, timeList, ClassLabel.NORMAL);
    }
}
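
Negating Double.compare in the comparator above sorts the transitions by descending probability. Since Java 8 the same ordering can be written without the manual negation; a sketch (the minimal Transition interface here stands in for the sadl class):

import java.util.Comparator;
import java.util.List;

public class TransitionSortSketch {
    interface Transition {
        double getProbability();
    }

    // Illustrative: two equivalent ways to put the most probable transition first.
    static void sortByProbabilityDescending(List<Transition> transitions) {
        // As in sampleSequence above, negating the ascending comparison:
        transitions.sort((t1, t2) -> -Double.compare(t1.getProbability(), t2.getProbability()));
        // Equivalent and arguably clearer:
        transitions.sort(Comparator.comparingDouble(Transition::getProbability).reversed());
    }
}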