Example usage for java.util.Random.nextDouble()

List of usage examples for java.util.Random.nextDouble()

Introduction

On this page you can find example usages of java.util.Random.nextDouble().

Prototype

public double nextDouble() 

Document

Returns the next pseudorandom, uniformly distributed double value between 0.0 (inclusive) and 1.0 (exclusive) from this random number generator's sequence.
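
For quick reference, here is a minimal, self-contained sketch (not taken from any of the projects below) of the most common pattern in the examples that follow: draw a value in [0.0, 1.0) and compare it against a probability threshold. The class name and threshold are illustrative.

import java.util.Random;

public class NextDoubleDemo {
    public static void main(String[] args) {
        Random random = new Random(42L); // fixed seed for reproducible output
        double keepProbability = 0.25;   // hypothetical threshold

        for (int i = 0; i < 10; i++) {
            double value = random.nextDouble(); // uniform in [0.0, 1.0)
            boolean keep = value < keepProbability;
            System.out.printf("%.4f -> %s%n", value, keep ? "keep" : "skip");
        }
    }
}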

Usage

From source file:ubic.gemma.core.analysis.expression.coexpression.links.LinkAnalysisServiceImpl.java

/**
 * Write links as text.
 */
private void writeLinks(final LinkAnalysis la, FilterConfig filterConfig, Writer wr) throws IOException {
    Map<CompositeSequence, Set<Gene>> probeToGeneMap = la.getProbeToGeneMap();
    ObjectArrayList links = la.getKeep();
    double subsetSize = la.getConfig().getSubsetSize();
    List<String> buf = new ArrayList<>();
    if (la.getConfig().isSubset() && links.size() > subsetSize) {
        la.getConfig().setSubsetUsed(true);
    }
    wr.write(la.getConfig().toString());
    wr.write(filterConfig.toString());
    NumberFormat nf = NumberFormat.getInstance();
    nf.setMaximumFractionDigits(4);

    Integer probeDegreeThreshold = la.getConfig().getProbeDegreeThreshold();

    int i = 0;
    int keptLinksCount = 0;
    Random generator = new Random();
    double rand;
    double fraction = subsetSize / links.size();
    int skippedDueToDegree = 0;
    for (int n = links.size(); i < n; i++) {

        Object val = links.getQuick(i);
        if (val == null)
            continue;
        Link m = (Link) val;
        Double w = m.getWeight();

        int x = m.getx();
        int y = m.gety();

        if (probeDegreeThreshold > 0 && (la.getProbeDegree(x) > probeDegreeThreshold
                || la.getProbeDegree(y) > probeDegreeThreshold)) {
            skippedDueToDegree++;
            continue;
        }

        CompositeSequence p1 = la.getProbe(x);
        CompositeSequence p2 = la.getProbe(y);

        Set<Gene> g1 = probeToGeneMap.get(p1);
        Set<Gene> g2 = probeToGeneMap.get(p2);

        List<String> genes1 = new ArrayList<>();
        for (Gene cluster : g1) {
            String t = cluster.getOfficialSymbol();
            genes1.add(t);
        }

        List<String> genes2 = new ArrayList<>();
        for (Gene cluster : g2) {
            String t = cluster.getOfficialSymbol();
            genes2.add(t);
        }

        if (genes2.size() == 0 || genes1.size() == 0) {
            continue;
        }

        String gene1String = StringUtils.join(genes1.iterator(), "|");
        String gene2String = StringUtils.join(genes2.iterator(), "|");

        if (gene1String.equals(gene2String)) {
            continue;
        }

        if (++keptLinksCount % 50000 == 0) {
            LinkAnalysisServiceImpl.log.info(keptLinksCount + " links retained");
        }

        if (la.getConfig().isSubsetUsed()) {
            rand = generator.nextDouble();
            if (rand > fraction)
                continue;
        }

        buf.add(p1.getId() + "\t" + p2.getId() + "\t" + gene1String + "\t" + gene2String + "\t" + nf.format(w)
                + "\n");// save links
        // wr.write( p1.getId() + "\t" + p2.getId() + "\t" + gene1String + "\t" + gene2String + "\t" + nf.format( w
        // ) + "\n" );

    }

    wr.write("# totalLinks:" + keptLinksCount + "\n");
    wr.write("# printedLinks:" + buf.size() + "\n");
    wr.write("# skippedDueToHighNodeDegree:" + skippedDueToDegree + "\n");

    for (String line : buf) {// write links to file
        wr.write(line);
    }

    if (la.getConfig().isSubsetUsed()) {// subset option activated
        LinkAnalysisServiceImpl.log.info("Done, " + keptLinksCount + "/" + links.size() + " links kept, "
                + buf.size() + " links printed");
        // wr.write("# Amount of links before subsetting/after subsetting: " + links.size() + "/" + numPrinted +
        // "\n" );
    } else {
        LinkAnalysisServiceImpl.log.info("Done, " + keptLinksCount + "/" + links.size()
                + " links printed (some may have been filtered)");
    }
    wr.flush();

}
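
The method above keeps roughly subsetSize of the links by comparing generator.nextDouble() against fraction = subsetSize / links.size(). A minimal sketch of that subsampling pattern in isolation, assuming a plain List input; the class and method names are illustrative and not part of the Gemma code base:

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class FractionSubsampler {
    /** Keeps each item with probability targetSize / items.size(); the result size is approximate. */
    public static <T> List<T> subsample(List<T> items, int targetSize, Random generator) {
        if (items.size() <= targetSize) {
            return new ArrayList<>(items);
        }
        double fraction = (double) targetSize / items.size();
        List<T> kept = new ArrayList<>();
        for (T item : items) {
            if (generator.nextDouble() <= fraction) {
                kept.add(item);
            }
        }
        return kept;
    }
}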

From source file:ml.shifu.shifu.core.dtrain.dt.DTWorker.java

private float[] sampleWeights(float label) {
    float[] sampleWeights = null;
    // if sampling negatives only or doing k-fold CV, the sample rate is 1d
    double sampleRate = (modelConfig.getTrain().getSampleNegOnly() || this.isKFoldCV) ? 1d
            : modelConfig.getTrain().getBaggingSampleRate();
    int classValue = (int) (label + 0.01f);
    if (this.treeNum == 1 || (this.isGBDT && !this.gbdtSampleWithReplacement)) {
        // if treeNum == 1 or this is GBDT without replacement sampling, don't sample with replacement; for GBDT, each iteration builds one tree
        sampleWeights = new float[1];
        Random random = null;
        if (this.isStratifiedSampling) {
            random = baggingRandomMap.get(classValue);
            if (random == null) {
                random = DTrainUtils.generateRandomBySampleSeed(modelConfig.getTrain().getBaggingSampleSeed(),
                        CommonConstants.NOT_CONFIGURED_BAGGING_SEED);
                baggingRandomMap.put(classValue, random);
            }
        } else {
            random = baggingRandomMap.get(0);
            if (random == null) {
                random = DTrainUtils.generateRandomBySampleSeed(modelConfig.getTrain().getBaggingSampleSeed(),
                        CommonConstants.NOT_CONFIGURED_BAGGING_SEED);
                baggingRandomMap.put(0, random);
            }
        }
        if (random.nextDouble() <= sampleRate) {
            sampleWeights[0] = 1f;
        } else {
            sampleWeights[0] = 0f;
        }
    } else {
        // if GBDT and gbdtSampleWithReplacement == true, still sample with replacement
        sampleWeights = new float[this.treeNum];
        if (this.isStratifiedSampling) {
            PoissonDistribution[] rng = this.baggingRngMap.get(classValue);
            if (rng == null) {
                rng = new PoissonDistribution[treeNum];
                for (int i = 0; i < treeNum; i++) {
                    rng[i] = new PoissonDistribution(sampleRate);
                }
                this.baggingRngMap.put(classValue, rng);
            }
            for (int i = 0; i < sampleWeights.length; i++) {
                sampleWeights[i] = rng[i].sample();
            }
        } else {
            PoissonDistribution[] rng = this.baggingRngMap.get(0);
            if (rng == null) {
                rng = new PoissonDistribution[treeNum];
                for (int i = 0; i < treeNum; i++) {
                    rng[i] = new PoissonDistribution(sampleRate);
                }
                this.baggingRngMap.put(0, rng);
            }
            for (int i = 0; i < sampleWeights.length; i++) {
                sampleWeights[i] = rng[i].sample();
            }
        }
    }
    return sampleWeights;
}
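
The single-tree branch above reduces bagging to a Bernoulli trial: a record gets weight 1 if random.nextDouble() <= sampleRate, otherwise 0. A hedged, stand-alone sketch of that idea; the Poisson branch (which relies on Apache Commons Math) is omitted, and the class name is illustrative:

import java.util.Random;

public class BernoulliBagging {
    /** Returns 1f with probability sampleRate, else 0f; a simplified stand-in for bagging without replacement. */
    public static float sampleWeight(double sampleRate, Random random) {
        return random.nextDouble() <= sampleRate ? 1f : 0f;
    }

    public static void main(String[] args) {
        Random random = new Random(7L);
        int kept = 0;
        int total = 10_000;
        for (int i = 0; i < total; i++) {
            if (sampleWeight(0.8, random) > 0f) {
                kept++;
            }
        }
        System.out.println("kept " + kept + " of " + total + " records"); // roughly 8000
    }
}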

From source file:org.deidentifier.arx.gui.Controller.java

/**
 * Creates a subset via random sampling
 */
public void actionSubsetRandom() {

    String result = main.showInputDialog(main.getShell(), Resources.getMessage("Controller.130"), //$NON-NLS-1$
            Resources.getMessage("Controller.131"), //$NON-NLS-1$
            Resources.getMessage("Controller.132"), //$NON-NLS-1$
            new IInputValidator() {
                @Override
                public String isValid(String arg0) {
                    double value = 0d;
                    try {
                        value = Double.valueOf(arg0);
                    } catch (Exception e) {
                        return "Not a decimal";
                    }
                    if (value < 0d || value > 1d) {
                        return "Out of range";
                    }
                    return null;
                }
            });

    // Check
    if (result == null) {
        return;
    }

    // Convert
    double probability = Double.valueOf(result);

    // Create a data subset via sampling based on beta
    Set<Integer> subsetIndices = new HashSet<Integer>();
    Random random = new SecureRandom();
    int records = model.getInputConfig().getInput().getHandle().getNumRows();
    for (int i = 0; i < records; ++i) {
        if (random.nextDouble() < probability) {
            subsetIndices.add(i);
        }
    }
    DataSubset subset = DataSubset.create(records, subsetIndices);

    this.model.getInputConfig().setResearchSubset(subset.getSet());
    model.setSubsetOrigin(Resources.getMessage("Controller.133")); //$NON-NLS-1$
    update(new ModelEvent(this, ModelPart.RESEARCH_SUBSET, subset.getSet()));
}
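
The controller above draws the subset with a SecureRandom, including each row independently with the entered probability. A minimal sketch of just that row-sampling step, detached from the ARX dialog and model classes; the class and method names are illustrative:

import java.security.SecureRandom;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;

public class RandomRowSubset {
    /** Each row index in [0, numRows) is included independently with the given probability. */
    public static Set<Integer> sampleRows(int numRows, double probability) {
        Random random = new SecureRandom();
        Set<Integer> indices = new HashSet<>();
        for (int i = 0; i < numRows; i++) {
            if (random.nextDouble() < probability) {
                indices.add(i);
            }
        }
        return indices;
    }
}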

From source file:sadl.run.datagenerators.SmacDataGenerator.java

private void run() throws IOException, InterruptedException {
    if (Files.notExists(outputDir)) {
        Files.createDirectories(outputDir);
    }
    Files.walk(outputDir).filter(p -> !Files.isDirectory(p)).forEach(p -> {
        try {
            logger.info("Deleting file {}", p);
            Files.delete(p);
        } catch (final Exception e) {
            e.printStackTrace();
        }
    });
    int k = 0;
    final boolean splitTimedEvents = true;
    // parse timed sequences
    TimedInput trainingTimedSequences = TimedInput.parseAlt(Paths.get(dataString), 1);
    if (splitTimedEvents) {
        final ButlaPdtaLearner butla = new ButlaPdtaLearner(10000, EventsCreationStrategy.SplitEvents,
                KDEFormelVariant.OriginalKDE);
        final Pair<TimedInput, Map<String, Event>> p = butla
                .splitEventsInTimedSequences(trainingTimedSequences);
        trainingTimedSequences = p.getKey();
    }
    final Random r = MasterSeed.nextRandom();
    final List<TimedWord> trainSequences = new ArrayList<>();
    final List<TimedWord> testSequences = new ArrayList<>();
    final TauPtaLearner learner = new TauPtaLearner();
    final TauPTA pta = learner.train(trainingTimedSequences);
    final TauPTA typeTwoNormalPta = SerializationUtils.clone(pta);
    final DecimalFormat df = new DecimalFormat("00");
    // final Path p = Paths.get("pta_normal.dot");
    // pta.toGraphvizFile(outputDir.resolve(p), false);
    // final Process ps = Runtime.getRuntime().exec("dot -Tpdf -O " + outputDir.resolve(p));
    // System.out.println(outputDir.resolve(p));
    // ps.waitFor();
    logger.info("Finished TauPTA ({} states) creation.", pta.getStateCount());
    TauPTA currentPta;
    while (k < 54) {
        for (final AnomalyInsertionType type : AnomalyInsertionType.values()) {
            if (type != AnomalyInsertionType.NONE && type != AnomalyInsertionType.ALL) {
                // if (type != AnomalyInsertionType.TYPE_TWO) {
                // continue;
                // }
                if (type == AnomalyInsertionType.TYPE_TWO) {
                    currentPta = SerializationUtils.clone(typeTwoNormalPta);
                    currentPta.setRandom(MasterSeed.nextRandom());
                } else {
                    currentPta = pta;
                }
                trainSequences.clear();
                testSequences.clear();
                final TauPTA anomaly = SerializationUtils.clone(currentPta);
                logger.info("inserting Anomaly Type {}", type);
                anomaly.makeAbnormal(type);
                if (type == AnomalyInsertionType.TYPE_TWO) {
                    anomaly.removeAbnormalSequences(currentPta);
                }
                for (int i = 0; i < TRAIN_SIZE; i++) {
                    trainSequences.add(currentPta.sampleSequence());
                }
                // PTAs of type 2 and 4 always produce abnormal sequences.
                // Abnormal PTAs of the other types (1, 3, 5) can also produce normal sequences,
                // but the distribution is unknown, so to be fair all anomaly types are sampled the same way.
                for (int i = 0; i < TEST_SIZE; i++) {
                    if (r.nextDouble() < ANOMALY_PERCENTAGE) {
                        boolean wasAnormal = false;
                        TimedWord seq = null;
                        while (!wasAnormal) {
                            seq = anomaly.sampleSequence();
                            wasAnormal = seq.isAnomaly();
                        }
                        testSequences.add(seq);
                    } else {
                        testSequences.add(currentPta.sampleSequence());
                    }
                }
                final TimedInput trainset = new TimedInput(trainSequences);
                final TimedInput testset = new TimedInput(testSequences);
                final Path outputFile = outputDir
                        .resolve(Paths.get(df.format(k) + "_smac_type" + type.getTypeIndex() + ".txt"));
                try (BufferedWriter bw = Files.newBufferedWriter(outputFile, StandardCharsets.UTF_8)) {
                    trainset.toFile(bw, true);
                    bw.write('\n');
                    bw.write(TRAIN_TEST_SEP);
                    bw.write('\n');
                    testset.toFile(bw, true);
                }
                logger.info("Wrote file #{} ({})", k, outputFile);
                k++;
            }
        }
    }
}

From source file:org.dllearner.algorithms.qtl.qald.QALDExperiment.java

private void generateNoise(List<String> examples, String sparqlQuery, double noise, Random randomGen) {
    // generate noise example candidates
    List<String> noiseCandidateExamples = null;
    switch (noiseMethod) {
    case RANDOM:
        noiseCandidateExamples = generateNoiseCandidatesRandom(examples, 10);
        break;
    case SIMILAR:
        noiseCandidateExamples = generateNoiseCandidatesSimilar(examples, sparqlQuery);
        break;
    case SIMILARITY_PARAMETERIZED://TODO implement configurable noise method
        break;
    default:
        noiseCandidateExamples = generateNoiseCandidatesRandom(examples, 10);
        break;
    }
    Collections.shuffle(noiseCandidateExamples, randomGen);

    // add some noise by using instances close to the positive examples
    // we have two ways of adding noise at level t_n:
    // 1: iterate over pos. examples and, if a random number is below t_n, replace the example
    // 2: replace (#posExamples * t_n) randomly chosen pos. examples by randomly chosen negative examples
    boolean probabilityBased = false;

    if (probabilityBased) {
        // 1. way
        List<String> newExamples = new ArrayList<>();
        for (Iterator<String> iterator = examples.iterator(); iterator.hasNext();) {
            String posExample = iterator.next();
            double rnd = randomGen.nextDouble();
            if (rnd <= noise) {
                // remove the positive example
                iterator.remove();
                // add one of the negative examples
                String negExample = noiseCandidateExamples.remove(0);
                newExamples.add(negExample);
                logger.info("Replacing " + posExample + " by " + negExample);
            }
        }
        examples.addAll(newExamples);
    } else {
        // 2. way
        // replace at least 1 but not more than half of the examples
        int upperBound = examples.size() / 2;
        int nrOfPosExamples2Replace = (int) Math.ceil(noise * examples.size());
        nrOfPosExamples2Replace = Math.min(nrOfPosExamples2Replace, upperBound);
        logger.info("replacing " + nrOfPosExamples2Replace + "/" + examples.size()
                + " examples to introduce noise");
        List<String> posExamples2Replace = new ArrayList<>(examples.subList(0, nrOfPosExamples2Replace));
        examples.removeAll(posExamples2Replace);
        List<String> negExamples4Replacement = noiseCandidateExamples.subList(0, nrOfPosExamples2Replace);
        examples.addAll(negExamples4Replacement);
        logger.info("replaced " + posExamples2Replace + " by " + negExamples4Replacement);
    }
}

From source file:ubic.gemma.apps.LinkEvalCli.java

private Collection<GenePair> loadLinks(File f) throws IOException {

    log.info("Loading data from " + f);
    BufferedReader in = new BufferedReader(new FileReader(f));

    Collection<GenePair> geneMap = new HashSet<GenePair>();
    String line;
    Double printedLinks = -1.0;
    Random generator = new Random();
    double rand = 0.0;
    double fraction = 0.0;
    boolean alreadyWarned = false;

    while ((line = in.readLine()) != null) {
        line = line.trim();
        if (line.startsWith("#")) {
            if (line.contains("printedLinks")) {
                int ind = line.indexOf(':');
                printedLinks = Double.parseDouble(line.substring(ind + 1));
                fraction = this.subsetSize / printedLinks;
            }
            continue;
        }
        if (printedLinks == -1.0) {
            System.out.println("Printed link count not found in file header");
            System.exit(0);
        }
        if (selectSubset && printedLinks > this.subsetSize) {
            this.subsetUsed = true;
            rand = generator.nextDouble();
            if (rand > fraction)
                continue;
        }
        String[] fields = StringUtils.split(line, "\t");

        if (fields.length < 2) {
            if (!alreadyWarned) {
                log.warn("Bad field on line: " + line + " (subsequent errors suppressed)");
                alreadyWarned = true;
            }
            continue;
        }

        String g1 = fields[firstProbeColumn];
        String g2 = fields[secondProbeColumn];

        /*
         * Use the probe field, get the gene mapping from the probemap
         */
        CompositeSequence cs1 = css.load(Long.parseLong(g1));
        CompositeSequence cs2 = css.load(Long.parseLong(g2));

        Collection<Gene> genes1 = probemap.get(cs1);
        Collection<Gene> genes2 = probemap.get(cs2);

        GenePair genePair = null;

        if (genes1 == null) {
            log.warn("No genes found for probe ID " + g1 + " in array design");
        } else if (genes2 == null) {
            log.warn("No genes found for probe ID " + g2 + " in array design");
        } else {
            genePair = makeGenePair(genes1, genes2);
        }

        if (!this.checkGenePair(genePair)) {
            continue;
        }

        geneMap.add(genePair);

        if (geneMap.size() % 50000 == 0) {
            log.info("Loaded " + geneMap.size() + " links");
        }
        // compute the median of GO overlaps and do something with the result.

    }
    log.info("Loaded " + geneMap.size() + " links");
    saveCacheToDisk(geneCache, GENE_CACHE);
    return geneMap;
}

From source file:jeplus.JEPlusProject.java

private String[] defaultLHSdistributionSample(int n, String funcstr, int type, Random randomsrc) {
    // Trim off brackets
    int start = funcstr.indexOf("(") + 1;
    int end = funcstr.indexOf(")");
    funcstr = funcstr.substring(start, end).trim();

    ArrayList<String> list = new ArrayList<>();
    String[] params = funcstr.split("\\s*,\\s*");
    // For integer/double types, returns randomized N samples conforming
    // a specified distribution, currently 'gaussian'/'normal'/'n', 
    // 'uniform'/'u', 'triangular'/'tr', or 'discrete'/'d'
    // for examples: @sample(gaussian, 0, 1.5, 20), with mean, sd and N
    //           or  @sample(uniform, -10, 10, 20), with lb, ub and N
    //           or  @sample(triangular, -1.0, 0.3, 1.0, 20), with lb, mode, ub and N
    //           or  @sample(discrete, option_A, 0.3, option_B, 0.5, option_C, 0.2, 20), with option/probability pairs and N
    String distribution = params[0].toLowerCase();
    switch (distribution) {
    case "uniform":
    case "u":
        // requires lb, ub, n
        double lb = Double.parseDouble(params[1]);
        double ub = Double.parseDouble(params[2]);
        for (int i = 0; i < n; i++) {
            if (type == ParameterItem.DOUBLE) {
                double bin = (ub - lb) / n;
                double v = randomsrc.nextDouble() * bin + lb + i * bin;
                list.add(Double.toString(v));
            } else if (type == ParameterItem.INTEGER) {
                double bin = (ub + 1. - lb) / n;
                double v = randomsrc.nextDouble() * bin + lb + i * bin;
                list.add(Integer.toString((int) Math.floor(v)));
            }
        }
        break;
    case "gaussian":
    case "normal":
    case "n": {
        // requires mean, sd, n
        double mean = Double.parseDouble(params[1]);
        double sd = Double.parseDouble(params[2]);
        NormalDistribution Dist = new NormalDistribution(mean, sd);
        double bin = 1.0 / n;
        for (int i = 0; i < n; i++) {
            double a = Dist.inverseCumulativeProbability((i == 0) ? bin / 10 : i * bin); // lb of each bin
            double b = Dist.inverseCumulativeProbability((i == n - 1) ? 1. - bin / n : (i + 1) * bin); // ub of each bin
            double v = randomsrc.nextDouble() * (b - a) + a;
            if (type == ParameterItem.DOUBLE) {
                list.add(Double.toString(v));
            } else if (type == ParameterItem.INTEGER) {
                // Warning: for integer, binomial distribution should be used.
                // the following function is provided just for convenience
                list.add(Long.toString(Math.round(v)));
            }
        }
        break;
    }
    case "lognormal":
    case "ln": {
        // requires mean, sd, n
        double mean = Double.parseDouble(params[1]);
        double sd = Double.parseDouble(params[2]);
        LogNormalDistribution Dist = new LogNormalDistribution(mean, sd);
        double bin = 1.0 / n;
        for (int i = 0; i < n; i++) {
            double a = Dist.inverseCumulativeProbability((i == 0) ? bin / 10 : i * bin); // lb of each bin
            double b = Dist.inverseCumulativeProbability((i == n - 1) ? 1. - bin / n : (i + 1) * bin); // ub of each bin
            double v = randomsrc.nextDouble() * (b - a) + a;
            if (type == ParameterItem.DOUBLE) {
                list.add(Double.toString(v));
            } else if (type == ParameterItem.INTEGER) {
                // Warning: for integer, binomial distribution should be used.
                // the following function is provided just for convenience
                list.add(Long.toString(Math.round(v)));
            }
        }
        break;
    }
    case "exponential":
    case "e": {
        // requires mean, sd, n
        double mean = Double.parseDouble(params[1]);
        ExponentialDistribution Dist = new ExponentialDistribution(mean);
        double bin = 1.0 / n;
        for (int i = 0; i < n; i++) {
            double a = Dist.inverseCumulativeProbability((i == 0) ? bin / 10 : i * bin); // lb of each bin
            double b = Dist.inverseCumulativeProbability((i == n - 1) ? 1. - bin / n : (i + 1) * bin); // ub of each bin
            double v = randomsrc.nextDouble() * (b - a) + a;
            if (type == ParameterItem.DOUBLE) {
                list.add(Double.toString(v));
            } else if (type == ParameterItem.INTEGER) {
                // Warning: for integer, binomial distribution should be used.
                // the following function is provided just for convenience
                list.add(Long.toString(Math.round(v)));
            }
        }
        break;
    }
    case "triangular":
    case "tr": {
        // requires a(lb), c(mode), b(ub), n
        double a = Double.parseDouble(params[1]);
        double c = Double.parseDouble(params[2]);
        double b = Double.parseDouble(params[3]);
        TriangularDistribution Dist = new TriangularDistribution(a, c, b);
        double bin = 1.0 / n;
        for (int i = 0; i < n; i++) {
            a = Dist.inverseCumulativeProbability(i * bin); // lb of each bin
            b = Dist.inverseCumulativeProbability((i + 1) * bin); // ub of each bin
            double v = randomsrc.nextDouble() * (b - a) + a;
            if (type == ParameterItem.DOUBLE) {
                list.add(Double.toString(v));
            } else if (type == ParameterItem.INTEGER) {
                // Warning: for integer, user defined discrete distribution should be used.
                // the following function is provided just for convenience
                list.add(Long.toString(Math.round(v)));
            }
        }
        break;
    }
    case "discrete":
    case "d": {
        // requires op1, prob1, op2, prob2, ..., n
        int nOptions = params.length / 2 - 1;
        String[] options = new String[nOptions];
        double[] probabilities = new double[nOptions];
        double sum = 0;
        for (int i = 0; i < nOptions; i++) {
            options[i] = params[2 * i + 1];
            try {
                probabilities[i] = Double.parseDouble(params[2 * i + 2]);
            } catch (NumberFormatException nfe) {
                probabilities[i] = 0.1;
            }
            sum += probabilities[i];
        }
        RouletteWheel Wheel = new RouletteWheel(probabilities, randomsrc);
        double bin = sum / n;
        for (int i = 0; i < n; i++) {
            double a = i * bin; // lb of each bin
            double b = (i + 1) * bin; // ub of each bin
            int sel = Wheel.spin(a, b);
            list.add(options[sel]);
        }
        break;
    }
    case "custom":
        break;
    }
    return list.toArray(new String[0]);
}

From source file:chibi.gemmaanalysis.LinkEvalCli.java

private Collection<GenePair> loadLinks(File f) throws IOException {

    log.info("Loading data from " + f);
    try (BufferedReader in = new BufferedReader(new FileReader(f));) {

        Collection<GenePair> geneMap = new HashSet<GenePair>();
        String line;
        Double printedLinks = -1.0;
        Random generator = new Random();
        double rand = 0.0;
        double fraction = 0.0;
        boolean alreadyWarned = false;

        while ((line = in.readLine()) != null) {
            line = line.trim();
            if (line.startsWith("#")) {
                if (line.contains("printedLinks")) {
                    int ind = line.indexOf(':');
                    printedLinks = Double.parseDouble(line.substring(ind + 1));
                    fraction = this.subsetSize / printedLinks;
                }
                continue;
            }
            if (printedLinks == -1.0) {
                System.out.println("Printed link count not found in file header");
                System.exit(0);
            }
            if (selectSubset && printedLinks > this.subsetSize) {
                this.subsetUsed = true;
                rand = generator.nextDouble();
                if (rand > fraction)
                    continue;
            }
            String[] fields = StringUtils.split(line, "\t");

            if (fields.length < 2) {
                if (!alreadyWarned) {
                    log.warn("Bad field on line: " + line + " (subsequent errors suppressed)");
                    alreadyWarned = true;
                }
                continue;
            }

            String g1 = fields[firstProbeColumn];
            String g2 = fields[secondProbeColumn];

            /*
             * Use the probe field, get the gene mapping from the probemap
             */
            CompositeSequence cs1 = css.load(Long.parseLong(g1));
            CompositeSequence cs2 = css.load(Long.parseLong(g2));

            Collection<Gene> genes1 = probemap.get(cs1);
            Collection<Gene> genes2 = probemap.get(cs2);

            GenePair genePair = null;

            if (genes1 == null) {
                log.warn("No genes found for probe ID " + g1 + " in array design");
            } else if (genes2 == null) {
                log.warn("No genes found for probe ID " + g2 + " in array design");
            } else {
                genePair = makeGenePair(genes1, genes2);
            }

            if (!this.checkGenePair(genePair)) {
                continue;
            }

            geneMap.add(genePair);

            if (geneMap.size() % 50000 == 0) {
                log.info("Loaded " + geneMap.size() + " links");
            }
            // compute the median of GO overlaps and do something with the result.

        }
        log.info("Loaded " + geneMap.size() + " links");
        saveCacheToDisk(geneCache, GENE_CACHE);
        return geneMap;
    }
}

From source file:com.linkedin.pinot.common.utils.DataTableBuilderTest.java

@Test
public void testSimple() throws Exception {
    final DataType[] columnTypes = DataType.values();
    final String[] columnNames = new String[columnTypes.length];

    for (int i = 0; i < columnTypes.length; i++) {
        columnNames[i] = columnTypes[i].toString();
    }
    final DataSchema schema = new DataSchema(columnNames, columnTypes);

    final DataTableBuilder builder = new DataTableBuilder(schema);
    builder.open();
    final Random r = new Random();
    final int NUM_ROWS = 100;

    final boolean[] boolArr = new boolean[NUM_ROWS];
    final char[] cArr = new char[NUM_ROWS];
    final byte[] bArr = new byte[NUM_ROWS];
    final short[] sArr = new short[NUM_ROWS];
    final int[] iArr = new int[NUM_ROWS];
    final float[] fArr = new float[NUM_ROWS];
    final long[] lArr = new long[NUM_ROWS];
    final double[] dArr = new double[NUM_ROWS];
    final String[] strArr = new String[NUM_ROWS];
    final Object[] oArr = new Object[NUM_ROWS];

    for (int rowId = 0; rowId < NUM_ROWS; rowId++) {
        builder.startRow();
        for (int colId = 0; colId < schema.columnNames.length; colId++) {
            final DataType type = columnTypes[colId];
            switch (type) {
            case BOOLEAN:
                final boolean bool = r.nextBoolean();
                boolArr[rowId] = bool;
                builder.setColumn(colId, bool);
                break;
            case CHAR:
                final char ch = (char) (r.nextInt(26) + 'a');
                cArr[rowId] = ch;
                builder.setColumn(colId, ch);
                break;
            case BYTE:
                final byte b = (byte) (r.nextInt((int) Math.pow(2, 8)));
                bArr[rowId] = b;
                builder.setColumn(colId, b);

                break;
            case SHORT:
                final short s = (short) (r.nextInt((int) Math.pow(2, 16)));
                sArr[rowId] = s;
                builder.setColumn(colId, s);

                break;
            case INT:
                final int i = (r.nextInt());
                iArr[rowId] = i;
                builder.setColumn(colId, i);

                break;
            case LONG:
                final long l = (r.nextLong());
                lArr[rowId] = l;
                builder.setColumn(colId, l);

                break;
            case FLOAT:
                final float f = (r.nextFloat());
                fArr[rowId] = f;
                builder.setColumn(colId, f);
                break;
            case DOUBLE:
                final double d = (r.nextDouble());
                dArr[rowId] = d;
                builder.setColumn(colId, d);
                break;
            case STRING:
                final String str = new BigInteger(130, r).toString(32);
                strArr[rowId] = str;
                builder.setColumn(colId, str);
                break;
            case OBJECT:
                final A obj = new A(r.nextInt());
                oArr[rowId] = obj;
                builder.setColumn(colId, obj);

                break;
            default:
                break;
            }
        }
        builder.finishRow();
    }
    builder.seal();
    final DataTable dataTable = builder.build();
    //System.out.println(dataTable);
    validate(dataTable, NUM_ROWS, schema, boolArr, cArr, bArr, sArr, iArr, fArr, lArr, dArr, strArr, oArr);
    final byte[] bytes = dataTable.toBytes();

    final DataTable newDataTable = new DataTable(bytes);
    validate(newDataTable, NUM_ROWS, schema, boolArr, cArr, bArr, sArr, iArr, fArr, lArr, dArr, strArr, oArr);

}

From source file:org.shaman.terrain.polygonal.PolygonalMapGenerator.java

/**
 * Third step, assign coastline
 */
private void assignCoastline() {
    if (graph == null || coastline == null) {
        return;
    }
    Random rand = new Random(seed);
    //reset
    for (Graph.Center c : graph.centers) {
        c.water = false;
        c.border = false;
        c.ocean = false;
    }
    for (Graph.Corner c : graph.corners) {
        c.water = false;
        c.ocean = false;
    }
    //set water parameter of corners
    int waterCorners = 0;
    switch (coastline) {
    case PERLIN:
        //Fractal perlin noise
        Noise[] noise = new Noise[5];
        for (int i = 0; i < noise.length; ++i) {
            noise[i] = new Noise(rand.nextLong());
        }
        for (Graph.Corner c : graph.corners) {
            float val = 0;
            float octave = 6; //to be tuned
            float amplitude = 0.5f; //to be tuned
            for (int i = 0; i < noise.length; ++i) {
                val += noise[i].noise(c.point.x * octave, c.point.y * octave) * amplitude;
                octave *= 2;
                amplitude /= 2.5;
            }
            float dist = c.point.distanceSquared(0.5f, 0.5f);
            float distInfluence = 2.2f; //to be tuned
            float perlinOffset = -0.2f; //to be tuned
            if (val > perlinOffset + distInfluence * dist && !c.border) {
                c.water = false;
            } else {
                c.water = true;
                waterCorners++;
            }
        }
        break;

    case RADIAL:
        //radial sine waves
        double islandFactor = 1.07;
        int bumps = rand.nextInt(6) + 1;
        double startAngle = rand.nextDouble() * 2 * Math.PI;
        double dipAngle = rand.nextDouble() * 2 * Math.PI;
        double dipWidth = rand.nextDouble() * 0.5 + 0.2;
        for (Graph.Corner c : graph.corners) {
            double x = (c.point.x - 0.5) * 2.2;
            double y = (c.point.y - 0.5) * 2.2;
            double angle = Math.atan2(y, x);
            double length = 0.5 * (Math.max(Math.abs(x), Math.abs(y)) + new Vector2d(x, y).length());
            double r1 = 0.5 * 0.4 * Math.sin(startAngle + bumps * angle + Math.cos((bumps + 3) * angle));
            double r2 = 0.7 - 0.2 * Math.sin(startAngle + bumps * angle - Math.sin((bumps + 2) * angle));
            if (Math.abs(angle - dipAngle) < dipWidth || Math.abs(angle - dipAngle + 2 * Math.PI) < dipWidth
                    || Math.abs(angle - dipAngle - 2 * Math.PI) < dipWidth) {
                r1 = r2 = 0.2;
            }
            if ((length < r1 || (length > r1 * islandFactor && length < r2)) && !c.border) {
                c.water = false;
            } else {
                c.water = true;
                waterCorners++;
            }
        }
        break;
    }
    LOG.log(Level.INFO, "corners with water: {0}, without water: {1}",
            new Object[] { waterCorners, graph.corners.size() - waterCorners });

    findOceans();

    updateBiomesGeometry();
}