Example usage for java.util.concurrent ThreadLocalRandom current

Introduction

This page collects example usages of java.util.concurrent ThreadLocalRandom.current() from open source projects.

Prototype

public static ThreadLocalRandom current() 

Document

Returns the current thread's ThreadLocalRandom.
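
A minimal, self-contained sketch of the calls the examples below rely on; every method used here is standard ThreadLocalRandom/Random API, and the class name is only for the sketch:

import java.util.concurrent.ThreadLocalRandom;

public class ThreadLocalRandomBasics {
    public static void main(String[] args) {
        // Per-thread generator; do not share it across threads or cache it in a field
        ThreadLocalRandom random = ThreadLocalRandom.current();

        int roll = random.nextInt(1, 7);      // uniform int in [1, 7)
        double p = random.nextDouble();       // uniform double in [0.0, 1.0)
        boolean coin = random.nextBoolean();  // fair coin flip

        byte[] buffer = new byte[16];
        random.nextBytes(buffer);             // fill a buffer with random bytes

        System.out.println(roll + " " + p + " " + coin);
    }
}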

Usage

From source file:com.linkedin.pinot.integration.tests.UploadRefreshDeleteIntegrationTest.java

@Test(enabled = false)
public void testUploadRefreshDelete() throws Exception {
    final int THREAD_COUNT = 1;
    final int SEGMENT_COUNT = 5;

    final int MIN_ROWS_PER_SEGMENT = 500;
    final int MAX_ROWS_PER_SEGMENT = 1000;

    final int OPERATIONS_PER_ITERATION = 10;
    final int ITERATION_COUNT = 5;

    final double UPLOAD_PROBABILITY = 0.8d;

    final String[] segmentNames = new String[SEGMENT_COUNT];
    final int[] segmentRowCounts = new int[SEGMENT_COUNT];

    for (int i = 0; i < SEGMENT_COUNT; i++) {
        segmentNames[i] = "segment_" + i;
        segmentRowCounts[i] = 0;
    }

    for (int i = 0; i < ITERATION_COUNT; i++) {
        // Create THREAD_COUNT threads
        ExecutorService executorService = Executors.newFixedThreadPool(THREAD_COUNT);

        // Submit OPERATIONS_PER_ITERATION uploads/deletes
        for (int j = 0; j < OPERATIONS_PER_ITERATION; j++) {
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        ThreadLocalRandom random = ThreadLocalRandom.current();

                        // Pick a random segment
                        int segmentIndex = random.nextInt(SEGMENT_COUNT);
                        String segmentName = segmentNames[segmentIndex];

                        // Pick a random operation
                        if (random.nextDouble() < UPLOAD_PROBABILITY) {
                            // Upload this segment
                            LOGGER.info("Will upload segment {}", segmentName);

                            synchronized (segmentName) {
                                // Create a segment with a random number of rows
                                int segmentRowCount = random.nextInt(MIN_ROWS_PER_SEGMENT,
                                        MAX_ROWS_PER_SEGMENT);
                                LOGGER.info("Generating and uploading segment {} with {} rows", segmentName,
                                        segmentRowCount);
                                generateAndUploadRandomSegment(segmentName, segmentRowCount);

                                // Store the number of rows
                                LOGGER.info("Uploaded segment {} with {} rows", segmentName, segmentRowCount);
                                segmentRowCounts[segmentIndex] = segmentRowCount;
                            }
                        } else {
                            // Delete this segment
                            LOGGER.info("Will delete segment {}", segmentName);

                            synchronized (segmentName) {
                                // Delete this segment
                                LOGGER.info("Deleting segment {}", segmentName);
                                String reply = sendDeleteRequest(
                                        ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL)
                                                .forSegmentDelete("myresource", segmentName));
                                LOGGER.info("Deletion returned {}", reply);

                                // Set the number of rows to zero
                                LOGGER.info("Deleted segment {}", segmentName);
                                segmentRowCounts[segmentIndex] = 0;
                            }
                        }
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            });
        }

        // Wait for all tasks to complete
        executorService.shutdown();
        executorService.awaitTermination(5L, TimeUnit.MINUTES);

        // Count number of expected rows
        int expectedRowCount = 0;
        for (int segmentRowCount : segmentRowCounts) {
            expectedRowCount += segmentRowCount;
        }

        // Wait for up to one minute for the row count to match the expected row count
        LOGGER.info("Awaiting for the row count to match {}", expectedRowCount);
        int pinotRowCount = (int) getCurrentServingNumDocs();
        long timeInOneMinute = System.currentTimeMillis() + 60 * 1000L;
        while (System.currentTimeMillis() < timeInOneMinute && pinotRowCount != expectedRowCount) {
            LOGGER.info("Row count is {}, expected {}, awaiting for row count to match", pinotRowCount,
                    expectedRowCount);
            Thread.sleep(5000L);

            try {
                pinotRowCount = (int) getCurrentServingNumDocs();
            } catch (Exception e) {
                LOGGER.warn("Caught exception while sending query to Pinot, retrying", e);
            }
        }

        // Compare row counts
        Assert.assertEquals(pinotRowCount, expectedRowCount,
                "Expected and actual row counts don't match after waiting one minute");
    }
}

From source file:com.facebook.presto.operator.aggregation.AbstractTestApproximateCountDistinct.java

private List<Object> createRandomSample(int uniques, int total) {
    Preconditions.checkArgument(uniques <= total, "uniques (%s) must be <= total (%s)", uniques, total);

    List<Object> result = new ArrayList<>(total);
    result.addAll(makeRandomSet(uniques));

    Random random = ThreadLocalRandom.current();
    while (result.size() < total) {
        int index = random.nextInt(result.size());
        result.add(result.get(index));
    }

    return result;
}

From source file:org.wso2.carbon.sample.isanalyticsclient.Client.java

private static Object[] getEventDataObjectForSession(Object[] data) {

    int[] actions = new int[] { 1, 2, 3 };

    String sessionId, username, userStoreDomain, remoteIp, tenantDomain, serviceProvider, identityProvider;
    int action;
    Boolean rememberMeFlag;
    int index = ThreadLocalRandom.current().nextInt(0, 15);

    sessionId = UUID.randomUUID().toString();
    Long startTimestamp = (Long) data[22];
    Long renewTimestamp = (Long) data[22];
    Long terminationTimestamp = new Timestamp(System.currentTimeMillis() + 900000).getTime();
    action = actions[index % 3];
    username = String.valueOf(data[4]);
    userStoreDomain = String.valueOf(data[6]);
    remoteIp = String.valueOf(data[8]);
    tenantDomain = String.valueOf(data[7]);
    serviceProvider = String.valueOf(data[11]);
    identityProvider = String.valueOf(data[17]);
    rememberMeFlag = ThreadLocalRandom.current().nextBoolean();
    Long timestamp = (Long) data[22];

    return (new Object[] { sessionId, startTimestamp, renewTimestamp, terminationTimestamp, action, username,
            userStoreDomain, remoteIp, "N/A", tenantDomain, serviceProvider, identityProvider, rememberMeFlag,
            "N/A", timestamp });
}

From source file:ai.grakn.kb.internal.computer.GraknSparkComputer.java

@SuppressWarnings("PMD.UnusedFormalParameter")
private Future<ComputerResult> submitWithExecutor(Executor exec) {
    jobGroupId = Integer.toString(ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE));
    String jobDescription = this.vertexProgram == null ? this.mapReducers.toString()
            : this.vertexProgram + "+" + this.mapReducers;

    // Use different output locations
    this.sparkConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/" + jobGroupId);

    updateConfigKeys(sparkConfiguration);

    final Future<ComputerResult> result = computerService.submit(() -> {
        final long startTime = System.currentTimeMillis();

        // apache and hadoop configurations that are used throughout the graph computer computation
        final org.apache.commons.configuration.Configuration graphComputerConfiguration = new HadoopConfiguration(
                this.sparkConfiguration);
        if (!graphComputerConfiguration.containsKey(Constants.SPARK_SERIALIZER)) {
            graphComputerConfiguration.setProperty(Constants.SPARK_SERIALIZER,
                    GryoSerializer.class.getCanonicalName());
        }
        graphComputerConfiguration.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER_HAS_EDGES,
                this.persist.equals(GraphComputer.Persist.EDGES));

        final Configuration hadoopConfiguration = ConfUtil.makeHadoopConfiguration(graphComputerConfiguration);

        final Storage fileSystemStorage = FileSystemStorage.open(hadoopConfiguration);
        final boolean inputFromHDFS = FileInputFormat.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class));
        final boolean inputFromSpark = PersistedInputRDD.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class));
        final boolean outputToHDFS = FileOutputFormat.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class));
        final boolean outputToSpark = PersistedOutputRDD.class.isAssignableFrom(
                hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class));
        final boolean skipPartitioner = graphComputerConfiguration
                .getBoolean(Constants.GREMLIN_SPARK_SKIP_PARTITIONER, false);
        final boolean skipPersist = graphComputerConfiguration
                .getBoolean(Constants.GREMLIN_SPARK_SKIP_GRAPH_CACHE, false);

        if (inputFromHDFS) {
            String inputLocation = Constants
                    .getSearchGraphLocation(hadoopConfiguration.get(Constants.GREMLIN_HADOOP_INPUT_LOCATION),
                            fileSystemStorage)
                    .orElse(null);
            if (null != inputLocation) {
                try {
                    graphComputerConfiguration.setProperty(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR,
                            FileSystem.get(hadoopConfiguration).getFileStatus(new Path(inputLocation)).getPath()
                                    .toString());
                    hadoopConfiguration.set(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR,
                            FileSystem.get(hadoopConfiguration).getFileStatus(new Path(inputLocation)).getPath()
                                    .toString());
                } catch (final IOException e) {
                    throw new IllegalStateException(e.getMessage(), e);
                }
            }
        }

        final InputRDD inputRDD;
        final OutputRDD outputRDD;
        final boolean filtered;
        try {
            inputRDD = InputRDD.class.isAssignableFrom(
                    hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class))
                            ? hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER,
                                    InputRDD.class, InputRDD.class).newInstance()
                            : InputFormatRDD.class.newInstance();
            outputRDD = OutputRDD.class.isAssignableFrom(
                    hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, Object.class))
                            ? hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER,
                                    OutputRDD.class, OutputRDD.class).newInstance()
                            : OutputFormatRDD.class.newInstance();

            // if the input class can filter on load, then set the filters
            if (inputRDD instanceof InputFormatRDD
                    && GraphFilterAware.class.isAssignableFrom(hadoopConfiguration.getClass(
                            Constants.GREMLIN_HADOOP_GRAPH_READER, InputFormat.class, InputFormat.class))) {
                GraphFilterAware.storeGraphFilter(graphComputerConfiguration, hadoopConfiguration,
                        this.graphFilter);
                filtered = false;
            } else if (inputRDD instanceof GraphFilterAware) {
                ((GraphFilterAware) inputRDD).setGraphFilter(this.graphFilter);
                filtered = false;
            } else
                filtered = this.graphFilter.hasFilter();
        } catch (final InstantiationException | IllegalAccessException e) {
            throw new IllegalStateException(e.getMessage(), e);
        }

        // create the spark context from the graph computer configuration
        final JavaSparkContext sparkContext = new JavaSparkContext(Spark.create(hadoopConfiguration));
        final Storage sparkContextStorage = SparkContextStorage.open();

        sparkContext.setJobGroup(jobGroupId, jobDescription);

        GraknSparkMemory memory = null;
        // delete output location
        final String outputLocation = hadoopConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, null);
        if (null != outputLocation) {
            if (outputToHDFS && fileSystemStorage.exists(outputLocation)) {
                fileSystemStorage.rm(outputLocation);
            }
            if (outputToSpark && sparkContextStorage.exists(outputLocation)) {
                sparkContextStorage.rm(outputLocation);
            }
        }

        // the Spark application name will always be set by SparkContextStorage,
        // thus, INFO the name to make it easier to debug
        logger.debug(Constants.GREMLIN_HADOOP_SPARK_JOB_PREFIX
                + (null == this.vertexProgram ? "No VertexProgram" : this.vertexProgram) + "["
                + this.mapReducers + "]");

        // add the project jars to the cluster
        this.loadJars(hadoopConfiguration, sparkContext);
        updateLocalConfiguration(sparkContext, hadoopConfiguration);

        // create a message-passing friendly rdd from the input rdd
        boolean partitioned = false;
        JavaPairRDD<Object, VertexWritable> loadedGraphRDD = inputRDD.readGraphRDD(graphComputerConfiguration,
                sparkContext);

        // if there are vertex or edge filters, filter the loaded graph rdd prior to partitioning and persisting
        if (filtered) {
            this.logger.debug("Filtering the loaded graphRDD: " + this.graphFilter);
            loadedGraphRDD = GraknSparkExecutor.applyGraphFilter(loadedGraphRDD, this.graphFilter);
        }
        // if the loaded graph RDD is already partitioned use that partitioner,
        // else partition it with HashPartitioner
        if (loadedGraphRDD.partitioner().isPresent()) {
            this.logger.debug("Using the existing partitioner associated with the loaded graphRDD: "
                    + loadedGraphRDD.partitioner().get());
        } else {
            if (!skipPartitioner) {
                final Partitioner partitioner = new HashPartitioner(
                        this.workersSet ? this.workers : loadedGraphRDD.partitions().size());
                this.logger.debug("Partitioning the loaded graphRDD: " + partitioner);
                loadedGraphRDD = loadedGraphRDD.partitionBy(partitioner);
                partitioned = true;
                assert loadedGraphRDD.partitioner().isPresent();
            } else {
                // no easy way to test this with a test case
                assert skipPartitioner == !loadedGraphRDD.partitioner().isPresent();

                this.logger.debug("Partitioning has been skipped for the loaded graphRDD via "
                        + Constants.GREMLIN_SPARK_SKIP_PARTITIONER);
            }
        }
        // if the loaded graphRDD was already partitioned previously,
        // then this coalesce/repartition will not take place
        if (this.workersSet) {
            // ensures that the loaded graphRDD does not have more partitions than workers
            if (loadedGraphRDD.partitions().size() > this.workers) {
                loadedGraphRDD = loadedGraphRDD.coalesce(this.workers);
            } else {
                // ensures that the loaded graphRDD does not have less partitions than workers
                if (loadedGraphRDD.partitions().size() < this.workers) {
                    loadedGraphRDD = loadedGraphRDD.repartition(this.workers);
                }
            }
        }
        // persist the vertex program loaded graph as specified by configuration
        // or else use default cache() which is MEMORY_ONLY
        if (!skipPersist && (!inputFromSpark || partitioned || filtered)) {
            loadedGraphRDD = loadedGraphRDD.persist(StorageLevel.fromString(
                    hadoopConfiguration.get(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY")));
        }
        // final graph with view
        // (for persisting and/or mapReducing -- may be null and thus, possible to save space/time)
        JavaPairRDD<Object, VertexWritable> computedGraphRDD = null;
        try {
            ////////////////////////////////
            // process the vertex program //
            ////////////////////////////////
            if (null != this.vertexProgram) {
                memory = new GraknSparkMemory(this.vertexProgram, this.mapReducers, sparkContext);
                /////////////////
                // if there is a registered VertexProgramInterceptor, use it to bypass the GraphComputer semantics
                if (graphComputerConfiguration
                        .containsKey(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR)) {
                    try {
                        final GraknSparkVertexProgramInterceptor<VertexProgram> interceptor = (GraknSparkVertexProgramInterceptor) Class
                                .forName(graphComputerConfiguration
                                        .getString(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR))
                                .newInstance();
                        computedGraphRDD = interceptor.apply(this.vertexProgram, loadedGraphRDD, memory);
                    } catch (final ClassNotFoundException | IllegalAccessException | InstantiationException e) {
                        throw new IllegalStateException(e.getMessage());
                    }
                } else {
                    // standard GraphComputer semantics
                    // get a configuration that will be propagated to all workers
                    final HadoopConfiguration vertexProgramConfiguration = new HadoopConfiguration();
                    this.vertexProgram.storeState(vertexProgramConfiguration);
                    // set up the vertex program and wire up configurations
                    this.vertexProgram.setup(memory);
                    JavaPairRDD<Object, ViewIncomingPayload<Object>> viewIncomingRDD = null;
                    memory.broadcastMemory(sparkContext);
                    // execute the vertex program
                    while (true) {
                        if (Thread.interrupted()) {
                            sparkContext.cancelAllJobs();
                            throw new TraversalInterruptedException();
                        }
                        memory.setInExecute(true);
                        viewIncomingRDD = GraknSparkExecutor.executeVertexProgramIteration(loadedGraphRDD,
                                viewIncomingRDD, memory, graphComputerConfiguration,
                                vertexProgramConfiguration);
                        memory.setInExecute(false);
                        if (this.vertexProgram.terminate(memory)) {
                            break;
                        } else {
                            memory.incrIteration();
                            memory.broadcastMemory(sparkContext);
                        }
                    }
                    // if the graph will be continued to be used (persisted or mapreduced),
                    // then generate a view+graph
                    if ((null != outputRDD && !this.persist.equals(Persist.NOTHING))
                            || !this.mapReducers.isEmpty()) {
                        computedGraphRDD = GraknSparkExecutor.prepareFinalGraphRDD(loadedGraphRDD,
                                viewIncomingRDD, this.vertexProgram.getVertexComputeKeys());
                        assert null != computedGraphRDD && computedGraphRDD != loadedGraphRDD;
                    } else {
                        // ensure that the computedGraphRDD was not created
                        assert null == computedGraphRDD;
                    }
                }
                /////////////////
                memory.complete(); // drop all transient memory keys
                // write the computed graph to the respective output (rdd or output format)
                if (null != outputRDD && !this.persist.equals(Persist.NOTHING)) {
                    // the logic holds that a computedGraphRDD must be created at this point
                    assert null != computedGraphRDD;

                    outputRDD.writeGraphRDD(graphComputerConfiguration, computedGraphRDD);
                }
            }

            final boolean computedGraphCreated = computedGraphRDD != null && computedGraphRDD != loadedGraphRDD;
            if (!computedGraphCreated) {
                computedGraphRDD = loadedGraphRDD;
            }

            final Memory.Admin finalMemory = null == memory ? new MapMemory() : new MapMemory(memory);

            //////////////////////////////
            // process the map reducers //
            //////////////////////////////
            if (!this.mapReducers.isEmpty()) {
                // create a mapReduceRDD for executing the map reduce jobs on
                JavaPairRDD<Object, VertexWritable> mapReduceRDD = computedGraphRDD;
                if (computedGraphCreated && !outputToSpark) {
                    // drop all the edges of the graph as they are not used in mapReduce processing
                    mapReduceRDD = computedGraphRDD.mapValues(vertexWritable -> {
                        vertexWritable.get().dropEdges(Direction.BOTH);
                        return vertexWritable;
                    });
                    // if there is only one MapReduce to execute, don't bother wasting the clock cycles.
                    if (this.mapReducers.size() > 1) {
                        mapReduceRDD = mapReduceRDD.persist(StorageLevel.fromString(hadoopConfiguration
                                .get(Constants.GREMLIN_SPARK_GRAPH_STORAGE_LEVEL, "MEMORY_ONLY")));
                    }
                }

                for (final MapReduce mapReduce : this.mapReducers) {
                    // execute the map reduce job
                    final HadoopConfiguration newApacheConfiguration = new HadoopConfiguration(
                            graphComputerConfiguration);
                    mapReduce.storeState(newApacheConfiguration);
                    // map
                    final JavaPairRDD mapRDD = GraknSparkExecutor.executeMap(mapReduceRDD, mapReduce,
                            newApacheConfiguration);
                    // combine
                    final JavaPairRDD combineRDD = mapReduce.doStage(MapReduce.Stage.COMBINE)
                            ? GraknSparkExecutor.executeCombine(mapRDD, newApacheConfiguration)
                            : mapRDD;
                    // reduce
                    final JavaPairRDD reduceRDD = mapReduce.doStage(MapReduce.Stage.REDUCE)
                            ? GraknSparkExecutor.executeReduce(combineRDD, mapReduce, newApacheConfiguration)
                            : combineRDD;
                    // write the map reduce output back to disk and computer result memory
                    if (null != outputRDD) {
                        mapReduce.addResultToMemory(finalMemory, outputRDD.writeMemoryRDD(
                                graphComputerConfiguration, mapReduce.getMemoryKey(), reduceRDD));
                    }
                }
                // if the mapReduceRDD is not simply the computed graph, unpersist the mapReduceRDD
                if (computedGraphCreated && !outputToSpark) {
                    assert loadedGraphRDD != computedGraphRDD;
                    assert mapReduceRDD != computedGraphRDD;
                    mapReduceRDD.unpersist();
                } else {
                    assert mapReduceRDD == computedGraphRDD;
                }
            }

            // unpersist the loaded graph if it will not be used again (no PersistedInputRDD)
            // if the graphRDD was loaded from Spark, but then partitioned or filtered, it's a different RDD
            if (!inputFromSpark || partitioned || filtered) {
                loadedGraphRDD.unpersist();
            }
            // unpersist the computed graph if it will not be used again (no PersistedOutputRDD)
            // if the computed graph is the loadedGraphRDD because it was not mutated and not-unpersisted,
            // then don't unpersist the computedGraphRDD/loadedGraphRDD
            if ((!outputToSpark || this.persist.equals(GraphComputer.Persist.NOTHING))
                    && computedGraphCreated) {
                computedGraphRDD.unpersist();
            }
            // delete any file system or rdd data if persist nothing
            if (null != outputLocation && this.persist.equals(GraphComputer.Persist.NOTHING)) {
                if (outputToHDFS) {
                    fileSystemStorage.rm(outputLocation);
                }
                if (outputToSpark) {
                    sparkContextStorage.rm(outputLocation);
                }
            }
            // update runtime and return the newly computed graph
            finalMemory.setRuntime(System.currentTimeMillis() - startTime);
            // clear properties that should not be propagated in an OLAP chain
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_HADOOP_GRAPH_FILTER);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_HADOOP_VERTEX_PROGRAM_INTERCEPTOR);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_SPARK_SKIP_GRAPH_CACHE);
            graphComputerConfiguration.clearProperty(Constants.GREMLIN_SPARK_SKIP_PARTITIONER);
            return new DefaultComputerResult(InputOutputHelper.getOutputGraph(graphComputerConfiguration,
                    this.resultGraph, this.persist), finalMemory.asImmutable());
        } catch (Exception e) {
            // So it throws the same exception as tinker does
            throw new RuntimeException(e);
        }
    });
    computerService.shutdown();
    return result;
}

From source file:org.briljantframework.data.dataframe.DataFrames.java

/**
 * Returns a column-permuted shallow copy of {@code in}.
 *
 * @param in input data frame
 * @return a column permuted copy
 * @see #permuteRecords(DataFrame)
 */
public static DataFrame permute(DataFrame in) {
    DataFrame.Builder builder = transferableColumnCopy(in);
    Random random = ThreadLocalRandom.current();
    for (int i = builder.columns(); i > 1; i--) {
        builder.loc().swap(i - 1, random.nextInt(i));
    }
    return builder.build();
}
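
The swap loop above is the Fisher–Yates shuffle applied to columns. As a standalone sketch (not part of the briljantframework API), the same pattern on a plain int array:

import java.util.concurrent.ThreadLocalRandom;

public final class ArrayShuffle {

    // Fisher–Yates: walk from the end, swapping each slot with a random slot at or before it
    static void shuffle(int[] values) {
        ThreadLocalRandom random = ThreadLocalRandom.current();
        for (int i = values.length - 1; i > 0; i--) {
            int j = random.nextInt(i + 1);
            int tmp = values[i];
            values[i] = values[j];
            values[j] = tmp;
        }
    }
}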

From source file:su.opencode.shuffler.RenamingVisitor.java

protected List<String> generateNameList(Table<String, String, Double> chainTable) {
    List<String> result = new ArrayList<String>();
    String current = "";
    while (result.isEmpty() || !"".equals(current)) {
        Map<String, Double> probs = chainTable.row(current);
        if (probs.isEmpty()) {
            current = "";
        } else {
            Iterator<Map.Entry<String, Double>> i = probs.entrySet().iterator();
            double rnd = ThreadLocalRandom.current().nextDouble();

            double sum = 0;
            Map.Entry<String, Double> e = null;
            while (i.hasNext() && rnd > sum) {
                e = i.next();
                sum += e.getValue();
            }
            current = e == null ? "" : e.getKey();
            result.add(current);
        }
    }
    return result;
}

From source file:uk.dsxt.voting.tests.TestDataGenerator.java

private void generate(String name, int totalParticipant, int holdersCount, int vmCount, int levelsCount,
        int minutes, boolean generateVotes, int victimsCount, boolean generateDisconnect, int disconnectNodes)
        throws Exception {
    ClientFullInfo[] clients = new ClientFullInfo[totalParticipant];
    Participant[] participants = new Participant[totalParticipant];
    //generating keys
    long start = System.currentTimeMillis();
    log.debug("generating {} keys", totalParticipant);
    KeyPair[] holderKeys = cryptoHelper.createCryptoKeysGenerator().generateKeys(holdersCount + 1);
    KeyPair[] keys = new KeyPair[totalParticipant];
    for (int i = 0; i < keys.length; i++) {
        keys[i] = holderKeys[Math.min(i, holderKeys.length - 1)];
    }
    log.debug("{} keys generated. {} seconds spent", totalParticipant,
            (System.currentTimeMillis() - start) / 1000);

    //generating voting
    long now = System.currentTimeMillis();
    long dayStart = now - now % (24 * 60 * 60 * 1000);
    Voting voting = generateVotingEn(dayStart, dayStart + minutes * 60000);
    //generating participants info
    for (int i = 0; i < totalParticipant; i++) {
        ParticipantRole role;
        if (i == 0)
            role = ParticipantRole.NRD;
        else if (i < holdersCount)
            role = ParticipantRole.NominalHolder;
        else
            role = ParticipantRole.Owner;
        HashMap<String, BigDecimal> securities = new HashMap<>();
        securities.put(SECURITY,
                role == ParticipantRole.Owner ? new BigDecimal(randomInt(15, 100)) : BigDecimal.ZERO);
        int ownerIdx = role == ParticipantRole.NRD ? -1
                : i < 6 ? 0 : randomInt(0, Math.min(i, holdersCount) - 1);
        VoteResult vote = role != ParticipantRole.Owner ? null
                : generateVote(Integer.toString(i), securities, voting);
        clients[i] = new ClientFullInfo(securities, i, ownerIdx, role, keys[i].getPrivateKey(),
                keys[i].getPublicKey(), String.format("Random name #%d", i), vote, new ArrayList<>(), false,
                true, "");
        participants[i] = new Participant(i == 0 ? "00" : Integer.toString(i), clients[i].getName(),
                clients[i].getPublicKey());
        if (role != ParticipantRole.NRD) {
            clients[ownerIdx].clients.add(clients[i]);
            for (; ownerIdx >= 0; ownerIdx = clients[ownerIdx].getHolderId()) {
                for (Map.Entry<String, BigDecimal> secEntry : securities.entrySet()) {
                    clients[ownerIdx].getPacketSizeBySecurity().put(secEntry.getKey(), clients[ownerIdx]
                            .getPacketSizeBySecurity().get(secEntry.getKey()).add(secEntry.getValue()));
                }
            }
        }
    }

    if (victimsCount > 0) {
        ThreadLocalRandom.current().ints(1, holdersCount - 1).distinct().limit(victimsCount)
                .forEach(i -> clients[i].setVictim(true));
        ThreadLocalRandom.current().ints(1, holdersCount - 1).filter(i -> !clients[i].isVictim()).distinct()
                .limit(victimsCount).forEach(i -> clients[i].setHonest(false));
    }

    saveData(clients, participants, name, voting, holdersCount, vmCount, minutes, generateVotes,
            generateDisconnect, disconnectNodes);
}
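
The victim selection above uses the ints(origin, bound) stream view of ThreadLocalRandom to draw distinct random indices. A condensed, runnable sketch of that idiom (bound and count stand in for holdersCount - 1 and victimsCount from the method above):

import java.util.concurrent.ThreadLocalRandom;

public class DistinctPicks {
    public static void main(String[] args) {
        int bound = 10;  // exclusive upper bound, standing in for holdersCount - 1
        int count = 3;   // number of distinct picks, standing in for victimsCount

        // Unlimited stream of ints in [1, bound), de-duplicated, truncated to `count` values.
        // Note: this blocks forever if count > bound - 1.
        int[] picked = ThreadLocalRandom.current()
                .ints(1, bound)
                .distinct()
                .limit(count)
                .toArray();

        System.out.println(java.util.Arrays.toString(picked));
    }
}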

From source file:ffx.algorithms.mc.RosenbluthCBMC.java

public boolean controlStep() {
    double temperature;
    if (thermostat != null) {
        temperature = thermostat.getCurrentTemperature();
    } else {
        temperature = 298.15;
    }
    double beta = 1.0 / (BOLTZMANN * temperature);
    int which = ThreadLocalRandom.current().nextInt(targets.size());
    Residue target = targets.get(which);
    RosenbluthChiAllMove cbmcMove = new RosenbluthChiAllMove(mola, target, -1, ffe, temperature, false,
            numMovesProposed, true);
    return cbmcMove.wasAccepted();
}

From source file:org.apache.hadoop.hdfs.TestRecoverStripedFile.java

/**
 * Test the file blocks recovery.
 * 1. Check the replica is recovered in the target datanode, 
 *    and verify the block replica length, generationStamp and content.
 * 2. Read the file and verify content. 
 */
private void assertFileBlocksRecovery(String fileName, int fileLen, int recovery, int toRecoverBlockNum)
        throws Exception {
    if (recovery != 0 && recovery != 1 && recovery != 2) {
        Assert.fail("Invalid recovery: 0 is to recovery parity blocks,"
                + "1 is to recovery data blocks, 2 is any.");
    }
    if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
        Assert.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
    }

    Path file = new Path(fileName);

    final byte[] data = new byte[fileLen];
    ThreadLocalRandom.current().nextBytes(data);
    DFSTestUtil.writeFile(fs, file, data);
    StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);

    LocatedBlocks locatedBlocks = getLocatedBlocks(file);
    assertEquals(locatedBlocks.getFileLength(), fileLen);

    LocatedStripedBlock lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();

    DatanodeInfo[] storageInfos = lastBlock.getLocations();
    byte[] indices = lastBlock.getBlockIndices();

    BitSet bitset = new BitSet(dnNum);
    for (DatanodeInfo storageInfo : storageInfos) {
        bitset.set(dnMap.get(storageInfo));
    }

    int[] toDead = new int[toRecoverBlockNum];
    int n = 0;
    for (int i = 0; i < indices.length; i++) {
        if (n < toRecoverBlockNum) {
            if (recovery == 0) {
                if (indices[i] >= dataBlkNum) {
                    toDead[n++] = i;
                }
            } else if (recovery == 1) {
                if (indices[i] < dataBlkNum) {
                    toDead[n++] = i;
                }
            } else {
                toDead[n++] = i;
            }
        } else {
            break;
        }
    }

    DatanodeInfo[] dataDNs = new DatanodeInfo[toRecoverBlockNum];
    int[] deadDnIndices = new int[toRecoverBlockNum];
    ExtendedBlock[] blocks = new ExtendedBlock[toRecoverBlockNum];
    File[] replicas = new File[toRecoverBlockNum];
    File[] metadatas = new File[toRecoverBlockNum];
    byte[][] replicaContents = new byte[toRecoverBlockNum][];
    for (int i = 0; i < toRecoverBlockNum; i++) {
        dataDNs[i] = storageInfos[toDead[i]];
        deadDnIndices[i] = dnMap.get(dataDNs[i]);

        // Check the block replica file on deadDn before it dead.
        blocks[i] = StripedBlockUtil.constructInternalBlock(lastBlock.getBlock(), cellSize, dataBlkNum,
                indices[toDead[i]]);
        replicas[i] = cluster.getBlockFile(deadDnIndices[i], blocks[i]);
        metadatas[i] = cluster.getBlockMetadataFile(deadDnIndices[i], blocks[i]);
        // the block replica on the datanode should be the same as expected
        assertEquals(replicas[i].length(), StripedBlockUtil.getInternalBlockLength(lastBlock.getBlockSize(),
                cellSize, dataBlkNum, indices[toDead[i]]));
        assertTrue(metadatas[i].getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        replicaContents[i] = DFSTestUtil.readFileAsBytes(replicas[i]);
    }

    int cellsNum = (fileLen - 1) / cellSize + 1;
    int groupSize = Math.min(cellsNum, dataBlkNum) + parityBlkNum;

    for (int i = 0; i < toRecoverBlockNum; i++) {
        /*
         * Kill the datanode which contains one replica
         * We need to make sure it dead in namenode: clear its update time and
         * trigger NN to check heartbeat.
         */
        DataNode dn = cluster.getDataNodes().get(deadDnIndices[i]);
        dn.shutdown();
        cluster.setDataNodeDead(dn.getDatanodeId());
    }

    // Check the locatedBlocks of the file again
    locatedBlocks = getLocatedBlocks(file);
    lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
    storageInfos = lastBlock.getLocations();
    assertEquals(storageInfos.length, groupSize - toRecoverBlockNum);

    int[] targetDNs = new int[dnNum - groupSize];
    n = 0;
    for (int i = 0; i < dnNum; i++) {
        if (!bitset.get(i)) { // not contain replica of the block.
            targetDNs[n++] = i;
        }
    }

    waitForRecoveryFinished(file, groupSize);

    targetDNs = sortTargetsByReplicas(blocks, targetDNs);

    // Check the replica on the new target node.
    for (int i = 0; i < toRecoverBlockNum; i++) {
        File replicaAfterRecovery = cluster.getBlockFile(targetDNs[i], blocks[i]);
        File metadataAfterRecovery = cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
        assertEquals(replicaAfterRecovery.length(), replicas[i].length());
        assertTrue(metadataAfterRecovery.getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        byte[] replicaContentAfterRecovery = DFSTestUtil.readFileAsBytes(replicaAfterRecovery);

        Assert.assertArrayEquals(replicaContents[i], replicaContentAfterRecovery);
    }
}

From source file:xyz.lejon.sampling.GammaDistribution.java

public static double nextGamma(double shape, double scale, boolean slowCode) {

    double sample = 0.0;

    if (shape < 0.00001) {

        if (shape < 0) {
            System.out.println("Negative shape parameter");
            throw new IllegalArgumentException("Negative shape parameter");
        }

        /*
         * special case: shape==0.0 is an improper distribution; but
         * sampling works if very small values are ignored (v. large ones
         * don't happen) This is useful e.g. for sampling from the truncated
         * Gamma(0,x)-distribution.
         */

        double minimum = 1.0e-20;
        double maximum = 50;
        double normalizingConstant = Math.log(maximum) - Math.log(minimum);
        // Draw from 1/x (with boundaries), and shape by exp(-x)
        do {
            sample = Math
                    .exp(Math.log(minimum) + normalizingConstant * ThreadLocalRandom.current().nextDouble());
        } while (Math.exp(-sample) < ThreadLocalRandom.current().nextDouble());
        // This distribution is actually scale-free, so multiplying by
        // 'scale' is not necessary
        return sample;
    }

    if (slowCode && Math.floor(shape) == shape && shape > 4.0) {
        for (int i = 0; i < shape; i++)
            sample += -Math.log(ThreadLocalRandom.current().nextDouble());
        return sample * scale;
    } else {

        // Fast special cases
        if (shape == 1.0) {
            return -Math.log(ThreadLocalRandom.current().nextDouble()) * scale;
        }
        if (shape == 2.0) {
            return -Math
                    .log(ThreadLocalRandom.current().nextDouble() * ThreadLocalRandom.current().nextDouble())
                    * scale;
        }
        if (shape == 3.0) {
            return -Math.log(ThreadLocalRandom.current().nextDouble() * ThreadLocalRandom.current().nextDouble()
                    * ThreadLocalRandom.current().nextDouble()) * scale;
        }
        if (shape == 4.0) {
            return -Math.log(ThreadLocalRandom.current().nextDouble() * ThreadLocalRandom.current().nextDouble()
                    * ThreadLocalRandom.current().nextDouble() * ThreadLocalRandom.current().nextDouble())
                    * scale;
        }
    }

    // general case
    do {
        try {
            sample = quantile(ThreadLocalRandom.current().nextDouble(), shape, scale);
        } catch (IllegalArgumentException e) {
            // random doubles do go outside the permissible range 0.000002 <
            // q < 0.999998
            sample = 0.0;
        }
    } while (sample == 0.0);
    return sample;
}
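
A hypothetical call against the signature above, drawing a single sample:

// One draw from Gamma(shape = 2.0, scale = 1.5); with slowCode = false the shape == 2.0 special case applies
double sample = GammaDistribution.nextGamma(2.0, 1.5, false);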