Example usage for java.util.concurrent ThreadLocalRandom.current()

List of usage examples for java.util.concurrent ThreadLocalRandom.current()

Introduction

On this page you can find example usage for java.util.concurrent ThreadLocalRandom.current().

Prototype

public static ThreadLocalRandom current() 

Document

Returns the current thread's ThreadLocalRandom.
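
A minimal, self-contained sketch (not taken from any of the projects below) of the typical call pattern:

import java.util.concurrent.ThreadLocalRandom;

public class ThreadLocalRandomExample {
    public static void main(String[] args) {
        // Obtain the generator bound to the current thread; do not share the instance across threads.
        ThreadLocalRandom random = ThreadLocalRandom.current();

        int dieRoll = random.nextInt(1, 7);    // uniform int in [1, 7), i.e. 1..6
        long delayMs = random.nextLong(1000L); // uniform long in [0, 1000)
        double fraction = random.nextDouble(); // uniform double in [0.0, 1.0)

        System.out.printf("dieRoll=%d delayMs=%d fraction=%f%n", dieRoll, delayMs, fraction);
    }
}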

Usage

From source file:org.apache.druid.segment.IndexMergerV9WithSpatialIndexTest.java

private static IncrementalIndex makeIncrementalIndex() throws IOException {
    IncrementalIndex theIndex = new IncrementalIndex.Builder()
            .setIndexSchema(
                    new IncrementalIndexSchema.Builder().withMinTimestamp(DATA_INTERVAL.getStartMillis())
                            .withQueryGranularity(Granularities.DAY).withMetrics(METRIC_AGGS)
                            .withDimensionsSpec(new DimensionsSpec(null, null, Arrays.asList(
                                    new SpatialDimensionSchema("dim.geo", Arrays.asList("lat", "long")),
                                    new SpatialDimensionSchema("spatialIsRad", Arrays.asList("lat2", "long2"))

                            ))).build()).setReportParseExceptions(false).setMaxRowCount(NUM_POINTS)
            .buildOnheap();

    theIndex.add(new MapBasedInputRow(DateTimes.of("2013-01-01").getMillis(), DIMS, ImmutableMap.of("timestamp",
            DateTimes.of("2013-01-01").toString(), "dim", "foo", "lat", 0.0f, "long", 0.0f, "val", 17L)));
    theIndex.add(new MapBasedInputRow(DateTimes.of("2013-01-02").getMillis(), DIMS, ImmutableMap.of("timestamp",
            DateTimes.of("2013-01-02").toString(), "dim", "foo", "lat", 1.0f, "long", 3.0f, "val", 29L)));
    theIndex.add(new MapBasedInputRow(DateTimes.of("2013-01-03").getMillis(), DIMS, ImmutableMap.of("timestamp",
            DateTimes.of("2013-01-03").toString(), "dim", "foo", "lat", 4.0f, "long", 2.0f, "val", 13L)));
    theIndex.add(new MapBasedInputRow(DateTimes.of("2013-01-04").getMillis(), DIMS, ImmutableMap.of("timestamp",
            DateTimes.of("2013-01-04").toString(), "dim", "foo", "lat", 7.0f, "long", 3.0f, "val", 91L)));
    theIndex.add(new MapBasedInputRow(DateTimes.of("2013-01-05").getMillis(), DIMS, ImmutableMap.of("timestamp",
            DateTimes.of("2013-01-05").toString(), "dim", "foo", "lat", 8.0f, "long", 6.0f, "val", 47L)));
    theIndex.add(new MapBasedInputRow(DateTimes.of("2013-01-05").getMillis(), DIMS,
            ImmutableMap.of("timestamp", DateTimes.of("2013-01-05").toString(), "dim", "foo", "lat",
                    "_mmx.unknown", "long", "_mmx.unknown", "val", 101L)));
    theIndex.add(new MapBasedInputRow(DateTimes.of("2013-01-05").getMillis(), DIMS, ImmutableMap.of("timestamp",
            DateTimes.of("2013-01-05").toString(), "dim", "foo", "dim.geo", "_mmx.unknown", "val", 501L)));
    theIndex.add(new MapBasedInputRow(DateTimes.of("2013-01-05").getMillis(), DIMS, ImmutableMap.of("timestamp",
            DateTimes.of("2013-01-05").toString(), "lat2", 0.0f, "long2", 0.0f, "val", 13L)));

    // Add a bunch of random points
    Random rand = ThreadLocalRandom.current();
    for (int i = 8; i < NUM_POINTS; i++) {
        theIndex.add(new MapBasedInputRow(DateTimes.of("2013-01-01").getMillis(), DIMS,
                ImmutableMap.of("timestamp", DateTimes.of("2013-01-01").toString(), "dim", "boo", "lat",
                        (float) (rand.nextFloat() * 10 + 10.0), "long", (float) (rand.nextFloat() * 10 + 10.0),
                        "val", i)));
    }

    return theIndex;
}

From source file:com.graphaware.neo4j.graphgen.faker.FakerService.java

public int numberBetween(Property property) {
    if (property.parameters().size() != 2) {
        throw new IllegalArgumentException(
                String.format("Expected exactly %d arguments, %d received", 2, property.parameters().size()));
    }

    Integer i1 = parseInt(property.parameters().get(0));
    Integer i2 = parseInt(property.parameters().get(1));

    if (i1 >= i2) {
        throw new IllegalArgumentException(
                "First parameter should not be greater or equal than second parameter");
    }

    return ThreadLocalRandom.current().nextInt(i1, i2);
}
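
Note that ThreadLocalRandom.current().nextInt(origin, bound) draws from the half-open range [origin, bound), so numberBetween above can never return its second parameter. If an inclusive upper bound were wanted, a hypothetical variant (an assumption, not part of FakerService) would simply widen the bound:

public int numberBetweenInclusive(int i1, int i2) {
    // nextInt(origin, bound) excludes bound, so widen it by one; caller must ensure i2 < Integer.MAX_VALUE.
    return ThreadLocalRandom.current().nextInt(i1, i2 + 1);
}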

From source file:com.bitranger.parknshop.common.service.ads.ItemAdService.java

public List<PsPromotItem> randomReduce(List<PsPromotItem> src, int limit) {
    if (src.size() > limit) {
        ThreadLocalRandom rand = ThreadLocalRandom.current();
        int idx = rand.nextInt(limit);
        int nidx = rand.nextInt(limit, src.size());
        src.set(idx, src.get(nidx));
        return new ArrayList<>(src.subList(0, limit));
    } else {
        return src;
    }
}

From source file:org.apache.hadoop.hbase.ipc.NettyRpcConnection.java

private void scheduleRelogin(Throwable error) {
    if (error instanceof FallbackDisallowedException) {
        return;
    }
    synchronized (this) {
        if (reloginInProgress) {
            return;
        }
        reloginInProgress = true;
        RELOGIN_EXECUTOR.schedule(new Runnable() {

            @Override
            public void run() {
                try {
                    if (shouldAuthenticateOverKrb()) {
                        relogin();
                    }
                } catch (IOException e) {
                    LOG.warn("relogin failed", e);
                }
                synchronized (this) {
                    reloginInProgress = false;
                }
            }
        }, ThreadLocalRandom.current().nextInt(reloginMaxBackoff), TimeUnit.MILLISECONDS);
    }
}

From source file:ffx.algorithms.MCLoop.java

/**
 * The primary driver. Called by the MD engine at each dynamics step.
 * @param molAss
 */
@Override
public boolean mcUpdate(MolecularAssembly molAss) {

    stepCount++;
    if (skipAlgorithm) {
        return false;
    }
    // Decide on the type of step to be taken.
    if ((stepCount % mcStepFrequency != 0)) {
        // Not yet time for an MC step, return to MD.
        return false;
    }

    if (lambdaInterface != null) {
        if (lambdaInterface.getLambda() > 0.1) {
            logger.info(String.format(" KIC procedure skipped (Lambda > 0.1)."));
            return false;
        }
    }

    atoms = molAss.getAtomArray();

    // Randomly choose a target sub portion of loop to KIC.
    int midResidue;
    midResidue = ThreadLocalRandom.current().nextInt(firstResidue + 1, endResidue);

    List<double[]> loopSolutions;
    loopSolutions = loop.generateLoops(midResidue - 1, midResidue + 1);

    for (int i = 1; i < iterations; i++) {
        //pick random subloop
        midResidue = ThreadLocalRandom.current().nextInt(firstResidue + 1, endResidue);
        //pick random solution
        if (loopSolutions.size() > 0) {
            List<double[]> tempLoops = loop.generateLoops(midResidue - 1, midResidue + 1,
                    loopSolutions.get(rng.nextInt(loopSolutions.size())));
            loopSolutions.addAll(tempLoops);
        } else {
            loopSolutions = loop.generateLoops(midResidue - 1, midResidue + 1);
        }

    }
    int numLoopsFound = loopSolutions.size();
    // Check whether KIC found alternative loops
    if (numLoopsFound <= 1) {
        return false;
    }

    // Perform the MC move.
    boolean accepted = tryLoopStep(loopSolutions);
    return accepted;
}

From source file:org.apache.bookkeeper.common.util.TestBackoff.java

@Test
public void testDecorrelatedJitteredPolicy() throws Exception {
    long startMs = ThreadLocalRandom.current().nextLong(1L, 1000L);
    long maxMs = ThreadLocalRandom.current().nextLong(startMs, startMs * 2);
    Stream<Long> backoffs = Backoff.Jitter.of(DECORRELATED, startMs, maxMs, 10).toBackoffs();
    Iterator<Long> backoffIter = backoffs.iterator();
    assertTrue(backoffIter.hasNext());
    assertEquals(startMs, backoffIter.next().longValue());
    AtomicLong prevMs = new AtomicLong(startMs);
    backoffIter.forEachRemaining(backoffMs -> {
        assertTrue(backoffMs >= startMs);
        assertTrue(backoffMs <= prevMs.get() * 3);
        assertTrue(backoffMs <= maxMs);
        prevMs.set(backoffMs);
    });
}

From source file:com.spotify.helios.client.DefaultRequestDispatcher.java

/**
 * Sets up a connection, retrying on connect failure.
 */
private HttpURLConnection connect(final URI uri, final String method, final byte[] entity,
        final Map<String, List<String>> headers)
        throws URISyntaxException, IOException, TimeoutException, InterruptedException, HeliosException {
    final long deadline = currentTimeMillis() + RETRY_TIMEOUT_MILLIS;
    final int offset = ThreadLocalRandom.current().nextInt();

    while (currentTimeMillis() < deadline) {
        final List<URI> endpoints = endpointSupplier.get();
        if (endpoints.isEmpty()) {
            throw new RuntimeException("failed to resolve master");
        }
        log.debug("endpoint uris are {}", endpoints);

        // Resolve hostname into IPs so client will round-robin and retry for multiple A records.
        // Keep a mapping of IPs to hostnames for TLS verification.
        final List<URI> ipEndpoints = Lists.newArrayList();
        final Map<URI, URI> ipToHostnameUris = Maps.newHashMap();

        for (final URI hnUri : endpoints) {
            try {
                final InetAddress[] ips = InetAddress.getAllByName(hnUri.getHost());
                for (final InetAddress ip : ips) {
                    final URI ipUri = new URI(hnUri.getScheme(), hnUri.getUserInfo(), ip.getHostAddress(),
                            hnUri.getPort(), hnUri.getPath(), hnUri.getQuery(), hnUri.getFragment());
                    ipEndpoints.add(ipUri);
                    ipToHostnameUris.put(ipUri, hnUri);
                }
            } catch (UnknownHostException e) {
                log.warn("Unable to resolve hostname {} into IP address: {}", hnUri.getHost(), e);
            }
        }

        for (int i = 0; i < ipEndpoints.size() && currentTimeMillis() < deadline; i++) {
            final URI ipEndpoint = ipEndpoints.get(positive(offset + i) % ipEndpoints.size());
            final String fullpath = ipEndpoint.getPath() + uri.getPath();

            final String scheme = ipEndpoint.getScheme();
            final String host = ipEndpoint.getHost();
            final int port = ipEndpoint.getPort();
            if (!VALID_PROTOCOLS.contains(scheme) || host == null || port == -1) {
                throw new HeliosException(String.format(
                        "Master endpoints must be of the form \"%s://heliosmaster.domain.net:<port>\"",
                        VALID_PROTOCOLS_STR));
            }

            final URI realUri = new URI(scheme, host + ":" + port, fullpath, uri.getQuery(), null);

            AgentProxy agentProxy = null;
            Deque<Identity> identities = Queues.newArrayDeque();
            try {
                if (scheme.equals("https")) {
                    agentProxy = AgentProxies.newInstance();
                    for (final Identity identity : agentProxy.list()) {
                        if (identity.getPublicKey().getAlgorithm().equals("RSA")) {
                            // only RSA keys will work with our TLS implementation
                            identities.offerLast(identity);
                        }
                    }
                }
            } catch (Exception e) {
                log.warn("Couldn't get identities from ssh-agent", e);
            }

            try {
                do {
                    final Identity identity = identities.poll();

                    try {
                        log.debug("connecting to {}", realUri);

                        final HttpURLConnection connection = connect0(realUri, method, entity, headers,
                                ipToHostnameUris.get(ipEndpoint).getHost(), agentProxy, identity);

                        final int responseCode = connection.getResponseCode();
                        if (((responseCode == HTTP_FORBIDDEN) || (responseCode == HTTP_UNAUTHORIZED))
                                && !identities.isEmpty()) {
                            // there was some sort of security error. if we have any more SSH identities to try,
                            // retry with the next available identity
                            log.debug("retrying with next SSH identity since {} failed", identity.getComment());
                            continue;
                        }

                        return connection;
                    } catch (ConnectException | SocketTimeoutException | UnknownHostException e) {
                        // UnknownHostException happens if we can't resolve hostname into IP address.
                        // UnknownHostException's getMessage method returns just the hostname which is a
                        // useless message, so log the exception class name to provide more info.
                        log.debug(e.toString());
                        // Connecting failed, sleep a bit to avoid hammering and then try another endpoint
                        Thread.sleep(200);
                    }
                } while (false);
            } finally {
                if (agentProxy != null) {
                    agentProxy.close();
                }
            }
        }
        log.warn("Failed to connect, retrying in 5 seconds.");
        Thread.sleep(5000);
    }
    throw new TimeoutException("Timed out connecting to master");
}

From source file:org.apache.bookkeeper.common.conf.ConfigKeyTest.java

@Test
public void testGetBoolean() {
    String keyName = runtime.getMethodName();
    boolean defaultValue = ThreadLocalRandom.current().nextBoolean();
    ConfigKey key = ConfigKey.builder(keyName).required(true).type(Type.BOOLEAN).defaultValue(defaultValue)
            .build();

    Configuration conf = new ConcurrentConfiguration();

    // get default value
    assertEquals(defaultValue, key.getBoolean(conf));
    assertEquals(defaultValue, key.get(conf));

    // set value
    boolean newValue = !defaultValue;
    key.set(conf, newValue);
    assertEquals(newValue, key.getBoolean(conf));
    assertEquals(newValue, key.get(conf));
}

From source file:com.adaptris.hpcc.DfuPlusWrapper.java

protected long calculateWait(long current) {
    long result = TimeUnit.SECONDS.toMillis(1);
    if (current > 0) {
        result = current * 2;
        if (result > monitorIntervalMs()) {
            result = monitorIntervalMs();
        }
    }
    return Math.max(ThreadLocalRandom.current().nextLong(result), current);
}

From source file:ai.grakn.graph.internal.computer.GraknSparkComputer.java

private Future<ComputerResult> submitWithExecutor(Executor exec) {
    getGraphRDD(this);
    jobGroupId = Integer.toString(ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE));
    String jobDescription = this.vertexProgram == null ? this.mapReducers.toString()
            : this.vertexProgram + "+" + this.mapReducers;

    this.sparkConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/" + jobGroupId);
    this.apacheConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));
    this.hadoopConfiguration.set(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));

    // create the completable future
    return CompletableFuture.supplyAsync(() -> {
        graknGraphRDD.sparkContext.setJobGroup(jobGroupId, jobDescription);
        final long startTime = System.currentTimeMillis();

        GraknSparkMemory memory = null;
        JavaPairRDD<Object, VertexWritable> computedGraphRDD = null;
        JavaPairRDD<Object, ViewIncomingPayload<Object>> viewIncomingRDD = null;

        ////////////////////////////////
        // process the vertex program //
        ////////////////////////////////
        if (null != this.vertexProgram) {
            // set up the vertex program and wire up configurations
            this.mapReducers.addAll(this.vertexProgram.getMapReducers());
            memory = new GraknSparkMemory(this.vertexProgram, this.mapReducers, graknGraphRDD.sparkContext);
            this.vertexProgram.setup(memory);
            memory.broadcastMemory(graknGraphRDD.sparkContext);
            final HadoopConfiguration vertexProgramConfiguration = new HadoopConfiguration();
            this.vertexProgram.storeState(vertexProgramConfiguration);
            ConfigurationUtils.copy(vertexProgramConfiguration, apacheConfiguration);
            ConfUtil.mergeApacheIntoHadoopConfiguration(vertexProgramConfiguration, hadoopConfiguration);
            // execute the vertex program
            while (true) {
                memory.setInTask(true);
                viewIncomingRDD = GraknSparkExecutor.executeVertexProgramIteration(graknGraphRDD.loadedGraphRDD,
                        viewIncomingRDD, memory, vertexProgramConfiguration);
                memory.setInTask(false);
                if (this.vertexProgram.terminate(memory))
                    break;
                else {
                    memory.incrIteration();
                    memory.broadcastMemory(graknGraphRDD.sparkContext);
                }
            }
            // write the computed graph to the respective output (rdd or output format)
            final String[] elementComputeKeys = this.vertexProgram.getElementComputeKeys()
                    .toArray(new String[this.vertexProgram.getElementComputeKeys().size()]);
            computedGraphRDD = GraknSparkExecutor.prepareFinalGraphRDD(graknGraphRDD.loadedGraphRDD,
                    viewIncomingRDD, elementComputeKeys);
            if ((hadoopConfiguration.get(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT, null) != null
                    || hadoopConfiguration.get(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD, null) != null)
                    && !this.persist.equals(Persist.NOTHING)) {
                try {
                    hadoopConfiguration
                            .getClass(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD, OutputFormatRDD.class,
                                    OutputRDD.class)
                            .newInstance().writeGraphRDD(apacheConfiguration, computedGraphRDD);
                } catch (final InstantiationException | IllegalAccessException e) {
                    throw new IllegalStateException(e.getMessage(), e);
                }
            }
        }

        final boolean computedGraphCreated = computedGraphRDD != null;
        if (!computedGraphCreated) {
            computedGraphRDD = graknGraphRDD.loadedGraphRDD;
        }

        final Memory.Admin finalMemory = null == memory ? new MapMemory() : new MapMemory(memory);

        //////////////////////////////
        // process the map reducers //
        //////////////////////////////
        if (!this.mapReducers.isEmpty()) {
            for (final MapReduce mapReduce : this.mapReducers) {
                // execute the map reduce job
                final HadoopConfiguration newApacheConfiguration = new HadoopConfiguration(apacheConfiguration);
                mapReduce.storeState(newApacheConfiguration);
                // map
                final JavaPairRDD mapRDD = GraknSparkExecutor.executeMap(computedGraphRDD, mapReduce,
                        newApacheConfiguration);
                // combine
                final JavaPairRDD combineRDD = mapReduce.doStage(MapReduce.Stage.COMBINE)
                        ? GraknSparkExecutor.executeCombine(mapRDD, newApacheConfiguration)
                        : mapRDD;
                // reduce
                final JavaPairRDD reduceRDD = mapReduce.doStage(MapReduce.Stage.REDUCE)
                        ? GraknSparkExecutor.executeReduce(combineRDD, mapReduce, newApacheConfiguration)
                        : combineRDD;
                // write the map reduce output back to disk and computer result memory
                try {
                    mapReduce.addResultToMemory(finalMemory,
                            hadoopConfiguration
                                    .getClass(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD, OutputFormatRDD.class,
                                            OutputRDD.class)
                                    .newInstance()
                                    .writeMemoryRDD(apacheConfiguration, mapReduce.getMemoryKey(), reduceRDD));
                } catch (final InstantiationException | IllegalAccessException e) {
                    throw new IllegalStateException(e.getMessage(), e);
                }
            }
        }

        // unpersist the computed graph if it will not be used again (no PersistedOutputRDD)
        if (!graknGraphRDD.outputToSpark || this.persist.equals(GraphComputer.Persist.NOTHING)) {
            computedGraphRDD.unpersist();
        }
        // delete any file system or rdd data if persist nothing
        String outputPath = sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION);
        if (null != outputPath && this.persist.equals(GraphComputer.Persist.NOTHING)) {
            if (graknGraphRDD.outputToHDFS) {
                graknGraphRDD.fileSystemStorage.rm(outputPath);
            }
            if (graknGraphRDD.outputToSpark) {
                graknGraphRDD.sparkContextStorage.rm(outputPath);
            }
        }
        // update runtime and return the newly computed graph
        finalMemory.setRuntime(System.currentTimeMillis() - startTime);
        return new DefaultComputerResult(
                InputOutputHelper.getOutputGraph(apacheConfiguration, this.resultGraph, this.persist),
                finalMemory.asImmutable());
    }, exec);
}