List of usage examples for java.util.concurrent.ThreadLocalRandom.current()
public static ThreadLocalRandom current()
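Before the per-file examples, a minimal self-contained sketch of the basic API (class and variable names here are illustrative, not from any of the files below). current() returns the ThreadLocalRandom bound to the calling thread, so it needs no manual seeding and no synchronization:

import java.util.concurrent.ThreadLocalRandom;

public class ThreadLocalRandomDemo {
    public static void main(String[] args) {
        ThreadLocalRandom rnd = ThreadLocalRandom.current();
        int die = rnd.nextInt(1, 7);      // uniform in [1, 7), i.e. 1..6
        double fraction = rnd.nextDouble(); // uniform in [0.0, 1.0)
        long id = rnd.nextLong();           // any long value
        System.out.println(die + " " + fraction + " " + id);
    }
}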
From source file: fr.gouv.vitam.mdbes.QueryBench.java

// Builds the final request string, substituting each typed field with a
// saved value, a (randomly or sequentially) chosen list entry, a counter
// value, or a random integer from an inclusive interval.
private static String getRequest(JsonNode request, List<TypeField> fields, AtomicLong rank, BenchContext bench) {
    if (fields != null && !fields.isEmpty()) {
        String finalRequest = request.toString();
        ThreadLocalRandom rnd = ThreadLocalRandom.current();
        for (TypeField field : fields) {
            String val = null;
            switch (field.type) {
            case save:
                finalRequest = getFinalRequest(field, "", bench.savedNames, finalRequest);
                break;
            case liste:
                // Pick a random entry from the list of values
                int rlist = rnd.nextInt(field.listeValeurs.length);
                val = field.listeValeurs[rlist];
                finalRequest = getFinalRequest(field, val, bench.savedNames, finalRequest);
                break;
            case listeorder:
                // Walk the list of values in order, clamping at the last entry
                long i = rank.getAndIncrement();
                if (i >= field.listeValeurs.length) {
                    i = field.listeValeurs.length - 1;
                }
                val = field.listeValeurs[(int) i];
                finalRequest = getFinalRequest(field, val, bench.savedNames, finalRequest);
                break;
            case serie:
                // Use a named counter if one is configured, else the rank counter
                AtomicLong newcpt = rank;
                if (field.idcpt != null) {
                    newcpt = bench.cpts.get(field.idcpt);
                    if (newcpt == null) {
                        newcpt = rank;
                        System.err.println("wrong cpt name: " + field.idcpt);
                    }
                }
                long j = newcpt.getAndIncrement();
                if (field.modulo > 0) {
                    j = j % field.modulo;
                }
                val = (field.prefix != null ? field.prefix : "") + j;
                finalRequest = getFinalRequest(field, val, bench.savedNames, finalRequest);
                break;
            case interval:
                // nextInt's upper bound is exclusive, so add 1 to include field.high
                int newval = rnd.nextInt(field.low, field.high + 1);
                finalRequest = getFinalRequest(field, "" + newval, bench.savedNames, finalRequest);
                break;
            default:
                break;
            }
        }
        return finalRequest;
    }
    return null;
}
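The interval case above relies on nextInt(origin, bound) being exclusive of its upper bound. A minimal, self-contained illustration (class and variable names hypothetical):

import java.util.concurrent.ThreadLocalRandom;

public class InclusiveRangeDemo {
    public static void main(String[] args) {
        int low = 5, high = 10;
        // nextInt(origin, bound) is uniform over [origin, bound), so passing
        // high + 1 makes the upper endpoint reachable: result is in [5, 10].
        int value = ThreadLocalRandom.current().nextInt(low, high + 1);
        System.out.println(value);
    }
}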
From source file: org.zaproxy.zap.extension.ascanrules.CommandInjectionPlugin.java

/**
 * Generates a payload variant for an uninitialized-variable WAF bypass.
 * https://www.secjuice.com/web-application-firewall-waf-evasion/
 *
 * @param cmd the command into which to insert the uninitialized variable
 */
private static String insertUninitVar(String cmd) {
    // Build a random variable name of 2 or 3 characters, e.g. "$x" or "$xy"
    int varLength = ThreadLocalRandom.current().nextInt(1, 3) + 1;
    char[] array = new char[varLength];
    array[0] = '$';
    for (int i = 1; i < varLength; ++i) {
        array[i] = (char) ThreadLocalRandom.current().nextInt(97, 123); // 'a'..'z'
    }
    String var = new String(array);
    // Insert the variable before each space and each '/' in the path
    return cmd.replaceAll("\\s", Matcher.quoteReplacement(var + " "))
            .replaceAll("\\/", Matcher.quoteReplacement(var + "/"));
}
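For context, a minimal sketch of what this transformation produces (the class name and the fixed variable "$x" are illustrative; the plugin itself picks a random name):

import java.util.regex.Matcher;

public class UninitVarDemo {
    public static void main(String[] args) {
        // With var == "$x", "cat /etc/passwd" becomes "cat$x $x/etc$x/passwd":
        // the shell expands the unset variable $x to nothing, so the command
        // still runs, but a naive signature match on "cat /etc/passwd" fails.
        String var = "$x";
        String cmd = "cat /etc/passwd";
        String evaded = cmd.replaceAll("\\s", Matcher.quoteReplacement(var + " "))
                .replaceAll("\\/", Matcher.quoteReplacement(var + "/"));
        System.out.println(evaded); // cat$x $x/etc$x/passwd
    }
}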
From source file: org.apache.hadoop.metrics2.sink.RollingFileSystemSink.java

/**
 * Set the {@link #nextFlush} variable to the initial flush time. The initial
 * flush will be an integer number of flush intervals past the beginning of
 * the current hour and will have a random offset added, up to
 * {@link #rollOffsetIntervalMillis}. The initial flush will be a time in the
 * past from which future flush times can be calculated.
 *
 * @param now the current time
 */
@VisibleForTesting
protected void setInitialFlushTime(Date now) {
    // Start with the beginning of the current hour
    nextFlush = Calendar.getInstance();
    nextFlush.setTime(now);
    nextFlush.set(Calendar.MILLISECOND, 0);
    nextFlush.set(Calendar.SECOND, 0);
    nextFlush.set(Calendar.MINUTE, 0);

    // In the first round, calculate the first flush as the largest number of
    // intervals from the beginning of the current hour that's not in the
    // future by:
    // 1. Subtract the beginning of the hour from the current time
    // 2. Divide by the roll interval and round down to get the number of whole
    //    intervals that have passed since the beginning of the hour
    // 3. Multiply by the roll interval to get the number of millis between
    //    the beginning of the current hour and the beginning of the current
    //    interval.
    int millis = (int) (((now.getTime() - nextFlush.getTimeInMillis()) / rollIntervalMillis)
            * rollIntervalMillis);

    // Then add some noise to help prevent all the nodes from
    // closing their files at the same time.
    if (rollOffsetIntervalMillis > 0) {
        millis += ThreadLocalRandom.current().nextLong(rollOffsetIntervalMillis);

        // If the added time puts us into the future, step back one roll interval
        // because the code to increment nextFlush to the next flush expects that
        // nextFlush is the next flush from the previous interval. There wasn't
        // a previous interval, so we just fake it with the time in the past that
        // would have been the previous interval if there had been one.
        //
        // It's OK if millis comes out negative.
        while (nextFlush.getTimeInMillis() + millis > now.getTime()) {
            millis -= rollIntervalMillis;
        }
    }

    // Adjust the next flush time by millis to get the time of our fictitious
    // previous next flush
    nextFlush.add(Calendar.MILLISECOND, millis);
}
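The jitter idea above generalizes beyond file rolling. A hedged, standalone sketch of spreading scheduled work with nextLong(bound) (method and class names are illustrative, not from Hadoop):

import java.util.concurrent.ThreadLocalRandom;

public class JitterDemo {
    // Spread a nominally fixed delay by up to maxJitterMillis so that many
    // nodes started at the same moment do not all fire simultaneously.
    // maxJitterMillis must be positive: nextLong(bound) requires bound > 0
    // and returns a value uniform in [0, bound).
    static long jitteredDelay(long baseMillis, long maxJitterMillis) {
        return baseMillis + ThreadLocalRandom.current().nextLong(maxJitterMillis);
    }

    public static void main(String[] args) {
        for (int i = 0; i < 3; i++) {
            System.out.println(jitteredDelay(60_000L, 5_000L));
        }
    }
}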
From source file: org.apache.hadoop.io.retry.RetryPolicies.java

/**
 * Return a value which is <code>time</code> increased exponentially as a
 * function of <code>retries</code>, plus or minus 0%-50% of that value,
 * chosen randomly.
 *
 * @param time the base amount of time to work with
 * @param retries the number of retries that have occurred so far
 * @param cap value at which to cap the base sleep time
 * @return an amount of time to sleep
 */
public static long calculateExponentialTime(long time, int retries, long cap) {
    long baseTime = Math.min(time * (1L << retries), cap);
    // nextDouble() is uniform in [0.0, 1.0), so the multiplier is in [0.5, 1.5)
    return (long) (baseTime * (ThreadLocalRandom.current().nextDouble() + 0.5));
}
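A short usage sketch (hypothetical driver class, not from Hadoop) showing the capped, jittered growth of this backoff:

import java.util.concurrent.ThreadLocalRandom;

public class BackoffDemo {
    static long calculateExponentialTime(long time, int retries, long cap) {
        long baseTime = Math.min(time * (1L << retries), cap);
        return (long) (baseTime * (ThreadLocalRandom.current().nextDouble() + 0.5));
    }

    public static void main(String[] args) {
        // Base 100 ms, capped at 10 s: midpoints double as 100, 200, 400, ...
        // with each sleep perturbed to anywhere in [0.5x, 1.5x) of the midpoint.
        for (int retries = 0; retries <= 8; retries++) {
            System.out.printf("retry %d -> sleep %d ms%n",
                    retries, calculateExponentialTime(100, retries, 10_000));
        }
    }
}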
From source file: org.apache.ignite.yardstick.cache.load.IgniteCacheRandomOperationBenchmark.java

/**
 * @return Next random boolean value.
 */
private boolean nextBoolean() {
    return ThreadLocalRandom.current().nextBoolean();
}
From source file: ffx.potential.bonded.MultiTerminus.java

private void maxwellMe(Atom atom, double temperature) {
    // Draw each velocity component from a Gaussian with standard deviation
    // sqrt(kB * T / m), i.e. the Maxwell-Boltzmann distribution.
    double[] vv = new double[3];
    for (int i = 0; i < 3; i++) {
        vv[i] = ThreadLocalRandom.current().nextGaussian() * sqrt(kB * temperature / atom.getMass());
    }
    atom.setVelocity(vv);
}
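A self-contained sketch of the same sampling idea, assuming force-field-style units; the constant, units, and names are chosen for illustration and are not taken from FFX:

import java.util.concurrent.ThreadLocalRandom;

public class MaxwellDemo {
    // Boltzmann constant in kcal/(mol*K), a common force-field convention.
    static final double KB = 0.001987204;

    // Sample a 3-component velocity from the Maxwell-Boltzmann distribution:
    // each component is Gaussian with standard deviation sqrt(kB*T/m).
    static double[] maxwellVelocity(double mass, double temperature) {
        double sigma = Math.sqrt(KB * temperature / mass);
        double[] v = new double[3];
        for (int i = 0; i < 3; i++) {
            v[i] = ThreadLocalRandom.current().nextGaussian() * sigma;
        }
        return v;
    }

    public static void main(String[] args) {
        double[] v = maxwellVelocity(12.011, 298.15); // carbon at room temperature
        System.out.printf("v = (%.4f, %.4f, %.4f)%n", v[0], v[1], v[2]);
    }
}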
From source file: org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.java

/**
 * Test that high priority blocks are processed before low priority blocks.
 */
@Test(timeout = 60000)
public void testReplicationWithPriority() throws Exception {
    int DFS_NAMENODE_REPLICATION_INTERVAL = 1000;
    int HIGH_PRIORITY = 0;
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
    try {
        cluster.waitActive();
        final UnderReplicatedBlocks neededReplications =
                cluster.getNameNode().getNamesystem().getBlockManager().neededReplications;
        for (int i = 0; i < 100; i++) {
            // Add the blocks directly at normal priority
            neededReplications.add(genBlockInfo(ThreadLocalRandom.current().nextLong()), 2, 0, 3);
        }

        // Wait for the replication interval, so processing of the normal
        // priority blocks starts
        Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);

        // Add a block directly to the high priority list
        neededReplications.add(genBlockInfo(ThreadLocalRandom.current().nextLong()), 1, 0, 3);

        // Wait for the replication interval
        Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);

        // Check that replication completed successfully. There is no need to
        // wait until all 100 normal blocks have been processed.
        assertFalse("Not able to clear the element from high priority list",
                neededReplications.iterator(HIGH_PRIORITY).hasNext());
    } finally {
        cluster.shutdown();
    }
}
From source file: org.apache.druid.hll.HyperLogLogCollectorTest.java

@Ignore
@Test
public void showErrorRate() {
    HashFunction fn = Hashing.murmur3_128();
    Random random = ThreadLocalRandom.current();
    double error = 0.0d;
    int count = 0;

    final int[] valsToCheck = { 10, 20, 50, 100, 1000, 2000, 5000, 10000, 20000, 50000,
            100000, 1000000, 2000000, 10000000, Integer.MAX_VALUE };

    for (int numThings : valsToCheck) {
        long startTime = System.currentTimeMillis();
        HyperLogLogCollector collector = HyperLogLogCollector.makeLatestCollector();

        for (int i = 0; i < numThings; ++i) {
            if (i != 0 && i % 100000000 == 0) {
                ++count;
                error = computeError(error, count, i, startTime, collector);
            }
            collector.add(fn.hashLong(random.nextLong()).asBytes());
        }

        ++count;
        error = computeError(error, count, numThings, startTime, collector);
    }
}
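Note that ThreadLocalRandom extends java.util.Random, which is why the assignment to a Random variable above compiles; current() can be passed anywhere a Random is expected. Two caveats: the returned instance should not be stashed in a field and used from other threads, and calling setSeed(long) on it throws UnsupportedOperationException.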
From source file: org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.java

/**
 * Test that chooseUnderReplicatedBlocks processes blocks based on priority.
 */
@Test
public void testChooseUnderReplicatedBlocks() throws Exception {
    UnderReplicatedBlocks underReplicatedBlocks = new UnderReplicatedBlocks();
    for (int i = 0; i < 5; i++) {
        // Adding QUEUE_HIGHEST_PRIORITY block
        underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().nextLong()), 1, 0, 3);
        // Adding QUEUE_VERY_UNDER_REPLICATED block
        underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().nextLong()), 2, 0, 7);
        // Adding QUEUE_REPLICAS_BADLY_DISTRIBUTED block
        underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().nextLong()), 6, 0, 6);
        // Adding QUEUE_UNDER_REPLICATED block
        underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().nextLong()), 5, 0, 6);
        // Adding QUEUE_WITH_CORRUPT_BLOCKS block
        underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().nextLong()), 0, 0, 3);
    }

    // Choose 6 blocks from UnderReplicatedBlocks. It should pick 5 blocks from
    // QUEUE_HIGHEST_PRIORITY and 1 block from QUEUE_VERY_UNDER_REPLICATED.
    List<List<BlockInfo>> chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(6);
    assertTheChosenBlocks(chosenBlocks, 5, 1, 0, 0, 0);

    // Choose 10 blocks from UnderReplicatedBlocks. It should pick 4 blocks from
    // QUEUE_VERY_UNDER_REPLICATED, 5 blocks from QUEUE_UNDER_REPLICATED and 1
    // block from QUEUE_REPLICAS_BADLY_DISTRIBUTED.
    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(10);
    assertTheChosenBlocks(chosenBlocks, 0, 4, 5, 1, 0);

    // Adding QUEUE_HIGHEST_PRIORITY
    underReplicatedBlocks.add(genBlockInfo(ThreadLocalRandom.current().nextLong()), 1, 0, 3);

    // Choose 10 blocks from UnderReplicatedBlocks. It should pick 1 block from
    // QUEUE_HIGHEST_PRIORITY, 4 blocks from QUEUE_REPLICAS_BADLY_DISTRIBUTED
    // and 5 blocks from QUEUE_WITH_CORRUPT_BLOCKS.
    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(10);
    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 4, 5);

    // Since the end of all lists has been reached, picking should wrap around
    // to the start. Choose 7 blocks from UnderReplicatedBlocks. It should pick
    // 6 blocks from QUEUE_HIGHEST_PRIORITY and 1 block from
    // QUEUE_VERY_UNDER_REPLICATED.
    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(7);
    assertTheChosenBlocks(chosenBlocks, 6, 1, 0, 0, 0);
}
From source file: com.spotify.helios.system.SystemTestBase.java

protected static String randomHexString() {
    return toHexString(ThreadLocalRandom.current().nextInt());
}
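A standalone equivalent of the helper above (the class name is hypothetical), making explicit the static import it relies on:

import java.util.concurrent.ThreadLocalRandom;
import static java.lang.Integer.toHexString;

public class RandomHexDemo {
    // Produce a short random hex token, e.g. "7f3a91c2". Negative ints are
    // formatted as their unsigned hex representation, so the result is
    // always 1 to 8 hex digits.
    static String randomHexString() {
        return toHexString(ThreadLocalRandom.current().nextInt());
    }

    public static void main(String[] args) {
        System.out.println(randomHexString());
    }
}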