List of usage examples for java.util.concurrent ThreadLocalRandom current
public static ThreadLocalRandom current()
From source file:org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.java
/**
 * Verifies that calling {@code update} on a block already sitting in the
 * under-replicated queues (simulating a replica completing between checks)
 * does not cause other queued blocks to be skipped by the chooser.
 */
@Test(timeout = 60000)
public void testUpdateDoesNotCauseSkippedReplication() {
    UnderReplicatedBlocks underReplicatedBlocks = new UnderReplicatedBlocks();
    BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    BlockInfo block3 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    // Adding QUEUE_VERY_UNDER_REPLICATED block (2 of 7 expected replicas)
    final int block1CurReplicas = 2;
    final int block1ExpectedReplicas = 7;
    underReplicatedBlocks.add(block1, block1CurReplicas, 0, block1ExpectedReplicas);
    // Adding QUEUE_VERY_UNDER_REPLICATED block
    underReplicatedBlocks.add(block2, 2, 0, 7);
    // Adding QUEUE_UNDER_REPLICATED block
    underReplicatedBlocks.add(block3, 2, 0, 6);
    List<List<BlockInfo>> chosenBlocks;
    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_VERY_UNDER_REPLICATED.
    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 0, 1, 0, 0, 0);
    // Increasing the replications will move the block down a
    // priority. This simulates a replica being completed in between checks.
    underReplicatedBlocks.update(block1, block1CurReplicas + 1, 0, block1ExpectedReplicas, 1, 0);
    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_VERY_UNDER_REPLICATED.
    // This block was moved up a priority and should not be skipped over.
    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 0, 1, 0, 0, 0);
    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_UNDER_REPLICATED.
    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 0, 0, 1, 0, 0);
}
From source file:ffx.algorithms.RotamerOptimization.java
/**
 * Scrambles an array of rotamers.
 *
 * For each residue, collects the rotamer indices not eliminated by
 * {@code check(i, ri)} and, when more than one remains, assigns a randomly
 * chosen allowed rotamer (optionally re-drawing until the move passes
 * {@code checkValidMove}).
 *
 * @param rotamers current rotamer index per residue; modified in place
 * @param residues residues whose rotamer libraries are sampled
 * @param useAllElims if true, only accept moves that pass checkValidMove
 */
private void randomizeRotamers(int[] rotamers, Residue[] residues, boolean useAllElims) {
    int nRes = rotamers.length;
    for (int i = 0; i < nRes; i++) {
        Rotamer[] rotsi = residues[i].getRotamers(library);
        int lenri = rotsi.length;
        // Collect rotamer indices for residue i that have not been eliminated.
        ArrayList<Integer> allowedRots = new ArrayList<>(lenri);
        for (int ri = 0; ri < lenri; ri++) {
            if (!check(i, ri)) {
                allowedRots.add(ri);
            }
        }
        int nRots = allowedRots.size();
        if (nRots > 1) {
            // With useAllElims, re-draw until checkValidMove accepts the move.
            // NOTE(review): if useAllElims is true and no allowed rotamer ever
            // passes checkValidMove, this loop never terminates — confirm
            // callers guarantee at least one valid move exists.
            boolean validMove = !useAllElims;
            int indexRI;
            do {
                int ri = ThreadLocalRandom.current().nextInt(nRots);
                indexRI = allowedRots.get(ri);
                if (useAllElims) {
                    validMove = checkValidMove(i, indexRI, rotamers);
                }
            } while (!validMove);
            rotamers[i] = indexRI;
        }
    }
}
From source file:ffx.algorithms.mc.RosenbluthChiAllMove.java
/** * Yields a random vector on the surface of the unit sphere. * Algorithm 42 from Frenkel/Smit.//from w ww . j a va 2 s. c o m */ private double[] vectorOnASphere() { ThreadLocalRandom rand = ThreadLocalRandom.current(); double ranA, ranB, ranC, ransq; do { ranA = 1 - 2 * rand.nextDouble(); ranB = 1 - 2 * rand.nextDouble(); ransq = ranA * ranA + ranB * ranB; } while (ransq >= 1); ranC = 2 * FastMath.sqrt(1 - ransq); double vec[] = new double[3]; vec[0] = ranA * ranC; // x vec[1] = ranB * ranC; // y vec[2] = 1 - 2 * ransq; // z return vec; }
From source file:org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.java
/**
 * Verifies that adding a stored replica (which satisfies a block's expected
 * replication and removes it from the queue) does not cause the remaining
 * queued block to be skipped by the chooser.
 */
@Test(timeout = 60000)
public void testAddStoredBlockDoesNotCauseSkippedReplication() throws IOException {
    Namesystem mockNS = mock(Namesystem.class);
    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
    when(mockNS.hasWriteLock()).thenReturn(true);
    BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
    BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    // Adding QUEUE_UNDER_REPLICATED block
    underReplicatedBlocks.add(block1, 0, 1, 1);
    // Adding QUEUE_UNDER_REPLICATED block
    underReplicatedBlocks.add(block2, 0, 1, 1);
    List<List<BlockInfo>> chosenBlocks;
    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_VERY_UNDER_REPLICATED.
    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
    // Adding this block collection to the BlockManager, so that when we add the
    // block under construction, the BlockManager will realize the expected
    // replication has been achieved and remove it from the under-replicated
    // queue.
    BlockInfoContiguousUnderConstruction info = new BlockInfoContiguousUnderConstruction(block1, (short) 1);
    BlockCollection bc = mock(BlockCollection.class);
    when(bc.getPreferredBlockReplication()).thenReturn((short) 1);
    bm.addBlockCollection(info, bc);
    // Adding this block will increase its current replication, and that will
    // remove it from the queue.
    bm.addStoredBlockUnderConstruction(new StatefulBlockInfo(info, info, ReplicaState.FINALIZED),
            TestReplicationPolicy.storages[0]);
    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_VERY_UNDER_REPLICATED.
    // This block remains and should not be skipped over.
    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}
From source file:co.paralleluniverse.galaxy.netty.UDPComm.java
/**
 * Return a real number from an exponential distribution with rate lambda.
 * Based on http://en.wikipedia.org/wiki/Inverse_transform_sampling
 *
 * @param lambda rate parameter of the distribution (assumed positive)
 * @return a nonnegative exponentially distributed sample
 */
private static double randExp(double lambda) {
    // u is in [0, 1), so 1 - u is in (0, 1] and the log is always finite.
    double u = ThreadLocalRandom.current().nextDouble();
    return -Math.log(1 - u) / lambda;
}
From source file:org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.java
@Test(timeout = 60000) public void testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication() throws IOException { Namesystem mockNS = mock(Namesystem.class); when(mockNS.isPopulatingReplQueues()).thenReturn(true); BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration()); UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications; BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong()); BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong()); // Adding QUEUE_UNDER_REPLICATED block underReplicatedBlocks.add(block1, 0, 1, 1); // Adding QUEUE_UNDER_REPLICATED block underReplicatedBlocks.add(block2, 0, 1, 1); List<List<BlockInfo>> chosenBlocks; // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block // from QUEUE_VERY_UNDER_REPLICATED. chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1); assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0); final BlockInfo info = new BlockInfoContiguous(block1, (short) 1); final BlockCollection mbc = mock(BlockCollection.class); when(mbc.getLastBlock()).thenReturn(info); when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1); when(mbc.getPreferredBlockReplication()).thenReturn((short) 1); when(mbc.isUnderConstruction()).thenReturn(true); ContentSummary cs = mock(ContentSummary.class); when(cs.getLength()).thenReturn((long) 1); when(mbc.computeContentSummary(bm.getStoragePolicySuite())).thenReturn(cs); info.setBlockCollection(mbc);// w w w . j ava 2 s. 
c o m bm.addBlockCollection(info, mbc); DatanodeStorageInfo[] storageAry = { new DatanodeStorageInfo(dataNodes[0], new DatanodeStorage("s1")) }; final BlockInfoContiguousUnderConstruction ucBlock = info .convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, storageAry); DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class); DatanodeDescriptor dn = mock(DatanodeDescriptor.class); when(dn.isDecommissioned()).thenReturn(true); when(storage.getState()).thenReturn(DatanodeStorage.State.NORMAL); when(storage.getDatanodeDescriptor()).thenReturn(dn); when(storage.removeBlock(any(BlockInfo.class))).thenReturn(true); when(storage.addBlock(any(BlockInfo.class))).thenReturn(DatanodeStorageInfo.AddBlockResult.ADDED); ucBlock.addStorage(storage); when(mbc.setLastBlock((BlockInfo) any(), (DatanodeStorageInfo[]) any())).thenReturn(ucBlock); bm.convertLastBlockToUnderConstruction(mbc, 0L); // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block // from QUEUE_VERY_UNDER_REPLICATED. // This block remains and should not be skipped over. chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1); assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0); }
From source file:com.tremolosecurity.provisioning.core.ProvisioningEngineImpl.java
public MessageProducerHolder getTaskMessageProducer() throws Exception { int index = ThreadLocalRandom.current().nextInt(0, this.mpPools.size()); MessageProducerHolder mph = this.mpPools.get(index).borrowObject(); mph.setPool(this.mpPools.get(index)); return mph;//from w w w . ja v a 2s . c om }
From source file:org.apache.bookkeeper.client.RackawareEnsemblePlacementPolicyImpl.java
/** * Shuffle all the entries of an array that matches a mask. * It assumes all entries with the same mask are contiguous in the array. *///ww w . j a v a2 s . c om static void shuffleWithMask(DistributionSchedule.WriteSet writeSet, int mask, int bits) { int first = -1; int last = -1; for (int i = 0; i < writeSet.size(); i++) { if ((writeSet.get(i) & bits) == mask) { if (first == -1) { first = i; } last = i; } } if (first != -1) { for (int i = last + 1; i > first; i--) { int swapWith = ThreadLocalRandom.current().nextInt(i); writeSet.set(swapWith, writeSet.set(i, writeSet.get(swapWith))); } } }
From source file:org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy.java
/**
 * Verifies that lowering a block's replication via setReplication (which
 * updates the needed-replications queues) does not cause the remaining
 * queued block to be skipped by the chooser.
 */
@Test(timeout = 60000)
public void testupdateNeededReplicationsDoesNotCauseSkippedReplication() throws IOException {
    Namesystem mockNS = mock(Namesystem.class);
    when(mockNS.isPopulatingReplQueues()).thenReturn(true);
    BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
    UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
    BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
    // Adding QUEUE_UNDER_REPLICATED block
    underReplicatedBlocks.add(block1, 0, 1, 1);
    // Adding QUEUE_UNDER_REPLICATED block
    underReplicatedBlocks.add(block2, 0, 1, 1);
    List<List<BlockInfo>> chosenBlocks;
    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_VERY_UNDER_REPLICATED.
    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
    // Drop block1's replication from 1 to 0, updating the queues.
    bm.setReplication((short) 0, (short) 1, "", block1);
    // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
    // from QUEUE_VERY_UNDER_REPLICATED.
    // This block remains and should not be skipped over.
    chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
    assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}
From source file:test.java.com.spotify.docker.client.DefaultDockerClientTest.java
/**
 * Verifies that listContainers with allContainers + exitedContainers params
 * returns a container that has exited, identified by a unique random marker
 * embedded in its command line.
 */
@Test
public void testExitedListContainersParam()
        throws DockerException, InterruptedException, UnsupportedEncodingException {
    sut.pull(BUSYBOX_LATEST);
    // Unique marker so this test's container can be recognized in the listing.
    final String randomLong = Long.toString(ThreadLocalRandom.current().nextLong());
    final ContainerConfig containerConfig = ContainerConfig.builder().image(BUSYBOX_LATEST)
            .cmd("sh", "-c", "echo " + randomLong).build();
    final String containerName = randomName();
    final ContainerCreation containerCreation = sut.createContainer(containerConfig, containerName);
    final String containerId = containerCreation.id();
    // Run the container to completion so it ends up in the exited state.
    sut.startContainer(containerId);
    sut.waitContainer(containerId);
    final List<Container> containers = sut.listContainers(DockerClient.ListContainersParam.allContainers(),
            DockerClient.ListContainersParam.exitedContainers());
    assertThat(containers.size(), greaterThan(0));
    assertThat(containers.get(0).command(), containsString(randomLong));
}