Usage examples for java.util.Random.nextBytes(byte[])
public void nextBytes(byte[] bytes)
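Before the project-specific examples, a minimal, self-contained sketch of the call pattern (class and buffer names are illustrative): the caller allocates the array, and nextBytes fills it in place with pseudorandom values.

import java.util.Random;

public class NextBytesDemo {
    public static void main(String[] args) {
        Random random = new Random();
        byte[] buffer = new byte[16]; // caller allocates; nextBytes fills in place
        random.nextBytes(buffer);
        System.out.println(java.util.Arrays.toString(buffer));
    }
}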
From source file:org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.java
private void writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
    FSDataOutputStream stm = fileSys.create(name, true,
            fileSys.getConf().getInt("io.file.buffer.size", 4096), (short) repl, (long) blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
}
From source file:io.vertx.camel.OutboundEndpointTest.java
private byte[] getRandomBytes() {
    Random random = new Random();
    byte[] bytes = new byte[1024];
    random.nextBytes(bytes);
    return bytes;
}
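java.util.Random is fine for test payloads like the one above, but not for security-sensitive data. A sketch of the same shape using java.security.SecureRandom, whose nextBytes override is cryptographically strong (the method name here is illustrative):

import java.security.SecureRandom;

// Same call shape, but cryptographically strong output;
// suitable for keys, salts, and tokens rather than test data.
private byte[] getSecureRandomBytes() {
    SecureRandom random = new SecureRandom();
    byte[] bytes = new byte[1024];
    random.nextBytes(bytes);
    return bytes;
}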
From source file:com.palantir.atlasdb.schema.stream.StreamTest.java
private long storeAndCheckByteStreams(int size) throws IOException {
    byte[] reference = PtBytes.toBytes("ref");
    final byte[] bytesToStore = new byte[size];
    Random rand = new Random();
    rand.nextBytes(bytesToStore);

    final long id = timestampService.getFreshTimestamp();
    PersistentStreamStore store = StreamTestStreamStore.of(txManager, StreamTestTableFactory.of());
    txManager.runTaskWithRetry(t -> {
        store.storeStreams(t, ImmutableMap.of(id, new ByteArrayInputStream(bytesToStore)));
        store.markStreamAsUsed(t, id, reference);
        return null;
    });
    verifyLoadingStreams(id, bytesToStore, store);

    store.storeStream(new ByteArrayInputStream(bytesToStore));
    verifyLoadingStreams(id, bytesToStore, store);

    return id;
}
From source file:com.palantir.atlasdb.schema.stream.StreamTest.java
@Test
public void testStoreCopy() {
    final byte[] bytes = new byte[2 * StreamTestStreamStore.BLOCK_SIZE_IN_BYTES];
    Random rand = new Random();
    rand.nextBytes(bytes);

    long id1 = timestampService.getFreshTimestamp();
    long id2 = timestampService.getFreshTimestamp();
    ImmutableMap<Long, InputStream> streams = ImmutableMap.of(
            id1, new ByteArrayInputStream(bytes),
            id2, new ByteArrayInputStream(bytes));

    PersistentStreamStore store = StreamTestStreamStore.of(txManager, StreamTestTableFactory.of());
    txManager.runTaskWithRetry(t -> store.storeStreams(t, streams));

    Pair<Long, Sha256Hash> idAndHash1 = store.storeStream(new ByteArrayInputStream(bytes));
    Pair<Long, Sha256Hash> idAndHash2 = store.storeStream(new ByteArrayInputStream(bytes));

    assertThat(idAndHash1.getRhSide(), equalTo(idAndHash2.getRhSide())); // verify hashes are the same
    assertThat(idAndHash1.getLhSide(), not(equalTo(idAndHash2.getLhSide()))); // verify ids are different
}
From source file:org.broadinstitute.sting.utils.io.IOUtilsUnitTest.java
private byte[] getDeterministicRandomData(int size) {
    GenomeAnalysisEngine.resetRandomGenerator();
    Random rand = GenomeAnalysisEngine.getRandomGenerator();
    byte[] randomData = new byte[size];
    rand.nextBytes(randomData);
    return randomData;
}
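The helper above leans on GATK's engine to reset a shared generator. The underlying property can be sketched with plain java.util.Random: two instances constructed with the same seed emit identical byte sequences (the seed value 42 is arbitrary):

import java.util.Arrays;
import java.util.Random;

public class SeededBytesDemo {
    public static void main(String[] args) {
        // Two generators seeded identically produce identical byte sequences,
        // which is what makes seeded nextBytes usable in deterministic tests.
        byte[] a = new byte[32];
        byte[] b = new byte[32];
        new Random(42L).nextBytes(a);
        new Random(42L).nextBytes(b);
        System.out.println(Arrays.equals(a, b)); // true
    }
}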
From source file:com.palantir.atlasdb.schema.stream.StreamTest.java
@Test
public void testLookupStreamIdsByHash() throws Exception {
    final byte[] bytes1 = new byte[2 * StreamTestStreamStore.BLOCK_SIZE_IN_BYTES];
    final byte[] bytes2 = new byte[2 * StreamTestStreamStore.BLOCK_SIZE_IN_BYTES];

    long id1 = timestampService.getFreshTimestamp();
    long id2 = timestampService.getFreshTimestamp();

    Random rand = new Random();
    rand.nextBytes(bytes1);
    rand.nextBytes(bytes2);

    Sha256Hash hash1 = Sha256Hash.computeHash(bytes1);
    Sha256Hash hash2 = Sha256Hash.computeHash(bytes2);
    Sha256Hash hash3 = Sha256Hash.EMPTY;

    ImmutableMap<Long, InputStream> streams = ImmutableMap.of(
            id1, new ByteArrayInputStream(bytes1),
            id2, new ByteArrayInputStream(bytes2));

    PersistentStreamStore store = StreamTestStreamStore.of(txManager, StreamTestTableFactory.of());
    txManager.runTaskWithRetry(t -> store.storeStreams(t, streams));

    Map<Sha256Hash, Long> sha256HashLongMap = txManager
            .runTaskWithRetry(t -> store.lookupStreamIdsByHash(t, ImmutableSet.of(hash1, hash2, hash3)));

    assertEquals(id1, sha256HashLongMap.get(hash1).longValue());
    assertEquals(id2, sha256HashLongMap.get(hash2).longValue());
    assertNull(sha256HashLongMap.get(hash3));
}
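The test above keys streams by content hash via AtlasDB's Sha256Hash helper. For reference, the same digest over a random payload can be sketched with the standard java.security.MessageDigest API (class and buffer names are illustrative):

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Random;

public class HashRandomBytesDemo {
    public static void main(String[] args) throws NoSuchAlgorithmException {
        byte[] payload = new byte[4096];
        new Random().nextBytes(payload);
        // SHA-256 of the payload; identical payloads yield identical digests,
        // which is the property the stream-store lookup relies on.
        byte[] digest = MessageDigest.getInstance("SHA-256").digest(payload);
        System.out.println(digest.length); // 32 bytes
    }
}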
From source file:org.apache.activemq.bugs.AMQ6131Test.java
@Test(timeout = 300000)
public void testDurableWithNoMessageAfterRestartAndIndexRecovery() throws Exception {
    final File persistentDir = getPersistentDir();
    broker.getBroker().addDestination(broker.getAdminConnectionContext(), new ActiveMQTopic("durable.sub"),
            false);

    ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection = (ActiveMQConnection) connectionFactory.createConnection();
    connection.setClientID("myId");
    connection.start();

    final Session jmsSession = connection.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);
    TopicSubscriber durable = jmsSession.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");
    final MessageProducer producer = jmsSession.createProducer(new ActiveMQTopic("durable.sub"));

    final int original = new ArrayList<File>(
            FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"), TrueFileFilter.INSTANCE))
                    .size();

    // 100KB message payload
    final byte[] data = new byte[100000];
    final Random random = new Random();
    random.nextBytes(data);

    // run test with enough messages to create a second journal file
    final AtomicInteger messageCount = new AtomicInteger();
    assertTrue("Should have added a journal file", Wait.waitFor(new Condition() {
        @Override
        public boolean isSatisified() throws Exception {
            final ActiveMQBytesMessage message = new ActiveMQBytesMessage();
            message.setContent(new ByteSequence(data));
            for (int i = 0; i < 100; i++) {
                producer.send(message);
                messageCount.getAndIncrement();
            }
            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() > original;
        }
    }));

    // Consume all messages
    for (int i = 0; i < messageCount.get(); i++) {
        durable.receive();
    }
    durable.close();

    assertTrue("Subscription should go inactive", Wait.waitFor(new Condition() {
        @Override
        public boolean isSatisified() throws Exception {
            return broker.getAdminView().getInactiveDurableTopicSubscribers().length == 1;
        }
    }));

    // force a GC of unneeded journal files
    getBroker().getPersistenceAdapter().checkpoint(true);

    // wait until a journal file has been GC'd after receiving messages
    assertTrue("Should have garbage collected", Wait.waitFor(new Wait.Condition() {
        @Override
        public boolean isSatisified() throws Exception {
            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() == original;
        }
    }));

    // stop the broker so we can blow away the index
    getBroker().stop();
    getBroker().waitUntilStopped();

    // delete the index so that the durables are gone from the index
    // The test passes if you take out this delete section
    for (File index : FileUtils.listFiles(persistentDir, new WildcardFileFilter("db.*"),
            TrueFileFilter.INSTANCE)) {
        FileUtils.deleteQuietly(index);
    }

    stopBroker();
    setUpBroker(false);

    assertEquals(1, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(0, broker.getAdminView().getDurableTopicSubscribers().length);

    ActiveMQConnectionFactory connectionFactory2 = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection2 = (ActiveMQConnection) connectionFactory2.createConnection();
    connection2.setClientID("myId");
    connection2.start();

    final Session jmsSession2 = connection2.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);
    TopicSubscriber durable2 = jmsSession2.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");

    assertEquals(0, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(1, broker.getAdminView().getDurableTopicSubscribers().length);
    assertNull(durable2.receive(500));
}
From source file:org.apache.activemq.bugs.AMQ6131Test.java
@Test(timeout = 300000)
public void testDurableWithOnePendingAfterRestartAndIndexRecovery() throws Exception {
    final File persistentDir = getPersistentDir();
    broker.getBroker().addDestination(broker.getAdminConnectionContext(), new ActiveMQTopic("durable.sub"),
            false);

    ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection = (ActiveMQConnection) connectionFactory.createConnection();
    connection.setClientID("myId");
    connection.start();

    final Session jmsSession = connection.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);
    TopicSubscriber durable = jmsSession.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");
    final MessageProducer producer = jmsSession.createProducer(new ActiveMQTopic("durable.sub"));

    final int original = new ArrayList<File>(
            FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"), TrueFileFilter.INSTANCE))
                    .size();

    // 100KB message payload
    final byte[] data = new byte[100000];
    final Random random = new Random();
    random.nextBytes(data);

    // run test with enough messages to create a second journal file
    final AtomicInteger messageCount = new AtomicInteger();
    assertTrue("Should have added a journal file", Wait.waitFor(new Condition() {
        @Override
        public boolean isSatisified() throws Exception {
            final ActiveMQBytesMessage message = new ActiveMQBytesMessage();
            message.setContent(new ByteSequence(data));
            for (int i = 0; i < 100; i++) {
                producer.send(message);
                messageCount.getAndIncrement();
            }
            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() > original;
        }
    }));

    // Consume all but 1 message
    for (int i = 0; i < messageCount.get() - 1; i++) {
        durable.receive();
    }
    durable.close();

    assertTrue("Subscription should go inactive", Wait.waitFor(new Condition() {
        @Override
        public boolean isSatisified() throws Exception {
            return broker.getAdminView().getInactiveDurableTopicSubscribers().length == 1;
        }
    }));

    // force a GC of unneeded journal files
    getBroker().getPersistenceAdapter().checkpoint(true);

    // verify no journal file is GC'd, since one message is still pending
    assertFalse("Should not have garbage collected", Wait.waitFor(new Wait.Condition() {
        @Override
        public boolean isSatisified() throws Exception {
            return new ArrayList<File>(FileUtils.listFiles(persistentDir, new WildcardFileFilter("*.log"),
                    TrueFileFilter.INSTANCE)).size() == original;
        }
    }, 5000, 500));

    // stop the broker so we can blow away the index
    getBroker().stop();
    getBroker().waitUntilStopped();

    // delete the index so that the durables are gone from the index
    // The test passes if you take out this delete section
    for (File index : FileUtils.listFiles(persistentDir, new WildcardFileFilter("db.*"),
            TrueFileFilter.INSTANCE)) {
        FileUtils.deleteQuietly(index);
    }

    stopBroker();
    setUpBroker(false);

    assertEquals(1, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(0, broker.getAdminView().getDurableTopicSubscribers().length);

    ActiveMQConnectionFactory connectionFactory2 = new ActiveMQConnectionFactory(this.brokerConnectURI);
    ActiveMQConnection connection2 = (ActiveMQConnection) connectionFactory2.createConnection();
    connection2.setClientID("myId");
    connection2.start();

    final Session jmsSession2 = connection2.createSession(false, javax.jms.Session.AUTO_ACKNOWLEDGE);
    TopicSubscriber durable2 = jmsSession2.createDurableSubscriber(new ActiveMQTopic("durable.sub"), "sub");

    assertEquals(0, broker.getAdminView().getInactiveDurableTopicSubscribers().length);
    assertEquals(1, broker.getAdminView().getDurableTopicSubscribers().length);
    assertNotNull(durable2.receive(5000));
}
From source file:org.apache.hadoop.hdfs.server.namenode.TestDecommissioningStatus.java
private void writeFile(FileSystem fileSys, Path name, short repl) throws IOException {
    // create and write a file that contains three blocks of data
    FSDataOutputStream stm = fileSys.create(name, true,
            fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl, blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    stm.close();
}
From source file:io.pravega.segmentstore.server.reading.StorageReaderTests.java
private byte[] populateSegment(Storage storage) {
    Random random = new Random();
    int length = MIN_SEGMENT_LENGTH + random.nextInt(MAX_SEGMENT_LENGTH - MIN_SEGMENT_LENGTH);
    byte[] segmentData = new byte[length];
    random.nextBytes(segmentData);
    storage.create(SEGMENT_METADATA.getName(), TIMEOUT).join();
    val writeHandle = storage.openWrite(SEGMENT_METADATA.getName()).join();
    storage.write(writeHandle, 0, new ByteArrayInputStream(segmentData), segmentData.length, TIMEOUT).join();
    return segmentData;
}