List of usage examples for java.util.Random.nextBytes
public void nextBytes(byte[] bytes)
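The method fills the caller-supplied array with pseudorandom bytes: the array's length determines how many bytes are generated, and the method returns nothing. Before the collected examples, here is a minimal self-contained sketch; the class name NextBytesDemo, the fixed seed 42, and the 16-byte buffer size are illustrative choices only, not taken from any of the projects quoted below.

import java.util.Random;

public class NextBytesDemo {
    public static void main(String[] args) {
        // A fixed seed makes the output reproducible across runs;
        // use new Random() instead for different bytes each time.
        Random random = new Random(42L);

        // The caller allocates the buffer; nextBytes fills it in place.
        byte[] buffer = new byte[16];
        random.nextBytes(buffer);

        // Print the buffer as hex.
        StringBuilder hex = new StringBuilder();
        for (byte b : buffer) {
            hex.append(String.format("%02x", b));
        }
        System.out.println(hex);
    }
}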
From source file: com.opengamma.engine.cache.BerkeleyDBValueSpecificationIdentifierBinaryDataStoreTest.java
Fills a Berkeley DB binary data store with variable-length random entries, then benchmarks get throughput over several read cycles.
public void getPerformanceTest() {
    final int numEntries = 500;
    final int numCycles = 5;
    final int numGets = numCycles * numEntries;
    final int minEntrySize = 50;
    final int maxEntrySize = 1000;
    final Random random = new Random();

    File dbDir = createDbDir("getPerformanceTest");
    Environment dbEnvironment = BerkeleyDBViewComputationCacheSource.constructDatabaseEnvironment(dbDir, false);
    BerkeleyDBBinaryDataStore dataStore = new BerkeleyDBBinaryDataStore(dbEnvironment, "getPerformanceTest");
    dataStore.start();

    int randRange = maxEntrySize - minEntrySize;
    for (int i = 0; i < numEntries; i++) {
        int nBytes = minEntrySize + random.nextInt(randRange);
        byte[] bytes = new byte[nBytes];
        random.nextBytes(bytes);
        dataStore.put(i, bytes);
    }

    OperationTimer timer = new OperationTimer(s_logger, "Loading {} entries", numGets);
    for (int j = 0; j < numCycles; j++) {
        for (int i = 0; i < numEntries; i++) {
            byte[] data = dataStore.get(i);
            assertNotNull(data);
            assertTrue(data.length >= minEntrySize);
            assertTrue(data.length <= maxEntrySize);
        }
    }
    long numMillis = timer.finished();

    double msPerGet = ((double) numMillis) / ((double) numGets);
    double getsPerSecond = 1000.0 / msPerGet;
    s_logger.info("for {} gets, {} ms/get, {} gets/sec", new Object[] { numGets, msPerGet, getsPerSecond });

    dataStore.delete();
    dataStore.stop();
    dbEnvironment.close();
}
From source file: org.apache.hadoop.mapred.TestSequenceFileInputFormat.java
Writes random-length values into a SequenceFile and checks that every generated key lands in exactly one input split.
public void testFormat() throws Exception {
    JobConf job = new JobConf(conf);
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path(System.getProperty("test.build.data", ".") + "/mapred");
    Path file = new Path(dir, "test.seq");
    Reporter reporter = Reporter.NULL;

    int seed = new Random().nextInt();
    //LOG.info("seed = " + seed);
    Random random = new Random(seed);

    fs.delete(dir, true);
    FileInputFormat.setInputPaths(job, dir);

    // for a variety of lengths
    for (int length = 0; length < MAX_LENGTH; length += random.nextInt(MAX_LENGTH / 10) + 1) {
        //LOG.info("creating; entries = " + length);

        // create a file with length entries
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, file, IntWritable.class, BytesWritable.class);
        try {
            for (int i = 0; i < length; i++) {
                IntWritable key = new IntWritable(i);
                byte[] data = new byte[random.nextInt(10)];
                random.nextBytes(data);
                BytesWritable value = new BytesWritable(data);
                writer.append(key, value);
            }
        } finally {
            writer.close();
        }

        // try splitting the file in a variety of sizes
        InputFormat<IntWritable, BytesWritable> format = new SequenceFileInputFormat<IntWritable, BytesWritable>();
        IntWritable key = new IntWritable();
        BytesWritable value = new BytesWritable();
        for (int i = 0; i < 3; i++) {
            int numSplits = random.nextInt(MAX_LENGTH / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
            //LOG.info("splitting: requesting = " + numSplits);
            InputSplit[] splits = format.getSplits(job, numSplits);
            //LOG.info("splitting: got = " + splits.length);

            // check each split
            BitSet bits = new BitSet(length);
            for (int j = 0; j < splits.length; j++) {
                RecordReader<IntWritable, BytesWritable> reader = format.getRecordReader(splits[j], job, reporter);
                try {
                    int count = 0;
                    while (reader.next(key, value)) {
                        // if (bits.get(key.get())) {
                        //     LOG.info("splits[" + j + "]=" + splits[j] + " : " + key.get());
                        //     LOG.info("@" + reader.getPos());
                        // }
                        assertFalse("Key in multiple partitions.", bits.get(key.get()));
                        bits.set(key.get());
                        count++;
                    }
                    //LOG.info("splits[" + j + "]=" + splits[j] + " count=" + count);
                } finally {
                    reader.close();
                }
            }
            assertEquals("Some keys in no partition.", length, bits.cardinality());
        }
    }
}
From source file: com.emc.ecs.sync.CasMigrationTest.java
Generates random payloads both smaller and larger than the pipe buffer and verifies that their MD5 digests survive a piped copy.
@Test
public void testPipedStreams() throws Exception {
    Random random = new Random();

    // test smaller than pipe buffer
    byte[] source = new byte[random.nextInt(BUFFER_SIZE) + 1];
    random.nextBytes(source);
    String md5 = Hex.encodeHexString(MessageDigest.getInstance("MD5").digest(source));
    Assert.assertEquals("MD5 mismatch", md5, pipeAndGetMd5(source));

    // test larger than pipe buffer
    source = new byte[random.nextInt(BUFFER_SIZE) + BUFFER_SIZE + 1];
    random.nextBytes(source);
    md5 = Hex.encodeHexString(MessageDigest.getInstance("MD5").digest(source));
    Assert.assertEquals("MD5 mismatch", md5, pipeAndGetMd5(source));
}
From source file: org.apache.hadoop.dfs.TestFileCreation.java
Attempts to write a random byte after every datanode has been shut down and verifies that the failed block allocations are cleaned up.
/**
 * Test that file data does not become corrupted even in the face of errors.
 */
public void testFileCreationError1() throws IOException {
    Configuration conf = new Configuration();
    conf.setInt("heartbeat.recheck.interval", 1000);
    conf.setInt("dfs.heartbeat.interval", 1);
    if (simulatedStorage) {
        conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
    }

    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    FileSystem fs = cluster.getFileSystem();
    cluster.waitActive();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);

    try {
        // create a new file.
        Path file1 = new Path("/filestatus.dat");
        FSDataOutputStream stm = createFile(fs, file1, 1);

        // verify that file exists in FS namespace
        assertTrue(file1 + " should be a file", fs.getFileStatus(file1).isDir() == false);
        System.out.println("Path : \"" + file1 + "\"");

        // kill the datanode
        cluster.shutdownDataNodes();

        // wait for the datanode to be declared dead
        while (true) {
            DatanodeInfo[] info = client.datanodeReport(FSConstants.DatanodeReportType.LIVE);
            if (info.length == 0) {
                break;
            }
            System.out.println("testFileCreationError1: waiting for datanode to die.");
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
            }
        }

        // write 1 byte to file.
        // This should fail because all datanodes are dead.
        byte[] buffer = new byte[1];
        Random rand = new Random(seed);
        rand.nextBytes(buffer);
        try {
            stm.write(buffer);
            stm.close();
        } catch (Exception e) {
            System.out.println("Encountered expected exception");
        }

        // verify that no blocks are associated with this file
        // bad block allocations were cleaned up earlier.
        LocatedBlocks locations = client.namenode.getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        System.out.println("locations = " + locations.locatedBlockCount());
        assertTrue("Error blocks were not cleaned up", locations.locatedBlockCount() == 0);
    } finally {
        cluster.shutdown();
        client.close();
    }
}
From source file: org.apache.hadoop.io.compress.TestCodec.java
Compresses a random buffer with java.util.zip's GZIPOutputStream and verifies that Hadoop's built-in gzip decompressor round-trips it byte for byte.
public void testGzipCompatibility() throws IOException {
    Random r = new Random();
    long seed = r.nextLong();
    r.setSeed(seed);
    LOG.info("seed: " + seed);

    DataOutputBuffer dflbuf = new DataOutputBuffer();
    GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
    byte[] b = new byte[r.nextInt(128 * 1024 + 1)];
    r.nextBytes(b);
    gzout.write(b);
    gzout.close();

    DataInputBuffer gzbuf = new DataInputBuffer();
    gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

    Configuration conf = new Configuration();
    conf.setBoolean("hadoop.native.lib", false);
    CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
    Decompressor decom = codec.createDecompressor();
    assertNotNull(decom);
    assertEquals(BuiltInGzipDecompressor.class, decom.getClass());
    InputStream gzin = codec.createInputStream(gzbuf, decom);

    dflbuf.reset();
    IOUtils.copyBytes(gzin, dflbuf, 4096);
    final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
    assertTrue(java.util.Arrays.equals(b, dflchk));
}
From source file: org.mule.util.queue.AbstractTransactionQueueManagerTestCase.java
Populates a queue with random 2 KB payloads and verifies the contents survive a warm restart of the queue manager.
@Test
public void testRecoverWarmRestart() throws Exception {
    TransactionalQueueManager mgr = createQueueManager();
    mgr.start();
    QueueSession s = mgr.getQueueSession();
    Queue q = s.getQueue("warmRecoverQueue");

    int toPopulate = 500;

    // Populate queue
    Random rnd = new Random();
    for (int j = 0; j < toPopulate; j++) {
        byte[] o = new byte[2048];
        rnd.nextBytes(o);
        q.put(o);
    }
    assertEquals(toPopulate, q.size());

    // Stop and start TransactionalQueueManager
    mgr.stop();
    mgr.start();

    assertEquals(toPopulate, q.size());
}
From source file: org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.java
Writes batched Puts with fixed-size random values until a target byte count has been written, then flushes the table.
void writeData(TableName tn, long sizeInBytes, byte[] qual) throws IOException {
    final Connection conn = testUtil.getConnection();
    final Table table = conn.getTable(tn);
    try {
        List<Put> updates = new ArrayList<>();
        long bytesToWrite = sizeInBytes;
        long rowKeyId = 0L;
        final StringBuilder sb = new StringBuilder();
        final Random r = new Random();
        while (bytesToWrite > 0L) {
            sb.setLength(0);
            sb.append(Long.toString(rowKeyId));
            // Use the reverse counter as the rowKey to get even spread across all regions
            Put p = new Put(Bytes.toBytes(sb.reverse().toString()));
            byte[] value = new byte[SIZE_PER_VALUE];
            r.nextBytes(value);
            p.addColumn(Bytes.toBytes(F1), qual, value);
            updates.add(p);

            // Batch ~13KB worth of updates
            if (updates.size() > 50) {
                table.put(updates);
                updates.clear();
            }

            // Just count the value size, ignore the size of rowkey + column
            bytesToWrite -= SIZE_PER_VALUE;
            rowKeyId++;
        }

        // Write the final batch
        if (!updates.isEmpty()) {
            table.put(updates);
        }
        LOG.debug("Data was written to HBase");

        // Push the data to disk.
        testUtil.getAdmin().flush(tn);
        LOG.debug("Data flushed to disk");
    } finally {
        table.close();
    }
}
From source file: org.mule.util.queue.AbstractTransactionQueueManagerTestCase.java
Populates a queue with random 2 KB payloads and verifies that the contents survive a cold restart only when the queue is persistent.
@Test
public void testRecoverColdRestart() throws Exception {
    TransactionalQueueManager mgr = createQueueManager();
    QueueSession s = mgr.getQueueSession();
    Queue q = s.getQueue("warmRecoverQueue");
    mgr.start();

    int toPopulate = 500;

    // Populate queue
    Random rnd = new Random();
    for (int j = 0; j < toPopulate; j++) {
        byte[] o = new byte[2048];
        rnd.nextBytes(o);
        q.put(o);
    }
    assertEquals(toPopulate, q.size());

    // Stop and recreate TransactionalQueueManager simulating a cold restart
    mgr.stop();
    mgr = createQueueManager();
    s = mgr.getQueueSession();
    q = s.getQueue("warmRecoverQueue");
    mgr.start();

    if (isPersistent()) {
        assertEquals(toPopulate, q.size());
    } else {
        assertEquals(0, q.size());
    }
}
From source file: org.apache.hadoop.raid.TestRaidShellFsck.java
Creates a test file by writing FILE_BLOCKS full blocks of random data.
/**
 * Creates a test file consisting of random data.
 */
private void createTestFile(Path filePath) throws IOException {
    Random rand = new Random();
    FSDataOutputStream stm = dfs.create(filePath, true, conf.getInt("io.file.buffer.size", 4096), REPL, BLOCK_SIZE);
    final byte[] b = new byte[(int) BLOCK_SIZE];
    for (int i = 0; i < FILE_BLOCKS; i++) {
        rand.nextBytes(b);
        stm.write(b);
    }
    stm.close();
    LOG.info("test file created");
}
From source file: org.apache.hadoop.hdfs.qjournal.client.TestImageUploadStream.java
Writes random data to a stream one byte or one small chunk at a time, mirroring every write into a ByteArrayOutputStream for later comparison.
/**
 * Write random data by using write(byte) and write(byte[]).
 */
private byte[] writeData(OutputStream os, int size) throws IOException {
    Random r = new Random();
    int approxMaxLen = size;
    int bytesWritten = 0;
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    while (bytesWritten < approxMaxLen) {
        if (r.nextBoolean()) {
            // write a single random byte
            int b = r.nextInt();
            os.write(b);
            bos.write(b);
            bytesWritten++;
        } else {
            // write a random chunk of 1 to 10 bytes
            byte[] rand = new byte[r.nextInt(10) + 1];
            r.nextBytes(rand);
            os.write(rand);
            bos.write(rand);
            bytesWritten += rand.length;
        }
    }
    return bos.toByteArray();
}