Example usage for java.util.Random.nextBytes

List of usage examples for java.util.Random.nextBytes

Introduction

On this page you can find example usage for java.util.Random.nextBytes.

Prototype

public void nextBytes(byte[] bytes) 

Document

Generates random bytes and places them into a user-supplied byte array.
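
Below is a minimal, self-contained sketch of the call, shown before the examples taken from real projects; the class name NextBytesExample, the 16-byte buffer size, and the fixed seed are arbitrary choices for illustration.

import java.util.Arrays;
import java.util.Random;

public class NextBytesExample {
    public static void main(String[] args) {
        // A fixed seed makes the generated bytes reproducible across runs;
        // use new Random() instead to get different bytes each time.
        Random random = new Random(42L);
        byte[] buffer = new byte[16];
        // nextBytes fills every element of the user-supplied array with a pseudo-random byte.
        random.nextBytes(buffer);
        System.out.println(Arrays.toString(buffer));
    }
}

Note that java.util.Random is not cryptographically strong; the projects below use it only to generate test data.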

Usage

From source file:org.apache.jackrabbit.core.data.TestCaseBase.java

/**
 * Tests {@link DataStore#getAllIdentifiers()} and asserts that all identifiers
 * are returned.
 */
protected void doGetAllIdentifiersTest() throws Exception {
    ds = createDataStore();
    List<DataIdentifier> list = new ArrayList<DataIdentifier>();
    Random random = randomGen;
    byte[] data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec = ds.addRecord(new ByteArrayInputStream(data));
    list.add(rec.getIdentifier());

    data = new byte[dataLength];
    random.nextBytes(data);
    rec = ds.addRecord(new ByteArrayInputStream(data));
    list.add(rec.getIdentifier());

    data = new byte[dataLength];
    random.nextBytes(data);
    rec = ds.addRecord(new ByteArrayInputStream(data));
    list.add(rec.getIdentifier());

    Iterator<DataIdentifier> itr = ds.getAllIdentifiers();
    while (itr.hasNext()) {
        assertTrue("record found on list", list.remove(itr.next()));
    }
    assertEquals(0, list.size());
    ds.close();
}

From source file:com.navercorp.pinpoint.common.buffer.FixedBufferTest.java

public void find_SVInt_errorCode() throws Exception {
    Random random = new Random();
    byte[] bytes = new byte[10];

    while (true) {
        random.nextBytes(bytes);
        Buffer buffer = new FixedBuffer(bytes);
        try {
            int i = buffer.readVInt();
        } catch (IllegalArgumentException e) {
            logger.debug(e.getMessage(), e);
            String binaryString = BytesUtils.toString(bytes);
            logger.debug(binaryString);
            for (byte aByte : bytes) {
                String code = String.valueOf((int) aByte);
                logger.debug(code);
            }
            return;
        }
    }
}

From source file:com.navercorp.pinpoint.common.buffer.FixedBufferTest.java

public void find_SVLong_errorCode() throws Exception {
    Random random = new Random();
    byte[] bytes = new byte[10];

    while (true) {
        random.nextBytes(bytes);
        Buffer buffer = new FixedBuffer(bytes);
        try {
            long i = buffer.readVLong();
        } catch (IllegalArgumentException e) {
            logger.debug(e.getMessage(), e);
            String binaryString = BytesUtils.toString(bytes);
            logger.debug(binaryString);
            for (byte aByte : bytes) {
                String code = String.valueOf((int) aByte);
                logger.debug(code);
            }
            return;
        }
    }
}

From source file:org.apache.jackrabbit.core.data.TestCaseBase.java

/**
 * Asserts that the last-modified timestamps of records accessed after the
 * {@link DataStore#updateModifiedDateOnAccess(long)} invocation are updated.
 */
protected void doUpdateLastModifiedOnAccessTest() throws Exception {
    ds = createDataStore();
    Random random = randomGen;
    byte[] data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec1 = ds.addRecord(new ByteArrayInputStream(data));

    data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec2 = ds.addRecord(new ByteArrayInputStream(data));
    LOG.debug("rec2 timestamp=" + rec2.getLastModified());

    // sleep for some time to ensure that async upload completes in backend.
    sleep(6000);
    long updateTime = System.currentTimeMillis();
    LOG.debug("updateTime=" + updateTime);
    ds.updateModifiedDateOnAccess(updateTime);

    // sleep to workaround System.currentTimeMillis granularity.
    sleep(100);
    data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec3 = ds.addRecord(new ByteArrayInputStream(data));

    data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec4 = ds.addRecord(new ByteArrayInputStream(data));

    rec1 = ds.getRecord(rec1.getIdentifier());

    assertEquals("rec1 touched", true, ds.getLastModified(rec1.getIdentifier()) > updateTime);
    LOG.debug("rec2 timestamp=" + rec2.getLastModified());
    assertEquals("rec2 not touched", true, ds.getLastModified(rec2.getIdentifier()) < updateTime);
    assertEquals("rec3 touched", true, ds.getLastModified(rec3.getIdentifier()) > updateTime);
    assertEquals("rec4 touched", true, ds.getLastModified(rec4.getIdentifier()) > updateTime);
    ds.close();

}

From source file:org.apache.hadoop.record.TestRecordWritable.java

public void testFormat() throws Exception {
    JobConf job = new JobConf(conf);
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path(System.getProperty("test.build.data", ".") + "/mapred");
    Path file = new Path(dir, "test.seq");

    int seed = new Random().nextInt();
    //LOG.info("seed = "+seed);
    Random random = new Random(seed);

    fs.delete(dir, true);

    FileInputFormat.setInputPaths(job, dir);

    // for a variety of lengths
    for (int length = 0; length < MAX_LENGTH; length += random.nextInt(MAX_LENGTH / 10) + 1) {

        // create a file with length entries
        SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf, file, RecInt.class, RecBuffer.class);
        try {
            for (int i = 0; i < length; i++) {
                RecInt key = new RecInt();
                key.setData(i);
                byte[] data = new byte[random.nextInt(10)];
                random.nextBytes(data);
                RecBuffer value = new RecBuffer();
                value.setData(new Buffer(data));
                writer.append(key, value);
            }
        } finally {
            writer.close();
        }

        // try splitting the file in a variety of sizes
        InputFormat<RecInt, RecBuffer> format = new SequenceFileInputFormat<RecInt, RecBuffer>();
        RecInt key = new RecInt();
        RecBuffer value = new RecBuffer();
        for (int i = 0; i < 3; i++) {
            int numSplits = random.nextInt(MAX_LENGTH / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
            InputSplit[] splits = format.getSplits(job, numSplits);

            // check each split
            BitSet bits = new BitSet(length);
            for (int j = 0; j < splits.length; j++) {
                RecordReader<RecInt, RecBuffer> reader = format.getRecordReader(splits[j], job, Reporter.NULL);
                try {
                    int count = 0;
                    while (reader.next(key, value)) {
                        assertFalse("Key in multiple partitions.", bits.get(key.getData()));
                        bits.set(key.getData());
                        count++;
                    }
                } finally {
                    reader.close();
                }
            }
            assertEquals("Some keys in no partition.", length, bits.cardinality());
        }

    }
}

From source file:org.apache.jackrabbit.core.data.TestCaseBase.java

/**
 * Asserts that {@link DataStore#deleteAllOlderThan(long)} deletes only
 * records older than the argument passed.
 */
protected void doDeleteAllOlderThan() throws Exception {
    ds = createDataStore();
    Random random = randomGen;
    byte[] data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec1 = ds.addRecord(new ByteArrayInputStream(data));

    data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec2 = ds.addRecord(new ByteArrayInputStream(data));

    // sleep for some time to ensure that async upload completes in backend.
    sleep(6000);
    long updateTime = System.currentTimeMillis();
    ds.updateModifiedDateOnAccess(updateTime);

    // sleep to workaround System.currentTimeMillis granularity.
    sleep(100);
    data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec3 = ds.addRecord(new ByteArrayInputStream(data));

    data = new byte[dataLength];
    random.nextBytes(data);
    DataRecord rec4 = ds.addRecord(new ByteArrayInputStream(data));

    rec1 = ds.getRecord(rec1.getIdentifier());
    ds.clearInUse();
    assertEquals("only rec2 should be deleted", 1, ds.deleteAllOlderThan(updateTime));
    assertNull("rec2 should be null", ds.getRecordIfStored(rec2.getIdentifier()));

    Iterator<DataIdentifier> itr = ds.getAllIdentifiers();
    List<DataIdentifier> list = new ArrayList<DataIdentifier>();
    list.add(rec1.getIdentifier());
    list.add(rec3.getIdentifier());
    list.add(rec4.getIdentifier());
    while (itr.hasNext()) {
        assertTrue("record found on list", list.remove(itr.next()));
    }

    assertEquals("touched records found", 0, list.size());
    assertEquals("rec1 touched", true, ds.getLastModified(rec1.getIdentifier()) > updateTime);
    assertEquals("rec3 touched", true, ds.getLastModified(rec3.getIdentifier()) > updateTime);
    assertEquals("rec4 touched", true, ds.getLastModified(rec4.getIdentifier()) > updateTime);
    ds.close();
}

From source file:com.opengamma.engine.cache.BerkeleyDBValueSpecificationIdentifierBinaryDataStoreTest.java

public void parallelPutGetTest() throws InterruptedException {
    final int numEntries = 5000;
    final int numCycles = 1;
    final int numGets = numCycles * numEntries;
    final Random random = new Random();

    File dbDir = createDbDir("parallelPutGetTest");
    Environment dbEnvironment = BerkeleyDBViewComputationCacheSource.constructDatabaseEnvironment(dbDir, false);

    final BerkeleyDBBinaryDataStore dataStore = new BerkeleyDBBinaryDataStore(dbEnvironment,
            "parallelPutGetTest");
    dataStore.start();

    final AtomicLong currentMaxIdentifier = new AtomicLong(0L);
    final byte[] bytes = new byte[100];
    random.nextBytes(bytes);
    Thread tPut = new Thread(new Runnable() {
        @Override
        public void run() {
            OperationTimer timer = new OperationTimer(s_logger, "Putting {} entries", numEntries);
            for (int i = 0; i < numEntries; i++) {
                random.nextBytes(bytes);
                dataStore.put(i, bytes);
                currentMaxIdentifier.set(i);
            }
            long numMillis = timer.finished();

            double msPerPut = ((double) numMillis) / ((double) numEntries);
            double putsPerSecond = 1000.0 / msPerPut;

            s_logger.info("for {} puts, {} ms/put, {} puts/sec",
                    new Object[] { numEntries, msPerPut, putsPerSecond });
        }

    }, "Putter");

    class GetRunner implements Runnable {
        @Override
        public void run() {
            OperationTimer timer = new OperationTimer(s_logger, "Getting {} entries", numGets);
            for (int i = 0; i < numGets; i++) {
                int maxIdentifier = (int) currentMaxIdentifier.get();
                long actualIdentifier = random.nextInt(maxIdentifier);
                dataStore.get(actualIdentifier);
            }
            long numMillis = timer.finished();

            double msPerGet = ((double) numMillis) / ((double) numGets);
            double getsPerSecond = 1000.0 / msPerGet;

            s_logger.info("for {} gets, {} ms/get, {} gets/sec",
                    new Object[] { numGets, msPerGet, getsPerSecond });
        }
    }
    Thread tGet1 = new Thread(new GetRunner(), "getter-1");
    Thread tGet2 = new Thread(new GetRunner(), "getter-2");
    //Thread tGet3 = new Thread(new GetRunner(), "getter-3");
    //Thread tGet4 = new Thread(new GetRunner(), "getter-4");
    //Thread tGet5 = new Thread(new GetRunner(), "getter-5");

    tPut.start();
    Thread.sleep(5L);
    tGet1.start();
    tGet2.start();
    //tGet3.start();
    //tGet4.start();
    //tGet5.start();

    tPut.join();
    tGet1.join();
    tGet2.join();
    //tGet3.join();
    //tGet4.join();
    //tGet5.join();

    dataStore.delete();
    dataStore.stop();
    dbEnvironment.close();
}

From source file:org.apache.hadoop.dfs.TestFileCreation.java

private void writeFile(FSDataOutputStream stm) throws IOException {
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
}

From source file:org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java

public void testCheckpointOfSLSWhenEntryLogIsRotated(boolean entryLogPerLedgerEnabled) throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");

    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(5000)
            .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setAutoRecoveryDaemonEnabled(false)
            //set very high period for flushInterval
            .setFlushInterval(30000).setBookiePort(PortManager.nextFreePort())
            // entrylog per ledger is enabled
            .setEntryLogPerLedgerEnabled(entryLogPerLedgerEnabled)
            .setLedgerStorageClass(SortedLedgerStorage.class.getName())
            // set very low skipListSizeLimit and entryLogSizeLimit to simulate log file rotation
            .setSkipListSizeLimit(1 * 1000 * 1000).setEntryLogSizeLimit(2 * 1000 * 1000);

    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    BookKeeper bkClient = new BookKeeper(clientConf);

    Random rand = new Random();
    byte[] dataBytes = new byte[10 * 1000];
    rand.nextBytes(dataBytes);
    int numOfEntries = ((int) conf.getEntryLogSizeLimit() + (100 * 1000)) / dataBytes.length;

    LedgerHandle handle = bkClient.createLedgerAdv(10, 1, 1, 1, DigestType.CRC32, "passwd".getBytes(), null);
    for (int j = 0; j < numOfEntries; j++) {
        handle.addEntry(j, dataBytes);
    }
    handle.close();

    // sleep for a bit for checkpoint to do its task
    executorController.advance(Duration.ofMillis(500));

    File lastMarkFile = new File(ledgerDir, "lastMark");
    LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
    if (entryLogPerLedgerEnabled) {
        Assert.assertEquals(
                "rolledLogMark should be zero, since checkpoint"
                        + "shouldn't have happened when entryLog is rotated",
                0, rolledLogMark.compare(new LogMark()));
    } else {
        Assert.assertNotEquals(
                "rolledLogMark shouldn't be zero, since checkpoint"
                        + "should have happened when entryLog is rotated",
                0, rolledLogMark.compare(new LogMark()));
    }
    bkClient.close();
    server.shutdown();
}

From source file:org.apache.hadoop.dfs.TestFileCreation.java

private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
    boolean done = false;

    // wait till all full blocks are confirmed by the datanodes.
    while (!done) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        }
        done = true;
        BlockLocation[] locations = fileSys.getFileBlockLocations(name, 0, fileSize);
        if (locations.length < numBlocks) {
            done = false;
            continue;
        }
        for (int idx = 0; idx < locations.length; idx++) {
            if (locations[idx].getHosts().length < repl) {
                done = false;
                break;
            }
        }
    }
    FSDataInputStream stm = fileSys.open(name);
    byte[] expected = new byte[numBlocks * blockSize];
    if (simulatedStorage) {
        for (int i = 0; i < expected.length; i++) {
            expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
        }
    } else {
        Random rand = new Random(seed);
        rand.nextBytes(expected);
    }
    // do a sanity check. Read the file
    byte[] actual = new byte[numBlocks * blockSize];
    stm.readFully(0, actual);
    checkData(actual, 0, expected, "Read 1");
}