Example usage for java.util.Random.nextBytes

List of usage examples for java.util.Random.nextBytes

Introduction

On this page you can find example usage of java.util.Random.nextBytes.

Prototype

public void nextBytes(byte[] bytes) 

Document

Generates random bytes and places them into a user-supplied byte array.

Usage

From source file:org.apache.hadoop.hbase.regionserver.throttle.TestFlushWithThroughputController.java

/**
 * Test the tuning task of {@link PressureAwareFlushThroughputController}
 */
@Test
public void testFlushThroughputTuning() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
    conf.setLong(PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND,
            20L * 1024 * 1024);
    conf.setLong(PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND,
            10L * 1024 * 1024);
    conf.set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
            PressureAwareFlushThroughputController.class.getName());
    conf.setInt(PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD, 3000);
    TEST_UTIL.startMiniCluster(1);
    Connection conn = ConnectionFactory.createConnection(conf);
    try {
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.addFamily(new HColumnDescriptor(family));
        htd.setCompactionEnabled(false);
        TEST_UTIL.getHBaseAdmin().createTable(htd);
        TEST_UTIL.waitTableAvailable(tableName);
        HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);
        PressureAwareFlushThroughputController throughputController = (PressureAwareFlushThroughputController) regionServer
                .getFlushThroughputController();
        for (Region region : regionServer.getOnlineRegions()) {
            region.flush(true);
        }
        assertEquals(0.0, regionServer.getFlushPressure(), EPSILON);
        Thread.sleep(5000);
        assertEquals(10L * 1024 * 1024, throughputController.getMaxThroughput(), EPSILON);
        Table table = conn.getTable(tableName);
        Random rand = new Random();
        for (int i = 0; i < 10; i++) {
            for (int j = 0; j < 10; j++) {
                byte[] value = new byte[256 * 1024];
                rand.nextBytes(value);
                table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
            }
        }
        Thread.sleep(5000);
        double expectedThroughPut = 10L * 1024 * 1024 * (1 + regionServer.getFlushPressure());
        assertEquals(expectedThroughPut, throughputController.getMaxThroughput(), EPSILON);

        conf.set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
                NoLimitThroughputController.class.getName());
        regionServer.onConfigurationChange(conf);
        assertTrue(throughputController.isStopped());
        assertTrue(regionServer.getFlushThroughputController() instanceof NoLimitThroughputController);
    } finally {
        conn.close();
        TEST_UTIL.shutdownMiniCluster();
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestDecommissioningStatus.java

private FSDataOutputStream writeIncompleteFile(FileSystem fileSys, Path name, short repl) throws IOException {
    // create and write a file that contains three blocks of data
    FSDataOutputStream stm = fileSys.create(name, true,
            fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl, blockSize);
    byte[] buffer = new byte[fileSize];
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    // need to make sure that we actually write out both file blocks
    // (see FSOutputSummer#flush)
    stm.flush();
    // Do not close stream, return it
    // so that it is not garbage collected
    return stm;
}

From source file:org.bibsonomy.webapp.controller.actions.PasswordReminderController.java

/**
 * Creates a random string
 * 
 * @return String
 */
private String getRandomString() {
    final Random rand = new Random();
    final byte[] bytes = new byte[8];
    rand.nextBytes(bytes);
    return HashUtils.toHexString(bytes);
}

From source file:org.apache.hadoop.hdfs.TestDFSSSLServer.java

@Test
public void testCopyFile() throws Exception {
    // Create binary file of a couple of MB
    java.nio.file.Path binary = Paths.get(classpathDir, "binary_file.bin");
    filesToPurge.add(binary);
    Random rand = new Random();
    byte[] buffer = new byte[1024];
    int count = 0;
    try (BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(binary.toFile()))) {
        while (count < 5000) {
            rand.nextBytes(buffer);
            bos.write(buffer);
            count++;
        }
        bos.flush();
    }
    // Copy it to DFS
    dfs1 = cluster.getFileSystem();
    Path target = new Path("binary_file");
    dfs1.copyFromLocalFile(new Path(binary.toString()), target);
    assertTrue(dfs1.exists(target));
    // Copy back the file
    java.nio.file.Path localCopy = Paths.get(classpathDir, "copied_remote_file");
    dfs1.copyToLocalFile(target, new Path(localCopy.toString()));
    filesToPurge.add(localCopy);
    assertTrue(localCopy.toFile().exists());
}

From source file:org.apache.metron.stellar.dsl.functions.HashFunctionsTest.java

@Test
public void tlsh_multithread() throws Exception {
    //we want to ensure that everything is threadsafe, so we'll spin up some random data
    //generate some hashes and then do it all in parallel and make sure it all matches.
    Map<Map.Entry<byte[], Map<String, Object>>, String> hashes = new HashMap<>();
    Random r = new Random(0);
    for (int i = 0; i < 20; ++i) {
        byte[] d = new byte[256];
        r.nextBytes(d);
        Map<String, Object> config = new HashMap<String, Object>() {
            {
                put(TLSHHasher.Config.BUCKET_SIZE.key, r.nextBoolean() ? 128 : 256);
                put(TLSHHasher.Config.CHECKSUM.key, r.nextBoolean() ? 1 : 3);
            }
        };
        String hash = (String) run("HASH(data, 'tlsh', config)", ImmutableMap.of("config", config, "data", d));
        Assert.assertNotNull(hash);
        hashes.put(new AbstractMap.SimpleEntry<>(d, config), hash);
    }
    ForkJoinPool forkJoinPool = new ForkJoinPool(5);

    forkJoinPool.submit(() -> hashes.entrySet().parallelStream().forEach(kv -> {
        Map<String, Object> config = kv.getKey().getValue();
        byte[] data = kv.getKey().getKey();
        String hash = (String) run("HASH(data, 'tlsh', config)",
                ImmutableMap.of("config", config, "data", data));
        Assert.assertEquals(hash, kv.getValue());
    }));
}

From source file:org.commonjava.vertx.vabr.util.VertXInputStreamTest.java

@Test
//    @Ignore
public void readViaHttpHandler() throws InterruptedException {
    final ByteArrayOutputStream result = new ByteArrayOutputStream();

    final Vertx v = new DefaultVertx();
    final HttpServer server = v.createHttpServer().requestHandler(new Handler<HttpServerRequest>() {
        @Override
        public void handle(final HttpServerRequest request) {
            request.pause();

            new Thread(new Runnable() {
                @Override
                public void run() {
                    logger.info("GOT IT");
                    final VertXInputStream stream = new VertXInputStream(request);
                    try {
                        IOUtils.copy(stream, result);

                        logger.info("READ DONE");
                        synchronized (result) {
                            result.notifyAll();
                        }
                    } catch (final IOException e) {
                        throw new RuntimeException("Failed to read stream: " + e.getMessage(), e);
                    }
                }
            }, "server-request").start();
        }
    }).listen(port, "localhost");

    final HttpClient client = v.createHttpClient().setHost("localhost").setPort(port);
    final HttpClientRequest put = client.put("/put", new Handler<HttpClientResponse>() {
        @Override
        public void handle(final HttpClientResponse response) {
            logger.info("Response: {} {}", response.statusCode(), response.statusMessage());
        }
    });

    final ByteArrayOutputStream check = new ByteArrayOutputStream();
    final Random rand = new Random();

    // 4MB of random data, written 4 bytes at a time...
    final byte[] txfr = new byte[4];
    for (int i = 0; i < 1048576; i++) {
        rand.nextBytes(txfr);
        check.write(txfr, 0, txfr.length);
    }

    put.setChunked(true).write(new Buffer(check.toByteArray())).end();

    logger.info("SENT: {}", check.toByteArray().length);

    synchronized (result) {
        result.wait();
    }

    final byte[] checkedArry = check.toByteArray();
    final byte[] resultArry = result.toByteArray();
    assertThat(checkedArry.length, equalTo(resultArry.length));

    boolean match = true;
    for (int i = 0; i < checkedArry.length; i++) {
        if (resultArry[i] != checkedArry[i]) {
            logger.error("Index {} mismatch! Was: {}, expected: {}", i, resultArry[i], checkedArry[i]);
            match = false;
        }
    }

    assertThat("Byte arrays do not match.", match, equalTo(true));

    server.close();
    client.close();
}

From source file:org.ejbca.ui.cmpclient.CmpClientMessageHelper.java

/** Creates a 16-byte random sender nonce.
 *
 * @return byte array of length 16
 */
public byte[] createSenderNonce() {
    // Sendernonce is a random number
    byte[] senderNonce = new byte[16];
    Random randomSource = new Random();
    randomSource.nextBytes(senderNonce);
    return senderNonce;
}

From source file:org.apache.hadoop.hbase.regionserver.compactions.TestCompactionWithThroughputController.java

private Store prepareData() throws IOException {
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    if (admin.tableExists(tableName)) {
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
    }
    Table table = TEST_UTIL.createTable(tableName, family);
    Random rand = new Random();
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 10; j++) {
            byte[] value = new byte[128 * 1024];
            rand.nextBytes(value);
            table.put(new Put(Bytes.toBytes(i * 10 + j)).add(family, qualifier, value));
        }
        admin.flush(tableName);
    }
    return getStoreWithName(tableName);
}

From source file:com.intel.chimera.stream.AbstractCryptoStreamTest.java

@Before
public void before() throws IOException {
    Random random = new SecureRandom();
    random.nextBytes(data);
    random.nextBytes(key);
    random.nextBytes(iv);
    setUp();
    prepareData();
}

From source file:com.alliander.osgp.webdevicesimulator.service.RegisterDevice.java

private byte[] createRandomDeviceUid() {
    // Generate random bytes for UID
    final byte[] deviceUid = new byte[OslpEnvelope.DEVICE_ID_LENGTH];
    final Random byteGenerator = new Random();
    byteGenerator.nextBytes(deviceUid);
    // Combine manufacturer id of 2 bytes (1 is AME) and device UID of 10
    // bytes.
    return ArrayUtils.addAll(new byte[] { 0, 1 }, deviceUid);
}