Example usage for java.util.Random.nextBytes

List of usage examples for java.util.Random.nextBytes

Introduction

On this page you can find usage examples for java.util.Random.nextBytes.

Prototype

public void nextBytes(byte[] bytes) 

Document

Generates random bytes and places them into a user-supplied byte array.
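
A minimal, self-contained sketch of the call may help before the project examples in the Usage section below. The class name NextBytesExample and the fixed seed are illustrative assumptions rather than code from any of the projects listed; the sketch simply shows that nextBytes overwrites every element of the array passed to it, and that a seeded Random produces the same byte sequence on every run.

import java.util.Arrays;
import java.util.Random;

public class NextBytesExample {
    public static void main(String[] args) {
        byte[] buffer = new byte[16];
        // Arbitrary fixed seed so the generated bytes are reproducible across runs;
        // the no-argument constructor would give a different sequence each time.
        Random random = new Random(42L);
        // Fills every position of the caller-supplied array with pseudo-random bytes.
        random.nextBytes(buffer);
        System.out.println(Arrays.toString(buffer)); // prints 16 signed byte values
    }
}

The real-world examples that follow use the same pattern with both seeded and unseeded Random instances, typically to generate random test payloads.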

Usage

From source file:org.apache.hadoop.hdfs.TestGetBlocks.java

/**
 * test getBlocks
 */
@Test
public void testGetBlocks() throws Exception {
    final Configuration CONF = new HdfsConfiguration();

    final short REPLICATION_FACTOR = (short) 2;
    final int DEFAULT_BLOCK_SIZE = 1024;
    final Random r = new Random();

    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).format(true)
            .build();
    try {
        cluster.waitActive();

        // create a file with two blocks
        FileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(new Path("/tmp.txt"), REPLICATION_FACTOR);
        byte[] data = new byte[1024];
        long fileLen = 2 * DEFAULT_BLOCK_SIZE;
        long bytesToWrite = fileLen;
        while (bytesToWrite > 0) {
            r.nextBytes(data);
            int bytesToWriteNext = (1024 < bytesToWrite) ? 1024 : (int) bytesToWrite;
            out.write(data, 0, bytesToWriteNext);
            bytesToWrite -= bytesToWriteNext;
        }
        out.close();

        // get blocks & data nodes
        List<LocatedBlock> locatedBlocks;
        DatanodeInfo[] dataNodes = null;
        boolean notWritten;
        do {
            final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
            locatedBlocks = dfsclient.getNamenode().getBlockLocations("/tmp.txt", 0, fileLen)
                    .getLocatedBlocks();
            assertEquals(2, locatedBlocks.size());
            notWritten = false;
            for (int i = 0; i < 2; i++) {
                dataNodes = locatedBlocks.get(i).getLocations();
                if (dataNodes.length != REPLICATION_FACTOR) {
                    notWritten = true;
                    try {
                        Thread.sleep(10);
                    } catch (InterruptedException e) {
                    }
                    break;
                }
            }
        } while (notWritten);

        // get RPC client to namenode
        InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
        NamenodeProtocol namenode = NameNodeProxies
                .createProxy(CONF, NameNode.getUri(addr), NamenodeProtocol.class).getProxy();

        // get blocks of size fileLen from dataNodes[0]
        BlockWithLocations[] locs;
        locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
        assertEquals(2, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);
        assertEquals(2, locs[1].getStorageIDs().length);

        // get blocks of size BlockSize from dataNodes[0]
        locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
        assertEquals(1, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);

        // get blocks of size 1 from dataNodes[0]
        locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
        assertEquals(1, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);

        // get blocks of size 0 from dataNodes[0]
        getBlocksWithException(namenode, dataNodes[0], 0);

        // get blocks of size -1 from dataNodes[0]
        getBlocksWithException(namenode, dataNodes[0], -1);

        // get blocks of size 2 from a non-existent datanode
        DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
        getBlocksWithException(namenode, info, 2);
    } finally {
        cluster.shutdown();
    }
}

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileWriterV3.java

private void writeDataAndReadFromHFile(Path hfilePath, Algorithm compressAlgo, int entryCount,
        boolean findMidKey, boolean useTags) throws IOException {
    HFileContext context = new HFileContextBuilder().withBlockSize(4096).withIncludesTags(useTags)
            .withCompression(compressAlgo).build();
    HFileWriterV3 writer = (HFileWriterV3) new HFileWriterV3.WriterFactoryV3(conf, new CacheConfig(conf))
            .withPath(fs, hfilePath).withFileContext(context).withComparator(KeyValue.COMPARATOR).create();

    Random rand = new Random(9713312); // Just a fixed seed.
    List<KeyValue> keyValues = new ArrayList<KeyValue>(entryCount);

    for (int i = 0; i < entryCount; ++i) {
        byte[] keyBytes = TestHFileWriterV2.randomOrderedKey(rand, i);

        // A random-length random value.
        byte[] valueBytes = TestHFileWriterV2.randomValue(rand);
        KeyValue keyValue = null;
        if (useTags) {
            ArrayList<Tag> tags = new ArrayList<Tag>();
            for (int j = 0; j < 1 + rand.nextInt(4); j++) {
                byte[] tagBytes = new byte[16];
                rand.nextBytes(tagBytes);
                tags.add(new Tag((byte) 1, tagBytes));
            }
            keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes, tags);
        } else {
            keyValue = new KeyValue(keyBytes, null, null, HConstants.LATEST_TIMESTAMP, valueBytes);
        }
        writer.append(keyValue);
        keyValues.add(keyValue);
    }

    // Add in an arbitrary order. They will be sorted lexicographically by
    // the key.
    writer.appendMetaBlock("CAPITAL_OF_USA", new Text("Washington, D.C."));
    writer.appendMetaBlock("CAPITAL_OF_RUSSIA", new Text("Moscow"));
    writer.appendMetaBlock("CAPITAL_OF_FRANCE", new Text("Paris"));

    writer.close();

    FSDataInputStream fsdis = fs.open(hfilePath);

    long fileSize = fs.getFileStatus(hfilePath).getLen();
    FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize);

    assertEquals(3, trailer.getMajorVersion());
    assertEquals(entryCount, trailer.getEntryCount());
    HFileContext meta = new HFileContextBuilder().withCompression(compressAlgo).withIncludesMvcc(false)
            .withIncludesTags(useTags).withHBaseCheckSum(true).build();
    HFileBlock.FSReader blockReader = new HFileBlock.FSReaderV2(fsdis, fileSize, meta);
    // Comparator class name is stored in the trailer in version 2.
    KVComparator comparator = trailer.createComparator();
    HFileBlockIndex.BlockIndexReader dataBlockIndexReader = new HFileBlockIndex.BlockIndexReader(comparator,
            trailer.getNumDataIndexLevels());
    HFileBlockIndex.BlockIndexReader metaBlockIndexReader = new HFileBlockIndex.BlockIndexReader(
            KeyValue.RAW_COMPARATOR, 1);

    HFileBlock.BlockIterator blockIter = blockReader.blockRange(trailer.getLoadOnOpenDataOffset(),
            fileSize - trailer.getTrailerSize());
    // Data index. We also read statistics about the block index written after
    // the root level.
    dataBlockIndexReader.readMultiLevelIndexRoot(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX),
            trailer.getDataIndexCount());

    if (findMidKey) {
        byte[] midkey = dataBlockIndexReader.midkey();
        assertNotNull("Midkey should not be null", midkey);
    }

    // Meta index.
    metaBlockIndexReader.readRootIndex(blockIter.nextBlockWithBlockType(BlockType.ROOT_INDEX).getByteStream(),
            trailer.getMetaIndexCount());
    // File info
    FileInfo fileInfo = new FileInfo();
    fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
    byte[] keyValueFormatVersion = fileInfo.get(HFileWriterV3.KEY_VALUE_VERSION);
    boolean includeMemstoreTS = keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0;

    // Counters for the number of key/value pairs and the number of blocks
    int entriesRead = 0;
    int blocksRead = 0;
    long memstoreTS = 0;

    // Scan blocks the way the reader would scan them
    fsdis.seek(0);
    long curBlockPos = 0;
    while (curBlockPos <= trailer.getLastDataBlockOffset()) {
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.DATA, block.getBlockType());
        ByteBuffer buf = block.getBufferWithoutHeader();
        int keyLen = -1;
        while (buf.hasRemaining()) {

            keyLen = buf.getInt();

            int valueLen = buf.getInt();

            byte[] key = new byte[keyLen];
            buf.get(key);

            byte[] value = new byte[valueLen];
            buf.get(value);
            byte[] tagValue = null;
            if (useTags) {
                int tagLen = buf.getShort();
                tagValue = new byte[tagLen];
                buf.get(tagValue);
            }

            if (includeMemstoreTS) {
                ByteArrayInputStream byte_input = new ByteArrayInputStream(buf.array(),
                        buf.arrayOffset() + buf.position(), buf.remaining());
                DataInputStream data_input = new DataInputStream(byte_input);

                memstoreTS = WritableUtils.readVLong(data_input);
                buf.position(buf.position() + WritableUtils.getVIntSize(memstoreTS));
            }

            // A brute-force check to see that all keys and values are correct.
            assertTrue(Bytes.compareTo(key, keyValues.get(entriesRead).getKey()) == 0);
            assertTrue(Bytes.compareTo(value, keyValues.get(entriesRead).getValue()) == 0);
            if (useTags) {
                assertNotNull(tagValue);
                KeyValue tkv = keyValues.get(entriesRead);
                assertEquals(tagValue.length, tkv.getTagsLength());
                assertTrue(Bytes.compareTo(tagValue, 0, tagValue.length, tkv.getTagsArray(),
                        tkv.getTagsOffset(), tkv.getTagsLength()) == 0);
            }
            ++entriesRead;
        }
        ++blocksRead;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }
    LOG.info("Finished reading: entries=" + entriesRead + ", blocksRead=" + blocksRead);
    assertEquals(entryCount, entriesRead);

    // Meta blocks. We can scan until the load-on-open data offset (which is
    // the root block index offset in version 2) because we are not testing
    // intermediate-level index blocks here.

    int metaCounter = 0;
    while (fsdis.getPos() < trailer.getLoadOnOpenDataOffset()) {
        LOG.info("Current offset: " + fsdis.getPos() + ", scanning until " + trailer.getLoadOnOpenDataOffset());
        HFileBlock block = blockReader.readBlockData(curBlockPos, -1, -1, false);
        assertEquals(BlockType.META, block.getBlockType());
        Text t = new Text();
        ByteBuffer buf = block.getBufferWithoutHeader();
        if (Writables.getWritable(buf.array(), buf.arrayOffset(), buf.limit(), t) == null) {
            throw new IOException(
                    "Failed to deserialize block " + this + " into a " + t.getClass().getSimpleName());
        }
        Text expectedText = (metaCounter == 0 ? new Text("Paris")
                : metaCounter == 1 ? new Text("Moscow") : new Text("Washington, D.C."));
        assertEquals(expectedText, t);
        LOG.info("Read meta block data: " + t);
        ++metaCounter;
        curBlockPos += block.getOnDiskSizeWithHeader();
    }

    fsdis.close();
}

From source file:org.apache.hadoop.io.compress.TestCodec.java

void GzipConcatTest(Configuration conf, Class<? extends Decompressor> decomClass) throws IOException {
    Random r = new Random();
    long seed = r.nextLong();
    r.setSeed(seed);
    LOG.info(decomClass + " seed: " + seed);

    final int CONCAT = r.nextInt(4) + 3;
    final int BUFLEN = 128 * 1024;
    DataOutputBuffer dflbuf = new DataOutputBuffer();
    DataOutputBuffer chkbuf = new DataOutputBuffer();
    byte[] b = new byte[BUFLEN];
    for (int i = 0; i < CONCAT; ++i) {
        GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
        r.nextBytes(b);
        int len = r.nextInt(BUFLEN);
        int off = r.nextInt(BUFLEN - len);
        chkbuf.write(b, off, len);
        gzout.write(b, off, len);
        gzout.close();
    }
    final byte[] chk = Arrays.copyOf(chkbuf.getData(), chkbuf.getLength());

    CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
    Decompressor decom = codec.createDecompressor();
    assertNotNull(decom);
    assertEquals(decomClass, decom.getClass());
    DataInputBuffer gzbuf = new DataInputBuffer();
    gzbuf.reset(dflbuf.getData(), dflbuf.getLength());
    InputStream gzin = codec.createInputStream(gzbuf, decom);

    dflbuf.reset();
    IOUtils.copyBytes(gzin, dflbuf, 4096);
    final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
    assertTrue(java.util.Arrays.equals(chk, dflchk));
}

From source file:org.apache.hadoop.hbase.mapreduce.TestHFileOutputFormat2.java

/**
 * Write random values to the writer assuming a table created using
 * {@link #FAMILIES} as column family descriptors
 */
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, Cell> writer, TaskAttemptContext context,
        Set<byte[]> families, int numRows) throws IOException, InterruptedException {
    byte keyBytes[] = new byte[Bytes.SIZEOF_INT];
    int valLength = 10;
    byte valBytes[] = new byte[valLength];

    int taskId = context.getTaskAttemptID().getTaskID().getId();
    assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";

    Random random = new Random();
    for (int i = 0; i < numRows; i++) {

        Bytes.putInt(keyBytes, 0, i);
        random.nextBytes(valBytes);
        ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

        for (byte[] family : families) {
            Cell kv = new KeyValue(keyBytes, family, PerformanceEvaluation.QUALIFIER_NAME, valBytes);
            writer.write(key, kv);
        }
    }
}

From source file:org.apache.hadoop.hbase.mapreduce.TestHFileOutputFormat.java

/**
 * Write random values to the writer assuming a table created using
 * {@link #FAMILIES} as column family descriptors
 */
private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, KeyValue> writer,
        TaskAttemptContext context, Set<byte[]> families, int numRows)
        throws IOException, InterruptedException {
    byte keyBytes[] = new byte[Bytes.SIZEOF_INT];
    int valLength = 10;
    byte valBytes[] = new byte[valLength];

    int taskId = context.getTaskAttemptID().getTaskID().getId();
    assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";

    Random random = new Random();
    for (int i = 0; i < numRows; i++) {

        Bytes.putInt(keyBytes, 0, i);
        random.nextBytes(valBytes);
        ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

        for (byte[] family : families) {
            KeyValue kv = new KeyValue(keyBytes, family, PerformanceEvaluation.QUALIFIER_NAME, valBytes);
            writer.write(key, kv);
        }
    }
}

From source file:org.apache.hc.client5.http.impl.auth.NTLMEngineImpl.java

/** Calculate a challenge block */
private static byte[] makeRandomChallenge(final Random random) {
    final byte[] rval = new byte[8];
    synchronized (random) {
        random.nextBytes(rval);
    }
    return rval;
}

From source file:org.apache.hc.client5.http.impl.auth.NTLMEngineImpl.java

/** Calculate a 16-byte secondary key */
private static byte[] makeSecondaryKey(final Random random) {
    final byte[] rval = new byte[16];
    synchronized (random) {
        random.nextBytes(rval);
    }
    return rval;
}

From source file:org.apache.flink.runtime.blob.BlobRecoveryITCase.java

/**
 * Tests that with {@link HighAvailabilityMode#ZOOKEEPER} distributed JARs are recoverable from any
 * participating BlobServer.
 */
@Test
public void testBlobServerRecovery() throws Exception {
    Random rand = new Random();

    BlobServer[] server = new BlobServer[2];
    InetSocketAddress[] serverAddress = new InetSocketAddress[2];
    BlobClient client = null;

    try {
        Configuration config = new Configuration();
        config.setString(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
        config.setString(ConfigConstants.STATE_BACKEND, "FILESYSTEM");
        config.setString(HighAvailabilityOptions.HA_STORAGE_PATH, recoveryDir.getPath());

        for (int i = 0; i < server.length; i++) {
            server[i] = new BlobServer(config);
            serverAddress[i] = new InetSocketAddress("localhost", server[i].getPort());
        }

        client = new BlobClient(serverAddress[0], config);

        // Random data
        byte[] expected = new byte[1024];
        rand.nextBytes(expected);

        BlobKey[] keys = new BlobKey[2];

        // Put data
        keys[0] = client.put(expected); // Request 1
        keys[1] = client.put(expected, 32, 256); // Request 2

        JobID[] jobId = new JobID[] { new JobID(), new JobID() };
        String[] testKey = new String[] { "test-key-1", "test-key-2" };

        client.put(jobId[0], testKey[0], expected); // Request 3
        client.put(jobId[1], testKey[1], expected, 32, 256); // Request 4

        // Close the client and connect to the other server
        client.close();
        client = new BlobClient(serverAddress[1], config);

        // Verify request 1
        try (InputStream is = client.get(keys[0])) {
            byte[] actual = new byte[expected.length];

            BlobUtils.readFully(is, actual, 0, expected.length, null);

            for (int i = 0; i < expected.length; i++) {
                assertEquals(expected[i], actual[i]);
            }
        }

        // Verify request 2
        try (InputStream is = client.get(keys[1])) {
            byte[] actual = new byte[256];
            BlobUtils.readFully(is, actual, 0, 256, null);

            for (int i = 32, j = 0; i < 256; i++, j++) {
                assertEquals(expected[i], actual[j]);
            }
        }

        // Verify request 3
        try (InputStream is = client.get(jobId[0], testKey[0])) {
            byte[] actual = new byte[expected.length];
            BlobUtils.readFully(is, actual, 0, expected.length, null);

            for (int i = 0; i < expected.length; i++) {
                assertEquals(expected[i], actual[i]);
            }
        }

        // Verify request 4
        try (InputStream is = client.get(jobId[1], testKey[1])) {
            byte[] actual = new byte[256];
            BlobUtils.readFully(is, actual, 0, 256, null);

            for (int i = 32, j = 0; i < 256; i++, j++) {
                assertEquals(expected[i], actual[j]);
            }
        }

        // Remove again
        client.delete(keys[0]);
        client.delete(keys[1]);
        client.delete(jobId[0], testKey[0]);
        client.delete(jobId[1], testKey[1]);
    } finally {
        for (BlobServer s : server) {
            if (s != null) {
                s.shutdown();
            }
        }

        if (client != null) {
            client.close();
        }
    }

    // Verify everything is clean
    File[] recoveryFiles = recoveryDir.listFiles();
    assertEquals("Unclean state backend: " + Arrays.toString(recoveryFiles), 0, recoveryFiles.length);
}

From source file:org.apache.hadoop.raid.TestBlockCopier.java

private long[] createRandomFileDispersed(Path file, int numBlocks, DatanodeDescriptor primaryNode,
        DatanodeDescriptor altNode) throws IOException, InterruptedException {

    BlockPlacementPolicyFakeData bp = BlockPlacementPolicyFakeData.lastInstance;
    DatanodeDescriptor tmp = bp.overridingDatanode;

    final int repl = 1;
    long[] crcs = new long[numBlocks];
    CRC32 crc = new CRC32();
    Random rand = new Random();
    FSDataOutputStream stm = fileSys.create(file, true, fileSys.getConf().getInt("io.file.buffer.size", 4096),
            (short) repl, BLOCK_SIZE);

    // Create the first block on the alt node
    bp.overridingDatanode = altNode;

    // fill random data into file
    final byte[] b = new byte[(int) BLOCK_SIZE];
    LOG.info("Writing first block (alt. host)");
    rand.nextBytes(b);
    stm.write(b);
    crc.update(b);
    crcs[0] = crc.getValue();

    stm.flush();
    Thread.sleep(1000); // What a hack. Le sigh.

    // Now switch to writing the remaining blocks on the primary node
    bp.overridingDatanode = primaryNode;

    // Write the rest of the blocks on primaryNode
    for (int i = 1; i < numBlocks; i++) {
        LOG.info("Writing block number " + i + " (primary host)");

        rand.nextBytes(b);
        stm.write(b);
        crc.reset();
        crc.update(b);
        crcs[i] = crc.getValue();
    }
    stm.close();
    Thread.sleep(1000);

    // Reset this guy
    bp.overridingDatanode = tmp;

    return crcs;
}

From source file:org.lilyproject.repository.impl.test.AbstractBlobStoreTest.java

/**
 * Test case to reproduce the 'Row key is invalid' problem reported here:
 * https://groups.google.com/forum/#!topic/lily-discuss/XiRxOxJTv70/discussion
 *
 * @throws Exception
 */
@Test
public void testForceInline() throws Exception {
    QName fieldName = new QName(namespace, "testForceInline");
    FieldType fieldType = typeManager.newFieldType(typeManager.getValueType("BLOB"), fieldName,
            Scope.NON_VERSIONED);
    fieldType = typeManager.createFieldType(fieldType);
    RecordType recordType = typeManager.newRecordType(new QName(namespace, "testForceInlineRT"));
    FieldTypeEntry fieldTypeEntry = typeManager.newFieldTypeEntry(fieldType.getId(), true);
    recordType.addFieldTypeEntry(fieldTypeEntry);
    recordType = typeManager.createRecordType(recordType);

    int size = 4096;
    Random rg = new Random();
    byte[] bytes = new byte[size];
    rg.nextBytes(bytes);

    // create BLOB object
    Blob blob = new Blob("application/pdf", 0L, "Document");
    // create a stream to write the BLOB
    OutputStream bos = repository.getOutputStream(blob);
    // write the data
    bos.write(bytes);
    bos.close();
    blob.setSize(5L);
    // create a new record ID
    RecordId rid = repository.getIdGenerator().newRecordId();
    // create a new record
    Record record = repository.newRecord(rid);
    record.setRecordType(new QName(namespace, "testForceInlineRT"));
    // set the blob
    record.setField(fieldName, blob);

    // create the record
    record = repository.create(record);

    byte[] readBytes = readBlob(record.getId(), fieldName);
    assertTrue(Arrays.equals(bytes, readBytes));

    // Test the getInputStream with giving the record instead of the recordId
    InputStream inputStream = repository.getInputStream(record, fieldName);
    try {
        byte[] readBytes2 = IOUtils.toByteArray(inputStream);
        assertTrue(Arrays.equals(bytes, readBytes2));
    } finally {
        IOUtils.closeQuietly(inputStream);
    }
}