Example usage for java.util Random nextLong

Introduction

On this page you can find example usage of java.util.Random.nextLong().

Prototype

public long nextLong() 

Document

Returns the next pseudorandom, uniformly distributed long value from this random number generator's sequence.
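
For orientation, here is a minimal, self-contained sketch of calling nextLong() (the class name NextLongDemo and the seed are illustrative assumptions, not part of any referenced source):

import java.util.Random;

public class NextLongDemo {
    public static void main(String[] args) {
        Random random = new Random(42L); // seeded only so runs are reproducible; the seed is arbitrary
        // Each call returns the next pseudorandom, uniformly distributed
        // 64-bit value from this generator's sequence.
        long value = random.nextLong();
        System.out.println("nextLong() = " + value);
    }
}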

Usage

From source file:org.apache.hadoop.dfs.ClusterTestDFS.java

/**
 * Pseudo Distributed FS Testing.
 * Do one test cycle with given parameters.
 *
 * @param nBytes         number of bytes to write to each file.
 * @param numFiles       number of files to create.
 * @param blockSize      block size to use for this test cycle.
 * @param initialDNcount number of datanodes to create
 * @throws Exception
 */
public void testFsPseudoDistributed(long nBytes, int numFiles, int blockSize, int initialDNcount)
        throws Exception {
    long startTime = System.currentTimeMillis();
    int bufferSize = Math.min(BUFFER_SIZE, blockSize);
    boolean checkDataDirsEmpty = false;
    int iDatanodeClosed = 0;
    Random randomDataGenerator = makeRandomDataGenerator();
    final int currentTestCycleNumber = testCycleNumber;
    msg("using randomDataGenerator=" + randomDataGenerator.getClass().getName());

    //
    // modify config for test
    //
    // set given config param to override other config settings
    conf.setInt("test.dfs.block_size", blockSize);
    // verify that config changed
    assertTrue(blockSize == conf.getInt("test.dfs.block_size", 2)); // default 2 is deliberately wrong, so the assert catches a missing setting
    // downsize for testing (just to save resources)
    conf.setInt("dfs.namenode.handler.count", 3);
    if (false) { // disabled: use MersenneTwister, if present
        conf.set("hadoop.random.class", "org.apache.hadoop.util.MersenneTwister");
    }
    conf.setLong("dfs.blockreport.intervalMsec", 50 * 1000L);
    conf.setLong("dfs.datanode.startupMsec", 15 * 1000L);

    String nameFSDir = baseDirSpecified + "/name";
    msg("----Start Test Cycle=" + currentTestCycleNumber + " test.dfs.block_size=" + blockSize + " nBytes="
            + nBytes + " numFiles=" + numFiles + " initialDNcount=" + initialDNcount);

    //
    // start a NameNode
    //
    int nameNodePort = 9000 + testCycleNumber++; // ToDo: settable base port
    String nameNodeSocketAddr = "localhost:" + nameNodePort;
    conf.set("dfs.name.dir", nameFSDir);
    NameNode nameNodeDaemon = new NameNode(nameNodeSocketAddr, conf);
    DFSClient dfsClient = null;
    try {
        //
        //        start some DataNodes
        //
        ArrayList<DataNode> listOfDataNodeDaemons = new ArrayList<DataNode>();
        FileSystem.setDefaultUri(conf, "hdfs://" + nameNodeSocketAddr);
        for (int i = 0; i < initialDNcount; i++) {
            // uniquely config real fs path for data storage for this datanode
            String[] dataDirs = new String[1];
            dataDirs[0] = baseDirSpecified + "/datanode" + i;
            conf.set("dfs.data.dir", dataDirs[0]);
            DataNode dn = DataNode.makeInstance(dataDirs, conf);
            if (dn != null) {
                listOfDataNodeDaemons.add(dn);
                (new Thread(dn, "DataNode" + i + ": " + dataDirs[0])).start();
            }
        }
        try {
            assertTrue("insufficient datanodes for test to continue", (listOfDataNodeDaemons.size() >= 2));

            //
            //          wait for datanodes to report in
            awaitQuiescence();

            //  act as if namenode is a remote process
            dfsClient = new DFSClient(new InetSocketAddress("localhost", nameNodePort), conf);

            //
            //           write nBytes of data using randomDataGenerator to numFiles
            //
            ArrayList<UTF8> testfilesList = new ArrayList<UTF8>();
            byte[] buffer = new byte[bufferSize];
            UTF8 testFileName = null;
            for (int iFileNumber = 0; iFileNumber < numFiles; iFileNumber++) {
                testFileName = new UTF8("/f" + iFileNumber);
                testfilesList.add(testFileName);
                OutputStream nos = dfsClient.create(testFileName.toString(), false);
                try {
                    for (long nBytesWritten = 0L; nBytesWritten < nBytes; nBytesWritten += buffer.length) {
                        if ((nBytesWritten + buffer.length) > nBytes) {
                            // calculate byte count needed to exactly hit nBytes in length
                            //  to keep randomDataGenerator in sync during the verify step
                            int pb = (int) (nBytes - nBytesWritten);
                            byte[] bufferPartial = new byte[pb];
                            randomDataGenerator.nextBytes(bufferPartial);
                            nos.write(bufferPartial);
                        } else {
                            randomDataGenerator.nextBytes(buffer);
                            nos.write(buffer);
                        }
                    }
                } finally {
                    nos.flush();
                    nos.close();
                }
            }

            //
            // No need to wait for blocks to be replicated because replication
            //  is supposed to be complete when the file is closed.
            //

            //
            //                     take one datanode down
            iDatanodeClosed = currentTestCycleNumber % listOfDataNodeDaemons.size();
            DataNode dn = listOfDataNodeDaemons.get(iDatanodeClosed);
            msg("shutdown datanode daemon " + iDatanodeClosed + " dn=" + dn.data);
            try {
                dn.shutdown();
            } catch (Exception e) {
                msg("ignoring datanode shutdown exception=" + e);
            }

            //
            // verify against a "rewound" randomDataGenerator
            // that all of the written data is intact
            long lastLong = randomDataGenerator.nextLong();
            randomDataGenerator = makeRandomDataGenerator(); // restart (make new) PRNG
            ListIterator<UTF8> li = testfilesList.listIterator();
            while (li.hasNext()) {
                testFileName = li.next();
                FSInputStream nis = dfsClient.open(testFileName.toString());
                byte[] bufferGolden = new byte[bufferSize];
                int m = 42; // any value != -1, just to enter the loop
                try {
                    while (m != -1) {
                        m = nis.read(buffer);
                        if (m == buffer.length) {
                            randomDataGenerator.nextBytes(bufferGolden);
                            assertBytesEqual(buffer, bufferGolden, buffer.length);
                        } else if (m > 0) {
                            byte[] bufferGoldenPartial = new byte[m];
                            randomDataGenerator.nextBytes(bufferGoldenPartial);
                            assertBytesEqual(buffer, bufferGoldenPartial, bufferGoldenPartial.length);
                        }
                    }
                } finally {
                    nis.close();
                }
            }
            // verify last randomDataGenerator rand val to ensure last file length was checked
            long lastLongAgain = randomDataGenerator.nextLong();
            assertEquals(lastLong, lastLongAgain);
            msg("Finished validating all file contents");

            //
            //                    now delete all the created files
            msg("Delete all random test files under DFS via remaining datanodes");
            li = testfilesList.listIterator();
            while (li.hasNext()) {
                testFileName = li.next();
                assertTrue(dfsClient.delete(testFileName.toString(), true));
            }

            //
            //                   wait for delete to be propagated
            //                  (unlike writing files, delete is lazy)
            msg("Test thread sleeping while datanodes propagate delete...");
            awaitQuiescence();
            msg("Test thread awakens to verify file contents");

            //
            //             check that the datanode's block directory is empty
            //                (except for datanode that had forced shutdown)
            checkDataDirsEmpty = true; // do it during finally clause

        } catch (AssertionFailedError afe) {
            throw afe;
        } catch (Throwable t) {
            msg("Unexpected exception_b: " + t);
            t.printStackTrace();
        } finally {
            //
            // shut down datanode daemons (this takes advantage of being same-process)
            msg("begin shutdown of all datanode daemons for test cycle " + currentTestCycleNumber);

            for (int i = 0; i < listOfDataNodeDaemons.size(); i++) {
                DataNode dataNode = listOfDataNodeDaemons.get(i);
                if (i != iDatanodeClosed) {
                    try {
                        if (checkDataDirsEmpty) {
                            assertNoBlocks(dataNode);
                        }
                        dataNode.shutdown();
                    } catch (Exception e) {
                        msg("ignoring exception during (all) datanode shutdown, e=" + e);
                    }
                }
            }
        }
        msg("finished shutdown of all datanode daemons for test cycle " + currentTestCycleNumber);
        if (dfsClient != null) {
            try {
                msg("close down subthreads of DFSClient");
                dfsClient.close();
            } catch (Exception ignored) {
            }
            msg("finished close down of DFSClient");
        }
    } catch (AssertionFailedError afe) {
        throw afe;
    } catch (Throwable t) {
        msg("Unexpected exception_a: " + t);
        t.printStackTrace();
    } finally {
        // shut down namenode daemon (this takes advantage of being same-process)
        msg("begin shutdown of namenode daemon for test cycle " + currentTestCycleNumber);
        try {
            nameNodeDaemon.stop();
        } catch (Exception e) {
            msg("ignoring namenode shutdown exception=" + e);
        }
        msg("finished shutdown of namenode daemon for test cycle " + currentTestCycleNumber);
    }
    msg("test cycle " + currentTestCycleNumber + " elapsed time="
            + (System.currentTimeMillis() - startTime) / 1000. + "sec");
    msg("threads still running (look for stragglers): ");
    msg(summarizeThreadGroup());
}
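
The noteworthy use of nextLong() above is the synchronization check around the verify step: the test records the generator's next long before rebuilding ("rewinding") the PRNG, and asserts that the rebuilt generator yields the same long after verification, which proves the verify loop consumed exactly the number of bytes that were written. Here is a minimal sketch of the same pattern, assuming a fixed seed stands in for makeRandomDataGenerator() (class name, seed, and buffer size are illustrative):

import java.util.Arrays;
import java.util.Random;

public class RewindVerifyDemo {
    private static final long SEED = 12345L; // assumed fixed seed so the PRNG can be recreated

    public static void main(String[] args) {
        // Write phase: produce data from a seeded PRNG.
        Random writer = new Random(SEED);
        byte[] written = new byte[1000];
        writer.nextBytes(written);
        long marker = writer.nextLong(); // sync marker: the next value in the sequence

        // Verify phase: "rewind" by constructing a new PRNG with the same seed.
        Random verifier = new Random(SEED);
        byte[] golden = new byte[1000];
        verifier.nextBytes(golden);
        if (!Arrays.equals(written, golden)) {
            throw new AssertionError("data mismatch");
        }
        // Only if the verify phase consumed exactly as many bytes as the
        // write phase will the next long match the recorded marker.
        if (verifier.nextLong() != marker) {
            throw new AssertionError("verify phase consumed a different byte count");
        }
        System.out.println("verified " + written.length + " bytes; sync marker matched");
    }
}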