Example usage for java.util.Random.nextLong()

List of usage examples for java.util.Random.nextLong()

Introduction

On this page you can find example usages of java.util.Random.nextLong().

Prototype

public long nextLong() 

Document

Returns the next pseudorandom, uniformly distributed long value from this random number generator's sequence.

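Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed) showing the basic behavior of nextLong(): a seeded Random produces a repeatable sequence, while an unseeded one does not.

import java.util.Random;

public class NextLongDemo {
    public static void main(String[] args) {
        // Seeded generator: the same seed always produces the same sequence.
        Random seeded = new Random(42L);
        System.out.println(seeded.nextLong()); // deterministic first value for seed 42
        System.out.println(seeded.nextLong()); // deterministic second value

        // Unseeded generator: values differ from run to run.
        Random unseeded = new Random();
        long value = unseeded.nextLong(); // may be any long, positive or negative
        System.out.println(value);
    }
}
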
Usage

From source file: io.druid.hll.HyperLogLogCollectorTest.java

@Test
public void testFoldingReadOnlyByteBuffers() throws Exception {
    final Random random = new Random(0);
    final int[] numValsToCheck = { 10, 20, 50, 100, 1000, 2000 };
    for (int numThings : numValsToCheck) {
        HyperLogLogCollector allCombined = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector oneHalf = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector otherHalf = HyperLogLogCollector.makeLatestCollector();

        for (int i = 0; i < numThings; ++i) {
            byte[] hashedVal = fn.hashLong(random.nextLong()).asBytes();

            allCombined.add(hashedVal);
            if (i % 2 == 0) {
                oneHalf.add(hashedVal);
            } else {
                otherHalf.add(hashedVal);
            }
        }

        HyperLogLogCollector folded = HyperLogLogCollector.makeCollector(
                ByteBuffer.wrap(HyperLogLogCollector.makeEmptyVersionedByteArray()).asReadOnlyBuffer());

        folded.fold(oneHalf.toByteBuffer());
        Assert.assertEquals(oneHalf, folded);
        Assert.assertEquals(oneHalf.estimateCardinality(), folded.estimateCardinality(), 0.0d);

        folded.fold(otherHalf.toByteBuffer());
        Assert.assertEquals(allCombined, folded);
        Assert.assertEquals(allCombined.estimateCardinality(), folded.estimateCardinality(), 0.0d);
    }
}

From source file: io.druid.hll.HyperLogLogCollectorTest.java

@Test
public void testFoldingReadOnlyByteBuffersWithArbitraryPosition() throws Exception {
    final Random random = new Random(0);
    final int[] numValsToCheck = { 10, 20, 50, 100, 1000, 2000 };
    for (int numThings : numValsToCheck) {
        HyperLogLogCollector allCombined = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector oneHalf = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector otherHalf = HyperLogLogCollector.makeLatestCollector();

        for (int i = 0; i < numThings; ++i) {
            byte[] hashedVal = fn.hashLong(random.nextLong()).asBytes();

            allCombined.add(hashedVal);
            if (i % 2 == 0) {
                oneHalf.add(hashedVal);
            } else {
                otherHalf.add(hashedVal);
            }
        }

        HyperLogLogCollector folded = HyperLogLogCollector.makeCollector(shiftedBuffer(
                ByteBuffer.wrap(HyperLogLogCollector.makeEmptyVersionedByteArray()).asReadOnlyBuffer(), 17));

        folded.fold(oneHalf.toByteBuffer());
        Assert.assertEquals(oneHalf, folded);
        Assert.assertEquals(oneHalf.estimateCardinality(), folded.estimateCardinality(), 0.0d);

        folded.fold(otherHalf.toByteBuffer());
        Assert.assertEquals(allCombined, folded);
        Assert.assertEquals(allCombined.estimateCardinality(), folded.estimateCardinality(), 0.0d);
    }
}

From source file: org.apache.druid.hll.HyperLogLogCollectorTest.java

@Test
public void testFolding() {
    final Random random = new Random(0);
    final int[] numValsToCheck = { 10, 20, 50, 100, 1000, 2000 };
    for (int numThings : numValsToCheck) {
        HyperLogLogCollector allCombined = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector oneHalf = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector otherHalf = HyperLogLogCollector.makeLatestCollector();

        for (int i = 0; i < numThings; ++i) {
            byte[] hashedVal = fn.hashLong(random.nextLong()).asBytes();

            allCombined.add(hashedVal);
            if (i % 2 == 0) {
                oneHalf.add(hashedVal);
            } else {
                otherHalf.add(hashedVal);
            }
        }

        HyperLogLogCollector folded = HyperLogLogCollector.makeLatestCollector();

        folded.fold(oneHalf);
        Assert.assertEquals(oneHalf, folded);
        Assert.assertEquals(oneHalf.estimateCardinality(), folded.estimateCardinality(), 0.0d);

        folded.fold(otherHalf);
        Assert.assertEquals(allCombined, folded);
        Assert.assertEquals(allCombined.estimateCardinality(), folded.estimateCardinality(), 0.0d);
    }
}

From source file: org.apache.druid.hll.HyperLogLogCollectorTest.java

@Test
public void testFoldingByteBuffers() {
    final Random random = new Random(0);
    final int[] numValsToCheck = { 10, 20, 50, 100, 1000, 2000 };
    for (int numThings : numValsToCheck) {
        HyperLogLogCollector allCombined = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector oneHalf = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector otherHalf = HyperLogLogCollector.makeLatestCollector();

        for (int i = 0; i < numThings; ++i) {
            byte[] hashedVal = fn.hashLong(random.nextLong()).asBytes();

            allCombined.add(hashedVal);
            if (i % 2 == 0) {
                oneHalf.add(hashedVal);
            } else {
                otherHalf.add(hashedVal);
            }
        }

        HyperLogLogCollector folded = HyperLogLogCollector.makeLatestCollector();

        folded.fold(oneHalf.toByteBuffer());
        Assert.assertEquals(oneHalf, folded);
        Assert.assertEquals(oneHalf.estimateCardinality(), folded.estimateCardinality(), 0.0d);

        folded.fold(otherHalf.toByteBuffer());
        Assert.assertEquals(allCombined, folded);
        Assert.assertEquals(allCombined.estimateCardinality(), folded.estimateCardinality(), 0.0d);
    }
}

From source file: org.apache.druid.hll.HyperLogLogCollectorTest.java

@Test
public void testFoldingReadOnlyByteBuffers() {
    final Random random = new Random(0);
    final int[] numValsToCheck = { 10, 20, 50, 100, 1000, 2000 };
    for (int numThings : numValsToCheck) {
        HyperLogLogCollector allCombined = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector oneHalf = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector otherHalf = HyperLogLogCollector.makeLatestCollector();

        for (int i = 0; i < numThings; ++i) {
            byte[] hashedVal = fn.hashLong(random.nextLong()).asBytes();

            allCombined.add(hashedVal);
            if (i % 2 == 0) {
                oneHalf.add(hashedVal);
            } else {
                otherHalf.add(hashedVal);
            }
        }

        HyperLogLogCollector folded = HyperLogLogCollector.makeCollector(
                ByteBuffer.wrap(HyperLogLogCollector.makeEmptyVersionedByteArray()).asReadOnlyBuffer());

        folded.fold(oneHalf.toByteBuffer());
        Assert.assertEquals(oneHalf, folded);
        Assert.assertEquals(oneHalf.estimateCardinality(), folded.estimateCardinality(), 0.0d);

        folded.fold(otherHalf.toByteBuffer());
        Assert.assertEquals(allCombined, folded);
        Assert.assertEquals(allCombined.estimateCardinality(), folded.estimateCardinality(), 0.0d);
    }
}

From source file: org.apache.druid.hll.HyperLogLogCollectorTest.java

@Test
public void testFoldingReadOnlyByteBuffersWithArbitraryPosition() {
    final Random random = new Random(0);
    final int[] numValsToCheck = { 10, 20, 50, 100, 1000, 2000 };
    for (int numThings : numValsToCheck) {
        HyperLogLogCollector allCombined = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector oneHalf = HyperLogLogCollector.makeLatestCollector();
        HyperLogLogCollector otherHalf = HyperLogLogCollector.makeLatestCollector();

        for (int i = 0; i < numThings; ++i) {
            byte[] hashedVal = fn.hashLong(random.nextLong()).asBytes();

            allCombined.add(hashedVal);
            if (i % 2 == 0) {
                oneHalf.add(hashedVal);
            } else {
                otherHalf.add(hashedVal);
            }
        }

        HyperLogLogCollector folded = HyperLogLogCollector.makeCollector(shiftedBuffer(
                ByteBuffer.wrap(HyperLogLogCollector.makeEmptyVersionedByteArray()).asReadOnlyBuffer(), 17));

        folded.fold(oneHalf.toByteBuffer());
        Assert.assertEquals(oneHalf, folded);
        Assert.assertEquals(oneHalf.estimateCardinality(), folded.estimateCardinality(), 0.0d);

        folded.fold(otherHalf.toByteBuffer());
        Assert.assertEquals(allCombined, folded);
        Assert.assertEquals(allCombined.estimateCardinality(), folded.estimateCardinality(), 0.0d);
    }
}

From source file: org.apache.hadoop.tools.rumen.Folder.java

public int run() throws IOException {
    class JobEntryComparator implements Comparator<Pair<LoggedJob, JobTraceReader>> {
        public int compare(Pair<LoggedJob, JobTraceReader> p1, Pair<LoggedJob, JobTraceReader> p2) {
            LoggedJob j1 = p1.first();
            LoggedJob j2 = p2.first();

            return (j1.getSubmitTime() < j2.getSubmitTime()) ? -1
                    : (j1.getSubmitTime() == j2.getSubmitTime()) ? 0 : 1;
        }
    }

    // Initialize an empty heap so that if an error occurs before the real heap
    // is established, the finally block can still iterate over it safely.
    Queue<Pair<LoggedJob, JobTraceReader>> heap = new PriorityQueue<Pair<LoggedJob, JobTraceReader>>();

    try {
        LoggedJob job = reader.nextJob();

        if (job == null) {
            LOG.error("The job trace is empty");

            return EMPTY_JOB_TRACE;
        }

        // If a starts-after time is specified, skip jobs until we reach the
        // starting time limit.
        if (startsAfter > 0) {
            LOG.info("starts-after time is specified. Initial job submit time : " + job.getSubmitTime());

            long approximateTime = job.getSubmitTime() + startsAfter;
            job = reader.nextJob();
            long skippedCount = 0;
            while (job != null && job.getSubmitTime() < approximateTime) {
                job = reader.nextJob();
                skippedCount++;
            }

            LOG.debug("Considering jobs with submit time greater than " + startsAfter + " ms. Skipped "
                    + skippedCount + " jobs.");

            if (job == null) {
                LOG.error("No more jobs to process in the trace with 'starts-after'" + " set to " + startsAfter
                        + "ms.");
                return EMPTY_JOB_TRACE;
            }
            LOG.info("The first job has a submit time of " + job.getSubmitTime());
        }

        firstJobSubmitTime = job.getSubmitTime();
        long lastJobSubmitTime = firstJobSubmitTime;

        int numberJobs = 0;

        long currentIntervalEnd = Long.MIN_VALUE;

        Path nextSegment = null;
        Outputter<LoggedJob> tempGen = null;

        if (debug) {
            LOG.debug("The first job has a submit time of " + firstJobSubmitTime);
        }

        final Configuration conf = getConf();

        try {
            // At the top of this loop, skewBuffer has at most
            // skewBufferLength entries.
            while (job != null) {
                final Random tempNameGenerator = new Random();

                lastJobSubmitTime = job.getSubmitTime();

                ++numberJobs;

                if (job.getSubmitTime() >= currentIntervalEnd) {
                    if (tempGen != null) {
                        tempGen.close();
                    }

                    nextSegment = null;
                    for (int i = 0; i < 3 && nextSegment == null; ++i) {
                        try {
                            nextSegment = new Path(tempDir,
                                    "segment-" + tempNameGenerator.nextLong() + ".json.gz");

                            if (debug) {
                                LOG.debug("The next segment name is " + nextSegment);
                            }

                            FileSystem fs = nextSegment.getFileSystem(conf);

                            try {
                                if (!fs.exists(nextSegment)) {
                                    break;
                                }

                                continue;
                            } catch (IOException e) {
                                // no code -- file did not already exist
                            }
                        } catch (IOException e) {
                            // no code -- file exists now, or directory bad. We try three
                            // times.
                        }
                    }

                    if (nextSegment == null) {
                        throw new RuntimeException("Failed to create a new file!");
                    }

                    if (debug) {
                        LOG.debug("Creating " + nextSegment + " for a job with a submit time of "
                                + job.getSubmitTime());
                    }

                    deletees.add(nextSegment);

                    tempPaths.add(nextSegment);

                    tempGen = new DefaultOutputter<LoggedJob>();
                    tempGen.init(nextSegment, conf);

                    long currentIntervalNumber = (job.getSubmitTime() - firstJobSubmitTime) / inputCycle;

                    currentIntervalEnd = firstJobSubmitTime + ((currentIntervalNumber + 1) * inputCycle);
                }

                // The temp files contain unadjusted times, but each temp file's
                // content falls within the same input cycle interval.
                if (tempGen != null) {
                    tempGen.output(job);
                }

                job = reader.nextJob();
            }
        } catch (DeskewedJobTraceReader.OutOfOrderException e) {
            return OUT_OF_ORDER_JOBS;
        } finally {
            if (tempGen != null) {
                tempGen.close();
            }
        }

        if (lastJobSubmitTime <= firstJobSubmitTime) {
            LOG.error("All of your job[s] have the same submit time." + "  Please just use your input file.");

            return ALL_JOBS_SIMULTANEOUS;
        }

        double submitTimeSpan = lastJobSubmitTime - firstJobSubmitTime;

        LOG.warn("Your input trace spans " + (lastJobSubmitTime - firstJobSubmitTime) + " ticks.");

        double foldingRatio = submitTimeSpan * (numberJobs + 1) / numberJobs / inputCycle;

        if (debug) {
            LOG.warn("run: submitTimeSpan = " + submitTimeSpan + ", numberJobs = " + numberJobs
                    + ", inputCycle = " + inputCycle);
        }

        if (reader.neededSkewBufferSize() > 0) {
            LOG.warn("You needed a -skew-buffer-length of " + reader.neededSkewBufferSize()
                    + " but no more, for this input.");
        }

        double tProbability = timeDilation * concentration / foldingRatio;

        if (debug) {
            LOG.warn("run: timeDilation = " + timeDilation + ", concentration = " + concentration
                    + ", foldingRatio = " + foldingRatio);
            LOG.warn("The transcription probability is " + tProbability);
        }

        transcriptionRateInteger = (int) Math.floor(tProbability);
        transcriptionRateFraction = tProbability - Math.floor(tProbability);

        // Now read all the inputs in parallel
        heap = new PriorityQueue<Pair<LoggedJob, JobTraceReader>>(tempPaths.size(), new JobEntryComparator());

        for (Path tempPath : tempPaths) {
            JobTraceReader thisReader = new JobTraceReader(tempPath, conf);

            closees.add(thisReader);

            LoggedJob streamFirstJob = thisReader.getNext();

            long thisIndex = (streamFirstJob.getSubmitTime() - firstJobSubmitTime) / inputCycle;

            if (debug) {
                LOG.debug("A job with submit time of " + streamFirstJob.getSubmitTime() + " is in interval # "
                        + thisIndex);
            }

            adjustJobTimes(streamFirstJob);

            if (debug) {
                LOG.debug("That job's submit time is adjusted to " + streamFirstJob.getSubmitTime());
            }

            heap.add(new Pair<LoggedJob, JobTraceReader>(streamFirstJob, thisReader));
        }

        Pair<LoggedJob, JobTraceReader> next = heap.poll();

        while (next != null) {
            maybeOutput(next.first());

            if (debug) {
                LOG.debug("The most recent job has an adjusted submit time of " + next.first().getSubmitTime());
                LOG.debug(" Its replacement in the heap will come from input engine " + next.second());
            }

            LoggedJob replacement = next.second().getNext();

            if (replacement == null) {
                next.second().close();

                if (debug) {
                    LOG.debug("That input engine is depleted.");
                }
            } else {
                adjustJobTimes(replacement);

                if (debug) {
                    LOG.debug("The replacement has an adjusted submit time of " + replacement.getSubmitTime());
                }

                heap.add(new Pair<LoggedJob, JobTraceReader>(replacement, next.second()));
            }

            next = heap.poll();
        }
    } finally {
        IOUtils.cleanup(null, reader);
        if (outGen != null) {
            outGen.close();
        }
        for (Pair<LoggedJob, JobTraceReader> heapEntry : heap) {
            heapEntry.second().close();
        }
        for (Closeable closee : closees) {
            closee.close();
        }
        if (!debug) {
            Configuration conf = getConf();

            for (Path deletee : deletees) {
                FileSystem fs = deletee.getFileSystem(conf);

                try {
                    fs.delete(deletee, false);
                } catch (IOException e) {
                    // no code
                }
            }
        }
    }

    return 0;
}
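
The Folder example above uses nextLong() to build candidate segment file names and retries a few times if the name is already taken. A minimal sketch of that naming pattern follows, using java.nio.file instead of Hadoop's FileSystem; the helper name and retry count are illustrative only.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Random;

public class RandomSegmentName {
    // Hypothetical helper mirroring the retry-until-unique naming pattern above.
    static Path pickSegmentPath(Path tempDir, Random rng) throws IOException {
        for (int attempt = 0; attempt < 3; attempt++) {
            Path candidate = tempDir.resolve("segment-" + rng.nextLong() + ".json.gz");
            if (!Files.exists(candidate)) {
                return candidate; // name appears free (not atomic, but matches the pattern above)
            }
        }
        throw new IOException("Failed to pick a unique segment name after 3 attempts");
    }

    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("segments");
        System.out.println(pickSegmentPath(dir, new Random()));
    }
}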

From source file: com.facebook.LinkBench.LinkBenchDriver.java

void load() throws IOException, InterruptedException, Throwable {

    if (!doLoad) {
        logger.info("Skipping load data per the cmdline arg");
        return;
    }
    // load data
    int nLinkLoaders = ConfigUtil.getInt(props, Config.NUM_LOADERS);

    boolean bulkLoad = true;
    BlockingQueue<LoadChunk> chunk_q = new LinkedBlockingQueue<LoadChunk>();

    // max id1 to generate
    long maxid1 = ConfigUtil.getLong(props, Config.MAX_ID);
    // id1 at which to start
    long startid1 = ConfigUtil.getLong(props, Config.MIN_ID);

    // Create loaders
    logger.info("Starting loaders " + nLinkLoaders);
    logger.debug("Bulk Load setting: " + bulkLoad);

    Random masterRandom = createMasterRNG(props, Config.LOAD_RANDOM_SEED);

    boolean genNodes = ConfigUtil.getBool(props, Config.GENERATE_NODES);
    int nTotalLoaders = genNodes ? nLinkLoaders + 1 : nLinkLoaders;

    LatencyStats latencyStats = new LatencyStats(nTotalLoaders);
    List<Runnable> loaders = new ArrayList<Runnable>(nTotalLoaders);

    LoadProgress loadTracker = LoadProgress.create(logger, props);

    // First load nodes  (changed by aapo)
    if (genNodes) {
        logger.info("Will generate graph nodes during loading");
        int loaderId = nTotalLoaders - 1;
        NodeStore nodeStore = createNodeStore(null);
        Random rng = new Random(masterRandom.nextLong());
        loaders.add(new NodeLoader(props, logger, nodeStore, rng, latencyStats, csvStreamFile, loaderId));
    }

    enqueueLoadWork(chunk_q, startid1, maxid1, nLinkLoaders, new Random(masterRandom.nextLong()));
    // run loaders
    loadTracker.startTimer();
    System.out.println("Loading nodes first..."); // Changed by Aky
    long loadTime = concurrentExec(loaders);

    loaders.clear();

    // Links
    for (int i = 0; i < nLinkLoaders; i++) {
        LinkStore linkStore = createLinkStore();

        bulkLoad = bulkLoad && linkStore.bulkLoadBatchSize() > 0;
        LinkBenchLoad l = new LinkBenchLoad(linkStore, props, latencyStats, csvStreamFile, i,
                maxid1 == startid1 + 1, chunk_q, loadTracker);
        loaders.add(l);
    }

    loadTime += concurrentExec(loaders);

    long expectedNodes = maxid1 - startid1;
    long actualLinks = 0;
    long actualNodes = 0;
    for (final Runnable l : loaders) {
        if (l instanceof LinkBenchLoad) {
            actualLinks += ((LinkBenchLoad) l).getLinksLoaded();
        } else {
            assert (l instanceof NodeLoader);
            actualNodes += ((NodeLoader) l).getNodesLoaded();
        }
    }

    latencyStats.displayLatencyStats();

    if (csvStatsFile != null) {
        latencyStats.printCSVStats(csvStatsFile, true);
    }

    double loadTime_s = (loadTime / 1000.0);
    logger.info(String.format(
            "LOAD PHASE COMPLETED. " + " Loaded %d nodes (Expected %d)."
                    + " Loaded %d links (%.2f links per node). " + " Took %.1f seconds.  Links/second = %d",
            actualNodes, expectedNodes, actualLinks, actualLinks / (double) actualNodes, loadTime_s,
            (long) Math.round(actualLinks / loadTime_s)));
}
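
The LinkBench example above seeds each worker's Random from a single master RNG via masterRandom.nextLong(), so the whole load is reproducible from one seed. A minimal sketch of that hierarchical-seeding pattern, with purely illustrative names and values:

import java.util.Random;

public class MasterSeededWorkers {
    public static void main(String[] args) {
        long masterSeed = 12345L; // in LinkBench this would come from configuration
        Random master = new Random(masterSeed);

        // Each worker gets its own independent, reproducible generator.
        for (int i = 0; i < 4; i++) {
            Random workerRng = new Random(master.nextLong());
            System.out.println("worker " + i + " first value: " + workerRng.nextLong());
        }
    }
}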

From source file: io.druid.query.aggregation.hyperloglog.HyperLogLogCollectorTest.java

@Test
public void testCompare2() throws Exception {
    Random rand = new Random(0);
    HyperUniquesAggregatorFactory factory = new HyperUniquesAggregatorFactory("foo", "bar");
    Comparator comparator = factory.getComparator();
    for (int i = 1; i < 1000; ++i) {
        HyperLogLogCollector collector1 = HyperLogLogCollector.makeLatestCollector();
        int j = rand.nextInt(50);
        for (int l = 0; l < j; ++l) {
            collector1.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        HyperLogLogCollector collector2 = HyperLogLogCollector.makeLatestCollector();
        int k = j + 1 + rand.nextInt(5);
        for (int l = 0; l < k; ++l) {
            collector2.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        Assert.assertEquals(Double.compare(collector1.estimateCardinality(), collector2.estimateCardinality()),
                comparator.compare(collector1, collector2));
    }

    for (int i = 1; i < 100; ++i) {
        HyperLogLogCollector collector1 = HyperLogLogCollector.makeLatestCollector();
        int j = rand.nextInt(500);
        for (int l = 0; l < j; ++l) {
            collector1.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        HyperLogLogCollector collector2 = HyperLogLogCollector.makeLatestCollector();
        int k = j + 2 + rand.nextInt(5);
        for (int l = 0; l < k; ++l) {
            collector2.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        Assert.assertEquals(Double.compare(collector1.estimateCardinality(), collector2.estimateCardinality()),
                comparator.compare(collector1, collector2));
    }

    for (int i = 1; i < 10; ++i) {
        HyperLogLogCollector collector1 = HyperLogLogCollector.makeLatestCollector();
        int j = rand.nextInt(100000);
        for (int l = 0; l < j; ++l) {
            collector1.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        HyperLogLogCollector collector2 = HyperLogLogCollector.makeLatestCollector();
        int k = j + 20000 + rand.nextInt(100000);
        for (int l = 0; l < k; ++l) {
            collector2.add(fn.hashLong(rand.nextLong()).asBytes());
        }

        Assert.assertEquals(Double.compare(collector1.estimateCardinality(), collector2.estimateCardinality()),
                comparator.compare(collector1, collector2));
    }
}

From source file: dk.statsbiblioteket.netark.dvenabler.DVReaderTest.java

private static File generateIndex(int documents) throws IOException {
    final File INDEX = new File("target/testindex.deletefreely." + documents);
    final long seed = new Random().nextLong();
    Random random = new Random(seed);
    log.info("Testing with random seed" + seed);
    Analyzer analyzer = new StandardAnalyzer(LUCENE_VERSION);

    final FieldType SINGLE_F = new FieldType();
    SINGLE_F.setIndexed(true);
    SINGLE_F.setStored(true);

    final FieldType MULTI_F = new FieldType();
    MULTI_F.setIndexed(true);
    MULTI_F.setStored(true);

    final FieldType SEARCH_F = new FieldType();
    SEARCH_F.setIndexed(true);

    final FieldType LONG_F = new FieldType();
    LONG_F.setIndexed(true);
    LONG_F.setStored(true);
    LONG_F.setNumericType(FieldType.NumericType.LONG);

    final FieldType DOUBLE_F = new FieldType();
    DOUBLE_F.setIndexed(true);
    DOUBLE_F.setStored(true);
    DOUBLE_F.setNumericType(FieldType.NumericType.DOUBLE);

    IndexWriter indexWriter = new IndexWriter(MMapDirectory.open(INDEX),
            new IndexWriterConfig(LUCENE_VERSION, analyzer));
    for (int docID = 0; docID < documents; docID++) {
        Document document = new Document();
        document.add(new Field(ID, Integer.toString(docID), SINGLE_F));
        document.add(new Field(SEARCH, SEARCH_CONTENT + "_" + docID, SEARCH_F));
        if (random.nextInt(5) > 0) {
            document.add(new Field(SINGLE, SINGLE_CONTENT + "_r" + random.nextInt(), SINGLE_F));
        }
        if (random.nextInt(5) > 0) {
            document.add(new Field(MULTI, MULTI_CONTENT_1 + "_" + docID, MULTI_F));
            if (random.nextInt(3) > 0) {
                document.add(new Field(MULTI, MULTI_CONTENT_2 + "_random" + random.nextInt(5), MULTI_F));
            }
        }
        if (random.nextInt(5) > 0) {
            document.add(new LongField(LONG, random.nextLong(), LONG_F));
        }
        if (random.nextInt(5) > 0) {
            document.add(new DoubleField(DOUBLE, random.nextDouble(), DOUBLE_F));
        }
        indexWriter.addDocument(document);
        if (docID == documents / 3) {
            indexWriter.commit(); // Ensure multi-segment
        }
    }
    indexWriter.commit();
    indexWriter.close();
    return INDEX;
}
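
The DVReaderTest example above draws a fresh seed with new Random().nextLong(), logs it, and then seeds the test RNG with it, so a failing run can be replayed by reusing the printed seed. A minimal sketch of that pattern (names are illustrative):

import java.util.Random;

public class ReproducibleRandomTest {
    public static void main(String[] args) {
        // Draw a fresh seed, print it, then use it; a failure can be reproduced
        // by hard-coding the printed seed in place of the nextLong() call.
        final long seed = new Random().nextLong();
        System.out.println("Testing with random seed " + seed);

        Random random = new Random(seed);
        System.out.println("first generated value: " + random.nextLong());
    }
}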