Example usage for java.util.stream LongStream range

Introduction

On this page you can find usage examples for java.util.stream LongStream.range.

Prototype

public static LongStream range(long startInclusive, final long endExclusive) 

Document

Returns a sequential ordered LongStream from startInclusive (inclusive) to endExclusive (exclusive) by an incremental step of 1.
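
A minimal, self-contained sketch of the method's behavior (the class name LongStreamRangeDemo is illustrative, not taken from any of the projects below). It shows the range both as a plain numeric stream and boxed to an object stream via mapToObj, the pattern most of the usage examples on this page rely on.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.LongStream;

public class LongStreamRangeDemo {
    public static void main(String[] args) {
        // Prints 0 through 4; the end bound (5) is exclusive.
        LongStream.range(0, 5).forEach(System.out::println);

        // Boxing each long to an object via mapToObj, as the project
        // examples below do with Avro records and ledger ids.
        List<String> labels = LongStream.range(0, 3)
                .mapToObj(i -> "item-" + i)
                .collect(Collectors.toList());
        System.out.println(labels); // [item-0, item-1, item-2]
    }
}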

Usage

From source file:io.divolte.server.hdfs.HdfsFlusherTest.java

@Test
public void shouldNotCreateEmptyFiles() throws IOException, InterruptedException {
    final Schema schema = schemaFromClassPath("/MinimalRecord.avsc");
    final Config config = ConfigFactory
            .parseString("divolte.hdfs_flusher.simple_rolling_file_strategy.roll_every = 100 millisecond\n"
                    + "divolte.hdfs_flusher.simple_rolling_file_strategy.working_dir = \""
                    + tempInflightDir.toString() + "\"\n"
                    + "divolte.hdfs_flusher.simple_rolling_file_strategy.publish_dir = \""
                    + tempPublishDir.toString() + '"')
            .withFallback(ConfigFactory.parseResources("hdfs-flusher-test.conf"));
    final ValidatedConfiguration vc = new ValidatedConfiguration(() -> config);

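    // Build five records with timestamps 0..4 via LongStream.range(0, 5).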
    final List<Record> records = LongStream.range(0, 5).mapToObj(
            (time) -> new GenericRecordBuilder(schema).set("ts", time).set("remoteHost", ARBITRARY_IP).build())
            .collect(Collectors.toList());

    final HdfsFlusher flusher = new HdfsFlusher(vc, schema);

    records.forEach((record) -> flusher.process(AvroRecordBuffer.fromRecord(DivolteIdentifier.generate(),
            DivolteIdentifier.generate(), System.currentTimeMillis(), 0, record)));

    for (int c = 0; c < 4; c++) {
        Thread.sleep(500);
        flusher.heartbeat();
    }

    records.forEach((record) -> flusher.process(AvroRecordBuffer.fromRecord(DivolteIdentifier.generate(),
            DivolteIdentifier.generate(), System.currentTimeMillis(), 0, record)));

    flusher.cleanup();

    final MutableInt count = new MutableInt(0);
    Files.walk(tempPublishDir).filter((p) -> p.toString().endsWith(".avro")).forEach((p) -> {
        verifyAvroFile(records, schema, p);
        count.increment();
    });

    assertEquals(2, count.intValue());
}

From source file:org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java

@Test
public void testCheckPointForEntryLoggerWithMultipleActiveEntryLogs() throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");

    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(5000)
            .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setAutoRecoveryDaemonEnabled(false).setFlushInterval(3000)
            .setBookiePort(PortManager.nextFreePort())
            // entrylog per ledger is enabled
            .setEntryLogPerLedgerEnabled(true)
            .setLedgerStorageClass(MockInterleavedLedgerStorage.class.getName());

    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    final BookKeeper bkClient = new BookKeeper(clientConf);

    int numOfLedgers = 12;
    int numOfEntries = 100;
    byte[] dataBytes = "data".getBytes();
    AtomicBoolean receivedExceptionForAdd = new AtomicBoolean(false);
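    // Create one ledger per id in [0, numOfLedgers) in parallel, then write entries to each.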
    LongStream.range(0, numOfLedgers).parallel().mapToObj((ledgerId) -> {
        LedgerHandle handle = null;
        try {
            handle = bkClient.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "passwd".getBytes(), null);
        } catch (BKException | InterruptedException exc) {
            receivedExceptionForAdd.compareAndSet(false, true);
            LOG.error("Got Exception while trying to create LedgerHandle for ledgerId: " + ledgerId, exc);
        }
        return handle;
    }).forEach((writeHandle) -> {
        IntStream.range(0, numOfEntries).forEach((entryId) -> {
            try {
                writeHandle.addEntry(entryId, dataBytes);
            } catch (BKException | InterruptedException exc) {
                receivedExceptionForAdd.compareAndSet(false, true);
                LOG.error("Got Exception while trying to AddEntry of ledgerId: " + writeHandle.getId()
                        + " entryId: " + entryId, exc);
            }
        });
        try {
            writeHandle.close();
        } catch (BKException | InterruptedException e) {
            receivedExceptionForAdd.compareAndSet(false, true);
            LOG.error("Got Exception while trying to close writeHandle of ledgerId: " + writeHandle.getId(), e);
        }
    });

    Assert.assertFalse(
            "There shouldn't be any exceptions while creating writeHandle and adding entries to writeHandle",
            receivedExceptionForAdd.get());

    executorController.advance(Duration.ofMillis(conf.getFlushInterval()));
    // since we have waited for more than flushInterval, SyncThread should have checkpointed.
    // If entryLogPerLedger is not enabled, then we checkpoint only when currentLog in EntryLogger
    // is rotated; but if entryLogPerLedger is enabled, then we checkpoint every flushInterval period.
    File lastMarkFile = new File(ledgerDir, "lastMark");
    Assert.assertTrue("lastMark file must be existing, because checkpoint should have happened",
            lastMarkFile.exists());
    LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
    Assert.assertNotEquals("rolledLogMark should not be zero, since checkpoint has happenend", 0,
            rolledLogMark.compare(new LogMark()));

    bkClient.close();
    // here we are calling shutdown, but MockInterleavedLedgerStorage shutdown/flush
    // methods are no-ops, so the entrylogger is not flushed as part of this shutdown.
    // We are trying to simulate a Bookie crash, since there is no way to
    // simulate an abrupt bookie crash directly.
    server.shutdown();

    // delete journal files and lastMark, to make sure that we are not reading from
    // the journal files
    File[] journalDirs = conf.getJournalDirs();
    for (File journalDir : journalDirs) {
        File journalDirectory = Bookie.getCurrentDirectory(journalDir);
        List<Long> journalLogsId = Journal.listJournalIds(journalDirectory, null);
        for (long journalId : journalLogsId) {
            File journalFile = new File(journalDirectory, Long.toHexString(journalId) + ".txn");
            journalFile.delete();
        }
    }

    // we know there is only one ledgerDir
    lastMarkFile = new File(ledgerDir, "lastMark");
    lastMarkFile.delete();

    // now we are restarting BookieServer
    conf.setLedgerStorageClass(InterleavedLedgerStorage.class.getName());
    server = new BookieServer(conf);
    server.start();
    BookKeeper newBKClient = new BookKeeper(clientConf);
    // since the Bookie checkpointed successfully before shutdown/crash,
    // we should be able to read from the entryLogs even though the journal is deleted

    AtomicBoolean receivedExceptionForRead = new AtomicBoolean(false);

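    // Open each ledger in parallel and verify that every entry written earlier can be read back.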
    LongStream.range(0, numOfLedgers).parallel().forEach((ledgerId) -> {
        try {
            LedgerHandle lh = newBKClient.openLedger(ledgerId, DigestType.CRC32, "passwd".getBytes());
            Enumeration<LedgerEntry> entries = lh.readEntries(0, numOfEntries - 1);
            while (entries.hasMoreElements()) {
                LedgerEntry entry = entries.nextElement();
                byte[] readData = entry.getEntry();
                Assert.assertEquals("Ledger Entry Data should match", new String("data".getBytes()),
                        new String(readData));
            }
            lh.close();
        } catch (BKException | InterruptedException e) {
            receivedExceptionForRead.compareAndSet(false, true);
            LOG.error("Got Exception while trying to read entries of ledger, ledgerId: " + ledgerId, e);
        }
    });
    Assert.assertFalse("There shouldn't be any exceptions while creating readHandle and while reading"
            + "entries using readHandle", receivedExceptionForRead.get());

    newBKClient.close();
    server.shutdown();
}

From source file:org.apache.bookkeeper.common.util.TestBackoff.java

@Test
public void testExponential() throws Exception {
    Stream<Long> backoffs = Backoff.exponential(1000, 2, Long.MAX_VALUE).limit(10);
    Stream<Long> expectedBackoffs = LongStream.range(0L, 10L).mapToObj(i -> (1000L << i));
    assertStreamEquals(expectedBackoffs, backoffs);
}

From source file:org.apache.bookkeeper.common.util.TestBackoff.java

@Test
public void testExponentialPolicy() throws Exception {
    Stream<Long> expectedBackoffs = LongStream.range(0L, 10L).mapToObj(i -> (1000L << i));
    Backoff.Policy policy = Backoff.Exponential.of(1000, Long.MAX_VALUE, 2, 10);
    assertStreamEquals(expectedBackoffs, policy.toBackoffs());
}

From source file:org.apache.bookkeeper.common.util.TestBackoff.java

@Test
public void testExponentialWithUpperLimit() throws Exception {
    Stream<Long> backoffs = Backoff.exponential(1000, 2, 32000).limit(10);
    Stream<Long> expectedBackoffs = LongStream.range(0L, 10L).mapToObj(i -> Math.min(1000L << i, 32000));
    assertStreamEquals(expectedBackoffs, backoffs);
}

From source file:org.apache.bookkeeper.common.util.TestBackoff.java

@Test
public void testExponentialPolicyWithUpperLimit() throws Exception {
    Stream<Long> expectedBackoffs = LongStream.range(0L, 10L).mapToObj(i -> Math.min(1000L << i, 32000));
    Backoff.Policy policy = Backoff.Exponential.of(1000, 32000, 2, 10);
    assertStreamEquals(expectedBackoffs, policy.toBackoffs());
}

From source file:org.apache.bookkeeper.common.util.TestBackoff.java

@Test
public void testConstant() throws Exception {
    Stream<Long> backoffs = Backoff.constant(12345L).limit(10);
    Stream<Long> expectedBackoffs = LongStream.range(0L, 10L).mapToObj(i -> 12345L);
    assertStreamEquals(expectedBackoffs, backoffs);
}

From source file:org.apache.bookkeeper.common.util.TestBackoff.java

@Test
public void testConstantPolicy() throws Exception {
    Stream<Long> backoffs = Backoff.Constant.of(12345L, 10).toBackoffs();
    Stream<Long> expectedBackoffs = LongStream.range(0L, 10L).mapToObj(i -> 12345L);
    assertStreamEquals(expectedBackoffs, backoffs);
}

From source file:org.apache.bookkeeper.stream.storage.impl.sc.DefaultStorageContainerControllerTest.java

private static void verifyAssignmentData(ClusterAssignmentData newAssignment,
        Set<BookieSocketAddress> currentCluster, boolean isInitialIdealState) throws Exception {
    int numServers = currentCluster.size();

    assertEquals(numServers, newAssignment.getServersCount());
    Set<Long> assignedContainers = Sets.newHashSet();
    Set<BookieSocketAddress> assignedServers = Sets.newHashSet();

    int numContainersPerServer = NUM_STORAGE_CONTAINERS / numServers;
    int serverIdx = 0;
    for (Map.Entry<String, ServerAssignmentData> entry : newAssignment.getServersMap().entrySet()) {
        log.info("Check assignment for server {} = {}", entry.getKey(), entry.getValue());

        BookieSocketAddress address = new BookieSocketAddress(entry.getKey());
        assignedServers.add(address);
        assertEquals(serverIdx + 1, assignedServers.size());

        ServerAssignmentData serverData = entry.getValue();
        assertEquals(numContainersPerServer, serverData.getContainersCount());
        List<Long> containers = Lists.newArrayList(serverData.getContainersList());
        Collections.sort(containers);
        assignedContainers.addAll(containers);

        if (isInitialIdealState) {
            long startContainerId = containers.get(0);
            for (int i = 0; i < containers.size(); i++) {
                assertEquals(startContainerId + i * numServers, containers.get(i).longValue());
            }
        }
        ++serverIdx;
    }

    // each server should be assigned an equal number of containers
    assertTrue(Sets.difference(currentCluster, assignedServers).isEmpty());
    // all containers should be assigned
    Set<Long> expectedContainers = LongStream.range(0L, NUM_STORAGE_CONTAINERS)
            .mapToObj(scId -> Long.valueOf(scId)).collect(Collectors.toSet());
    assertTrue(Sets.difference(expectedContainers, assignedContainers).isEmpty());
}

From source file:org.apache.bookkeeper.stream.storage.impl.sc.DefaultStorageContainerControllerTest.java

private static void verifyAssignmentDataWhenHasMoreServers(ClusterAssignmentData newAssignment,
        Set<BookieSocketAddress> currentCluster) throws Exception {
    int numServers = currentCluster.size();

    assertEquals(numServers, newAssignment.getServersCount());
    Set<Long> assignedContainers = Sets.newHashSet();
    Set<BookieSocketAddress> assignedServers = Sets.newHashSet();

    int numEmptyServers = 0;
    int numAssignedServers = 0;
    int serverIdx = 0;
    for (Map.Entry<String, ServerAssignmentData> entry : newAssignment.getServersMap().entrySet()) {
        log.info("Check assignment for server {} = {}", entry.getKey(), entry.getValue());

        BookieSocketAddress address = new BookieSocketAddress(entry.getKey());
        assignedServers.add(address);
        assertEquals(serverIdx + 1, assignedServers.size());

        ServerAssignmentData serverData = entry.getValue();
        if (serverData.getContainersCount() > 0) {
            assertEquals(1, serverData.getContainersCount());
            ++numAssignedServers;
        } else {
            ++numEmptyServers;
        }
        List<Long> containers = Lists.newArrayList(serverData.getContainersList());
        Collections.sort(containers);
        assignedContainers.addAll(containers);

        ++serverIdx;
    }

    assertEquals(numServers / 2, numEmptyServers);
    assertEquals(numServers / 2, numAssignedServers);

    // each server should be assigned an equal number of containers
    assertTrue(Sets.difference(currentCluster, assignedServers).isEmpty());
    // all containers should be assigned
    Set<Long> expectedContainers = LongStream.range(0L, NUM_STORAGE_CONTAINERS)
            .mapToObj(scId -> Long.valueOf(scId)).collect(Collectors.toSet());
    assertTrue(Sets.difference(expectedContainers, assignedContainers).isEmpty());
}