List of usage examples for java.util.stream.IntStream.range
public static IntStream range(int startInclusive, int endExclusive)
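Before the project examples, a minimal self-contained sketch of the method's behavior (the class and variable names here are illustrative only, not taken from any of the projects below): range(startInclusive, endExclusive) streams the ints from startInclusive up to, but not including, endExclusive, so range(0, 5) yields 0 through 4 and range(3, 3) is empty.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class IntStreamRangeDemo {
    public static void main(String[] args) {
        // prints 0 1 2 3 4 -- the end bound (5) is exclusive
        IntStream.range(0, 5).forEach(System.out::println);

        // the half-open range [1, 11) covers 1..10, so the sum is 55
        int sum = IntStream.range(1, 11).sum();
        System.out.println(sum);

        // mapToObj turns indices into objects, as many of the examples below do
        List<String> items = IntStream.range(0, 3)
                .mapToObj(i -> "item" + i)
                .collect(Collectors.toList());
        System.out.println(items); // [item0, item1, item2]
    }
}

When an inclusive upper bound is wanted, the companion method IntStream.rangeClosed(startInclusive, endInclusive) can be used instead.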
From source file: org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java
@Test
public void testCheckPointForEntryLoggerWithMultipleActiveEntryLogs() throws Exception {
    File tmpDir = createTempDir("DiskCheck", "test");
    final ServerConfiguration conf = TestBKConfiguration.newServerConfiguration()
            .setMetadataServiceUri(zkUtil.getMetadataServiceUri()).setZkTimeout(5000)
            .setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setAutoRecoveryDaemonEnabled(false).setFlushInterval(3000)
            .setBookiePort(PortManager.nextFreePort())
            // entrylog per ledger is enabled
            .setEntryLogPerLedgerEnabled(true)
            .setLedgerStorageClass(MockInterleavedLedgerStorage.class.getName());
    Assert.assertEquals("Number of JournalDirs", 1, conf.getJournalDirs().length);
    // we know there is only one ledgerDir
    File ledgerDir = Bookie.getCurrentDirectories(conf.getLedgerDirs())[0];
    BookieServer server = new BookieServer(conf);
    server.start();
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    final BookKeeper bkClient = new BookKeeper(clientConf);

    int numOfLedgers = 12;
    int numOfEntries = 100;
    byte[] dataBytes = "data".getBytes();
    AtomicBoolean receivedExceptionForAdd = new AtomicBoolean(false);
    LongStream.range(0, numOfLedgers).parallel().mapToObj((ledgerId) -> {
        LedgerHandle handle = null;
        try {
            handle = bkClient.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "passwd".getBytes(), null);
        } catch (BKException | InterruptedException exc) {
            receivedExceptionForAdd.compareAndSet(false, true);
            LOG.error("Got Exception while trying to create LedgerHandle for ledgerId: " + ledgerId, exc);
        }
        return handle;
    }).forEach((writeHandle) -> {
        IntStream.range(0, numOfEntries).forEach((entryId) -> {
            try {
                writeHandle.addEntry(entryId, dataBytes);
            } catch (BKException | InterruptedException exc) {
                receivedExceptionForAdd.compareAndSet(false, true);
                LOG.error("Got Exception while trying to AddEntry of ledgerId: " + writeHandle.getId()
                        + " entryId: " + entryId, exc);
            }
        });
        try {
            writeHandle.close();
        } catch (BKException | InterruptedException e) {
            receivedExceptionForAdd.compareAndSet(false, true);
            LOG.error("Got Exception while trying to close writeHandle of ledgerId: " + writeHandle.getId(), e);
        }
    });
    Assert.assertFalse(
            "There shouldn't be any exceptions while creating writeHandle and adding entries to writeHandle",
            receivedExceptionForAdd.get());
    executorController.advance(Duration.ofMillis(conf.getFlushInterval()));
    // since we have waited for more than flushInterval, SyncThread should have checkpointed.
    // if entrylogperledger is not enabled, then we checkpoint only when currentLog in EntryLogger
    // is rotated. but if entrylogperledger is enabled, then we checkpoint for every flushInterval period
    File lastMarkFile = new File(ledgerDir, "lastMark");
    Assert.assertTrue("lastMark file must be existing, because checkpoint should have happened",
            lastMarkFile.exists());
    LogMark rolledLogMark = readLastMarkFile(lastMarkFile);
    Assert.assertNotEquals("rolledLogMark should not be zero, since checkpoint has happened", 0,
            rolledLogMark.compare(new LogMark()));

    bkClient.close();
    // here we are calling shutdown, but MockInterleavedLedgerStorage shutdown/flush
    // methods are noop, so entrylogger is not flushed as part of this shutdown
    // here we are trying to simulate Bookie crash, but there is no way to
    // simulate bookie abrupt crash
    server.shutdown();

    // delete journal files and lastMark, to make sure that we are not reading from
    // Journal file
    File[] journalDirs = conf.getJournalDirs();
    for (File journalDir : journalDirs) {
        File journalDirectory = Bookie.getCurrentDirectory(journalDir);
        List<Long> journalLogsId = Journal.listJournalIds(journalDirectory, null);
        for (long journalId : journalLogsId) {
            File journalFile = new File(journalDirectory, Long.toHexString(journalId) + ".txn");
            journalFile.delete();
        }
    }
    // we know there is only one ledgerDir
    lastMarkFile = new File(ledgerDir, "lastMark");
    lastMarkFile.delete();

    // now we are restarting BookieServer
    conf.setLedgerStorageClass(InterleavedLedgerStorage.class.getName());
    server = new BookieServer(conf);
    server.start();
    BookKeeper newBKClient = new BookKeeper(clientConf);
    // since Bookie checkpointed successfully before shutdown/crash,
    // we should be able to read from entryLogs though journal is deleted
    AtomicBoolean receivedExceptionForRead = new AtomicBoolean(false);
    LongStream.range(0, numOfLedgers).parallel().forEach((ledgerId) -> {
        try {
            LedgerHandle lh = newBKClient.openLedger(ledgerId, DigestType.CRC32, "passwd".getBytes());
            Enumeration<LedgerEntry> entries = lh.readEntries(0, numOfEntries - 1);
            while (entries.hasMoreElements()) {
                LedgerEntry entry = entries.nextElement();
                byte[] readData = entry.getEntry();
                Assert.assertEquals("Ledger Entry Data should match", new String("data".getBytes()),
                        new String(readData));
            }
            lh.close();
        } catch (BKException | InterruptedException e) {
            receivedExceptionForRead.compareAndSet(false, true);
            LOG.error("Got Exception while trying to read entries of ledger, ledgerId: " + ledgerId, e);
        }
    });
    Assert.assertFalse("There shouldn't be any exceptions while creating readHandle and while reading"
            + "entries using readHandle", receivedExceptionForRead.get());
    newBKClient.close();
    server.shutdown();
}
From source file: org.apache.tinkerpop.gremlin.server.GremlinDriverIntegrateTest.java
@Test
public void shouldFailClientSideWithTooLargeAResponse() {
    final Cluster cluster = Cluster.build().maxContentLength(1).create();
    final Client client = cluster.connect();
    try {
        final String fatty = IntStream.range(0, 100).mapToObj(String::valueOf).collect(Collectors.joining());
        client.submit("'" + fatty + "'").all().get();
        fail("Should throw an exception.");
    } catch (Exception re) {
        final Throwable root = ExceptionUtils.getRootCause(re);
        assertTrue(root.getMessage().equals("Max frame length of 1 has been exceeded."));
    } finally {
        cluster.close();
    }
}
From source file: com.netflix.imfutility.itunes.ITunesFormatBuilder.java
private void processSubtitles() {
    int count = iTunesInputParameters.getSubFiles() == null
            ? contextProvider.getSequenceContext().getSequenceCount(SequenceType.SUBTITLE)
            : iTunesInputParameters.getSubFiles().size();
    if (count == 0) {
        return;
    }
    IntStream.range(0, count)
            .mapToObj(i -> contextProvider.getDynamicContext()
                    .getParameterValueAsString(DYNAMIC_PARAM_SUBTITLE_ITT_PREFIX + i))
            .map(fileName -> new File(inputParameters.getWorkingDirFile(), fileName))
            .forEach(this::safeProcessSubtitles);
}
From source file: com.yahoo.bullet.storm.FilterBoltTest.java
@Test
public void testGroupAllCount() {
    // 15 Records will be consumed
    bolt = ComponentUtils.prepare(new DonableFilterBolt(15, new BulletStormConfig()), collector);
    Tuple query = makeIDTuple(TupleClassifier.Type.QUERY_TUPLE, "42",
            makeGroupFilterQuery("timestamp", asList("1", "2"), EQUALS, GROUP, 1,
                    singletonList(new GroupOperation(COUNT, null, "cnt"))),
            METADATA);
    bolt.execute(query);

    BulletRecord record = RecordBox.get().add("timestamp", "1").getRecord();
    Tuple matching = makeRecordTuple(record);
    IntStream.range(0, 10).forEach(i -> bolt.execute(matching));

    BulletRecord another = RecordBox.get().getRecord();
    Tuple nonMatching = makeRecordTuple(another);
    IntStream.range(0, 5).forEach(i -> bolt.execute(nonMatching));
    bolt.execute(nonMatching);

    // Two to flush bolt
    Tuple tick = TupleUtils.makeTuple(TupleClassifier.Type.TICK_TUPLE);
    bolt.execute(tick);
    bolt.execute(tick);

    Assert.assertEquals(collector.getEmittedCount(), 1);
    GroupData actual = SerializerDeserializer.fromBytes(getRawPayloadOfNthTuple(1));
    BulletRecord expected = RecordBox.get().add("cnt", 10).getRecord();
    Assert.assertTrue(isEqual(actual, expected));
}
From source file: org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java
@Test
public void shouldAllowConcurrentModificationOfGlobals() throws Exception {
    // this test simulates a scenario that likely shouldn't happen - where globals are modified by multiple
    // threads. globals are created in a synchronized fashion typically but it's possible that someone
    // could do something like this, and this test validates that concurrency exceptions don't occur as a
    // result
    final ExecutorService service = Executors.newFixedThreadPool(8, testingThreadFactory);
    final Bindings globals = new SimpleBindings();
    globals.put("g", -1);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().globalBindings(globals).create();

    final AtomicBoolean failed = new AtomicBoolean(false);
    final int max = 512;
    final List<Pair<Integer, List<Integer>>> futures = Collections.synchronizedList(new ArrayList<>(max));
    IntStream.range(0, max).forEach(i -> {
        final int yValue = i * 2;
        final Bindings b = new SimpleBindings();
        b.put("x", i);
        b.put("y", yValue);
        final int zValue = i * -1;
        final String script = "z=" + zValue + ";[x,y,z,g]";
        try {
            service.submit(() -> {
                try {
                    // modify the global in a separate thread
                    gremlinExecutor.getGlobalBindings().put("g", i);
                    gremlinExecutor.getGlobalBindings().put(Integer.toString(i), i);
                    gremlinExecutor.getGlobalBindings().keySet().stream()
                            .filter(s -> i % 2 == 0 && !s.equals("g")).findFirst().ifPresent(globals::remove);
                    final List<Integer> result = (List<Integer>) gremlinExecutor.eval(script, b).get();
                    futures.add(Pair.with(i, result));
                } catch (Exception ex) {
                    failed.set(true);
                }
            });
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    });

    service.shutdown();
    assertThat(service.awaitTermination(60000, TimeUnit.MILLISECONDS), is(true));

    // likely a concurrency exception if it occurs - and if it does then we've messed up because that's what this
    // test is partially designed to protect against.
    assertThat(failed.get(), is(false));

    assertEquals(max, futures.size());
    futures.forEach(t -> {
        assertEquals(t.getValue0(), t.getValue1().get(0));
        assertEquals(t.getValue0() * 2, t.getValue1().get(1).intValue());
        assertEquals(t.getValue0() * -1, t.getValue1().get(2).intValue());
        assertThat(t.getValue1().get(3).intValue(), greaterThan(-1));
    });
}
From source file: org.apache.tinkerpop.gremlin.server.GremlinServerIntegrateTest.java
@SuppressWarnings("ThrowableResultOfMethodCallIgnored") @Test//from w ww . j ava2s. c o m public void shouldBlockRequestWhenTooBig() throws Exception { final Cluster cluster = Cluster.open(); final Client client = cluster.connect(); try { final String fatty = IntStream.range(0, 1024).mapToObj(String::valueOf).collect(Collectors.joining()); final CompletableFuture<ResultSet> result = client.submitAsync("'" + fatty + "';'test'"); final ResultSet resultSet = result.get(10000, TimeUnit.MILLISECONDS); resultSet.all().get(10000, TimeUnit.MILLISECONDS); fail("Should throw an exception."); } catch (TimeoutException te) { // the request should not have timed-out - the connection should have been reset, but it seems that // timeout seems to occur as well on some systems (it's not clear why). however, the nature of this // test is to ensure that the script isn't processed if it exceeds a certain size, so in this sense // it seems ok to pass in this case. } catch (Exception re) { final Throwable root = ExceptionUtils.getRootCause(re); assertEquals("Connection reset by peer", root.getMessage()); // validate that we can still send messages to the server assertEquals(2, client.submit("1+1").all().join().get(0).getInt()); } finally { cluster.close(); } }
From source file: com.yahoo.bullet.storm.FilterBoltTest.java
@Test
public void testCountDistinct() {
    // 256 Records will be consumed
    BulletStormConfig config = new BulletStormConfig(CountDistinctTest.makeConfiguration(8, 512));
    bolt = ComponentUtils.prepare(new DonableFilterBolt(256, config), collector);
    Tuple query = makeIDTuple(TupleClassifier.Type.QUERY_TUPLE, "42",
            makeAggregationQuery(COUNT_DISTINCT, 1, null, Pair.of("field", "field")), METADATA);
    bolt.execute(query);

    IntStream.range(0, 256).mapToObj(i -> RecordBox.get().add("field", i).getRecord())
            .map(FilterBoltTest::makeRecordTuple).forEach(bolt::execute);

    Assert.assertEquals(collector.getEmittedCount(), 0);

    Tuple tick = TupleUtils.makeTuple(TupleClassifier.Type.TICK_TUPLE);
    bolt.execute(tick);
    bolt.execute(tick);

    Assert.assertEquals(collector.getEmittedCount(), 1);
    byte[] rawData = getRawPayloadOfNthTuple(1);
    Assert.assertNotNull(rawData);

    CountDistinct distinct = CountDistinctTest.makeCountDistinct(config, singletonList("field"));
    distinct.combine(rawData);

    BulletRecord actual = distinct.getRecords().get(0);
    BulletRecord expected = RecordBox.get().add(CountDistinct.DEFAULT_NEW_NAME, 256.0).getRecord();
    Assert.assertEquals(actual, expected);
}
From source file: com.yahoo.bullet.drpc.JoinBoltTest.java
@Test
public void testCountDistinct() {
    Map<String, Object> config = new HashMap<>();
    config.put(BulletConfig.COUNT_DISTINCT_AGGREGATION_SKETCH_ENTRIES, 512);

    Aggregation aggregation = new Aggregation();
    aggregation.setConfiguration(config);
    aggregation.setFields(singletonMap("field", "foo"));

    CountDistinct distinct = new CountDistinct(aggregation);
    IntStream.range(0, 256).mapToObj(i -> RecordBox.get().add("field", i).getRecord())
            .forEach(distinct::consume);
    byte[] first = distinct.getSerializedAggregation();

    distinct = new CountDistinct(aggregation);
    IntStream.range(128, 256).mapToObj(i -> RecordBox.get().add("field", i).getRecord())
            .forEach(distinct::consume);
    byte[] second = distinct.getSerializedAggregation();

    // Send generated data to JoinBolt
    bolt = ComponentUtils.prepare(config, new ExpiringJoinBolt(), collector);
    Tuple rule = TupleUtils.makeIDTuple(TupleType.Type.RULE_TUPLE, 42L,
            makeAggregationRule(COUNT_DISTINCT, 1, null, Pair.of("field", "field")));
    bolt.execute(rule);
    Tuple returnInfo = TupleUtils.makeIDTuple(TupleType.Type.RETURN_TUPLE, 42L, "");
    bolt.execute(returnInfo);

    sendRawByteTuplesTo(bolt, 42L, asList(first, second));

    List<BulletRecord> result = singletonList(
            RecordBox.get().add(CountDistinct.DEFAULT_NEW_NAME, 256.0).getRecord());
    Tuple expected = TupleUtils.makeTuple(TupleType.Type.JOIN_TUPLE, Clip.of(result).asJSON(), "");

    Tuple tick = TupleUtils.makeTuple(TupleType.Type.TICK_TUPLE);
    bolt.execute(tick);
    bolt.execute(tick);
    for (int i = 0; i < JoinBolt.DEFAULT_RULE_TICKOUT - 1; ++i) {
        bolt.execute(tick);
        Assert.assertFalse(collector.wasTupleEmitted(expected));
    }
    bolt.execute(tick);

    Assert.assertTrue(collector.wasNthEmitted(expected, 1));
    Assert.assertEquals(collector.getAllEmitted().count(), 1);
}
From source file: org.ow2.proactive.scheduling.api.GraphqlServiceIntegrationTest.java
private List<JobData> createJobData(int count) {
    return IntStream.range(1, count + 1)
            .mapToObj(index -> createJobData("job" + index,
                    index % 2 == 0 ? CONTEXT_LOGIN : "owner" + index,
                    index % 2 == 0 ? JobPriority.IDLE : JobPriority.HIGH,
                    "projectName" + index,
                    index % 2 == 0 ? JobStatus.CANCELED : JobStatus.KILLED))
            .collect(Collectors.toList());
}
From source file: org.ow2.proactive.scheduling.api.GraphqlServiceIntegrationTest.java
private List<TaskData> createTaskData(JobData jobData, int nbTasks) {
    return IntStream.range(1, nbTasks + 1)
            .mapToObj(index -> createTaskData(jobData, index - 1, "task" + index))
            .collect(Collectors.toList());
}