Usage examples for java.util.concurrent.CompletableFuture.allOf
public static CompletableFuture<Void> allOf(CompletableFuture<?>... cfs)
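allOf combines several independent futures into one CompletableFuture<Void> that completes when every input has completed, so the individual results must be read back from the original futures afterwards. A minimal, self-contained sketch of that pattern (class and variable names here are illustrative only, not taken from the sources below):

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.stream.Collectors;

    public class AllOfSketch {
        public static void main(String[] args) {
            List<CompletableFuture<Integer>> futures = List.of(
                    CompletableFuture.supplyAsync(() -> 1),
                    CompletableFuture.supplyAsync(() -> 2),
                    CompletableFuture.supplyAsync(() -> 3));

            // allOf returns CompletableFuture<Void>: it only signals completion,
            // it does not carry the individual results.
            CompletableFuture<List<Integer>> combined = CompletableFuture
                    .allOf(futures.toArray(new CompletableFuture[0]))
                    // Once allOf completes normally, every input future is already done,
                    // so join() here does not block.
                    .thenApply(v -> futures.stream()
                            .map(CompletableFuture::join)
                            .collect(Collectors.toList()));

            System.out.println(combined.join()); // prints [1, 2, 3]
        }
    }

The toArray(new CompletableFuture[0]) conversion is the same idiom most of the examples below use to adapt a List of futures to the varargs parameter.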
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
@Override
public CompletableFuture<CacheEvictionStats> clearBlockCache(TableName tableName) {
    CompletableFuture<CacheEvictionStats> future = new CompletableFuture<>();
    addListener(getTableHRegionLocations(tableName), (locations, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        Map<ServerName, List<RegionInfo>> regionInfoByServerName = locations.stream()
                .filter(l -> l.getRegion() != null)
                .filter(l -> !l.getRegion().isOffline())
                .filter(l -> l.getServerName() != null)
                .collect(Collectors.groupingBy(l -> l.getServerName(),
                        Collectors.mapping(l -> l.getRegion(), Collectors.toList())));
        List<CompletableFuture<CacheEvictionStats>> futures = new ArrayList<>();
        CacheEvictionStatsAggregator aggregator = new CacheEvictionStatsAggregator();
        for (Map.Entry<ServerName, List<RegionInfo>> entry : regionInfoByServerName.entrySet()) {
            futures.add(clearBlockCache(entry.getKey(), entry.getValue()).whenComplete((stats, err2) -> {
                if (err2 != null) {
                    future.completeExceptionally(unwrapCompletionException(err2));
                } else {
                    aggregator.append(stats);
                }
            }));
        }
        addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()])),
                (ret, err3) -> {
                    if (err3 != null) {
                        future.completeExceptionally(unwrapCompletionException(err3));
                    } else {
                        future.complete(aggregator.sum());
                    }
                });
    });
    return future;
}
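The HBase method above completes its result future exceptionally as soon as any per-server call fails, which matches allOf's own contract: the combined future completes exceptionally if any input does, with the original failure wrapped in a CompletionException (hence the unwrapCompletionException calls). A minimal sketch of that failure path, independent of the HBase code (names illustrative):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CompletionException;

    public class AllOfFailureSketch {
        public static void main(String[] args) {
            CompletableFuture<String> ok = CompletableFuture.completedFuture("ok");
            CompletableFuture<String> failed = new CompletableFuture<>();
            failed.completeExceptionally(new IllegalStateException("boom"));

            CompletableFuture.allOf(ok, failed)
                    .whenComplete((v, err) -> {
                        if (err != null) {
                            // err is typically a CompletionException wrapping the real cause.
                            Throwable cause = err instanceof CompletionException ? err.getCause() : err;
                            System.out.println("allOf failed: " + cause);
                        } else {
                            System.out.println("all inputs completed normally");
                        }
                    })
                    .exceptionally(e -> null) // recover so the final join() does not throw
                    .join();
        }
    }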
From source file:org.apache.hadoop.hbase.client.TestAsyncGetMultiThread.java
@BeforeClass
public static void setUp() throws Exception {
    TEST_UTIL.getConfiguration().set(TABLES_ON_MASTER, "none");
    TEST_UTIL.getConfiguration().setLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 60000L);
    TEST_UTIL.getConfiguration().setLong(HBASE_RPC_READ_TIMEOUT_KEY, 1000L);
    TEST_UTIL.getConfiguration().setInt(HBASE_CLIENT_RETRIES_NUMBER, 1000);
    TEST_UTIL.getConfiguration().setInt(ByteBufferPool.MAX_POOL_SIZE_KEY, 100);
    TEST_UTIL.startMiniCluster(5);
    SPLIT_KEYS = new byte[8][];
    for (int i = 111; i < 999; i += 111) {
        SPLIT_KEYS[i / 111 - 1] = Bytes.toBytes(String.format("%03d", i));
    }
    TEST_UTIL.createTable(TABLE_NAME, FAMILY);
    TEST_UTIL.waitTableAvailable(TABLE_NAME);
    CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration());
    RawAsyncTable table = CONN.getRawTable(TABLE_NAME);
    List<CompletableFuture<?>> futures = new ArrayList<>();
    IntStream.range(0, COUNT).forEach(i -> futures.add(table.put(
            new Put(Bytes.toBytes(String.format("%03d", i))).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i)))));
    CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0])).get();
}
From source file:org.apache.james.mailbox.cassandra.mail.CassandraAttachmentMapper.java
@Override
public void storeAttachments(Collection<Attachment> attachments) throws MailboxException {
    try {
        CompletableFuture.allOf(attachments.stream()
                .map(Throwing.function(this::asyncStoreAttachment))
                .toArray(CompletableFuture[]::new))
                .join();
    } catch (ThrownByLambdaException e) {
        throw new MailboxException(e.getCause().getMessage(), e.getCause());
    }
}
From source file:org.apache.pulsar.compaction.CompactedTopicTest.java
/**
 * Build a compacted ledger, and return the id of the ledger, the position of the different
 * entries in the ledger, and a list of gaps, and the entry which should be returned after the gap.
 */
private Triple<Long, List<Pair<MessageIdData, Long>>, List<Pair<MessageIdData, Long>>> buildCompactedLedger(
        BookKeeper bk, int count) throws Exception {
    LedgerHandle lh = bk.createLedger(1, 1, Compactor.COMPACTED_TOPIC_LEDGER_DIGEST_TYPE,
            Compactor.COMPACTED_TOPIC_LEDGER_PASSWORD);
    List<Pair<MessageIdData, Long>> positions = new ArrayList<>();
    List<Pair<MessageIdData, Long>> idsInGaps = new ArrayList<>();

    AtomicLong ledgerIds = new AtomicLong(10L);
    AtomicLong entryIds = new AtomicLong(0L);
    CompletableFuture.allOf(IntStream.range(0, count).mapToObj((i) -> {
        List<MessageIdData> idsInGap = new ArrayList<MessageIdData>();
        if (r.nextInt(10) == 1) {
            long delta = r.nextInt(10) + 1;
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                    .setEntryId(entryIds.get() + 1).build());
            ledgerIds.addAndGet(delta);
            entryIds.set(0);
        }
        long delta = r.nextInt(5);
        if (delta != 0) {
            idsInGap.add(MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                    .setEntryId(entryIds.get() + 1).build());
        }
        MessageIdData id = MessageIdData.newBuilder().setLedgerId(ledgerIds.get())
                .setEntryId(entryIds.addAndGet(delta + 1)).build();

        @Cleanup
        RawMessage m = new RawMessageImpl(id, Unpooled.EMPTY_BUFFER);
        CompletableFuture<Void> f = new CompletableFuture<>();
        ByteBuf buffer = m.serialize();
        lh.asyncAddEntry(buffer, (rc, ledger, eid, ctx) -> {
            if (rc != BKException.Code.OK) {
                f.completeExceptionally(BKException.create(rc));
            } else {
                positions.add(Pair.of(id, eid));
                idsInGap.forEach((gid) -> idsInGaps.add(Pair.of(gid, eid)));
                f.complete(null);
            }
        }, null);
        return f;
    }).toArray(CompletableFuture[]::new)).get();
    lh.close();

    return Triple.of(lh.getId(), positions, idsInGaps);
}
From source file:org.apache.pulsar.functions.runtime.LocalRunner.java
protected static void startLocalRun(org.apache.pulsar.functions.proto.Function.FunctionDetails functionDetails,
        int parallelism, int instanceIdOffset, String brokerServiceUrl, String stateStorageServiceUrl,
        AuthenticationConfig authConfig, String userCodeFile) throws Exception {

    String serviceUrl = DEFAULT_SERVICE_URL;
    if (brokerServiceUrl != null) {
        serviceUrl = brokerServiceUrl;
    }

    try (ProcessRuntimeFactory containerFactory = new ProcessRuntimeFactory(serviceUrl, stateStorageServiceUrl,
            authConfig,
            null, /* java instance jar file */
            null, /* python instance file */
            null, /* log directory */
            null, /* extra dependencies dir */
            new DefaultSecretsProviderConfigurator())) {
        List<RuntimeSpawner> spawners = new LinkedList<>();
        for (int i = 0; i < parallelism; ++i) {
            InstanceConfig instanceConfig = new InstanceConfig();
            instanceConfig.setFunctionDetails(functionDetails);
            // TODO: correctly implement function version and id
            instanceConfig.setFunctionVersion(UUID.randomUUID().toString());
            instanceConfig.setFunctionId(UUID.randomUUID().toString());
            instanceConfig.setInstanceId(i + instanceIdOffset);
            instanceConfig.setMaxBufferedTuples(1024);
            instanceConfig.setPort(Utils.findAvailablePort());
            instanceConfig.setClusterName("local");
            RuntimeSpawner runtimeSpawner = new RuntimeSpawner(instanceConfig, userCodeFile, null,
                    containerFactory, 30000);
            spawners.add(runtimeSpawner);
            runtimeSpawner.start();
        }
        java.lang.Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                log.info("Shutting down the localrun runtimeSpawner ...");
                for (RuntimeSpawner spawner : spawners) {
                    spawner.close();
                }
            }
        });
        Timer statusCheckTimer = new Timer();
        statusCheckTimer.scheduleAtFixedRate(new TimerTask() {
            @Override
            public void run() {
                CompletableFuture<String>[] futures = new CompletableFuture[spawners.size()];
                int index = 0;
                for (RuntimeSpawner spawner : spawners) {
                    futures[index] = spawner.getFunctionStatusAsJson(index);
                    index++;
                }
                try {
                    CompletableFuture.allOf(futures).get(5, TimeUnit.SECONDS);
                    for (index = 0; index < futures.length; ++index) {
                        String json = futures[index].get();
                        Gson gson = new GsonBuilder().setPrettyPrinting().create();
                        log.info(gson.toJson(new JsonParser().parse(json)));
                    }
                } catch (Exception ex) {
                    log.error("Could not get status from all local instances");
                }
            }
        }, 30000, 30000);
        java.lang.Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                statusCheckTimer.cancel();
            }
        });
        for (RuntimeSpawner spawner : spawners) {
            spawner.join();
            log.info("RuntimeSpawner quit because of", spawner.getRuntime().getDeathException());
        }
    }
}
From source file:org.apache.pulsar.tests.integration.semantics.SemanticsTest.java
@Test(dataProvider = "ServiceUrls")
public void testBatchProducing(String serviceUrl) throws Exception {
    String topicName = generateTopicName("testbatchproducing", true);

    int numMessages = 10;
    List<MessageId> producedMsgIds;

    try (PulsarClient client = PulsarClient.builder().serviceUrl(serviceUrl).build()) {
        try (Consumer<String> consumer = client.newConsumer(Schema.STRING).topic(topicName)
                .subscriptionName("my-sub").subscribe()) {
            try (Producer<String> producer = client.newProducer(Schema.STRING).topic(topicName)
                    .enableBatching(true).batchingMaxMessages(5).batchingMaxPublishDelay(1, TimeUnit.HOURS)
                    .create()) {
                List<CompletableFuture<MessageId>> sendFutures = Lists.newArrayList();
                for (int i = 0; i < numMessages; i++) {
                    sendFutures.add(producer.sendAsync("batch-message-" + i));
                }
                CompletableFuture.allOf(sendFutures.toArray(new CompletableFuture[numMessages])).get();
                producedMsgIds = sendFutures.stream().map(future -> future.join()).collect(Collectors.toList());
            }

            for (int i = 0; i < numMessages; i++) {
                Message<String> m = consumer.receive();
                assertEquals(producedMsgIds.get(i), m.getMessageId());
                assertEquals("batch-message-" + i, m.getValue());
            }
        }
    }

    // inspect the message ids
    for (int i = 0; i < 5; i++) {
        assertTrue(producedMsgIds.get(i) instanceof BatchMessageIdImpl);
        BatchMessageIdImpl mid = (BatchMessageIdImpl) producedMsgIds.get(i);
        log.info("Message {} id : {}", i, mid);

        assertEquals(i, mid.getBatchIndex());
    }
    for (int i = 5; i < 10; i++) {
        assertTrue(producedMsgIds.get(i) instanceof BatchMessageIdImpl);
        BatchMessageIdImpl mid = (BatchMessageIdImpl) producedMsgIds.get(i);
        log.info("Message {} id : {}", i, mid);

        assertEquals(i - 5, mid.getBatchIndex());
    }
}
From source file:org.apache.samza.system.eventhub.producer.EventHubSystemProducer.java
@Override
public synchronized void stop() {
    LOG.info("Stopping producer.");

    streamPartitionSenders.values().forEach((streamPartitionSender) -> {
        List<CompletableFuture<Void>> futures = new ArrayList<>();
        streamPartitionSender.forEach((key, value) -> futures.add(value.close()));
        CompletableFuture<Void> future = CompletableFuture
                .allOf(futures.toArray(new CompletableFuture[futures.size()]));
        try {
            future.get(DEFAULT_SHUTDOWN_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
        } catch (ExecutionException | InterruptedException | TimeoutException e) {
            LOG.error("Closing the partition sender failed ", e);
        }
    });
    perStreamEventHubClientManagers.values().parallelStream()
            .forEach(ehClient -> ehClient.close(DEFAULT_SHUTDOWN_TIMEOUT_MILLIS));
    perStreamEventHubClientManagers.clear();
    if (config.getPerPartitionConnection(systemName)) {
        perPartitionEventHubClients.values().stream().flatMap(map -> map.values().stream())
                .forEach(ehClient -> ehClient.close(DEFAULT_SHUTDOWN_TIMEOUT_MILLIS));
        perPartitionEventHubClients.clear();
    }
    isStarted = false;
    isInitialized = false;
    LOG.info("EventHubSystemProducer stopped.");
}
From source file:org.apache.servicecomb.demo.springmvc.client.TestDownload.java
@SuppressWarnings("unchecked") public void runRest() { futures.add(checkFile(intf.tempFileEntity(content))); futures.add(checkFuture(templateGet("tempFileEntity").saveAsBytes())); futures.add(checkFile(intf.tempFilePart(content))); futures.add(checkFuture(templateGet("tempFilePart").saveAsString())); futures.add(checkFile(intf.file(content))); futures.add(checkFuture(templateGet("file").saveAsString())); {/* www . j a va2s.c o m*/ ReadStreamPart part = intf.chineseAndSpaceFile(content); TestMgr.check(" .test.txt", part.getSubmittedFileName()); futures.add(checkFile(part)); part = templateGet("chineseAndSpaceFile"); TestMgr.check(" .test.txt", part.getSubmittedFileName()); futures.add(checkFuture(part.saveAsString())); } futures.add(checkFile(intf.resource(content))); futures.add(checkFuture(templateGet("resource").saveAsString())); futures.add(checkFile(intf.entityResource(content))); futures.add(checkFuture(templateGet("entityResource").saveAsString())); futures.add(checkFile(intf.entityInputStream(content))); futures.add(checkFuture(templateGet("entityInputStream").saveAsString())); futures.add(checkFile(intf.bytes(content))); futures.add(checkFuture(templateGet("bytes").saveAsString())); futures.add(checkFile(intf.netInputStream(content))); futures.add(checkFuture(templateGet("netInputStream").saveAsString())); try { CompletableFuture.allOf( Iterables.toArray((List<CompletableFuture<Object>>) (Object) futures, CompletableFuture.class)) .get(); } catch (InterruptedException | ExecutionException e1) { TestMgr.failed("test download failed.", e1); } }
From source file:org.apache.storm.localizer.AsyncLocalizer.java
private CompletableFuture<Void> downloadOrUpdate(LocallyCachedBlob... blobs) {
    CompletableFuture<Void>[] all = new CompletableFuture[blobs.length];
    for (int i = 0; i < blobs.length; i++) {
        final LocallyCachedBlob blob = blobs[i];
        all[i] = CompletableFuture.runAsync(() -> {
            LOG.debug("STARTING download of {}", blob);
            try (ClientBlobStore blobStore = ServerUtils.getClientBlobStoreForSupervisor(conf)) {
                boolean done = false;
                long failures = 0;
                while (!done) {
                    try {
                        synchronized (blob) {
                            long localVersion = blob.getLocalVersion();
                            long remoteVersion = blob.getRemoteVersion(blobStore);
                            if (localVersion != remoteVersion) {
                                try {
                                    long newVersion = blob.downloadToTempLocation(blobStore);
                                    blob.informAllOfChangeAndWaitForConsensus();
                                    blob.commitNewVersion(newVersion);
                                    blob.informAllChangeComplete();
                                } finally {
                                    blob.cleanupOrphanedData();
                                }
                            }
                        }
                        done = true;
                    } catch (Exception e) {
                        failures++;
                        if (failures > blobDownloadRetries) {
                            throw new RuntimeException("Could not download...", e);
                        }
                        LOG.warn("Failed to download blob {} will try again in {} ms", blob,
                                ATTEMPTS_INTERVAL_TIME, e);
                        Utils.sleep(ATTEMPTS_INTERVAL_TIME);
                    }
                }
            }
            LOG.debug("FINISHED download of {}", blob);
        }, execService);
    }
    return CompletableFuture.allOf(all);
}
From source file:org.dhatim.fastexcel.Correctness.java
@Test
public void multipleWorksheets() throws Exception {
    int numWs = 10;
    int numRows = 5000;
    int numCols = 6;
    byte[] data = writeWorkbook(wb -> {
        @SuppressWarnings("unchecked")
        CompletableFuture<Void>[] cfs = new CompletableFuture[numWs];
        for (int i = 0; i < cfs.length; ++i) {
            Worksheet ws = wb.newWorksheet("Sheet " + i);
            CompletableFuture<Void> cf = CompletableFuture.runAsync(() -> {
                for (int j = 0; j < numCols; ++j) {
                    ws.value(0, j, "Column " + j);
                    ws.style(0, j).bold().fontSize(12).fillColor(Color.GRAY2).set();
                    for (int k = 1; k <= numRows; ++k) {
                        switch (j) {
                        case 0:
                            ws.value(k, j, "String value " + k);
                            break;
                        case 1:
                            ws.value(k, j, 2);
                            break;
                        case 2:
                            ws.value(k, j, 3L);
                            break;
                        case 3:
                            ws.value(k, j, 0.123);
                            break;
                        case 4:
                            ws.value(k, j, new Date());
                            ws.style(k, j).format("yyyy-MM-dd HH:mm:ss").set();
                            break;
                        case 5:
                            ws.value(k, j, LocalDate.now());
                            ws.style(k, j).format("yyyy-MM-dd").set();
                            break;
                        default:
                            throw new IllegalArgumentException();
                        }
                    }
                }
                ws.formula(numRows + 1, 1, "=SUM(" + ws.range(1, 1, numRows, 1).toString() + ")");
                ws.formula(numRows + 1, 2, "=SUM(" + ws.range(1, 2, numRows, 2).toString() + ")");
                ws.formula(numRows + 1, 3, "=SUM(" + ws.range(1, 3, numRows, 3).toString() + ")");
                ws.formula(numRows + 1, 4, "=AVERAGE(" + ws.range(1, 4, numRows, 4).toString() + ")");
                ws.style(numRows + 1, 4).format("yyyy-MM-dd HH:mm:ss").set();
                ws.formula(numRows + 1, 5, "=AVERAGE(" + ws.range(1, 5, numRows, 5).toString() + ")");
                ws.style(numRows + 1, 5).format("yyyy-MM-dd").bold().italic().fontColor(Color.RED)
                        .fontName("Garamond").fontSize(new BigDecimal("14.5")).horizontalAlignment("center")
                        .verticalAlignment("top").wrapText(true).set();
                ws.range(1, 0, numRows, numCols - 1).style().borderColor(Color.RED).borderStyle("thick")
                        .shadeAlternateRows(Color.RED).set();
            });
            cfs[i] = cf;
        }
        try {
            CompletableFuture.allOf(cfs).get();
        } catch (InterruptedException | ExecutionException ex) {
            throw new RuntimeException(ex);
        }
    });

    // Check generated workbook with Apache POI
    XSSFWorkbook xwb = new XSSFWorkbook(new ByteArrayInputStream(data));
    assertThat(xwb.getActiveSheetIndex()).isEqualTo(0);
    assertThat(xwb.getNumberOfSheets()).isEqualTo(numWs);
    for (int i = 0; i < numWs; ++i) {
        assertThat(xwb.getSheetName(i)).isEqualTo("Sheet " + i);
        XSSFSheet xws = xwb.getSheetAt(i);
        assertThat(xws.getLastRowNum()).isEqualTo(numRows + 1);
        for (int j = 1; j <= numRows; ++j) {
            assertThat(xws.getRow(j).getCell(0).getStringCellValue()).isEqualTo("String value " + j);
        }
    }
}