Usage examples for java.util.concurrent.CompletableFuture#get()
@SuppressWarnings("unchecked") public T get() throws InterruptedException, ExecutionException
From source file:info.archinnov.achilles.it.TestAsyncDSLSimpleEntity.java
@Test
public void should_dsl_select_slice_async() throws Exception {
    // Given: insert 9 rows for a single partition, one per day 2015-10-01 .. 2015-10-09.
    final Map<String, Object> values = new HashMap<>();
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    values.put("id", id);
    // Parse boundary dates in GMT so the slice bounds match the literal timestamps below.
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
    dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
    final Date date1 = dateFormat.parse("2015-10-01 00:00:00 GMT");
    final Date date9 = dateFormat.parse("2015-10-09 00:00:00 GMT");
    values.put("date1", "'2015-10-01 00:00:00+0000'");
    values.put("date2", "'2015-10-02 00:00:00+0000'");
    values.put("date3", "'2015-10-03 00:00:00+0000'");
    values.put("date4", "'2015-10-04 00:00:00+0000'");
    values.put("date5", "'2015-10-05 00:00:00+0000'");
    values.put("date6", "'2015-10-06 00:00:00+0000'");
    values.put("date7", "'2015-10-07 00:00:00+0000'");
    values.put("date8", "'2015-10-08 00:00:00+0000'");
    values.put("date9", "'2015-10-09 00:00:00+0000'");
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_many_rows.cql", values);
    // Latch released by the async result-set listener; used to prove the listener ran.
    final CountDownLatch latch = new CountDownLatch(1);
    final CassandraLogAsserter logAsserter = new CassandraLogAsserter();
    logAsserter.prepareLogLevel(ASYNC_LOGGER_STRING, "%msg - [%thread]%n");
    // When: slice query [date1, date9) executed asynchronously with tracing enabled.
    final CompletableFuture<List<SimpleEntity>> future = manager.dsl().select().consistencyList().simpleSet()
            .simpleMap().value().simpleMap().fromBaseTable().where().id_Eq(id).date_Gte_And_Lt(date1, date9)
            .withResultSetAsyncListener(rs -> {
                LOGGER.info(CALLED);
                latch.countDown();
                return rs;
            }).withTracing().getListAsync();
    // Then: half-open bound excludes date9, so 8 of the 9 rows are returned,
    // and the listener must have run on the async executor thread.
    latch.await();
    assertThat(future.get()).hasSize(8);
    logAsserter.assertContains("Called - [achilles-default-executor");
}
From source file:org.apache.bookkeeper.stream.storage.impl.TestStorageContainerStoreImpl.java
@Test
public void testRangeMockStorageContainer() throws Exception {
    // Stub the range store so the canned request yields an already-completed success.
    RangeResponse expectedResponse = createRangeResponse(StatusCode.SUCCESS);
    RangeRequest rangeRequest = createRangeRequest();
    when(mockRangeStoreService.range(rangeRequest))
            .thenReturn(CompletableFuture.completedFuture(expectedResponse));

    // Exercise the table service; bridge its ListenableFuture to a CompletableFuture.
    CompletableFuture<RangeResponse> resultFuture = fromListenableFuture(tableService.range(rangeRequest));

    // The service must delegate exactly once and hand back the very same response instance.
    verify(mockRangeStoreService, times(1)).range(eq(rangeRequest));
    assertTrue(expectedResponse == resultFuture.get());
}
From source file:org.apache.bookkeeper.stream.storage.impl.TestStorageContainerStoreImpl.java
@Test
public void testDeleteMockStorageContainer() throws Exception {
    // Stub the range store so the canned delete request resolves immediately.
    DeleteRangeResponse expectedResponse = createDeleteResponse(StatusCode.SUCCESS);
    DeleteRangeRequest deleteRequest = createDeleteRequest();
    when(mockRangeStoreService.delete(deleteRequest))
            .thenReturn(CompletableFuture.completedFuture(expectedResponse));

    // Exercise the table service; bridge its ListenableFuture to a CompletableFuture.
    CompletableFuture<DeleteRangeResponse> resultFuture = fromListenableFuture(tableService.delete(deleteRequest));

    // The service must delegate exactly once and return the very same response instance.
    verify(mockRangeStoreService, times(1)).delete(eq(deleteRequest));
    assertTrue(expectedResponse == resultFuture.get());
}
From source file:eu.itesla_project.modules.wca.WCATool.java
// Entry point of the WCA command: runs a worst-case analysis either on a single
// case file (pretty-printed table on stdout) or on a directory of cases (CSV output).
@Override
public void run(CommandLine line) throws Exception {
    // --- command-line option parsing ---
    Path caseFile = Paths.get(line.getOptionValue("case-file"));
    String offlineWorkflowId = line.getOptionValue("offline-workflow-id"); // can be null meaning use no offline security rules
    Interval histoInterval = Interval.parse(line.getOptionValue("history-interval"));
    String rulesDbName = line.hasOption("rules-db-name") ? line.getOptionValue("rules-db-name")
            : OfflineConfig.DEFAULT_RULES_DB_NAME;
    double purityThreshold = DEFAULT_PURITY_THRESHOLD;
    if (line.hasOption("purity-threshold")) {
        purityThreshold = Double.parseDouble(line.getOptionValue("purity-threshold"));
    }
    // Optional comma-separated subset of security index types; null means "all".
    Set<SecurityIndexType> securityIndexTypes = null;
    if (line.hasOption("security-index-types")) {
        securityIndexTypes = Arrays.stream(line.getOptionValue("security-index-types").split(","))
                .map(SecurityIndexType::valueOf).collect(Collectors.toSet());
    }
    Path outputCsvFile = null;
    if (line.hasOption("output-csv-file")) {
        outputCsvFile = Paths.get(line.getOptionValue("output-csv-file"));
    }
    boolean stopWcaOnViolations = DEFAULT_STOP_WCA_ON_VIOLATIONS;
    if (line.hasOption("stop-on-violations")) {
        stopWcaOnViolations = Boolean.parseBoolean(line.getOptionValue("stop-on-violations"));
    }
    // --- resource setup: computation manager and DB clients are closed by try-with-resources ---
    try (ComputationManager computationManager = new LocalComputationManager()) {
        WCAParameters parameters = new WCAParameters(histoInterval, offlineWorkflowId, securityIndexTypes,
                purityThreshold, stopWcaOnViolations);
        OnlineConfig config = OnlineConfig.load();
        ContingenciesAndActionsDatabaseClient contingenciesDb = config.getContingencyDbClientFactoryClass()
                .newInstance().create();
        LoadFlowFactory loadFlowFactory = config.getLoadFlowFactoryClass().newInstance();
        WCAFactory wcaFactory = config.getWcaFactoryClass().newInstance();
        try (HistoDbClient histoDbClient = new SynchronizedHistoDbClient(
                config.getHistoDbClientFactoryClass().newInstance().create());
                RulesDbClient rulesDbClient = config.getRulesDbClientFactoryClass().newInstance()
                        .create(rulesDbName)) {
            UncertaintiesAnalyserFactory uncertaintiesAnalyserFactory = config
                    .getUncertaintiesAnalyserFactoryClass().newInstance();
            if (Files.isRegularFile(caseFile)) {
                // --- single-case mode: results are pretty-printed, CSV output is rejected ---
                if (outputCsvFile != null) {
                    throw new RuntimeException(
                            "In case of single wca, only standard output pretty print is supported");
                }
                System.out.println("loading case...");
                // load the network
                Network network = Importers.loadNetwork(caseFile);
                if (network == null) {
                    throw new RuntimeException("Case '" + caseFile + "' not found");
                }
                network.getStateManager().allowStateMultiThreadAccess(true);
                WCA wca = wcaFactory.create(network, computationManager, histoDbClient, rulesDbClient,
                        uncertaintiesAnalyserFactory, contingenciesDb, loadFlowFactory);
                WCAAsyncResult result = wca.runAsync(StateManager.INITIAL_STATE_ID, parameters).join();
                Table table = new Table(3, BorderStyle.CLASSIC_WIDE);
                table.addCell("Contingency");
                table.addCell("Cluster");
                table.addCell("Causes");
                // Drain cluster futures as they complete: anyOf() wakes us whenever at
                // least one future is done, then completed futures are removed via the
                // iterator so the next anyOf() only waits on the remaining ones.
                List<CompletableFuture<WCACluster>> futureClusters = new LinkedList<>(result.getClusters());
                while (futureClusters.size() > 0) {
                    CompletableFuture
                            .anyOf(futureClusters.toArray(new CompletableFuture[futureClusters.size()])).join();
                    for (Iterator<CompletableFuture<WCACluster>> it = futureClusters.iterator(); it
                            .hasNext();) {
                        CompletableFuture<WCACluster> futureCluster = it.next();
                        if (futureCluster.isDone()) {
                            it.remove();
                            WCACluster cluster = futureCluster.get();
                            if (cluster != null) {
                                System.out.println("contingency " + cluster.getContingency().getId()
                                        + " done: " + cluster.getNum() + " (" + cluster.getOrigin() + ")");
                                table.addCell(cluster.getContingency().getId());
                                table.addCell(cluster.getNum() + " (" + cluster.getOrigin() + ")");
                                List<String> sortedCauses = cluster.getCauses().stream().sorted()
                                        .collect(Collectors.toList());
                                // NOTE(review): sortedCauses cannot be null here
                                // (Collectors.toList() never returns null) — the null
                                // check below is redundant but harmless.
                                if (sortedCauses != null && sortedCauses.size() > 0) {
                                    // First cause goes on the current row; each further
                                    // cause gets its own row with empty first two cells.
                                    table.addCell(sortedCauses.get(0));
                                    for (int i = 1; i < sortedCauses.size(); i++) {
                                        table.addCell("");
                                        table.addCell("");
                                        table.addCell(sortedCauses.get(i));
                                    }
                                } else {
                                    table.addCell("");
                                }
                            }
                        }
                    }
                }
                System.out.println(table.render());
            } else if (Files.isDirectory(caseFile)) {
                // --- multi-case mode: every case in the directory is analysed, results go to CSV ---
                if (outputCsvFile == null) {
                    // NOTE(review): message typo — "and ouput" should read "an output".
                    throw new RuntimeException(
                            "In case of multiple wca, you have to specify and ouput to csv file");
                }
                // Synchronized collections: loadNetworks may invoke the consumer from
                // multiple threads in parallel.
                Map<String, Map<String, WCACluster>> clusterPerContingencyPerBaseCase = Collections
                        .synchronizedMap(new TreeMap<>());
                Set<String> contingencyIds = Collections.synchronizedSet(new TreeSet<>());
                Importers.loadNetworks(caseFile, true, network -> {
                    try {
                        network.getStateManager().allowStateMultiThreadAccess(true);
                        // Work on a per-case clone of the initial state so cases don't
                        // interfere with each other.
                        String baseStateId = network.getId();
                        network.getStateManager().cloneState(StateManager.INITIAL_STATE_ID, baseStateId);
                        network.getStateManager().setWorkingState(baseStateId);
                        WCA wca = wcaFactory.create(network, computationManager, histoDbClient, rulesDbClient,
                                uncertaintiesAnalyserFactory, contingenciesDb, loadFlowFactory);
                        WCAAsyncResult result = wca.runAsync(baseStateId, parameters).join();
                        Map<String, WCACluster> clusterPerContingency = new HashMap<>();
                        // Same completed-future draining pattern as the single-case branch.
                        List<CompletableFuture<WCACluster>> futureClusters = new LinkedList<>(
                                result.getClusters());
                        while (futureClusters.size() > 0) {
                            CompletableFuture
                                    .anyOf(futureClusters.toArray(new CompletableFuture[futureClusters.size()]))
                                    .join();
                            for (Iterator<CompletableFuture<WCACluster>> it = futureClusters.iterator(); it
                                    .hasNext();) {
                                CompletableFuture<WCACluster> futureCluster = it.next();
                                if (futureCluster.isDone()) {
                                    it.remove();
                                    WCACluster cluster = futureCluster.get();
                                    if (cluster != null) {
                                        System.out.println("case " + network.getId() + ", contingency "
                                                + cluster.getContingency().getId() + " done: "
                                                + cluster.getNum() + " (" + cluster.getOrigin() + ")");
                                        clusterPerContingency.put(cluster.getContingency().getId(), cluster);
                                        contingencyIds.add(cluster.getContingency().getId());
                                    }
                                }
                            }
                        }
                        clusterPerContingencyPerBaseCase.put(network.getId(), clusterPerContingency);
                    } catch (Exception e) {
                        // Best-effort per case: log and continue with the remaining cases.
                        LOGGER.error(e.toString(), e);
                    }
                },
                        dataSource -> System.out.println("loading case " + dataSource.getBaseName() + "..."));
                writeClustersCsv(clusterPerContingencyPerBaseCase, contingencyIds, outputCsvFile);
            }
        }
    }
}
From source file:org.apache.bookkeeper.stream.storage.impl.TestStorageContainerStoreImpl.java
@Test
public void testGetStreamMockRootStorageContainerStore() throws Exception {
    String colName = "test-get-namespace-no-root-storage-container-store";
    String streamName = colName;

    // Stub the range store: the canned request resolves to a STREAM_NOT_FOUND response.
    GetStreamResponse expectedResp = GetStreamResponse.newBuilder().setCode(StatusCode.STREAM_NOT_FOUND).build();
    GetStreamRequest streamReq = createGetStreamRequest(colName, streamName);
    when(mockRangeStoreService.getStream(streamReq))
            .thenReturn(CompletableFuture.completedFuture(expectedResp));

    // Exercise the root range service; bridge its ListenableFuture to a CompletableFuture.
    CompletableFuture<GetStreamResponse> respFuture = fromListenableFuture(rootRangeService.getStream(streamReq));

    // The service must delegate exactly once and return the very same response instance.
    verify(mockRangeStoreService, times(1)).getStream(streamReq);
    assertTrue(expectedResp == respFuture.get());
}
From source file:io.ventu.rpc.amqp.AmqpInvokerimplTest.java
@Test public void closingInvoker_success() throws IOException, TimeoutException, ExecutionException, InterruptedException { String instanceId = "123456789"; Channel channel = mock(Channel.class); ResponseReceiver receiver = mock(ResponseReceiver.class); ChannelProvider channelProvider = mock(ChannelProvider.class); doReturn(channel).when(channelProvider).provide(instanceId, receiver); RemoteInvoker invoker = new AmqpInvokerImpl(instanceId, channelProvider, receiver); CompletableFuture<Void> actual = invoker.close(); verify(channel).close();// ww w . java 2 s. com verifyNoMoreInteractions(channel); actual.get(); assertTrue(actual.isDone()); assertFalse(actual.isCompletedExceptionally()); }
From source file:org.apache.bookkeeper.stream.storage.impl.TestStorageContainerStoreImpl.java
@Test public void testGetActiveRangesMockManager() throws Exception { GetActiveRangesResponse resp = GetActiveRangesResponse.newBuilder().setCode(StatusCode.STREAM_NOT_FOUND) .build();// w w w . j av a2 s . c om GetActiveRangesRequest request = createGetActiveRangesRequest(34L); when(mockRangeStoreService.getActiveRanges(request)).thenReturn(CompletableFuture.completedFuture(resp)); CompletableFuture<GetActiveRangesResponse> future = fromListenableFuture( metaRangeService.getActiveRanges(request)); verify(mockRangeStoreService, times(1)).getActiveRanges(request); assertTrue(resp == future.get()); }
From source file:com.xylocore.cassandra.query.TableScanQuery.java
/** * FILLIN/*from w w w. j a v a 2 s . c om*/ * * @param aExecutionContext * @param aParameters * * @return */ public CompletableFuture<Void> execute(TableScanQueryExecutionContext<T> aExecutionContext, Map<String, Object> aParameters) { CompletableFuture<Void> myPartitionFuture = null; if (clusteringQueryNeeded) { Consumer<List<PartitionKeyInfo>> myPartitionKeyProcessor = (myPartitionKeyInfos) -> { ClusterQueryState myState = (ClusterQueryState) myPartitionKeyInfos.get(0); CompletableFuture<Void> myClusterFuture = clusterPagedQuery.execute(aExecutionContext, myState.getPartitionKeys()); try { myClusterFuture.get(); } catch (Exception myException) { throw new CompletionException(myException); } }; PagedQueryExecutionContext<PartitionKeyInfo> myPartitionExecutionContext = PagedQueryExecutionContextBuilder .builder(PartitionKeyInfo.class).entityCreator(() -> { return new ClusterQueryState(); }).reuseEntity(true).entityExtractor(this::partitionKeyExtractor) .entityFilter(aExecutionContext.getPartitionKeyFilter()) .entityProcessor(myPartitionKeyProcessor).build(); myPartitionFuture = partitionPagedQuery.execute(myPartitionExecutionContext); } else { myPartitionFuture = standalonePagedQuery.execute(aExecutionContext); } return myPartitionFuture; }
From source file:co.runrightfast.vertx.demo.testHarness.jmx.DemoMXBeanImpl.java
@Override public String lookupIPAddress(final String dnsServer, final String host) { final DnsClient client = vertx.createDnsClient(53, dnsServer); final CompletableFuture<String> future = new CompletableFuture<>(); client.lookup("vertx.io", result -> { if (result.succeeded()) { future.complete(result.result()); } else {//from w ww. j a va 2s . c o m future.completeExceptionally(result.cause()); } }); try { return future.get(); } catch (final InterruptedException | ExecutionException ex) { throw new RuntimeException(ex); } }
From source file:info.archinnov.achilles.it.TestAsyncCRUDSimpleEntity.java
@Test
public void should_delete_instance_async() throws Exception {
    // Given: a single row inserted directly via CQL script.
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();
    final SimpleEntity entity = new SimpleEntity(id, date, "value");
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));
    // Latch released by the async result-set listener; used to prove the listener ran.
    final CountDownLatch latch = new CountDownLatch(1);
    final CassandraLogAsserter logAsserter = new CassandraLogAsserter();
    logAsserter.prepareLogLevel(ASYNC_LOGGER_STRING, "%msg - [%thread]%n");
    // When: delete the entity asynchronously, capturing execution stats.
    final CompletableFuture<ExecutionInfo> future = manager.crud().delete(entity)
            .withResultSetAsyncListener(rs -> {
                LOGGER.info(CALLED);
                latch.countDown();
                return rs;
            }).executeAsyncWithStats();
    // Then: the listener fired, the row is gone, and the delete's execution info
    // reports a live coordinator host.
    latch.await();
    logAsserter.assertContains("Called");
    final List<Row> rows = session.execute("SELECT * FROM simple WHERE id = " + id).all();
    assertThat(rows).isEmpty();
    final ExecutionInfo executionInfo = future.get();
    assertThat(executionInfo.getQueriedHost().isUp()).isTrue();
}