Usage examples for java.util.concurrent.CompletableFuture.isDone()
public boolean isDone()
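isDone() returns true if this CompletableFuture completed in any fashion: normally, exceptionally, or via cancellation. A minimal, self-contained sketch of those three cases (not taken from any of the projects below):

import java.util.concurrent.CompletableFuture;

public class IsDoneDemo {
    public static void main(String[] args) {
        CompletableFuture<String> pending = new CompletableFuture<>();
        System.out.println(pending.isDone()); // false: nothing has completed it yet

        pending.complete("value");
        System.out.println(pending.isDone()); // true: normal completion

        CompletableFuture<String> failed = new CompletableFuture<>();
        failed.completeExceptionally(new RuntimeException("boom"));
        System.out.println(failed.isDone()); // true: exceptional completion also counts

        CompletableFuture<String> cancelled = new CompletableFuture<>();
        cancelled.cancel(true);
        System.out.println(cancelled.isDone()); // true: cancellation counts as done too
    }
}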
From source file: example.springdata.jpa.java8.Java8IntegrationTests.java

/**
 * Here we demonstrate the usage of {@link CompletableFuture} as a result wrapper for asynchronous
 * repository query methods. Note that we need to disable the surrounding transaction to be able to
 * asynchronously read the written data from another thread within the same test method.
 */
@Test
@Transactional(propagation = Propagation.NOT_SUPPORTED)
public void supportsCompletableFuturesAsReturnTypeWrapper() throws Exception {

    repository.save(new Customer("Customer1", "Foo"));
    repository.save(new Customer("Customer2", "Bar"));

    CompletableFuture<Void> future = repository.readAllBy().thenAccept(customers -> {
        assertThat(customers, hasSize(2));
        customers.forEach(customer -> log.info(customer.toString()));
        log.info("Completed!");
    });

    while (!future.isDone()) {
        log.info("Waiting for the CompletableFuture to finish...");
        TimeUnit.MILLISECONDS.sleep(500);
    }

    future.get();

    log.info("Done!");
}
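The polling loop above keeps the test thread spinning so it can log progress. When no progress logging is needed, the same bounded wait is usually expressed as a timed get(). A minimal sketch, independent of the Spring Data test:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimedGetDemo {
    public static void main(String[] args) throws Exception {
        CompletableFuture<Void> future =
                CompletableFuture.runAsync(() -> System.out.println("work done"));
        try {
            // Blocks until the future completes or five seconds elapse,
            // replacing the isDone() polling loop.
            future.get(5, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            System.err.println("future did not complete within 5 seconds");
        }
    }
}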
From source file: io.pravega.test.system.ControllerFailoverTest.java

@Test(timeout = 180000)
public void failoverTest() throws URISyntaxException, InterruptedException {
    String scope = "testFailoverScope" + RandomStringUtils.randomAlphabetic(5);
    String stream = "testFailoverStream" + RandomStringUtils.randomAlphabetic(5);
    int initialSegments = 2;
    List<Integer> segmentsToSeal = Collections.singletonList(0);
    Map<Double, Double> newRangesToCreate = new HashMap<>();
    newRangesToCreate.put(0.0, 0.25);
    newRangesToCreate.put(0.25, 0.5);
    long lease = 29000;
    long maxExecutionTime = 60000;
    long scaleGracePeriod = 30000;

    // Connect with the first controller instance.
    URI controllerUri = getTestControllerServiceURI();
    Controller controller = new ControllerImpl(controllerUri);

    // Create scope, stream, and a transaction with a high timeout value.
    controller.createScope(scope).join();
    log.info("Scope {} created successfully", scope);

    createStream(controller, scope, stream, ScalingPolicy.fixed(initialSegments));
    log.info("Stream {}/{} created successfully", scope, stream);

    long txnCreationTimestamp = System.nanoTime();
    TxnSegments txnSegments = controller
            .createTransaction(new StreamImpl(scope, stream), lease, maxExecutionTime, scaleGracePeriod)
            .join();
    log.info("Transaction {} created successfully, beginTime={}", txnSegments.getTxnId(),
            txnCreationTimestamp);

    // Initiate the scale operation. It will block until the ongoing transaction is complete.
    CompletableFuture<Boolean> scaleFuture = controller
            .scaleStream(new StreamImpl(scope, stream), segmentsToSeal, newRangesToCreate, EXECUTOR_SERVICE)
            .getFuture();

    // Ensure that the scale is not yet done.
    log.info("Status of scale operation isDone={}", scaleFuture.isDone());
    Assert.assertFalse(scaleFuture.isDone());

    // Now stop the controller instance executing the scale operation.
    stopTestControllerService();
    log.info("Successfully stopped test controller service");

    // Connect to another controller instance.
    controllerUri = getControllerURI();
    controller = new ControllerImpl(controllerUri);

    // Fetch the status of the transaction.
    log.info("Fetching status of transaction {}, time elapsed since its creation={}",
            txnSegments.getTxnId(), System.nanoTime() - txnCreationTimestamp);
    Transaction.Status status = controller
            .checkTransactionStatus(new StreamImpl(scope, stream), txnSegments.getTxnId()).join();
    log.info("Transaction {} status={}", txnSegments.getTxnId(), status);

    if (status == Transaction.Status.OPEN) {
        // Abort the ongoing transaction.
        log.info("Trying to abort transaction {}, by sending request to controller at {}",
                txnSegments.getTxnId(), controllerUri);
        controller.abortTransaction(new StreamImpl(scope, stream), txnSegments.getTxnId()).join();
    }

    // The scale operation should now complete on the second controller instance.
    // Sleep for some time for it to complete.
    Thread.sleep(90000);

    // Ensure that the stream has 3 segments now.
    log.info("Checking whether scale operation succeeded by fetching current segments");
    StreamSegments streamSegments = controller.getCurrentSegments(scope, stream).join();
    log.info("Current segment count={}", streamSegments.getSegments().size());
    Assert.assertEquals(initialSegments - segmentsToSeal.size() + newRangesToCreate.size(),
            streamSegments.getSegments().size());
}
From source file: eu.itesla_project.modules.wca.WCATool.java

@Override
public void run(CommandLine line) throws Exception {
    Path caseFile = Paths.get(line.getOptionValue("case-file"));
    String offlineWorkflowId = line.getOptionValue("offline-workflow-id"); // can be null, meaning use no offline security rules
    Interval histoInterval = Interval.parse(line.getOptionValue("history-interval"));
    String rulesDbName = line.hasOption("rules-db-name") ? line.getOptionValue("rules-db-name")
            : OfflineConfig.DEFAULT_RULES_DB_NAME;
    double purityThreshold = DEFAULT_PURITY_THRESHOLD;
    if (line.hasOption("purity-threshold")) {
        purityThreshold = Double.parseDouble(line.getOptionValue("purity-threshold"));
    }
    Set<SecurityIndexType> securityIndexTypes = null;
    if (line.hasOption("security-index-types")) {
        securityIndexTypes = Arrays.stream(line.getOptionValue("security-index-types").split(","))
                .map(SecurityIndexType::valueOf).collect(Collectors.toSet());
    }
    Path outputCsvFile = null;
    if (line.hasOption("output-csv-file")) {
        outputCsvFile = Paths.get(line.getOptionValue("output-csv-file"));
    }
    boolean stopWcaOnViolations = DEFAULT_STOP_WCA_ON_VIOLATIONS;
    if (line.hasOption("stop-on-violations")) {
        stopWcaOnViolations = Boolean.parseBoolean(line.getOptionValue("stop-on-violations"));
    }
    try (ComputationManager computationManager = new LocalComputationManager()) {
        WCAParameters parameters = new WCAParameters(histoInterval, offlineWorkflowId, securityIndexTypes,
                purityThreshold, stopWcaOnViolations);
        OnlineConfig config = OnlineConfig.load();
        ContingenciesAndActionsDatabaseClient contingenciesDb = config.getContingencyDbClientFactoryClass()
                .newInstance().create();
        LoadFlowFactory loadFlowFactory = config.getLoadFlowFactoryClass().newInstance();
        WCAFactory wcaFactory = config.getWcaFactoryClass().newInstance();
        try (HistoDbClient histoDbClient = new SynchronizedHistoDbClient(
                config.getHistoDbClientFactoryClass().newInstance().create());
                RulesDbClient rulesDbClient = config.getRulesDbClientFactoryClass().newInstance()
                        .create(rulesDbName)) {
            UncertaintiesAnalyserFactory uncertaintiesAnalyserFactory = config
                    .getUncertaintiesAnalyserFactoryClass().newInstance();
            if (Files.isRegularFile(caseFile)) {
                if (outputCsvFile != null) {
                    throw new RuntimeException(
                            "In case of a single WCA, only standard output pretty print is supported");
                }
                System.out.println("loading case...");
                // load the network
                Network network = Importers.loadNetwork(caseFile);
                if (network == null) {
                    throw new RuntimeException("Case '" + caseFile + "' not found");
                }
                network.getStateManager().allowStateMultiThreadAccess(true);
                WCA wca = wcaFactory.create(network, computationManager, histoDbClient, rulesDbClient,
                        uncertaintiesAnalyserFactory, contingenciesDb, loadFlowFactory);
                WCAAsyncResult result = wca.runAsync(StateManager.INITIAL_STATE_ID, parameters).join();
                Table table = new Table(3, BorderStyle.CLASSIC_WIDE);
                table.addCell("Contingency");
                table.addCell("Cluster");
                table.addCell("Causes");
                List<CompletableFuture<WCACluster>> futureClusters = new LinkedList<>(result.getClusters());
                while (futureClusters.size() > 0) {
                    CompletableFuture
                            .anyOf(futureClusters.toArray(new CompletableFuture[futureClusters.size()]))
                            .join();
                    for (Iterator<CompletableFuture<WCACluster>> it = futureClusters.iterator(); it.hasNext();) {
                        CompletableFuture<WCACluster> futureCluster = it.next();
                        if (futureCluster.isDone()) {
                            it.remove();
                            WCACluster cluster = futureCluster.get();
                            if (cluster != null) {
                                System.out.println("contingency " + cluster.getContingency().getId()
                                        + " done: " + cluster.getNum() + " (" + cluster.getOrigin() + ")");
                                table.addCell(cluster.getContingency().getId());
                                table.addCell(cluster.getNum() + " (" + cluster.getOrigin() + ")");
                                List<String> sortedCauses = cluster.getCauses().stream().sorted()
                                        .collect(Collectors.toList());
                                if (sortedCauses != null && sortedCauses.size() > 0) {
                                    table.addCell(sortedCauses.get(0));
                                    for (int i = 1; i < sortedCauses.size(); i++) {
                                        table.addCell("");
                                        table.addCell("");
                                        table.addCell(sortedCauses.get(i));
                                    }
                                } else {
                                    table.addCell("");
                                }
                            }
                        }
                    }
                }
                System.out.println(table.render());
            } else if (Files.isDirectory(caseFile)) {
                if (outputCsvFile == null) {
                    throw new RuntimeException(
                            "In case of multiple WCAs, you have to specify an output CSV file");
                }
                Map<String, Map<String, WCACluster>> clusterPerContingencyPerBaseCase = Collections
                        .synchronizedMap(new TreeMap<>());
                Set<String> contingencyIds = Collections.synchronizedSet(new TreeSet<>());
                Importers.loadNetworks(caseFile, true, network -> {
                    try {
                        network.getStateManager().allowStateMultiThreadAccess(true);
                        String baseStateId = network.getId();
                        network.getStateManager().cloneState(StateManager.INITIAL_STATE_ID, baseStateId);
                        network.getStateManager().setWorkingState(baseStateId);
                        WCA wca = wcaFactory.create(network, computationManager, histoDbClient, rulesDbClient,
                                uncertaintiesAnalyserFactory, contingenciesDb, loadFlowFactory);
                        WCAAsyncResult result = wca.runAsync(baseStateId, parameters).join();
                        Map<String, WCACluster> clusterPerContingency = new HashMap<>();
                        List<CompletableFuture<WCACluster>> futureClusters = new LinkedList<>(
                                result.getClusters());
                        while (futureClusters.size() > 0) {
                            CompletableFuture
                                    .anyOf(futureClusters.toArray(new CompletableFuture[futureClusters.size()]))
                                    .join();
                            for (Iterator<CompletableFuture<WCACluster>> it = futureClusters.iterator(); it
                                    .hasNext();) {
                                CompletableFuture<WCACluster> futureCluster = it.next();
                                if (futureCluster.isDone()) {
                                    it.remove();
                                    WCACluster cluster = futureCluster.get();
                                    if (cluster != null) {
                                        System.out.println("case " + network.getId() + ", contingency "
                                                + cluster.getContingency().getId() + " done: "
                                                + cluster.getNum() + " (" + cluster.getOrigin() + ")");
                                        clusterPerContingency.put(cluster.getContingency().getId(), cluster);
                                        contingencyIds.add(cluster.getContingency().getId());
                                    }
                                }
                            }
                        }
                        clusterPerContingencyPerBaseCase.put(network.getId(), clusterPerContingency);
                    } catch (Exception e) {
                        LOGGER.error(e.toString(), e);
                    }
                }, dataSource -> System.out.println("loading case " + dataSource.getBaseName() + "..."));
                writeClustersCsv(clusterPerContingencyPerBaseCase, contingencyIds, outputCsvFile);
            }
        }
    }
}
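The core pattern in the WCATool example -- block until any future finishes with anyOf(), then sweep the list with isDone() to drain every completed one -- reduces to this self-contained sketch (names are illustrative, not from the project):

import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class AnyOfDrainDemo {
    public static void main(String[] args) {
        List<CompletableFuture<Integer>> futures = new LinkedList<>();
        for (int i = 0; i < 5; i++) {
            final int n = i;
            futures.add(CompletableFuture.supplyAsync(() -> n * n));
        }
        // Wait for at least one future to finish, then remove and handle all
        // finished ones, repeating until the list is empty.
        while (!futures.isEmpty()) {
            CompletableFuture.anyOf(futures.toArray(new CompletableFuture[0])).join();
            for (Iterator<CompletableFuture<Integer>> it = futures.iterator(); it.hasNext();) {
                CompletableFuture<Integer> f = it.next();
                if (f.isDone()) {
                    it.remove();
                    System.out.println("result: " + f.join());
                }
            }
        }
    }
}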
From source file: io.ventu.rpc.amqp.AmqpInvokerimplTest.java

@Test
public void responseReceiver_handleDelivery_onEncodingException_exception()
        throws EncodingException, IOException, InterruptedException, ExecutionException, TimeoutException {
    ResponseReceiverImpl receiver = new ResponseReceiverImpl(serializer, new Validator() {
    }, 1, TimeUnit.MINUTES);

    String correlationId = "987654321";
    CompletableFuture<Res> answer = receiver.put(correlationId, Res.class);
    assertFalse(answer.isDone());
    assertFalse(answer.isCompletedExceptionally());

    receiver.handleDelivery(correlationId, new byte[] {});
    assertTrue(answer.isDone());
    assertTrue(answer.isCompletedExceptionally());

    exception.expect(ExecutionException.class);
    try {
        answer.get();
    } catch (ExecutionException ex) {
        assertTrue(ex.getCause() instanceof EncodingException);
        assertEquals("failed to decode JSON", ex.getCause().getMessage());
        throw ex;
    }
}
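The assertions in this and the following tests rely on the pairing of isDone() with isCompletedExceptionally(): an exceptionally completed future reports both as true. A minimal sketch of that behaviour (independent of the library under test):

import java.util.concurrent.CompletableFuture;

public class ExceptionallyDoneDemo {
    public static void main(String[] args) {
        CompletableFuture<String> f = new CompletableFuture<>();
        System.out.println(f.isDone());                    // false
        System.out.println(f.isCompletedExceptionally());  // false

        f.completeExceptionally(new IllegalStateException("decode failed"));
        System.out.println(f.isDone());                    // true: exceptional completion is still "done"
        System.out.println(f.isCompletedExceptionally());  // true
    }
}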
From source file: io.ventu.rpc.amqp.AmqpInvokerimplTest.java

@Test
public void responseReceiver_put_createsFuture_andCleansUpExpires()
        throws InterruptedException, ExecutionException, TimeoutException {
    ResponseReceiver receiver = new ResponseReceiverImpl(serializer, new Validator() {
    }, 100, TimeUnit.MILLISECONDS);

    CompletableFuture<Res> actual = receiver.put("987654321", Res.class);
    assertFalse(actual.isDone());
    assertFalse(actual.isCompletedExceptionally());

    // wait for the expiry timeout
    Thread.sleep(150);

    // trigger cache operations (to evict the record)
    for (int i = 0; i < 1000; i++) {
        receiver.put(UUID.randomUUID().toString(), Res.class);
    }

    exception.expect(ExecutionException.class);
    try {
        actual.get();
    } catch (ExecutionException ex) {
        assertTrue(ex.getCause() instanceof TimeoutException);
        assertEquals(
                "Request io.ventu.rpc.amqp.AmqpInvokerimplTest$Res with correlationId 987654321 has expired.",
                ex.getCause().getMessage());
        throw ex;
    }
}
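The behaviour tested here -- a pending future failed with a TimeoutException after a deadline -- has a standard-library analogue since Java 9 in orTimeout(). A minimal sketch of that analogue (not the library's own mechanism, which relies on cache eviction):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class OrTimeoutDemo {
    public static void main(String[] args) throws InterruptedException {
        // orTimeout() completes a still-pending future exceptionally once the deadline passes.
        CompletableFuture<String> f = new CompletableFuture<String>().orTimeout(100, TimeUnit.MILLISECONDS);
        Thread.sleep(150);
        System.out.println(f.isDone());                    // true
        System.out.println(f.isCompletedExceptionally());  // true
        try {
            f.get();
        } catch (ExecutionException ex) {
            System.out.println(ex.getCause() instanceof TimeoutException); // true
        }
    }
}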
From source file: io.ventu.rpc.amqp.AmqpInvokerimplTest.java

@Test
public void responseReceiver_handleDelivery_onEncodingException_withErrorField_APIException()
        throws EncodingException, IOException, InterruptedException, ExecutionException, TimeoutException {
    ResponseReceiverImpl receiver = new ResponseReceiverImpl(serializer, new Validator() {
    }, 1, TimeUnit.MINUTES);

    String correlationId = "987654321";
    CompletableFuture<Res> answer = receiver.put(correlationId, Res.class);
    assertFalse(answer.isDone());
    assertFalse(answer.isCompletedExceptionally());

    Map<String, Object> res = Maps.newHashMap();
    res.put("error", Integer.valueOf(371));
    receiver.handleDelivery(correlationId, serializer.encode(res));
    assertTrue(answer.isDone());
    assertTrue(answer.isCompletedExceptionally());

    exception.expect(ExecutionException.class);
    try {
        answer.get();
    } catch (ExecutionException ex) {
        assertTrue(ex.getCause() instanceof ApiException);
        assertEquals("371", ex.getCause().getMessage());
        throw ex;
    }
}
From source file: io.ventu.rpc.amqp.AmqpInvokerimplTest.java

@Test
public void responseReceiver_handleDelivery_onEncodingException_MapWithNoError_exception()
        throws EncodingException, IOException, InterruptedException, ExecutionException, TimeoutException {
    ResponseReceiverImpl receiver = new ResponseReceiverImpl(serializer, new Validator() {
    }, 1, TimeUnit.MINUTES);

    String correlationId = "987654321";
    CompletableFuture<Res> answer = receiver.put(correlationId, Res.class);
    assertFalse(answer.isDone());
    assertFalse(answer.isCompletedExceptionally());

    Map<String, Object> res = Maps.newHashMap();
    res.put("value", "notAnInt");
    receiver.handleDelivery(correlationId, serializer.encode(res));
    assertTrue(answer.isDone());
    assertTrue(answer.isCompletedExceptionally());

    exception.expect(ExecutionException.class);
    try {
        answer.get();
    } catch (ExecutionException ex) {
        assertTrue(ex.getCause() instanceof EncodingException);
        assertEquals("failed to decode JSON", ex.getCause().getMessage());
        throw ex;
    }
}
From source file: io.ventu.rpc.amqp.AmqpInvokerimplTest.java

@Test
public void responseReceiver_handleDelivery_onAPIException_exception()
        throws EncodingException, IOException, InterruptedException, ExecutionException {
    Validator validator = new Validator() {
        @Override
        public <T> void validate(T value) throws ApiException, IllegalArgumentException {
            throw new ApiException("boom");
        }
    };
    ResponseReceiverImpl receiver = new ResponseReceiverImpl(serializer, validator, 1, TimeUnit.MINUTES);

    String correlationId = "987654321";
    CompletableFuture<Res> answer = receiver.put(correlationId, Res.class);
    assertFalse(answer.isDone());
    assertFalse(answer.isCompletedExceptionally());

    receiver.handleDelivery(correlationId, serializer.encode(Maps.newHashMap()));
    assertTrue(answer.isDone());
    assertTrue(answer.isCompletedExceptionally());

    exception.expect(ExecutionException.class);
    try {
        answer.get();
    } catch (ExecutionException ex) {
        assertTrue(ex.getCause() instanceof ApiException);
        assertEquals("boom", ex.getCause().getMessage());
        throw ex;
    }
}
From source file: io.ventu.rpc.amqp.AmqpInvokerimplTest.java

@Test
public void responseReceiver_handleDelivery_responseValidated()
        throws EncodingException, IOException, InterruptedException, ExecutionException, TimeoutException {
    final List<Boolean> invocations = Lists.newArrayList();
    Validator validator = new Validator() {
        @Override
        public <T> void validate(T value) throws ApiException, IllegalArgumentException {
            invocations.add(Boolean.TRUE);
            assertTrue(value instanceof Res);
            assertEquals(25, ((Res) value).value);
        }
    };
    ResponseReceiverImpl receiver = new ResponseReceiverImpl(serializer, validator, 1, TimeUnit.MINUTES);

    Res res = new Res();
    res.value = 25;

    String correlationId = "987654321";
    CompletableFuture<Res> answer = receiver.put(correlationId, Res.class);
    assertFalse(answer.isDone());
    assertFalse(answer.isCompletedExceptionally());

    receiver.handleDelivery(correlationId, serializer.encode(res));
    assertEquals(1, invocations.size());
    assertTrue(answer.isDone());
    assertFalse(answer.isCompletedExceptionally());

    Res actual = answer.get(500, TimeUnit.MILLISECONDS);
    assertEquals(25, actual.value);
}
From source file: io.ventu.rpc.amqp.AmqpInvokerimplTest.java

@Test
public void invoke_onOkRequest_encodedAndCorrectlyPublishedToAMQP()
        throws IOException, TimeoutException, ExecutionException, InterruptedException {
    String instanceId = "123456789";
    Req req = new Req();

    Channel channel = mock(Channel.class);

    CompletableFuture<Res> answer = new CompletableFuture<>();
    ResponseReceiver receiver = mock(ResponseReceiver.class);
    doReturn(answer).when(receiver).put(anyString(), any());

    ChannelProvider channelProvider = mock(ChannelProvider.class);
    doReturn(channel).when(channelProvider).provide(instanceId, receiver);
    doReturn(DEFAULT_RPC_EXCHANGE).when(channelProvider).rpcExchange();

    RemoteInvoker invoker = new AmqpInvokerImpl(instanceId, channelProvider, receiver);
    CompletableFuture<Res> actual = invoker.invoke(req, Res.class);
    assertSame(answer, actual);
    assertFalse(actual.isDone());
    assertFalse(actual.isCompletedExceptionally());

    verify(channelProvider).provide(instanceId, receiver);
    verify(channelProvider).rpcExchange();
    verifyNoMoreInteractions(channelProvider);

    verify(channel).basicPublish(anyString(), any(), any(), any());
    verifyNoMoreInteractions(channel);

    verify(receiver).put(anyString(), any());
    verifyNoMoreInteractions(receiver);
}