List of usage examples for java.util.concurrent.CompletableFuture.get(long timeout, TimeUnit unit)
@SuppressWarnings("unchecked") public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException
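Before the project examples, here is a minimal self-contained sketch of the timed get (the class name and values are illustrative, not taken from any of the examples below): the future is completed asynchronously, and get(timeout, unit) blocks the caller for at most the given time, throwing TimeoutException if no result arrives in time.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimedGetExample {
    public static void main(String[] args) throws InterruptedException {
        // The future is completed on another thread once the supplier finishes.
        CompletableFuture<String> future = CompletableFuture.supplyAsync(() -> "done");
        try {
            // Block for at most 5 seconds; throws TimeoutException if no value is available in time.
            String result = future.get(5, TimeUnit.SECONDS);
            System.out.println(result);
        } catch (TimeoutException e) {
            System.err.println("timed out waiting for result");
        } catch (ExecutionException e) {
            System.err.println("computation failed: " + e.getCause());
        }
    }
}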
From source file:co.runrightfast.vertx.orientdb.verticle.OrientDBVerticleTest.java
@Test
public void testEventLogRepository() throws Exception {
    final Vertx vertx = vertxService.getVertx();
    final RunRightFastVerticleId verticleManagerId = EventLogRepository.VERTICLE_ID;

    final CompletableFuture<GetEventCount.Response> getEventCountFuture = new CompletableFuture<>();
    final long timeout = 60000L;
    // Because verticles are deployed asynchronously, the EventLogRepository verticle may not be deployed yet.
    // Its message codec is only registered while the verticle is starting, so the codec may not be registered yet either.
    while (true) {
        try {
            vertx.eventBus().send(EventBusAddress.eventBusAddress(verticleManagerId, GetEventCount.class),
                    GetEventCount.Request.getDefaultInstance(), new DeliveryOptions().setSendTimeout(timeout),
                    responseHandler(getEventCountFuture, GetEventCount.Response.class));
            break;
        } catch (final IllegalArgumentException e) {
            if (e.getMessage().contains("No message codec for type")) {
                log.log(WARNING, "Waiting for EventLogRepository ... ", e);
                Thread.sleep(5000L);
            } else {
                throw e;
            }
        }
    }
    final GetEventCount.Response getEventCountResponse = getEventCountFuture.get(timeout, TimeUnit.MILLISECONDS);
    assertThat(getEventCountResponse.getCount(), is(0L));

    final CompletableFuture<CreateEvent.Response> createEventFuture = new CompletableFuture<>();
    vertx.eventBus().send(EventBusAddress.eventBusAddress(verticleManagerId, CreateEvent.class),
            CreateEvent.Request.newBuilder().setEvent("testEventLogRepository").build(),
            new DeliveryOptions().setSendTimeout(timeout),
            responseHandler(createEventFuture, CreateEvent.Response.class));
    final CreateEvent.Response createEventResponse = createEventFuture.get(timeout, TimeUnit.MILLISECONDS);
    log.info(String.format("record id = %d::%d", createEventResponse.getId().getClusterId(),
            createEventResponse.getId().getPosition()));

    final CompletableFuture<GetEventCount.Response> getEventCountFuture2 = new CompletableFuture<>();
    vertx.eventBus().send(EventBusAddress.eventBusAddress(verticleManagerId, GetEventCount.class),
            GetEventCount.Request.getDefaultInstance(), new DeliveryOptions().setSendTimeout(timeout),
            responseHandler(getEventCountFuture2, GetEventCount.Response.class));
    final GetEventCount.Response getEventCountResponse2 = getEventCountFuture2.get(timeout, TimeUnit.MILLISECONDS);
    assertThat(getEventCountResponse2.getCount(), is(1L));
}
From source file:org.springframework.integration.aggregator.AggregatorTests.java
@Test
public void testAggPerf() throws InterruptedException, ExecutionException, TimeoutException {
    AggregatingMessageHandler handler = new AggregatingMessageHandler(
            new DefaultAggregatingMessageGroupProcessor());
    handler.setCorrelationStrategy(message -> "foo");
    handler.setReleaseStrategy(new MessageCountReleaseStrategy(60000));
    handler.setExpireGroupsUponCompletion(true);
    handler.setSendPartialResultOnExpiry(true);
    DirectChannel outputChannel = new DirectChannel();
    handler.setOutputChannel(outputChannel);
    final CompletableFuture<Collection<?>> resultFuture = new CompletableFuture<>();
    outputChannel.subscribe(message -> {
        Collection<?> payload = (Collection<?>) message.getPayload();
        logger.warn("Received " + payload.size());
        resultFuture.complete(payload);
    });
    SimpleMessageStore store = new SimpleMessageStore();
    SimpleMessageGroupFactory messageGroupFactory = new SimpleMessageGroupFactory(
            SimpleMessageGroupFactory.GroupType.BLOCKING_QUEUE);
    store.setMessageGroupFactory(messageGroupFactory);
    handler.setMessageStore(store);
    Message<?> message = new GenericMessage<String>("foo");
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    for (int i = 0; i < 120000; i++) {
        if (i % 10000 == 0) {
            stopwatch.stop();
            logger.warn("Sent " + i + " in " + stopwatch.getTotalTimeSeconds() + " (10k in "
                    + stopwatch.getLastTaskTimeMillis() + "ms)");
            stopwatch.start();
        }
        handler.handleMessage(message);
    }
    stopwatch.stop();
    logger.warn("Sent " + 120000 + " in " + stopwatch.getTotalTimeSeconds() + " (10k in "
            + stopwatch.getLastTaskTimeMillis() + "ms)");
    Collection<?> result = resultFuture.get(10, TimeUnit.SECONDS);
    assertNotNull(result);
    assertEquals(60000, result.size());
}
From source file:org.springframework.integration.aggregator.AggregatorTests.java
@Test
public void testAggPerfDefaultPartial() throws InterruptedException, ExecutionException, TimeoutException {
    AggregatingMessageHandler handler = new AggregatingMessageHandler(
            new DefaultAggregatingMessageGroupProcessor());
    handler.setCorrelationStrategy(message -> "foo");
    handler.setReleasePartialSequences(true);
    DirectChannel outputChannel = new DirectChannel();
    handler.setOutputChannel(outputChannel);
    final CompletableFuture<Collection<?>> resultFuture = new CompletableFuture<>();
    outputChannel.subscribe(message -> {
        Collection<?> payload = (Collection<?>) message.getPayload();
        logger.warn("Received " + payload.size());
        resultFuture.complete(payload);
    });
    SimpleMessageStore store = new SimpleMessageStore();
    SimpleMessageGroupFactory messageGroupFactory = new SimpleMessageGroupFactory(
            SimpleMessageGroupFactory.GroupType.BLOCKING_QUEUE);
    store.setMessageGroupFactory(messageGroupFactory);
    handler.setMessageStore(store);
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    for (int i = 0; i < 120000; i++) {
        if (i % 10000 == 0) {
            stopwatch.stop();
            logger.warn("Sent " + i + " in " + stopwatch.getTotalTimeSeconds() + " (10k in "
                    + stopwatch.getLastTaskTimeMillis() + "ms)");
            stopwatch.start();
        }
        handler.handleMessage(
                MessageBuilder.withPayload("foo").setSequenceSize(120000).setSequenceNumber(i + 1).build());
    }
    stopwatch.stop();
    logger.warn("Sent " + 120000 + " in " + stopwatch.getTotalTimeSeconds() + " (10k in "
            + stopwatch.getLastTaskTimeMillis() + "ms)");
    Collection<?> result = resultFuture.get(10, TimeUnit.SECONDS);
    assertNotNull(result);
    assertEquals(120000, result.size());
    assertThat(stopwatch.getTotalTimeSeconds(), lessThan(60.0)); // actually < 2.0, was many minutes
}
From source file:com.ikanow.aleph2.distributed_services.services.CoreDistributedServices.java
/** Joins the Akka cluster */
protected void joinAkkaCluster() {
    if (!_akka_system.isSet()) {
        this.getAkkaSystem(); // (this will also join the cluster)
        return;
    }
    if (!_has_joined_akka_cluster) {
        _has_joined_akka_cluster = true;

        // WORKAROUND FOR BUG IN akka-cluster/akka-zookeeper-seed: if it grabs the old ephemeral connection info of the master then bad things can happen,
        // so wait until a ZK node that I create for this purpose is removed (so the others also should have been)
        final String application_name = _config_bean.application_name();
        final String hostname_application = DistributedServicesPropertyBean.ZOOKEEPER_APPLICATION_LOCK + "/"
                + ZookeeperUtils.getHostname() + ":" + application_name;
        if (null == application_name) {
            logger.info("(This is a transient application, cannot be the master)");
        } else {
            logger.info("Checking for old ZK artefacts from old instance of this application path=" + hostname_application);
            final int MAX_ZK_ATTEMPTS = 6;
            int i = 0;
            for (i = 0; i <= MAX_ZK_ATTEMPTS; ++i) {
                try {
                    this.getCuratorFramework().create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL)
                            .forPath(hostname_application);
                    Thread.sleep(2000L); // (Wait a little longer)
                    break;
                } catch (Exception e) {
                    logger.warn(ErrorUtils.get("Waiting for old instance to be cleared out (err={0}), retrying={1}",
                            e.getMessage(), i < MAX_ZK_ATTEMPTS));
                    try {
                        Thread.sleep(10000L);
                    } catch (Exception __) {
                    }
                }
            }
            if (i > MAX_ZK_ATTEMPTS) {
                throw new RuntimeException("Failed to clear out lock, not clear why - try removing by hand: "
                        + (DistributedServicesPropertyBean.ZOOKEEPER_APPLICATION_LOCK + "/" + hostname_application));
            }
        }
        ZookeeperClusterSeed.get(_akka_system.get()).join();

        _shutdown_hook.set(Lambdas.wrap_runnable_u(() -> {
            try {
                final CompletableFuture<Unit> wait_for_member_to_leave = new CompletableFuture<>();
                Cluster.get(_akka_system.get())
                        .registerOnMemberRemoved(() -> wait_for_member_to_leave.complete(Unit.unit()));
                _joined_akka_cluster = new CompletableFuture<>(); // (mainly just for testing)
                Cluster.get(_akka_system.get()).leave(ZookeeperClusterSeed.get(_akka_system.get()).address());

                // If it's an application, not transient, then handle synchronization
                try {
                    System.out.println(new java.util.Date() + ": Akka cluster management: Shutting down in ~10s");
                    logger.error("(Not really an error) Shutting down in ~10s");
                } catch (Throwable e) {
                } // logging may no longer work at this point

                // (don't delete the ZK node - appear to still be able to run into race problems if you do, left here to remind me):
                //if (null != application_name) {
                //    this.getCuratorFramework().delete().deletingChildrenIfNeeded().forPath(hostname_application);
                //}

                try {
                    wait_for_member_to_leave.get(10L, TimeUnit.SECONDS);
                } catch (Throwable e) {
                    try {
                        System.out.println(new java.util.Date()
                                + ": Akka cluster management: Akka Cluster departure was not able to complete in time: "
                                + e.getMessage());
                        logger.error("Akka Cluster departure was not able to complete in time");
                    } catch (Throwable ee) {
                    } // logging may no longer work at this point
                }
                try {
                    Await.result(_akka_system.get().terminate(), Duration.create(10L, TimeUnit.SECONDS));
                } catch (Throwable e) {
                    try {
                        System.out.println(new java.util.Date()
                                + ": Akka cluster management: Akka System termination was not able to complete in time: "
                                + e.getMessage());
                        logger.error("Akka System termination was not able to complete in time");
                    } catch (Throwable ee) {
                    } // logging may no longer work at this point
                }
                // All done
                try {
                    System.out.println(new java.util.Date() + ": Akka cluster management: Akka shut down complete, now exiting");
                    logger.error("(Not really an error) Akka shut down complete, now exiting");
                } catch (Throwable e) {
                } // logging may no longer work at this point
            } catch (Throwable t) { // (unknown error, we'll print and log this)
                try {
                    t.printStackTrace();
                    logger.error(ErrorUtils.getLongForm("{0}", t));
                } catch (Throwable e) {
                } // logging may no longer work at this point
            }
        }));
        Cluster.get(_akka_system.get()).registerOnMemberUp(() -> {
            logger.info("Joined cluster address=" + ZookeeperClusterSeed.get(_akka_system.get()).address()
                    + ", adding shutdown hook");
            synchronized (_joined_akka_cluster) { // (prevents a race condition vs runOnAkkaJoin)
                _joined_akka_cluster.complete(true);
            }
            // Now register a shutdown hook
            Runtime.getRuntime().addShutdownHook(new Thread(_shutdown_hook.get()));

            _post_join_task_list.stream().parallel().forEach(retval_task -> {
                try {
                    retval_task._2().run();
                    retval_task._1().complete(null);
                } catch (Throwable t) {
                    retval_task._1().completeExceptionally(t);
                }
            });
        });
    }
}
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
@Override
public Position offloadPrefix(Position pos) throws InterruptedException, ManagedLedgerException {
    CompletableFuture<Position> promise = new CompletableFuture<>();
    asyncOffloadPrefix(pos, new OffloadCallback() {
        @Override
        public void offloadComplete(Position offloadedTo, Object ctx) {
            promise.complete(offloadedTo);
        }

        @Override
        public void offloadFailed(ManagedLedgerException e, Object ctx) {
            promise.completeExceptionally(e);
        }
    }, null);
    try {
        return promise.get(AsyncOperationTimeoutSeconds, TimeUnit.SECONDS);
    } catch (TimeoutException te) {
        throw new ManagedLedgerException("Timeout during managed ledger offload operation");
    } catch (ExecutionException e) {
        log.error("[{}] Error offloading. pos = {}", name, pos, e.getCause());
        throw ManagedLedgerException.getManagedLedgerException(e.getCause());
    }
}
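The example above shows a common pattern: bridging a callback-style asynchronous API into a blocking call by completing a CompletableFuture from the callback and then calling get with a timeout. A minimal sketch of the same idea follows, using hypothetical callback, method, and exception names rather than the BookKeeper classes above.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class CallbackBridgeExample {

    // Hypothetical callback interface, standing in for a callback such as OffloadCallback.
    interface ResultCallback {
        void onSuccess(long value);
        void onFailure(Exception e);
    }

    // Hypothetical async API: reports its result via the callback some time later.
    static void asyncOperation(ResultCallback callback, ScheduledExecutorService executor) {
        executor.schedule(() -> callback.onSuccess(42L), 100, TimeUnit.MILLISECONDS);
    }

    // Synchronous wrapper: complete a CompletableFuture from the callback, then block with a timeout.
    static long syncOperation(ScheduledExecutorService executor) throws InterruptedException {
        CompletableFuture<Long> promise = new CompletableFuture<>();
        asyncOperation(new ResultCallback() {
            @Override
            public void onSuccess(long value) {
                promise.complete(value);
            }

            @Override
            public void onFailure(Exception e) {
                promise.completeExceptionally(e);
            }
        }, executor);
        try {
            return promise.get(30, TimeUnit.SECONDS);
        } catch (TimeoutException te) {
            throw new IllegalStateException("Timed out waiting for async operation", te);
        } catch (ExecutionException e) {
            // Unwrap the cause so callers see the original failure.
            throw new IllegalStateException(e.getCause());
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        try {
            System.out.println("result = " + syncOperation(executor));
        } finally {
            executor.shutdown();
        }
    }
}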
From source file:io.liveoak.container.BasicServerTest.java
@Test
public void testServer() throws Exception {
    CompletableFuture<StompMessage> peopleCreationNotification = new CompletableFuture<>();
    CompletableFuture<StompMessage> bobCreationNotification = new CompletableFuture<>();

    StompClient stompClient = new StompClient();
    CountDownLatch subscriptionLatch = new CountDownLatch(1);
    stompClient.connect("localhost", 8080, (client) -> {
        stompClient.subscribe("/testApp/memory/people/*", (subscription) -> {
            subscription.onMessage((msg) -> {
                System.err.println("******* MESSAGE: " + msg);
                if (msg.headers().get("location").equals("/testApp/memory/people")) {
                    peopleCreationNotification.complete(msg);
                } else {
                    bobCreationNotification.complete(msg);
                }
            });
            subscription.onReceipt(() -> {
                subscriptionLatch.countDown();
            });
        });
    });
    subscriptionLatch.await();

    Header header = new BasicHeader("Accept", "application/json");
    HttpGet getRequest = null;
    HttpPost postRequest = null;
    HttpPut putRequest = null;
    CloseableHttpResponse response = null;

    System.err.println("TEST #1");
    // Root object should exist.
    getRequest = new HttpGet("http://localhost:8080/testApp/memory");
    getRequest.addHeader(header);
    response = this.httpClient.execute(getRequest);
    // {
    //   "id" : "memory",
    //   "self" : {
    //     "href" : "/memory"
    //   },
    //   "content" : [ ]
    // }
    assertThat(response).isNotNull();
    assertThat(response.getStatusLine().getStatusCode()).isEqualTo(200);
    ResourceState state = decode(response);
    assertThat(state).isNotNull();
    assertThat(state.members().size()).isEqualTo(0);
    response.close();

    System.err.println("TEST #2");
    // people collection should not exist.
    getRequest = new HttpGet("http://localhost:8080/testApp/memory/people");
    response = this.httpClient.execute(getRequest);
    assertThat(response).isNotNull();
    assertThat(response.getStatusLine().getStatusCode()).isEqualTo(404);
    response.close();

    System.err.println("TEST #3");
    // create people collection with direct PUT
    putRequest = new HttpPut("http://localhost:8080/testApp/memory/people");
    putRequest.setEntity(new StringEntity("{ \"type\": \"collection\" }"));
    putRequest.setHeader("Content-Type", "application/json");
    response = this.httpClient.execute(putRequest);
    System.err.println("response: " + response);
    assertThat(response).isNotNull();
    assertThat(response.getStatusLine().getStatusCode()).isEqualTo(201);
    response.close();

    System.err.println("TEST #4");
    // people collection should exist now.
    getRequest = new HttpGet("http://localhost:8080/testApp/memory/people");
    response = this.httpClient.execute(getRequest);
    assertThat(response).isNotNull();
    assertThat(response.getStatusLine().getStatusCode()).isEqualTo(200);
    response.close();
    assertThat(peopleCreationNotification.getNow(null)).isNull();

    System.err.println("TEST #5");
    // people collection should be enumerable from the root
    getRequest = new HttpGet("http://localhost:8080/testApp/memory?expand=members");
    getRequest.addHeader(header);
    response = this.httpClient.execute(getRequest);
    // {
    //   "id" : "memory",
    //   "self" : {
    //     "href" : "/memory"
    //   },
    //   "content" : [ {
    //     "id" : "people",
    //     "self" : {
    //       "href" : "/memory/people"
    //     },
    //     "content" : [ ]
    //   } ]
    // }
    assertThat(response).isNotNull();
    assertThat(response.getStatusLine().getStatusCode()).isEqualTo(200);
    state = decode(response);
    assertThat(state).isNotNull();
    assertThat(state.members().size()).isEqualTo(1);
    ResourceState memoryCollection = state.members().get(0);
    assertThat(memoryCollection.id()).isEqualTo("people");
    assertThat(memoryCollection.uri().toString()).isEqualTo("/testApp/memory/people");

    System.err.println("TEST #6");
    // Post a person
    postRequest = new HttpPost("http://localhost:8080/testApp/memory/people");
    postRequest.setEntity(new StringEntity("{ \"name\": \"bob\" }"));
    postRequest.setHeader("Content-Type", "application/json");
    response = httpClient.execute(postRequest);
    assertThat(response).isNotNull();
    assertThat(response.getStatusLine().getStatusCode()).isEqualTo(201);
    state = decode(response);
    assertThat(state).isNotNull();
    assertThat(state).isInstanceOf(ResourceState.class);
    assertThat(state.id()).isNotNull();
    assertThat(state.getProperty("name")).isEqualTo("bob");

    // check STOMP
    System.err.println("TEST #STOMP");
    StompMessage obj = bobCreationNotification.get(30000, TimeUnit.SECONDS);
    assertThat(obj).isNotNull();
    ResourceState bobObjState = decode(obj.content());
    assertThat(bobObjState.getProperty("name")).isEqualTo("bob");
    assertThat(state.getProperty(LiveOak.ID)).isEqualTo(bobObjState.getProperty(LiveOak.ID));
    response.close();
}