List of usage examples for java.util.concurrent CompletableFuture complete
public boolean complete(T value)
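If this future has not already completed, complete(value) sets the result to the given value and returns true; otherwise it has no effect and returns false. Before the project-specific examples below, here is a minimal, self-contained sketch of that contract; it is not taken from any of the listed projects, and the class and variable names are purely illustrative.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative standalone example (not from the projects listed below).
public class CompleteExample {

    public static void main(String[] args) throws Exception {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        CompletableFuture<String> future = new CompletableFuture<>();

        // Complete the future from another thread once the "work" is done.
        executor.schedule(() -> {
            boolean transitioned = future.complete("result");
            System.out.println("complete() caused a transition: " + transitioned); // true
        }, 100, TimeUnit.MILLISECONDS);

        // get() blocks until complete() is called above, then returns "result".
        System.out.println(future.get());

        // A second complete() is a no-op: it returns false and the value stays "result".
        System.out.println(future.complete("ignored"));
        System.out.println(future.join());

        executor.shutdown();
    }
}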
From source file:org.onosproject.segmentrouting.pwaas.DefaultL2TunnelHandler.java
/**
 * Deletes the pseudo wire termination.
 *
 * @param l2Tunnel  the tunnel
 * @param egress    the egress connect point
 * @param future    the async task
 * @param direction the direction of the tunnel
 */
private void tearDownPseudoWireTerm(L2Tunnel l2Tunnel, ConnectPoint egress,
                                    CompletableFuture<ObjectiveError> future, Direction direction) {
    String key = generateKey(l2Tunnel.tunnelId(), direction);
    if (!l2TerminationNextObjStore.containsKey(key)) {
        log.info("Abort delete of {} for {}: next does not exist in the store", TERMINATION, key);
        if (future != null) {
            future.complete(null);
        }
        return;
    }
    NextObjective nextObjective = l2TerminationNextObjStore.get(key).value();
    ForwardingObjective.Builder fwdBuilder = createTermFwdObjective(l2Tunnel.pwLabel(), l2Tunnel.tunnelId(),
                                                                    egress.port(), nextObjective.id());
    ObjectiveContext context = new DefaultObjectiveContext(
            (objective) -> log.debug("FwdObj for {} {}, direction {} removed",
                                     TERMINATION, l2Tunnel.tunnelId(), direction),
            (objective, error) -> log.warn("Failed to remove fwdObj for {} {}, direction {}",
                                           TERMINATION, l2Tunnel.tunnelId(), error, direction));
    srManager.flowObjectiveService.forward(egress.deviceId(), fwdBuilder.remove(context));

    // un-comment in case you want to delete groups used by the pw
    // however, this will break the update of pseudowires cause the L2 interface group can
    // not be deleted (it is referenced by other groups)
    /*
    context = new ObjectiveContext() {
        @Override
        public void onSuccess(Objective objective) {
            log.debug("Previous {} next for {} removed", TERMINATION, key);
            if (future != null) {
                future.complete(null);
            }
        }

        @Override
        public void onError(Objective objective, ObjectiveError error) {
            log.warn("Failed to remove previous {} next for {}: {}", TERMINATION, key, error);
            if (future != null) {
                future.complete(error);
            }
        }
    };
    srManager.flowObjectiveService.next(egress.deviceId(), (NextObjective) nextObjective.copy().remove(context));
    */

    // delete the extra filtering objective for terminating
    // spine-spine pws
    if (!l2Tunnel.transportVlan().equals(UNTAGGED_TRANSPORT_VLAN)) {

        // determine the input port at the
        PortNumber inPort;

        if (egress.deviceId().equals(l2Tunnel.pathUsed().get(0).dst().deviceId())) {
            inPort = l2Tunnel.pathUsed().get(0).dst().port();
        } else {
            inPort = l2Tunnel.pathUsed().get(0).src().port();
        }

        MacAddress dstMac;
        try {
            dstMac = srManager.deviceConfiguration().getDeviceMac(egress.deviceId());
        } catch (Exception e) {
            log.info("Device not found in configuration, no programming of MAC address");
            dstMac = null;
        }

        log.info("Removing filtering objective for pseudowire transport with vlan = {}, port = {}, mac = {}",
                 l2Tunnel.transportVlan(), inPort, dstMac);
        FilteringObjective.Builder filteringObjectiveBuilder =
                createNormalPipelineFiltObjective(inPort, l2Tunnel.transportVlan(), dstMac);
        context = new DefaultObjectiveContext(
                (objective) -> log.debug("Special filtObj for {} removed", l2Tunnel.tunnelId()),
                (objective, error) -> log.warn("Failed to populate special filtObj rule for {}: {}",
                                               l2Tunnel.tunnelId(), error));
        TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder();
        filteringObjectiveBuilder.withMeta(treatment.build());
        srManager.flowObjectiveService.filter(egress.deviceId(), filteringObjectiveBuilder.remove(context));
        log.debug("Removing special FiltObj for termination point with tunnel {} for port {}",
                  l2Tunnel.tunnelId(), inPort);
    }

    l2TerminationNextObjStore.remove(key);
    future.complete(null);
}
From source file:com.microsoft.azure.servicebus.samples.messagebrowse.MessageBrowse.java
CompletableFuture peekMessagesAsync(IMessageReceiver receiver) {
    CompletableFuture currentTask = new CompletableFuture();
    try {
        CompletableFuture.runAsync(() -> {
            while (!currentTask.isCancelled()) {
                try {
                    IMessage message = receiver.peek();
                    if (message != null) {
                        // receives message is passed to callback
                        if (message.getLabel() != null && message.getContentType() != null
                                && message.getLabel().contentEquals("Scientist")
                                && message.getContentType().contentEquals("application/json")) {
                            byte[] body = message.getBody();
                            Map scientist = GSON.fromJson(new String(body, UTF_8), Map.class);

                            System.out.printf(
                                    "\n\t\t\t\tMessage received: \n\t\t\t\t\t\tMessageId = %s, \n\t\t\t\t\t\tSequenceNumber = %s, \n\t\t\t\t\t\tEnqueuedTimeUtc = %s,"
                                            + "\n\t\t\t\t\t\tExpiresAtUtc = %s, \n\t\t\t\t\t\tContentType = \"%s\", \n\t\t\t\t\t\tContent: [ firstName = %s, name = %s ]\n",
                                    message.getMessageId(), message.getSequenceNumber(),
                                    message.getEnqueuedTimeUtc(), message.getExpiresAtUtc(),
                                    message.getContentType(),
                                    scientist != null ? scientist.get("firstName") : "",
                                    scientist != null ? scientist.get("name") : "");
                        } else {
                            currentTask.complete(null);
                        }
                    }
                } catch (Exception e) {
                    currentTask.completeExceptionally(e);
                }
            }
            if (!currentTask.isCancelled()) {
                currentTask.complete(null);
            }
        });
        return currentTask;
    } catch (Exception e) {
        currentTask.completeExceptionally(e);
    }
    return currentTask;
}
From source file:io.pravega.segmentstore.server.host.stat.AutoScaleProcessor.java
private void bootstrapRequestWriters() {
    CompletableFuture<Void> createWriter = new CompletableFuture<>();

    // Starting with initial delay, in case request stream has not been created, to give it time to start
    // However, we have this wrapped in consumeFailure which means the creation of writer will be retried.
    // We are introducing a delay to avoid exceptions in the log in case creation of writer is attempted before
    // creation of requeststream.
    maintenanceExecutor.schedule(() -> Retry.indefinitelyWithExpBackoff(100, 10, 10000, e -> {
        log.warn("error while creating writer for requeststream");
        log.debug("error while creating writer for requeststream {}", e);
    }).runAsync(() -> {
        if (clientFactory.get() == null) {
            clientFactory.compareAndSet(null,
                    ClientFactory.withScope(NameUtils.INTERNAL_SCOPE_NAME, configuration.getControllerUri()));
        }
        this.writer.set(clientFactory.get().createEventWriter(configuration.getInternalRequestStream(),
                serializer, writerConfig));
        initialized.set(true);
        // even if there is no activity, keep cleaning up the cache so that scale down can be triggered.
        // caches do not perform clean up if there is no activity. This is because they do not maintain their
        // own background thread.
        maintenanceExecutor.scheduleAtFixedRate(cache::cleanUp, 0,
                configuration.getCacheCleanup().getSeconds(), TimeUnit.SECONDS);
        log.info("bootstrapping auto-scale reporter done");
        createWriter.complete(null);
        return createWriter;
    }, maintenanceExecutor), 10, TimeUnit.SECONDS);
}
From source file:com.yahoo.pulsar.client.impl.ConnectionPool.java
private CompletableFuture<ClientCnx> createConnection(InetSocketAddress address, int connectionKey) {
    if (log.isDebugEnabled()) {
        log.debug("Connection for {} not found in cache", address);
    }

    final CompletableFuture<ClientCnx> cnxFuture = new CompletableFuture<ClientCnx>();

    // Trigger async connect to broker
    bootstrap.connect(address).addListener((ChannelFuture future) -> {
        if (!future.isSuccess()) {
            cnxFuture.completeExceptionally(new PulsarClientException(future.cause()));
            cleanupConnection(address, connectionKey, cnxFuture);
            return;
        }

        log.info("[{}] Connected to server", future.channel());

        future.channel().closeFuture().addListener(v -> {
            // Remove connection from pool when it gets closed
            if (log.isDebugEnabled()) {
                log.debug("Removing closed connection from pool: {}", v);
            }
            cleanupConnection(address, connectionKey, cnxFuture);
        });

        // We are connected to broker, but need to wait until the connect/connected handshake is
        // complete
        final ClientCnx cnx = (ClientCnx) future.channel().pipeline().get("handler");
        if (!future.channel().isActive() || cnx == null) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Connection was already closed by the time we got notified", future.channel());
            }
            cnxFuture.completeExceptionally(new ChannelException("Connection already closed"));
            return;
        }

        cnx.connectionFuture().thenRun(() -> {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Connection handshake completed", cnx.channel());
            }
            cnxFuture.complete(cnx);
        }).exceptionally(exception -> {
            log.warn("[{}] Connection handshake failed: {}", cnx.channel(), exception.getMessage());
            cnxFuture.completeExceptionally(exception);
            cleanupConnection(address, connectionKey, cnxFuture);
            cnx.ctx().close();
            return null;
        });
    });

    return cnxFuture;
}
From source file:com.android.tools.idea.diagnostics.crash.GoogleCrash.java
@NotNull
@Override
public CompletableFuture<String> submit(@NotNull final HttpEntity requestEntity) {
    CompletableFuture<String> future = new CompletableFuture<>();
    try {
        ourExecutor.submit(() -> {
            try {
                HttpClient client = HttpClients.createDefault();

                HttpEntity entity = requestEntity;
                if (!UNIT_TEST_MODE) {
                    // The test server used in testing doesn't handle gzip compression
                    // (netty requires jcraft jzlib for gzip decompression)
                    entity = new GzipCompressingEntity(requestEntity);
                }

                HttpPost post = new HttpPost(myCrashUrl);
                post.setEntity(entity);

                HttpResponse response = client.execute(post);
                StatusLine statusLine = response.getStatusLine();
                if (statusLine.getStatusCode() >= 300) {
                    future.completeExceptionally(
                            new HttpResponseException(statusLine.getStatusCode(), statusLine.getReasonPhrase()));
                    return;
                }

                entity = response.getEntity();
                if (entity == null) {
                    future.completeExceptionally(new NullPointerException("Empty response entity"));
                    return;
                }

                String reportId = EntityUtils.toString(entity);
                if (DEBUG_BUILD) {
                    //noinspection UseOfSystemOutOrSystemErr
                    System.out.println("Report submitted: http://go/crash-staging/" + reportId);
                }
                future.complete(reportId);
            } catch (IOException e) {
                future.completeExceptionally(e);
            }
        });
    } catch (RejectedExecutionException ignore) {
        // handled by the rejected execution handler associated with ourExecutor
    }

    return future;
}
From source file:org.apache.pulsar.broker.admin.impl.PersistentTopicsBase.java
private CompletableFuture<Void> updatePartitionedTopic(TopicName topicName, int numPartitions) {
    final String path = ZkAdminPaths.partitionedTopicPath(topicName);

    CompletableFuture<Void> updatePartition = new CompletableFuture<>();
    createSubscriptions(topicName, numPartitions).thenAccept(res -> {
        try {
            byte[] data = jsonMapper().writeValueAsBytes(new PartitionedTopicMetadata(numPartitions));
            globalZk().setData(path, data, -1, (rc, path1, ctx, stat) -> {
                if (rc == KeeperException.Code.OK.intValue()) {
                    updatePartition.complete(null);
                } else {
                    updatePartition.completeExceptionally(KeeperException.create(KeeperException.Code.get(rc),
                            "failed to create update partitions"));
                }
            }, null);
        } catch (Exception e) {
            updatePartition.completeExceptionally(e);
        }
    }).exceptionally(ex -> {
        updatePartition.completeExceptionally(ex);
        return null;
    });

    return updatePartition;
}
From source file:org.apache.distributedlog.BKLogHandler.java
protected void readLogSegmentsFromStore(final Versioned<List<String>> logSegmentNames,
                                        final Comparator<LogSegmentMetadata> comparator,
                                        final LogSegmentFilter segmentFilter,
                                        final CompletableFuture<Versioned<List<LogSegmentMetadata>>> readResult) {
    Set<String> segmentsReceived = new HashSet<String>();
    segmentsReceived.addAll(segmentFilter.filter(logSegmentNames.getValue()));
    Set<String> segmentsAdded;
    final Set<String> removedSegments = Collections.synchronizedSet(new HashSet<String>());
    final Map<String, LogSegmentMetadata> addedSegments =
            Collections.synchronizedMap(new HashMap<String, LogSegmentMetadata>());
    Pair<Set<String>, Set<String>> segmentChanges = logSegmentCache.diff(segmentsReceived);
    segmentsAdded = segmentChanges.getLeft();
    removedSegments.addAll(segmentChanges.getRight());
    if (segmentsAdded.isEmpty()) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("No segments added for {}.", getFullyQualifiedName());
        }

        // update the cache before #getCachedLogSegments to return
        updateLogSegmentCache(removedSegments, addedSegments);

        List<LogSegmentMetadata> segmentList;
        try {
            segmentList = getCachedLogSegments(comparator);
        } catch (UnexpectedException e) {
            readResult.completeExceptionally(e);
            return;
        }

        readResult.complete(new Versioned<List<LogSegmentMetadata>>(segmentList, logSegmentNames.getVersion()));
        return;
    }

    final AtomicInteger numChildren = new AtomicInteger(segmentsAdded.size());
    final AtomicInteger numFailures = new AtomicInteger(0);
    for (final String segment : segmentsAdded) {
        String logSegmentPath = logMetadata.getLogSegmentPath(segment);
        LogSegmentMetadata cachedSegment = metadataCache.get(logSegmentPath);
        if (null != cachedSegment) {
            addedSegments.put(segment, cachedSegment);
            completeReadLogSegmentsFromStore(removedSegments, addedSegments, comparator, readResult,
                    logSegmentNames.getVersion(), numChildren, numFailures);
            continue;
        }
        metadataStore.getLogSegment(logSegmentPath).whenComplete(new FutureEventListener<LogSegmentMetadata>() {

            @Override
            public void onSuccess(LogSegmentMetadata result) {
                addedSegments.put(segment, result);
                complete();
            }

            @Override
            public void onFailure(Throwable cause) {
                // LogSegmentNotFoundException exception is possible in two cases
                // 1. A log segment was deleted by truncation between the call to getChildren and read
                //    attempt on the znode corresponding to the segment
                // 2. In progress segment has been completed => inprogress ZNode does not exist
                if (cause instanceof LogSegmentNotFoundException) {
                    removedSegments.add(segment);
                    complete();
                } else {
                    // fail fast
                    if (1 == numFailures.incrementAndGet()) {
                        readResult.completeExceptionally(cause);
                        return;
                    }
                }
            }

            private void complete() {
                completeReadLogSegmentsFromStore(removedSegments, addedSegments, comparator, readResult,
                        logSegmentNames.getVersion(), numChildren, numFailures);
            }
        });
    }
}
From source file:tech.beshu.ror.httpclient.ApacheHttpCoreClient.java
@Override
public CompletableFuture<RRHttpResponse> send(RRHttpRequest request) {

    CompletableFuture<HttpResponse> promise = new CompletableFuture<>();

    URI uri;
    HttpRequestBase hcRequest;
    try {
        if (request.getMethod() == HttpMethod.POST) {
            uri = new URIBuilder(request.getUrl().toASCIIString()).build();
            hcRequest = new HttpPost(uri);
            List<NameValuePair> urlParameters = new ArrayList<NameValuePair>();
            request.getQueryParams().entrySet()
                    .forEach(x -> urlParameters.add(new BasicNameValuePair(x.getKey(), x.getValue())));
            ((HttpPost) hcRequest).setEntity(new UrlEncodedFormEntity(urlParameters));
        } else {
            uri = new URIBuilder(request.getUrl().toASCIIString())
                    .addParameters(request.getQueryParams().entrySet().stream()
                            .map(e -> new BasicNameValuePair(e.getKey(), e.getValue()))
                            .collect(Collectors.toList()))
                    .build();
            hcRequest = new HttpGet(uri);
        }
    } catch (URISyntaxException e) {
        throw context.rorException(e.getClass().getSimpleName() + ": " + e.getMessage());
    } catch (UnsupportedEncodingException e) {
        throw context.rorException(e.getClass().getSimpleName() + ": " + e.getMessage());
    }

    request.getHeaders().entrySet().forEach(e -> hcRequest.addHeader(e.getKey(), e.getValue()));

    AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
        hcHttpClient.execute(hcRequest, new FutureCallback<HttpResponse>() {

            public void completed(final HttpResponse hcResponse) {
                int statusCode = hcResponse.getStatusLine().getStatusCode();
                logger.debug("HTTP REQ SUCCESS with status: " + statusCode + " " + request);
                promise.complete(hcResponse);
            }

            public void failed(final Exception ex) {
                logger.debug("HTTP REQ FAILED " + request);
                logger.info("HTTP client failed to connect: " + request + " reason: " + ex.getMessage());
                promise.completeExceptionally(ex);
            }

            public void cancelled() {
                promise.completeExceptionally(new RuntimeException("HTTP REQ CANCELLED: " + request));
            }
        });
        return null;
    });

    return promise.thenApply(hcResp -> new RRHttpResponse(hcResp.getStatusLine().getStatusCode(), () -> {
        try {
            return hcResp.getEntity().getContent();
        } catch (IOException e) {
            throw new RuntimeException("Cannot read content", e);
        }
    }));
}
From source file:org.apache.pulsar.client.impl.ConnectionPool.java
private CompletableFuture<ClientCnx> createConnection(InetSocketAddress address, int connectionKey) {
    if (log.isDebugEnabled()) {
        log.debug("Connection for {} not found in cache", address);
    }

    final CompletableFuture<ClientCnx> cnxFuture = new CompletableFuture<ClientCnx>();

    // Trigger async connect to broker
    bootstrap.connect(address).addListener((ChannelFuture future) -> {
        if (!future.isSuccess()) {
            log.warn("Failed to open connection to {} : {}", address,
                    future.cause().getClass().getSimpleName());
            cnxFuture.completeExceptionally(new PulsarClientException(future.cause()));
            cleanupConnection(address, connectionKey, cnxFuture);
            return;
        }

        log.info("[{}] Connected to server", future.channel());

        future.channel().closeFuture().addListener(v -> {
            // Remove connection from pool when it gets closed
            if (log.isDebugEnabled()) {
                log.debug("Removing closed connection from pool: {}", v);
            }
            cleanupConnection(address, connectionKey, cnxFuture);
        });

        // We are connected to broker, but need to wait until the connect/connected handshake is
        // complete
        final ClientCnx cnx = (ClientCnx) future.channel().pipeline().get("handler");
        if (!future.channel().isActive() || cnx == null) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Connection was already closed by the time we got notified", future.channel());
            }
            cnxFuture.completeExceptionally(new ChannelException("Connection already closed"));
            return;
        }

        cnx.connectionFuture().thenRun(() -> {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Connection handshake completed", cnx.channel());
            }
            cnxFuture.complete(cnx);
        }).exceptionally(exception -> {
            log.warn("[{}] Connection handshake failed: {}", cnx.channel(), exception.getMessage());
            cnxFuture.completeExceptionally(exception);
            cleanupConnection(address, connectionKey, cnxFuture);
            cnx.ctx().close();
            return null;
        });
    });

    return cnxFuture;
}
From source file:org.apache.pulsar.client.impl.ClientCnx.java
@Override
protected void handleLookupResponse(CommandLookupTopicResponse lookupResult) {
    if (log.isDebugEnabled()) {
        log.debug("Received Broker lookup response: {}", lookupResult.getResponse());
    }

    long requestId = lookupResult.getRequestId();
    CompletableFuture<LookupDataResult> requestFuture = getAndRemovePendingLookupRequest(requestId);

    if (requestFuture != null) {
        if (requestFuture.isCompletedExceptionally()) {
            if (log.isDebugEnabled()) {
                log.debug("{} Request {} already timed-out", ctx.channel(), lookupResult.getRequestId());
            }
            return;
        }
        // Complete future with exception if : Result.response=fail/null
        if (!lookupResult.hasResponse()
                || CommandLookupTopicResponse.LookupType.Failed.equals(lookupResult.getResponse())) {
            if (lookupResult.hasError()) {
                checkServerError(lookupResult.getError(), lookupResult.getMessage());
                requestFuture.completeExceptionally(
                        getPulsarClientException(lookupResult.getError(), lookupResult.getMessage()));
            } else {
                requestFuture.completeExceptionally(
                        new PulsarClientException.LookupException("Empty lookup response"));
            }
        } else {
            requestFuture.complete(new LookupDataResult(lookupResult));
        }
    } else {
        log.warn("{} Received unknown request id from server: {}", ctx.channel(), lookupResult.getRequestId());
    }
}