List of usage examples for java.util.concurrent CompletableFuture completeExceptionally
public boolean completeExceptionally(Throwable ex)
From source file:io.pravega.controller.store.stream.PersistentStreamBase.java
/**
 * If scale is ongoing, try to delete the epoch node.
 *
 * @param epoch epoch
 * @return true if we are able to delete the epoch, false otherwise.
 */
@Override
public CompletableFuture<Boolean> scaleTryDeleteEpoch(final int epoch) {
    // Fetch both tables from the store; together they determine whether a scale operation is in flight.
    return getHistoryTableFromStore()
            .thenCompose(historyTable -> getSegmentTableFromStore()
                    .thenApply(segmentTable -> new ImmutablePair<>(historyTable, segmentTable)))
            .thenCompose(pair -> {
                Data<T> segmentTable = pair.getRight();
                Data<T> historyTable = pair.getLeft();
                // Bridge the deleteEpochNode outcome into a fresh future so we can translate
                // one specific failure (DataNotEmptyException) into a `false` result.
                CompletableFuture<Boolean> result = new CompletableFuture<>();
                if (TableHelper.isScaleOngoing(historyTable.getData(), segmentTable.getData())) {
                    deleteEpochNode(epoch).whenComplete((r, e) -> {
                        if (e != null) {
                            // Unwrap CompletionException/ExecutionException to classify the real cause.
                            Throwable ex = ExceptionHelpers.getRealException(e);
                            if (ex instanceof StoreException.DataNotEmptyException) {
                                // can't delete as there are transactions still running under epoch node
                                result.complete(false);
                            } else {
                                result.completeExceptionally(ex);
                            }
                        } else {
                            result.complete(true);
                        }
                    });
                } else {
                    // No scale in progress: nothing to delete.
                    result.complete(false);
                }
                return result;
            });
}
From source file:org.mascherl.example.service.ComposeMailService.java
public CompletableFuture<List<MailAddressUsage>> getLastSendToAddressesAsync2(User currentUser, int limit) { CompletableFuture<List<MailAddressUsage>> completableFuture = new CompletableFuture<>(); db.query(//w ww.jav a 2 s . c om "select distinct mto.address, m.datetime " + "from mail m " + "join mail_to mto on mto.mail_uuid = m.uuid " + "where m.user_uuid = $1 " + "and m.mail_type = $2 " + "and not exists (" + " select 1 from mail m2 " + " join mail_to mto2 on mto2.mail_uuid = m2.uuid " + " where m2.user_uuid = $1 " + " and m2.mail_type = $2 " + " and mto2.address = mto.address " + " and m2.datetime > m.datetime " + ") " + "order by m.datetime desc " + "limit $3", Arrays.asList(currentUser.getUuid(), MailType.SENT.name(), limit), result -> { try { TimestampColumnZonedDateTimeMapper dateTimeColumnMapper = new PersistentZonedDateTime() .getColumnMapper(); List<MailAddressUsage> usages = StreamSupport.stream(result.spliterator(), false) .map(row -> new MailAddressUsage(new MailAddress(row.getString(0)), dateTimeColumnMapper.fromNonNullValue(row.getTimestamp(1)))) .collect(Collectors.toList()); completableFuture.complete(usages); } catch (Exception e) { completableFuture.completeExceptionally(e); } }, completableFuture::completeExceptionally); return completableFuture; }
From source file:io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java
/**
 * Requests all updates from each peer in the provided list of peers.
 * <p>
 * The returned future will be completed once at least one peer bootstraps this map or bootstrap requests to all peers
 * fail.
 *
 * @param peers the list of peers from which to request updates
 * @return a future to be completed once updates have been received from at least one peer
 */
private CompletableFuture<Void> requestBootstrapFromPeers(List<MemberId> peers) {
    if (peers.isEmpty()) {
        // Nothing to bootstrap from; treat as immediately successful.
        return CompletableFuture.completedFuture(null);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    final int totalPeers = peers.size();
    // successful: latches on the first successful response.
    AtomicBoolean successful = new AtomicBoolean();
    // totalCount: number of responses accounted for (excluding the first success, which completes directly).
    AtomicInteger totalCount = new AtomicInteger();
    // lastError: remembers the most recent failure so it can be reported if every peer fails.
    AtomicReference<Throwable> lastError = new AtomicReference<>();

    // Iterate through all of the peers and send a bootstrap request. On the first peer that returns
    // a successful bootstrap response, complete the future. Otherwise, if no peers respond with any
    // successful bootstrap response, the future will be completed with the last exception.
    for (MemberId peer : peers) {
        requestBootstrapFromPeer(peer).whenComplete((result, error) -> {
            if (error == null) {
                if (successful.compareAndSet(false, true)) {
                    // First success wins and completes the future.
                    future.complete(null);
                } else if (totalCount.incrementAndGet() == totalPeers) {
                    // All peers accounted for after a success was already recorded;
                    // completing an already-completed future here is a no-op.
                    Throwable e = lastError.get();
                    if (e != null) {
                        future.completeExceptionally(e);
                    }
                }
            } else {
                if (!successful.get() && totalCount.incrementAndGet() == totalPeers) {
                    // Every peer failed: surface the final error.
                    future.completeExceptionally(error);
                } else {
                    // NOTE(review): a failure racing with a concurrent success is harmless —
                    // the future is already complete and later completions are no-ops.
                    lastError.set(error);
                }
            }
        });
    }
    return future;
}
From source file:com.yahoo.pulsar.broker.service.BrokerService.java
/**
 * Builds the {@link ManagedLedgerConfig} for the given destination, combining the
 * namespace-level persistence/retention policies (when present) with broker defaults.
 *
 * @param topicName destination whose managed ledger config is requested
 * @return future completed with the assembled config, or exceptionally if the
 *         policies cannot be read
 */
public CompletableFuture<ManagedLedgerConfig> getManagedLedgerConfig(DestinationName topicName) {
    CompletableFuture<ManagedLedgerConfig> future = new CompletableFuture<>();
    // Execute in background thread, since getting the policies might block if the z-node wasn't already cached
    pulsar.getOrderedExecutor().submitOrdered(topicName, safeRun(() -> {
        NamespaceName namespace = topicName.getNamespaceObject();
        ServiceConfiguration serviceConfig = pulsar.getConfiguration();

        // Get persistence policy for this destination
        Policies policies;
        try {
            policies = pulsar
                    .getConfigurationCache().policiesCache().get(AdminResource.path("policies",
                            namespace.getProperty(), namespace.getCluster(), namespace.getLocalName()))
                    .orElse(null);
        } catch (Throwable t) {
            // Could not read the policies: fail the future (the caller decides how to recover).
            log.warn("Got exception when reading persistence policy for {}: {}", topicName, t.getMessage(), t);
            future.completeExceptionally(t);
            return;
        }

        PersistencePolicies persistencePolicies = policies != null ? policies.persistence : null;
        RetentionPolicies retentionPolicies = policies != null ? policies.retention_policies : null;

        if (persistencePolicies == null) {
            // Apply default values
            persistencePolicies = new PersistencePolicies(serviceConfig.getManagedLedgerDefaultEnsembleSize(),
                    serviceConfig.getManagedLedgerDefaultWriteQuorum(),
                    serviceConfig.getManagedLedgerDefaultAckQuorum(),
                    serviceConfig.getManagedLedgerDefaultMarkDeleteRateLimit());
        }

        if (retentionPolicies == null) {
            retentionPolicies = new RetentionPolicies(serviceConfig.getDefaultRetentionTimeInMinutes(),
                    serviceConfig.getDefaultRetentionSizeInMB());
        }

        ManagedLedgerConfig config = new ManagedLedgerConfig();
        config.setEnsembleSize(persistencePolicies.getBookkeeperEnsemble());
        config.setWriteQuorumSize(persistencePolicies.getBookkeeperWriteQuorum());
        config.setAckQuorumSize(persistencePolicies.getBookkeeperAckQuorum());
        config.setThrottleMarkDelete(persistencePolicies.getManagedLedgerMaxMarkDeleteRate());
        config.setDigestType(DigestType.CRC32);
        config.setMaxEntriesPerLedger(serviceConfig.getManagedLedgerMaxEntriesPerLedger());
        config.setMinimumRolloverTime(serviceConfig.getManagedLedgerMinLedgerRolloverTimeMinutes(),
                TimeUnit.MINUTES);
        config.setMaximumRolloverTime(serviceConfig.getManagedLedgerMaxLedgerRolloverTimeMinutes(),
                TimeUnit.MINUTES);
        // NOTE(review): hard-coded 2048 MB per-ledger cap; not sourced from configuration — confirm intended.
        config.setMaxSizePerLedgerMb(2048);
        config.setMetadataEnsembleSize(serviceConfig.getManagedLedgerDefaultEnsembleSize());
        config.setMetadataWriteQuorumSize(serviceConfig.getManagedLedgerDefaultWriteQuorum());
        config.setMetadataAckQuorumSize(serviceConfig.getManagedLedgerDefaultAckQuorum());
        config.setMetadataMaxEntriesPerLedger(serviceConfig.getManagedLedgerCursorMaxEntriesPerLedger());
        config.setLedgerRolloverTimeout(serviceConfig.getManagedLedgerCursorRolloverTimeInSeconds());
        config.setRetentionTime(retentionPolicies.getRetentionTimeInMinutes(), TimeUnit.MINUTES);
        config.setRetentionSizeInMB(retentionPolicies.getRetentionSizeInMB());

        future.complete(config);
    }, (exception) -> future.completeExceptionally(exception)));
    return future;
}
From source file:com.microsoft.azure.servicebus.samples.messagebrowse.MessageBrowse.java
CompletableFuture peekMessagesAsync(IMessageReceiver receiver) { CompletableFuture currentTask = new CompletableFuture(); try {/* w ww .j av a 2 s .c o m*/ CompletableFuture.runAsync(() -> { while (!currentTask.isCancelled()) { try { IMessage message = receiver.peek(); if (message != null) { // receives message is passed to callback if (message.getLabel() != null && message.getContentType() != null && message.getLabel().contentEquals("Scientist") && message.getContentType().contentEquals("application/json")) { byte[] body = message.getBody(); Map scientist = GSON.fromJson(new String(body, UTF_8), Map.class); System.out.printf( "\n\t\t\t\tMessage received: \n\t\t\t\t\t\tMessageId = %s, \n\t\t\t\t\t\tSequenceNumber = %s, \n\t\t\t\t\t\tEnqueuedTimeUtc = %s," + "\n\t\t\t\t\t\tExpiresAtUtc = %s, \n\t\t\t\t\t\tContentType = \"%s\", \n\t\t\t\t\t\tContent: [ firstName = %s, name = %s ]\n", message.getMessageId(), message.getSequenceNumber(), message.getEnqueuedTimeUtc(), message.getExpiresAtUtc(), message.getContentType(), scientist != null ? scientist.get("firstName") : "", scientist != null ? scientist.get("name") : ""); } else { currentTask.complete(null); } } } catch (Exception e) { currentTask.completeExceptionally(e); } } if (!currentTask.isCancelled()) { currentTask.complete(null); } }); return currentTask; } catch (Exception e) { currentTask.completeExceptionally(e); } return currentTask; }
From source file:com.microsoft.azure.servicebus.samples.receiveloop.ReceiveLoop.java
CompletableFuture receiveMessagesAsync(IMessageReceiver receiver) { CompletableFuture currentTask = new CompletableFuture(); try {/* w ww.ja v a2 s . c om*/ CompletableFuture.runAsync(() -> { while (!currentTask.isCancelled()) { try { IMessage message = receiver.receive(Duration.ofSeconds(60)); if (message != null) { // receives message is passed to callback if (message.getLabel() != null && message.getContentType() != null && message.getLabel().contentEquals("Scientist") && message.getContentType().contentEquals("application/json")) { byte[] body = message.getBody(); Map scientist = GSON.fromJson(new String(body, UTF_8), Map.class); System.out.printf( "\n\t\t\t\tMessage received: \n\t\t\t\t\t\tMessageId = %s, \n\t\t\t\t\t\tSequenceNumber = %s, \n\t\t\t\t\t\tEnqueuedTimeUtc = %s," + "\n\t\t\t\t\t\tExpiresAtUtc = %s, \n\t\t\t\t\t\tContentType = \"%s\", \n\t\t\t\t\t\tContent: [ firstName = %s, name = %s ]\n", message.getMessageId(), message.getSequenceNumber(), message.getEnqueuedTimeUtc(), message.getExpiresAtUtc(), message.getContentType(), scientist != null ? scientist.get("firstName") : "", scientist != null ? scientist.get("name") : ""); } receiver.completeAsync(message.getLockToken()); } } catch (Exception e) { currentTask.completeExceptionally(e); } } currentTask.complete(null); }); return currentTask; } catch (Exception e) { currentTask.completeExceptionally(e); } return currentTask; }
From source file:com.hurence.logisland.engine.vanilla.stream.amqp.AmqpClientPipelineStream.java
/**
 * Establishes the AMQP connection and, once it is open, wires up the sender (write topic)
 * and receiver (read topic) links, including the per-message processing pipeline.
 *
 * @return future completed with the open {@link ProtonConnection}, or exceptionally if
 *         the connect attempt fails
 */
private CompletableFuture<ProtonConnection> setupConnection() {
    CompletableFuture<ProtonConnection> completableFuture = new CompletableFuture<>();
    String hostname = streamContext.getPropertyValue(StreamOptions.CONNECTION_HOST).asString();
    int port = streamContext.getPropertyValue(StreamOptions.CONNECTION_PORT).asInteger();
    int credits = streamContext.getPropertyValue(StreamOptions.LINK_CREDITS).asInteger();
    String user = streamContext.getPropertyValue(StreamOptions.CONNECTION_AUTH_USERNAME).asString();
    String password = streamContext.getPropertyValue(StreamOptions.CONNECTION_AUTH_PASSWORD).asString();
    // Credentials present -> SASL PLAIN; otherwise fall back to mutual-TLS (SASL EXTERNAL) when configured.
    if (user != null && password != null) {
        options.addEnabledSaslMechanism("PLAIN");
    } else if (streamContext.getPropertyValue(StreamOptions.CONNECTION_AUTH_TLS_CERT).isSet()) {
        String tlsCert = streamContext.getPropertyValue(StreamOptions.CONNECTION_AUTH_TLS_CERT).asString();
        String tlsKey = streamContext.getPropertyValue(StreamOptions.CONNECTION_AUTH_TLS_KEY).asString();
        String caCert = streamContext.getPropertyValue(StreamOptions.CONNECTION_AUTH_CA_CERT).asString();
        // Empty verification algorithm disables hostname verification for the TLS handshake.
        options.addEnabledSaslMechanism("EXTERNAL").setHostnameVerificationAlgorithm("")
                .setPemKeyCertOptions(new PemKeyCertOptions().addCertPath(new File(tlsCert).getAbsolutePath())
                        .addKeyPath(new File(tlsKey).getAbsolutePath()));
        if (caCert != null) {
            options.setPemTrustOptions(new PemTrustOptions().addCertPath(new File(caCert).getAbsolutePath()));
        }
    }
    protonClient.connect(options, hostname, port, user, password, event -> {
        if (event.failed()) {
            handleConnectionFailure(false);
            completableFuture.completeExceptionally(event.cause());
            return;
        }
        connectionControl.connected();
        // The future is completed as soon as the TCP/SASL connect succeeds; link setup continues below.
        completableFuture.complete(event.result());
        protonConnection = event.result();
        String containerId = streamContext.getPropertyValue(StreamOptions.CONTAINER_ID).asString();
        if (containerId != null) {
            protonConnection.setContainer(containerId);
        }
        protonConnection.closeHandler(x -> {
            handleConnectionFailure(true);
        }).disconnectHandler(x -> {
            handleConnectionFailure(false);
        }).openHandler(onOpen -> {
            //setup the output path
            sender = protonConnection
                    .createSender(streamContext.getPropertyValue(StreamOptions.WRITE_TOPIC).asString());
            sender.setAutoDrained(true);
            sender.setAutoSettle(true);
            sender.open();
            //setup the input path
            receiver = protonConnection
                    .createReceiver(streamContext.getPropertyValue(StreamOptions.READ_TOPIC).asString());
            receiver.setPrefetch(credits);
            receiver.handler((delivery, message) -> {
                try {
                    // Deserialize the incoming AMQP message into a Record (raw key/value when
                    // no deserializer is configured).
                    Record record;
                    if (deserializer == null) {
                        record = RecordUtils.getKeyValueRecord(
                                StringUtils.defaultIfEmpty(message.getSubject(), ""),
                                new String(extractBodyContent(message.getBody())));
                    } else {
                        record = deserializer
                                .deserialize(new ByteArrayInputStream(extractBodyContent(message.getBody())));
                        if (!record.hasField(FieldDictionary.RECORD_KEY)) {
                            record.setField(FieldDictionary.RECORD_KEY, FieldType.STRING, message.getSubject());
                        }
                    }
                    // Run the record through every processor in the stream pipeline.
                    Collection<Record> r = Collections.singleton(record);
                    for (ProcessContext processContext : streamContext.getProcessContexts()) {
                        r = processContext.getProcessor().process(processContext, r);
                    }
                    // Serialize the resulting records into outgoing AMQP messages.
                    List<Message> toAdd = new ArrayList<>();
                    for (Record out : r) {
                        ByteArrayOutputStream byteOutputStream = new ByteArrayOutputStream();
                        serializer.serialize(byteOutputStream, out);
                        Message mo = ProtonHelper.message();
                        if (out.hasField(FieldDictionary.RECORD_KEY)) {
                            mo.setSubject(out.getField(FieldDictionary.RECORD_KEY).asString());
                        }
                        if (StringUtils.isNotBlank(contentType)) {
                            mo.setContentType(contentType);
                        }
                        mo.setMessageId(out.getId());
                        mo.setBody(new Data(Binary.create(ByteBuffer.wrap(byteOutputStream.toByteArray()))));
                        toAdd.add(mo);
                    }
                    toAdd.forEach(sender::send);
                    // Settle the inbound delivery as accepted only after the outputs were sent.
                    delivery.disposition(Accepted.getInstance(), true);
                } catch (Exception e) {
                    // Any processing failure rejects the delivery; the message is not retried here.
                    Rejected rejected = new Rejected();
                    delivery.disposition(rejected, true);
                    getLogger().warn("Unable to process message : " + e.getMessage());
                }
            }).open();
        }).open();
    });
    return completableFuture;
}
From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java
/**
 * Stores a single object in Elasticsearch, returning a future whose supplier yields the
 * document id. On a mapping conflict with an auto-type context, retries the write once
 * against the next auto-generated type.
 *
 * @param new_object         the object to index
 * @param replace_if_present whether an existing document with the same id is overwritten
 * @return future completed with a supplier of the indexed document's id, or exceptionally
 *         on unrecoverable indexing errors
 */
@Override
public CompletableFuture<Supplier<Object>> storeObject(final O new_object, final boolean replace_if_present) {
    try {
        final ReadWriteContext rw_context = getRwContextOrThrow(_state.es_context, "storeObject");
        final IndexRequestBuilder irb = singleObjectIndexRequest(Either.left(rw_context),
                Either.left(new_object), replace_if_present, false);

        // Execute and handle result
        final Function<IndexResponse, Supplier<Object>> success_handler = ir -> {
            return () -> ir.getId();
        };

        // Recursive, so has some hoops to jump through (lambda can't access itself)
        final BiConsumer<Throwable, CompletableFuture<Supplier<Object>>> error_handler = new BiConsumer<Throwable, CompletableFuture<Supplier<Object>>>() {
            @Override
            public void accept(final Throwable error, final CompletableFuture<Supplier<Object>> future) {
                Patterns.match(error).andAct()
                        .when(org.elasticsearch.index.mapper.MapperParsingException.class, mpe -> {
                            final Set<String> fixed_type_fields = rw_context.typeContext().fixed_type_fields();
                            if (!fixed_type_fields.isEmpty()) {
                                // Obtain the field name from the exception (if we fail then drop the record)
                                final String field = getFieldFromParsingException(mpe.getMessage());
                                if ((null == field) || fixed_type_fields.contains(field)) {
                                    future.completeExceptionally(error);
                                    return;
                                }
                            }
                            //(else roll on to...)
                            // Retry against the next auto type; any other type context fails the future.
                            Patterns.match(rw_context.typeContext()).andAct().when(
                                    ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext.class,
                                    auto_context -> {
                                        irb.setType(ElasticsearchContextUtils.getNextAutoType(
                                                auto_context.getPrefix(), irb.request().type()));
                                        ElasticsearchFutureUtils.wrap(irb.execute(), future,
                                                (ir, next_future) -> {
                                                    next_future.complete(success_handler.apply(ir));
                                                }, this);
                                    }).otherwise(() -> future.completeExceptionally(error));
                        }).otherwise(() -> future.completeExceptionally(error));
            }
        };

        return ElasticsearchFutureUtils.wrap(irb.execute(), success_handler, error_handler);
    } catch (Exception e) {
        // Synchronous setup failures (e.g. missing context) are returned as a failed future.
        return FutureUtils.returnError(e);
    }
}
From source file:com.yahoo.pulsar.broker.namespace.NamespaceService.java
/**
 * Picks a candidate broker for the given bundle and completes {@code lookupFuture} with
 * either ownership info (when the local broker acquires the bundle) or a redirect to the
 * chosen remote broker.
 *
 * @param bundle        namespace bundle to find an owner for
 * @param lookupFuture  future completed with the lookup result, or exceptionally on failure
 * @param authoritative whether the leader already assigned the current broker as owner
 */
private void searchForCandidateBroker(NamespaceBundle bundle, CompletableFuture<LookupResult> lookupFuture,
        boolean authoritative) {
    String candidateBroker = null;
    try {
        // check if this is Heartbeat or SLAMonitor namespace
        candidateBroker = checkHeartbeatNamespace(bundle);
        if (candidateBroker == null) {
            String broker = getSLAMonitorBrokerName(bundle);
            // checking if the broker is up and running
            if (broker != null && isBrokerActive(broker)) {
                candidateBroker = broker;
            }
        }
        if (candidateBroker == null) {
            if (!this.loadManager.isCentralized() || pulsar.getLeaderElectionService().isLeader()) {
                candidateBroker = getLeastLoadedFromLoadManager(bundle);
            } else {
                if (authoritative) {
                    // leader broker already assigned the current broker as owner
                    candidateBroker = pulsar.getWebServiceAddress();
                } else {
                    // forward to leader broker to make assignment
                    candidateBroker = pulsar.getLeaderElectionService().getCurrentLeader().getServiceUrl();
                }
            }
        }
    } catch (Exception e) {
        LOG.warn("Error when searching for candidate broker to acquire {}: {}", bundle, e.getMessage(), e);
        lookupFuture.completeExceptionally(e);
        return;
    }

    try {
        checkNotNull(candidateBroker);

        if (pulsar.getWebServiceAddress().equals(candidateBroker)) {
            // Load manager decided that the local broker should try to become the owner
            ownershipCache.tryAcquiringOwnership(bundle).thenAccept(ownerInfo -> {
                if (ownerInfo.isDisabled()) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Namespace bundle {} is currently being unloaded", bundle);
                    }
                    lookupFuture.completeExceptionally(new IllegalStateException(
                            String.format("Namespace bundle %s is currently being unloaded", bundle)));
                } else {
                    // Found owner for the namespace bundle

                    // Schedule the task to pre-load destinations
                    pulsar.loadNamespaceDestinations(bundle);

                    lookupFuture.complete(new LookupResult(ownerInfo));
                }
            }).exceptionally(exception -> {
                LOG.warn("Failed to acquire ownership for namespace bundle {}: ", bundle, exception.getMessage(),
                        exception);
                lookupFuture.completeExceptionally(new PulsarServerException(
                        "Failed to acquire ownership for namespace bundle " + bundle, exception));
                return null;
            });
        } else {
            // Load manager decided some other broker should try to acquire ownership
            if (LOG.isDebugEnabled()) {
                LOG.debug("Redirecting to broker {} to acquire ownership of bundle {}", candidateBroker, bundle);
            }
            // Now setting the redirect url
            createLookupResult(candidateBroker).thenAccept(lookupResult -> lookupFuture.complete(lookupResult))
                    .exceptionally(ex -> {
                        lookupFuture.completeExceptionally(ex);
                        return null;
                    });
        }
    } catch (Exception e) {
        LOG.warn("Error in trying to acquire namespace bundle ownership for {}: {}", bundle, e.getMessage(), e);
        lookupFuture.completeExceptionally(e);
    }
}
From source file:tech.beshu.ror.httpclient.ApacheHttpCoreClient.java
/**
 * Sends the request asynchronously over the Apache async HTTP client and maps the raw
 * response into an {@link RRHttpResponse} (status code plus a lazily-read body stream).
 * <p>
 * POST requests carry the query parameters as a URL-encoded form body; all other methods
 * are sent as GET with the parameters appended to the URI.
 *
 * @param request the request to execute
 * @return future completed with the mapped response, or exceptionally if the HTTP call
 *         fails or is cancelled
 */
@Override
public CompletableFuture<RRHttpResponse> send(RRHttpRequest request) {
    CompletableFuture<HttpResponse> promise = new CompletableFuture<>();
    URI uri;
    HttpRequestBase hcRequest;
    try {
        if (request.getMethod() == HttpMethod.POST) {
            uri = new URIBuilder(request.getUrl().toASCIIString()).build();
            hcRequest = new HttpPost(uri);
            List<NameValuePair> urlParameters = new ArrayList<NameValuePair>();
            request.getQueryParams().entrySet()
                    .forEach(x -> urlParameters.add(new BasicNameValuePair(x.getKey(), x.getValue())));
            ((HttpPost) hcRequest).setEntity(new UrlEncodedFormEntity(urlParameters));
        } else {
            uri = new URIBuilder(request.getUrl().toASCIIString()).addParameters(request.getQueryParams()
                    .entrySet().stream().map(e -> new BasicNameValuePair(e.getKey(), e.getValue()))
                    .collect(Collectors.toList())).build();
            hcRequest = new HttpGet(uri);
        }
    } catch (URISyntaxException e) {
        // NOTE(review): rorException is given only the message string, so the original
        // cause/stack trace is dropped — confirm whether the wrapper can carry a cause.
        throw context.rorException(e.getClass().getSimpleName() + ": " + e.getMessage());
    } catch (UnsupportedEncodingException e) {
        throw context.rorException(e.getClass().getSimpleName() + ": " + e.getMessage());
    }

    request.getHeaders().entrySet().forEach(e -> hcRequest.addHeader(e.getKey(), e.getValue()));

    // Execute under doPrivileged so the HTTP call works when a SecurityManager restricts the caller.
    AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
        hcHttpClient.execute(hcRequest, new FutureCallback<HttpResponse>() {

            public void completed(final HttpResponse hcResponse) {
                int statusCode = hcResponse.getStatusLine().getStatusCode();
                logger.debug("HTTP REQ SUCCESS with status: " + statusCode + " " + request);
                promise.complete(hcResponse);
            }

            public void failed(final Exception ex) {
                logger.debug("HTTP REQ FAILED " + request);
                logger.info("HTTP client failed to connect: " + request + " reason: " + ex.getMessage());
                promise.completeExceptionally(ex);
            }

            public void cancelled() {
                promise.completeExceptionally(new RuntimeException("HTTP REQ CANCELLED: " + request));
            }
        });
        return null;
    });

    // Body content is read lazily by the supplier; IO errors surface as RuntimeException at read time.
    return promise.thenApply(hcResp -> new RRHttpResponse(hcResp.getStatusLine().getStatusCode(), () -> {
        try {
            return hcResp.getEntity().getContent();
        } catch (IOException e) {
            throw new RuntimeException("Cannot read content", e);
        }
    }));
}