List of usage examples for java.util.concurrent.CompletableFuture.completeExceptionally
public boolean completeExceptionally(Throwable ex)
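Before the full examples below, here is a minimal sketch of the pattern they all share: hand a CompletableFuture to the caller, complete it from an asynchronous callback, and call completeExceptionally on the failure path so waiters observe the error. The class, the lookup method, and the executor wiring are hypothetical stand-ins for illustration only; they do not come from any of the source files listed below.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompleteExceptionallyExample {

    // Hypothetical async lookup: the returned future is completed from a
    // worker thread, using completeExceptionally to propagate any failure.
    static CompletableFuture<String> lookup(ExecutorService executor, String key) {
        CompletableFuture<String> future = new CompletableFuture<>();
        executor.submit(() -> {
            try {
                if (key == null) {
                    throw new IllegalArgumentException("key must not be null");
                }
                future.complete("value-for-" + key); // success path
            } catch (Throwable t) {
                // failure path: callers see the exception via join()/get()/exceptionally()
                future.completeExceptionally(t);
            }
        });
        return future;
    }

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        lookup(executor, null)
                .exceptionally(ex -> "fallback after: " + ex.getMessage())
                .thenAccept(System.out::println);
        executor.shutdown();
    }
}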
From source file: io.atomix.cluster.messaging.impl.NettyMessagingService.java

private CompletableFuture<Channel> getChannel(Address address, String messageType) {
    List<CompletableFuture<Channel>> channelPool = getChannelPool(address);
    int offset = getChannelOffset(messageType);

    CompletableFuture<Channel> channelFuture = channelPool.get(offset);
    if (channelFuture == null || channelFuture.isCompletedExceptionally()) {
        synchronized (channelPool) {
            channelFuture = channelPool.get(offset);
            if (channelFuture == null || channelFuture.isCompletedExceptionally()) {
                channelFuture = openChannel(address);
                channelPool.set(offset, channelFuture);
            }
        }
    }

    final CompletableFuture<Channel> future = new CompletableFuture<>();
    final CompletableFuture<Channel> finalFuture = channelFuture;
    finalFuture.whenComplete((channel, error) -> {
        if (error == null) {
            if (!channel.isActive()) {
                CompletableFuture<Channel> currentFuture;
                synchronized (channelPool) {
                    currentFuture = channelPool.get(offset);
                    if (currentFuture == finalFuture) {
                        channelPool.set(offset, null);
                    } else if (currentFuture == null) {
                        currentFuture = openChannel(address);
                        channelPool.set(offset, currentFuture);
                    }
                }

                final ClientConnection connection = clientConnections.remove(channel);
                if (connection != null) {
                    connection.close();
                }

                if (currentFuture == finalFuture) {
                    getChannel(address, messageType).whenComplete((recursiveResult, recursiveError) -> {
                        if (recursiveError == null) {
                            future.complete(recursiveResult);
                        } else {
                            future.completeExceptionally(recursiveError);
                        }
                    });
                } else {
                    currentFuture.whenComplete((recursiveResult, recursiveError) -> {
                        if (recursiveError == null) {
                            future.complete(recursiveResult);
                        } else {
                            future.completeExceptionally(recursiveError);
                        }
                    });
                }
            } else {
                future.complete(channel);
            }
        } else {
            future.completeExceptionally(error);
        }
    });
    return future;
}
From source file: com.yahoo.pulsar.broker.service.BrokerService.java

private CompletableFuture<Topic> createPersistentTopic(final String topic) throws RuntimeException {
    checkTopicNsOwnership(topic);

    final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    DestinationName destinationName = DestinationName.get(topic);
    if (!pulsar.getNamespaceService().isServiceUnitActive(destinationName)) {
        // namespace is being unloaded
        String msg = String.format("Namespace is being unloaded, cannot add topic %s", topic);
        log.warn(msg);
        throw new RuntimeException(new ServiceUnitNotReadyException(msg));
    }

    final CompletableFuture<Topic> topicFuture = new CompletableFuture<>();

    getManagedLedgerConfig(destinationName).thenAccept(config -> {
        // Once we have the configuration, we can proceed with the async open operation
        managedLedgerFactory.asyncOpen(destinationName.getPersistenceNamingEncoding(), config,
                new OpenLedgerCallback() {
                    @Override
                    public void openLedgerComplete(ManagedLedger ledger, Object ctx) {
                        PersistentTopic persistentTopic = new PersistentTopic(topic, ledger,
                                BrokerService.this);

                        CompletableFuture<Void> replicationFuture = persistentTopic.checkReplication();
                        replicationFuture.thenRun(() -> {
                            log.info("Created topic {}", topic);
                            long topicLoadLatencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime())
                                    - topicCreateTimeMs;
                            pulsarStats.recordTopicLoadTimeValue(topic, topicLoadLatencyMs);
                            addTopicToStatsMaps(destinationName, persistentTopic);
                            topicFuture.complete(persistentTopic);
                        });
                        replicationFuture.exceptionally((ex) -> {
                            log.warn("Replication check failed. Removing topic from topics list {}, {}",
                                    topic, ex);
                            persistentTopic.stopReplProducers().whenComplete((v, exception) -> {
                                topics.remove(topic, topicFuture);
                                topicFuture.completeExceptionally(ex);
                            });
                            return null;
                        });
                    }

                    @Override
                    public void openLedgerFailed(ManagedLedgerException exception, Object ctx) {
                        log.warn("Failed to create topic {}", topic, exception);
                        topics.remove(topic, topicFuture);
                        topicFuture.completeExceptionally(new PersistenceException(exception));
                    }
                }, null);
    }).exceptionally((exception) -> {
        log.warn("[{}] Failed to get topic configuration: {}", topic, exception.getMessage(), exception);
        topics.remove(topic, topicFuture);
        topicFuture.completeExceptionally(exception);
        return null;
    });

    return topicFuture;
}
From source file: io.atomix.cluster.messaging.impl.NettyMessagingService.java

private CompletableFuture<Void> startAcceptingConnections() {
    CompletableFuture<Void> future = new CompletableFuture<>();
    ServerBootstrap b = new ServerBootstrap();
    b.option(ChannelOption.SO_REUSEADDR, true);
    b.option(ChannelOption.SO_BACKLOG, 128);
    b.childOption(ChannelOption.WRITE_BUFFER_WATER_MARK, new WriteBufferWaterMark(8 * 1024, 32 * 1024));
    b.childOption(ChannelOption.SO_RCVBUF, 1024 * 1024);
    b.childOption(ChannelOption.SO_SNDBUF, 1024 * 1024);
    b.childOption(ChannelOption.SO_KEEPALIVE, true);
    b.childOption(ChannelOption.TCP_NODELAY, true);
    b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    b.group(serverGroup, clientGroup);
    b.channel(serverChannelClass);
    if (enableNettyTls) {
        b.childHandler(new SslServerCommunicationChannelInitializer());
    } else {
        b.childHandler(new BasicChannelInitializer());
    }

    // Bind and start to accept incoming connections.
    b.bind(localAddress.port()).addListener((ChannelFutureListener) f -> {
        if (f.isSuccess()) {
            log.info("{} accepting incoming connections on port {}", localAddress.address(true),
                    localAddress.port());
            serverChannel = f.channel();
            future.complete(null);
        } else {
            log.warn("{} failed to bind to port {} due to {}", localAddress.address(true),
                    localAddress.port(), f.cause());
            future.completeExceptionally(f.cause());
        }
    });
    return future;
}
From source file: co.runrightfast.vertx.orientdb.verticle.OrientDBVerticleTest.java

private <A extends com.google.protobuf.Message> Handler<AsyncResult<Message<A>>> responseHandler(
        final CompletableFuture future, final Class<A> messageType) {
    return result -> {
        if (result.succeeded()) {
            try {
                checkState(result.result().headers().contains(MessageHeader.FROM_ADDRESS.header));
                checkState(result.result().headers().contains(MessageHeader.MESSAGE_ID.header));
                checkState(result.result().headers().contains(MessageHeader.MESSAGE_TIMESTAMP.header));
                log.logp(INFO, getClass().getName(),
                        String.format("responseHandler::%s::headers", messageType.getName()),
                        String.format(
                                "result.result().headers().contains(MessageHeader.FROM_ADDRESS.header) = %s",
                                result.result().headers().contains(MessageHeader.FROM_ADDRESS.header)));
                log.logp(INFO, getClass().getName(),
                        String.format("responseHandler::%s::headers", messageType.getName()),
                        String.format(
                                "result.result().headers().contains(MessageHeader.MESSAGE_ID.header) = %s",
                                result.result().headers().contains(MessageHeader.MESSAGE_ID.header)));
                log.logp(INFO, getClass().getName(),
                        String.format("responseHandler::%s::headers", messageType.getName()),
                        String.format(
                                "result.result().headers().contains(MessageHeader.MESSAGE_TIMESTAMP.header) = %s",
                                result.result().headers().contains(MessageHeader.MESSAGE_TIMESTAMP.header)));
                log.logp(INFO, getClass().getName(),
                        String.format("responseHandler::%s::headers", messageType.getName()),
                        JsonUtils.toVertxJsonObject(VertxUtils.toJsonObject(result.result().headers()))
                                .encodePrettily());
                log.logp(INFO, getClass().getName(),
                        String.format("responseHandler::%s::message", messageType.getName()),
                        JsonUtils.toVertxJsonObject(ProtobufUtils.protobuMessageToJson(result.result().body()))
                                .encodePrettily());
                future.complete(result.result().body());
            } catch (final Throwable e) {
                future.completeExceptionally(e);
            }
        } else {
            log.logp(SEVERE, getClass().getName(),
                    String.format("responseHandler.failure::%s", messageType.getName()), "request failed",
                    result.cause());
            future.completeExceptionally(result.cause());
        }
    };
}
From source file: com.yahoo.pulsar.broker.namespace.NamespaceService.java

/**
 * 1. split the given bundle into two bundles
 * 2. assign ownership of both the bundles to the current broker
 * 3. update policies with the newly created bundles into LocalZK
 * 4. disable the original bundle and refresh the cache
 *
 * @param bundle
 * @return
 * @throws Exception
 */
public CompletableFuture<Void> splitAndOwnBundle(NamespaceBundle bundle) throws Exception {

    final CompletableFuture<Void> future = new CompletableFuture<>();

    Pair<NamespaceBundles, List<NamespaceBundle>> splittedBundles = bundleFactory.splitBundles(bundle,
            2 /* by default split into 2 */);
    if (splittedBundles != null) {
        checkNotNull(splittedBundles.getLeft());
        checkNotNull(splittedBundles.getRight());
        checkArgument(splittedBundles.getRight().size() == 2, "bundle has to be split in two bundles");
        NamespaceName nsname = bundle.getNamespaceObject();
        try {
            // take ownership of newly split bundles
            for (NamespaceBundle sBundle : splittedBundles.getRight()) {
                checkNotNull(ownershipCache.tryAcquiringOwnership(sBundle));
            }
            updateNamespaceBundles(nsname, splittedBundles.getLeft(),
                    (rc, path, zkCtx, stat) -> pulsar.getOrderedExecutor().submit(safeRun(() -> {
                        if (rc == KeeperException.Code.OK.intValue()) {
                            // disable old bundle
                            try {
                                ownershipCache.disableOwnership(bundle);
                                // invalidate cache as zookeeper has new split namespace bundle
                                bundleFactory.invalidateBundleCache(nsname);
                                // update bundled_topic cache for load-report-generation
                                pulsar.getBrokerService().refreshTopicToStatsMaps(bundle);
                                loadManager.setLoadReportForceUpdateFlag();
                                future.complete(null);
                            } catch (Exception e) {
                                String msg1 = format(
                                        "failed to disable bundle %s under namespace [%s] with error %s",
                                        nsname.toString(), bundle.toString(), e.getMessage());
                                LOG.warn(msg1, e);
                                future.completeExceptionally(new ServiceUnitNotReadyException(msg1));
                            }
                        } else {
                            String msg2 = format("failed to update namespace [%s] policies due to %s",
                                    nsname.toString(),
                                    KeeperException.create(KeeperException.Code.get(rc)).getMessage());
                            LOG.warn(msg2);
                            future.completeExceptionally(new ServiceUnitNotReadyException(msg2));
                        }
                    })));
        } catch (Exception e) {
            String msg = format("failed to acquire ownership of split bundle for namespace [%s], %s",
                    nsname.toString(), e.getMessage());
            LOG.warn(msg, e);
            future.completeExceptionally(new ServiceUnitNotReadyException(msg));
        }
    } else {
        String msg = format("bundle %s not found under namespace", bundle.toString());
        future.completeExceptionally(new ServiceUnitNotReadyException(msg));
    }
    return future;
}
From source file: com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleProducer(final CommandProducer cmdProducer) {
    checkArgument(state == State.Connected);
    CompletableFuture<Boolean> authorizationFuture;
    if (service.isAuthorizationEnabled()) {
        authorizationFuture = service.getAuthorizationManager()
                .canProduceAsync(DestinationName.get(cmdProducer.getTopic().toString()), authRole);
    } else {
        authorizationFuture = CompletableFuture.completedFuture(true);
    }

    // Use producer name provided by client if present
    final String producerName = cmdProducer.hasProducerName() ? cmdProducer.getProducerName()
            : service.generateUniqueProducerName();
    final String topicName = cmdProducer.getTopic();
    final long producerId = cmdProducer.getProducerId();
    final long requestId = cmdProducer.getRequestId();

    authorizationFuture.thenApply(isAuthorized -> {
        if (isAuthorized) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Client is authorized to Produce with role {}", remoteAddress, authRole);
            }
            CompletableFuture<Producer> producerFuture = new CompletableFuture<>();
            CompletableFuture<Producer> existingProducerFuture = producers.putIfAbsent(producerId,
                    producerFuture);

            if (existingProducerFuture != null) {
                if (existingProducerFuture.isDone() && !existingProducerFuture.isCompletedExceptionally()) {
                    Producer producer = existingProducerFuture.getNow(null);
                    log.info("[{}] Producer with the same id is already created: {}", remoteAddress, producer);
                    ctx.writeAndFlush(Commands.newProducerSuccess(requestId, producer.getProducerName()));
                    return null;
                } else {
                    // There was an earlier request to create a producer with the same producerId.
                    // This can happen when the client timeout is lower than the broker timeouts.
                    // We need to wait until the previous producer creation request either
                    // completes or fails.
                    ServerError error = !existingProducerFuture.isDone() ? ServerError.ServiceNotReady
                            : getErrorCode(existingProducerFuture);
                    log.warn("[{}][{}] Producer is already present on the connection", remoteAddress,
                            topicName);
                    ctx.writeAndFlush(Commands.newError(requestId, error,
                            "Producer is already present on the connection"));
                    return null;
                }
            }

            log.info("[{}][{}] Creating producer. producerId={}", remoteAddress, topicName, producerId);

            service.getTopic(topicName).thenAccept((Topic topic) -> {
                // Before creating producer, check if backlog quota exceeded on topic
                if (topic.isBacklogQuotaExceeded(producerName)) {
                    IllegalStateException illegalStateException = new IllegalStateException(
                            "Cannot create producer on topic with backlog quota exceeded");
                    BacklogQuota.RetentionPolicy retentionPolicy = topic.getBacklogQuota().getPolicy();
                    if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_request_hold) {
                        ctx.writeAndFlush(Commands.newError(requestId,
                                ServerError.ProducerBlockedQuotaExceededError,
                                illegalStateException.getMessage()));
                    } else if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_exception) {
                        ctx.writeAndFlush(Commands.newError(requestId,
                                ServerError.ProducerBlockedQuotaExceededException,
                                illegalStateException.getMessage()));
                    }
                    producerFuture.completeExceptionally(illegalStateException);
                    producers.remove(producerId, producerFuture);
                    return;
                }

                disableTcpNoDelayIfNeeded(topicName, producerName);

                Producer producer = new Producer(topic, ServerCnx.this, producerId, producerName, authRole);

                try {
                    topic.addProducer(producer);

                    if (isActive()) {
                        if (producerFuture.complete(producer)) {
                            log.info("[{}] Created new producer: {}", remoteAddress, producer);
                            ctx.writeAndFlush(Commands.newProducerSuccess(requestId, producerName));
                            return;
                        } else {
                            // The producer's future was completed before by a close command
                            producer.closeNow();
                            log.info("[{}] Cleared producer created after timeout on client side {}",
                                    remoteAddress, producer);
                        }
                    } else {
                        producer.closeNow();
                        log.info("[{}] Cleared producer created after connection was closed: {}",
                                remoteAddress, producer);
                        producerFuture.completeExceptionally(
                                new IllegalStateException("Producer created after connection was closed"));
                    }
                } catch (BrokerServiceException ise) {
                    log.error("[{}] Failed to add producer to topic {}: {}", remoteAddress, topicName,
                            ise.getMessage());
                    ctx.writeAndFlush(Commands.newError(requestId,
                            BrokerServiceException.getClientErrorCode(ise), ise.getMessage()));
                    producerFuture.completeExceptionally(ise);
                }

                producers.remove(producerId, producerFuture);
            }).exceptionally(exception -> {
                Throwable cause = exception.getCause();
                if (!(cause instanceof ServiceUnitNotReadyException)) {
                    // Do not print stack traces for expected exceptions
                    log.error("[{}] Failed to create topic {}", remoteAddress, topicName, exception);
                }

                // If client timed out, the future would have been completed by subsequent close.
                // Send error back to client, only if not completed already.
                if (producerFuture.completeExceptionally(exception)) {
                    ctx.writeAndFlush(Commands.newError(requestId,
                            BrokerServiceException.getClientErrorCode(cause), cause.getMessage()));
                }
                producers.remove(producerId, producerFuture);
                return null;
            });
        } else {
            String msg = "Client is not authorized to Produce";
            log.warn("[{}] {} with role {}", remoteAddress, msg, authRole);
            ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg));
        }
        return null;
    });
}
From source file: com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleSubscribe(final CommandSubscribe subscribe) {
    checkArgument(state == State.Connected);
    CompletableFuture<Boolean> authorizationFuture;
    if (service.isAuthorizationEnabled()) {
        authorizationFuture = service.getAuthorizationManager()
                .canConsumeAsync(DestinationName.get(subscribe.getTopic()), authRole);
    } else {
        authorizationFuture = CompletableFuture.completedFuture(true);
    }

    final String topicName = subscribe.getTopic();
    final String subscriptionName = subscribe.getSubscription();
    final long requestId = subscribe.getRequestId();
    final long consumerId = subscribe.getConsumerId();
    final SubType subType = subscribe.getSubType();
    final String consumerName = subscribe.getConsumerName();
    final boolean isDurable = subscribe.getDurable();
    final MessageIdImpl startMessageId = subscribe.hasStartMessageId()
            ? new MessageIdImpl(subscribe.getStartMessageId().getLedgerId(),
                    subscribe.getStartMessageId().getEntryId(), subscribe.getStartMessageId().getPartition())
            : null;
    final int priorityLevel = subscribe.hasPriorityLevel() ? subscribe.getPriorityLevel() : 0;

    authorizationFuture.thenApply(isAuthorized -> {
        if (isAuthorized) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Client is authorized to subscribe with role {}", remoteAddress, authRole);
            }

            log.info("[{}] Subscribing on topic {} / {}", remoteAddress, topicName, subscriptionName);

            CompletableFuture<Consumer> consumerFuture = new CompletableFuture<>();
            CompletableFuture<Consumer> existingConsumerFuture = consumers.putIfAbsent(consumerId,
                    consumerFuture);

            if (existingConsumerFuture != null) {
                if (existingConsumerFuture.isDone() && !existingConsumerFuture.isCompletedExceptionally()) {
                    Consumer consumer = existingConsumerFuture.getNow(null);
                    log.info("[{}] Consumer with the same id is already created: {}", remoteAddress, consumer);
                    ctx.writeAndFlush(Commands.newSuccess(requestId));
                    return null;
                } else {
                    // There was an earlier request to create a consumer with the same consumerId.
                    // This can happen when the client timeout is lower than the broker timeouts.
                    // We need to wait until the previous consumer creation request either
                    // completes or fails.
                    log.warn("[{}][{}][{}] Consumer is already present on the connection", remoteAddress,
                            topicName, subscriptionName);
                    ServerError error = !existingConsumerFuture.isDone() ? ServerError.ServiceNotReady
                            : getErrorCode(existingConsumerFuture);
                    ctx.writeAndFlush(Commands.newError(requestId, error,
                            "Consumer is already present on the connection"));
                    return null;
                }
            }

            service.getTopic(topicName)
                    .thenCompose(topic -> topic.subscribe(ServerCnx.this, subscriptionName, consumerId,
                            subType, priorityLevel, consumerName, isDurable, startMessageId))
                    .thenAccept(consumer -> {
                        if (consumerFuture.complete(consumer)) {
                            log.info("[{}] Created subscription on topic {} / {}", remoteAddress, topicName,
                                    subscriptionName);
                            ctx.writeAndFlush(Commands.newSuccess(requestId), ctx.voidPromise());
                        } else {
                            // The consumer future was completed before by a close command
                            try {
                                consumer.close();
                                log.info("[{}] Cleared consumer created after timeout on client side {}",
                                        remoteAddress, consumer);
                            } catch (BrokerServiceException e) {
                                log.warn(
                                        "[{}] Error closing consumer created after timeout on client side {}: {}",
                                        remoteAddress, consumer, e.getMessage());
                            }
                            consumers.remove(consumerId, consumerFuture);
                        }
                    })
                    .exceptionally(exception -> {
                        log.warn("[{}][{}][{}] Failed to create consumer: {}", remoteAddress, topicName,
                                subscriptionName, exception.getCause().getMessage(), exception);

                        // If client timed out, the future would have been completed by subsequent close.
                        // Send error back to client, only if not completed already.
                        if (consumerFuture.completeExceptionally(exception)) {
                            ctx.writeAndFlush(Commands.newError(requestId,
                                    BrokerServiceException.getClientErrorCode(exception.getCause()),
                                    exception.getCause().getMessage()));
                        }
                        consumers.remove(consumerId, consumerFuture);
                        return null;
                    });
        } else {
            String msg = "Client is not authorized to subscribe";
            log.warn("[{}] {} with role {}", remoteAddress, msg, authRole);
            ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg));
        }
        return null;
    });
}
From source file: com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

@Override
public CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> storeObjects(
        final List<O> new_objects, final boolean replace_if_present) {
    try {
        final ReadWriteContext rw_context = getRwContextOrThrow(_state.es_context, "storeObjects");

        final BulkRequestBuilder brb = new_objects.stream()
                .reduce(_state.client.prepareBulk().setConsistencyLevel(WriteConsistencyLevel.ONE)
                        .setRefresh(CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy),
                        (acc, val) -> acc.add(singleObjectIndexRequest(Either.left(rw_context),
                                Either.left(val), replace_if_present, true)),
                        (acc1, acc2) -> {
                            throw new RuntimeException("Internal logic error - Parallel not supported");
                        });

        final BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>> action_handler =
                new BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>>() {
                    // WARNING: mutable/imperative code ahead...
                    long _curr_written = 0;
                    List<Object> _id_list = null;
                    HashMap<String, String> _mapping_failures = null;

                    @Override
                    public void accept(final BulkResponse result,
                            final CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> future) {
                        if (result.hasFailures() && (rw_context
                                .typeContext() instanceof ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext)) {
                            final ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext auto_context =
                                    (ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext) rw_context
                                            .typeContext();
                            // Recursive builder in case I need to build a second batch of docs
                            BulkRequestBuilder brb2 = null;

                            if (null == _id_list) {
                                _id_list = new LinkedList<Object>();
                            }
                            HashMap<String, String> temp_mapping_failures = null;
                            final Iterator<BulkItemResponse> it = result.iterator();
                            while (it.hasNext()) {
                                final BulkItemResponse bir = it.next();
                                if (bir.isFailed()) {
                                    if (bir.getFailure().getMessage().startsWith("MapperParsingException")) {
                                        final Set<String> fixed_type_fields = rw_context.typeContext()
                                                .fixed_type_fields();
                                        if (!fixed_type_fields.isEmpty()) {
                                            // Obtain the field name from the exception (if we fail then drop the record)
                                            final String field = getFieldFromParsingException(
                                                    bir.getFailure().getMessage());
                                            if ((null == field) || fixed_type_fields.contains(field)) {
                                                continue;
                                            }
                                        }
                                        //(else roll on to...)

                                        // OK this is the case where I might be able to apply auto types:
                                        if (null == brb2) {
                                            brb2 = _state.client.prepareBulk()
                                                    .setConsistencyLevel(WriteConsistencyLevel.ONE)
                                                    .setRefresh(CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy);
                                        }
                                        String failed_json = null;
                                        if (null == _mapping_failures) {
                                            // first time through, use item id to grab the objects from the original request
                                            if (null == temp_mapping_failures) {
                                                temp_mapping_failures = new HashMap<String, String>();
                                            }
                                            final ActionRequest<?> ar = brb.request().requests()
                                                    .get(bir.getItemId());
                                            if (ar instanceof IndexRequest) {
                                                IndexRequest ir = (IndexRequest) ar;
                                                failed_json = ir.source().toUtf8();
                                                temp_mapping_failures.put(bir.getId(), failed_json);
                                            }
                                        } else {
                                            // have already grabbed all the failure _ids and stuck in a map
                                            failed_json = _mapping_failures.get(bir.getId());
                                        }
                                        if (null != failed_json) {
                                            brb2.add(singleObjectIndexRequest(
                                                    Either.right(Tuples._2T(bir.getIndex(),
                                                            ElasticsearchContextUtils.getNextAutoType(
                                                                    auto_context.getPrefix(), bir.getType()))),
                                                    Either.right(Tuples._2T(bir.getId(), failed_json)),
                                                    false, true));
                                        }
                                    }
                                    // Ugh otherwise just silently fail I guess?
                                    //(should I also look for transient errors and resubmit them after a pause?!)
                                } else { // (this item worked)
                                    _id_list.add(bir.getId());
                                    _curr_written++;
                                }
                            }
                            if (null != brb2) { // found mapping errors to retry with
                                if (null == _mapping_failures) // (first level of recursion)
                                    _mapping_failures = temp_mapping_failures;

                                // (note that if brb2.request().requests().isEmpty() this is an internal logic error, so it's OK to throw)
                                ElasticsearchFutureUtils.wrap(brb2.execute(), future, this,
                                        (error, future2) -> {
                                            future2.completeExceptionally(error);
                                        });
                            } else { // relative success, plus we've built the list anyway
                                future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                            }
                        } else { // No errors with this iteration of the bulk request
                            _curr_written += result.getItems().length;
                            if (null == _id_list) {
                                // This is the first bulk request, no recursion on failures,
                                // so can lazily create the list in case it isn't needed
                                final Supplier<List<Object>> get_objects = () -> {
                                    return StreamSupport.stream(result.spliterator(), false)
                                            .filter(bir -> !bir.isFailed()).map(bir -> bir.getId())
                                            .collect(Collectors.toList());
                                };
                                final Supplier<Long> get_count_workaround = () -> {
                                    return StreamSupport.stream(result.spliterator(), false)
                                            .filter(bir -> !bir.isFailed()).collect(Collectors.counting());
                                };
                                get_count_workaround.get();
                                future.complete(Tuples._2T(get_objects, get_count_workaround));
                            } else {
                                // have already calculated everything so just return it
                                future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                            }
                        }
                    }
                };

        return ElasticsearchFutureUtils.wrap(brb.execute(),
                new CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>(), action_handler,
                (error, future) -> {
                    future.completeExceptionally(error);
                });
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
}
From source file: org.apache.bookkeeper.bookie.BookieWriteToJournalTest.java

/**
 * Test that Bookie correctly calls Journal.forceLedger and is able to return the correct
 * LastAddPersisted entry id.
 */
@Test
public void testForceLedger() throws Exception {
    File journalDir = tempDir.newFolder();
    Bookie.checkDirectoryStructure(Bookie.getCurrentDirectory(journalDir));
    File ledgerDir = tempDir.newFolder();
    Bookie.checkDirectoryStructure(Bookie.getCurrentDirectory(ledgerDir));
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setJournalDirName(journalDir.getPath()).setLedgerDirNames(new String[] { ledgerDir.getPath() });
    Bookie b = new Bookie(conf);
    b.start();
    long ledgerId = 1;
    long entryId = 0;
    Object expectedCtx = "foo";
    byte[] masterKey = new byte[64];
    CompletableFuture<Long> latchForceLedger1 = new CompletableFuture<>();
    CompletableFuture<Long> latchForceLedger2 = new CompletableFuture<>();
    CompletableFuture<Long> latchAddEntry = new CompletableFuture<>();
    final ByteBuf data = buildEntry(ledgerId, entryId, -1);
    final long expectedEntryId = entryId;
    b.forceLedger(ledgerId, (int rc, long ledgerId1, long entryId1, BookieSocketAddress addr, Object ctx) -> {
        if (rc != BKException.Code.OK) {
            latchForceLedger1.completeExceptionally(org.apache.bookkeeper.client.BKException.create(rc));
            return;
        }
        complete(latchForceLedger1, null);
    }, expectedCtx);
    result(latchForceLedger1);

    b.addEntry(data, true /* ackBeforeSync */,
            (int rc, long ledgerId1, long entryId1, BookieSocketAddress addr, Object ctx) -> {
                if (rc != BKException.Code.OK) {
                    latchAddEntry.completeExceptionally(org.apache.bookkeeper.client.BKException.create(rc));
                    return;
                }
                latchAddEntry.complete(entryId);
            }, expectedCtx, masterKey);
    assertEquals(expectedEntryId, result(latchAddEntry).longValue());

    // issue a new "forceLedger"
    b.forceLedger(ledgerId, (int rc, long ledgerId1, long entryId1, BookieSocketAddress addr, Object ctx) -> {
        if (rc != BKException.Code.OK) {
            latchForceLedger2.completeExceptionally(org.apache.bookkeeper.client.BKException.create(rc));
            return;
        }
        complete(latchForceLedger2, null);
    }, expectedCtx);
    result(latchForceLedger2);
    b.shutdown();
}
From source file: org.apache.bookkeeper.client.LedgerHandle.java

/**
 * {@inheritDoc}
 */
@Override
public CompletableFuture<Void> force() {
    CompletableFuture<Void> result = new CompletableFuture<>();
    ForceLedgerOp op = new ForceLedgerOp(this, clientCtx.getBookieClient(), getCurrentEnsemble(), result);
    boolean wasClosed = false;
    synchronized (this) {
        // synchronized on this to ensure that
        // the ledger isn't closed between checking and
        // updating lastAddPushed
        if (!isHandleWritable()) {
            wasClosed = true;
        }
    }
    if (wasClosed) {
        // make sure the callback is triggered in main worker pool
        try {
            clientCtx.getMainWorkerPool().executeOrdered(ledgerId, new SafeRunnable() {
                @Override
                public void safeRun() {
                    LOG.warn("Force() attempted on a closed ledger: {}", ledgerId);
                    result.completeExceptionally(new BKException.BKLedgerClosedException());
                }

                @Override
                public String toString() {
                    return String.format("force(lid=%d)", ledgerId);
                }
            });
        } catch (RejectedExecutionException e) {
            result.completeExceptionally(new BKException.BKInterruptedException());
        }
        return result;
    }

    // early exit: no write has been issued yet
    if (pendingAddsSequenceHead == INVALID_ENTRY_ID) {
        clientCtx.getMainWorkerPool().executeOrdered(ledgerId, new SafeRunnable() {
            @Override
            public void safeRun() {
                FutureUtils.complete(result, null);
            }

            @Override
            public String toString() {
                return String.format("force(lid=%d)", ledgerId);
            }
        });
        return result;
    }

    try {
        clientCtx.getMainWorkerPool().executeOrdered(ledgerId, op);
    } catch (RejectedExecutionException e) {
        result.completeExceptionally(new BKException.BKInterruptedException());
    }
    return result;
}