Usage examples for java.util.concurrent.CompletableFuture.complete
public boolean complete(T value)
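complete(T value) transitions the future to a completed state if it is not already completed, and returns true only for the call that actually performed the transition; later calls to complete or completeExceptionally return false and leave the recorded result untouched. Before the real-world excerpts below, here is a minimal, self-contained sketch of that behavior using only the JDK (the single-thread executor and the class name CompleteExample are illustrative, not taken from any of the projects quoted below):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CompleteExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        CompletableFuture<String> future = new CompletableFuture<>();

        // Complete the future from another thread once the (simulated) work is done.
        pool.submit(() -> {
            boolean completed = future.complete("result"); // true: this call set the value
            boolean again = future.complete("ignored");    // false: already completed
            System.out.println("first complete() = " + completed + ", second = " + again);
        });

        // get() blocks until some thread calls complete() or completeExceptionally().
        System.out.println("value = " + future.get(5, TimeUnit.SECONDS));
        pool.shutdown();
    }
}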
From source file: org.apache.pulsar.broker.namespace.NamespaceService.java
/**
 * 1. split the given bundle into two bundles
 * 2. assign ownership of both the bundles to current broker
 * 3. update policies with newly created bundles into LocalZK
 * 4. disable original bundle and refresh the cache
 *
 * @param bundle
 * @return
 * @throws Exception
 */
public CompletableFuture<Void> splitAndOwnBundle(NamespaceBundle bundle) throws Exception {
    final CompletableFuture<Void> future = new CompletableFuture<>();

    Pair<NamespaceBundles, List<NamespaceBundle>> splittedBundles = bundleFactory.splitBundles(bundle,
            2 /* by default split into 2 */);

    if (splittedBundles != null) {
        checkNotNull(splittedBundles.getLeft());
        checkNotNull(splittedBundles.getRight());
        checkArgument(splittedBundles.getRight().size() == 2, "bundle has to be split in two bundles");

        NamespaceName nsname = bundle.getNamespaceObject();
        try {
            // take ownership of newly split bundles
            for (NamespaceBundle sBundle : splittedBundles.getRight()) {
                checkNotNull(ownershipCache.tryAcquiringOwnership(sBundle));
            }
            updateNamespaceBundles(nsname, splittedBundles.getLeft(),
                    (rc, path, zkCtx, stat) -> pulsar.getOrderedExecutor().submit(safeRun(() -> {
                        if (rc == KeeperException.Code.OK.intValue()) {
                            // disable old bundle
                            try {
                                ownershipCache.disableOwnership(bundle);
                                // invalidate cache as zookeeper has new split namespace bundle
                                bundleFactory.invalidateBundleCache(nsname);
                                // update bundled_topic cache for load-report-generation
                                pulsar.getBrokerService().refreshTopicToStatsMaps(bundle);
                                loadManager.get().setLoadReportForceUpdateFlag();
                                future.complete(null);
                            } catch (Exception e) {
                                String msg1 = format(
                                        "failed to disable bundle %s under namespace [%s] with error %s",
                                        nsname.toString(), bundle.toString(), e.getMessage());
                                LOG.warn(msg1, e);
                                future.completeExceptionally(new ServiceUnitNotReadyException(msg1));
                            }
                        } else {
                            String msg2 = format("failed to update namespace [%s] policies due to %s",
                                    nsname.toString(),
                                    KeeperException.create(KeeperException.Code.get(rc)).getMessage());
                            LOG.warn(msg2);
                            future.completeExceptionally(new ServiceUnitNotReadyException(msg2));
                        }
                    })));
        } catch (Exception e) {
            String msg = format("failed to aquire ownership of split bundle for namespace [%s], %s",
                    nsname.toString(), e.getMessage());
            LOG.warn(msg, e);
            future.completeExceptionally(new ServiceUnitNotReadyException(msg));
        }
    } else {
        String msg = format("bundle %s not found under namespace", bundle.toString());
        future.completeExceptionally(new ServiceUnitNotReadyException(msg));
    }
    return future;
}
From source file: org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutor.java
/**
 * Executors are only closed if they were not supplied externally in the
 * {@link org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutor.Builder}
 */
public CompletableFuture<Void> closeAsync() throws Exception {
    final CompletableFuture<Void> future = new CompletableFuture<>();

    new Thread(() -> {
        // leave pools running if they are supplied externally. let the sender be responsible for shutting them down
        if (!suppliedExecutor) {
            executorService.shutdown();
            try {
                if (!executorService.awaitTermination(180000, TimeUnit.MILLISECONDS))
                    logger.warn("Timeout while waiting for ExecutorService of GremlinExecutor to shutdown.");
            } catch (InterruptedException ie) {
                logger.warn("ExecutorService on GremlinExecutor may not have shutdown properly as shutdown thread terminated early.");
            }
        }

        // calls to shutdown are idempotent so no problems calling it twice if the pool is shared
        if (!suppliedScheduledExecutor) {
            scheduledExecutorService.shutdown();
            try {
                if (!scheduledExecutorService.awaitTermination(180000, TimeUnit.MILLISECONDS))
                    logger.warn("Timeout while waiting for ScheduledExecutorService of GremlinExecutor to shutdown.");
            } catch (InterruptedException ie) {
                logger.warn("ScheduledExecutorService on GremlinExecutor may not have shutdown properly as shutdown thread terminated early.");
            }
        }

        try {
            scriptEngines.close();
        } catch (Exception ex) {
            logger.warn("Error while shutting down the ScriptEngines in the GremlinExecutor", ex);
        }

        future.complete(null);
    }, "gremlin-executor-close").start();

    return future;
}
From source file: org.apache.pulsar.broker.service.persistent.PersistentTopic.java
public CompletableFuture<MessageId> terminate() {
    CompletableFuture<MessageId> future = new CompletableFuture<>();
    ledger.asyncTerminate(new TerminateCallback() {
        @Override
        public void terminateComplete(Position lastCommittedPosition, Object ctx) {
            producers.forEach(Producer::disconnect);
            subscriptions.forEach((name, sub) -> sub.topicTerminated());

            PositionImpl lastPosition = (PositionImpl) lastCommittedPosition;
            MessageId messageId = new MessageIdImpl(lastPosition.getLedgerId(), lastPosition.getEntryId(), -1);

            log.info("[{}] Topic terminated at {}", getName(), messageId);
            future.complete(messageId);
        }

        @Override
        public void terminateFailed(ManagedLedgerException exception, Object ctx) {
            future.completeExceptionally(exception);
        }
    }, null);

    return future;
}
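The terminate() example above is the smallest instance of the pattern most of these excerpts follow: create the CompletableFuture up front, hand a callback to an asynchronous API, complete the future on the success path, and completeExceptionally on the failure path, so callers can compose with thenApply/thenCompose instead of nesting callbacks. A hedged, generic sketch of that adapter pattern follows; LookupService, LookupCallback, and LookupAdapter are hypothetical names used only for illustration, not types from Pulsar or any project quoted here:

import java.util.concurrent.CompletableFuture;

// Hypothetical callback-style API, declared only to illustrate the adapter pattern.
interface LookupCallback {
    void onSuccess(String address);
    void onFailure(Throwable cause);
}

interface LookupService {
    void lookupAsync(String topic, LookupCallback callback);
}

class LookupAdapter {
    // Wrap the callback API so callers can compose with thenApply/thenCompose/exceptionally.
    static CompletableFuture<String> lookup(LookupService service, String topic) {
        CompletableFuture<String> future = new CompletableFuture<>();
        service.lookupAsync(topic, new LookupCallback() {
            @Override
            public void onSuccess(String address) {
                future.complete(address);            // success path completes normally
            }

            @Override
            public void onFailure(Throwable cause) {
                future.completeExceptionally(cause); // failure path surfaces the exception to callers
            }
        });
        return future;
    }
}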
From source file: org.apache.distributedlog.lock.ZKSessionLock.java
@Override
public CompletableFuture<LockWaiter> asyncTryLock(final long timeout, final TimeUnit unit) {
    final CompletableFuture<String> result = new CompletableFuture<String>();
    final boolean wait = DistributedLogConstants.LOCK_IMMEDIATE != timeout;
    if (wait) {
        asyncTryLock(wait, result);
    } else {
        // try to check locks first
        zk.getChildren(lockPath, null, new AsyncCallback.Children2Callback() {
            @Override
            public void processResult(final int rc, String path, Object ctx, final List<String> children,
                                      Stat stat) {
                lockStateExecutor.executeOrdered(lockPath, new SafeRunnable() {
                    @Override
                    public void safeRun() {
                        if (!lockState.inState(State.INIT)) {
                            result.completeExceptionally(new LockStateChangedException(lockPath, lockId,
                                    State.INIT, lockState.getState()));
                            return;
                        }
                        if (KeeperException.Code.OK.intValue() != rc) {
                            result.completeExceptionally(KeeperException.create(KeeperException.Code.get(rc)));
                            return;
                        }

                        FailpointUtils.checkFailPointNoThrow(FailpointUtils.FailPointName.FP_LockTryAcquire);

                        Collections.sort(children, MEMBER_COMPARATOR);
                        if (children.size() > 0) {
                            asyncParseClientID(zk, lockPath, children.get(0))
                                    .whenCompleteAsync(new FutureEventListener<Pair<String, Long>>() {
                                        @Override
                                        public void onSuccess(Pair<String, Long> owner) {
                                            if (!checkOrClaimLockOwner(owner, result)) {
                                                acquireFuture.complete(false);
                                            }
                                        }

                                        @Override
                                        public void onFailure(final Throwable cause) {
                                            result.completeExceptionally(cause);
                                        }
                                    }, lockStateExecutor.chooseThread(lockPath));
                        } else {
                            asyncTryLock(wait, result);
                        }
                    }
                });
            }
        }, null);
    }

    final CompletableFuture<Boolean> waiterAcquireFuture = FutureUtils.createFuture();
    waiterAcquireFuture.whenComplete((value, cause) -> acquireFuture.completeExceptionally(cause));
    return result.thenApply(new Function<String, LockWaiter>() {
        @Override
        public LockWaiter apply(final String currentOwner) {
            final Exception acquireException = new OwnershipAcquireFailedException(lockPath, currentOwner);
            FutureUtils.within(acquireFuture, timeout, unit, acquireException, lockStateExecutor, lockPath)
                    .whenComplete(new FutureEventListener<Boolean>() {

                        @Override
                        public void onSuccess(Boolean acquired) {
                            completeOrFail(acquireException);
                        }

                        @Override
                        public void onFailure(final Throwable acquireCause) {
                            completeOrFail(acquireException);
                        }

                        private void completeOrFail(final Throwable acquireCause) {
                            if (isLockHeld()) {
                                waiterAcquireFuture.complete(true);
                            } else {
                                asyncUnlock().whenComplete(new FutureEventListener<Void>() {
                                    @Override
                                    public void onSuccess(Void value) {
                                        waiterAcquireFuture.completeExceptionally(acquireCause);
                                    }

                                    @Override
                                    public void onFailure(Throwable cause) {
                                        waiterAcquireFuture.completeExceptionally(acquireCause);
                                    }
                                });
                            }
                        }
                    });
            return new LockWaiter(lockId.getLeft(), currentOwner, waiterAcquireFuture);
        }
    });
}
From source file: org.apache.pulsar.broker.admin.impl.PersistentTopicsBase.java
public static CompletableFuture<PartitionedTopicMetadata> getPartitionedTopicMetadata(PulsarService pulsar,
        String clientAppId, String originalPrincipal, AuthenticationDataSource authenticationData,
        TopicName topicName) {
    CompletableFuture<PartitionedTopicMetadata> metadataFuture = new CompletableFuture<>();
    try {
        // (1) authorize client
        try {
            checkAuthorization(pulsar, topicName, clientAppId, authenticationData);
        } catch (RestException e) {
            try {
                validateAdminAccessForTenant(pulsar, clientAppId, originalPrincipal, topicName.getTenant());
            } catch (RestException authException) {
                log.warn("Failed to authorize {} on cluster {}", clientAppId, topicName.toString());
                throw new PulsarClientException(String.format("Authorization failed %s on topic %s with error %s",
                        clientAppId, topicName.toString(), authException.getMessage()));
            }
        } catch (Exception ex) {
            // throw without wrapping to PulsarClientException that considers: unknown error marked as internal
            // server error
            log.warn("Failed to authorize {} on cluster {} with unexpected exception {}", clientAppId,
                    topicName.toString(), ex.getMessage(), ex);
            throw ex;
        }

        String path = path(PARTITIONED_TOPIC_PATH_ZNODE, topicName.getNamespace(),
                topicName.getDomain().toString(), topicName.getEncodedLocalName());

        // validates global-namespace contains local/peer cluster: if peer/local cluster present then lookup can
        // serve/redirect request else fail partitioned-metadata-request so, client fails while creating
        // producer/consumer
        checkLocalOrGetPeerReplicationCluster(pulsar, topicName.getNamespaceObject())
                .thenCompose(res -> fetchPartitionedTopicMetadataAsync(pulsar, path)).thenAccept(metadata -> {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Total number of partitions for topic {} is {}", clientAppId, topicName,
                                metadata.partitions);
                    }
                    metadataFuture.complete(metadata);
                }).exceptionally(ex -> {
                    metadataFuture.completeExceptionally(ex.getCause());
                    return null;
                });
    } catch (Exception ex) {
        metadataFuture.completeExceptionally(ex);
    }
    return metadataFuture;
}
From source file: org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutor.java
/**
 * Evaluate a script and allow for the submission of alteration to the entire evaluation execution lifecycle.
 *
 * @param script the script to evaluate
 * @param language the language to evaluate it in
 * @param boundVars the bindings to evaluate in the context of the script
 * @param lifeCycle a set of functions that can be applied at various stages of the evaluation process
 */
public CompletableFuture<Object> eval(final String script, final String language, final Bindings boundVars,
        final LifeCycle lifeCycle) {
    final String lang = Optional.ofNullable(language).orElse("gremlin-groovy");

    logger.debug("Preparing to evaluate script - {} - in thread [{}]", script, Thread.currentThread().getName());

    final Bindings bindings = new SimpleBindings();
    bindings.putAll(globalBindings);
    bindings.putAll(boundVars);

    final CompletableFuture<Object> evaluationFuture = new CompletableFuture<>();
    final FutureTask<Void> f = new FutureTask<>(() -> {
        try {
            lifeCycle.getBeforeEval().orElse(beforeEval).accept(bindings);

            logger.debug("Evaluating script - {} - in thread [{}]", script, Thread.currentThread().getName());

            final Object o = scriptEngines.eval(script, bindings, lang);

            // apply a transformation before sending back the result - useful when trying to force serialization
            // in the same thread that the eval took place given ThreadLocal nature of graphs as well as some
            // transactional constraints
            final Object result = lifeCycle.getTransformResult().isPresent()
                    ? lifeCycle.getTransformResult().get().apply(o)
                    : o;

            // a mechanism for taking the final result and doing something with it in the same thread, but
            // AFTER the eval and transform are done and that future completed. this provides a final means
            // for working with the result in the same thread as it was eval'd
            if (lifeCycle.getWithResult().isPresent())
                lifeCycle.getWithResult().get().accept(result);

            lifeCycle.getAfterSuccess().orElse(afterSuccess).accept(bindings);

            // the evaluationFuture must be completed after all processing as an exception in lifecycle events
            // that must raise as an exception to the caller who has the returned evaluationFuture. in other words,
            // if it occurs before this point, then the handle() method won't be called again if there is an
            // exception that ends up below trying to completeExceptionally()
            evaluationFuture.complete(result);
        } catch (Throwable ex) {
            final Throwable root = null == ex.getCause() ? ex : ExceptionUtils.getRootCause(ex);

            // thread interruptions will typically come as the result of a timeout, so in those cases,
            // check for that situation and convert to TimeoutException
            if (root instanceof InterruptedException)
                evaluationFuture.completeExceptionally(new TimeoutException(String.format(
                        "Script evaluation exceeded the configured 'scriptEvaluationTimeout' threshold of %s ms for request [%s]: %s",
                        scriptEvaluationTimeout, script, root.getMessage())));
            else {
                lifeCycle.getAfterFailure().orElse(afterFailure).accept(bindings, root);
                evaluationFuture.completeExceptionally(root);
            }
        }

        return null;
    });

    executorService.execute(f);

    if (scriptEvaluationTimeout > 0) {
        // Schedule a timeout in the thread pool for future execution
        final ScheduledFuture<?> sf = scheduledExecutorService.schedule(() -> {
            logger.warn("Timing out script - {} - in thread [{}]", script, Thread.currentThread().getName());

            if (!f.isDone()) {
                lifeCycle.getAfterTimeout().orElse(afterTimeout).accept(bindings);
                f.cancel(true);
            }
        }, scriptEvaluationTimeout, TimeUnit.MILLISECONDS);

        // Cancel the scheduled timeout if the eval future is complete or the script evaluation failed
        // with exception
        evaluationFuture.handleAsync((v, t) -> {
            logger.debug("Killing scheduled timeout on script evaluation as the eval completed (possibly with exception).");
            return sf.cancel(true);
        });
    }

    return evaluationFuture;
}
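The timeout handling in the eval example above pairs a ScheduledExecutorService task with completeExceptionally, cancelling the scheduled task once the future completes. A stripped-down, hedged sketch of that idiom follows; the TimeoutSketch class and withTimeout method are illustrative names, not GremlinExecutor API. (On Java 9+, orTimeout()/completeOnTimeout() achieve the same effect without a manual scheduler.)

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class TimeoutSketch {
    // Fail the future if it does not complete within timeoutMs, and drop the
    // pending timeout task as soon as the future completes either way.
    static <T> CompletableFuture<T> withTimeout(CompletableFuture<T> future, long timeoutMs,
                                                ScheduledExecutorService scheduler) {
        ScheduledFuture<?> timeoutTask = scheduler.schedule(
                () -> future.completeExceptionally(
                        new TimeoutException("evaluation exceeded " + timeoutMs + " ms")),
                timeoutMs, TimeUnit.MILLISECONDS);
        // completeExceptionally on an already-completed future is a no-op, so this is race-safe.
        future.whenComplete((value, error) -> timeoutTask.cancel(false));
        return future;
    }
}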
From source file: co.runrightfast.vertx.orientdb.verticle.OrientDBVerticleTest.java
private <A extends com.google.protobuf.Message> Handler<AsyncResult<Message<A>>> responseHandler(
        final CompletableFuture future, final Class<A> messageType) {
    return result -> {
        if (result.succeeded()) {
            try {
                checkState(result.result().headers().contains(MessageHeader.FROM_ADDRESS.header));
                checkState(result.result().headers().contains(MessageHeader.MESSAGE_ID.header));
                checkState(result.result().headers().contains(MessageHeader.MESSAGE_TIMESTAMP.header));
                log.logp(INFO, getClass().getName(),
                        String.format("responseHandler::%s::headers", messageType.getName()),
                        String.format("result.result().headers().contains(MessageHeader.FROM_ADDRESS.header) = %s",
                                result.result().headers().contains(MessageHeader.FROM_ADDRESS.header)));
                log.logp(INFO, getClass().getName(),
                        String.format("responseHandler::%s::headers", messageType.getName()),
                        String.format("result.result().headers().contains(MessageHeader.MESSAGE_ID.header) = %s",
                                result.result().headers().contains(MessageHeader.MESSAGE_ID.header)));
                log.logp(INFO, getClass().getName(),
                        String.format("responseHandler::%s::headers", messageType.getName()),
                        String.format("result.result().headers().contains(MessageHeader.MESSAGE_TIMESTAMP.header) = %s",
                                result.result().headers().contains(MessageHeader.MESSAGE_TIMESTAMP.header)));
                log.logp(INFO, getClass().getName(),
                        String.format("responseHandler::%s::headers", messageType.getName()),
                        JsonUtils.toVertxJsonObject(VertxUtils.toJsonObject(result.result().headers()))
                                .encodePrettily());
                log.logp(INFO, getClass().getName(),
                        String.format("responseHandler::%s::message", messageType.getName()),
                        JsonUtils.toVertxJsonObject(ProtobufUtils.protobuMessageToJson(result.result().body()))
                                .encodePrettily());
                future.complete(result.result().body());
            } catch (final Throwable e) {
                future.completeExceptionally(e);
            }
        } else {
            log.logp(SEVERE, getClass().getName(),
                    String.format("responseHandler.failure::%s", messageType.getName()),
                    "request failed", result.cause());
            future.completeExceptionally(result.cause());
        }
    };
}
From source file: org.apache.distributedlog.auditor.DLAuditor.java
private long calculateLedgerSpaceUsage(BookKeeperClient bkc, final ExecutorService executorService)
        throws IOException {
    final AtomicLong totalBytes = new AtomicLong(0);
    final AtomicLong totalEntries = new AtomicLong(0);
    final AtomicLong numLedgers = new AtomicLong(0);

    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bkc.get());

    final CompletableFuture<Void> doneFuture = FutureUtils.createFuture();
    final BookKeeper bk = bkc.get();

    BookkeeperInternalCallbacks.Processor<Long> collector = new BookkeeperInternalCallbacks.Processor<Long>() {
        @Override
        public void process(final Long lid, final AsyncCallback.VoidCallback cb) {
            numLedgers.incrementAndGet();
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    bk.asyncOpenLedgerNoRecovery(lid, BookKeeper.DigestType.CRC32,
                            conf.getBKDigestPW().getBytes(UTF_8),
                            new org.apache.bookkeeper.client.AsyncCallback.OpenCallback() {
                                @Override
                                public void openComplete(int rc, LedgerHandle lh, Object ctx) {
                                    final int cbRc;
                                    if (BKException.Code.OK == rc) {
                                        totalBytes.addAndGet(lh.getLength());
                                        totalEntries.addAndGet(lh.getLastAddConfirmed() + 1);
                                        cbRc = rc;
                                    } else {
                                        cbRc = BKException.Code.ZKException;
                                    }
                                    executorService.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            cb.processResult(cbRc, null, null);
                                        }
                                    });
                                }
                            }, null);
                }
            });
        }
    };
    AsyncCallback.VoidCallback finalCb = new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (BKException.Code.OK == rc) {
                doneFuture.complete(null);
            } else {
                doneFuture.completeExceptionally(BKException.create(rc));
            }
        }
    };
    lm.asyncProcessLedgers(collector, finalCb, null, BKException.Code.OK, BKException.Code.ZKException);
    try {
        doneFuture.get();
        logger.info("calculated {} ledgers\n\ttotal bytes = {}\n\ttotal entries = {}",
                new Object[] { numLedgers.get(), totalBytes.get(), totalEntries.get() });
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on calculating ledger space : ", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) (e.getCause());
        } else {
            throw new IOException("Failed to calculate ledger space : ", e.getCause());
        }
    }
    return totalBytes.get();
}
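The DLAuditor example above blocks on doneFuture.get() and then re-throws the cause of any ExecutionException; the unwrap step matters because get() always wraps whatever was passed to completeExceptionally in an ExecutionException. A compact illustration of that unwrap pattern, using only the JDK (UnwrapSketch and await are illustrative names, not part of DistributedLog):

import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

class UnwrapSketch {
    // Block for a result and surface the original IOException instead of the
    // ExecutionException wrapper that get() adds around completeExceptionally's argument.
    static <T> T await(CompletableFuture<T> future) throws IOException, InterruptedException {
        try {
            return future.get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            }
            throw new IOException("operation failed", e.getCause());
        }
    }
}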
From source file: org.apache.tinkerpop.gremlin.server.GremlinServer.java
/**
 * Start Gremlin Server with {@link Settings} provided to the constructor.
 */
public synchronized CompletableFuture<ServerGremlinExecutor<EventLoopGroup>> start() throws Exception {
    if (serverStarted != null) {
        // server already started - don't get it rolling again
        return serverStarted;
    }

    serverStarted = new CompletableFuture<>();
    final CompletableFuture<ServerGremlinExecutor<EventLoopGroup>> serverReadyFuture = serverStarted;
    try {
        final ServerBootstrap b = new ServerBootstrap();

        // when high value is reached then the channel becomes non-writable and stays like that until the
        // low value is so that there is time to recover
        b.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, settings.writeBufferLowWaterMark);
        b.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, settings.writeBufferHighWaterMark);
        b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

        // fire off any lifecycle scripts that were provided by the user. hooks get initialized during
        // ServerGremlinExecutor initialization
        serverGremlinExecutor.getHooks().forEach(hook -> {
            logger.info("Executing start up {}", LifeCycleHook.class.getSimpleName());
            try {
                hook.onStartUp(new LifeCycleHook.Context(logger));
            } catch (UnsupportedOperationException uoe) {
                // if the user doesn't implement onStartUp the scriptengine will throw
                // this exception. it can safely be ignored.
            }
        });

        final Channelizer channelizer = createChannelizer(settings);
        channelizer.init(serverGremlinExecutor);
        b.group(bossGroup, workerGroup).childHandler(channelizer);
        if (isEpollEnabled) {
            b.channel(EpollServerSocketChannel.class);
        } else {
            b.channel(NioServerSocketChannel.class);
        }

        // bind to host/port and wait for channel to be ready
        b.bind(settings.host, settings.port).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(final ChannelFuture channelFuture) throws Exception {
                if (channelFuture.isSuccess()) {
                    ch = channelFuture.channel();

                    logger.info(
                            "Gremlin Server configured with worker thread pool of {}, gremlin pool of {} and boss thread pool of {}.",
                            settings.threadPoolWorker, settings.gremlinPool, settings.threadPoolBoss);
                    logger.info("Channel started at port {}.", settings.port);

                    serverReadyFuture.complete(serverGremlinExecutor);
                } else {
                    serverReadyFuture.completeExceptionally(new IOException(String.format(
                            "Could not bind to %s and %s - perhaps something else is bound to that address.",
                            settings.host, settings.port)));
                }
            }
        });
    } catch (Exception ex) {
        logger.error("Gremlin Server Error", ex);
        serverReadyFuture.completeExceptionally(ex);
    }

    return serverStarted;
}
From source file: com.yahoo.pulsar.broker.service.ServerCnx.java
@Override
protected void handleSubscribe(final CommandSubscribe subscribe) {
    checkArgument(state == State.Connected);
    CompletableFuture<Boolean> authorizationFuture;
    if (service.isAuthorizationEnabled()) {
        authorizationFuture = service.getAuthorizationManager()
                .canConsumeAsync(DestinationName.get(subscribe.getTopic()), authRole);
    } else {
        authorizationFuture = CompletableFuture.completedFuture(true);
    }
    final String topicName = subscribe.getTopic();
    final String subscriptionName = subscribe.getSubscription();
    final long requestId = subscribe.getRequestId();
    final long consumerId = subscribe.getConsumerId();
    final SubType subType = subscribe.getSubType();
    final String consumerName = subscribe.getConsumerName();
    final boolean isDurable = subscribe.getDurable();
    final MessageIdImpl startMessageId = subscribe.hasStartMessageId()
            ? new MessageIdImpl(subscribe.getStartMessageId().getLedgerId(),
                    subscribe.getStartMessageId().getEntryId(), subscribe.getStartMessageId().getPartition())
            : null;

    final int priorityLevel = subscribe.hasPriorityLevel() ? subscribe.getPriorityLevel() : 0;

    authorizationFuture.thenApply(isAuthorized -> {
        if (isAuthorized) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Client is authorized to subscribe with role {}", remoteAddress, authRole);
            }

            log.info("[{}] Subscribing on topic {} / {}", remoteAddress, topicName, subscriptionName);

            CompletableFuture<Consumer> consumerFuture = new CompletableFuture<>();
            CompletableFuture<Consumer> existingConsumerFuture = consumers.putIfAbsent(consumerId,
                    consumerFuture);

            if (existingConsumerFuture != null) {
                if (existingConsumerFuture.isDone() && !existingConsumerFuture.isCompletedExceptionally()) {
                    Consumer consumer = existingConsumerFuture.getNow(null);
                    log.info("[{}] Consumer with the same id is already created: {}", remoteAddress, consumer);
                    ctx.writeAndFlush(Commands.newSuccess(requestId));
                    return null;
                } else {
                    // There was an early request to create a consumer with same consumerId. This can happen when
                    // client timeout is lower the broker timeouts. We need to wait until the previous consumer
                    // creation request either complete or fails.
                    log.warn("[{}][{}][{}] Consumer is already present on the connection", remoteAddress,
                            topicName, subscriptionName);
                    ServerError error = !existingConsumerFuture.isDone() ? ServerError.ServiceNotReady
                            : getErrorCode(existingConsumerFuture);
                    ctx.writeAndFlush(
                            Commands.newError(requestId, error, "Consumer is already present on the connection"));
                    return null;
                }
            }

            service.getTopic(topicName)
                    .thenCompose(topic -> topic.subscribe(ServerCnx.this, subscriptionName, consumerId,
                            subType, priorityLevel, consumerName, isDurable, startMessageId))
                    .thenAccept(consumer -> {
                        if (consumerFuture.complete(consumer)) {
                            log.info("[{}] Created subscription on topic {} / {}", remoteAddress, topicName,
                                    subscriptionName);
                            ctx.writeAndFlush(Commands.newSuccess(requestId), ctx.voidPromise());
                        } else {
                            // The consumer future was completed before by a close command
                            try {
                                consumer.close();
                                log.info("[{}] Cleared consumer created after timeout on client side {}",
                                        remoteAddress, consumer);
                            } catch (BrokerServiceException e) {
                                log.warn("[{}] Error closing consumer created after timeout on client side {}: {}",
                                        remoteAddress, consumer, e.getMessage());
                            }
                            consumers.remove(consumerId, consumerFuture);
                        }
                    }) //
                    .exceptionally(exception -> {
                        log.warn("[{}][{}][{}] Failed to create consumer: {}", remoteAddress, topicName,
                                subscriptionName, exception.getCause().getMessage(), exception);

                        // If client timed out, the future would have been completed by subsequent close. Send error
                        // back to client, only if not completed already.
                        if (consumerFuture.completeExceptionally(exception)) {
                            ctx.writeAndFlush(Commands.newError(requestId,
                                    BrokerServiceException.getClientErrorCode(exception.getCause()),
                                    exception.getCause().getMessage()));
                        }
                        consumers.remove(consumerId, consumerFuture);
                        return null;
                    });
        } else {
            String msg = "Client is not authorized to subscribe";
            log.warn("[{}] {} with role {}", remoteAddress, msg, authRole);
            ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg));
        }
        return null;
    });
}
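The handleSubscribe example above also shows why complete() returns a boolean: the broker checks consumerFuture.complete(consumer) to learn whether it won the race against a concurrent close/timeout path, and cleans up the consumer if it lost. A minimal, hedged sketch of that race-resolution idiom (RaceExample and onResourceCreated are illustrative names, not Pulsar API):

import java.util.concurrent.CompletableFuture;

class RaceExample {
    // Called when an asynchronously created resource becomes ready. If the future was
    // already completed by another path (timeout, cancellation, connection close),
    // nobody will ever consume this resource, so release it instead of leaking it.
    static void onResourceCreated(CompletableFuture<AutoCloseable> pending, AutoCloseable resource) {
        if (pending.complete(resource)) {
            // We won the race: the waiting caller now owns the resource.
            return;
        }
        try {
            resource.close();
        } catch (Exception e) {
            // cleanup is best-effort; log and ignore in a real system
        }
    }
}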