Example usage for java.util.concurrent CompletableFuture isDone

Introduction

This page collects example usages of the java.util.concurrent CompletableFuture.isDone() method.

Prototype

public boolean isDone() 

Document

Returns true if completed in any fashion: normally, exceptionally, or via cancellation.
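
To illustrate that contract concretely, here is a minimal, self-contained sketch (not taken from the sources below) showing that isDone() returns true after normal completion, exceptional completion, and cancellation alike:

import java.util.concurrent.CompletableFuture;

public class IsDoneDemo {
    public static void main(String[] args) {
        // Normal completion
        CompletableFuture<String> ok = CompletableFuture.completedFuture("value");

        // Exceptional completion
        CompletableFuture<String> failed = new CompletableFuture<>();
        failed.completeExceptionally(new RuntimeException("boom"));

        // Cancellation
        CompletableFuture<String> cancelled = new CompletableFuture<>();
        cancelled.cancel(false);

        // isDone() is true in all three cases; the other predicates distinguish them
        System.out.println(ok.isDone());                       // true
        System.out.println(failed.isDone());                   // true
        System.out.println(failed.isCompletedExceptionally()); // true
        System.out.println(cancelled.isDone());                // true
        System.out.println(cancelled.isCancelled());           // true
    }
}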

Usage

From source file:com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleFlow(CommandFlow flow) {
    checkArgument(state == State.Connected);
    if (log.isDebugEnabled()) {
        log.debug("[{}] Received flow from consumer {} permits: {}", remoteAddress, flow.getConsumerId(),
                flow.getMessagePermits());
    }

    CompletableFuture<Consumer> consumerFuture = consumers.get(flow.getConsumerId());

    if (consumerFuture != null && consumerFuture.isDone() && !consumerFuture.isCompletedExceptionally()) {
        consumerFuture.getNow(null).flowPermits(flow.getMessagePermits());
    }
}
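
This handler shows the guard used throughout the Pulsar examples on this page: isDone() combined with !isCompletedExceptionally() confirms that the future already holds a usable value, so getNow(null) can read it without blocking. A generic sketch of the same idiom (the helper name is hypothetical, not part of the source above):

import java.util.concurrent.CompletableFuture;

class NonBlockingReadSketch {
    // Returns the value only if the future has completed normally; null otherwise.
    static <T> T valueIfReady(CompletableFuture<T> future) {
        if (future != null && future.isDone() && !future.isCompletedExceptionally()) {
            return future.getNow(null); // safe: already completed normally
        }
        return null;
    }
}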

From source file:com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleUnsubscribe(CommandUnsubscribe unsubscribe) {
    checkArgument(state == State.Connected);

    CompletableFuture<Consumer> consumerFuture = consumers.get(unsubscribe.getConsumerId());

    if (consumerFuture != null && consumerFuture.isDone() && !consumerFuture.isCompletedExceptionally()) {
        consumerFuture.getNow(null).doUnsubscribe(unsubscribe.getRequestId());
    } else {
        ctx.writeAndFlush(
                Commands.newError(unsubscribe.getRequestId(), ServerError.MetadataError, "Consumer not found"));
    }
}

From source file:com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleSend(CommandSend send, ByteBuf headersAndPayload) {
    checkArgument(state == State.Connected);

    CompletableFuture<Producer> producerFuture = producers.get(send.getProducerId());

    if (producerFuture == null || !producerFuture.isDone() || producerFuture.isCompletedExceptionally()) {
        log.warn("[{}] Producer had already been closed: {}", remoteAddress, send.getProducerId());
        return;
    }

    Producer producer = producerFuture.getNow(null);
    if (log.isDebugEnabled()) {
        printSendCommandDebug(send, headersAndPayload);
    }

    startSendOperation();

    // Persist the message
    producer.publishMessage(send.getProducerId(), send.getSequenceId(), headersAndPayload,
            send.getNumMessages());
}

From source file:com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleRedeliverUnacknowledged(CommandRedeliverUnacknowledgedMessages redeliver) {
    checkArgument(state == State.Connected);
    if (log.isDebugEnabled()) {
        log.debug("[{}] Received Resend Command from consumer {} ", remoteAddress, redeliver.getConsumerId());
    }

    CompletableFuture<Consumer> consumerFuture = consumers.get(redeliver.getConsumerId());

    if (consumerFuture != null && consumerFuture.isDone() && !consumerFuture.isCompletedExceptionally()) {
        Consumer consumer = consumerFuture.getNow(null);
        if (redeliver.getMessageIdsCount() > 0 && consumer.subType() == SubType.Shared) {
            consumer.redeliverUnacknowledgedMessages(redeliver.getMessageIdsList());
        } else {
            consumer.redeliverUnacknowledgedMessages();
        }
    }
}

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

@Override
public void onObjectBatch(final Stream<Tuple2<Long, IBatchRecord>> batch, final Optional<Integer> batch_size,
        final Optional<JsonNode> grouping_key) {
    if (_deduplication_is_disabled.get()) {
        // no deduplication, so we generally shouldn't be here...
        //... but if we are, do the best we can
        batch.forEach(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(), Optional.empty(),
                Optional.empty(), Optional.empty()));
        return;
    }

    // Create big query

    final Tuple3<QueryComponent<JsonNode>, List<Tuple2<JsonNode, Tuple2<Long, IBatchRecord>>>, Either<String, List<String>>> fieldinfo_dedupquery_keyfields = getDedupQuery(
            batch, _dedup_fields.get(), _db_mapper.get());

    // Get duplicate results

    final Tuple2<List<String>, Boolean> fields_include = getIncludeFields(_policy.get(), _dedup_fields.get(),
            _timestamp_field.get());

    final CompletableFuture<Iterator<JsonNode>> dedup_res = fieldinfo_dedupquery_keyfields._2().isEmpty()
            ? CompletableFuture.completedFuture(Collections.<JsonNode>emptyList().iterator())
            : _dedup_context.get().getObjectsBySpec(fieldinfo_dedupquery_keyfields._1(), fields_include._1(),
                    fields_include._2()).thenApply(cursor -> cursor.iterator());

    // Wait for it to finish

    //(create handy results structure if so)
    final LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> mutable_obj_map = fieldinfo_dedupquery_keyfields
            ._2().stream()
            .collect(Collector.of(
                    () -> new LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>>(),
                    (acc, t2) -> {
                        // (ie only the first element is added, duplicate elements are removed)
                        final Tuple3<Long, IBatchRecord, ObjectNode> t3 = Tuples._3T(t2._2()._1(), t2._2()._2(),
                                _mapper.createObjectNode());
                        acc.compute(t2._1(), (k, v) -> {
                            final LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>> new_list = (null == v)
                                    ? new LinkedList<>()
                                    : v;
                            new_list.add(t3);
                            return new_list;
                        });
                    }, (map1, map2) -> {
                        map1.putAll(map2);
                        return map1;
                    }));

    //TODO (ALEPH-20): add timestamps to annotation
    //TODO (ALEPH-20): support different timestamp fields for the different buckets
    //TODO (ALEPH-20): really need to support >1 current enrichment job 
    //                 ^^(Really really longer term you should be able to decide what objects you want and what you don't  <- NOTE: don't remember what i meant here)

    final Iterator<JsonNode> cursor = dedup_res.join();

    // Handle the results

    final Stream<JsonNode> records_to_delete = Lambdas.get(() -> {
        if (isCustom(_doc_schema.get().deduplication_policy())
                || _doc_schema.get().delete_unhandled_duplicates()) {
            return Optionals.streamOf(cursor, true)
                    .collect(Collectors.groupingBy(
                            ret_obj -> getKeyFieldsAgain(ret_obj, fieldinfo_dedupquery_keyfields._3())))
                    .entrySet().stream().<JsonNode>flatMap(kv -> {

                        final Optional<JsonNode> maybe_key = kv.getKey();
                        final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                                .map(key -> mutable_obj_map.get(key));

                        // Stats:
                        _mutable_stats.duplicate_keys++;
                        _mutable_stats.duplicates_existing += kv.getValue().size();
                        _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                        //DEBUG
                        //System.out.println("?? " + kv.getValue().size() + " vs " + maybe_key + " vs " + matching_records.map(x -> Integer.toString(x.size())).orElse("(no match)"));

                        return matching_records
                                .<Stream<JsonNode>>map(records -> handleDuplicateRecord(_doc_schema.get(),
                                        _custom_handler.optional().map(
                                                handler -> Tuples._2T(handler, this._custom_context.get())),
                                        _timestamp_field.get(), records, kv.getValue(), maybe_key.get(),
                                        mutable_obj_map))
                                .orElse(Stream.empty());
                    });
        } else {
            Optionals.streamOf(cursor, true).forEach(ret_obj -> {
                final Optional<JsonNode> maybe_key = getKeyFieldsAgain(ret_obj,
                        fieldinfo_dedupquery_keyfields._3());
                final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                        .map(key -> mutable_obj_map.get(key));

                //DEBUG
                //System.out.println("?? " + ret_obj + " vs " + maybe_key + " vs " + matching_record.map(x -> x._2().getJson().toString()).orElse("(no match)"));

                // Stats:
                _mutable_stats.duplicate_keys++;
                _mutable_stats.duplicates_existing++;
                _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                matching_records.ifPresent(records -> handleDuplicateRecord(_doc_schema.get(),
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        _timestamp_field.get(), records, Arrays.asList(ret_obj), maybe_key.get(),
                        mutable_obj_map));
            });
            return Stream.<JsonNode>empty();
        }
    });

    final List<Object> ids = records_to_delete.map(j -> jsonToObject(j)).filter(j -> null != j)
            .collect(Collectors.toList());

    if (!ids.isEmpty()) { // fire a bulk deletion request
        mutable_uncompleted_deletes.add(
                _dedup_context.get().deleteObjectsBySpec(CrudUtils.allOf().withAny(AnnotationBean._ID, ids)));

        _mutable_stats.deleted += ids.size();

        //(quickly see if we can reduce the number of outstanding requests)
        final Iterator<CompletableFuture<Long>> it = mutable_uncompleted_deletes.iterator();
        while (it.hasNext()) {
            final CompletableFuture<Long> cf = it.next();
            if (cf.isDone()) {
                it.remove();
            } else
                break; // i.e. stop as soon as we hit one that isn't complete
        }
    }

    _mutable_stats.nonduplicate_keys += mutable_obj_map.size();

    if (Optional.ofNullable(_doc_schema.get().custom_finalize_all_objects()).orElse(false)) {
        mutable_obj_map.entrySet().stream()
                .forEach(kv -> handleCustomDeduplication(
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        kv.getValue(), Collections.emptyList(), kv.getKey()));
    } else { // Just emit the last element of each grouped object set
        mutable_obj_map.values().stream().map(t -> t.peekLast())
                .forEach(t -> _context.get().emitImmutableObject(t._1(), t._2().getJson(), Optional.of(t._3()),
                        Optional.empty(), Optional.empty()));
    }
}
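
The delete-pruning loop near the end of this method shows another common use of isDone(): opportunistically draining already-completed futures from the head of a queue of outstanding requests, without blocking on the pending ones. A standalone sketch of that cleanup step, assuming a simple in-order queue (class and method names hypothetical):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.CompletableFuture;

class PendingRequestQueue {
    private final Deque<CompletableFuture<Long>> pending = new ArrayDeque<>();

    void track(CompletableFuture<Long> request) {
        pending.add(request);
        prune();
    }

    // Drop completed futures from the head and stop at the first pending one,
    // mirroring the in-order cleanup in the deduplication example above.
    private void prune() {
        while (!pending.isEmpty() && pending.peekFirst().isDone()) {
            pending.pollFirst();
        }
    }
}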

From source file:com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleSubscribe(final CommandSubscribe subscribe) {
    checkArgument(state == State.Connected);
    CompletableFuture<Boolean> authorizationFuture;
    if (service.isAuthorizationEnabled()) {
        authorizationFuture = service.getAuthorizationManager()
                .canConsumeAsync(DestinationName.get(subscribe.getTopic()), authRole);
    } else {
        authorizationFuture = CompletableFuture.completedFuture(true);
    }
    final String topicName = subscribe.getTopic();
    final String subscriptionName = subscribe.getSubscription();
    final long requestId = subscribe.getRequestId();
    final long consumerId = subscribe.getConsumerId();
    final SubType subType = subscribe.getSubType();
    final String consumerName = subscribe.getConsumerName();
    final boolean isDurable = subscribe.getDurable();
    final MessageIdImpl startMessageId = subscribe.hasStartMessageId()
            ? new MessageIdImpl(subscribe.getStartMessageId().getLedgerId(),
                    subscribe.getStartMessageId().getEntryId(), subscribe.getStartMessageId().getPartition())
            : null;

    final int priorityLevel = subscribe.hasPriorityLevel() ? subscribe.getPriorityLevel() : 0;

    authorizationFuture.thenApply(isAuthorized -> {
        if (isAuthorized) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Client is authorized to subscribe with role {}", remoteAddress, authRole);
            }

            log.info("[{}] Subscribing on topic {} / {}", remoteAddress, topicName, subscriptionName);

            CompletableFuture<Consumer> consumerFuture = new CompletableFuture<>();
            CompletableFuture<Consumer> existingConsumerFuture = consumers.putIfAbsent(consumerId,
                    consumerFuture);

            if (existingConsumerFuture != null) {
                if (existingConsumerFuture.isDone() && !existingConsumerFuture.isCompletedExceptionally()) {
                    Consumer consumer = existingConsumerFuture.getNow(null);
                    log.info("[{}] Consumer with the same id is already created: {}", remoteAddress, consumer);
                    ctx.writeAndFlush(Commands.newSuccess(requestId));
                    return null;
                } else {
                    // There was an earlier request to create a consumer with the same consumerId. This can happen
                    // when the client timeout is lower than the broker timeout. We need to wait until the previous
                    // consumer creation request either completes or fails.
                    log.warn("[{}][{}][{}] Consumer is already present on the connection", remoteAddress,
                            topicName, subscriptionName);
                    ServerError error = !existingConsumerFuture.isDone() ? ServerError.ServiceNotReady
                            : getErrorCode(existingConsumerFuture);
                    ctx.writeAndFlush(Commands.newError(requestId, error,
                            "Consumer is already present on the connection"));
                    return null;
                }
            }

            service.getTopic(topicName).thenCompose(topic -> topic.subscribe(ServerCnx.this, subscriptionName,
                    consumerId, subType, priorityLevel, consumerName, isDurable, startMessageId))
                    .thenAccept(consumer -> {
                        if (consumerFuture.complete(consumer)) {
                            log.info("[{}] Created subscription on topic {} / {}", remoteAddress, topicName,
                                    subscriptionName);
                            ctx.writeAndFlush(Commands.newSuccess(requestId), ctx.voidPromise());
                        } else {
                            // The consumer future was already completed by a close command
                            try {
                                consumer.close();
                                log.info("[{}] Cleared consumer created after timeout on client side {}",
                                        remoteAddress, consumer);
                            } catch (BrokerServiceException e) {
                                log.warn(
                                        "[{}] Error closing consumer created after timeout on client side {}: {}",
                                        remoteAddress, consumer, e.getMessage());
                            }
                            consumers.remove(consumerId, consumerFuture);
                        }

                    }) //
                    .exceptionally(exception -> {
                        log.warn("[{}][{}][{}] Failed to create consumer: {}", remoteAddress, topicName,
                                subscriptionName, exception.getCause().getMessage(), exception);

                        // If the client timed out, the future would already have been completed by a subsequent
                        // close. Send the error back to the client only if it is not completed already.
                        if (consumerFuture.completeExceptionally(exception)) {
                            ctx.writeAndFlush(Commands.newError(requestId,
                                    BrokerServiceException.getClientErrorCode(exception.getCause()),
                                    exception.getCause().getMessage()));
                        }
                        consumers.remove(consumerId, consumerFuture);

                        return null;

                    });
        } else {
            String msg = "Client is not authorized to subscribe";
            log.warn("[{}] {} with role {}", remoteAddress, msg, authRole);
            ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg));
        }
        return null;
    });
}

From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java

/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, with concurrent requests.
 */
@Test
public void testGetOrAssignStreamSegmentIdWithConcurrency() throws Exception {
    // We set up a delay in the OperationLog process. We only do this for a stand-alone StreamSegment because the process
    // is driven by the same code for Transactions as well.
    final String segmentName = "Segment";
    final long segmentId = 12345;

    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);

    @Cleanup
    TestContext context = new TestContext();
    setupStorageGetHandler(context, storageSegments,
            sn -> new StreamSegmentInformation(sn, 0, false, false, new ImmutableDate()));
    CompletableFuture<Long> initialAddFuture = new CompletableFuture<>();
    AtomicBoolean operationLogInvoked = new AtomicBoolean(false);
    context.operationLog.addHandler = op -> {
        if (!(op instanceof StreamSegmentMapOperation)) {
            return FutureHelpers.failedFuture(new IllegalArgumentException("unexpected operation"));
        }
        if (operationLogInvoked.getAndSet(true)) {
            return FutureHelpers.failedFuture(new IllegalStateException("multiple calls to OperationLog.add"));
        }

        // Need to set SegmentId on operation.
        ((StreamSegmentMapOperation) op).setStreamSegmentId(segmentId);
        return initialAddFuture;
    };

    CompletableFuture<Long> firstCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT);
    CompletableFuture<Long> secondCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT);
    Thread.sleep(20);
    Assert.assertFalse("getOrAssignStreamSegmentId (first call) returned before OperationLog finished.",
            firstCall.isDone());
    Assert.assertFalse("getOrAssignStreamSegmentId (second call) returned before OperationLog finished.",
            secondCall.isDone());
    initialAddFuture.complete(1L);
    long firstCallResult = firstCall.get(100, TimeUnit.MILLISECONDS);
    long secondCallResult = secondCall.get(100, TimeUnit.MILLISECONDS);

    Assert.assertEquals(
            "Two concurrent calls to getOrAssignStreamSegmentId for the same StreamSegment returned different ids.",
            firstCallResult, secondCallResult);
}
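
Tests like this one use isDone() as a non-blocking assertion that work has not finished before a gating future is released. A minimal JUnit 4-style sketch of the same pattern (names hypothetical):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import org.junit.Assert;
import org.junit.Test;

public class GatedFutureTest {
    @Test
    public void resultIsGatedOnDependency() throws Exception {
        CompletableFuture<Long> gate = new CompletableFuture<>();
        CompletableFuture<Long> result = gate.thenApply(v -> v + 1);

        // Non-blocking check: the dependent future must still be pending.
        Assert.assertFalse("result completed before the gate was released", result.isDone());

        gate.complete(41L);
        Assert.assertEquals(42L, (long) result.get(100, TimeUnit.MILLISECONDS));
    }
}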

From source file:com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleProducer(final CommandProducer cmdProducer) {
    checkArgument(state == State.Connected);
    CompletableFuture<Boolean> authorizationFuture;
    if (service.isAuthorizationEnabled()) {
        authorizationFuture = service.getAuthorizationManager()
                .canProduceAsync(DestinationName.get(cmdProducer.getTopic().toString()), authRole);
    } else {
        authorizationFuture = CompletableFuture.completedFuture(true);
    }

    // Use producer name provided by client if present
    final String producerName = cmdProducer.hasProducerName() ? cmdProducer.getProducerName()
            : service.generateUniqueProducerName();
    final String topicName = cmdProducer.getTopic();
    final long producerId = cmdProducer.getProducerId();
    final long requestId = cmdProducer.getRequestId();
    authorizationFuture.thenApply(isAuthorized -> {
        if (isAuthorized) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Client is authorized to Produce with role {}", remoteAddress, authRole);
            }
            CompletableFuture<Producer> producerFuture = new CompletableFuture<>();
            CompletableFuture<Producer> existingProducerFuture = producers.putIfAbsent(producerId,
                    producerFuture);

            if (existingProducerFuture != null) {
                if (existingProducerFuture.isDone() && !existingProducerFuture.isCompletedExceptionally()) {
                    Producer producer = existingProducerFuture.getNow(null);
                    log.info("[{}] Producer with the same id is already created: {}", remoteAddress, producer);
                    ctx.writeAndFlush(Commands.newProducerSuccess(requestId, producer.getProducerName()));
                    return null;
                } else {
                    // There was an earlier request to create a producer with the same producerId. This can happen
                    // when the client timeout is lower than the broker timeout. We need to wait until the previous
                    // producer creation request either completes or fails.
                    ServerError error = !existingProducerFuture.isDone() ? ServerError.ServiceNotReady
                            : getErrorCode(existingProducerFuture);
                    log.warn("[{}][{}] Producer is already present on the connection", remoteAddress,
                            topicName);
                    ctx.writeAndFlush(Commands.newError(requestId, error,
                            "Producer is already present on the connection"));
                    return null;
                }
            }

            log.info("[{}][{}] Creating producer. producerId={}", remoteAddress, topicName, producerId);

            service.getTopic(topicName).thenAccept((Topic topic) -> {
                // Before creating the producer, check if the backlog quota is exceeded on the topic
                if (topic.isBacklogQuotaExceeded(producerName)) {
                    IllegalStateException illegalStateException = new IllegalStateException(
                            "Cannot create producer on topic with backlog quota exceeded");
                    BacklogQuota.RetentionPolicy retentionPolicy = topic.getBacklogQuota().getPolicy();
                    if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_request_hold) {
                        ctx.writeAndFlush(
                                Commands.newError(requestId, ServerError.ProducerBlockedQuotaExceededError,
                                        illegalStateException.getMessage()));
                    } else if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_exception) {
                        ctx.writeAndFlush(
                                Commands.newError(requestId, ServerError.ProducerBlockedQuotaExceededException,
                                        illegalStateException.getMessage()));
                    }
                    producerFuture.completeExceptionally(illegalStateException);
                    producers.remove(producerId, producerFuture);
                    return;
                }

                disableTcpNoDelayIfNeeded(topicName, producerName);

                Producer producer = new Producer(topic, ServerCnx.this, producerId, producerName, authRole);

                try {
                    topic.addProducer(producer);

                    if (isActive()) {
                        if (producerFuture.complete(producer)) {
                            log.info("[{}] Created new producer: {}", remoteAddress, producer);
                            ctx.writeAndFlush(Commands.newProducerSuccess(requestId, producerName));
                            return;
                        } else {
                            // The producer's future was already completed by a close command
                            producer.closeNow();
                            log.info("[{}] Cleared producer created after timeout on client side {}",
                                    remoteAddress, producer);
                        }
                    } else {
                        producer.closeNow();
                        log.info("[{}] Cleared producer created after connection was closed: {}", remoteAddress,
                                producer);
                        producerFuture.completeExceptionally(
                                new IllegalStateException("Producer created after connection was closed"));
                    }
                } catch (BrokerServiceException ise) {
                    log.error("[{}] Failed to add producer to topic {}: {}", remoteAddress, topicName,
                            ise.getMessage());
                    ctx.writeAndFlush(Commands.newError(requestId,
                            BrokerServiceException.getClientErrorCode(ise), ise.getMessage()));
                    producerFuture.completeExceptionally(ise);
                }

                producers.remove(producerId, producerFuture);
            }).exceptionally(exception -> {
                Throwable cause = exception.getCause();
                if (!(cause instanceof ServiceUnitNotReadyException)) {
                    // Do not print stack traces for expected exceptions
                    log.error("[{}] Failed to create topic {}", remoteAddress, topicName, exception);
                }

                // If the client timed out, the future would already have been completed by a subsequent close.
                // Send the error back to the client only if it is not completed already.
                if (producerFuture.completeExceptionally(exception)) {
                    ctx.writeAndFlush(Commands.newError(requestId,
                            BrokerServiceException.getClientErrorCode(cause), cause.getMessage()));
                }
                producers.remove(producerId, producerFuture);

                return null;
            });
        } else {
            String msg = "Client is not authorized to Produce";
            log.warn("[{}] {} with role {}", remoteAddress, msg, authRole);
            ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg));
        }
        return null;
    });
}

From source file:io.pravega.controller.eventProcessor.impl.SerializedRequestHandlerTest.java

@Test(timeout = 10000)
public void testPostponeEvent() throws InterruptedException, ExecutionException {
    AtomicInteger postponeS1e1Count = new AtomicInteger();
    AtomicInteger postponeS1e2Count = new AtomicInteger();
    AtomicBoolean allowCompletion = new AtomicBoolean(false);

    SerializedRequestHandler<TestEvent> requestHandler = new SerializedRequestHandler<TestEvent>(
            executorService()) {
        @Override
        public CompletableFuture<Void> processEvent(TestEvent event) {
            if (!event.future.isDone()) {
                return Futures.failedFuture(new TestPostponeException());
            }
            return event.getFuture();
        }

        @Override
        public boolean toPostpone(TestEvent event, long pickupTime, Throwable exception) {

            boolean retval = true;

            if (allowCompletion.get()) {
                if (event.number == 1) {
                    postponeS1e1Count.incrementAndGet();
                    retval = exception instanceof TestPostponeException && postponeS1e1Count.get() < 2;
                }

                if (event.number == 2) {
                    postponeS1e2Count.incrementAndGet();
                    retval = exception instanceof TestPostponeException
                            && (System.currentTimeMillis() - pickupTime < Duration.ofMillis(100).toMillis());
                }
            }

            return retval;
        }
    };

    List<Pair<TestEvent, CompletableFuture<Void>>> stream1Queue = requestHandler
            .getEventQueueForKey(getKeyForStream("scope", "stream1"));
    assertNull(stream1Queue);
    // post 3 work items for stream1
    TestEvent s1e1 = new TestEvent("scope", "stream1", 1);
    CompletableFuture<Void> s1p1 = requestHandler.process(s1e1);
    TestEvent s1e2 = new TestEvent("scope", "stream1", 2);
    CompletableFuture<Void> s1p2 = requestHandler.process(s1e2);
    TestEvent s1e3 = new TestEvent("scope", "stream1", 3);
    CompletableFuture<Void> s1p3 = requestHandler.process(s1e3);

    // post events for some more arbitrary streams in the background
    AtomicBoolean stop = new AtomicBoolean(false);

    runBackgroundStreamProcessing("stream2", requestHandler, stop);
    runBackgroundStreamProcessing("stream3", requestHandler, stop);
    runBackgroundStreamProcessing("stream4", requestHandler, stop);

    s1e3.complete();
    // verify that s1p3 completes.
    assertTrue(Futures.await(s1p3));
    // verify that s1e1 and s1e2 are still not complete.
    assertTrue(!s1e1.getFuture().isDone());
    assertTrue(!s1p1.isDone());
    assertTrue(!s1e2.getFuture().isDone());
    assertTrue(!s1p2.isDone());

    // Allow completion
    allowCompletion.set(true);

    assertFalse(Futures.await(s1p1));
    assertFalse(Futures.await(s1p2));
    AssertExtensions.assertThrows("", s1p1::join, e -> Exceptions.unwrap(e) instanceof TestPostponeException);
    AssertExtensions.assertThrows("", s1p2::join, e -> Exceptions.unwrap(e) instanceof TestPostponeException);
    assertTrue(postponeS1e1Count.get() == 2);
    assertTrue(postponeS1e2Count.get() > 0);
    stop.set(true);
}

From source file:com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleCloseConsumer(CommandCloseConsumer closeConsumer) {
    checkArgument(state == State.Connected);
    log.info("[{}] Closing consumer: {}", remoteAddress, closeConsumer.getConsumerId());

    long requestId = closeConsumer.getRequestId();
    long consumerId = closeConsumer.getConsumerId();

    CompletableFuture<Consumer> consumerFuture = consumers.get(consumerId);
    if (consumerFuture == null) {
        log.warn("[{}] Consumer was not registered on the connection: {}", consumerId, remoteAddress);
        ctx.writeAndFlush(Commands.newError(requestId, ServerError.MetadataError, "Consumer not found"));
        return;
    }

    if (!consumerFuture.isDone() && consumerFuture
            .completeExceptionally(new IllegalStateException("Closed consumer before creation was complete"))) {
        // We have received a request to close the consumer before its creation actually completed. We have marked
        // the consumer future as failed, so we can tell the client the close operation was successful. When the
        // actual create operation completes, the new consumer will be discarded.
        log.info("[{}] Closed consumer {} before its creation was completed", remoteAddress, consumerId);
        ctx.writeAndFlush(Commands.newSuccess(requestId));
        return;
    }

    if (consumerFuture.isCompletedExceptionally()) {
        log.info("[{}] Closed consumer {} that already failed to be created", remoteAddress, consumerId);
        ctx.writeAndFlush(Commands.newSuccess(requestId));
        return;
    }

    // Proceed with normal consumer close
    Consumer consumer = consumerFuture.getNow(null);
    try {
        consumer.close();
        consumers.remove(consumerId, consumerFuture);
        ctx.writeAndFlush(Commands.newSuccess(requestId));
        log.info("[{}] Closed consumer {}", remoteAddress, consumer);
    } catch (BrokerServiceException e) {
        log.warn("[{]] Error closing consumer: ", remoteAddress, consumer, e);
        ctx.writeAndFlush(
                Commands.newError(requestId, BrokerServiceException.getClientErrorCode(e), e.getMessage()));
    }
}
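
The close handler above pairs isDone() with the boolean result of completeExceptionally(), which reports whether this call was the one that completed the future; that is what makes the check-then-complete race safe. A compact sketch of that claim-or-observe pattern (names hypothetical):

import java.util.concurrent.CompletableFuture;

class CancelBeforeCreateSketch {
    // Attempt to fail a pending creation; returns true if this call won the race,
    // in which case the eventual creator must discard its result.
    static boolean cancelPending(CompletableFuture<?> creation) {
        return !creation.isDone()
                && creation.completeExceptionally(
                        new IllegalStateException("closed before creation completed"));
    }
}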