List of usage examples for java.util.concurrent.CompletableFuture.complete
public boolean complete(T value)
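complete(T value) atomically transitions an incomplete CompletableFuture to a completed state holding the given value, and returns true only for the invocation that actually performed the transition; if the future was already completed (normally, exceptionally, or via cancellation) it returns false and the existing outcome is kept. The examples below come from Apache BookKeeper, DistributedLog, and Pulsar. First, a minimal self-contained sketch of that contract (illustrative code, not taken from any of those projects):

import java.util.concurrent.CompletableFuture;

public class CompleteContractSketch {
    public static void main(String[] args) throws Exception {
        CompletableFuture<String> future = new CompletableFuture<>();

        // Dependents registered before completion run once complete() is called.
        future.thenAccept(v -> System.out.println("received: " + v));

        // Complete the future from another thread.
        Thread producer = new Thread(() -> {
            boolean first = future.complete("hello");    // true: this call completed the future
            boolean second = future.complete("ignored"); // false: already completed, value unchanged
            System.out.println("first=" + first + ", second=" + second);
        });
        producer.start();

        System.out.println("get() returned: " + future.get()); // blocks until complete(), prints "hello"
        producer.join();
    }
}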
From source file: org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testOffloadConflict() throws Exception {
    Set<Pair<Long, UUID>> deleted = ConcurrentHashMap.newKeySet();
    CompletableFuture<Set<Long>> errorLedgers = new CompletableFuture<>();
    Set<Pair<Long, UUID>> failedOffloads = ConcurrentHashMap.newKeySet();

    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                                               Map<String, String> extraMetadata) {
            return errorLedgers.thenCompose((errors) -> {
                if (errors.remove(ledger.getId())) {
                    failedOffloads.add(Pair.of(ledger.getId(), uuid));
                    CompletableFuture<Void> future = new CompletableFuture<>();
                    future.completeExceptionally(new Exception("Some kind of error"));
                    return future;
                } else {
                    return super.offload(ledger, uuid, extraMetadata);
                }
            });
        }

        @Override
        public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uuid,
                                                       Map<String, String> offloadDriverMetadata) {
            deleted.add(Pair.of(ledgerId, uuid));
            return super.deleteOffloaded(ledgerId, uuid, offloadDriverMetadata);
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }

    Set<Long> errorSet = ConcurrentHashMap.newKeySet();
    errorSet.add(ledger.getLedgersInfoAsList().get(0).getLedgerId());
    errorLedgers.complete(errorSet);

    try {
        ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    } catch (ManagedLedgerException e) {
        // expected
    }

    Assert.assertTrue(errorSet.isEmpty());
    Assert.assertEquals(failedOffloads.size(), 1);
    Assert.assertEquals(deleted.size(), 0);

    long expectedFailedLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    UUID expectedFailedUUID = new UUID(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidMsb(),
            ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidLsb());
    Assert.assertEquals(failedOffloads.stream().findFirst().get(),
            Pair.of(expectedFailedLedger, expectedFailedUUID));
    Assert.assertFalse(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());

    // try offload again
    ledger.offloadPrefix(ledger.getLastConfirmedEntry());

    Assert.assertEquals(failedOffloads.size(), 1);
    Assert.assertEquals(deleted.size(), 1);
    Assert.assertEquals(deleted.stream().findFirst().get(), Pair.of(expectedFailedLedger, expectedFailedUUID));
    UUID successUUID = new UUID(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidMsb(),
            ledger.getLedgersInfoAsList().get(0).getOffloadContext().getUidLsb());
    Assert.assertFalse(successUUID.equals(expectedFailedUUID));
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
}
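The test above hands an incomplete CompletableFuture (errorLedgers) to the mock offloader, which chains on it with thenCompose; the test later calls complete(errorSet) to decide, at the last moment, which ledgers should fail. A hedged, self-contained sketch of that deferred-input pattern (names are illustrative, not from the test):

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.CompletableFuture;

public class DeferredInputSketch {
    public static void main(String[] args) {
        // The component under test only sees this future, not the final set.
        CompletableFuture<Set<Long>> failingIds = new CompletableFuture<>();

        // The "offloader" chains on the future; nothing here runs until the test supplies the set.
        CompletableFuture<String> outcome = failingIds.thenCompose(ids ->
                ids.contains(42L)
                        ? DeferredInputSketch.<String>failedFuture(new Exception("injected failure"))
                        : CompletableFuture.completedFuture("offloaded"));

        // The test injects the input by completing the future.
        failingIds.complete(Collections.singleton(42L));

        System.out.println(outcome.isCompletedExceptionally()); // prints "true"
    }

    // Helper mirroring CompletableFuture.failedFuture (Java 9+); defined here for Java 8 compatibility.
    static <T> CompletableFuture<T> failedFuture(Throwable t) {
        CompletableFuture<T> f = new CompletableFuture<>();
        f.completeExceptionally(t);
        return f;
    }
}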
From source file: org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testTrimOccursDuringOffload() throws Exception {
    CountDownLatch offloadStarted = new CountDownLatch(1);
    CompletableFuture<Void> blocker = new CompletableFuture<>();

    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                                               Map<String, String> extraMetadata) {
            offloadStarted.countDown();
            return blocker.thenCompose((f) -> super.offload(ledger, uuid, extraMetadata));
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");

    // Create 3 ledgers, saving position at start of each
    for (int i = 0; i < 21; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3);
    PositionImpl startOfSecondLedger = PositionImpl.get(ledger.getLedgersInfoAsList().get(1).getLedgerId(), 0);
    PositionImpl startOfThirdLedger = PositionImpl.get(ledger.getLedgersInfoAsList().get(2).getLedgerId(), 0);

    // trigger an offload which should offload the first two ledgers
    OffloadCallbackPromise cbPromise = new OffloadCallbackPromise();
    ledger.asyncOffloadPrefix(startOfThirdLedger, cbPromise, null);
    offloadStarted.await();

    // trim first ledger
    cursor.markDelete(startOfSecondLedger, new HashMap<>());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 2);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 0);

    // complete offloading
    blocker.complete(null);
    cbPromise.get();

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());
    Assert.assertEquals(offloader.offloadedLedgers().size(), 1);
    Assert.assertTrue(
            offloader.offloadedLedgers().contains(ledger.getLedgersInfoAsList().get(0).getLedgerId()));
}
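Here the incomplete blocker future acts as a gate: the mock offload stage parks on it via thenCompose, the test interleaves a trim while the offload is in flight, and blocker.complete(null) releases the chain. A minimal standalone sketch of that gating pattern, assuming nothing beyond the JDK (names are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;

public class GateSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch started = new CountDownLatch(1);
        CompletableFuture<Void> gate = new CompletableFuture<>();

        // The async work signals that it has started, then parks on the gate.
        CompletableFuture<String> work = CompletableFuture.runAsync(started::countDown)
                .thenCompose(v -> gate)
                .thenApply(v -> "finished");

        started.await();                   // the async stage is now waiting on the gate
        System.out.println(work.isDone()); // prints "false": blocked until the gate opens

        // ... the test can interleave other operations here ...

        gate.complete(null);               // open the gate; the chained stages now run
        System.out.println(work.join());   // prints "finished"
    }
}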
From source file: org.apache.distributedlog.lock.ZKSessionLock.java

/**
 * Check Lock Owner Phase 2 : check all lock waiters to get current owner and wait for ownership if necessary.
 *
 * @param lockWatcher
 *          lock watcher.
 * @param wait
 *          whether to wait for ownership.
 * @param getChildrenRc
 *          result of getting all lock waiters
 * @param children
 *          current lock waiters.
 * @param promise
 *          promise to satisfy with current lock owner.
 */
private void processLockWaiters(final LockWatcher lockWatcher, final boolean wait, final int getChildrenRc,
                                final List<String> children, final CompletableFuture<String> promise) {
    executeLockAction(lockWatcher.epoch, new LockAction() {
        @Override
        public void execute() {
            if (!lockState.inState(State.PREPARED)) {
                // e.g. lock closed or session expired after prepared
                promise.completeExceptionally(
                        new LockStateChangedException(lockPath, lockId, State.PREPARED, lockState.getState()));
                return;
            }
            if (KeeperException.Code.OK.intValue() != getChildrenRc) {
                promise.completeExceptionally(KeeperException.create(KeeperException.Code.get(getChildrenRc)));
                return;
            }
            if (children.isEmpty()) {
                LOG.error("Error, member list is empty for lock {}.", lockPath);
                promise.completeExceptionally(
                        new UnexpectedException("Empty member list for lock " + lockPath));
                return;
            }

            // sort the children
            Collections.sort(children, MEMBER_COMPARATOR);
            final String cid = currentId;
            final int memberIndex = children.indexOf(cid);
            if (LOG.isDebugEnabled()) {
                LOG.debug("{} is the number {} member in the list.", cid, memberIndex);
            }
            // If we hold the lock
            if (memberIndex == 0) {
                LOG.info("{} acquired the lock {}.", cid, lockPath);
                claimOwnership(lockWatcher.epoch);
                promise.complete(cid);
            } else if (memberIndex > 0) {
                // we are in the member list but we didn't hold the lock
                // get ownership of current owner
                asyncParseClientID(zk, lockPath, children.get(0))
                        .whenComplete(new FutureEventListener<Pair<String, Long>>() {
                            @Override
                            public void onSuccess(Pair<String, Long> currentOwner) {
                                watchLockOwner(lockWatcher, wait, cid, children.get(memberIndex - 1),
                                        children.get(0), currentOwner, promise);
                            }

                            @Override
                            public void onFailure(final Throwable cause) {
                                // ensure promise is satisfied in lock thread
                                executeLockAction(lockWatcher.epoch, new LockAction() {
                                    @Override
                                    public void execute() {
                                        promise.completeExceptionally(cause);
                                    }

                                    @Override
                                    public String getActionName() {
                                        return "handleFailureOnParseClientID(lockPath=" + lockPath + ")";
                                    }
                                }, promise);
                            }
                        });
            } else {
                LOG.error("Member {} doesn't exist in the members list {} for lock {}.",
                        new Object[] { cid, children, lockPath });
                promise.completeExceptionally(new UnexpectedException(
                        "Member " + cid + " doesn't exist in member list " + children + " for lock " + lockPath));
            }
        }

        @Override
        public String getActionName() {
            return "processLockWaiters(rc=" + getChildrenRc + ", waiters=" + children + ")";
        }
    }, promise);
}
From source file: org.apache.pulsar.broker.service.BrokerService.java

private void createPersistentTopic(final String topic, CompletableFuture<Topic> topicFuture) {
    final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    DestinationName destinationName = DestinationName.get(topic);

    if (!pulsar.getNamespaceService().isServiceUnitActive(destinationName)) {
        // namespace is being unloaded
        String msg = String.format("Namespace is being unloaded, cannot add topic %s", topic);
        log.warn(msg);
        pulsar.getExecutor().submit(() -> topics.remove(topic, topicFuture));
        topicFuture.completeExceptionally(new ServiceUnitNotReadyException(msg));
        return;
    }

    getManagedLedgerConfig(destinationName).thenAccept(config -> {
        // Once we have the configuration, we can proceed with the async open operation
        managedLedgerFactory.asyncOpen(destinationName.getPersistenceNamingEncoding(), config,
                new OpenLedgerCallback() {
                    @Override
                    public void openLedgerComplete(ManagedLedger ledger, Object ctx) {
                        PersistentTopic persistentTopic = new PersistentTopic(topic, ledger, BrokerService.this);

                        CompletableFuture<Void> replicationFuture = persistentTopic.checkReplication();
                        replicationFuture.thenRun(() -> {
                            log.info("Created topic {}", topic);
                            long topicLoadLatencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime())
                                    - topicCreateTimeMs;
                            pulsarStats.recordTopicLoadTimeValue(topic, topicLoadLatencyMs);
                            addTopicToStatsMaps(destinationName, persistentTopic);
                            topicFuture.complete(persistentTopic);
                        });
                        replicationFuture.exceptionally((ex) -> {
                            log.warn("Replication check failed. Removing topic from topics list {}, {}", topic, ex);
                            persistentTopic.stopReplProducers().whenComplete((v, exception) -> {
                                topics.remove(topic, topicFuture);
                                topicFuture.completeExceptionally(ex);
                            });
                            return null;
                        });
                    }

                    @Override
                    public void openLedgerFailed(ManagedLedgerException exception, Object ctx) {
                        log.warn("Failed to create topic {}", topic, exception);
                        topics.remove(topic, topicFuture);
                        topicFuture.completeExceptionally(new PersistenceException(exception));
                    }
                }, null);
    }).exceptionally((exception) -> {
        log.warn("[{}] Failed to get topic configuration: {}", topic, exception.getMessage(), exception);
        // remove topic from topics-map in different thread to avoid possible deadlock if
        // createPersistentTopic-thread only tries to handle this future-result
        pulsar.getExecutor().submit(() -> topics.remove(topic, topicFuture));
        topicFuture.completeExceptionally(exception);
        return null;
    });
}
From source file: org.apache.pulsar.broker.service.persistent.PersistentTopic.java

@Override
public void checkGC(int gcIntervalInSeconds) {
    if (isActive()) {
        lastActive = System.nanoTime();
    } else if (System.nanoTime() - lastActive < TimeUnit.SECONDS.toNanos(gcIntervalInSeconds)) {
        // Gc interval did not expire yet
        return;
    } else if (shouldTopicBeRetained()) {
        // Topic activity is still within the retention period
        return;
    } else {
        CompletableFuture<Void> replCloseFuture = new CompletableFuture<>();

        if (TopicName.get(topic).isGlobal()) {
            // For global namespace, close repl producers first.
            // Once all repl producers are closed, we can delete the topic,
            // provided no remote producers connected to the broker.
            if (log.isDebugEnabled()) {
                log.debug("[{}] Global topic inactive for {} seconds, closing repl producers.", topic,
                        gcIntervalInSeconds);
            }
            closeReplProducersIfNoBacklog().thenRun(() -> {
                if (hasRemoteProducers()) {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Global topic has connected remote producers. Not a candidate for GC",
                                topic);
                    }
                    replCloseFuture.completeExceptionally(
                            new TopicBusyException("Topic has connected remote producers"));
                } else {
                    log.info("[{}] Global topic inactive for {} seconds, closed repl producers", topic,
                            gcIntervalInSeconds);
                    replCloseFuture.complete(null);
                }
            }).exceptionally(e -> {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Global topic has replication backlog. Not a candidate for GC", topic);
                }
                replCloseFuture.completeExceptionally(e.getCause());
                return null;
            });
        } else {
            replCloseFuture.complete(null);
        }

        replCloseFuture.thenCompose(v -> delete(true))
                .thenRun(() -> log.info("[{}] Topic deleted successfully due to inactivity", topic))
                .exceptionally(e -> {
                    if (e.getCause() instanceof TopicBusyException) {
                        // topic became active again
                        if (log.isDebugEnabled()) {
                            log.debug("[{}] Did not delete busy topic: {}", topic, e.getCause().getMessage());
                        }
                    } else {
                        log.warn("[{}] Inactive topic deletion failed", topic, e);
                    }
                    return null;
                });
    }
}
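Note how the non-global branch calls replCloseFuture.complete(null) immediately, so the same downstream chain (thenCompose(v -> delete(true))) runs whether or not replication producers had to be closed first. A small illustrative sketch of that "complete(null) as a uniform fast path" idea, using hypothetical names:

import java.util.concurrent.CompletableFuture;

public class FastPathSketch {
    public static void main(String[] args) {
        System.out.println(cleanup(false).join()); // prints "deleted" without any async precondition
        System.out.println(cleanup(true).join());  // prints "deleted" after the simulated precondition
    }

    // Illustrative only: the downstream chain is identical; only how the
    // precondition future gets completed differs between the two branches.
    static CompletableFuture<String> cleanup(boolean needsPrecondition) {
        CompletableFuture<Void> precondition = new CompletableFuture<>();
        if (needsPrecondition) {
            // Simulate asynchronous work that completes the future when done.
            CompletableFuture.runAsync(() -> precondition.complete(null));
        } else {
            precondition.complete(null); // fast path: nothing to wait for
        }
        return precondition.thenApply(v -> "deleted");
    }
}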
From source file: org.apache.pulsar.broker.service.persistent.PersistentTopic.java

/**
 * Close this topic - close all producers and subscriptions associated with this topic
 *
 * @return Completable future indicating completion of close operation
 */
@Override
public CompletableFuture<Void> close() {
    CompletableFuture<Void> closeFuture = new CompletableFuture<>();

    lock.writeLock().lock();
    try {
        if (!isFenced) {
            isFenced = true;
        } else {
            log.warn("[{}] Topic is already being closed or deleted", topic);
            closeFuture.completeExceptionally(new TopicFencedException("Topic is already fenced"));
            return closeFuture;
        }
    } finally {
        lock.writeLock().unlock();
    }

    List<CompletableFuture<Void>> futures = Lists.newArrayList();

    replicators.forEach((cluster, replicator) -> futures.add(replicator.disconnect()));
    producers.forEach(producer -> futures.add(producer.disconnect()));
    subscriptions.forEach((s, sub) -> futures.add(sub.disconnect()));

    FutureUtil.waitForAll(futures).thenRun(() -> {
        // After having disconnected all producers/consumers, close the managed ledger
        ledger.asyncClose(new CloseCallback() {
            @Override
            public void closeComplete(Object ctx) {
                // Everything is now closed, remove the topic from map
                brokerService.removeTopicFromCache(topic);
                log.info("[{}] Topic closed", topic);
                closeFuture.complete(null);
            }

            @Override
            public void closeFailed(ManagedLedgerException exception, Object ctx) {
                log.error("[{}] Failed to close managed ledger, proceeding anyway.", topic, exception);
                brokerService.removeTopicFromCache(topic);
                closeFuture.complete(null);
            }
        }, null);

        if (dispatchRateLimiter.isPresent()) {
            dispatchRateLimiter.get().close();
        }
        if (subscribeRateLimiter.isPresent()) {
            subscribeRateLimiter.get().close();
        }
    }).exceptionally(exception -> {
        log.error("[{}] Error closing topic", topic, exception);
        isFenced = false;
        closeFuture.completeExceptionally(exception);
        return null;
    });

    return closeFuture;
}
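This close() implementation aggregates many per-component disconnect futures, waits for all of them, and only then completes closeFuture from the managed-ledger close callback. A hedged standalone sketch of the same aggregation shape, using the JDK's CompletableFuture.allOf in place of Pulsar's FutureUtil.waitForAll (names are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class AggregateCloseSketch {
    public static void main(String[] args) {
        CompletableFuture<Void> closeFuture = new CompletableFuture<>();

        // Stand-ins for the per-producer / per-subscription disconnect futures.
        List<CompletableFuture<Void>> disconnects = Arrays.asList(
                CompletableFuture.runAsync(() -> System.out.println("producer disconnected")),
                CompletableFuture.runAsync(() -> System.out.println("subscription disconnected")));

        // allOf plays the role of FutureUtil.waitForAll: once everything is
        // disconnected, complete the close future (or fail it on error).
        CompletableFuture.allOf(disconnects.toArray(new CompletableFuture[0]))
                .thenRun(() -> closeFuture.complete(null))
                .exceptionally(ex -> {
                    closeFuture.completeExceptionally(ex);
                    return null;
                });

        closeFuture.join();
        System.out.println("closed");
    }
}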
From source file: org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

@Override
public Position offloadPrefix(Position pos) throws InterruptedException, ManagedLedgerException {
    CompletableFuture<Position> promise = new CompletableFuture<>();

    asyncOffloadPrefix(pos, new OffloadCallback() {
        @Override
        public void offloadComplete(Position offloadedTo, Object ctx) {
            promise.complete(offloadedTo);
        }

        @Override
        public void offloadFailed(ManagedLedgerException e, Object ctx) {
            promise.completeExceptionally(e);
        }
    }, null);

    try {
        return promise.get(AsyncOperationTimeoutSeconds, TimeUnit.SECONDS);
    } catch (TimeoutException te) {
        throw new ManagedLedgerException("Timeout during managed ledger offload operation");
    } catch (ExecutionException e) {
        log.error("[{}] Error offloading. pos = {}", name, pos, e.getCause());
        throw ManagedLedgerException.getManagedLedgerException(e.getCause());
    }
}
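The method above is the classic callback-to-future bridge: the success callback calls complete(), the failure callback calls completeExceptionally(), and the synchronous caller blocks on get() with a timeout and unwraps the ExecutionException. A self-contained sketch of that bridging pattern against a hypothetical callback API (the Callback interface, asyncOperation, and the 30-second timeout are assumptions for illustration):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class CallbackBridgeSketch {
    // A hypothetical callback-style API, standing in for asyncOffloadPrefix.
    interface Callback<T> {
        void onSuccess(T result);
        void onFailure(Throwable cause);
    }

    static void asyncOperation(Callback<String> cb) {
        CompletableFuture.runAsync(() -> cb.onSuccess("done"));
    }

    // Synchronous facade: the callback completes the promise, the caller blocks on it.
    static String syncOperation() throws Exception {
        CompletableFuture<String> promise = new CompletableFuture<>();
        asyncOperation(new Callback<String>() {
            @Override
            public void onSuccess(String result) {
                promise.complete(result);
            }

            @Override
            public void onFailure(Throwable cause) {
                promise.completeExceptionally(cause);
            }
        });
        try {
            return promise.get(30, TimeUnit.SECONDS);
        } catch (TimeoutException te) {
            throw new Exception("Timeout waiting for async operation");
        } catch (ExecutionException e) {
            throw new Exception(e.getCause()); // unwrap the original failure
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(syncOperation()); // prints "done"
    }
}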
From source file: org.apache.pulsar.broker.admin.impl.PersistentTopicsBase.java

/**
 * It creates subscriptions for new partitions of existing partitioned-topics
 *
 * @param topicName
 *            : topic-name: persistent://prop/cluster/ns/topic
 * @param numPartitions
 *            : number of partitions for the topic
 */
private CompletableFuture<Void> createSubscriptions(TopicName topicName, int numPartitions) {
    String path = path(PARTITIONED_TOPIC_PATH_ZNODE, topicName.getPersistenceNamingEncoding());
    CompletableFuture<Void> result = new CompletableFuture<>();
    fetchPartitionedTopicMetadataAsync(pulsar(), path).thenAccept(partitionMetadata -> {
        if (partitionMetadata.partitions <= 1) {
            result.completeExceptionally(new RestException(Status.CONFLICT, "Topic is not partitioned topic"));
            return;
        }

        if (partitionMetadata.partitions >= numPartitions) {
            result.completeExceptionally(new RestException(Status.CONFLICT,
                    "number of partitions must be more than existing " + partitionMetadata.partitions));
            return;
        }

        PulsarAdmin admin;
        try {
            admin = pulsar().getAdminClient();
        } catch (PulsarServerException e1) {
            result.completeExceptionally(e1);
            return;
        }

        admin.topics().getStatsAsync(topicName.getPartition(0).toString()).thenAccept(stats -> {
            stats.subscriptions.keySet().forEach(subscription -> {
                List<CompletableFuture<Void>> subscriptionFutures = new ArrayList<>();
                for (int i = partitionMetadata.partitions; i < numPartitions; i++) {
                    final String topicNamePartition = topicName.getPartition(i).toString();
                    subscriptionFutures.add(admin.topics().createSubscriptionAsync(topicNamePartition,
                            subscription, MessageId.latest));
                }

                FutureUtil.waitForAll(subscriptionFutures).thenRun(() -> {
                    log.info("[{}] Successfully created new partitions {}", clientAppId(), topicName);
                    result.complete(null);
                }).exceptionally(ex -> {
                    log.warn("[{}] Failed to create subscriptions on new partitions for {}", clientAppId(),
                            topicName, ex);
                    result.completeExceptionally(ex);
                    return null;
                });
            });
        }).exceptionally(ex -> {
            if (ex.getCause() instanceof PulsarAdminException.NotFoundException) {
                // The first partition doesn't exist, so there are currently no subscriptions to recreate
                result.complete(null);
            } else {
                log.warn("[{}] Failed to get list of subscriptions of {}", clientAppId(),
                        topicName.getPartition(0), ex);
                result.completeExceptionally(ex);
            }
            return null;
        });
    }).exceptionally(ex -> {
        log.warn("[{}] Failed to get partition metadata for {}", clientAppId(), topicName.toString());
        result.completeExceptionally(ex);
        return null;
    });
    return result;
}
From source file: com.yahoo.pulsar.broker.namespace.NamespaceService.java

private void searchForCandidateBroker(NamespaceBundle bundle, CompletableFuture<LookupResult> lookupFuture,
        boolean authoritative) {
    String candidateBroker = null;
    try {
        // check if this is Heartbeat or SLAMonitor namespace
        candidateBroker = checkHeartbeatNamespace(bundle);
        if (candidateBroker == null) {
            String broker = getSLAMonitorBrokerName(bundle);
            // checking if the broker is up and running
            if (broker != null && isBrokerActive(broker)) {
                candidateBroker = broker;
            }
        }

        if (candidateBroker == null) {
            if (!this.loadManager.isCentralized() || pulsar.getLeaderElectionService().isLeader()) {
                candidateBroker = getLeastLoadedFromLoadManager(bundle);
            } else {
                if (authoritative) {
                    // leader broker already assigned the current broker as owner
                    candidateBroker = pulsar.getWebServiceAddress();
                } else {
                    // forward to leader broker to make assignment
                    candidateBroker = pulsar.getLeaderElectionService().getCurrentLeader().getServiceUrl();
                }
            }
        }
    } catch (Exception e) {
        LOG.warn("Error when searching for candidate broker to acquire {}: {}", bundle, e.getMessage(), e);
        lookupFuture.completeExceptionally(e);
        return;
    }

    try {
        checkNotNull(candidateBroker);

        if (pulsar.getWebServiceAddress().equals(candidateBroker)) {
            // Load manager decided that the local broker should try to become the owner
            ownershipCache.tryAcquiringOwnership(bundle).thenAccept(ownerInfo -> {
                if (ownerInfo.isDisabled()) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Namespace bundle {} is currently being unloaded", bundle);
                    }
                    lookupFuture.completeExceptionally(new IllegalStateException(
                            String.format("Namespace bundle %s is currently being unloaded", bundle)));
                } else {
                    // Found owner for the namespace bundle
                    // Schedule the task to pre-load destinations
                    pulsar.loadNamespaceDestinations(bundle);
                    lookupFuture.complete(new LookupResult(ownerInfo));
                }
            }).exceptionally(exception -> {
                LOG.warn("Failed to acquire ownership for namespace bundle {}: ", bundle, exception.getMessage(),
                        exception);
                lookupFuture.completeExceptionally(new PulsarServerException(
                        "Failed to acquire ownership for namespace bundle " + bundle, exception));
                return null;
            });
        } else {
            // Load manager decided that some other broker should try to acquire ownership
            if (LOG.isDebugEnabled()) {
                LOG.debug("Redirecting to broker {} to acquire ownership of bundle {}", candidateBroker, bundle);
            }

            // Now setting the redirect url
            createLookupResult(candidateBroker).thenAccept(lookupResult -> lookupFuture.complete(lookupResult))
                    .exceptionally(ex -> {
                        lookupFuture.completeExceptionally(ex);
                        return null;
                    });
        }
    } catch (Exception e) {
        LOG.warn("Error in trying to acquire namespace bundle ownership for {}: {}", bundle, e.getMessage(), e);
        lookupFuture.completeExceptionally(e);
    }
}
From source file: org.apache.pulsar.broker.namespace.NamespaceService.java

private void searchForCandidateBroker(NamespaceBundle bundle, CompletableFuture<LookupResult> lookupFuture,
        boolean authoritative) {
    String candidateBroker = null;
    try {
        // check if this is Heartbeat or SLAMonitor namespace
        candidateBroker = checkHeartbeatNamespace(bundle);
        if (candidateBroker == null) {
            String broker = getSLAMonitorBrokerName(bundle);
            // checking if the broker is up and running
            if (broker != null && isBrokerActive(broker)) {
                candidateBroker = broker;
            }
        }

        if (candidateBroker == null) {
            if (!this.loadManager.get().isCentralized() || pulsar.getLeaderElectionService().isLeader()) {
                candidateBroker = getLeastLoadedFromLoadManager(bundle);
            } else {
                if (authoritative) {
                    // leader broker already assigned the current broker as owner
                    candidateBroker = pulsar.getWebServiceAddress();
                } else {
                    // forward to leader broker to make assignment
                    candidateBroker = pulsar.getLeaderElectionService().getCurrentLeader().getServiceUrl();
                }
            }
        }
    } catch (Exception e) {
        LOG.warn("Error when searching for candidate broker to acquire {}: {}", bundle, e.getMessage(), e);
        lookupFuture.completeExceptionally(e);
        return;
    }

    try {
        checkNotNull(candidateBroker);

        if (pulsar.getWebServiceAddress().equals(candidateBroker)) {
            // Load manager decided that the local broker should try to become the owner
            ownershipCache.tryAcquiringOwnership(bundle).thenAccept(ownerInfo -> {
                if (ownerInfo.isDisabled()) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Namespace bundle {} is currently being unloaded", bundle);
                    }
                    lookupFuture.completeExceptionally(new IllegalStateException(
                            String.format("Namespace bundle %s is currently being unloaded", bundle)));
                } else {
                    // Found owner for the namespace bundle
                    // Schedule the task to pre-load destinations
                    pulsar.loadNamespaceDestinations(bundle);
                    lookupFuture.complete(new LookupResult(ownerInfo));
                }
            }).exceptionally(exception -> {
                LOG.warn("Failed to acquire ownership for namespace bundle {}: ", bundle, exception.getMessage(),
                        exception);
                lookupFuture.completeExceptionally(new PulsarServerException(
                        "Failed to acquire ownership for namespace bundle " + bundle, exception));
                return null;
            });
        } else {
            // Load manager decided that some other broker should try to acquire ownership
            if (LOG.isDebugEnabled()) {
                LOG.debug("Redirecting to broker {} to acquire ownership of bundle {}", candidateBroker, bundle);
            }

            // Now setting the redirect url
            createLookupResult(candidateBroker).thenAccept(lookupResult -> lookupFuture.complete(lookupResult))
                    .exceptionally(ex -> {
                        lookupFuture.completeExceptionally(ex);
                        return null;
                    });
        }
    } catch (Exception e) {
        LOG.warn("Error in trying to acquire namespace bundle ownership for {}: {}", bundle, e.getMessage(), e);
        lookupFuture.completeExceptionally(e);
    }
}