List of usage examples for the java.util.concurrent.CompletableFuture constructor
public CompletableFuture()
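Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern they all share: create an incomplete CompletableFuture with the no-arg constructor, hand it out as a promise, and complete it (or fail it) from another thread.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class PromiseSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // The no-arg constructor creates a future that nobody has completed yet.
        CompletableFuture<String> promise = new CompletableFuture<>();

        // Some other thread eventually supplies the value (or an exception).
        executor.submit(() -> {
            try {
                promise.complete("done");
            } catch (Throwable t) {
                promise.completeExceptionally(t);
            }
        });

        // Callers can block, poll, or chain callbacks on the promise.
        System.out.println(promise.join());
        executor.shutdown();
    }
}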
From source file: org.apache.flink.runtime.rest.RestServerEndpointITCase.java
/**
 * Tests that after calling {@link RestServerEndpoint#closeAsync()}, the handlers are closed
 * first, and we wait for in-flight requests to finish. As long as not all handlers are closed,
 * HTTP requests should be served.
 */
@Test
public void testShouldWaitForHandlersWhenClosing() throws Exception {
    testHandler.closeFuture = new CompletableFuture<>();
    final HandlerBlocker handlerBlocker = new HandlerBlocker(timeout);
    testHandler.handlerBody = id -> {
        // Intentionally schedule the work on a different thread. This is to simulate
        // handlers where the CompletableFuture is finished by the RPC framework.
        return CompletableFuture.supplyAsync(() -> {
            handlerBlocker.arriveAndBlock();
            return new TestResponse(id);
        });
    };

    // Initiate closing RestServerEndpoint but the test handler should block.
    final CompletableFuture<Void> closeRestServerEndpointFuture = serverEndpoint.closeAsync();
    assertThat(closeRestServerEndpointFuture.isDone(), is(false));

    final CompletableFuture<TestResponse> request = sendRequestToTestHandler(new TestRequest(1));
    handlerBlocker.awaitRequestToArrive();

    // Allow handler to close but there is still one in-flight request which should prevent
    // the RestServerEndpoint from closing.
    testHandler.closeFuture.complete(null);
    assertThat(closeRestServerEndpointFuture.isDone(), is(false));

    // Finish the in-flight request.
    handlerBlocker.unblockRequest();

    request.get(timeout.getSize(), timeout.getUnit());
    closeRestServerEndpointFuture.get(timeout.getSize(), timeout.getUnit());
}
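The Flink test above drives its handler with a future that is finished on a different thread. A stripped-down sketch of that idea, with hypothetical names rather than Flink types: block an asynchronous task on a latch and observe that the result future stays incomplete until the latch is released.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;

public class BlockedHandlerSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch blocker = new CountDownLatch(1);

        // The work runs on the common pool; the future is completed by that thread, not the caller.
        CompletableFuture<String> response = CompletableFuture.supplyAsync(() -> {
            try {
                blocker.await();          // simulate an in-flight request that has not finished yet
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            return "response";
        });

        System.out.println(response.isDone());  // false: the handler is still blocked

        blocker.countDown();                    // let the in-flight request finish
        System.out.println(response.get());     // "response"
    }
}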
From source file: org.apache.distributedlog.lock.ZKSessionLock.java
@Override
public CompletableFuture<LockWaiter> asyncTryLock(final long timeout, final TimeUnit unit) {
    final CompletableFuture<String> result = new CompletableFuture<String>();
    final boolean wait = DistributedLogConstants.LOCK_IMMEDIATE != timeout;
    if (wait) {
        asyncTryLock(wait, result);
    } else {
        // try to check locks first
        zk.getChildren(lockPath, null, new AsyncCallback.Children2Callback() {
            @Override
            public void processResult(final int rc, String path, Object ctx,
                                      final List<String> children, Stat stat) {
                lockStateExecutor.executeOrdered(lockPath, new SafeRunnable() {
                    @Override
                    public void safeRun() {
                        if (!lockState.inState(State.INIT)) {
                            result.completeExceptionally(new LockStateChangedException(lockPath, lockId,
                                    State.INIT, lockState.getState()));
                            return;
                        }
                        if (KeeperException.Code.OK.intValue() != rc) {
                            result.completeExceptionally(KeeperException.create(KeeperException.Code.get(rc)));
                            return;
                        }
                        FailpointUtils.checkFailPointNoThrow(FailpointUtils.FailPointName.FP_LockTryAcquire);
                        Collections.sort(children, MEMBER_COMPARATOR);
                        if (children.size() > 0) {
                            asyncParseClientID(zk, lockPath, children.get(0))
                                    .whenCompleteAsync(new FutureEventListener<Pair<String, Long>>() {
                                        @Override
                                        public void onSuccess(Pair<String, Long> owner) {
                                            if (!checkOrClaimLockOwner(owner, result)) {
                                                acquireFuture.complete(false);
                                            }
                                        }

                                        @Override
                                        public void onFailure(final Throwable cause) {
                                            result.completeExceptionally(cause);
                                        }
                                    }, lockStateExecutor.chooseThread(lockPath));
                        } else {
                            asyncTryLock(wait, result);
                        }
                    }
                });
            }
        }, null);
    }

    final CompletableFuture<Boolean> waiterAcquireFuture = FutureUtils.createFuture();
    waiterAcquireFuture.whenComplete((value, cause) -> acquireFuture.completeExceptionally(cause));
    return result.thenApply(new Function<String, LockWaiter>() {
        @Override
        public LockWaiter apply(final String currentOwner) {
            final Exception acquireException = new OwnershipAcquireFailedException(lockPath, currentOwner);
            FutureUtils.within(acquireFuture, timeout, unit, acquireException, lockStateExecutor, lockPath)
                    .whenComplete(new FutureEventListener<Boolean>() {
                        @Override
                        public void onSuccess(Boolean acquired) {
                            completeOrFail(acquireException);
                        }

                        @Override
                        public void onFailure(final Throwable acquireCause) {
                            completeOrFail(acquireException);
                        }

                        private void completeOrFail(final Throwable acquireCause) {
                            if (isLockHeld()) {
                                waiterAcquireFuture.complete(true);
                            } else {
                                asyncUnlock().whenComplete(new FutureEventListener<Void>() {
                                    @Override
                                    public void onSuccess(Void value) {
                                        waiterAcquireFuture.completeExceptionally(acquireCause);
                                    }

                                    @Override
                                    public void onFailure(Throwable cause) {
                                        waiterAcquireFuture.completeExceptionally(acquireCause);
                                    }
                                });
                            }
                        }
                    });
            return new LockWaiter(lockId.getLeft(), currentOwner, waiterAcquireFuture);
        }
    });
}
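The lock code above layers several futures: a result promise carrying the current owner, an acquire future bounded by a timeout, and a waiter future completed only once the lock is actually held or released. A much smaller sketch of the core composition, with hypothetical names and no ZooKeeper involved: map one promise into a derived value with thenApply, and use whenComplete to react to either outcome.

import java.util.concurrent.CompletableFuture;

public class LockCompositionSketch {
    public static void main(String[] args) {
        CompletableFuture<String> ownerPromise = new CompletableFuture<>();

        // Derive a higher-level result from the raw promise.
        CompletableFuture<String> waiter = ownerPromise.thenApply(owner -> "waiting on owner " + owner);

        // The same callback runs on success or failure; 'cause' is null on success.
        waiter.whenComplete((value, cause) -> {
            if (cause != null) {
                System.out.println("acquire failed: " + cause);
            } else {
                System.out.println("acquired: " + value);
            }
        });

        ownerPromise.complete("member_000000001");
    }
}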
From source file: com.yahoo.pulsar.broker.service.ServerCnx.java
@Override
protected void handleProducer(final CommandProducer cmdProducer) {
    checkArgument(state == State.Connected);
    CompletableFuture<Boolean> authorizationFuture;
    if (service.isAuthorizationEnabled()) {
        authorizationFuture = service.getAuthorizationManager()
                .canProduceAsync(DestinationName.get(cmdProducer.getTopic().toString()), authRole);
    } else {
        authorizationFuture = CompletableFuture.completedFuture(true);
    }

    // Use producer name provided by client if present
    final String producerName = cmdProducer.hasProducerName() ? cmdProducer.getProducerName()
            : service.generateUniqueProducerName();
    final String topicName = cmdProducer.getTopic();
    final long producerId = cmdProducer.getProducerId();
    final long requestId = cmdProducer.getRequestId();

    authorizationFuture.thenApply(isAuthorized -> {
        if (isAuthorized) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Client is authorized to Produce with role {}", remoteAddress, authRole);
            }
            CompletableFuture<Producer> producerFuture = new CompletableFuture<>();
            CompletableFuture<Producer> existingProducerFuture = producers.putIfAbsent(producerId, producerFuture);

            if (existingProducerFuture != null) {
                if (existingProducerFuture.isDone() && !existingProducerFuture.isCompletedExceptionally()) {
                    Producer producer = existingProducerFuture.getNow(null);
                    log.info("[{}] Producer with the same id is already created: {}", remoteAddress, producer);
                    ctx.writeAndFlush(Commands.newProducerSuccess(requestId, producer.getProducerName()));
                    return null;
                } else {
                    // There was an earlier request to create a producer with the same producerId.
                    // This can happen when the client timeout is lower than the broker timeouts.
                    // We need to wait until the previous producer creation request either
                    // completes or fails.
                    ServerError error = !existingProducerFuture.isDone() ? ServerError.ServiceNotReady
                            : getErrorCode(existingProducerFuture);
                    log.warn("[{}][{}] Producer is already present on the connection", remoteAddress, topicName);
                    ctx.writeAndFlush(Commands.newError(requestId, error,
                            "Producer is already present on the connection"));
                    return null;
                }
            }

            log.info("[{}][{}] Creating producer. producerId={}", remoteAddress, topicName, producerId);

            service.getTopic(topicName).thenAccept((Topic topic) -> {
                // Before creating producer, check if backlog quota exceeded on topic
                if (topic.isBacklogQuotaExceeded(producerName)) {
                    IllegalStateException illegalStateException = new IllegalStateException(
                            "Cannot create producer on topic with backlog quota exceeded");
                    BacklogQuota.RetentionPolicy retentionPolicy = topic.getBacklogQuota().getPolicy();
                    if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_request_hold) {
                        ctx.writeAndFlush(Commands.newError(requestId,
                                ServerError.ProducerBlockedQuotaExceededError,
                                illegalStateException.getMessage()));
                    } else if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_exception) {
                        ctx.writeAndFlush(Commands.newError(requestId,
                                ServerError.ProducerBlockedQuotaExceededException,
                                illegalStateException.getMessage()));
                    }
                    producerFuture.completeExceptionally(illegalStateException);
                    producers.remove(producerId, producerFuture);
                    return;
                }

                disableTcpNoDelayIfNeeded(topicName, producerName);

                Producer producer = new Producer(topic, ServerCnx.this, producerId, producerName, authRole);

                try {
                    topic.addProducer(producer);

                    if (isActive()) {
                        if (producerFuture.complete(producer)) {
                            log.info("[{}] Created new producer: {}", remoteAddress, producer);
                            ctx.writeAndFlush(Commands.newProducerSuccess(requestId, producerName));
                            return;
                        } else {
                            // The producer's future was completed before by a close command
                            producer.closeNow();
                            log.info("[{}] Cleared producer created after timeout on client side {}",
                                    remoteAddress, producer);
                        }
                    } else {
                        producer.closeNow();
                        log.info("[{}] Cleared producer created after connection was closed: {}",
                                remoteAddress, producer);
                        producerFuture.completeExceptionally(
                                new IllegalStateException("Producer created after connection was closed"));
                    }
                } catch (BrokerServiceException ise) {
                    log.error("[{}] Failed to add producer to topic {}: {}", remoteAddress, topicName,
                            ise.getMessage());
                    ctx.writeAndFlush(Commands.newError(requestId,
                            BrokerServiceException.getClientErrorCode(ise), ise.getMessage()));
                    producerFuture.completeExceptionally(ise);
                }

                producers.remove(producerId, producerFuture);
            }).exceptionally(exception -> {
                Throwable cause = exception.getCause();
                if (!(cause instanceof ServiceUnitNotReadyException)) {
                    // Do not print stack traces for expected exceptions
                    log.error("[{}] Failed to create topic {}", remoteAddress, topicName, exception);
                }

                // If the client timed out, the future would have been completed by a subsequent close.
                // Send the error back to the client only if not completed already.
                if (producerFuture.completeExceptionally(exception)) {
                    ctx.writeAndFlush(Commands.newError(requestId,
                            BrokerServiceException.getClientErrorCode(cause), cause.getMessage()));
                }
                producers.remove(producerId, producerFuture);
                return null;
            });
        } else {
            String msg = "Client is not authorized to Produce";
            log.warn("[{}] {} with role {}", remoteAddress, msg, authRole);
            ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg));
        }
        return null;
    });
}
From source file: org.apache.distributedlog.BKLogSegmentWriter.java
private CompletableFuture<Void> closeInternal(boolean abort) {
    CompletableFuture<Void> closePromise;
    synchronized (this) {
        if (null != closeFuture) {
            return closeFuture;
        }
        closePromise = closeFuture = new CompletableFuture<Void>();
    }

    MutableObject<Throwable> throwExc = new MutableObject<>(null);
    closeInternal(abort, throwExc, closePromise);
    return closePromise;
}
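closeInternal above uses a common idiom: a close future created lazily under a lock, so that every close call after the first returns the same CompletableFuture. A minimal sketch of that idiom, as a hypothetical class with no other state:

import java.util.concurrent.CompletableFuture;

public class IdempotentCloseSketch {
    private CompletableFuture<Void> closeFuture = null;

    public CompletableFuture<Void> closeAsync() {
        CompletableFuture<Void> closePromise;
        synchronized (this) {
            if (closeFuture != null) {
                return closeFuture;                // already closing/closed: return the same future
            }
            closePromise = closeFuture = new CompletableFuture<>();
        }
        // Do the actual shutdown work outside the lock, then complete the shared promise.
        try {
            // ... release resources here ...
            closePromise.complete(null);
        } catch (Throwable t) {
            closePromise.completeExceptionally(t);
        }
        return closePromise;
    }
}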
From source file: com.ikanow.aleph2.management_db.mongodb.services.TestIkanowV1SyncService_LibraryJars.java
@Test
public void test_updateV1SourceStatus() throws JsonProcessingException, IOException, InterruptedException,
        ExecutionException, ParseException {
    @SuppressWarnings("unchecked")
    ICrudService<JsonNode> v1_share_db = this._service_context.getCoreManagementDbService()
            .getUnderlyingPlatformDriver(ICrudService.class, Optional.of("social.share")).get();

    final DBCollection dbc = v1_share_db.getUnderlyingPlatformDriver(DBCollection.class, Optional.empty()).get();

    v1_share_db.deleteDatastore().get();

    IManagementCrudService<SharedLibraryBean> library_db = this._service_context.getCoreManagementDbService()
            .getSharedLibraryStore();

    library_db.deleteDatastore().get();

    final ObjectMapper mapper = BeanTemplateUtils.configureMapper(Optional.empty());
    final ObjectNode v1_share_1 = (ObjectNode) mapper
            .readTree(this.getClass().getResourceAsStream("test_v1_sync_sample_share.json"));

    final DBObject v1_share_1_dbo = (DBObject) JSON.parse(v1_share_1.toString());
    v1_share_1_dbo.put("_id", new ObjectId(v1_share_1.get("_id").asText()));

    assertEquals(0L, (long) v1_share_db.countObjects().get());
    dbc.save(v1_share_1_dbo);
    //v1_share_db.storeObjects(Arrays.asList(v1_share_1)).get();
    assertEquals(1L, (long) v1_share_db.countObjects().get());

    final SharedLibraryBean share1 = IkanowV1SyncService_LibraryJars.getLibraryBeanFromV1Share(v1_share_1);

    assertEquals(0L, (long) library_db.countObjects().get());
    library_db.storeObjects(Arrays.asList(share1)).get();
    assertEquals(1L, (long) library_db.countObjects().get());

    // No error - create
    {
        final ManagementFuture<?> test_1 = FutureUtils.createManagementFuture(
                CompletableFuture.completedFuture(Unit.unit()),
                CompletableFuture.completedFuture(
                        Arrays.asList(ErrorUtils.buildSuccessMessage("", "", "", ""))) // (single non error)
        );

        final CompletableFuture<Boolean> res = IkanowV1SyncService_LibraryJars.updateV1ShareErrorStatus_top(
                "555d44e3347d336b3e8c4cbe", test_1, library_db, v1_share_db, true);

        assertEquals(false, res.get());

        ObjectNode unchanged = (ObjectNode) v1_share_db.getRawService()
                .getObjectById(new ObjectId("555d44e3347d336b3e8c4cbe")).get().get();

        assertEquals(v1_share_1.without("_id").toString(), unchanged.without("_id").toString());
    }

    // DB call throws exception
    {
        final CompletableFuture<?> error_out = new CompletableFuture<>();
        error_out.completeExceptionally(new RuntimeException("test"));

        final ManagementFuture<?> test_1 = FutureUtils.createManagementFuture(error_out);

        final CompletableFuture<Boolean> res = IkanowV1SyncService_LibraryJars.updateV1ShareErrorStatus_top(
                "555d44e3347d336b3e8c4cbe", test_1, library_db, v1_share_db, true);

        assertEquals(true, res.get());

        JsonNode changed = v1_share_db.getRawService().getObjectById(new ObjectId("555d44e3347d336b3e8c4cbe"))
                .get().get();

        assertTrue(changed.get("description").asText()
                .contains("] (unknown) ((unknown)): ERROR: [java.lang.RuntimeException: test"));

        // This shouldn't yet be present
        assertFalse("Description error time travels: " + changed.get("description").asText(),
                changed.get("description").asText().contains("] (test) (unknown): ERROR: test"));
    }

    // DB call throws exception, object doesn't exist (code coverage!)
    {
        final CompletableFuture<?> error_out = new CompletableFuture<>();
        error_out.completeExceptionally(new RuntimeException("test"));

        final ManagementFuture<?> test_1 = FutureUtils.createManagementFuture(error_out);

        final CompletableFuture<Boolean> res = IkanowV1SyncService_LibraryJars.updateV1ShareErrorStatus_top(
                "555d44e3347d336b3e8c4cbf", test_1, library_db, v1_share_db, true);

        assertEquals(false, res.get());
    }

    // User errors (+update not create)
    {
        final ManagementFuture<?> test_1 = FutureUtils.createManagementFuture(
                CompletableFuture.completedFuture(Unit.unit()),
                CompletableFuture.completedFuture(
                        Arrays.asList(ErrorUtils.buildErrorMessage("test", "test", "test", "test"))) // (single error)
        );

        final CompletableFuture<Boolean> res = IkanowV1SyncService_LibraryJars.updateV1ShareErrorStatus_top(
                "555d44e3347d336b3e8c4cbe", test_1, library_db, v1_share_db, false);

        assertEquals(true, res.get());

        JsonNode changed = v1_share_db.getRawService().getObjectById(new ObjectId("555d44e3347d336b3e8c4cbe"))
                .get().get();

        SharedLibraryBean v2_version = library_db.getObjectById("v1_555d44e3347d336b3e8c4cbe").get().get();
        assertTrue("v2 lib bean needed updating: " + v2_version.modified(),
                new Date().getTime() - v2_version.modified().getTime() < 5000L);

        // Still has the old error
        assertTrue("Description missing errors: " + changed.get("description").asText(),
                changed.get("description").asText()
                        .contains("] (unknown) ((unknown)): ERROR: [java.lang.RuntimeException: test"));
        // Now has the new error
        assertTrue("Description missing errors: " + changed.get("description").asText(),
                changed.get("description").asText().contains("] test (test): ERROR: test"));
    }
}
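The test above fabricates its failure cases by handing the code under test a future that has already been completed exceptionally. That trick needs nothing beyond the constructor; a tiny sketch of it in isolation:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class PreFailedFutureSketch {
    public static void main(String[] args) {
        CompletableFuture<String> failed = new CompletableFuture<>();
        failed.completeExceptionally(new RuntimeException("test"));

        // Anything that consumes this future sees the failure immediately.
        try {
            failed.get();
        } catch (InterruptedException | ExecutionException e) {
            System.out.println("observed: " + e.getCause());   // java.lang.RuntimeException: test
        }
    }
}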
From source file: org.apache.bookkeeper.mledger.offload.jcloud.impl.BlobStoreManagedLedgerOffloader.java
@Override
public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uid,
        Map<String, String> offloadDriverMetadata) {
    String readBucket = getReadBucket(offloadDriverMetadata);
    BlobStore readBlobstore = getReadBlobStore(offloadDriverMetadata);

    CompletableFuture<Void> promise = new CompletableFuture<>();
    scheduler.chooseThread(ledgerId).submit(() -> {
        try {
            readBlobstore.removeBlobs(readBucket,
                    ImmutableList.of(dataBlockOffloadKey(ledgerId, uid), indexBlockOffloadKey(ledgerId, uid)));
            promise.complete(null);
        } catch (Throwable t) {
            log.error("Failed delete Blob", t);
            promise.completeExceptionally(t);
        }
    });
    return promise;
}
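deleteOffloaded above shows the standard bridge from a plain executor task to a CompletableFuture: create the promise up front, submit the work, and complete or fail the promise inside the task. A stripped-down sketch with hypothetical names:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ExecutorBridgeSketch {
    static CompletableFuture<Void> deleteAsync(ExecutorService executor, String key) {
        CompletableFuture<Void> promise = new CompletableFuture<>();
        executor.submit(() -> {
            try {
                // ... perform the blocking delete for 'key' here ...
                promise.complete(null);
            } catch (Throwable t) {
                promise.completeExceptionally(t);   // propagate any failure to callers
            }
        });
        return promise;
    }

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        deleteAsync(executor, "ledger-123").join();
        executor.shutdown();
    }
}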
From source file: org.apache.pulsar.broker.service.persistent.PersistentTopic.java
@Override
public CompletableFuture<Consumer> subscribe(final ServerCnx cnx, String subscriptionName, long consumerId,
        SubType subType, int priorityLevel, String consumerName, boolean isDurable, MessageId startMessageId,
        Map<String, String> metadata, boolean readCompacted, InitialPosition initialPosition) {

    final CompletableFuture<Consumer> future = new CompletableFuture<>();

    try {
        brokerService.checkTopicNsOwnership(getName());
    } catch (Exception e) {
        future.completeExceptionally(e);
        return future;
    }

    if (readCompacted && !(subType == SubType.Failover || subType == SubType.Exclusive)) {
        future.completeExceptionally(
                new NotAllowedException("readCompacted only allowed on failover or exclusive subscriptions"));
        return future;
    }

    if (isBlank(subscriptionName)) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Empty subscription name", topic);
        }
        future.completeExceptionally(new NamingException("Empty subscription name"));
        return future;
    }

    if (hasBatchMessagePublished && !cnx.isBatchMessageCompatibleVersion()) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Consumer doesn't support batch-message {}", topic, subscriptionName);
        }
        future.completeExceptionally(new UnsupportedVersionException("Consumer doesn't support batch-message"));
        return future;
    }

    if (subscriptionName.startsWith(replicatorPrefix) || subscriptionName.equals(DEDUPLICATION_CURSOR_NAME)) {
        log.warn("[{}] Failed to create subscription for {}", topic, subscriptionName);
        future.completeExceptionally(
                new NamingException("Subscription with reserved subscription name attempted"));
        return future;
    }

    if (cnx.getRemoteAddress() != null && cnx.getRemoteAddress().toString().contains(":")) {
        SubscribeRateLimiter.ConsumerIdentifier consumer = new SubscribeRateLimiter.ConsumerIdentifier(
                cnx.getRemoteAddress().toString().split(":")[0], consumerName, consumerId);
        if (subscribeRateLimiter.isPresent() && !subscribeRateLimiter.get().subscribeAvailable(consumer)
                || !subscribeRateLimiter.get().tryAcquire(consumer)) {
            log.warn("[{}] Failed to create subscription for {} {} limited by {}, available {}", topic,
                    subscriptionName, consumer, subscribeRateLimiter.get().getSubscribeRate(),
                    subscribeRateLimiter.get().getAvailableSubscribeRateLimit(consumer));
            future.completeExceptionally(
                    new NotAllowedException("Subscribe limited by subscribe rate limit per consumer."));
            return future;
        }
    }

    lock.readLock().lock();
    try {
        if (isFenced) {
            log.warn("[{}] Attempting to subscribe to a fenced topic", topic);
            future.completeExceptionally(new TopicFencedException("Topic is temporarily unavailable"));
            return future;
        }
        USAGE_COUNT_UPDATER.incrementAndGet(this);
        if (log.isDebugEnabled()) {
            log.debug("[{}] [{}] [{}] Added consumer -- count: {}", topic, subscriptionName, consumerName,
                    USAGE_COUNT_UPDATER.get(this));
        }
    } finally {
        lock.readLock().unlock();
    }

    CompletableFuture<? extends Subscription> subscriptionFuture = isDurable
            ? getDurableSubscription(subscriptionName, initialPosition)
            : getNonDurableSubscription(subscriptionName, startMessageId);

    int maxUnackedMessages = isDurable
            ? brokerService.pulsar().getConfiguration().getMaxUnackedMessagesPerConsumer()
            : 0;

    subscriptionFuture.thenAccept(subscription -> {
        try {
            Consumer consumer = new Consumer(subscription, subType, topic, consumerId, priorityLevel,
                    consumerName, maxUnackedMessages, cnx, cnx.getRole(), metadata, readCompacted,
                    initialPosition);
            subscription.addConsumer(consumer);
            if (!cnx.isActive()) {
                consumer.close();
                if (log.isDebugEnabled()) {
                    log.debug("[{}] [{}] [{}] Subscribe failed -- count: {}", topic, subscriptionName,
                            consumer.consumerName(), USAGE_COUNT_UPDATER.get(PersistentTopic.this));
                }
                future.completeExceptionally(
                        new BrokerServiceException("Connection was closed while the opening the cursor "));
            } else {
                log.info("[{}][{}] Created new subscription for {}", topic, subscriptionName, consumerId);
                future.complete(consumer);
            }
        } catch (BrokerServiceException e) {
            if (e instanceof ConsumerBusyException) {
                log.warn("[{}][{}] Consumer {} {} already connected", topic, subscriptionName, consumerId,
                        consumerName);
            } else if (e instanceof SubscriptionBusyException) {
                log.warn("[{}][{}] {}", topic, subscriptionName, e.getMessage());
            }

            USAGE_COUNT_UPDATER.decrementAndGet(PersistentTopic.this);
            future.completeExceptionally(e);
        }
    }).exceptionally(ex -> {
        log.warn("[{}] Failed to create subscription for {}: {}", topic, subscriptionName, ex.getMessage());
        USAGE_COUNT_UPDATER.decrementAndGet(PersistentTopic.this);
        future.completeExceptionally(new PersistenceException(ex));
        return null;
    });

    return future;
}
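subscribe above never throws for validation problems; it returns a future that has already been completed exceptionally, so callers handle every failure through one code path. A compact sketch of that convention, with hypothetical names:

import java.util.concurrent.CompletableFuture;

public class FailFastFutureSketch {
    static CompletableFuture<String> subscribe(String subscriptionName) {
        CompletableFuture<String> future = new CompletableFuture<>();
        if (subscriptionName == null || subscriptionName.isEmpty()) {
            // Report validation errors through the future instead of throwing.
            future.completeExceptionally(new IllegalArgumentException("Empty subscription name"));
            return future;
        }
        // ... asynchronous subscription work would complete the future later ...
        future.complete("subscribed: " + subscriptionName);
        return future;
    }

    public static void main(String[] args) {
        subscribe("").whenComplete((value, cause) ->
                System.out.println(cause != null ? "failed: " + cause : value));
    }
}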
From source file: io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java
/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment,
 * with concurrent requests.
 */
@Test
public void testGetOrAssignStreamSegmentIdWithConcurrency() throws Exception {
    // We setup a delay in the OperationLog process. We only do this for a stand-alone StreamSegment
    // because the process is driven by the same code for Transactions as well.
    final String segmentName = "Segment";
    final long segmentId = 12345;

    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);

    @Cleanup
    TestContext context = new TestContext();
    setupStorageGetHandler(context, storageSegments,
            sn -> new StreamSegmentInformation(sn, 0, false, false, new ImmutableDate()));

    CompletableFuture<Long> initialAddFuture = new CompletableFuture<>();
    AtomicBoolean operationLogInvoked = new AtomicBoolean(false);
    context.operationLog.addHandler = op -> {
        if (!(op instanceof StreamSegmentMapOperation)) {
            return FutureHelpers.failedFuture(new IllegalArgumentException("unexpected operation"));
        }
        if (operationLogInvoked.getAndSet(true)) {
            return FutureHelpers.failedFuture(new IllegalStateException("multiple calls to OperationLog.add"));
        }
        // Need to set SegmentId on operation.
        ((StreamSegmentMapOperation) op).setStreamSegmentId(segmentId);
        return initialAddFuture;
    };

    CompletableFuture<Long> firstCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT);
    CompletableFuture<Long> secondCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT);

    Thread.sleep(20);
    Assert.assertFalse("getOrAssignStreamSegmentId (first call) returned before OperationLog finished.",
            firstCall.isDone());
    Assert.assertFalse("getOrAssignStreamSegmentId (second call) returned before OperationLog finished.",
            secondCall.isDone());

    initialAddFuture.complete(1L);
    long firstCallResult = firstCall.get(100, TimeUnit.MILLISECONDS);
    long secondCallResult = secondCall.get(100, TimeUnit.MILLISECONDS);

    Assert.assertEquals(
            "Two concurrent calls to getOrAssignStreamSegmentId for the same StreamSegment returned different ids.",
            firstCallResult, secondCallResult);
}
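The Pravega test controls exactly when the simulated OperationLog finishes by returning a CompletableFuture it completes by hand, which lets it assert that two concurrent callers share one pending result. A reduced sketch of that technique, with hypothetical names:

import java.util.concurrent.CompletableFuture;

public class ManualCompletionTestSketch {
    public static void main(String[] args) {
        // The "operation log" result, completed only when the test decides.
        CompletableFuture<Long> pendingAdd = new CompletableFuture<>();

        // Two concurrent callers both end up waiting on the same pending operation.
        CompletableFuture<Long> firstCall = pendingAdd.thenApply(id -> id);
        CompletableFuture<Long> secondCall = pendingAdd.thenApply(id -> id);

        System.out.println(firstCall.isDone());    // false: nothing has finished yet
        System.out.println(secondCall.isDone());   // false

        pendingAdd.complete(1L);                   // release both callers at once
        System.out.println(firstCall.join() + " " + secondCall.join());
    }
}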
From source file: com.yahoo.pulsar.broker.namespace.NamespaceService.java
/**
 * 1. split the given bundle into two bundles
 * 2. assign ownership of both the bundles to current broker
 * 3. update policies with newly created bundles into LocalZK
 * 4. disable original bundle and refresh the cache
 *
 * @param bundle
 * @return
 * @throws Exception
 */
public CompletableFuture<Void> splitAndOwnBundle(NamespaceBundle bundle) throws Exception {

    final CompletableFuture<Void> future = new CompletableFuture<>();

    Pair<NamespaceBundles, List<NamespaceBundle>> splittedBundles = bundleFactory.splitBundles(bundle,
            2 /* by default split into 2 */);

    if (splittedBundles != null) {
        checkNotNull(splittedBundles.getLeft());
        checkNotNull(splittedBundles.getRight());
        checkArgument(splittedBundles.getRight().size() == 2, "bundle has to be split in two bundles");

        NamespaceName nsname = bundle.getNamespaceObject();
        try {
            // take ownership of newly split bundles
            for (NamespaceBundle sBundle : splittedBundles.getRight()) {
                checkNotNull(ownershipCache.tryAcquiringOwnership(sBundle));
            }
            updateNamespaceBundles(nsname, splittedBundles.getLeft(),
                    (rc, path, zkCtx, stat) -> pulsar.getOrderedExecutor().submit(safeRun(() -> {
                        if (rc == KeeperException.Code.OK.intValue()) {
                            // disable old bundle
                            try {
                                ownershipCache.disableOwnership(bundle);
                                // invalidate cache as zookeeper has new split namespace bundle
                                bundleFactory.invalidateBundleCache(nsname);
                                // update bundled_topic cache for load-report-generation
                                pulsar.getBrokerService().refreshTopicToStatsMaps(bundle);
                                loadManager.setLoadReportForceUpdateFlag();
                                future.complete(null);
                            } catch (Exception e) {
                                String msg1 = format(
                                        "failed to disable bundle %s under namespace [%s] with error %s",
                                        nsname.toString(), bundle.toString(), e.getMessage());
                                LOG.warn(msg1, e);
                                future.completeExceptionally(new ServiceUnitNotReadyException(msg1));
                            }
                        } else {
                            String msg2 = format("failed to update namespace [%s] policies due to %s",
                                    nsname.toString(),
                                    KeeperException.create(KeeperException.Code.get(rc)).getMessage());
                            LOG.warn(msg2);
                            future.completeExceptionally(new ServiceUnitNotReadyException(msg2));
                        }
                    })));
        } catch (Exception e) {
            String msg = format("failed to aquire ownership of split bundle for namespace [%s], %s",
                    nsname.toString(), e.getMessage());
            LOG.warn(msg, e);
            future.completeExceptionally(new ServiceUnitNotReadyException(msg));
        }
    } else {
        String msg = format("bundle %s not found under namespace", bundle.toString());
        future.completeExceptionally(new ServiceUnitNotReadyException(msg));
    }

    return future;
}
From source file: org.apache.pulsar.broker.namespace.NamespaceService.java
/**
 * 1. split the given bundle into two bundles
 * 2. assign ownership of both the bundles to current broker
 * 3. update policies with newly created bundles into LocalZK
 * 4. disable original bundle and refresh the cache
 *
 * @param bundle
 * @return
 * @throws Exception
 */
public CompletableFuture<Void> splitAndOwnBundle(NamespaceBundle bundle) throws Exception {

    final CompletableFuture<Void> future = new CompletableFuture<>();

    Pair<NamespaceBundles, List<NamespaceBundle>> splittedBundles = bundleFactory.splitBundles(bundle,
            2 /* by default split into 2 */);

    if (splittedBundles != null) {
        checkNotNull(splittedBundles.getLeft());
        checkNotNull(splittedBundles.getRight());
        checkArgument(splittedBundles.getRight().size() == 2, "bundle has to be split in two bundles");

        NamespaceName nsname = bundle.getNamespaceObject();
        try {
            // take ownership of newly split bundles
            for (NamespaceBundle sBundle : splittedBundles.getRight()) {
                checkNotNull(ownershipCache.tryAcquiringOwnership(sBundle));
            }
            updateNamespaceBundles(nsname, splittedBundles.getLeft(),
                    (rc, path, zkCtx, stat) -> pulsar.getOrderedExecutor().submit(safeRun(() -> {
                        if (rc == KeeperException.Code.OK.intValue()) {
                            // disable old bundle
                            try {
                                ownershipCache.disableOwnership(bundle);
                                // invalidate cache as zookeeper has new split namespace bundle
                                bundleFactory.invalidateBundleCache(nsname);
                                // update bundled_topic cache for load-report-generation
                                pulsar.getBrokerService().refreshTopicToStatsMaps(bundle);
                                loadManager.get().setLoadReportForceUpdateFlag();
                                future.complete(null);
                            } catch (Exception e) {
                                String msg1 = format(
                                        "failed to disable bundle %s under namespace [%s] with error %s",
                                        nsname.toString(), bundle.toString(), e.getMessage());
                                LOG.warn(msg1, e);
                                future.completeExceptionally(new ServiceUnitNotReadyException(msg1));
                            }
                        } else {
                            String msg2 = format("failed to update namespace [%s] policies due to %s",
                                    nsname.toString(),
                                    KeeperException.create(KeeperException.Code.get(rc)).getMessage());
                            LOG.warn(msg2);
                            future.completeExceptionally(new ServiceUnitNotReadyException(msg2));
                        }
                    })));
        } catch (Exception e) {
            String msg = format("failed to aquire ownership of split bundle for namespace [%s], %s",
                    nsname.toString(), e.getMessage());
            LOG.warn(msg, e);
            future.completeExceptionally(new ServiceUnitNotReadyException(msg));
        }
    } else {
        String msg = format("bundle %s not found under namespace", bundle.toString());
        future.completeExceptionally(new ServiceUnitNotReadyException(msg));
    }

    return future;
}
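Both splitAndOwnBundle variants complete a CompletableFuture from inside a ZooKeeper callback: complete(null) on the success path and completeExceptionally on every error path. A generic sketch of adapting such a callback-style API, using a hypothetical callback interface rather than the Pulsar or ZooKeeper types:

import java.util.concurrent.CompletableFuture;

public class CallbackBridgeSketch {

    // A stand-in for a callback-style API such as the ZooKeeper update used above.
    interface UpdateCallback {
        void onResult(int returnCode, Throwable error);
    }

    static void updateBundles(UpdateCallback callback) {
        callback.onResult(0, null);   // pretend the update succeeded
    }

    static CompletableFuture<Void> splitAndOwnBundle() {
        CompletableFuture<Void> future = new CompletableFuture<>();
        updateBundles((rc, error) -> {
            if (rc == 0) {
                future.complete(null);                       // success path
            } else {
                future.completeExceptionally(
                        error != null ? error : new IllegalStateException("update failed, rc=" + rc));
            }
        });
        return future;
    }

    public static void main(String[] args) {
        splitAndOwnBundle().join();
        System.out.println("bundle split completed");
    }
}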