List of usage examples for java.util.concurrent.CompletableFuture.complete()
public boolean complete(T value)
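complete(T value) moves the future to the completed state with the given value if it is not already done, and returns true only when this call caused the transition; threads blocked in get() or join() are released with that value. A minimal, self-contained sketch of that hand-off (class and variable names here are illustrative, not taken from the projects below):

import java.util.concurrent.CompletableFuture;

public class CompleteExample {
    public static void main(String[] args) throws Exception {
        CompletableFuture<String> future = new CompletableFuture<>();

        // A worker thread produces a value and hands it over by completing the future.
        new Thread(() -> {
            boolean completedNow = future.complete("hello"); // true: this call completed the future
            future.complete("ignored");                      // false: already completed, value unchanged
            System.out.println("first complete() returned " + completedNow);
        }).start();

        // The caller blocks in get() until some thread completes the future.
        System.out.println(future.get()); // prints "hello"
    }
}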
From source file:com.yahoo.pulsar.broker.namespace.NamespaceService.java
/**
 * Main internal method to lookup and setup ownership of service unit to a broker
 *
 * @param bundle
 * @param authoritative
 * @param readOnly
 * @return
 * @throws PulsarServerException
 */
private CompletableFuture<LookupResult> findBrokerServiceUrl(NamespaceBundle bundle, boolean authoritative,
        boolean readOnly) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("findBrokerServiceUrl: {} - read-only: {}", bundle, readOnly);
    }

    CompletableFuture<LookupResult> future = new CompletableFuture<>();

    // First check if we or someone else already owns the bundle
    ownershipCache.getOwnerAsync(bundle).thenAccept(nsData -> {
        if (!nsData.isPresent()) {
            // No one owns this bundle
            if (readOnly) {
                // Do not attempt to acquire ownership
                future.completeExceptionally(new IllegalStateException(
                        String.format("Can't find owner of ServiceUnit: %s", bundle)));
            } else {
                // Now, no one owns the namespace yet. Hence, we will try to dynamically assign it
                pulsar.getExecutor().execute(() -> {
                    searchForCandidateBroker(bundle, future, authoritative);
                });
            }
        } else if (nsData.get().isDisabled()) {
            future.completeExceptionally(
                    new IllegalStateException(String.format("Namespace bundle %s is being unloaded", bundle)));
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Namespace bundle {} already owned by {} ", bundle, nsData);
            }
            future.complete(new LookupResult(nsData.get()));
        }
    }).exceptionally(exception -> {
        LOG.warn("Failed to check owner for bundle {}: {}", bundle, exception.getMessage(), exception);
        future.completeExceptionally(exception);
        return null;
    });

    return future;
}
From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java
/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment,
 * with concurrent requests.
 */
@Test
public void testGetOrAssignStreamSegmentIdWithConcurrency() throws Exception {
    // We setup a delay in the OperationLog process. We only do this for a stand-alone StreamSegment because
    // the process is driven by the same code for Transactions as well.
    final String segmentName = "Segment";
    final long segmentId = 12345;
    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);

    @Cleanup
    TestContext context = new TestContext();
    setupStorageGetHandler(context, storageSegments,
            sn -> new StreamSegmentInformation(sn, 0, false, false, new ImmutableDate()));
    CompletableFuture<Long> initialAddFuture = new CompletableFuture<>();
    AtomicBoolean operationLogInvoked = new AtomicBoolean(false);
    context.operationLog.addHandler = op -> {
        if (!(op instanceof StreamSegmentMapOperation)) {
            return FutureHelpers.failedFuture(new IllegalArgumentException("unexpected operation"));
        }
        if (operationLogInvoked.getAndSet(true)) {
            return FutureHelpers.failedFuture(new IllegalStateException("multiple calls to OperationLog.add"));
        }

        // Need to set SegmentId on operation.
        ((StreamSegmentMapOperation) op).setStreamSegmentId(segmentId);
        return initialAddFuture;
    };

    CompletableFuture<Long> firstCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT);
    CompletableFuture<Long> secondCall = context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT);
    Thread.sleep(20);
    Assert.assertFalse("getOrAssignStreamSegmentId (first call) returned before OperationLog finished.",
            firstCall.isDone());
    Assert.assertFalse("getOrAssignStreamSegmentId (second call) returned before OperationLog finished.",
            secondCall.isDone());

    initialAddFuture.complete(1L);
    long firstCallResult = firstCall.get(100, TimeUnit.MILLISECONDS);
    long secondCallResult = secondCall.get(100, TimeUnit.MILLISECONDS);

    Assert.assertEquals(
            "Two concurrent calls to getOrAssignStreamSegmentId for the same StreamSegment returned different ids.",
            firstCallResult, secondCallResult);
}
From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java
private CompletableFuture<Void> checkReplicationAndRetryOnFailure() {
    CompletableFuture<Void> result = new CompletableFuture<Void>();
    checkReplication().thenAccept(res -> {
        log.info("[{}] Policies updated successfully", topic);
        result.complete(null);
    }).exceptionally(th -> {
        log.error("[{}] Policies update failed {}, scheduled retry in {} seconds", topic, th.getMessage(),
                POLICY_UPDATE_FAILURE_RETRY_TIME_SECONDS, th);
        brokerService.executor().schedule(this::checkReplicationAndRetryOnFailure,
                POLICY_UPDATE_FAILURE_RETRY_TIME_SECONDS, TimeUnit.SECONDS);
        result.completeExceptionally(th);
        return null;
    });
    return result;
}
From source file:org.apache.pulsar.compaction.TwoPhaseCompactor.java
private void phaseOneLoop(RawReader reader, Optional<MessageId> firstMessageId, Optional<MessageId> toMessageId,
        MessageId lastMessageId, Map<String, MessageId> latestForKey,
        CompletableFuture<PhaseOneResult> loopPromise) {
    if (loopPromise.isDone()) {
        return;
    }
    CompletableFuture<RawMessage> future = reader.readNextAsync();
    scheduleTimeout(future);
    future.whenCompleteAsync((m, exception) -> {
        try {
            if (exception != null) {
                loopPromise.completeExceptionally(exception);
                return;
            }
            MessageId id = m.getMessageId();
            boolean deletedMessage = false;
            if (RawBatchConverter.isReadableBatch(m)) {
                try {
                    RawBatchConverter.extractIdsAndKeys(m)
                            .forEach(e -> latestForKey.put(e.getRight(), e.getLeft()));
                } catch (IOException ioe) {
                    log.info("Error decoding batch for message {}. Whole batch will be included in output",
                            id, ioe);
                }
            } else {
                Pair<String, Integer> keyAndSize = extractKeyAndSize(m);
                if (keyAndSize != null) {
                    if (keyAndSize.getRight() > 0) {
                        latestForKey.put(keyAndSize.getLeft(), id);
                    } else {
                        deletedMessage = true;
                        latestForKey.remove(keyAndSize.getLeft());
                    }
                }
            }

            MessageId first = firstMessageId.orElse(deletedMessage ? null : id);
            MessageId to = deletedMessage ? toMessageId.orElse(null) : id;
            if (id.compareTo(lastMessageId) == 0) {
                loopPromise.complete(new PhaseOneResult(first, to, lastMessageId, latestForKey));
            } else {
                phaseOneLoop(reader, Optional.ofNullable(first), Optional.ofNullable(to), lastMessageId,
                        latestForKey, loopPromise);
            }
        } finally {
            m.close();
        }
    }, scheduler);
}
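The scheduleTimeout(future) helper called above is not shown in this excerpt. A common way to build such a guard is to schedule a task that completes the read future exceptionally if it is still pending after a deadline; completing it normally first makes the scheduled task a no-op. The following is only a sketch of that idea, not the project's actual helper, and the signature, timeout value, and class name are assumptions:

import java.util.concurrent.*;

final class Timeouts {
    // If 'future' is still pending after 'timeout', complete it exceptionally with a TimeoutException.
    static <T> void scheduleTimeout(CompletableFuture<T> future, ScheduledExecutorService scheduler,
                                    long timeout, TimeUnit unit) {
        ScheduledFuture<?> task = scheduler.schedule(
                () -> future.completeExceptionally(new TimeoutException("operation timed out")),
                timeout, unit);
        // If the future completes first, cancel the pending timeout task.
        future.whenComplete((result, ex) -> task.cancel(false));
    }
}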
From source file:io.pravega.service.server.host.stat.AutoScaleProcessor.java
private void bootstrapRequestWriters() {
    CompletableFuture<Void> createWriter = new CompletableFuture<>();

    // Starting with initial delay, in case request stream has not been created, to give it time to start
    // However, we have this wrapped in consumeFailure which means the creation of writer will be retried.
    // We are introducing a delay to avoid exceptions in the log in case creation of writer is attempted before
    // creation of requeststream.
    maintenanceExecutor.schedule(() -> Retry
            .indefinitelyWithExpBackoff(100, 10, 10000,
                    e -> log.error("error while creating writer for requeststream {}", e))
            .runAsync(() -> {
                if (clientFactory.get() == null) {
                    clientFactory.compareAndSet(null, ClientFactory.withScope(
                            NameUtils.INTERNAL_SCOPE_NAME, configuration.getControllerUri()));
                }

                this.writer.set(clientFactory.get().createEventWriter(
                        configuration.getInternalRequestStream(), serializer, writerConfig));

                initialized.set(true);
                // even if there is no activity, keep cleaning up the cache so that scale down can be triggered.
                // caches do not perform clean up if there is no activity. This is because they do not maintain
                // their own background thread.
                maintenanceExecutor.scheduleAtFixedRate(cache::cleanUp, 0,
                        configuration.getCacheCleanup().getSeconds(), TimeUnit.SECONDS);
                log.debug("bootstrapping auto-scale reporter done");
                createWriter.complete(null);
                return createWriter;
            }, maintenanceExecutor), 10, TimeUnit.SECONDS);
}
From source file:com.microsoft.azure.servicebus.samples.receiveloop.ReceiveLoop.java
CompletableFuture receiveMessagesAsync(IMessageReceiver receiver) {
    CompletableFuture currentTask = new CompletableFuture();
    try {
        CompletableFuture.runAsync(() -> {
            while (!currentTask.isCancelled()) {
                try {
                    IMessage message = receiver.receive(Duration.ofSeconds(60));
                    if (message != null) {
                        // receives message is passed to callback
                        if (message.getLabel() != null && message.getContentType() != null
                                && message.getLabel().contentEquals("Scientist")
                                && message.getContentType().contentEquals("application/json")) {

                            byte[] body = message.getBody();
                            Map scientist = GSON.fromJson(new String(body, UTF_8), Map.class);

                            System.out.printf(
                                    "\n\t\t\t\tMessage received: \n\t\t\t\t\t\tMessageId = %s, \n\t\t\t\t\t\tSequenceNumber = %s, \n\t\t\t\t\t\tEnqueuedTimeUtc = %s,"
                                            + "\n\t\t\t\t\t\tExpiresAtUtc = %s, \n\t\t\t\t\t\tContentType = \"%s\", \n\t\t\t\t\t\tContent: [ firstName = %s, name = %s ]\n",
                                    message.getMessageId(), message.getSequenceNumber(),
                                    message.getEnqueuedTimeUtc(), message.getExpiresAtUtc(),
                                    message.getContentType(),
                                    scientist != null ? scientist.get("firstName") : "",
                                    scientist != null ? scientist.get("name") : "");
                        }
                        receiver.completeAsync(message.getLockToken());
                    }
                } catch (Exception e) {
                    currentTask.completeExceptionally(e);
                }
            }
            currentTask.complete(null);
        });
        return currentTask;
    } catch (Exception e) {
        currentTask.completeExceptionally(e);
    }
    return currentTask;
}
From source file:org.apache.pulsar.client.impl.ClientCnx.java
@Override
protected void handlePartitionResponse(CommandPartitionedTopicMetadataResponse lookupResult) {
    if (log.isDebugEnabled()) {
        log.debug("Received Broker Partition response: {}", lookupResult.getPartitions());
    }

    long requestId = lookupResult.getRequestId();
    CompletableFuture<LookupDataResult> requestFuture = getAndRemovePendingLookupRequest(requestId);

    if (requestFuture != null) {
        if (requestFuture.isCompletedExceptionally()) {
            if (log.isDebugEnabled()) {
                log.debug("{} Request {} already timed-out", ctx.channel(), lookupResult.getRequestId());
            }
            return;
        }
        // Complete future with exception if : Result.response=fail/null
        if (!lookupResult.hasResponse() || CommandPartitionedTopicMetadataResponse.LookupType.Failed
                .equals(lookupResult.getResponse())) {
            if (lookupResult.hasError()) {
                checkServerError(lookupResult.getError(), lookupResult.getMessage());
                requestFuture.completeExceptionally(
                        getPulsarClientException(lookupResult.getError(), lookupResult.getMessage()));
            } else {
                requestFuture.completeExceptionally(
                        new PulsarClientException.LookupException("Empty lookup response"));
            }
        } else {
            // return LookupDataResult when Result.response = success/redirect
            requestFuture.complete(new LookupDataResult(lookupResult.getPartitions()));
        }
    } else {
        log.warn("{} Received unknown request id from server: {}", ctx.channel(), lookupResult.getRequestId());
    }
}
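The future completed above was registered when the lookup request was sent and is retrieved by request id when the response arrives. Stripped of the Pulsar-specific types, that pending-request pattern looks roughly like the sketch below; all names here are illustrative, not taken from ClientCnx:

import java.util.concurrent.*;

final class PendingRequests {
    private final ConcurrentMap<Long, CompletableFuture<String>> pending = new ConcurrentHashMap<>();

    // Called when a request is sent: register a future the caller can wait on.
    CompletableFuture<String> register(long requestId) {
        CompletableFuture<String> future = new CompletableFuture<>();
        pending.put(requestId, future);
        return future;
    }

    // Called from the I/O thread when a response arrives: find and complete the matching future.
    void onResponse(long requestId, String payload, Throwable error) {
        CompletableFuture<String> future = pending.remove(requestId);
        if (future == null) {
            return; // unknown or already timed-out request
        }
        if (error != null) {
            future.completeExceptionally(error);
        } else {
            future.complete(payload);
        }
    }
}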
From source file:com.yahoo.pulsar.broker.service.BrokerService.java
private CompletableFuture<Topic> createPersistentTopic(final String topic) throws RuntimeException {
    checkTopicNsOwnership(topic);

    final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    DestinationName destinationName = DestinationName.get(topic);
    if (!pulsar.getNamespaceService().isServiceUnitActive(destinationName)) {
        // namespace is being unloaded
        String msg = String.format("Namespace is being unloaded, cannot add topic %s", topic);
        log.warn(msg);
        throw new RuntimeException(new ServiceUnitNotReadyException(msg));
    }

    final CompletableFuture<Topic> topicFuture = new CompletableFuture<>();

    getManagedLedgerConfig(destinationName).thenAccept(config -> {
        // Once we have the configuration, we can proceed with the async open operation
        managedLedgerFactory.asyncOpen(destinationName.getPersistenceNamingEncoding(), config,
                new OpenLedgerCallback() {
                    @Override
                    public void openLedgerComplete(ManagedLedger ledger, Object ctx) {
                        PersistentTopic persistentTopic = new PersistentTopic(topic, ledger,
                                BrokerService.this);

                        CompletableFuture<Void> replicationFuture = persistentTopic.checkReplication();
                        replicationFuture.thenRun(() -> {
                            log.info("Created topic {}", topic);
                            long topicLoadLatencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime())
                                    - topicCreateTimeMs;
                            pulsarStats.recordTopicLoadTimeValue(topic, topicLoadLatencyMs);
                            addTopicToStatsMaps(destinationName, persistentTopic);
                            topicFuture.complete(persistentTopic);
                        });
                        replicationFuture.exceptionally((ex) -> {
                            log.warn("Replication check failed. Removing topic from topics list {}, {}",
                                    topic, ex);
                            persistentTopic.stopReplProducers().whenComplete((v, exception) -> {
                                topics.remove(topic, topicFuture);
                                topicFuture.completeExceptionally(ex);
                            });
                            return null;
                        });
                    }

                    @Override
                    public void openLedgerFailed(ManagedLedgerException exception, Object ctx) {
                        log.warn("Failed to create topic {}", topic, exception);
                        topics.remove(topic, topicFuture);
                        topicFuture.completeExceptionally(new PersistenceException(exception));
                    }
                }, null);
    }).exceptionally((exception) -> {
        log.warn("[{}] Failed to get topic configuration: {}", topic, exception.getMessage(), exception);
        topics.remove(topic, topicFuture);
        topicFuture.completeExceptionally(exception);
        return null;
    });

    return topicFuture;
}
From source file:org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider.java
private CompletableFuture<Void> updateSubscriptionPermissionAsync(NamespaceName namespace,
        String subscriptionName, Set<String> roles, boolean remove) {
    CompletableFuture<Void> result = new CompletableFuture<>();

    try {
        validatePoliciesReadOnlyAccess();
    } catch (Exception e) {
        result.completeExceptionally(e);
    }

    ZooKeeper globalZk = configCache.getZooKeeper();
    final String policiesPath = String.format("/%s/%s/%s", "admin", POLICIES, namespace.toString());

    try {
        Stat nodeStat = new Stat();
        byte[] content = globalZk.getData(policiesPath, null, nodeStat);
        Policies policies = getThreadLocal().readValue(content, Policies.class);
        if (remove) {
            if (policies.auth_policies.subscription_auth_roles.get(subscriptionName) != null) {
                policies.auth_policies.subscription_auth_roles.get(subscriptionName).removeAll(roles);
            } else {
                log.info("[{}] Couldn't find role {} while revoking for sub = {}", namespace, subscriptionName,
                        roles);
                result.completeExceptionally(new IllegalArgumentException("couldn't find subscription"));
                return result;
            }
        } else {
            policies.auth_policies.subscription_auth_roles.put(subscriptionName, roles);
        }

        // Write back the new policies into zookeeper
        globalZk.setData(policiesPath, getThreadLocal().writeValueAsBytes(policies), nodeStat.getVersion());

        configCache.policiesCache().invalidate(policiesPath);

        log.info("[{}] Successfully granted access for role {} for sub = {}", namespace, subscriptionName,
                roles);
        result.complete(null);
    } catch (KeeperException.NoNodeException e) {
        log.warn("[{}] Failed to set permissions for namespace {}: does not exist", subscriptionName,
                namespace);
        result.completeExceptionally(new IllegalArgumentException("Namespace does not exist" + namespace));
    } catch (KeeperException.BadVersionException e) {
        log.warn("[{}] Failed to set permissions for {} on namespace {}: concurrent modification",
                subscriptionName, roles, namespace);
        result.completeExceptionally(new IllegalStateException(
                "Concurrent modification on zk path: " + policiesPath + ", " + e.getMessage()));
    } catch (Exception e) {
        log.error("[{}] Failed to get permissions for role {} on namespace {}", subscriptionName, roles,
                namespace, e);
        result.completeExceptionally(
                new IllegalStateException("Failed to get permissions for namespace " + namespace));
    }

    return result;
}
From source file:org.apache.pulsar.client.impl.ConsumerImpl.java
@Override
public CompletableFuture<Void> seekAsync(MessageId messageId) {
    if (getState() == State.Closing || getState() == State.Closed) {
        return FutureUtil
                .failedFuture(new PulsarClientException.AlreadyClosedException("Consumer was already closed"));
    }

    if (!isConnected()) {
        return FutureUtil.failedFuture(new PulsarClientException("Not connected to broker"));
    }

    final CompletableFuture<Void> seekFuture = new CompletableFuture<>();

    long requestId = client.newRequestId();
    MessageIdImpl msgId = (MessageIdImpl) messageId;
    ByteBuf seek = Commands.newSeek(consumerId, requestId, msgId.getLedgerId(), msgId.getEntryId());
    ClientCnx cnx = cnx();

    log.info("[{}][{}] Seek subscription to message id {}", topic, subscription, messageId);

    cnx.sendRequestWithId(seek, requestId).thenRun(() -> {
        log.info("[{}][{}] Successfully reset subscription to message id {}", topic, subscription, messageId);
        seekFuture.complete(null);
    }).exceptionally(e -> {
        log.error("[{}][{}] Failed to reset subscription: {}", topic, subscription, e.getCause().getMessage());
        seekFuture.completeExceptionally(e.getCause());
        return null;
    });

    return seekFuture;
}