List of usage examples for java.util.concurrent.CompletableFuture.complete
public boolean complete(T value)
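Before the real-world examples below, here is a minimal standalone sketch (not taken from any of the source files that follow) illustrating the contract of complete: the first call transitions the future to a completed state and returns true, while later calls return false and leave the original value in place.

import java.util.concurrent.CompletableFuture;

public class CompleteExample {
    public static void main(String[] args) throws Exception {
        CompletableFuture<String> future = new CompletableFuture<>();

        // First complete() call wins: it completes the future and returns true.
        boolean first = future.complete("hello");    // true

        // Subsequent calls are no-ops and return false; the value stays "hello".
        boolean second = future.complete("ignored"); // false

        System.out.println(first + " " + second + " " + future.get()); // prints: true false hello
    }
}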
From source file:org.apache.hadoop.hbase.client.AsyncHBaseAdmin.java
@Override
public CompletableFuture<HTableDescriptor> getTableDescriptor(TableName tableName) {
    CompletableFuture<HTableDescriptor> future = new CompletableFuture<>();
    this.<List<TableSchema>>newMasterCaller()
        .action((controller, stub) -> this
            .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, List<TableSchema>>call(
                controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName),
                (s, c, req, done) -> s.getTableDescriptors(c, req, done),
                (resp) -> resp.getTableSchemaList()))
        .call().whenComplete((tableSchemas, error) -> {
            if (error != null) {
                future.completeExceptionally(error);
                return;
            }
            if (!tableSchemas.isEmpty()) {
                future.complete(ProtobufUtil.convertToHTableDesc(tableSchemas.get(0)));
            } else {
                future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
            }
        });
    return future;
}
From source file:org.apache.distributedlog.lock.ZKSessionLock.java
private boolean checkOrClaimLockOwner(final Pair<String, Long> currentOwner,
                                      final CompletableFuture<String> result) {
    if (lockId.compareTo(currentOwner) != 0 && !lockContext.hasLockId(currentOwner)) {
        lockStateExecutor.executeOrdered(lockPath, new SafeRunnable() {
            @Override
            public void safeRun() {
                result.complete(currentOwner.getLeft());
            }
        });
        return false;
    }
    // current owner is itself
    final int curEpoch = epochUpdater.incrementAndGet(this);
    executeLockAction(curEpoch, new LockAction() {
        @Override
        public void execute() {
            if (!lockState.inState(State.INIT)) {
                result.completeExceptionally(
                        new LockStateChangedException(lockPath, lockId, State.INIT, lockState.getState()));
                return;
            }
            asyncTryLock(false, result);
        }

        @Override
        public String getActionName() {
            return "claimOwnership(owner=" + currentOwner + ")";
        }
    }, result);
    return true;
}
From source file:org.apache.distributedlog.lock.ZKSessionLock.java
/**
 * Try lock. If it fails, it cleans up its attempt.
 *
 * @param wait
 *          whether to wait for ownership.
 * @param result
 *          promise to satisfy with current lock owner
 */
private void asyncTryLock(boolean wait, final CompletableFuture<String> result) {
    final CompletableFuture<String> lockResult = new CompletableFuture<String>();
    lockResult.whenComplete(new FutureEventListener<String>() {
        @Override
        public void onSuccess(String currentOwner) {
            result.complete(currentOwner);
        }

        @Override
        public void onFailure(final Throwable lockCause) {
            // If tryLock failed due to state changed, we don't need to cleanup
            if (lockCause instanceof LockStateChangedException) {
                LOG.info("skipping cleanup for {} at {} after encountering lock "
                        + "state change exception : ", new Object[] { lockId, lockPath, lockCause });
                result.completeExceptionally(lockCause);
                return;
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("{} is cleaning up its lock state for {} due to : ",
                        new Object[] { lockId, lockPath, lockCause });
            }
            // If we encountered any exception we should cleanup
            CompletableFuture<Void> unlockResult = asyncUnlock();
            unlockResult.whenComplete(new FutureEventListener<Void>() {
                @Override
                public void onSuccess(Void value) {
                    result.completeExceptionally(lockCause);
                }

                @Override
                public void onFailure(Throwable cause) {
                    result.completeExceptionally(lockCause);
                }
            });
        }
    });
    asyncTryLockWithoutCleanup(wait, lockResult);
}
From source file:org.apache.hadoop.hbase.client.AsyncHBaseAdmin.java
private CompletableFuture<HTableDescriptor[]> batchTableOperations(Pattern pattern, TableOperator operator,
        String operationType) {
    CompletableFuture<HTableDescriptor[]> future = new CompletableFuture<>();
    List<HTableDescriptor> failed = new LinkedList<>();
    listTables(pattern, false).whenComplete((tables, error) -> {
        if (error != null) {
            future.completeExceptionally(error);
            return;
        }
        CompletableFuture[] futures = Arrays.stream(tables)
                .map((table) -> operator.operate(table.getTableName()).whenComplete((v, ex) -> {
                    if (ex != null) {
                        LOG.info("Failed to " + operationType + " table " + table.getTableName(), ex);
                        failed.add(table);
                    }
                })).toArray(size -> new CompletableFuture[size]);
        CompletableFuture.allOf(futures).thenAccept((v) -> {
            future.complete(failed.toArray(new HTableDescriptor[failed.size()]));
        });
    });
    return future;
}
From source file:io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java
/**
 * Requests all updates from each peer in the provided list of peers.
 * <p>
 * The returned future will be completed once at least one peer bootstraps this map or bootstrap requests
 * to all peers fail.
 *
 * @param peers the list of peers from which to request updates
 * @return a future to be completed once updates have been received from at least one peer
 */
private CompletableFuture<Void> requestBootstrapFromPeers(List<MemberId> peers) {
    if (peers.isEmpty()) {
        return CompletableFuture.completedFuture(null);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    final int totalPeers = peers.size();
    AtomicBoolean successful = new AtomicBoolean();
    AtomicInteger totalCount = new AtomicInteger();
    AtomicReference<Throwable> lastError = new AtomicReference<>();

    // Iterate through all of the peers and send a bootstrap request. On the first peer that returns
    // a successful bootstrap response, complete the future. Otherwise, if no peers respond with any
    // successful bootstrap response, the future will be completed with the last exception.
    for (MemberId peer : peers) {
        requestBootstrapFromPeer(peer).whenComplete((result, error) -> {
            if (error == null) {
                if (successful.compareAndSet(false, true)) {
                    future.complete(null);
                } else if (totalCount.incrementAndGet() == totalPeers) {
                    Throwable e = lastError.get();
                    if (e != null) {
                        future.completeExceptionally(e);
                    }
                }
            } else {
                if (!successful.get() && totalCount.incrementAndGet() == totalPeers) {
                    future.completeExceptionally(error);
                } else {
                    lastError.set(error);
                }
            }
        });
    }
    return future;
}
From source file:org.apache.distributedlog.lock.ZKSessionLock.java
/**
 * NOTE: unlockInternal should only be called after try lock.
 */
private void unlockInternal(final CompletableFuture<Void> promise) {
    // already closed or expired, nothing to cleanup
    this.epochUpdater.incrementAndGet(this);
    if (null != watcher) {
        this.zkClient.unregister(watcher);
    }

    if (lockState.inState(State.CLOSED)) {
        promise.complete(null);
        return;
    }

    LOG.info("Lock {} for {} is closed from state {}.",
            new Object[] { lockId, lockPath, lockState.getState() });

    final boolean skipCleanup = lockState.inState(State.INIT) || lockState.inState(State.EXPIRED);
    lockState.transition(State.CLOSING);

    if (skipCleanup) {
        // Nothing to cleanup if INIT (never tried) or EXPIRED (ephemeral node auto-removed)
        lockState.transition(State.CLOSED);
        promise.complete(null);
        return;
    }

    // In any other state, we should clean up the member node
    CompletableFuture<Void> deletePromise = new CompletableFuture<Void>();
    deleteLockNode(deletePromise);

    // Set the state to closed after we've cleaned up
    deletePromise.whenCompleteAsync(new FutureEventListener<Void>() {
        @Override
        public void onSuccess(Void complete) {
            lockState.transition(State.CLOSED);
            promise.complete(null);
        }

        @Override
        public void onFailure(Throwable cause) {
            // Delete failure is quite serious (causes lock leak) and should be handled better
            LOG.error("lock node delete failed {} {}", lockId, lockPath);
            promise.complete(null);
        }
    }, lockStateExecutor.chooseThread(lockPath));
}
From source file:org.apache.distributedlog.auditor.DLAuditor.java
/**
 * Find leak ledgers phase 1: collect ledgers set.
 */
private Set<Long> collectLedgersFromBK(BookKeeperClient bkc, final ExecutorService executorService)
        throws IOException {
    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bkc.get());

    final Set<Long> ledgers = new HashSet<Long>();
    final CompletableFuture<Void> doneFuture = FutureUtils.createFuture();

    BookkeeperInternalCallbacks.Processor<Long> collector = new BookkeeperInternalCallbacks.Processor<Long>() {
        @Override
        public void process(Long lid, final AsyncCallback.VoidCallback cb) {
            synchronized (ledgers) {
                ledgers.add(lid);
                if (0 == ledgers.size() % 1000) {
                    logger.info("Collected {} ledgers", ledgers.size());
                }
            }
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    cb.processResult(BKException.Code.OK, null, null);
                }
            });
        }
    };
    AsyncCallback.VoidCallback finalCb = new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (BKException.Code.OK == rc) {
                doneFuture.complete(null);
            } else {
                doneFuture.completeExceptionally(BKException.create(rc));
            }
        }
    };
    lm.asyncProcessLedgers(collector, finalCb, null, BKException.Code.OK, BKException.Code.ZKException);
    try {
        doneFuture.get();
        logger.info("Collected total {} ledgers", ledgers.size());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on collecting ledgers : ", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) (e.getCause());
        } else {
            throw new IOException("Failed to collect ledgers : ", e.getCause());
        }
    }
    return ledgers;
}
From source file:org.apache.bookkeeper.meta.MockLedgerManager.java
@Override
public CompletableFuture<Versioned<LedgerMetadata>> writeLedgerMetadata(long ledgerId, LedgerMetadata metadata,
        Version currentVersion) {
    CompletableFuture<Versioned<LedgerMetadata>> promise = new CompletableFuture<>();
    preWriteHook.runHook(ledgerId, metadata).thenComposeAsync((ignore) -> {
        try {
            Versioned<LedgerMetadata> oldMetadata = readMetadata(ledgerId);
            if (oldMetadata == null) {
                return FutureUtils.exception(new BKException.BKNoSuchLedgerExistsException());
            } else if (!oldMetadata.getVersion().equals(currentVersion)) {
                return FutureUtils.exception(new BKException.BKMetadataVersionException());
            } else {
                LongVersion oldVersion = (LongVersion) oldMetadata.getVersion();
                metadataMap.put(ledgerId, Pair.of(new LongVersion(oldVersion.getLongVersion() + 1),
                        serDe.serialize(metadata)));
                Versioned<LedgerMetadata> readBack = readMetadata(ledgerId);
                return FutureUtils.value(readBack);
            }
        } catch (Exception e) {
            LOG.error("Error writing metadata", e);
            return FutureUtils.exception(e);
        }
    }, executor).whenComplete((res, ex) -> {
        if (ex != null) {
            Throwable cause = (ex instanceof CompletionException) ? ex.getCause() : ex;
            executeCallback(() -> promise.completeExceptionally(cause));
        } else {
            executeCallback(() -> promise.complete(res));
        }
    });
    return promise;
}
From source file:org.mascherl.example.service.ComposeMailService.java
public CompletableFuture<List<MailAddressUsage>> getLastSendToAddressesAsync2(User currentUser, int limit) {
    CompletableFuture<List<MailAddressUsage>> completableFuture = new CompletableFuture<>();
    db.query(
            "select distinct mto.address, m.datetime " +
            "from mail m " +
            "join mail_to mto on mto.mail_uuid = m.uuid " +
            "where m.user_uuid = $1 " +
            "and m.mail_type = $2 " +
            "and not exists (" +
            "   select 1 from mail m2 " +
            "   join mail_to mto2 on mto2.mail_uuid = m2.uuid " +
            "   where m2.user_uuid = $1 " +
            "   and m2.mail_type = $2 " +
            "   and mto2.address = mto.address " +
            "   and m2.datetime > m.datetime " +
            ") " +
            "order by m.datetime desc " +
            "limit $3",
            Arrays.asList(currentUser.getUuid(), MailType.SENT.name(), limit),
            result -> {
                try {
                    TimestampColumnZonedDateTimeMapper dateTimeColumnMapper =
                            new PersistentZonedDateTime().getColumnMapper();
                    List<MailAddressUsage> usages = StreamSupport.stream(result.spliterator(), false)
                            .map(row -> new MailAddressUsage(new MailAddress(row.getString(0)),
                                    dateTimeColumnMapper.fromNonNullValue(row.getTimestamp(1))))
                            .collect(Collectors.toList());
                    completableFuture.complete(usages);
                } catch (Exception e) {
                    completableFuture.completeExceptionally(e);
                }
            },
            completableFuture::completeExceptionally);
    return completableFuture;
}
From source file:org.apache.pulsar.client.impl.PulsarClientImpl.java
@Override
public CompletableFuture<Reader> createReaderAsync(String topic, MessageId startMessageId,
        ReaderConfiguration conf) {
    if (state.get() != State.Open) {
        return FutureUtil
                .failedFuture(new PulsarClientException.AlreadyClosedException("Client already closed"));
    }
    if (!DestinationName.isValid(topic)) {
        return FutureUtil
                .failedFuture(new PulsarClientException.InvalidTopicNameException("Invalid topic name"));
    }
    if (startMessageId == null) {
        return FutureUtil.failedFuture(
                new PulsarClientException.InvalidConfigurationException("Invalid startMessageId"));
    }
    if (conf == null) {
        return FutureUtil.failedFuture(
                new PulsarClientException.InvalidConfigurationException("Consumer configuration undefined"));
    }

    CompletableFuture<Reader> readerFuture = new CompletableFuture<>();

    getPartitionedTopicMetadata(topic).thenAccept(metadata -> {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Received topic metadata. partitions: {}", topic, metadata.partitions);
        }

        if (metadata.partitions > 1) {
            readerFuture.completeExceptionally(
                    new PulsarClientException("Topic reader cannot be created on a partitioned topic"));
            return;
        }

        CompletableFuture<Consumer> consumerSubscribedFuture = new CompletableFuture<>();
        // gets the next single-threaded executor from the list of executors
        ExecutorService listenerThread = externalExecutorProvider.getExecutor();
        ReaderImpl reader = new ReaderImpl(PulsarClientImpl.this, topic, startMessageId, conf, listenerThread,
                consumerSubscribedFuture);

        synchronized (consumers) {
            consumers.put(reader.getConsumer(), Boolean.TRUE);
        }

        consumerSubscribedFuture.thenRun(() -> {
            readerFuture.complete(reader);
        }).exceptionally(ex -> {
            log.warn("[{}] Failed to create topic reader", topic, ex);
            readerFuture.completeExceptionally(ex);
            return null;
        });
    }).exceptionally(ex -> {
        log.warn("[{}] Failed to get partitioned topic metadata", topic, ex);
        readerFuture.completeExceptionally(ex);
        return null;
    });

    return readerFuture;
}