List of usage examples for java.util.concurrent.CompletableFuture.whenComplete
public CompletableFuture<T> whenComplete(BiConsumer<? super T, ? super Throwable> action)
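A minimal self-contained sketch of the basic contract, before the real-world examples below: the BiConsumer receives either a non-null value or a non-null Throwable, and the returned future completes with the same outcome as the original.

import java.util.concurrent.CompletableFuture;

public class WhenCompleteBasics {
    public static void main(String[] args) {
        CompletableFuture<Integer> future = CompletableFuture.supplyAsync(() -> 21 * 2);

        // Exactly one of (value, error) is non-null when the callback runs.
        future.whenComplete((value, error) -> {
            if (error != null) {
                System.err.println("computation failed: " + error);
            } else {
                System.out.println("computation produced: " + value); // prints 42
            }
        }).join();
    }
}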
From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java
/**
 * Seals a txn and transitions it to COMMITTING (resp. ABORTING) state if commit param is true (resp. false).
 *
 * Post-condition:
 * 1. If seal completes successfully, then
 *    (a) txn state is COMMITTING/ABORTING,
 *    (b) CommitEvent/AbortEvent is present in the commit stream/abort stream,
 *    (c) txn is removed from host-txn index,
 *    (d) txn is removed from the timeout service.
 *
 * 2. If the process fails after transitioning txn to COMMITTING/ABORTING state, but before responding to the
 *    client, then since the txn is present in the host-txn index, some other controller process shall put a
 *    CommitEvent/AbortEvent into the commit stream/abort stream.
 *
 * @param host    host id. It is different from hostId iff invoked from TxnSweeper for aborting orphaned txn.
 * @param scope   scope name.
 * @param stream  stream name.
 * @param commit  boolean indicating whether to commit txn.
 * @param txnId   txn id.
 * @param version expected version of txn node in store.
 * @param ctx     context.
 * @return Txn status after sealing it.
 */
CompletableFuture<TxnStatus> sealTxnBody(final String host, final String scope, final String stream,
                                         final boolean commit, final UUID txnId, final Integer version,
                                         final OperationContext ctx) {
    TxnResource resource = new TxnResource(scope, stream, txnId);
    Optional<Integer> versionOpt = Optional.ofNullable(version);

    // Step 1. Add txn to current host's index, if it is not already present.
    CompletableFuture<Void> addIndex = host.equals(hostId) && !timeoutService.containsTxn(scope, stream, txnId)
            // PS: txn version in index does not matter, because if update is successful,
            // then txn would no longer be open.
            ? streamMetadataStore.addTxnToIndex(hostId, resource, Integer.MAX_VALUE)
            : CompletableFuture.completedFuture(null);

    addIndex.whenComplete((v, e) -> {
        if (e != null) {
            log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId, hostId);
        } else {
            log.debug("Txn={}, already present/newly added to host-txn index of host={}", txnId, hostId);
        }
    });

    // Step 2. Seal txn.
    CompletableFuture<AbstractMap.SimpleEntry<TxnStatus, Integer>> sealFuture = addIndex.thenComposeAsync(
            x -> streamMetadataStore.sealTransaction(scope, stream, txnId, commit, versionOpt, ctx, executor),
            executor).whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed sealing txn", txnId);
                } else {
                    log.debug("Txn={}, sealed successfully, commit={}", txnId, commit);
                }
            });

    // Step 3. Write event to the corresponding stream.
    return sealFuture.thenComposeAsync(pair -> {
        TxnStatus status = pair.getKey();
        switch (status) {
        case COMMITTING:
            return writeCommitEvent(scope, stream, pair.getValue(), txnId, status);
        case ABORTING:
            return writeAbortEvent(scope, stream, pair.getValue(), txnId, status);
        case ABORTED:
        case COMMITTED:
            return CompletableFuture.completedFuture(status);
        case OPEN:
        case UNKNOWN:
        default:
            // Not possible after a successful streamStore.sealTransaction call, because otherwise an
            // exception would be thrown.
            return CompletableFuture.completedFuture(status);
        }
    }, executor).thenComposeAsync(status -> {
        // Step 4. Remove txn from timeoutService, and from the index.
        timeoutService.removeTxn(scope, stream, txnId);
        log.debug("Txn={}, removed from timeout service", txnId);
        return streamMetadataStore.removeTxnFromIndex(host, resource, true).whenComplete((v, e) -> {
            if (e != null) {
                log.debug("Txn={}, failed removing txn from host-txn index of host={}", txnId, hostId);
            } else {
                log.debug("Txn={}, removed txn from host-txn index of host={}", txnId, hostId);
            }
        }).thenApply(x -> status);
    }, executor);
}
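The example above uses whenComplete purely for logging. A key property worth noting: unlike exceptionally or handle, whenComplete cannot swallow a failure; the returned future still completes exceptionally. A minimal sketch of that propagation behavior (names are illustrative, not from the Pravega source):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class WhenCompletePropagates {
    public static void main(String[] args) {
        CompletableFuture<String> failing = CompletableFuture.supplyAsync(() -> {
            throw new IllegalStateException("seal failed");
        });

        // The logging callback observes the failure but does not swallow it:
        // the returned future is still completed exceptionally.
        CompletableFuture<String> logged = failing.whenComplete((value, error) -> {
            if (error != null) {
                System.out.println("logged failure: " + error);
            }
        });

        try {
            logged.join();
        } catch (CompletionException e) {
            System.out.println("failure still propagated: " + e.getCause());
        }
    }
}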
From source file:io.atomix.cluster.messaging.impl.NettyMessagingService.java
private CompletableFuture<Channel> getChannel(Address address, String messageType) {
    List<CompletableFuture<Channel>> channelPool = getChannelPool(address);
    int offset = getChannelOffset(messageType);

    CompletableFuture<Channel> channelFuture = channelPool.get(offset);
    if (channelFuture == null || channelFuture.isCompletedExceptionally()) {
        synchronized (channelPool) {
            channelFuture = channelPool.get(offset);
            if (channelFuture == null || channelFuture.isCompletedExceptionally()) {
                channelFuture = openChannel(address);
                channelPool.set(offset, channelFuture);
            }
        }
    }

    final CompletableFuture<Channel> future = new CompletableFuture<>();
    final CompletableFuture<Channel> finalFuture = channelFuture;
    finalFuture.whenComplete((channel, error) -> {
        if (error == null) {
            if (!channel.isActive()) {
                CompletableFuture<Channel> currentFuture;
                synchronized (channelPool) {
                    currentFuture = channelPool.get(offset);
                    if (currentFuture == finalFuture) {
                        channelPool.set(offset, null);
                    } else if (currentFuture == null) {
                        currentFuture = openChannel(address);
                        channelPool.set(offset, currentFuture);
                    }
                }

                final ClientConnection connection = clientConnections.remove(channel);
                if (connection != null) {
                    connection.close();
                }

                if (currentFuture == finalFuture) {
                    getChannel(address, messageType).whenComplete((recursiveResult, recursiveError) -> {
                        if (recursiveError == null) {
                            future.complete(recursiveResult);
                        } else {
                            future.completeExceptionally(recursiveError);
                        }
                    });
                } else {
                    currentFuture.whenComplete((recursiveResult, recursiveError) -> {
                        if (recursiveError == null) {
                            future.complete(recursiveResult);
                        } else {
                            future.completeExceptionally(recursiveError);
                        }
                    });
                }
            } else {
                future.complete(channel);
            }
        } else {
            future.completeExceptionally(error);
        }
    });
    return future;
}
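A recurring idiom in this example is forwarding an inner future's outcome into a separately created outer future, so the pool can retry or swap channels while callers hold one stable handle. A minimal sketch of that bridging idiom (bridge is a hypothetical helper, not part of Atomix):

import java.util.concurrent.CompletableFuture;

public class BridgeFutures {
    // Hypothetical helper: forward the outcome of 'source' into 'target' unchanged.
    static <T> void bridge(CompletableFuture<T> source, CompletableFuture<T> target) {
        source.whenComplete((result, error) -> {
            if (error == null) {
                target.complete(result);
            } else {
                target.completeExceptionally(error);
            }
        });
    }

    public static void main(String[] args) {
        CompletableFuture<String> outer = new CompletableFuture<>();
        CompletableFuture<String> attempt = CompletableFuture.supplyAsync(() -> "channel-1");

        bridge(attempt, outer); // 'outer' now mirrors whatever 'attempt' produces
        System.out.println(outer.join());
    }
}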
From source file:io.pravega.client.segment.impl.SegmentOutputStreamFactoryImpl.java
@Override
public SegmentOutputStream createOutputStreamForTransaction(Segment segment, UUID txId,
        Consumer<Segment> segmentSealedCallback, EventWriterConfig config) {
    CompletableFuture<String> name = new CompletableFuture<>();
    FailingReplyProcessor replyProcessor = new FailingReplyProcessor() {

        @Override
        public void connectionDropped() {
            name.completeExceptionally(new ConnectionClosedException());
        }

        @Override
        public void wrongHost(WireCommands.WrongHost wrongHost) {
            name.completeExceptionally(new NotImplementedException());
        }

        @Override
        public void transactionInfo(WireCommands.TransactionInfo info) {
            name.complete(info.getTransactionName());
        }

        @Override
        public void processingFailure(Exception error) {
            name.completeExceptionally(error);
        }
    };
    val connectionFuture = controller.getEndpointForSegment(segment.getScopedName())
            .thenCompose((PravegaNodeUri endpointForSegment) -> {
                return cf.establishConnection(endpointForSegment, replyProcessor);
            });
    connectionFuture.thenAccept((ClientConnection connection) -> {
        try {
            connection.send(new WireCommands.GetTransactionInfo(1, segment.getScopedName(), txId));
        } catch (ConnectionFailedException e) {
            throw new RuntimeException(e);
        }
    }).exceptionally(t -> {
        name.completeExceptionally(t);
        return null;
    });
    name.whenComplete((s, e) -> {
        getAndHandleExceptions(connectionFuture, RuntimeException::new).close();
    });
    return new SegmentOutputStreamImpl(getAndHandleExceptions(name, RuntimeException::new), controller, cf,
            UUID.randomUUID(), segmentSealedCallback, getRetryFromConfig(config));
}
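Here whenComplete acts as a finally-style hook: once the name future settles, successfully or not, the connection is closed. A minimal sketch of unconditional resource cleanup (the Resource class is illustrative):

import java.util.concurrent.CompletableFuture;

public class CleanupOnComplete {
    static class Resource implements AutoCloseable {
        @Override
        public void close() {
            System.out.println("resource closed");
        }
    }

    public static void main(String[] args) {
        Resource resource = new Resource();
        CompletableFuture<String> result = CompletableFuture.supplyAsync(() -> "done");

        // Runs on success and on failure alike, like a finally block for the future.
        result.whenComplete((value, error) -> resource.close());

        result.join();
    }
}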
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
private void maybeOffload(CompletableFuture<PositionImpl> finalPromise) {
    if (!offloadMutex.tryLock()) {
        scheduledExecutor.schedule(safeRun(() -> maybeOffloadInBackground(finalPromise)), 100,
                TimeUnit.MILLISECONDS);
    } else {
        CompletableFuture<PositionImpl> unlockingPromise = new CompletableFuture<>();
        unlockingPromise.whenComplete((res, ex) -> {
            offloadMutex.unlock();
            if (ex != null) {
                finalPromise.completeExceptionally(ex);
            } else {
                finalPromise.complete(res);
            }
        });

        long threshold = config.getOffloadAutoTriggerSizeThresholdBytes();
        long sizeSummed = 0;
        long alreadyOffloadedSize = 0;
        long toOffloadSize = 0;

        ConcurrentLinkedDeque<LedgerInfo> toOffload = new ConcurrentLinkedDeque<>();

        // Go through the ledger list from newest to oldest and build a list to offload
        // in oldest-to-newest order.
        for (Map.Entry<Long, LedgerInfo> e : ledgers.descendingMap().entrySet()) {
            long size = e.getValue().getSize();
            sizeSummed += size;
            boolean alreadyOffloaded = e.getValue().hasOffloadContext()
                    && e.getValue().getOffloadContext().getComplete();
            if (alreadyOffloaded) {
                alreadyOffloadedSize += size;
            } else if (sizeSummed > threshold) {
                toOffloadSize += size;
                toOffload.addFirst(e.getValue());
            }
        }

        if (toOffload.size() > 0) {
            log.info("[{}] Going to automatically offload ledgers {}"
                    + ", total size = {}, already offloaded = {}, to offload = {}", name,
                    toOffload.stream().map(l -> l.getLedgerId()).collect(Collectors.toList()), sizeSummed,
                    alreadyOffloadedSize, toOffloadSize);
        } else {
            // offloadLoop will complete immediately with an empty list to offload
            log.debug("[{}] Nothing to offload, total size = {}, already offloaded = {}, threshold = {}", name,
                    sizeSummed, alreadyOffloadedSize, threshold);
        }

        offloadLoop(unlockingPromise, toOffload, PositionImpl.latest, Optional.empty());
    }
}
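The unlockingPromise idiom above guarantees the mutex is released before the caller-visible promise is settled, whatever the outcome. A minimal sketch of the same idiom with a plain ReentrantLock (names are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.ReentrantLock;

public class UnlockingPromise {
    public static void main(String[] args) {
        ReentrantLock mutex = new ReentrantLock();
        CompletableFuture<String> finalPromise = new CompletableFuture<>();

        mutex.lock();
        CompletableFuture<String> unlockingPromise = new CompletableFuture<>();
        // Release the lock first, then forward the outcome to the caller-visible promise.
        unlockingPromise.whenComplete((res, ex) -> {
            mutex.unlock();
            if (ex != null) {
                finalPromise.completeExceptionally(ex);
            } else {
                finalPromise.complete(res);
            }
        });

        unlockingPromise.complete("offload finished");
        System.out.println(finalPromise.join() + ", lock held = " + mutex.isLocked());
    }
}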
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
@Override
public void asyncOffloadPrefix(Position pos, OffloadCallback callback, Object ctx) {
    PositionImpl requestOffloadTo = (PositionImpl) pos;
    if (!isValidPosition(requestOffloadTo)) {
        callback.offloadFailed(new InvalidCursorPositionException("Invalid position for offload"), ctx);
        return;
    }

    PositionImpl firstUnoffloaded;

    Queue<LedgerInfo> ledgersToOffload = new ConcurrentLinkedQueue<>();
    synchronized (this) {
        log.info("[{}] Start ledgersOffload. ledgers={} totalSize={}", name, ledgers.keySet(),
                TOTAL_SIZE_UPDATER.get(this));

        if (STATE_UPDATER.get(this) == State.Closed) {
            log.info("[{}] Ignoring offload request since the managed ledger was already closed", name);
            callback.offloadFailed(new ManagedLedgerAlreadyClosedException(
                    "Can't offload closed managed ledger (" + name + ")"), ctx);
            return;
        }

        if (ledgers.isEmpty()) {
            log.info("[{}] Tried to offload a managed ledger with no ledgers, giving up", name);
            callback.offloadFailed(new ManagedLedgerAlreadyClosedException(
                    "Can't offload managed ledger (" + name + ") with no ledgers"), ctx);
            return;
        }

        long current = ledgers.lastKey();

        // the first ledger which will not be offloaded. Defaults to current,
        // in the case that the whole headmap is offloaded. Otherwise it will
        // be set as we iterate through the headmap values
        long firstLedgerRetained = current;
        for (LedgerInfo ls : ledgers.headMap(current).values()) {
            if (requestOffloadTo.getLedgerId() > ls.getLedgerId()) {
                // don't offload if ledger has already been offloaded, or is empty
                if (!ls.getOffloadContext().getComplete() && ls.getSize() > 0) {
                    ledgersToOffload.add(ls);
                }
            } else {
                firstLedgerRetained = ls.getLedgerId();
                break;
            }
        }
        firstUnoffloaded = PositionImpl.get(firstLedgerRetained, 0);
    }

    if (ledgersToOffload.isEmpty()) {
        log.info("[{}] No ledgers to offload", name);
        callback.offloadComplete(firstUnoffloaded, ctx);
        return;
    }

    if (offloadMutex.tryLock()) {
        log.info("[{}] Going to offload ledgers {}", name,
                ledgersToOffload.stream().map(l -> l.getLedgerId()).collect(Collectors.toList()));

        CompletableFuture<PositionImpl> promise = new CompletableFuture<>();
        promise.whenComplete((result, exception) -> {
            offloadMutex.unlock();
            if (exception != null) {
                callback.offloadFailed(new ManagedLedgerException(exception), ctx);
            } else {
                callback.offloadComplete(result, ctx);
            }
        });
        offloadLoop(promise, ledgersToOffload, firstUnoffloaded, Optional.empty());
    } else {
        callback.offloadFailed(
                new ManagedLedgerException.OffloadInProgressException("Offload operation already running"),
                ctx);
    }
}
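This variant additionally uses whenComplete to adapt a future-based internal pipeline to a callback-style public API. A minimal sketch of such an adapter (the ResultCallback interface is a hypothetical stand-in for OffloadCallback):

import java.util.concurrent.CompletableFuture;

public class FutureToCallback {
    // A minimal callback interface, standing in for styles like OffloadCallback.
    interface ResultCallback<T> {
        void onComplete(T result, Object ctx);
        void onFailed(Throwable error, Object ctx);
    }

    static <T> void adapt(CompletableFuture<T> future, ResultCallback<T> callback, Object ctx) {
        future.whenComplete((result, error) -> {
            if (error != null) {
                callback.onFailed(error, ctx);
            } else {
                callback.onComplete(result, ctx);
            }
        });
    }

    public static void main(String[] args) {
        adapt(CompletableFuture.completedFuture("offloaded"), new ResultCallback<String>() {
            @Override
            public void onComplete(String result, Object ctx) {
                System.out.println("completed: " + result);
            }

            @Override
            public void onFailed(Throwable error, Object ctx) {
                System.err.println("failed: " + error);
            }
        }, null);
    }
}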
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java
private void tryTransformLedgerInfo(long ledgerId, LedgerInfoTransformation transformation,
        CompletableFuture<Void> finalPromise) {
    synchronized (this) {
        if (!ledgersListMutex.tryLock()) {
            // retry in 100 milliseconds
            scheduledExecutor.schedule(
                    safeRun(() -> tryTransformLedgerInfo(ledgerId, transformation, finalPromise)), 100,
                    TimeUnit.MILLISECONDS);
        } else { // lock acquired
            CompletableFuture<Void> unlockingPromise = new CompletableFuture<>();
            unlockingPromise.whenComplete((res, ex) -> {
                ledgersListMutex.unlock();
                if (ex != null) {
                    finalPromise.completeExceptionally(ex);
                } else {
                    finalPromise.complete(res);
                }
            });

            LedgerInfo oldInfo = ledgers.get(ledgerId);
            if (oldInfo == null) {
                unlockingPromise.completeExceptionally(new OffloadConflict(
                        "Ledger " + ledgerId + " no longer exists in ManagedLedger, likely trimmed"));
            } else {
                try {
                    LedgerInfo newInfo = transformation.transform(oldInfo);
                    ledgers.put(ledgerId, newInfo);
                    store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat,
                            new MetaStoreCallback<Void>() {
                                @Override
                                public void operationComplete(Void result, Stat stat) {
                                    ledgersStat = stat;
                                    unlockingPromise.complete(null);
                                }

                                @Override
                                public void operationFailed(MetaStoreException e) {
                                    unlockingPromise.completeExceptionally(e);
                                }
                            });
                } catch (ManagedLedgerException mle) {
                    unlockingPromise.completeExceptionally(mle);
                }
            }
        }
    }
}
From source file:org.apache.distributedlog.BKLogSegmentWriter.java
public synchronized CompletableFuture<DLSN> writeInternal(LogRecord record) throws LogRecordTooLongException,
        LockingException, BKTransmitException, WriteException, InvalidEnvelopedEntryException {
    int logRecordSize = record.getPersistentSize();

    if (logRecordSize > MAX_LOGRECORD_SIZE) {
        throw new LogRecordTooLongException(String.format(
                "Log Record of size %d written when only %d is allowed", logRecordSize, MAX_LOGRECORD_SIZE));
    }

    // If we will exceed the max number of bytes allowed per entry,
    // initiate a transmit before accepting the new log record.
    if ((recordSetWriter.getNumBytes() + logRecordSize) > MAX_LOGRECORDSET_SIZE) {
        checkStateAndTransmit();
    }

    checkWriteLock();

    if (enableRecordCounts) {
        // Set the count here. The caller would appropriately increment it
        // if this log record is to be counted.
        record.setPositionWithinLogSegment(positionWithinLogSegment);
    }

    CompletableFuture<DLSN> writePromise = new CompletableFuture<DLSN>();
    writePromise.whenComplete(new OpStatsListener<DLSN>(writeTime));
    recordSetWriter.writeRecord(record, writePromise);

    if (record.getTransactionId() < lastTxId) {
        LOG.info("Log Segment {} TxId decreased Last: {} Record: {}",
                new Object[] { fullyQualifiedLogSegment, lastTxId, record.getTransactionId() });
    }

    if (!record.isControl()) {
        // only update last tx id for user records
        lastTxId = record.getTransactionId();
        outstandingBytes += (20 + record.getPayload().length);
    }

    return writePromise;
}
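Note that whenComplete here takes a named BiConsumer implementation (OpStatsListener) rather than a lambda, so one latency-recording listener class can be attached to many promises. A minimal sketch of such a listener (the timing logic is illustrative, not the DistributedLog implementation):

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class LatencyListener {
    // A reusable BiConsumer that records how long a future took to settle,
    // in the spirit of OpStatsListener.
    static class TimingListener<T> implements BiConsumer<T, Throwable> {
        private final long startNanos = System.nanoTime();

        @Override
        public void accept(T result, Throwable error) {
            long elapsedMicros = (System.nanoTime() - startNanos) / 1000;
            System.out.println((error == null ? "success" : "failure") + " after " + elapsedMicros + "us");
        }
    }

    public static void main(String[] args) {
        CompletableFuture<Long> writePromise = new CompletableFuture<>();
        writePromise.whenComplete(new TimingListener<>());
        writePromise.complete(42L); // triggers the listener
    }
}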
From source file:org.apache.distributedlog.lock.ZKSessionLock.java
@Override
public CompletableFuture<LockWaiter> asyncTryLock(final long timeout, final TimeUnit unit) {
    final CompletableFuture<String> result = new CompletableFuture<String>();
    final boolean wait = DistributedLogConstants.LOCK_IMMEDIATE != timeout;
    if (wait) {
        asyncTryLock(wait, result);
    } else {
        // try to check locks first
        zk.getChildren(lockPath, null, new AsyncCallback.Children2Callback() {
            @Override
            public void processResult(final int rc, String path, Object ctx, final List<String> children,
                    Stat stat) {
                lockStateExecutor.executeOrdered(lockPath, new SafeRunnable() {
                    @Override
                    public void safeRun() {
                        if (!lockState.inState(State.INIT)) {
                            result.completeExceptionally(new LockStateChangedException(lockPath, lockId,
                                    State.INIT, lockState.getState()));
                            return;
                        }
                        if (KeeperException.Code.OK.intValue() != rc) {
                            result.completeExceptionally(KeeperException.create(KeeperException.Code.get(rc)));
                            return;
                        }

                        FailpointUtils.checkFailPointNoThrow(FailpointUtils.FailPointName.FP_LockTryAcquire);

                        Collections.sort(children, MEMBER_COMPARATOR);
                        if (children.size() > 0) {
                            asyncParseClientID(zk, lockPath, children.get(0))
                                    .whenCompleteAsync(new FutureEventListener<Pair<String, Long>>() {
                                        @Override
                                        public void onSuccess(Pair<String, Long> owner) {
                                            if (!checkOrClaimLockOwner(owner, result)) {
                                                acquireFuture.complete(false);
                                            }
                                        }

                                        @Override
                                        public void onFailure(final Throwable cause) {
                                            result.completeExceptionally(cause);
                                        }
                                    }, lockStateExecutor.chooseThread(lockPath));
                        } else {
                            asyncTryLock(wait, result);
                        }
                    }
                });
            }
        }, null);
    }

    final CompletableFuture<Boolean> waiterAcquireFuture = FutureUtils.createFuture();
    waiterAcquireFuture.whenComplete((value, cause) -> acquireFuture.completeExceptionally(cause));
    return result.thenApply(new Function<String, LockWaiter>() {
        @Override
        public LockWaiter apply(final String currentOwner) {
            final Exception acquireException = new OwnershipAcquireFailedException(lockPath, currentOwner);
            FutureUtils.within(acquireFuture, timeout, unit, acquireException, lockStateExecutor, lockPath)
                    .whenComplete(new FutureEventListener<Boolean>() {

                        @Override
                        public void onSuccess(Boolean acquired) {
                            completeOrFail(acquireException);
                        }

                        @Override
                        public void onFailure(final Throwable acquireCause) {
                            completeOrFail(acquireException);
                        }

                        private void completeOrFail(final Throwable acquireCause) {
                            if (isLockHeld()) {
                                waiterAcquireFuture.complete(true);
                            } else {
                                asyncUnlock().whenComplete(new FutureEventListener<Void>() {
                                    @Override
                                    public void onSuccess(Void value) {
                                        waiterAcquireFuture.completeExceptionally(acquireCause);
                                    }

                                    @Override
                                    public void onFailure(Throwable cause) {
                                        waiterAcquireFuture.completeExceptionally(acquireCause);
                                    }
                                });
                            }
                        }
                    });
            return new LockWaiter(lockId.getLeft(), currentOwner, waiterAcquireFuture);
        }
    });
}
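The acquire path wraps the future in a timeout (FutureUtils.within) and then uses whenComplete to decide between keeping the lock and unlocking before surfacing the failure. A minimal sketch of observing a timeout-wrapped future with the JDK's own orTimeout (Java 9+), with the messages illustrative:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public class TimeoutObservation {
    public static void main(String[] args) throws Exception {
        CompletableFuture<Boolean> acquireFuture = new CompletableFuture<>();

        // Fail the acquisition if it does not complete within 100ms,
        // and observe the outcome whichever way it settles.
        acquireFuture.orTimeout(100, TimeUnit.MILLISECONDS).whenComplete((acquired, cause) -> {
            if (cause != null) {
                System.out.println("acquire failed (" + cause.getClass().getSimpleName() + "), releasing lock");
            } else {
                System.out.println("lock acquired: " + acquired);
            }
        });

        Thread.sleep(200); // let the timeout fire
    }
}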
From source file:org.apache.distributedlog.lock.ZKSessionLock.java
/**
 * Try lock. If it failed, it would cleanup its attempt.
 *
 * @param wait
 *          whether to wait for ownership.
 * @param result
 *          promise to satisfy with current lock owner
 */
private void asyncTryLock(boolean wait, final CompletableFuture<String> result) {
    final CompletableFuture<String> lockResult = new CompletableFuture<String>();
    lockResult.whenComplete(new FutureEventListener<String>() {
        @Override
        public void onSuccess(String currentOwner) {
            result.complete(currentOwner);
        }

        @Override
        public void onFailure(final Throwable lockCause) {
            // If tryLock failed due to state changed, we don't need to cleanup
            if (lockCause instanceof LockStateChangedException) {
                LOG.info("skipping cleanup for {} at {} after encountering lock "
                        + "state change exception : ", new Object[] { lockId, lockPath, lockCause });
                result.completeExceptionally(lockCause);
                return;
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("{} is cleaning up its lock state for {} due to : ",
                        new Object[] { lockId, lockPath, lockCause });
            }

            // If we encountered any exception we should cleanup
            CompletableFuture<Void> unlockResult = asyncUnlock();
            unlockResult.whenComplete(new FutureEventListener<Void>() {
                @Override
                public void onSuccess(Void value) {
                    result.completeExceptionally(lockCause);
                }

                @Override
                public void onFailure(Throwable cause) {
                    result.completeExceptionally(lockCause);
                }
            });
        }
    });
    asyncTryLockWithoutCleanup(wait, lockResult);
}
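The cleanup above deliberately preserves the original failure: however the asynchronous unlock turns out, the result promise fails with the initial lockCause rather than the unlock's own error. A minimal sketch of failure-preserving cleanup (cleanupAsync is a hypothetical stand-in for asyncUnlock):

import java.util.concurrent.CompletableFuture;

public class FailurePreservingCleanup {
    // Hypothetical async cleanup step; its own outcome is deliberately ignored.
    static CompletableFuture<Void> cleanupAsync() {
        return CompletableFuture.completedFuture(null);
    }

    public static void main(String[] args) {
        CompletableFuture<String> attempt = new CompletableFuture<>();
        CompletableFuture<String> result = new CompletableFuture<>();

        attempt.whenComplete((owner, lockCause) -> {
            if (lockCause == null) {
                result.complete(owner);
            } else {
                // Clean up first, then fail with the ORIGINAL cause either way.
                cleanupAsync().whenComplete((v, cleanupCause) -> result.completeExceptionally(lockCause));
            }
        });

        attempt.completeExceptionally(new IllegalStateException("lost lock"));
        result.whenComplete((v, e) -> System.out.println("failed with: " + e));
    }
}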
From source file:org.apache.distributedlog.lock.ZKSessionLock.java
CompletableFuture<Void> asyncUnlock(final Throwable cause) {
    final CompletableFuture<Void> promise = new CompletableFuture<Void>();

    // Use lock executor here rather than lock action, because we want this operation to be applied
    // whether the epoch has changed or not. The member node is EPHEMERAL_SEQUENTIAL so there's no
    // risk of an ABA problem where we delete and recreate a node and then delete it again here.
    lockStateExecutor.executeOrdered(lockPath, new SafeRunnable() {
        @Override
        public void safeRun() {
            acquireFuture.completeExceptionally(cause);
            unlockInternal(promise);
            promise.whenComplete(new OpStatsListener<Void>(unlockStats));
        }
    });

    return promise;
}