Example usage for java.util.concurrent CompletableFuture complete

List of usage examples for java.util.concurrent CompletableFuture complete

Introduction

In this page you can find the example usage for java.util.concurrent CompletableFuture complete.

Prototype

public boolean complete(T value) 

Source Link

Document

If this future is not already completed, sets the value returned by get() and related methods to the given value.

Usage

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Asks the master for the most recent major compaction timestamp of a single region.
 * <p>
 * The region is first resolved via {@code getRegionInfo(byte[])}, so the caller may pass either a
 * full region name or an encoded region name.
 *
 * @param regionName full or encoded region name
 * @return a future completed with the value produced by {@code ProtobufUtil::toOptionalTimestamp},
 *         or completed exceptionally if region resolution or the master call fails
 */
@Override
public CompletableFuture<Optional<Long>> getLastMajorCompactionTimestampForRegion(byte[] regionName) {
    CompletableFuture<Optional<Long>> future = new CompletableFuture<>();
    // regionName may be a full region name or encoded region name, so getRegionInfo(byte[]) first
    addListener(getRegionInfo(regionName), (region, err) -> {
        if (err != null) {
            // Region lookup failed; surface the error to the caller and stop here.
            future.completeExceptionally(err);
            return;
        }
        MajorCompactionTimestampForRegionRequest.Builder builder = MajorCompactionTimestampForRegionRequest
                .newBuilder();
        builder.setRegion(RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName));
        // Issue the master RPC and relay its outcome (value or error) onto the returned future.
        addListener(this.<Optional<Long>>newMasterCaller().action((controller, stub) -> this
                .<MajorCompactionTimestampForRegionRequest, MajorCompactionTimestampResponse, Optional<Long>>call(
                        controller, stub, builder.build(),
                        (s, c, req, done) -> s.getLastMajorCompactionTimestampForRegion(c, req, done),
                        ProtobufUtil::toOptionalTimestamp))
                .call(), (timestamp, err2) -> {
                    if (err2 != null) {
                        future.completeExceptionally(err2);
                    } else {
                        future.complete(timestamp);
                    }
                });
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Merges two or more regions into one.
 * <p>
 * All region names are normalized to encoded names, validated to belong to the same table, and then
 * submitted to the master as a single merge procedure.
 *
 * @param nameOfRegionsToMerge regions to merge (full or encoded names); at least two are required
 * @param forcible whether to force a merge of non-adjacent regions
 * @return a future completed when the merge procedure finishes, or completed exceptionally on
 *         validation or RPC failure
 */
@Override
public CompletableFuture<Void> mergeRegions(List<byte[]> nameOfRegionsToMerge, boolean forcible) {
    // Merging needs at least two participants; reject anything smaller up front.
    if (nameOfRegionsToMerge.size() < 2) {
        return failedFuture(
                new IllegalArgumentException("Can not merge only " + nameOfRegionsToMerge.size() + " region"));
    }
    byte[][] encodedNames = nameOfRegionsToMerge.stream().map(this::toEncodeRegionName)
            .toArray(byte[][]::new);
    CompletableFuture<Void> result = new CompletableFuture<>();

    // Verify every region belongs to the same table before asking the master to merge them.
    addListener(checkRegionsAndGetTableName(encodedNames), (tableName, error) -> {
        if (error != null) {
            result.completeExceptionally(error);
            return;
        }

        final MergeTableRegionsRequest request;
        try {
            request = RequestConverter.buildMergeTableRegionsRequest(encodedNames, forcible,
                    ng.getNonceGroup(), ng.newNonce());
        } catch (DeserializationException e) {
            result.completeExceptionally(e);
            return;
        }

        // Submit the merge procedure and forward its completion (or failure) to the caller.
        addListener(this.<MergeTableRegionsRequest, MergeTableRegionsResponse>procedureCall(tableName, request,
                (s, c, req, done) -> s.mergeTableRegions(c, req, done),
                MergeTableRegionsResponse::getProcId,
                new MergeTableRegionProcedureBiConsumer(tableName)), (ret, procError) -> {
                    if (procError == null) {
                        result.complete(ret);
                    } else {
                        result.completeExceptionally(procError);
                    }
                });
    });
    return result;
}

From source file:org.apache.pulsar.broker.service.BrokerService.java

/**
 * Builds the {@link ManagedLedgerConfig} for the given destination, combining namespace policies
 * (persistence and retention) with broker-wide defaults from the service configuration.
 *
 * @param topicName destination whose managed-ledger configuration is wanted
 * @return a future completed with the assembled config, or completed exceptionally if the policies
 *         lookup throws
 */
public CompletableFuture<ManagedLedgerConfig> getManagedLedgerConfig(DestinationName topicName) {
    CompletableFuture<ManagedLedgerConfig> future = new CompletableFuture<>();
    // Execute in background thread, since getting the policies might block if the z-node wasn't already cached
    pulsar.getOrderedExecutor().submitOrdered(topicName, safeRun(() -> {
        NamespaceName namespace = topicName.getNamespaceObject();
        ServiceConfiguration serviceConfig = pulsar.getConfiguration();

        // Get persistence policy for this destination
        Policies policies;
        try {
            policies = pulsar
                    .getConfigurationCache().policiesCache().get(AdminResource.path("policies",
                            namespace.getProperty(), namespace.getCluster(), namespace.getLocalName()))
                    .orElse(null);
        } catch (Throwable t) {
            // NOTE(review): the original comment claimed we "fallback on the default" here, but the
            // code actually fails the future and returns — a lookup error is fatal, not ignored.
            log.warn("Got exception when reading persistence policy for {}: {}", topicName, t.getMessage(), t);
            future.completeExceptionally(t);
            return;
        }

        // A missing z-node (policies == null) — as opposed to a lookup error — does fall back to defaults.
        PersistencePolicies persistencePolicies = policies != null ? policies.persistence : null;
        RetentionPolicies retentionPolicies = policies != null ? policies.retention_policies : null;

        if (persistencePolicies == null) {
            // Apply default values
            persistencePolicies = new PersistencePolicies(serviceConfig.getManagedLedgerDefaultEnsembleSize(),
                    serviceConfig.getManagedLedgerDefaultWriteQuorum(),
                    serviceConfig.getManagedLedgerDefaultAckQuorum(),
                    serviceConfig.getManagedLedgerDefaultMarkDeleteRateLimit());
        }

        if (retentionPolicies == null) {
            retentionPolicies = new RetentionPolicies(serviceConfig.getDefaultRetentionTimeInMinutes(),
                    serviceConfig.getDefaultRetentionSizeInMB());
        }

        // Translate the (possibly defaulted) policies plus broker config into the ledger config.
        ManagedLedgerConfig config = new ManagedLedgerConfig();
        config.setEnsembleSize(persistencePolicies.getBookkeeperEnsemble());
        config.setWriteQuorumSize(persistencePolicies.getBookkeeperWriteQuorum());
        config.setAckQuorumSize(persistencePolicies.getBookkeeperAckQuorum());
        config.setThrottleMarkDelete(persistencePolicies.getManagedLedgerMaxMarkDeleteRate());
        config.setDigestType(DigestType.CRC32);

        config.setMaxUnackedRangesToPersist(serviceConfig.getManagedLedgerMaxUnackedRangesToPersist());
        config.setMaxEntriesPerLedger(serviceConfig.getManagedLedgerMaxEntriesPerLedger());
        config.setMinimumRolloverTime(serviceConfig.getManagedLedgerMinLedgerRolloverTimeMinutes(),
                TimeUnit.MINUTES);
        config.setMaximumRolloverTime(serviceConfig.getManagedLedgerMaxLedgerRolloverTimeMinutes(),
                TimeUnit.MINUTES);
        // NOTE(review): hard-coded 2048 MB ledger size cap — consider making this configurable.
        config.setMaxSizePerLedgerMb(2048);

        config.setMetadataEnsembleSize(serviceConfig.getManagedLedgerDefaultEnsembleSize());
        config.setMetadataWriteQuorumSize(serviceConfig.getManagedLedgerDefaultWriteQuorum());
        config.setMetadataAckQuorumSize(serviceConfig.getManagedLedgerDefaultAckQuorum());
        config.setMetadataMaxEntriesPerLedger(serviceConfig.getManagedLedgerCursorMaxEntriesPerLedger());

        config.setLedgerRolloverTimeout(serviceConfig.getManagedLedgerCursorRolloverTimeInSeconds());
        config.setRetentionTime(retentionPolicies.getRetentionTimeInMinutes(), TimeUnit.MINUTES);
        config.setRetentionSizeInMB(retentionPolicies.getRetentionSizeInMB());

        future.complete(config);
    }, (exception) -> future.completeExceptionally(exception)));

    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Turns compaction on or off on the given region servers and reports each server's previous state.
 *
 * @param switchState desired compaction state to set on each server
 * @param serverNamesList servers to target; resolved via {@code getRegionServerList}
 * @return a future completed with a map of server to its prior compaction-enabled state, or
 *         completed exceptionally on the first per-server failure
 */
@Override
public CompletableFuture<Map<ServerName, Boolean>> compactionSwitch(boolean switchState,
        List<String> serverNamesList) {
    CompletableFuture<Map<ServerName, Boolean>> future = new CompletableFuture<>();
    addListener(getRegionServerList(serverNamesList), (serverNames, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        // Accessed by multiple threads.
        Map<ServerName, Boolean> serverStates = new ConcurrentHashMap<>(serverNames.size());
        List<CompletableFuture<Boolean>> futures = new ArrayList<>(serverNames.size());
        // Fan out one switchCompact RPC per server; the first failure completes the result
        // exceptionally while the remaining in-flight RPCs are left to finish on their own.
        serverNames.stream().forEach(serverName -> {
            futures.add(switchCompact(serverName, switchState).whenComplete((serverState, err2) -> {
                if (err2 != null) {
                    future.completeExceptionally(unwrapCompletionException(err2));
                } else {
                    serverStates.put(serverName, serverState);
                }
            }));
        });
        // Once every per-server RPC has settled, publish the aggregate map — unless some RPC
        // already failed the result future above (guarded by isCompletedExceptionally).
        addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()])),
                (ret, err3) -> {
                    if (!future.isCompletedExceptionally()) {
                        if (err3 != null) {
                            future.completeExceptionally(err3);
                        } else {
                            future.complete(serverStates);
                        }
                    }
                });
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Splits the given region at an explicit split point.
 * <p>
 * Validates that the target is a primary replica, is currently assigned to a server, and that the
 * split point does not coincide with the region's start key before issuing the split.
 *
 * @param regionName full or encoded region name to split
 * @param splitPoint row key at which to split; must be non-null
 * @return a future completed when the split request has been issued, or completed exceptionally on
 *         any validation or RPC failure
 */
@Override
public CompletableFuture<Void> splitRegion(byte[] regionName, byte[] splitPoint) {
    Preconditions.checkNotNull(splitPoint,
            "splitPoint is null. If you don't specify a splitPoint, use splitRegion(byte[]) instead");
    CompletableFuture<Void> result = new CompletableFuture<>();
    addListener(getRegionLocation(regionName), (loc, locateError) -> {
        if (locateError != null) {
            result.completeExceptionally(locateError);
            return;
        }
        RegionInfo region = loc.getRegion();
        // Only the primary replica can be split; secondary replicas follow automatically.
        if (region.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
            result.completeExceptionally(new IllegalArgumentException(
                    "Can't split replicas directly. Replicas are auto-split when their primary is split."));
            return;
        }
        ServerName server = loc.getServerName();
        // The region must be online somewhere for a split to be possible.
        if (server == null) {
            result.completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
            return;
        }
        // Splitting exactly at the start key would yield an empty daughter region.
        if (region.getStartKey() != null && Bytes.compareTo(region.getStartKey(), splitPoint) == 0) {
            result.completeExceptionally(
                    new IllegalArgumentException("should not give a splitkey which equals to startkey!"));
            return;
        }
        addListener(split(region, splitPoint), (ret, splitError) -> {
            if (splitError == null) {
                result.complete(ret);
            } else {
                result.completeExceptionally(splitError);
            }
        });
    });
    return result;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Computes the compaction state of a table.
 * <p>
 * For {@code MOB} compaction the state comes from the synthetic MOB region hosted on the master.
 * For {@code NORMAL} compaction the state is aggregated across every online region of the table:
 * any region in MAJOR_AND_MINOR short-circuits the result; otherwise seeing both a MAJOR and a
 * MINOR region also yields MAJOR_AND_MINOR, a single kind yields that kind, and no compaction
 * yields NONE.
 *
 * @param tableName table to inspect
 * @param compactType MOB or NORMAL
 * @return a future completed with the aggregate compaction state
 * @throws IllegalArgumentException if {@code compactType} is not MOB or NORMAL
 */
@Override
public CompletableFuture<CompactionState> getCompactionState(TableName tableName, CompactType compactType) {
    CompletableFuture<CompactionState> future = new CompletableFuture<>();

    switch (compactType) {
    case MOB:
        // MOB data lives in a synthetic region served by the master, so ask the master directly.
        addListener(connection.registry.getMasterAddress(), (serverName, err) -> {
            if (err != null) {
                future.completeExceptionally(err);
                return;
            }
            RegionInfo regionInfo = RegionInfo.createMobRegionInfo(tableName);

            // NOTE(review): the inner lambda passes the outer `controller` instead of its own
            // parameter `c` to getRegionInfo — verify these are the same controller instance.
            addListener(this.<GetRegionInfoResponse>newAdminCaller().serverName(serverName)
                    .action((controller, stub) -> this
                            .<GetRegionInfoRequest, GetRegionInfoResponse, GetRegionInfoResponse>adminCall(
                                    controller, stub,
                                    RequestConverter.buildGetRegionInfoRequest(regionInfo.getRegionName(),
                                            true),
                                    (s, c, req, done) -> s.getRegionInfo(controller, req, done), resp -> resp))
                    .call(), (resp2, err2) -> {
                        if (err2 != null) {
                            future.completeExceptionally(err2);
                        } else {
                            if (resp2.hasCompactionState()) {
                                future.complete(ProtobufUtil.createCompactionState(resp2.getCompactionState()));
                            } else {
                                // No state reported for the MOB region means nothing is compacting.
                                future.complete(CompactionState.NONE);
                            }
                        }
                    });
        });
        break;
    case NORMAL:
        addListener(getTableHRegionLocations(tableName), (locations, err) -> {
            if (err != null) {
                future.completeExceptionally(err);
                return;
            }
            // Collected concurrently by the per-region callbacks below.
            ConcurrentLinkedQueue<CompactionState> regionStates = new ConcurrentLinkedQueue<>();
            List<CompletableFuture<CompactionState>> futures = new ArrayList<>();
            // Query each online, assigned region; skip offline regions and unassigned locations.
            locations.stream().filter(loc -> loc.getServerName() != null).filter(loc -> loc.getRegion() != null)
                    .filter(loc -> !loc.getRegion().isOffline()).map(loc -> loc.getRegion().getRegionName())
                    .forEach(region -> {
                        futures.add(getCompactionStateForRegion(region).whenComplete((regionState, err2) -> {
                            // If any region compaction state is MAJOR_AND_MINOR
                            // the table compaction state is MAJOR_AND_MINOR, too.
                            if (err2 != null) {
                                future.completeExceptionally(unwrapCompletionException(err2));
                            } else if (regionState == CompactionState.MAJOR_AND_MINOR) {
                                future.complete(regionState);
                            } else {
                                regionStates.add(regionState);
                            }
                        }));
                    });
            addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()])),
                    (ret, err3) -> {
                        // If future not completed, check all regions's compaction state
                        if (!future.isCompletedExceptionally() && !future.isDone()) {
                            CompactionState state = CompactionState.NONE;
                            for (CompactionState regionState : regionStates) {
                                switch (regionState) {
                                case MAJOR:
                                    // Seeing MAJOR after MINOR (or vice versa) means both kinds
                                    // are in progress somewhere in the table.
                                    if (state == CompactionState.MINOR) {
                                        future.complete(CompactionState.MAJOR_AND_MINOR);
                                    } else {
                                        state = CompactionState.MAJOR;
                                    }
                                    break;
                                case MINOR:
                                    if (state == CompactionState.MAJOR) {
                                        future.complete(CompactionState.MAJOR_AND_MINOR);
                                    } else {
                                        state = CompactionState.MINOR;
                                    }
                                    break;
                                case NONE:
                                default:
                                }
                            }
                            if (!future.isDone()) {
                                future.complete(state);
                            }
                        }
                    });
        });
        break;
    default:
        throw new IllegalArgumentException("Unknown compactType: " + compactType);
    }

    return future;
}

From source file:org.apache.distributedlog.lock.ZKSessionLock.java

/**
 * Check Lock Owner Phase 3: watch sibling node for lock ownership.
 *
 * @param lockWatcher
 *          lock watcher.
 * @param wait
 *          whether to wait for ownership.
 * @param myNode
 *          my lock node.
 * @param siblingNode
 *          my sibling lock node.
 * @param ownerNode
 *          owner lock node.
 * @param currentOwner
 *          current owner info.
 * @param promise
 *          promise to satisfy with current lock owner.
 */
private void watchLockOwner(final LockWatcher lockWatcher, final boolean wait, final String myNode,
        final String siblingNode, final String ownerNode, final Pair<String, Long> currentOwner,
        final CompletableFuture<String> promise) {
    executeLockAction(lockWatcher.epoch, new LockAction() {
        @Override
        public void execute() {
            // Decide whether to watch the sibling node and, if so, whether setting the watch
            // should also claim ownership of the lock.
            boolean shouldWatch;
            final boolean shouldClaimOwnership;
            if (lockContext.hasLockId(currentOwner) && siblingNode.equals(ownerNode)) {
                // if the current owner is the znode left from previous session
                // we should watch it and claim ownership
                shouldWatch = true;
                shouldClaimOwnership = true;
                LOG.info(
                        "LockWatcher {} for {} found its previous session {} held lock,"
                                + " watch it to claim ownership.",
                        new Object[] { myNode, lockPath, currentOwner });
            } else if (lockId.compareTo(currentOwner) == 0
                    && areLockWaitersInSameSession(siblingNode, ownerNode)) {
                // I found that my sibling is the current owner with same lock id (client id & session id)
                // It must be left by any race condition from same zookeeper client
                shouldWatch = true;
                shouldClaimOwnership = true;
                LOG.info(
                        "LockWatcher {} for {} found itself {} already held lock at sibling node {},"
                                + " watch it to claim ownership.",
                        new Object[] { myNode, lockPath, lockId, siblingNode });
            } else {
                // A stranger owns the lock: watch only if the caller asked to wait for it.
                shouldWatch = wait;
                if (wait) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(
                                "Current LockWatcher for {} with ephemeral node {}, "
                                        + "is waiting for {} to release lock at {}.",
                                new Object[] { lockPath, myNode, siblingNode, System.currentTimeMillis() });
                    }
                }
                shouldClaimOwnership = false;
            }

            // watch sibling for lock ownership
            if (shouldWatch) {
                watchedNode = String.format("%s/%s", lockPath, siblingNode);
                zk.exists(watchedNode, lockWatcher, new AsyncCallback.StatCallback() {
                    @Override
                    public void processResult(final int rc, String path, Object ctx, final Stat stat) {
                        // Re-enter the lock's serialized executor: the lock state may have moved
                        // on (e.g. been closed) between issuing exists() and this callback.
                        executeLockAction(lockWatcher.epoch, new LockAction() {
                            @Override
                            public void execute() {
                                if (!lockState.inState(State.PREPARED)) {
                                    promise.completeExceptionally(new LockStateChangedException(lockPath,
                                            lockId, State.PREPARED, lockState.getState()));
                                    return;
                                }

                                if (KeeperException.Code.OK.intValue() == rc) {
                                    if (shouldClaimOwnership) {
                                        // watch owner successfully
                                        LOG.info(
                                                "LockWatcher {} claimed ownership for {} after set watcher on {}.",
                                                new Object[] { myNode, lockPath, ownerNode });
                                        claimOwnership(lockWatcher.epoch);
                                        promise.complete(currentOwner.getLeft());
                                    } else {
                                        // watch sibling successfully
                                        lockState.transition(State.WAITING);
                                        promise.complete(currentOwner.getLeft());
                                    }
                                } else if (KeeperException.Code.NONODE.intValue() == rc) {
                                    // sibling just disappeared, it might be the chance to claim ownership
                                    checkLockOwnerAndWaitIfPossible(lockWatcher, wait, promise);
                                } else {
                                    // Unexpected ZK error while setting the watch; fail the promise.
                                    promise.completeExceptionally(
                                            KeeperException.create(KeeperException.Code.get(rc)));
                                }
                            }

                            @Override
                            public String getActionName() {
                                StringBuilder sb = new StringBuilder();
                                sb.append("postWatchLockOwner(myNode=").append(myNode).append(", siblingNode=")
                                        .append(siblingNode).append(", ownerNode=").append(ownerNode)
                                        .append(")");
                                return sb.toString();
                            }
                        }, promise);
                    }
                }, null);
            } else {
                // Not watching: report the current owner and let the caller decide what to do.
                promise.complete(currentOwner.getLeft());
            }
        }

        @Override
        public String getActionName() {
            StringBuilder sb = new StringBuilder();
            sb.append("watchLockOwner(myNode=").append(myNode).append(", siblingNode=").append(siblingNode)
                    .append(", ownerNode=").append(ownerNode).append(")");
            return sb.toString();
        }
    }, promise);
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Ensures the given table exists on the replication peer cluster: creates it (with the provided
 * splits, if any) when absent, otherwise compares the table descriptor with the peer's.
 * <p>
 * Fix: the {@code AsyncConnection} opened to the peer cluster was never closed and leaked on every
 * call; it is now closed when the returned future settles, regardless of outcome.
 *
 * @param tableName table to sync to the peer
 * @param splits optional pre-split keys used when the table must be created; may be null
 * @param peer replication peer describing the target cluster
 * @return a future completed when the table is confirmed present/compatible on the peer, or
 *         completed exceptionally on any failure along the way
 */
private CompletableFuture<Void> trySyncTableToPeerCluster(TableName tableName, byte[][] splits,
        ReplicationPeerDescription peer) {
    Configuration peerConf = null;
    try {
        peerConf = ReplicationPeerConfigUtil.getPeerClusterConfiguration(connection.getConfiguration(), peer);
    } catch (IOException e) {
        return failedFuture(e);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(ConnectionFactory.createAsyncConnection(peerConf), (conn, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        // Close the peer connection once the overall operation settles (success or failure);
        // previously the connection was never closed and leaked on every invocation.
        future.whenComplete((unused, unusedErr) -> {
            try {
                conn.close();
            } catch (IOException e) {
                LOG.warn("failed to close connection to peer cluster {}", peer.getPeerId(), e);
            }
        });
        addListener(getDescriptor(tableName), (tableDesc, err1) -> {
            if (err1 != null) {
                future.completeExceptionally(err1);
                return;
            }
            AsyncAdmin peerAdmin = conn.getAdmin();
            addListener(peerAdmin.tableExists(tableName), (exist, err2) -> {
                if (err2 != null) {
                    future.completeExceptionally(err2);
                    return;
                }
                if (!exist) {
                    // Table missing on the peer: create it, honoring the caller's split keys.
                    CompletableFuture<Void> createTableFuture = null;
                    if (splits == null) {
                        createTableFuture = peerAdmin.createTable(tableDesc);
                    } else {
                        createTableFuture = peerAdmin.createTable(tableDesc, splits);
                    }
                    addListener(createTableFuture, (result, err3) -> {
                        if (err3 != null) {
                            future.completeExceptionally(err3);
                        } else {
                            future.complete(result);
                        }
                    });
                } else {
                    // Table already exists: verify its descriptor is compatible with ours.
                    addListener(compareTableWithPeerCluster(tableName, tableDesc, peer, peerAdmin),
                            (result, err4) -> {
                                if (err4 != null) {
                                    future.completeExceptionally(err4);
                                } else {
                                    future.complete(result);
                                }
                            });
                }
            });
        });
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Takes a snapshot of a table and waits for it to complete.
 * <p>
 * The master's snapshot RPC returns an expected timeout; a timer task then polls
 * {@code isSnapshotFinished} with exponentially growing pauses (capped at
 * {@code expectedTimeout / maxAttempts}) until the snapshot finishes or the deadline passes.
 *
 * @param snapshotDesc description of the snapshot to take
 * @return a future completed when the snapshot finishes, or completed exceptionally with
 *         {@code SnapshotCreationException} if it does not finish within the expected time
 */
@Override
public CompletableFuture<Void> snapshot(SnapshotDescription snapshotDesc) {
    SnapshotProtos.SnapshotDescription snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
    try {
        // Reject malformed snapshot requests before making any RPC.
        ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
    } catch (IllegalArgumentException e) {
        return failedFuture(e);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot).build();
    addListener(this.<Long>newMasterCaller()
            .action((controller, stub) -> this.<SnapshotRequest, SnapshotResponse, Long>call(controller, stub,
                    request, (s, c, req, done) -> s.snapshot(c, req, done), resp -> resp.getExpectedTimeout()))
            .call(), (expectedTimeout, err) -> {
                if (err != null) {
                    future.completeExceptionally(err);
                    return;
                }
                // Poll until the snapshot is done or the master-provided deadline elapses.
                TimerTask pollingTask = new TimerTask() {
                    int tries = 0;
                    long startTime = EnvironmentEdgeManager.currentTime();
                    long endTime = startTime + expectedTimeout;
                    long maxPauseTime = expectedTimeout / maxAttempts;

                    @Override
                    public void run(Timeout timeout) throws Exception {
                        if (EnvironmentEdgeManager.currentTime() < endTime) {
                            addListener(isSnapshotFinished(snapshotDesc), (done, err2) -> {
                                if (err2 != null) {
                                    future.completeExceptionally(err2);
                                } else if (done) {
                                    future.complete(null);
                                } else {
                                    // retry again after pauseTime.
                                    long pauseTime = ConnectionUtils
                                            .getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries);
                                    pauseTime = Math.min(pauseTime, maxPauseTime);
                                    AsyncConnectionImpl.RETRY_TIMER.newTimeout(this, pauseTime,
                                            TimeUnit.MILLISECONDS);
                                }
                            });
                        } else {
                            // Deadline exceeded: give up and report the timeout to the caller.
                            future.completeExceptionally(new SnapshotCreationException(
                                    "Snapshot '" + snapshot.getName() + "' wasn't completed in expectedTime:"
                                            + expectedTimeout + " ms",
                                    snapshotDesc));
                        }
                    }
                };
                // Kick off the first poll almost immediately.
                AsyncConnectionImpl.RETRY_TIMER.newTimeout(pollingTask, 1, TimeUnit.MILLISECONDS);
            });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Polls the master for the result of a procedure, retrying with backoff while the procedure is
 * still running or while the poll RPC itself fails.
 * <p>
 * NOTE(review): retries are unbounded — a procedure that never leaves RUNNING keeps this polling
 * forever; confirm an upper bound is enforced elsewhere.
 *
 * @param procId procedure id to query
 * @param future future to complete with the procedure's outcome
 * @param retries number of attempts made so far; drives the backoff pause
 */
private void getProcedureResult(long procId, CompletableFuture<Void> future, int retries) {
    addListener(
            this.<GetProcedureResultResponse>newMasterCaller().action((controller, stub) -> this
                    .<GetProcedureResultRequest, GetProcedureResultResponse, GetProcedureResultResponse>call(
                            controller, stub, GetProcedureResultRequest.newBuilder().setProcId(procId).build(),
                            (s, c, req, done) -> s.getProcedureResult(c, req, done), (resp) -> resp))
                    .call(),
            (resp, failure) -> {
                if (failure != null) {
                    // Poll RPC failed; log and schedule another attempt after a backoff pause.
                    LOG.warn("failed to get the procedure result procId={}", procId,
                            ConnectionUtils.translateException(failure));
                    retryTimer.newTimeout(t -> getProcedureResult(procId, future, retries + 1),
                            ConnectionUtils.getPauseTime(pauseNs, retries), TimeUnit.NANOSECONDS);
                } else if (resp.getState() == GetProcedureResultResponse.State.RUNNING) {
                    // Procedure still in flight; poll again later.
                    retryTimer.newTimeout(t -> getProcedureResult(procId, future, retries + 1),
                            ConnectionUtils.getPauseTime(pauseNs, retries), TimeUnit.NANOSECONDS);
                } else if (resp.hasException()) {
                    // Procedure finished with an error; surface it as an IOException.
                    future.completeExceptionally(ForeignExceptionUtil.toIOException(resp.getException()));
                } else {
                    future.complete(null);
                }
            });
}