Example usage for java.util.concurrent CompletableFuture complete

List of usage examples for java.util.concurrent CompletableFuture complete

Introduction

On this page you can find example usages of java.util.concurrent CompletableFuture.complete().

Prototype

public boolean complete(T value) 

Document

If not already completed, sets the value returned by get() and related methods to the given value.
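The method returns true if this invocation caused the future to transition to a completed state, and false otherwise. Before the full usage examples below, here is a minimal, self-contained sketch (the class name and values are illustrative, not taken from the examples) showing that contract: the first complete() call sets the result and returns true, get() then observes the value, and any later call returns false.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public class CompleteExample {
    public static void main(String[] args) throws Exception {
        CompletableFuture<String> future = new CompletableFuture<>();

        // Complete the future from another thread. complete() returns true only
        // for the call that actually transitions the future to completed.
        new Thread(() -> {
            boolean firstCall = future.complete("hello");
            System.out.println("complete() returned: " + firstCall); // true
        }).start();

        // get() blocks until complete() or completeExceptionally() is invoked.
        System.out.println("Result: " + future.get(5, TimeUnit.SECONDS)); // hello

        // Once completed, further calls have no effect and return false.
        System.out.println("Second complete(): " + future.complete("ignored")); // false
    }
}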

Usage

From source file:com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleProducer(final CommandProducer cmdProducer) {
    checkArgument(state == State.Connected);
    CompletableFuture<Boolean> authorizationFuture;
    if (service.isAuthorizationEnabled()) {
        authorizationFuture = service.getAuthorizationManager()
                .canProduceAsync(DestinationName.get(cmdProducer.getTopic().toString()), authRole);
    } else {
        authorizationFuture = CompletableFuture.completedFuture(true);
    }

    // Use producer name provided by client if present
    final String producerName = cmdProducer.hasProducerName() ? cmdProducer.getProducerName()
            : service.generateUniqueProducerName();
    final String topicName = cmdProducer.getTopic();
    final long producerId = cmdProducer.getProducerId();
    final long requestId = cmdProducer.getRequestId();
    authorizationFuture.thenApply(isAuthorized -> {
        if (isAuthorized) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Client is authorized to Produce with role {}", remoteAddress, authRole);
            }
            CompletableFuture<Producer> producerFuture = new CompletableFuture<>();
            CompletableFuture<Producer> existingProducerFuture = producers.putIfAbsent(producerId,
                    producerFuture);

            if (existingProducerFuture != null) {
                if (existingProducerFuture.isDone() && !existingProducerFuture.isCompletedExceptionally()) {
                    Producer producer = existingProducerFuture.getNow(null);
                    log.info("[{}] Producer with the same id is already created: {}", remoteAddress, producer);
                    ctx.writeAndFlush(Commands.newProducerSuccess(requestId, producer.getProducerName()));
                    return null;
                } else {
                    // There was an earlier request to create a producer with the
                    // same producerId. This can happen when the client timeout is
                    // lower than the broker timeout. We need to wait until the
                    // previous producer creation request either completes or fails.
                    ServerError error = !existingProducerFuture.isDone() ? ServerError.ServiceNotReady
                            : getErrorCode(existingProducerFuture);
                    log.warn("[{}][{}] Producer is already present on the connection", remoteAddress,
                            topicName);
                    ctx.writeAndFlush(Commands.newError(requestId, error,
                            "Producer is already present on the connection"));
                    return null;
                }
            }

            log.info("[{}][{}] Creating producer. producerId={}", remoteAddress, topicName, producerId);

            service.getTopic(topicName).thenAccept((Topic topic) -> {
                // Before creating the producer, check whether the backlog quota
                // is exceeded on the topic
                if (topic.isBacklogQuotaExceeded(producerName)) {
                    IllegalStateException illegalStateException = new IllegalStateException(
                            "Cannot create producer on topic with backlog quota exceeded");
                    BacklogQuota.RetentionPolicy retentionPolicy = topic.getBacklogQuota().getPolicy();
                    if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_request_hold) {
                        ctx.writeAndFlush(
                                Commands.newError(requestId, ServerError.ProducerBlockedQuotaExceededError,
                                        illegalStateException.getMessage()));
                    } else if (retentionPolicy == BacklogQuota.RetentionPolicy.producer_exception) {
                        ctx.writeAndFlush(
                                Commands.newError(requestId, ServerError.ProducerBlockedQuotaExceededException,
                                        illegalStateException.getMessage()));
                    }
                    producerFuture.completeExceptionally(illegalStateException);
                    producers.remove(producerId, producerFuture);
                    return;
                }

                disableTcpNoDelayIfNeeded(topicName, producerName);

                Producer producer = new Producer(topic, ServerCnx.this, producerId, producerName, authRole);

                try {
                    topic.addProducer(producer);

                    if (isActive()) {
                        if (producerFuture.complete(producer)) {
                            log.info("[{}] Created new producer: {}", remoteAddress, producer);
                            ctx.writeAndFlush(Commands.newProducerSuccess(requestId, producerName));
                            return;
                        } else {
                            // The producer's future was already completed by an earlier close command
                            producer.closeNow();
                            log.info("[{}] Cleared producer created after timeout on client side {}",
                                    remoteAddress, producer);
                        }
                    } else {
                        producer.closeNow();
                        log.info("[{}] Cleared producer created after connection was closed: {}", remoteAddress,
                                producer);
                        producerFuture.completeExceptionally(
                                new IllegalStateException("Producer created after connection was closed"));
                    }
                } catch (BrokerServiceException ise) {
                    log.error("[{}] Failed to add producer to topic {}: {}", remoteAddress, topicName,
                            ise.getMessage());
                    ctx.writeAndFlush(Commands.newError(requestId,
                            BrokerServiceException.getClientErrorCode(ise), ise.getMessage()));
                    producerFuture.completeExceptionally(ise);
                }

                producers.remove(producerId, producerFuture);
            }).exceptionally(exception -> {
                Throwable cause = exception.getCause();
                if (!(cause instanceof ServiceUnitNotReadyException)) {
                    // Do not print stack traces for expected exceptions
                    log.error("[{}] Failed to create topic {}", remoteAddress, topicName, exception);
                }

                // If the client timed out, the future would have already been
                // completed by a subsequent close. Send the error back to the
                // client only if the future is not already completed.
                if (producerFuture.completeExceptionally(exception)) {
                    ctx.writeAndFlush(Commands.newError(requestId,
                            BrokerServiceException.getClientErrorCode(cause), cause.getMessage()));
                }
                producers.remove(producerId, producerFuture);

                return null;
            });
        } else {
            String msg = "Client is not authorized to Produce";
            log.warn("[{}] {} with role {}", remoteAddress, msg, authRole);
            ctx.writeAndFlush(Commands.newError(requestId, ServerError.AuthorizationError, msg));
        }
        return null;
    });
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference<TableName> tableName,
        CompletableFuture<TableName> result) {
    addListener(getRegionLocation(encodeRegionName), (location, err) -> {
        if (err != null) {
            result.completeExceptionally(err);
            return;
        }
        RegionInfo regionInfo = location.getRegion();
        if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
            result.completeExceptionally(
                    new IllegalArgumentException("Can't invoke merge on non-default regions directly"));
            return;
        }
        if (!tableName.compareAndSet(null, regionInfo.getTable())) {
            if (!tableName.get().equals(regionInfo.getTable())) {
                // The tables of these two regions should be the same.
                result.completeExceptionally(
                        new IllegalArgumentException("Cannot merge regions from two different tables "
                                + tableName.get() + " and " + regionInfo.getTable()));
            } else {
                result.complete(tableName.get());
            }
        }
    });
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Connect to peer and check the table descriptor on peer:
 * <ol>
 * <li>Create the same table on peer when not exist.</li>
 * <li>Throw an exception if the table already has replication enabled on any of the column
 * families.</li>
 * <li>Throw an exception if the table exists on peer cluster but descriptors are not same.</li>
 * </ol>
 * @param tableName name of the table to sync to the peer
 * @param splits table split keys
 */
private CompletableFuture<Void> checkAndSyncTableToPeerClusters(TableName tableName, byte[][] splits) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(listReplicationPeers(), (peers, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        if (peers == null || peers.size() <= 0) {
            future.completeExceptionally(
                    new IllegalArgumentException("Found no peer cluster for replication."));
            return;
        }
        List<CompletableFuture<Void>> futures = new ArrayList<>();
        peers.stream().filter(peer -> peer.getPeerConfig().needToReplicate(tableName)).forEach(peer -> {
            futures.add(trySyncTableToPeerCluster(tableName, splits, peer));
        });
        addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()])),
                (result, err2) -> {
                    if (err2 != null) {
                        future.completeExceptionally(err2);
                    } else {
                        future.complete(result);
                    }
                });
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> splitRegion(byte[] regionName) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(getRegionLocation(regionName), (location, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        RegionInfo regionInfo = location.getRegion();
        if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
            future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. "
                    + "Replicas are auto-split when their primary is split."));
            return;
        }
        ServerName serverName = location.getServerName();
        if (serverName == null) {
            future.completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
            return;
        }
        addListener(split(regionInfo, null), (ret, err2) -> {
            if (err2 != null) {
                future.completeExceptionally(err2);
            } else {
                future.complete(ret);
            }
        });
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> flush(TableName tableName) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(tableExists(tableName), (exists, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
        } else if (!exists) {
            future.completeExceptionally(new TableNotFoundException(tableName));
        } else {
            addListener(isTableEnabled(tableName), (tableEnabled, err2) -> {
                if (err2 != null) {
                    future.completeExceptionally(err2);
                } else if (!tableEnabled) {
                    future.completeExceptionally(new TableNotEnabledException(tableName));
                } else {
                    addListener(execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(),
                            new HashMap<>()), (ret, err3) -> {
                                if (err3 != null) {
                                    future.completeExceptionally(err3);
                                } else {
                                    future.complete(ret);
                                }
                            });
                }
            });
        }
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

private CompletableFuture<Void> compareTableWithPeerCluster(TableName tableName, TableDescriptor tableDesc,
        ReplicationPeerDescription peer, AsyncAdmin peerAdmin) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(peerAdmin.getDescriptor(tableName), (peerTableDesc, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        if (peerTableDesc == null) {
            future.completeExceptionally(
                    new IllegalArgumentException("Failed to get table descriptor for table "
                            + tableName.getNameAsString() + " from peer cluster " + peer.getPeerId()));
            return;
        }
        if (TableDescriptor.COMPARATOR_IGNORE_REPLICATION.compare(peerTableDesc, tableDesc) != 0) {
            future.completeExceptionally(new IllegalArgumentException(
                    "Table " + tableName.getNameAsString() + " exists in peer cluster " + peer.getPeerId()
                            + ", but the table descriptors are not same when compared with source cluster."
                            + " Thus can not enable the table's replication switch."));
            return;
        }
        future.complete(null);
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<List<QuotaSettings>> getQuota(QuotaFilter filter) {
    CompletableFuture<List<QuotaSettings>> future = new CompletableFuture<>();
    Scan scan = QuotaTableUtil.makeScan(filter);
    this.connection.getTableBuilder(QuotaTableUtil.QUOTA_TABLE_NAME).build().scan(scan,
            new AdvancedScanResultConsumer() {
                List<QuotaSettings> settings = new ArrayList<>();

                @Override
                public void onNext(Result[] results, ScanController controller) {
                    for (Result result : results) {
                        try {
                            QuotaTableUtil.parseResultToCollection(result, settings);
                        } catch (IOException e) {
                            controller.terminate();
                            future.completeExceptionally(e);
                        }
                    }
                }

                @Override
                public void onError(Throwable error) {
                    future.completeExceptionally(error);
                }

                @Override
                public void onComplete() {
                    future.complete(settings);
                }
            });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> updateConfiguration() {
    CompletableFuture<Void> future = new CompletableFuture<Void>();
    addListener(getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS)),
            (status, err) -> {
                if (err != null) {
                    future.completeExceptionally(err);
                } else {
                    List<CompletableFuture<Void>> futures = new ArrayList<>();
                    status.getLiveServerMetrics().keySet()
                            .forEach(server -> futures.add(updateConfiguration(server)));
                    futures.add(updateConfiguration(status.getMasterName()));
                    status.getBackupMasterNames().forEach(master -> futures.add(updateConfiguration(master)));
                    addListener(
                            CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()])),
                            (result, err2) -> {
                                if (err2 != null) {
                                    future.completeExceptionally(err2);
                                } else {
                                    future.complete(result);
                                }
                            });
                }
            });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> enableTableReplication(TableName tableName) {
    if (tableName == null) {
        return failedFuture(new IllegalArgumentException("Table name is null"));
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(tableExists(tableName), (exist, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        if (!exist) {
            future.completeExceptionally(
                    new TableNotFoundException("Table '" + tableName.getNameAsString() + "' does not exists."));
            return;
        }
        addListener(getTableSplits(tableName), (splits, err1) -> {
            if (err1 != null) {
                future.completeExceptionally(err1);
            } else {
                addListener(checkAndSyncTableToPeerClusters(tableName, splits), (result, err2) -> {
                    if (err2 != null) {
                        future.completeExceptionally(err2);
                    } else {
                        addListener(setTableReplication(tableName, true), (result3, err3) -> {
                            if (err3 != null) {
                                future.completeExceptionally(err3);
                            } else {
                                future.complete(result3);
                            }
                        });
                    }
                });
            }
        });
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<CompactionState> getCompactionStateForRegion(byte[] regionName) {
    CompletableFuture<CompactionState> future = new CompletableFuture<>();
    addListener(getRegionLocation(regionName), (location, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        ServerName serverName = location.getServerName();
        if (serverName == null) {
            future.completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
            return;
        }
        addListener(this.<GetRegionInfoResponse>newAdminCaller().action((controller, stub) -> this
                .<GetRegionInfoRequest, GetRegionInfoResponse, GetRegionInfoResponse>adminCall(controller, stub,
                        RequestConverter.buildGetRegionInfoRequest(location.getRegion().getRegionName(), true),
                        (s, c, req, done) -> s.getRegionInfo(controller, req, done), resp -> resp))
                .serverName(serverName).call(), (resp2, err2) -> {
                    if (err2 != null) {
                        future.completeExceptionally(err2);
                    } else {
                        if (resp2.hasCompactionState()) {
                            future.complete(ProtobufUtil.createCompactionState(resp2.getCompactionState()));
                        } else {
                            future.complete(CompactionState.NONE);
                        }
                    }
                });
    });
    return future;
}