Example usage for java.util.concurrent.CompletableFuture: the CompletableFuture() constructor

Introduction

This page collects example usage of the java.util.concurrent.CompletableFuture constructor, CompletableFuture().

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
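
The usage examples below all follow the same pattern: construct an incomplete CompletableFuture, let a callback decide how to complete it (normally or exceptionally), and return the future to the caller. The following is a minimal, self-contained sketch of that pattern; the executor and the "done" value are illustrative and do not come from any of the examples below.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class IncompleteFutureSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // Create a new incomplete CompletableFuture; nothing is running yet.
        CompletableFuture<String> future = new CompletableFuture<>();

        // A callback running elsewhere decides how the future completes.
        executor.submit(() -> {
            try {
                future.complete("done");           // normal completion
            } catch (RuntimeException e) {
                future.completeExceptionally(e);   // failure completion
            }
        });

        System.out.println(future.get()); // blocks until the callback completes the future
        executor.shutdown();
    }
}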

Usage

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public <S, R> CompletableFuture<R> coprocessorService(Function<RpcChannel, S> stubMaker,
        ServiceCaller<S, R> callable) {
    MasterCoprocessorRpcChannelImpl channel = new MasterCoprocessorRpcChannelImpl(
            this.<Message>newMasterCaller());
    S stub = stubMaker.apply(channel);
    CompletableFuture<R> future = new CompletableFuture<>();
    ClientCoprocessorRpcController controller = new ClientCoprocessorRpcController();
    callable.call(stub, controller, resp -> {
        if (controller.failed()) {
            future.completeExceptionally(controller.getFailed());
        } else {
            future.complete(resp);
        }
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public <S, R> CompletableFuture<R> coprocessorService(Function<RpcChannel, S> stubMaker,
        ServiceCaller<S, R> callable, ServerName serverName) {
    RegionServerCoprocessorRpcChannelImpl channel = new RegionServerCoprocessorRpcChannelImpl(
            this.<Message>newServerCaller().serverName(serverName));
    S stub = stubMaker.apply(channel);
    CompletableFuture<R> future = new CompletableFuture<>();
    ClientCoprocessorRpcController controller = new ClientCoprocessorRpcController();
    callable.call(stub, controller, resp -> {
        if (controller.failed()) {
            future.completeExceptionally(controller.getFailed());
        } else {
            future.complete(resp);
        }
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> enableTableReplication(TableName tableName) {
    if (tableName == null) {
        return failedFuture(new IllegalArgumentException("Table name is null"));
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(tableExists(tableName), (exist, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        if (!exist) {
            future.completeExceptionally(
                    new TableNotFoundException("Table '" + tableName.getNameAsString() + "' does not exists."));
            return;
        }
        addListener(getTableSplits(tableName), (splits, err1) -> {
            if (err1 != null) {
                future.completeExceptionally(err1);
            } else {
                addListener(checkAndSyncTableToPeerClusters(tableName, splits), (result, err2) -> {
                    if (err2 != null) {
                        future.completeExceptionally(err2);
                    } else {
                        addListener(setTableReplication(tableName, true), (result3, err3) -> {
                            if (err3 != null) {
                                future.completeExceptionally(err3);
                            } else {
                                future.complete(result3);
                            }
                        });
                    }
                });
            }
        });
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> disableTableReplication(TableName tableName) {
    if (tableName == null) {
        return failedFuture(new IllegalArgumentException("Table name is null"));
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(tableExists(tableName), (exist, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        if (!exist) {
            future.completeExceptionally(
                    new TableNotFoundException("Table '" + tableName.getNameAsString() + "' does not exists."));
            return;
        }
        addListener(setTableReplication(tableName, false), (result, err2) -> {
            if (err2 != null) {
                future.completeExceptionally(err2);
            } else {
                future.complete(result);
            }
        });
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

private CompletableFuture<byte[][]> getTableSplits(TableName tableName) {
    CompletableFuture<byte[][]> future = new CompletableFuture<>();
    addListener(
            getRegions(tableName).thenApply(regions -> regions.stream()
                    .filter(RegionReplicaUtil::isDefaultReplica).collect(Collectors.toList())),
            (regions, err2) -> {
                if (err2 != null) {
                    future.completeExceptionally(err2);
                    return;
                }
                if (regions.size() == 1) {
                    future.complete(null);
                } else {
                    byte[][] splits = new byte[regions.size() - 1][];
                    for (int i = 1; i < regions.size(); i++) {
                        splits[i - 1] = regions.get(i).getStartKey();
                    }
                    future.complete(splits);
                }
            });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Connect to peer and check the table descriptor on peer:
 * <ol>
 * <li>Create the same table on peer when not exist.</li>
 * <li>Throw an exception if the table already has replication enabled on any of the column
 * families.</li>
 * <li>Throw an exception if the table exists on peer cluster but descriptors are not same.</li>
 * </ol>
 * @param tableName name of the table to sync to the peer
 * @param splits table split keys
 */
private CompletableFuture<Void> checkAndSyncTableToPeerClusters(TableName tableName, byte[][] splits) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(listReplicationPeers(), (peers, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        if (peers == null || peers.size() <= 0) {
            future.completeExceptionally(
                    new IllegalArgumentException("Found no peer cluster for replication."));
            return;
        }
        List<CompletableFuture<Void>> futures = new ArrayList<>();
        peers.stream().filter(peer -> peer.getPeerConfig().needToReplicate(tableName)).forEach(peer -> {
            futures.add(trySyncTableToPeerCluster(tableName, splits, peer));
        });
        addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()])),
                (result, err2) -> {
                    if (err2 != null) {
                        future.completeExceptionally(err2);
                    } else {
                        future.complete(result);
                    }
                });
    });
    return future;
}
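
The example above fans out one future per replication peer and joins them with CompletableFuture.allOf, bridging the combined result back into a single incomplete future. Below is a stripped-down sketch of that fan-out/join idea, independent of HBase; the sub-tasks here are illustrative only.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class AllOfSketch {
    public static void main(String[] args) {
        List<CompletableFuture<Void>> subTasks = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            final int id = i;
            // Each sub-task runs and completes independently.
            subTasks.add(CompletableFuture.runAsync(() -> System.out.println("sync task " + id)));
        }

        // Bridge the combined completion into a single incomplete future,
        // mirroring the complete/completeExceptionally pattern used above.
        CompletableFuture<Void> combined = new CompletableFuture<>();
        CompletableFuture.allOf(subTasks.toArray(new CompletableFuture<?>[0])).whenComplete((ignored, err) -> {
            if (err != null) {
                combined.completeExceptionally(err);
            } else {
                combined.complete(null);
            }
        });

        combined.join(); // waits for all sub-tasks; throws if any of them failed
    }
}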

From source file:org.hyperledger.fabric.sdk.Channel.java

private Collection<ProposalResponse> sendProposalToPeers(Collection<Peer> peers, SignedProposal signedProposal,
        TransactionContext transactionContext) throws InvalidArgumentException, ProposalException {
    checkPeers(peers);

    if (transactionContext.getVerify()) {
        try {
            loadCACertificates(false);
        } catch (Exception e) {
            throw new ProposalException(e);
        }
    }

    final String txID = transactionContext.getTxID();

    class Pair {
        private final Peer peer;

        private final Future<FabricProposalResponse.ProposalResponse> future;

        private Pair(Peer peer, Future<FabricProposalResponse.ProposalResponse> future) {
            this.peer = peer;
            this.future = future;
        }
    }
    List<Pair> peerFuturePairs = new ArrayList<>();
    for (Peer peer : peers) {
        logger.debug(format("Channel %s send proposal to %s, txID: %s", name, peer.toString(), txID));

        if (null != diagnosticFileDumper) {
            logger.trace(format("Sending to channel %s, peer: %s, proposal: %s, txID: %s", name, peer, txID,
                    diagnosticFileDumper.createDiagnosticProtobufFile(signedProposal.toByteArray())));

        }

        Future<FabricProposalResponse.ProposalResponse> proposalResponseListenableFuture;
        try {
            proposalResponseListenableFuture = peer.sendProposalAsync(signedProposal);
        } catch (Exception e) {
            proposalResponseListenableFuture = new CompletableFuture<>();
            ((CompletableFuture) proposalResponseListenableFuture).completeExceptionally(e);

        }
        peerFuturePairs.add(new Pair(peer, proposalResponseListenableFuture));

    }

    Collection<ProposalResponse> proposalResponses = new ArrayList<>();
    for (Pair peerFuturePair : peerFuturePairs) {

        FabricProposalResponse.ProposalResponse fabricResponse = null;
        String message;
        int status = 500;
        final String peerName = peerFuturePair.peer.toString();
        try {
            fabricResponse = peerFuturePair.future.get(transactionContext.getProposalWaitTime(),
                    TimeUnit.MILLISECONDS);
            message = fabricResponse.getResponse().getMessage();
            status = fabricResponse.getResponse().getStatus();
            peerFuturePair.peer.setHasConnected();
            logger.debug(format("Channel %s, transaction: %s got back from peer %s status: %d, message: %s",
                    name, txID, peerName, status, message));
            if (null != diagnosticFileDumper) {
                logger.trace(format("Got back from channel %s, peer: %s, proposal response: %s", name, peerName,
                        diagnosticFileDumper.createDiagnosticProtobufFile(fabricResponse.toByteArray())));

            }
        } catch (InterruptedException e) {
            message = "Sending proposal with transaction: " + txID + " to " + peerName
                    + " failed because of interruption";
            logger.error(message, e);
        } catch (TimeoutException e) {
            message = format(
                    "Channel %s sending proposal with transaction %s to %s failed because of timeout(%d milliseconds) expiration",
                    toString(), txID, peerName, transactionContext.getProposalWaitTime());
            logger.error(message, e);
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof Error) {
                String emsg = "Sending proposal with txID: " + txID + " to " + peerName + " failed because of "
                        + cause.getMessage();
                logger.error(emsg, new Exception(cause)); //wrapped in exception to get full stack trace.
                throw (Error) cause;
            } else {
                if (cause instanceof StatusRuntimeException) {
                    message = format(
                            "Channel %s Sending proposal with transaction: %s to %s failed because of: gRPC failure=%s",
                            toString(), txID, peerName, ((StatusRuntimeException) cause).getStatus());
                } else {
                    message = format(
                            "Channel %s sending proposal with transaction: %s to %s failed because of: %s",
                            toString(), txID, peerName, cause.getMessage());
                }
                logger.error(message, new Exception(cause)); //wrapped in exception to get full stack trace.
            }
        }

        ProposalResponse proposalResponse = new ProposalResponse(transactionContext, status, message);
        proposalResponse.setProposalResponse(fabricResponse);
        proposalResponse.setProposal(signedProposal);
        proposalResponse.setPeer(peerFuturePair.peer);

        if (fabricResponse != null && transactionContext.getVerify()) {
            proposalResponse.verify(client.getCryptoSuite());
        }

        proposalResponses.add(proposalResponse);
    }

    return proposalResponses;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

private CompletableFuture<Void> trySyncTableToPeerCluster(TableName tableName, byte[][] splits,
        ReplicationPeerDescription peer) {
    Configuration peerConf = null;
    try {
        peerConf = ReplicationPeerConfigUtil.getPeerClusterConfiguration(connection.getConfiguration(), peer);
    } catch (IOException e) {
        return failedFuture(e);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(ConnectionFactory.createAsyncConnection(peerConf), (conn, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        addListener(getDescriptor(tableName), (tableDesc, err1) -> {
            if (err1 != null) {
                future.completeExceptionally(err1);
                return;
            }
            AsyncAdmin peerAdmin = conn.getAdmin();
            addListener(peerAdmin.tableExists(tableName), (exist, err2) -> {
                if (err2 != null) {
                    future.completeExceptionally(err2);
                    return;
                }
                if (!exist) {
                    CompletableFuture<Void> createTableFuture = null;
                    if (splits == null) {
                        createTableFuture = peerAdmin.createTable(tableDesc);
                    } else {
                        createTableFuture = peerAdmin.createTable(tableDesc, splits);
                    }
                    addListener(createTableFuture, (result, err3) -> {
                        if (err3 != null) {
                            future.completeExceptionally(err3);
                        } else {
                            future.complete(result);
                        }
                    });
                } else {
                    addListener(compareTableWithPeerCluster(tableName, tableDesc, peer, peerAdmin),
                            (result, err4) -> {
                                if (err4 != null) {
                                    future.completeExceptionally(err4);
                                } else {
                                    future.complete(result);
                                }
                            });
                }
            });
        });
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

private CompletableFuture<Void> compareTableWithPeerCluster(TableName tableName, TableDescriptor tableDesc,
        ReplicationPeerDescription peer, AsyncAdmin peerAdmin) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(peerAdmin.getDescriptor(tableName), (peerTableDesc, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        if (peerTableDesc == null) {
            future.completeExceptionally(
                    new IllegalArgumentException("Failed to get table descriptor for table "
                            + tableName.getNameAsString() + " from peer cluster " + peer.getPeerId()));
            return;
        }
        if (TableDescriptor.COMPARATOR_IGNORE_REPLICATION.compare(peerTableDesc, tableDesc) != 0) {
            future.completeExceptionally(new IllegalArgumentException(
                    "Table " + tableName.getNameAsString() + " exists in peer cluster " + peer.getPeerId()
                            + ", but the table descriptors are not same when compared with source cluster."
                            + " Thus can not enable the table's replication switch."));
            return;
        }
        future.complete(null);
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Set the table's replication switch if the table's replication switch is already not set.
 * @param tableName name of the table
 * @param enableRep is replication switch enable or disable
 */
private CompletableFuture<Void> setTableReplication(TableName tableName, boolean enableRep) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(getDescriptor(tableName), (tableDesc, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        if (!tableDesc.matchReplicationScope(enableRep)) {
            int scope = enableRep ? HConstants.REPLICATION_SCOPE_GLOBAL : HConstants.REPLICATION_SCOPE_LOCAL;
            TableDescriptor newTableDesc = TableDescriptorBuilder.newBuilder(tableDesc)
                    .setReplicationScope(scope).build();
            addListener(modifyTable(newTableDesc), (result, err2) -> {
                if (err2 != null) {
                    future.completeExceptionally(err2);
                } else {
                    future.complete(result);
                }
            });
        } else {
            future.complete(null);
        }
    });
    return future;
}