List of usage examples for java.util.concurrent CompletableFuture completeExceptionally
public boolean completeExceptionally(Throwable ex)
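Before the project examples, a minimal self-contained sketch of the method's contract (the class name and exception message here are invented for illustration, not taken from any project below): completeExceptionally moves the future into a failed state, and dependents observe the supplied Throwable, with join() wrapping it in an unchecked CompletionException and get() in an ExecutionException.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class CompleteExceptionallyDemo {
    public static void main(String[] args) {
        CompletableFuture<String> future = new CompletableFuture<>();
        // A producer thread decides the outcome; here it fails the future.
        new Thread(() -> future.completeExceptionally(new IllegalStateException("boom"))).start();
        try {
            future.join(); // join() rethrows the cause wrapped in CompletionException
        } catch (CompletionException e) {
            System.out.println("Failed with: " + e.getCause()); // IllegalStateException: boom
        }
    }
}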
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
@Override
public CompletableFuture<Void> cloneTableSchema(TableName tableName, TableName newTableName,
        boolean preserveSplits) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(tableExists(tableName), (exist, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        if (!exist) {
            future.completeExceptionally(new TableNotFoundException(tableName));
            return;
        }
        addListener(tableExists(newTableName), (exist1, err1) -> {
            if (err1 != null) {
                future.completeExceptionally(err1);
                return;
            }
            if (exist1) {
                future.completeExceptionally(new TableExistsException(newTableName));
                return;
            }
            addListener(getDescriptor(tableName), (tableDesc, err2) -> {
                if (err2 != null) {
                    future.completeExceptionally(err2);
                    return;
                }
                TableDescriptor newTableDesc = TableDescriptorBuilder.copy(newTableName, tableDesc);
                if (preserveSplits) {
                    addListener(getTableSplits(tableName), (splits, err3) -> {
                        if (err3 != null) {
                            future.completeExceptionally(err3);
                        } else {
                            addListener(
                                splits != null ? createTable(newTableDesc, splits) : createTable(newTableDesc),
                                (result, err4) -> {
                                    if (err4 != null) {
                                        future.completeExceptionally(err4);
                                    } else {
                                        future.complete(result);
                                    }
                                });
                        }
                    });
                } else {
                    addListener(createTable(newTableDesc), (result, err5) -> {
                        if (err5 != null) {
                            future.completeExceptionally(err5);
                        } else {
                            future.complete(result);
                        }
                    });
                }
            });
        });
    });
    return future;
}
From source file:org.apache.hadoop.hbase.client.RawAsyncTableImpl.java
private static <REQ, PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
        HRegionLocation loc, ClientService.Interface stub, REQ req, Converter<PREQ, byte[], REQ> reqConvert,
        RpcCall<PRESP, PREQ> rpcCall, Converter<RESP, HBaseRpcController, PRESP> respConverter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    try {
        rpcCall.call(stub, controller, reqConvert.convert(loc.getRegionInfo().getRegionName(), req),
            new RpcCallback<PRESP>() {
                @Override
                public void run(PRESP resp) {
                    if (controller.failed()) {
                        future.completeExceptionally(controller.getFailed());
                    } else {
                        try {
                            future.complete(respConverter.convert(controller, resp));
                        } catch (IOException e) {
                            future.completeExceptionally(e);
                        }
                    }
                }
            });
    } catch (IOException e) {
        future.completeExceptionally(e);
    }
    return future;
}
From source file:org.apache.hadoop.hbase.client.RawAsyncTableImpl.java
private static <RESP> CompletableFuture<RESP> mutateRow(HBaseRpcController controller, HRegionLocation loc,
        ClientService.Interface stub, RowMutations mutation,
        Converter<MultiRequest, byte[], RowMutations> reqConvert, Function<Result, RESP> respConverter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    try {
        byte[] regionName = loc.getRegionInfo().getRegionName();
        MultiRequest req = reqConvert.convert(regionName, mutation);
        stub.multi(controller, req, new RpcCallback<MultiResponse>() {
            @Override
            public void run(MultiResponse resp) {
                if (controller.failed()) {
                    future.completeExceptionally(controller.getFailed());
                } else {
                    try {
                        org.apache.hadoop.hbase.client.MultiResponse multiResp =
                            ResponseConverter.getResults(req, resp, controller.cellScanner());
                        Throwable ex = multiResp.getException(regionName);
                        if (ex != null) {
                            future.completeExceptionally(ex instanceof IOException ? ex
                                : new IOException(
                                    "Failed to mutate row: " + Bytes.toStringBinary(mutation.getRow()), ex));
                        } else {
                            future.complete(respConverter
                                .apply((Result) multiResp.getResults().get(regionName).result.get(0)));
                        }
                    } catch (IOException e) {
                        future.completeExceptionally(e);
                    }
                }
            }
        });
    } catch (IOException e) {
        future.completeExceptionally(e);
    }
    return future;
}
From source file:org.apache.hadoop.hbase.client.TestAsyncSingleRequestRpcRetryingCaller.java
private <T> CompletableFuture<T> failedFuture() {
    CompletableFuture<T> future = new CompletableFuture<>();
    future.completeExceptionally(new RuntimeException("Inject error!"));
    return future;
}
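For reference, JDK 9 added the factory method CompletableFuture.failedFuture(Throwable), so a helper like the one above can be expressed without calling completeExceptionally by hand; a minimal sketch reusing the test's injected exception:

private <T> CompletableFuture<T> failedFuture() {
    // JDK 9+: returns a CompletableFuture that is already completed exceptionally
    return CompletableFuture.failedFuture(new RuntimeException("Inject error!"));
}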
From source file:org.apache.hadoop.hbase.client.TestAsyncSingleRequestRpcRetryingCaller.java
@Test
public void testLocateError() throws IOException, InterruptedException, ExecutionException {
    AtomicBoolean errorTriggered = new AtomicBoolean(false);
    AtomicInteger count = new AtomicInteger(0);
    HRegionLocation loc = CONN.getRegionLocator(TABLE_NAME).getRegionLocation(ROW).get();
    AsyncRegionLocator mockedLocator = new AsyncRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER) {
        @Override
        CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
                RegionLocateType locateType, long timeoutNs) {
            if (tableName.equals(TABLE_NAME)) {
                CompletableFuture<HRegionLocation> future = new CompletableFuture<>();
                if (count.getAndIncrement() == 0) {
                    errorTriggered.set(true);
                    future.completeExceptionally(new RuntimeException("Inject error!"));
                } else {
                    future.complete(loc);
                }
                return future;
            } else {
                return super.getRegionLocation(tableName, row, locateType, timeoutNs);
            }
        }

        @Override
        void updateCachedLocation(HRegionLocation loc, Throwable exception) {
        }
    };
    try (AsyncConnectionImpl mockedConn = new AsyncConnectionImpl(CONN.getConfiguration(), CONN.registry,
            CONN.registry.getClusterId().get(), User.getCurrent()) {
        @Override
        AsyncRegionLocator getLocator() {
            return mockedLocator;
        }
    }) {
        RawAsyncTable table = mockedConn.getRawTableBuilder(TABLE_NAME)
                .setRetryPause(100, TimeUnit.MILLISECONDS).setMaxRetries(5).build();
        table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)).get();
        assertTrue(errorTriggered.get());
        errorTriggered.set(false);
        count.set(0);
        Result result = table.get(new Get(ROW).addColumn(FAMILY, QUALIFIER)).get();
        assertArrayEquals(VALUE, result.getValue(FAMILY, QUALIFIER));
        assertTrue(errorTriggered.get());
    }
}
From source file:org.apache.hadoop.hbase.client.ZKAsyncRegistry.java
private static <T> CompletableFuture<T> exec(BackgroundPathable<?> opBuilder, String path,
        CuratorEventProcessor<T> processor) {
    CompletableFuture<T> future = new CompletableFuture<>();
    try {
        opBuilder.inBackground((client, event) -> {
            try {
                future.complete(processor.process(event));
            } catch (Exception e) {
                future.completeExceptionally(e);
            }
        }).withUnhandledErrorListener((msg, e) -> future.completeExceptionally(e)).forPath(path);
    } catch (Exception e) {
        future.completeExceptionally(e);
    }
    return future;
}
From source file:org.apache.hadoop.hbase.client.ZKAsyncRegistry.java
@Override
public CompletableFuture<RegionLocations> getMetaRegionLocation() {
    CompletableFuture<RegionLocations> future = new CompletableFuture<>();
    HRegionLocation[] locs = new HRegionLocation[znodePaths.metaReplicaZNodes.size()];
    MutableInt remaining = new MutableInt(locs.length);
    znodePaths.metaReplicaZNodes.forEach((replicaId, path) -> {
        if (replicaId == DEFAULT_REPLICA_ID) {
            exec(zk.getData(), path, ZKAsyncRegistry::getMetaProto).whenComplete((proto, error) -> {
                if (error != null) {
                    future.completeExceptionally(error);
                    return;
                }
                if (proto == null) {
                    future.completeExceptionally(new IOException("Meta znode is null"));
                    return;
                }
                Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
                if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
                    future.completeExceptionally(
                        new IOException("Meta region is in state " + stateAndServerName.getFirst()));
                    return;
                }
                locs[DEFAULT_REPLICA_ID] = new HRegionLocation(
                    getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond());
                tryComplete(remaining, locs, future);
            });
        } else {
            exec(zk.getData(), path, ZKAsyncRegistry::getMetaProto).whenComplete((proto, error) -> {
                if (future.isDone()) {
                    return;
                }
                if (error != null) {
                    LOG.warn("Failed to fetch " + path, error);
                    locs[replicaId] = null;
                } else if (proto == null) {
                    LOG.warn("Meta znode for replica " + replicaId + " is null");
                    locs[replicaId] = null;
                } else {
                    Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
                    if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
                        LOG.warn("Meta region for replica " + replicaId + " is in state "
                            + stateAndServerName.getFirst());
                        locs[replicaId] = null;
                    } else {
                        locs[replicaId] = new HRegionLocation(
                            getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId),
                            stateAndServerName.getSecond());
                    }
                }
                tryComplete(remaining, locs, future);
            });
        }
    });
    return future;
}
From source file:org.apache.james.blob.cassandra.CassandraBlobsDAO.java
@Override
public CompletableFuture<byte[]> readBytes(BlobId blobId) {
    try {
        return readBlobParts(blobId).collectList()
                .map(parts -> Bytes.concat(parts.toArray(new byte[0][])))
                .toFuture();
    } catch (ObjectStoreException e) {
        CompletableFuture<byte[]> error = new CompletableFuture<>();
        error.completeExceptionally(e);
        return error;
    }
}
From source file:org.apache.pulsar.broker.admin.impl.PersistentTopicsBase.java
protected void internalDeletePartitionedTopic(boolean authoritative, boolean force) {
    validateAdminAccessForTenant(topicName.getTenant());
    PartitionedTopicMetadata partitionMetadata = getPartitionedTopicMetadata(topicName, authoritative);
    int numPartitions = partitionMetadata.partitions;
    if (numPartitions > 0) {
        final CompletableFuture<Void> future = new CompletableFuture<>();
        final AtomicInteger count = new AtomicInteger(numPartitions);
        try {
            for (int i = 0; i < numPartitions; i++) {
                TopicName topicNamePartition = topicName.getPartition(i);
                pulsar().getAdminClient().persistentTopics().deleteAsync(topicNamePartition.toString(), force)
                        .whenComplete((r, ex) -> {
                            if (ex != null) {
                                if (ex instanceof NotFoundException) {
                                    // if the sub-topic is not found, the client might not have called create
                                    // producer or it might have been deleted earlier, so we ignore the 404 error.
                                    // For all other exception, we fail the delete partition method even if a single
                                    // partition is failed to be deleted
                                    if (log.isDebugEnabled()) {
                                        log.debug("[{}] Partition not found: {}", clientAppId(),
                                                topicNamePartition);
                                    }
                                } else {
                                    future.completeExceptionally(ex);
                                    log.error("[{}] Failed to delete partition {}", clientAppId(),
                                            topicNamePartition, ex);
                                    return;
                                }
                            } else {
                                log.info("[{}] Deleted partition {}", clientAppId(), topicNamePartition);
                            }
                            if (count.decrementAndGet() == 0) {
                                future.complete(null);
                            }
                        });
            }
            future.get();
        } catch (Exception e) {
            Throwable t = e.getCause();
            if (t instanceof PreconditionFailedException) {
                throw new RestException(Status.PRECONDITION_FAILED,
                        "Topic has active producers/subscriptions");
            } else {
                throw new RestException(t);
            }
        }
    }
    // Only tries to delete the znode for partitioned topic when all its partitions are successfully deleted
    String path = path(PARTITIONED_TOPIC_PATH_ZNODE, namespaceName.toString(), domain(),
            topicName.getEncodedLocalName());
    try {
        globalZk().delete(path, -1);
        globalZkCache().invalidate(path);
        // we wait for the data to be synced in all quorums and the observers
        Thread.sleep(PARTITIONED_TOPIC_WAIT_SYNC_TIME_MS);
        log.info("[{}] Deleted partitioned topic {}", clientAppId(), topicName);
    } catch (KeeperException.NoNodeException nne) {
        throw new RestException(Status.NOT_FOUND, "Partitioned topic does not exist");
    } catch (KeeperException.BadVersionException e) {
        log.warn("[{}] Failed to delete partitioned topic {}: concurrent modification", clientAppId(),
                topicName);
        throw new RestException(Status.CONFLICT, "Concurrent modification");
    } catch (Exception e) {
        log.error("[{}] Failed to delete partitioned topic {}", clientAppId(), topicName, e);
        throw new RestException(e);
    }
}
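The Pulsar example above fans in the per-partition deleteAsync calls with an AtomicInteger countdown and a manually completed future. As a rough alternative sketch (not Pulsar code; deleteAsync here is a placeholder, and the 404-tolerant handling from the original is omitted), CompletableFuture.allOf can express the same fan-in: it completes once every input future completes, and completes exceptionally if any of them did, so no counter or explicit completeExceptionally is needed.

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

class FanInSketch {
    // Placeholder for an async delete; in the example above this is
    // pulsar().getAdminClient().persistentTopics().deleteAsync(...).
    static CompletableFuture<Void> deleteAsync(String partition) {
        return CompletableFuture.completedFuture(null);
    }

    static CompletableFuture<Void> deleteAll(List<String> partitions) {
        List<CompletableFuture<Void>> deletes = partitions.stream()
                .map(FanInSketch::deleteAsync)
                .collect(Collectors.toList());
        // Completes when every delete has completed; if any delete completed exceptionally,
        // the returned future completes exceptionally with that cause wrapped in CompletionException.
        return CompletableFuture.allOf(deletes.toArray(new CompletableFuture[0]));
    }
}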
From source file:org.apache.pulsar.broker.admin.impl.PersistentTopicsBase.java
public static CompletableFuture<PartitionedTopicMetadata> getPartitionedTopicMetadata(PulsarService pulsar,
        String clientAppId, String originalPrincipal, AuthenticationDataSource authenticationData,
        TopicName topicName) {
    CompletableFuture<PartitionedTopicMetadata> metadataFuture = new CompletableFuture<>();
    try {
        // (1) authorize client
        try {
            checkAuthorization(pulsar, topicName, clientAppId, authenticationData);
        } catch (RestException e) {
            try {
                validateAdminAccessForTenant(pulsar, clientAppId, originalPrincipal, topicName.getTenant());
            } catch (RestException authException) {
                log.warn("Failed to authorize {} on cluster {}", clientAppId, topicName.toString());
                throw new PulsarClientException(
                        String.format("Authorization failed %s on topic %s with error %s", clientAppId,
                                topicName.toString(), authException.getMessage()));
            }
        } catch (Exception ex) {
            // throw without wrapping to PulsarClientException that considers: unknown error marked as internal
            // server error
            log.warn("Failed to authorize {} on cluster {} with unexpected exception {}", clientAppId,
                    topicName.toString(), ex.getMessage(), ex);
            throw ex;
        }
        String path = path(PARTITIONED_TOPIC_PATH_ZNODE, topicName.getNamespace(),
                topicName.getDomain().toString(), topicName.getEncodedLocalName());
        // validates global-namespace contains local/peer cluster: if peer/local cluster present then lookup can
        // serve/redirect request else fail partitioned-metadata-request so, client fails while creating
        // producer/consumer
        checkLocalOrGetPeerReplicationCluster(pulsar, topicName.getNamespaceObject())
                .thenCompose(res -> fetchPartitionedTopicMetadataAsync(pulsar, path))
                .thenAccept(metadata -> {
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Total number of partitions for topic {} is {}", clientAppId, topicName,
                                metadata.partitions);
                    }
                    metadataFuture.complete(metadata);
                }).exceptionally(ex -> {
                    metadataFuture.completeExceptionally(ex.getCause());
                    return null;
                });
    } catch (Exception ex) {
        metadataFuture.completeExceptionally(ex);
    }
    return metadataFuture;
}