List of usage examples for the java.util.concurrent.CompletableFuture no-argument constructor
public CompletableFuture()
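Every example below follows the same basic pattern: construct an incomplete future, hand it to whatever code will eventually produce a value or an error, and finish it manually with complete() or completeExceptionally(). A minimal, self-contained sketch of that pattern (illustrative only, not taken from any of the projects below):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class ManualCompletionExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        // An empty future: nobody has supplied a result yet.
        CompletableFuture<String> future = new CompletableFuture<>();

        // Some other thread eventually completes it, successfully or exceptionally.
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(100);                 // simulate work
                future.complete("hello");          // success path
            } catch (InterruptedException e) {
                future.completeExceptionally(e);   // failure path
            }
        });
        worker.start();

        // get() blocks until one of the completion methods above is called.
        System.out.println(future.get());          // prints "hello"
    }
}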
From source file: com.devicehive.rpcclient.RpcClientActionTest.java

@Test
public void testListNetworkAction() throws Exception {
    ListNetworkRequest listNetworkRequest = new ListNetworkRequest();
    listNetworkRequest.setName(UUID.randomUUID().toString()); // nonexistent name
    Request request = Request.newBuilder().withBody(listNetworkRequest).build();

    CompletableFuture<Response> future = new CompletableFuture<>();
    client.call(request, future::complete);
    Response response = future.get(10, TimeUnit.SECONDS);

    ListNetworkResponse responseBody = (ListNetworkResponse) response.getBody();
    assertTrue(responseBody.getNetworks().isEmpty()); // the random name should match no networks
}
From source file: org.apache.hadoop.hbase.client.TestAsyncSingleRequestRpcRetryingCaller.java

@Test
public void testLocateError() throws IOException, InterruptedException, ExecutionException {
    AtomicBoolean errorTriggered = new AtomicBoolean(false);
    AtomicInteger count = new AtomicInteger(0);
    HRegionLocation loc = CONN.getRegionLocator(TABLE_NAME).getRegionLocation(ROW).get();
    AsyncRegionLocator mockedLocator = new AsyncRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER) {

        @Override
        CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
                RegionLocateType locateType, long timeoutNs) {
            if (tableName.equals(TABLE_NAME)) {
                CompletableFuture<HRegionLocation> future = new CompletableFuture<>();
                if (count.getAndIncrement() == 0) {
                    errorTriggered.set(true);
                    future.completeExceptionally(new RuntimeException("Inject error!"));
                } else {
                    future.complete(loc);
                }
                return future;
            } else {
                return super.getRegionLocation(tableName, row, locateType, timeoutNs);
            }
        }

        @Override
        void updateCachedLocation(HRegionLocation loc, Throwable exception) {
        }
    };
    try (AsyncConnectionImpl mockedConn = new AsyncConnectionImpl(CONN.getConfiguration(), CONN.registry,
            CONN.registry.getClusterId().get(), User.getCurrent()) {

        @Override
        AsyncRegionLocator getLocator() {
            return mockedLocator;
        }
    }) {
        RawAsyncTable table = mockedConn.getRawTableBuilder(TABLE_NAME)
                .setRetryPause(100, TimeUnit.MILLISECONDS).setMaxRetries(5).build();
        table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)).get();
        assertTrue(errorTriggered.get());
        errorTriggered.set(false);
        count.set(0);
        Result result = table.get(new Get(ROW).addColumn(FAMILY, QUALIFIER)).get();
        assertArrayEquals(VALUE, result.getValue(FAMILY, QUALIFIER));
        assertTrue(errorTriggered.get());
    }
}
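The interesting move in this test is that the mocked locator returns a future that is already completed exceptionally on the first call and already completed normally afterwards, so the retry path is exercised without a real failing server. A stripped-down sketch of that fault-injection idea, using hypothetical names rather than the HBase types:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical stand-in for a dependency that a retrying caller talks to.
class FlakyLookup {
    private final AtomicInteger calls = new AtomicInteger();

    CompletableFuture<String> lookup(String key) {
        CompletableFuture<String> future = new CompletableFuture<>();
        if (calls.getAndIncrement() == 0) {
            // First call: hand back an already-failed future to simulate a transient error.
            future.completeExceptionally(new RuntimeException("Inject error!"));
        } else {
            // Subsequent calls: hand back an already-successful future.
            future.complete("location-of-" + key);
        }
        return future;
    }
}

A caller with retry logic can then be pointed at FlakyLookup and asserted to succeed on its second attempt.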
From source file: io.pravega.segmentstore.server.reading.StorageReadManagerTests.java

private CompletableFuture<StorageReadManager.Result> sendRequest(StorageReadManager reader, long offset,
        int length) {
    CompletableFuture<StorageReadManager.Result> requestCompletion = new CompletableFuture<>();
    reader.execute(new StorageReadManager.Request(offset, length, requestCompletion::complete,
            requestCompletion::completeExceptionally, TIMEOUT));
    return requestCompletion;
}
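Here the future's own complete and completeExceptionally methods are passed, as method references, straight into the request object as its success and failure callbacks. The same shape in isolation, with an invented callback-style API standing in for StorageReadManager:

import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

// Hypothetical callback-based API: it reports success or failure through Consumers.
class CallbackReader {
    void read(long offset, int length, Consumer<byte[]> onSuccess, Consumer<Throwable> onFailure) {
        // For the sketch, succeed immediately; real code would do this asynchronously.
        onSuccess.accept(new byte[length]);
    }
}

class ReadAdapter {
    // Bridge the callback API to a CompletableFuture by wiring the callbacks to the future itself.
    static CompletableFuture<byte[]> readAsync(CallbackReader reader, long offset, int length) {
        CompletableFuture<byte[]> result = new CompletableFuture<>();
        reader.read(offset, length, result::complete, result::completeExceptionally);
        return result;
    }
}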
From source file: org.mule.service.oauth.internal.DefaultClientCredentialsOAuthDancer.java

@Override
public CompletableFuture<Void> refreshToken() {
    final Map<String, String> formData = new HashMap<>();
    formData.put(GRANT_TYPE_PARAMETER, GRANT_TYPE_CLIENT_CREDENTIALS);
    if (scopes != null) {
        formData.put(SCOPE_PARAMETER, scopes);
    }
    String authorization = null;
    if (encodeClientCredentialsInBody) {
        formData.put(CLIENT_ID_PARAMETER, clientId);
        formData.put(CLIENT_SECRET_PARAMETER, clientSecret);
    } else {
        authorization = "Basic " + encodeBase64String(format("%s:%s", clientId, clientSecret).getBytes());
    }
    try {
        TokenResponse tokenResponse = invokeTokenUrl(tokenUrl, formData, authorization, false, encoding);
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Retrieved access token, refresh token and expires from token url are: %s, %s, %s",
                    tokenResponse.getAccessToken(), tokenResponse.getRefreshToken(),
                    tokenResponse.getExpiresIn());
        }
        final DefaultResourceOwnerOAuthContext defaultUserState =
                (DefaultResourceOwnerOAuthContext) getContext();
        defaultUserState.setAccessToken(tokenResponse.getAccessToken());
        defaultUserState.setExpiresIn(tokenResponse.getExpiresIn());
        for (Entry<String, Object> customResponseParameterEntry : tokenResponse.getCustomResponseParameters()
                .entrySet()) {
            defaultUserState.getTokenResponseParameters().put(customResponseParameterEntry.getKey(),
                    customResponseParameterEntry.getValue());
        }
        updateResourceOwnerOAuthContext(defaultUserState);
        return completedFuture(null);
    } catch (TokenUrlResponseException | TokenNotFoundException e) {
        final CompletableFuture<Void> exceptionFuture = new CompletableFuture<>();
        exceptionFuture.completeExceptionally(e);
        return exceptionFuture;
    }
}
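Note the catch block: instead of letting a checked exception escape from a method that promises a CompletableFuture, it builds a future and fails it, so callers see the error through the future. A small sketch of that convention, with invented names; on Java 9+ the same thing is available directly as CompletableFuture.failedFuture(e):

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

class TokenClient {
    // Hypothetical async-style API: errors travel through the returned future, never as thrown exceptions.
    CompletableFuture<String> fetchToken(String url) {
        try {
            return CompletableFuture.completedFuture(callTokenEndpoint(url));
        } catch (IOException e) {
            CompletableFuture<String> failed = new CompletableFuture<>();
            failed.completeExceptionally(e); // equivalent to CompletableFuture.failedFuture(e) on Java 9+
            return failed;
        }
    }

    private String callTokenEndpoint(String url) throws IOException {
        return "token"; // placeholder for a real HTTP call
    }
}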
From source file: org.apache.hadoop.hbase.client.ZKAsyncRegistry.java

@Override
public CompletableFuture<RegionLocations> getMetaRegionLocation() {
    CompletableFuture<RegionLocations> future = new CompletableFuture<>();
    HRegionLocation[] locs = new HRegionLocation[znodePaths.metaReplicaZNodes.size()];
    MutableInt remaining = new MutableInt(locs.length);
    znodePaths.metaReplicaZNodes.forEach((replicaId, path) -> {
        if (replicaId == DEFAULT_REPLICA_ID) {
            exec(zk.getData(), path, ZKAsyncRegistry::getMetaProto).whenComplete((proto, error) -> {
                if (error != null) {
                    future.completeExceptionally(error);
                    return;
                }
                if (proto == null) {
                    future.completeExceptionally(new IOException("Meta znode is null"));
                    return;
                }
                Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
                if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
                    future.completeExceptionally(
                            new IOException("Meta region is in state " + stateAndServerName.getFirst()));
                    return;
                }
                locs[DEFAULT_REPLICA_ID] = new HRegionLocation(
                        getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond());
                tryComplete(remaining, locs, future);
            });
        } else {
            exec(zk.getData(), path, ZKAsyncRegistry::getMetaProto).whenComplete((proto, error) -> {
                if (future.isDone()) {
                    return;
                }
                if (error != null) {
                    LOG.warn("Failed to fetch " + path, error);
                    locs[replicaId] = null;
                } else if (proto == null) {
                    LOG.warn("Meta znode for replica " + replicaId + " is null");
                    locs[replicaId] = null;
                } else {
                    Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
                    if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
                        LOG.warn("Meta region for replica " + replicaId + " is in state "
                                + stateAndServerName.getFirst());
                        locs[replicaId] = null;
                    } else {
                        locs[replicaId] = new HRegionLocation(
                                getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId),
                                stateAndServerName.getSecond());
                    }
                }
                tryComplete(remaining, locs, future);
            });
        }
    });
    return future;
}
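This method fans out one ZooKeeper read per meta replica and completes a single future once every reply or failure has been recorded; the tryComplete helper (not shown above) presumably decrements the MutableInt counter and completes the future when every slot has been filled. The counting idea on its own, with an AtomicInteger and generic types instead of the HBase ones:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;

class FanIn {
    // Complete one future with all results once every per-source future has finished.
    static CompletableFuture<String[]> gather(List<CompletableFuture<String>> sources) {
        CompletableFuture<String[]> result = new CompletableFuture<>();
        String[] collected = new String[sources.size()];
        AtomicInteger remaining = new AtomicInteger(sources.size());

        for (int i = 0; i < sources.size(); i++) {
            final int slot = i;
            sources.get(i).whenComplete((value, error) -> {
                collected[slot] = (error != null) ? null : value; // record null for failed sources
                if (remaining.decrementAndGet() == 0) {
                    result.complete(collected); // last reply in: publish the whole array
                }
            });
        }
        return result;
    }
}

CompletableFuture.allOf covers the common case; a manual countdown like this one is useful when partial failures should still produce a usable result.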
From source file: io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Void> setConfigurationData(StreamConfiguration configuration) {
    Preconditions.checkNotNull(configuration);
    CompletableFuture<Void> result = new CompletableFuture<>();
    synchronized (lock) {
        if (this.configuration == null) {
            result.completeExceptionally(StoreException.create(StoreException.Type.DATA_NOT_FOUND, getName()));
        } else {
            this.configuration = configuration;
            result.complete(null);
        }
    }
    return result;
}
From source file: org.apache.hadoop.hbase.client.AsyncBatchRpcRetryingCaller.java

public AsyncBatchRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn, TableName tableName,
        List<? extends Row> actions, long pauseNs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs,
        int startLogErrorsCnt) {
    this.retryTimer = retryTimer;
    this.conn = conn;
    this.tableName = tableName;
    this.pauseNs = pauseNs;
    this.maxAttempts = maxAttempts;
    this.operationTimeoutNs = operationTimeoutNs;
    this.rpcTimeoutNs = rpcTimeoutNs;
    this.startLogErrorsCnt = startLogErrorsCnt;
    this.actions = new ArrayList<>(actions.size());
    this.futures = new ArrayList<>(actions.size());
    this.action2Future = new IdentityHashMap<>(actions.size());
    for (int i = 0, n = actions.size(); i < n; i++) {
        Row rawAction = actions.get(i);
        Action action = new Action(rawAction, i);
        if (rawAction instanceof Append || rawAction instanceof Increment) {
            action.setNonce(conn.getNonceGenerator().newNonce());
        }
        this.actions.add(action);
        CompletableFuture<T> future = new CompletableFuture<>();
        futures.add(future);
        action2Future.put(action, future);
    }
    this.action2Errors = new IdentityHashMap<>();
    this.startNs = System.nanoTime();
}
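The constructor allocates one CompletableFuture per submitted Row, so each action in the batch can succeed or fail independently while the caller can still wait on all of them together. A condensed sketch of that per-item bookkeeping, with generic items instead of HBase's Row/Action; the allOf convenience at the end is an illustration of how callers typically consume such a list, not part of the HBase class:

import java.util.ArrayList;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

class BatchCaller<T> {
    final List<CompletableFuture<T>> futures = new ArrayList<>();
    final Map<Object, CompletableFuture<T>> itemToFuture = new IdentityHashMap<>();

    BatchCaller(List<?> items) {
        for (Object item : items) {
            CompletableFuture<T> future = new CompletableFuture<>(); // one future per item
            futures.add(future);
            itemToFuture.put(item, future);
        }
    }

    // Callers can wait on the whole batch at once while still inspecting individual outcomes.
    CompletableFuture<Void> all() {
        return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
    }
}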
From source file: org.onlab.netty.NettyMessaging.java

protected CompletableFuture<Void> sendAsync(Endpoint ep, InternalMessage message) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    try {
        if (ep.equals(localEp)) {
            dispatchLocally(message);
            future.complete(null);
        } else {
            Channel channel = null;
            try {
                channel = channels.borrowObject(ep);
                channel.writeAndFlush(message).addListener(channelFuture -> {
                    if (!channelFuture.isSuccess()) {
                        future.completeExceptionally(channelFuture.cause());
                    } else {
                        future.complete(null);
                    }
                });
            } finally {
                channels.returnObject(ep, channel);
            }
        }
    } catch (Exception e) {
        future.completeExceptionally(e);
    }
    return future;
}
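sendAsync translates Netty's ChannelFuture listener callback into a CompletableFuture so the rest of the code can stay library-agnostic. A generic version of that adapter, written against a made-up ListenerFuture interface rather than Netty's actual API:

import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

// Hypothetical stand-in for a library future that only offers listener registration.
interface ListenerFuture<V> {
    void addListener(Consumer<ListenerFuture<V>> listener);
    boolean isSuccess();
    V result();
    Throwable cause();
}

class FutureBridge {
    // Convert a listener-style future into a CompletableFuture.
    static <V> CompletableFuture<V> toCompletableFuture(ListenerFuture<V> libFuture) {
        CompletableFuture<V> future = new CompletableFuture<>();
        libFuture.addListener(done -> {
            if (done.isSuccess()) {
                future.complete(done.result());
            } else {
                future.completeExceptionally(done.cause());
            }
        });
        return future;
    }
}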
From source file: org.apache.bookkeeper.tests.integration.utils.DockerUtils.java

public static String runCommand(DockerClient docker, String containerId, boolean ignoreError, String... cmd)
        throws Exception {
    CompletableFuture<Boolean> future = new CompletableFuture<>();
    String execid = docker.execCreateCmd(containerId).withCmd(cmd).withAttachStderr(true).withAttachStdout(true)
            .exec().getId();
    String cmdString = Arrays.stream(cmd).collect(Collectors.joining(" "));
    StringBuffer output = new StringBuffer();
    docker.execStartCmd(execid).withDetach(false).exec(new ResultCallback<Frame>() {
        @Override
        public void close() {
        }

        @Override
        public void onStart(Closeable closeable) {
            LOG.info("DOCKER.exec({}:{}): Executing...", containerId, cmdString);
        }

        @Override
        public void onNext(Frame object) {
            LOG.info("DOCKER.exec({}:{}): {}", containerId, cmdString, object);
            output.append(new String(object.getPayload(), UTF_8));
        }

        @Override
        public void onError(Throwable throwable) {
            future.completeExceptionally(throwable);
        }

        @Override
        public void onComplete() {
            LOG.info("DOCKER.exec({}:{}): Done", containerId, cmdString);
            future.complete(true);
        }
    });
    future.get();

    InspectExecResponse resp = docker.inspectExecCmd(execid).exec();
    while (resp.isRunning()) {
        Thread.sleep(200);
        resp = docker.inspectExecCmd(execid).exec();
    }
    int retCode = resp.getExitCode();
    if (retCode != 0) {
        LOG.error("DOCKER.exec({}:{}): failed with {} : {}", containerId, cmdString, retCode, output);
        if (!ignoreError) {
            throw new Exception(
                    String.format("cmd(%s) failed on %s with exitcode %d", cmdString, containerId, retCode));
        }
    } else {
        LOG.info("DOCKER.exec({}:{}): completed with {}", containerId, cmdString, retCode);
    }
    return output.toString();
}
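Here the CompletableFuture<Boolean> is used purely as a latch: the docker-java ResultCallback completes it from onComplete or onError, and the calling thread simply blocks on future.get() until the streamed output has finished. The latch usage in miniature, with a hypothetical callback-driven task instead of the Docker client:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

class LatchExample {
    // Block the calling thread until an asynchronous, callback-driven task signals that it is done.
    static void runAndWait(Runnable asyncTask) throws InterruptedException, ExecutionException {
        CompletableFuture<Boolean> done = new CompletableFuture<>();

        new Thread(() -> {
            try {
                asyncTask.run();
                done.complete(true);            // normal completion releases the waiter
            } catch (Throwable t) {
                done.completeExceptionally(t);  // failures are rethrown from get()
            }
        }).start();

        done.get(); // waits here until complete() or completeExceptionally() is called
    }
}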
From source file: io.sqp.client.impl.SqpConnectionImpl.java

@Override
public synchronized CompletableFuture<SqpConnection> connect(String host, int port, String path,
        String database) {
    _state = ConnectionState.Connecting;
    _database = database;
    ClientManager clientManager = ClientManager.createClient();
    ClientEndpointConfig cec = ClientEndpointConfig.Builder.create().build();
    String scheme = "ws";
    _connectionFuture = new CompletableFuture<>();
    URI uri;
    try {
        uri = new URI(scheme, null, host, port, path, null, null);
        _logger.log(Level.INFO, "Attempting to connect to '" + uri + "'");
        clientManager.connectToServer(this, cec, uri);
    } catch (URISyntaxException | DeploymentException | IOException e) {
        _connectionFuture.completeExceptionally(
                new ConnectionException("Failed to connect to server: " + e.getMessage(), e));
    }
    return _connectionFuture;
}
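connect() stores the new future in a field (_connectionFuture) and returns it immediately; it is presumably completed later by the WebSocket endpoint's open/error handlers (not shown here), while the catch block fails it up front if the connection attempt cannot even be started. A compact sketch of that deferred-completion pattern with invented names:

import java.util.concurrent.CompletableFuture;

// Hypothetical client whose open/error events arrive on callback methods.
class EventDrivenClient {
    private volatile CompletableFuture<EventDrivenClient> connectionFuture;

    CompletableFuture<EventDrivenClient> connect(String uri) {
        connectionFuture = new CompletableFuture<>();
        try {
            startHandshake(uri); // asynchronous; onOpen/onError will fire later
        } catch (RuntimeException e) {
            connectionFuture.completeExceptionally(e); // fail fast if we cannot even start
        }
        return connectionFuture;
    }

    // Called by the underlying transport once the session is established.
    void onOpen() {
        connectionFuture.complete(this);
    }

    // Called by the underlying transport if the handshake fails.
    void onError(Throwable cause) {
        connectionFuture.completeExceptionally(cause);
    }

    private void startHandshake(String uri) {
        // placeholder for the real connection logic
    }
}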