Example usage for java.util.concurrent CompletableFuture complete

List of usage examples for java.util.concurrent CompletableFuture complete

Introduction

On this page you can find example usage for java.util.concurrent CompletableFuture complete.

Prototype

public boolean complete(T value) 

Source Link

Document

If not already completed, sets the value returned by get() and related methods to the given value.
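
The pattern that recurs throughout the examples below is simple: one piece of code creates a CompletableFuture and hands it to a caller, while another piece of code (a callback, a worker thread, a scheduled task) later calls complete() with the result, or completeExceptionally() with the failure. The following is a minimal, self-contained sketch of that pattern; the fetchGreeting helper and the single-thread executor are illustrative assumptions, not taken from any of the projects listed below.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompleteExample {

    // Hypothetical helper: runs work on a background thread and exposes the
    // result as a CompletableFuture that is completed manually.
    static CompletableFuture<String> fetchGreeting(ExecutorService executor) {
        CompletableFuture<String> future = new CompletableFuture<>();
        executor.submit(() -> {
            try {
                // complete() returns true only for the first completion; later
                // calls (or an earlier cancel/completeExceptionally) return false.
                future.complete("hello");
            } catch (Exception e) {
                future.completeExceptionally(e);
            }
        });
        return future;
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            System.out.println(fetchGreeting(executor).get()); // prints "hello"
        } finally {
            executor.shutdown();
        }
    }
}

Callers blocked on get() or join() observe the value passed to complete(), while a completeExceptionally() call surfaces as an ExecutionException (or CompletionException for join()).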

Usage

From source file:org.onlab.nio.service.IOLoopMessaging.java

protected void dispatchLocally(DefaultMessage message) {
    String type = message.type();
    if (REPLY_MESSAGE_TYPE.equals(type)) {
        try {
            CompletableFuture<byte[]> futureResponse = responseFutures.getIfPresent(message.id());
            if (futureResponse != null) {
                futureResponse.complete(message.payload());
            } else {
                log.warn("Received a reply for message id:[{}]. " + " from {}. But was unable to locate the"
                        + " request handle", message.id(), message.sender());
            }
        } finally {
            responseFutures.invalidate(message.id());
        }
        return;
    }
    Consumer<DefaultMessage> handler = handlers.get(type);
    if (handler != null) {
        handler.accept(message);
    } else {
        log.debug("No handler registered for {}", type);
    }
}

From source file:org.apache.bookkeeper.tests.integration.utils.DockerUtils.java

public static String runCommand(DockerClient docker, String containerId, boolean ignoreError, String... cmd)
        throws Exception {
    CompletableFuture<Boolean> future = new CompletableFuture<>();
    String execid = docker.execCreateCmd(containerId).withCmd(cmd).withAttachStderr(true).withAttachStdout(true)
            .exec().getId();
    String cmdString = Arrays.stream(cmd).collect(Collectors.joining(" "));
    StringBuffer output = new StringBuffer();
    docker.execStartCmd(execid).withDetach(false).exec(new ResultCallback<Frame>() {
        @Override
        public void close() {
        }

        @Override
        public void onStart(Closeable closeable) {
            LOG.info("DOCKER.exec({}:{}): Executing...", containerId, cmdString);
        }

        @Override
        public void onNext(Frame object) {
            LOG.info("DOCKER.exec({}:{}): {}", containerId, cmdString, object);
            output.append(new String(object.getPayload(), UTF_8));
        }

        @Override
        public void onError(Throwable throwable) {
            future.completeExceptionally(throwable);
        }

        @Override
        public void onComplete() {
            LOG.info("DOCKER.exec({}:{}): Done", containerId, cmdString);
            future.complete(true);
        }
    });
    future.get();

    InspectExecResponse resp = docker.inspectExecCmd(execid).exec();
    while (resp.isRunning()) {
        Thread.sleep(200);
        resp = docker.inspectExecCmd(execid).exec();
    }
    int retCode = resp.getExitCode();
    if (retCode != 0) {
        LOG.error("DOCKER.exec({}:{}): failed with {} : {}", containerId, cmdString, retCode, output);
        if (!ignoreError) {
            throw new Exception(
                    String.format("cmd(%s) failed on %s with exitcode %d", cmdString, containerId, retCode));
        }
    } else {
        LOG.info("DOCKER.exec({}:{}): completed with {}", containerId, cmdString, retCode);
    }
    return output.toString();
}

From source file:com.ikanow.aleph2.analytics.storm.utils.StormControllerUtil.java

/**
 * Checks the jar cache to see if an entry already exists for this list of jars,
 * returns the path of that entry if it does exist, otherwise creates the jar, adds
 * the path to the cache and returns it.
 *
 * @param jars_to_merge
 * @return
 * @throws Exception 
 */
public static synchronized CompletableFuture<String> buildOrReturnCachedStormTopologyJar(
        final Collection<String> jars_to_merge, final String cached_jar_dir) {
    CompletableFuture<String> future = new CompletableFuture<String>();
    final String hashed_jar_name = JarBuilderUtil.getHashedJarName(jars_to_merge, cached_jar_dir);
    //1. Check cache for this jar via hash of jar names
    if (storm_topology_jars_cache.containsKey(hashed_jar_name)) {
        //if exists:
        //2. validate jars has not been updated
        Date most_recent_update = JarBuilderUtil.getMostRecentlyUpdatedFile(jars_to_merge);
        //if the cache is more recent than any of the files, we assume nothing has been updated
        if (storm_topology_jars_cache.get(hashed_jar_name).getTime() > most_recent_update.getTime()) {
            //RETURN return cached jar file path
            _logger.debug("Returning a cached copy of the jar");
            //update the cache copy to set its modified time to now so we don't clean it up
            JarBuilderUtil.updateJarModifiedTime(hashed_jar_name);
            future.complete(hashed_jar_name);
            return future;
        } else {
            //delete cache copy
            _logger.debug("Removing an expired cached copy of the jar");
            removeCachedJar(hashed_jar_name);
        }
    }

    //if we fall through
    //3. create jar
    _logger.debug("Fell through or cache copy is old, have to create a new version");
    if (buildStormTopologyJar(jars_to_merge, hashed_jar_name)) {
        //4. add jar to cache w/ current/newest file timestamp      
        storm_topology_jars_cache.put(hashed_jar_name, new Date());
        //RETURN return new jar file path
        future.complete(hashed_jar_name);
    } else {
        //had an error creating jar, throw an exception?
        future.completeExceptionally(new Exception("Error trying to create storm jar, see logs"));
    }
    return future;

}

From source file:opensnap.repository.MongoRepository.java

public CompletableFuture<T> insert(T elem) {
    CompletableFuture<T> future = new CompletableFuture<>();
    try {
        Document doc = Document.valueOf(mapper.writeValueAsString(elem));
        collection.insert(doc).register((result, e) -> {
            if (result != null && result.wasAcknowledged()) {
                elem.setId(doc.getObjectId("_id"));
                future.complete(elem);
            } else {
                logger.error("Error while creating a new document in insert() : " + doc.toString(), e);
                future.cancel(true);
            }
        });
    } catch (JsonProcessingException e) {
        logger.error("Error while creating element " + elem.toString() + " in insert()", e);
        future.cancel(true);
    }
    return future;
}

From source file:io.pravega.segmentstore.server.host.stat.AutoScaleProcessor.java

private CompletableFuture<Void> writeRequest(AutoScaleEvent event) {
    CompletableFuture<Void> result = new CompletableFuture<>();
    try {
        CompletableFuture.runAsync(() -> {
            try {
                writer.get().writeEvent(event.getKey(), event).get();
                result.complete(null);
            } catch (InterruptedException | ExecutionException e) {
                log.error("error sending request to requeststream {}", e);
                result.completeExceptionally(e);
            }
        }, executor);
    } catch (RejectedExecutionException e) {
        log.error("our executor queue is full. failed to post scale event for {}/{}/{}", event.getScope(),
                event.getStream(), event.getSegmentNumber());
        result.completeExceptionally(e);
    }

    return result;
}

From source file:io.pravega.service.server.host.stat.AutoScaleProcessor.java

private CompletableFuture<Void> writeRequest(ScaleEvent event) {
    CompletableFuture<Void> result = new CompletableFuture<>();
    try {
        CompletableFuture.runAsync(() -> {
            try {
                writer.get().writeEvent(event.getKey(), event).get();
                result.complete(null);
            } catch (InterruptedException | ExecutionException e) {
                log.error("error sending request to requeststream {}", e);
                result.completeExceptionally(e);
            }
        }, executor);
    } catch (RejectedExecutionException e) {
        log.error("our executor queue is full. failed to post scale event for {}/{}/{}", event.getScope(),
                event.getStream(), event.getSegmentNumber());
        result.completeExceptionally(e);
    }

    return result;
}

From source file:com.teradata.benchto.driver.execution.ExecutionSynchronizer.java

/**
 * Executes {@code callable} when the time comes. The {@code callable} is executed immediately, without
 * offloading to a background thread, if the requested execution time has already passed.
 */
public <T> CompletableFuture<T> execute(Instant when, Callable<T> callable) {
    if (!Instant.now().isBefore(when)) {
        // Run immediately.
        try {
            return completedFuture(callable.call());
        } catch (Exception e) {
            CompletableFuture<T> future = new CompletableFuture<>();
            future.completeExceptionally(e);
            return future;
        }
    }

    long delay = Instant.now().until(when, ChronoUnit.MILLIS);
    CompletableFuture<T> future = new CompletableFuture<>();
    executorService.schedule(() -> {
        try {
            future.complete(callable.call());
        } catch (Throwable e) {
            future.completeExceptionally(e);
            throw e;
        }
        return null;
    }, delay, MILLISECONDS);

    return future;
}
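
The same complete-from-a-scheduled-task shape can be reproduced standalone with a plain ScheduledExecutorService. This is only a sketch of the pattern shown above, not the Benchto API; the executeAt helper is a hypothetical stand-in for ExecutionSynchronizer.execute.

import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.concurrent.*;

public class ScheduledCompleteExample {

    static <T> CompletableFuture<T> executeAt(ScheduledExecutorService scheduler, Instant when,
            Callable<T> callable) {
        long delayMillis = Math.max(0, Instant.now().until(when, ChronoUnit.MILLIS));
        CompletableFuture<T> future = new CompletableFuture<>();
        scheduler.schedule(() -> {
            try {
                future.complete(callable.call());   // first completion wins
            } catch (Throwable t) {
                future.completeExceptionally(t);    // surface failures to get()/join()
            }
        }, delayMillis, TimeUnit.MILLISECONDS);
        return future;
    }

    public static void main(String[] args) throws Exception {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        try {
            Instant when = Instant.now().plusSeconds(1);
            System.out.println(executeAt(scheduler, when, () -> "done").get()); // prints "done" after ~1s
        } finally {
            scheduler.shutdown();
        }
    }
}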

From source file:co.runrightfast.vertx.demo.testHarness.jmx.DemoMXBeanImpl.java

private <A extends com.google.protobuf.Message> Handler<AsyncResult<Message<A>>> responseHandler(
        final CompletableFuture future, final Class<A> messageType) {
    return result -> {
        if (result.succeeded()) {
            future.complete(result.result().body());
        } else {
            log.logp(SEVERE, getClass().getName(),
                    String.format("responseHandler.failure::%s", messageType.getName()), "request failed",
                    result.cause());
            future.completeExceptionally(result.cause());
        }
    };
}

From source file:org.apache.hadoop.hbase.client.TestAsyncSingleRequestRpcRetryingCaller.java

@Test
public void testLocateError() throws IOException, InterruptedException, ExecutionException {
    AtomicBoolean errorTriggered = new AtomicBoolean(false);
    AtomicInteger count = new AtomicInteger(0);
    HRegionLocation loc = CONN.getRegionLocator(TABLE_NAME).getRegionLocation(ROW).get();
    AsyncRegionLocator mockedLocator = new AsyncRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER) {
        @Override
        CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
                RegionLocateType locateType, long timeoutNs) {
            if (tableName.equals(TABLE_NAME)) {
                CompletableFuture<HRegionLocation> future = new CompletableFuture<>();
                if (count.getAndIncrement() == 0) {
                    errorTriggered.set(true);
                    future.completeExceptionally(new RuntimeException("Inject error!"));
                } else {
                    future.complete(loc);
                }
                return future;
            } else {
                return super.getRegionLocation(tableName, row, locateType, timeoutNs);
            }
        }

        @Override
        void updateCachedLocation(HRegionLocation loc, Throwable exception) {
        }
    };
    try (AsyncConnectionImpl mockedConn = new AsyncConnectionImpl(CONN.getConfiguration(), CONN.registry,
            CONN.registry.getClusterId().get(), User.getCurrent()) {

        @Override
        AsyncRegionLocator getLocator() {
            return mockedLocator;
        }
    }) {
        RawAsyncTable table = mockedConn.getRawTableBuilder(TABLE_NAME)
                .setRetryPause(100, TimeUnit.MILLISECONDS).setMaxRetries(5).build();
        table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)).get();
        assertTrue(errorTriggered.get());
        errorTriggered.set(false);
        count.set(0);
        Result result = table.get(new Get(ROW).addColumn(FAMILY, QUALIFIER)).get();
        assertArrayEquals(VALUE, result.getValue(FAMILY, QUALIFIER));
        assertTrue(errorTriggered.get());
    }
}

From source file:co.runrightfast.vertx.demo.testHarness.jmx.DemoMXBeanImpl.java

@Override
public String lookupIPAddress(final String dnsServer, final String host) {
    final DnsClient client = vertx.createDnsClient(53, dnsServer);
    final CompletableFuture<String> future = new CompletableFuture<>();
    client.lookup("vertx.io", result -> {
        if (result.succeeded()) {
            future.complete(result.result());
        } else {
            future.completeExceptionally(result.cause());
        }
    });

    try {
        return future.get();
    } catch (final InterruptedException | ExecutionException ex) {
        throw new RuntimeException(ex);
    }
}