Example usage for java.util.concurrent CompletableFuture supplyAsync

List of usage examples for java.util.concurrent CompletableFuture supplyAsync

Introduction

On this page you can find example usage for java.util.concurrent CompletableFuture supplyAsync.

Prototype

public static <U> CompletableFuture<U> supplyAsync(Supplier<U> supplier) 

Document

Returns a new CompletableFuture that is asynchronously completed by a task running in the ForkJoinPool#commonPool() with the value obtained by calling the given Supplier.
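
Before the project-specific examples below, here is a minimal, self-contained sketch of the call (the class name and printed strings are illustrative only, not taken from any project on this page):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class SupplyAsyncSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        // The Supplier runs asynchronously on a ForkJoinPool.commonPool() worker thread.
        CompletableFuture<String> greeting = CompletableFuture
                .supplyAsync(() -> "hello from " + Thread.currentThread().getName());

        // Non-blocking continuation: applied once the supplier has produced its value.
        CompletableFuture<Integer> length = greeting.thenApply(String::length);

        // Block only at the end, just to keep the example short.
        System.out.println(greeting.get());
        System.out.println("length = " + length.get());
    }
}

The project examples that follow combine supplyAsync in the same way with thenApply, thenAccept, whenComplete, and get.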

Usage

From source file:com.ikanow.aleph2.shared.crud.mongodb.services.MongoDbCrudService.java

@Override
public CompletableFuture<Boolean> optimizeQuery(final List<String> ordered_field_list) {

    // Mongo appears to have an ~100 char limit on the index name, Fongo does not, so add a manual check
    // so we don't get the situation where the tests work but it fails operationally

    String approx_index_name = ordered_field_list.stream().collect(Collectors.joining("."));
    if (approx_index_name.length() > 100) {
        throw new MongoException(ErrorUtils.get(ErrorUtils.MONGODB_INDEX_TOO_LONG, approx_index_name));
    }

    return CompletableFuture.supplyAsync(() -> {
        final BasicDBObject index_keys = new BasicDBObject(ordered_field_list.stream()
                .collect(Collectors.toMap(f -> f, f -> 1, (v1, v2) -> 1, LinkedHashMap::new)));

        _state.orig_coll.createIndex(index_keys, new BasicDBObject("background", true));

        return true;
    });
}

From source file:eu.interedition.collatex.tools.CollationServer.java

private static String detectDotPath() {
    for (String detectionCommand : new String[] { "which dot", "where dot.exe" }) {
        try {

            final Process process = Runtime.getRuntime().exec(detectionCommand);
            try (BufferedReader processReader = new BufferedReader(
                    new InputStreamReader(process.getInputStream(), Charset.defaultCharset()))) {
                final CompletableFuture<Optional<String>> path = CompletableFuture
                        .supplyAsync(() -> processReader.lines().map(String::trim)
                                .filter(l -> l.toLowerCase().contains("dot")).findFirst());
                process.waitFor();
                final String dotPath = path.get().get();
                LOG.info(() -> "Detected GraphViz' dot at '" + dotPath + "'");
                return dotPath;
            }
        } catch (Throwable t) {
            LOG.log(Level.FINE, detectionCommand, t);
        }
    }
    return null;
}

From source file:io.atomix.cluster.messaging.impl.NettyMessagingService.java

@Override
public CompletableFuture<Void> stop() {
    if (started.compareAndSet(true, false)) {
        return CompletableFuture.supplyAsync(() -> {
            boolean interrupted = false;
            try {
                try {
                    serverChannel.close().sync();
                } catch (InterruptedException e) {
                    interrupted = true;
                }
                Future<?> serverShutdownFuture = serverGroup.shutdownGracefully();
                Future<?> clientShutdownFuture = clientGroup.shutdownGracefully();
                try {
                    serverShutdownFuture.sync();
                } catch (InterruptedException e) {
                    interrupted = true;
                }
                try {
                    clientShutdownFuture.sync();
                } catch (InterruptedException e) {
                    interrupted = true;
                }
                timeoutFuture.cancel(false);
                timeoutExecutor.shutdown();
            } finally {
                log.info("Stopped");
                if (interrupted) {
                    Thread.currentThread().interrupt();
                }
            }
            return null;
        });
    }
    return CompletableFuture.completedFuture(null);
}

From source file:org.apache.flink.runtime.rest.RestServerEndpointITCase.java

/**
 * Tests that after calling {@link RestServerEndpoint#closeAsync()}, the handlers are closed
 * first, and we wait for in-flight requests to finish. As long as not all handlers are closed,
 * HTTP requests should be served.
 */
@Test
public void testShouldWaitForHandlersWhenClosing() throws Exception {
    testHandler.closeFuture = new CompletableFuture<>();
    final HandlerBlocker handlerBlocker = new HandlerBlocker(timeout);
    testHandler.handlerBody = id -> {
        // Intentionally schedule the work on a different thread. This is to simulate
        // handlers where the CompletableFuture is finished by the RPC framework.
        return CompletableFuture.supplyAsync(() -> {
            handlerBlocker.arriveAndBlock();
            return new TestResponse(id);
        });
    };

    // Initiate closing RestServerEndpoint but the test handler should block.
    final CompletableFuture<Void> closeRestServerEndpointFuture = serverEndpoint.closeAsync();
    assertThat(closeRestServerEndpointFuture.isDone(), is(false));

    final CompletableFuture<TestResponse> request = sendRequestToTestHandler(new TestRequest(1));
    handlerBlocker.awaitRequestToArrive();

    // Allow handler to close but there is still one in-flight request which should prevent
    // the RestServerEndpoint from closing.
    testHandler.closeFuture.complete(null);
    assertThat(closeRestServerEndpointFuture.isDone(), is(false));

    // Finish the in-flight request.
    handlerBlocker.unblockRequest();

    request.get(timeout.getSize(), timeout.getUnit());
    closeRestServerEndpointFuture.get(timeout.getSize(), timeout.getUnit());
}

From source file:org.apache.geode.internal.cache.BackupDUnitTest.java

/**
 * Test for bug 42419
 */
@Test
public void testBackupWhileBucketIsCreated() throws Throwable {
    Host host = Host.getHost(0);
    vm0 = host.getVM(0);
    vm1 = host.getVM(1);
    final VM vm2 = host.getVM(2);

    logger.info("Creating region in VM0");
    createPersistentRegion(vm0);

    // create a bucket on vm0
    createData(vm0, 0, 1, "A", "region1");

    // create the pr on vm1, which won't have any buckets
    logger.info("Creating region in VM1");
    createPersistentRegion(vm1);

    CompletableFuture<BackupStatus> backupStatusFuture = CompletableFuture.supplyAsync(() -> backup(vm2));
    CompletableFuture<Void> createDataFuture = CompletableFuture
            .runAsync(() -> createData(vm0, 1, 5, "A", "region1"));
    CompletableFuture.allOf(backupStatusFuture, createDataFuture);

    BackupStatus status = backupStatusFuture.get();
    assertEquals(2, status.getBackedUpDiskStores().size());
    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());

    validateBackupComplete();

    createData(vm0, 0, 5, "C", "region1");

    assertEquals(2, status.getBackedUpDiskStores().size());
    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());

    closeCache(vm0);
    closeCache(vm1);

    // Destroy the current data
    Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
        public void run() {
            try {
                cleanDiskDirs();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    });

    restoreBackup(2);

    createPersistentRegionsAsync();

    checkData(vm0, 0, 1, "A", "region1");
}

From source file:org.apache.james.blob.objectstorage.ObjectStorageBlobsDAO.java

public CompletableFuture<ContainerName> createContainer(ContainerName name) {
    return CompletableFuture
            .supplyAsync(() -> blobStore.createContainerInLocation(DEFAULT_LOCATION, name.value()))
            .thenApply(created -> {
                if (!created) {
                    LOGGER.debug("{} already existed", name);
                }
                return name;
            });
}

From source file:org.apache.storm.localizer.AsyncLocalizerTest.java

@Test
public void testRequestDownloadTopologyBlobs() throws Exception {
    final String topoId = "TOPO-12345";
    final String user = "user";
    LocalAssignment la = new LocalAssignment();
    la.set_topology_id(topoId);
    la.set_owner(user);
    ExecutorInfo ei = new ExecutorInfo();
    ei.set_task_start(1);
    ei.set_task_end(1);
    la.add_to_executors(ei);
    final String topoName = "TOPO";
    final int port = 8080;
    final String simpleLocalName = "simple.txt";
    final String simpleKey = "simple";

    final String stormLocal = "/tmp/storm-local/";
    final File userDir = new File(stormLocal, user);
    final String stormRoot = stormLocal + topoId + "/";

    final String localizerRoot = getTestLocalizerRoot();
    final String simpleLocalFile = localizerRoot + user + "/simple";
    final String simpleCurrentLocalFile = localizerRoot + user + "/simple.current";

    final StormTopology st = new StormTopology();
    st.set_spouts(new HashMap<>());
    st.set_bolts(new HashMap<>());
    st.set_state_spouts(new HashMap<>());

    Map<String, Map<String, Object>> topoBlobMap = new HashMap<>();
    Map<String, Object> simple = new HashMap<>();
    simple.put("localname", simpleLocalName);
    simple.put("uncompress", false);
    topoBlobMap.put(simpleKey, simple);

    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.STORM_LOCAL_DIR, stormLocal);
    AdvancedFSOps ops = mock(AdvancedFSOps.class);
    ConfigUtils mockedCU = mock(ConfigUtils.class);

    Map<String, Object> topoConf = new HashMap<>(conf);
    topoConf.put(Config.TOPOLOGY_BLOBSTORE_MAP, topoBlobMap);
    topoConf.put(Config.TOPOLOGY_NAME, topoName);

    List<LocalizedResource> localizedList = new ArrayList<>();
    LocalizedResource simpleLocal = new LocalizedResource(simpleKey, simpleLocalFile, false);
    localizedList.add(simpleLocal);

    AsyncLocalizer bl = spy(
            new AsyncLocalizer(conf, ops, localizerRoot, new AtomicReference<>(new HashMap<>()), null));
    ConfigUtils orig = ConfigUtils.setInstance(mockedCU);
    try {
        when(mockedCU.supervisorStormDistRootImpl(conf, topoId)).thenReturn(stormRoot);
        when(mockedCU.readSupervisorStormConfImpl(conf, topoId)).thenReturn(topoConf);
        when(mockedCU.readSupervisorTopologyImpl(conf, topoId, ops)).thenReturn(st);

        //Write the mocking backwards so the actual method is not called on the spy object
        doReturn(CompletableFuture.supplyAsync(() -> null)).when(bl).requestDownloadBaseTopologyBlobs(la, port,
                null);
        doReturn(userDir).when(bl).getLocalUserFileCacheDir(user);
        doReturn(localizedList).when(bl).getBlobs(any(List.class), eq(user), eq(topoName), eq(userDir));

        Future<Void> f = bl.requestDownloadTopologyBlobs(la, port, null);
        f.get(20, TimeUnit.SECONDS);
        // We should be done now...

        verify(bl).getLocalUserFileCacheDir(user);

        verify(ops).fileExists(userDir);
        verify(ops).forceMkdir(userDir);

        verify(bl).getBlobs(any(List.class), eq(user), eq(topoName), eq(userDir));

        verify(ops).createSymlink(new File(stormRoot, simpleLocalName), new File(simpleCurrentLocalFile));
    } finally {
        bl.close();
        ConfigUtils.setInstance(orig);
    }
}

From source file:org.pdfsam.update.UpdatesController.java

@EventListener
public void checkForUpdates(UpdateCheckRequest event) {
    LOG.debug(DefaultI18nContext.getInstance().i18n("Checking for updates"));
    CompletableFuture.supplyAsync(service::getLatestVersion).thenAccept(current -> {
        if (isNotBlank(current) && !pdfsam.version().equals(current)) {
            LOG.info(DefaultI18nContext.getInstance().i18n("PDFsam {0} is available for download", current));
            Platform.runLater(() -> eventStudio().broadcast(new UpdateAvailableEvent(current)));
        }
    }).whenComplete((r, e) -> {
        if (nonNull(e)) {
            LOG.warn(DefaultI18nContext.getInstance().i18n("Unable to find the latest available version."), e);
        }
    });
}

From source file:org.pentaho.big.data.kettle.plugins.kafka.KafkaDialogHelper.java

@SuppressWarnings("unused")
void clusterNameChanged(Event event) {
    String current = wTopic.getText();
    if ((wbCluster.getSelection() && StringUtil.isEmpty(wClusterName.getText()))
            || (!wbCluster.getSelection() && StringUtil.isEmpty(wBootstrapServers.getText()))) {
        return;
    }
    String clusterName = wClusterName.getText();
    boolean isCluster = wbCluster.getSelection();
    String directBootstrapServers = wBootstrapServers == null ? "" : wBootstrapServers.getText();
    Map<String, String> config = getConfig(optionsTable);
    try {
        CompletableFuture.supplyAsync(() -> listTopics(clusterName, isCluster, directBootstrapServers, config))
                .thenAccept(topicMap -> Display.getDefault().syncExec(() -> populateTopics(topicMap, current)))
                .get(MAX_WAIT, TimeUnit.MILLISECONDS);
        // we do a get here to avoid losing exceptions that occur in another thread
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IllegalStateException(e);
    } catch (TimeoutException | ExecutionException e) {
        throw new IllegalStateException(e);
    }
}

From source file:org.raspinloop.fmi.VMRunner.java

@Override
public void run() {
    String separator = System.getProperty("file.separator");
    String path = System.getProperty("java.home") + separator + "bin" + separator + "java";

    ProcessBuilder processBuilder;
    try {
        processBuilder = new ProcessBuilder(path,
                VMRunnerUtils.getRunnerAgentArgument(".", jSonConfigName, false),
                VMRunnerUtils.getWeaverAgentArgument(".", false), vMArguments, "-cp",
                classPath.stream().collect(Collectors.joining(":")), className, programArguments);
    } catch (Exception e) {
        logger.error("Cannot configure process: " + e.getMessage());
        throw new VMRunnerUncheckedException(e);
    }

    if (System.getProperty("mock") == null || !System.getProperty("mock").equalsIgnoreCase("true")) {
        try {

            logger.debug(
                    "==> starting JVM " + processBuilder.command().stream().collect(Collectors.joining(" ")));
            process = processBuilder.inheritIO().start();
        } catch (Exception e) {
            logger.error("Cannot start process: " + e.getMessage());
            throw new VMRunnerUncheckedException(e);
        }
        CompletableFuture.supplyAsync(() -> {
            try {
                return process.waitFor();
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
        }).thenAccept(i -> logger.info("<== JVM Stopped code(" + i + ")"));

    } else {
        logger.info("PLEASE RUN " + processBuilder.command().stream().collect(Collectors.joining(" ")));
        while (true) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                logger.info("PLEASE STOP ");
                return;
            }
        }
    }
}