Example usage for java.util.concurrent CompletableFuture supplyAsync

List of usage examples for java.util.concurrent CompletableFuture supplyAsync

Introduction

On this page you can find usage examples for java.util.concurrent CompletableFuture supplyAsync.

Prototype

public static <U> CompletableFuture<U> supplyAsync(Supplier<U> supplier, Executor executor) 

Document

Returns a new CompletableFuture that is asynchronously completed by a task running in the given executor with the value obtained by calling the given Supplier.
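
Before the real-world examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed on this page) of what the two-argument overload does: the Supplier runs on a thread of the given Executor, and the returned CompletableFuture completes with the Supplier's result.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SupplyAsyncDemo {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        try {
            // the Supplier is evaluated on a thread of the given executor,
            // not on the common ForkJoinPool
            CompletableFuture<String> greeting = CompletableFuture
                    .supplyAsync(() -> "hello from " + Thread.currentThread().getName(), executor);

            // attach a non-blocking continuation; join() is used here only to keep the demo short
            System.out.println(greeting.thenApply(String::toUpperCase).join());
        } finally {
            executor.shutdown();
        }
    }
}

The single-argument overload behaves the same way but runs the Supplier on ForkJoinPool.commonPool().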

Usage

From source file:com.thinkbiganalytics.feedmgr.nifi.cache.NiFiFlowInspectorManager.java

public void addGroupToInspect(String groupId, int level, NiFiFlowInspection parent) {
    int nextLevel = level + 1;
    processGroupsToInspect.add(groupId);
    inspectingCount.incrementAndGet();
    NiFiFlowInspector processGroupInspector = new NiFiFlowInspector(groupId, nextLevel, parent, restClient);
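    // run the inspection off the calling thread and handle the completed result asynchronously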
    CompletableFuture<NiFiFlowInspection> flowInspection = CompletableFuture
            .supplyAsync(() -> processGroupInspector.inspect(), executorService);
    flowInspection.thenAcceptAsync(this::flowInspectionComplete);
}

From source file:com.teradata.benchto.driver.listeners.BenchmarkServiceExecutionListener.java

@Override
public Future<?> executionFinished(QueryExecutionResult executionResult) {
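    // getMeasurements returns a CompletableFuture itself, so thenCompose(future -> future) flattens the nested stage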
    return CompletableFuture.supplyAsync(() -> getMeasurements(executionResult), taskExecutor::execute)
            .thenCompose(future -> future)
            .thenApply(measurements -> buildExecutionFinishedRequest(executionResult, measurements))
            .thenAccept(request -> {
                benchmarkServiceClient.finishExecution(executionResult.getBenchmark().getUniqueName(),
                        executionResult.getBenchmark().getSequenceId(),
                        executionSequenceId(executionResult.getQueryExecution()), request);
            });
}

From source file:ai.grakn.graph.internal.computer.GraknSparkComputer.java

private Future<ComputerResult> submitWithExecutor(Executor exec) {
    getGraphRDD(this);
    jobGroupId = Integer.toString(ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE));
    String jobDescription = this.vertexProgram == null ? this.mapReducers.toString()
            : this.vertexProgram + "+" + this.mapReducers;

    this.sparkConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION) + "/" + jobGroupId);
    this.apacheConfiguration.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));
    this.hadoopConfiguration.set(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION,
            this.sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION));

    // create the completable future
    return CompletableFuture.supplyAsync(() -> {
        graknGraphRDD.sparkContext.setJobGroup(jobGroupId, jobDescription);
        final long startTime = System.currentTimeMillis();

        GraknSparkMemory memory = null;
        JavaPairRDD<Object, VertexWritable> computedGraphRDD = null;
        JavaPairRDD<Object, ViewIncomingPayload<Object>> viewIncomingRDD = null;

        ////////////////////////////////
        // process the vertex program //
        ////////////////////////////////
        if (null != this.vertexProgram) {
            // set up the vertex program and wire up configurations
            this.mapReducers.addAll(this.vertexProgram.getMapReducers());
            memory = new GraknSparkMemory(this.vertexProgram, this.mapReducers, graknGraphRDD.sparkContext);
            this.vertexProgram.setup(memory);
            memory.broadcastMemory(graknGraphRDD.sparkContext);
            final HadoopConfiguration vertexProgramConfiguration = new HadoopConfiguration();
            this.vertexProgram.storeState(vertexProgramConfiguration);
            ConfigurationUtils.copy(vertexProgramConfiguration, apacheConfiguration);
            ConfUtil.mergeApacheIntoHadoopConfiguration(vertexProgramConfiguration, hadoopConfiguration);
            // execute the vertex program
            while (true) {
                memory.setInTask(true);
                viewIncomingRDD = GraknSparkExecutor.executeVertexProgramIteration(graknGraphRDD.loadedGraphRDD,
                        viewIncomingRDD, memory, vertexProgramConfiguration);
                memory.setInTask(false);
                if (this.vertexProgram.terminate(memory))
                    break;
                else {
                    memory.incrIteration();
                    memory.broadcastMemory(graknGraphRDD.sparkContext);
                }
            }
            // write the computed graph to the respective output (rdd or output format)
            final String[] elementComputeKeys = this.vertexProgram.getElementComputeKeys()
                    .toArray(new String[this.vertexProgram.getElementComputeKeys().size()]);
            computedGraphRDD = GraknSparkExecutor.prepareFinalGraphRDD(graknGraphRDD.loadedGraphRDD,
                    viewIncomingRDD, elementComputeKeys);
            if ((hadoopConfiguration.get(Constants.GREMLIN_HADOOP_GRAPH_OUTPUT_FORMAT, null) != null
                    || hadoopConfiguration.get(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD, null) != null)
                    && !this.persist.equals(Persist.NOTHING)) {
                try {
                    hadoopConfiguration
                            .getClass(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD, OutputFormatRDD.class,
                                    OutputRDD.class)
                            .newInstance().writeGraphRDD(apacheConfiguration, computedGraphRDD);
                } catch (final InstantiationException | IllegalAccessException e) {
                    throw new IllegalStateException(e.getMessage(), e);
                }
            }
        }

        final boolean computedGraphCreated = computedGraphRDD != null;
        if (!computedGraphCreated) {
            computedGraphRDD = graknGraphRDD.loadedGraphRDD;
        }

        final Memory.Admin finalMemory = null == memory ? new MapMemory() : new MapMemory(memory);

        //////////////////////////////
        // process the map reducers //
        //////////////////////////////
        if (!this.mapReducers.isEmpty()) {
            for (final MapReduce mapReduce : this.mapReducers) {
                // execute the map reduce job
                final HadoopConfiguration newApacheConfiguration = new HadoopConfiguration(apacheConfiguration);
                mapReduce.storeState(newApacheConfiguration);
                // map
                final JavaPairRDD mapRDD = GraknSparkExecutor.executeMap(computedGraphRDD, mapReduce,
                        newApacheConfiguration);
                // combine
                final JavaPairRDD combineRDD = mapReduce.doStage(MapReduce.Stage.COMBINE)
                        ? GraknSparkExecutor.executeCombine(mapRDD, newApacheConfiguration)
                        : mapRDD;
                // reduce
                final JavaPairRDD reduceRDD = mapReduce.doStage(MapReduce.Stage.REDUCE)
                        ? GraknSparkExecutor.executeReduce(combineRDD, mapReduce, newApacheConfiguration)
                        : combineRDD;
                // write the map reduce output back to disk and computer result memory
                try {
                    mapReduce.addResultToMemory(finalMemory,
                            hadoopConfiguration
                                    .getClass(Constants.GREMLIN_SPARK_GRAPH_OUTPUT_RDD, OutputFormatRDD.class,
                                            OutputRDD.class)
                                    .newInstance()
                                    .writeMemoryRDD(apacheConfiguration, mapReduce.getMemoryKey(), reduceRDD));
                } catch (final InstantiationException | IllegalAccessException e) {
                    throw new IllegalStateException(e.getMessage(), e);
                }
            }
        }

        // unpersist the computed graph if it will not be used again (no PersistedOutputRDD)
        if (!graknGraphRDD.outputToSpark || this.persist.equals(GraphComputer.Persist.NOTHING)) {
            computedGraphRDD.unpersist();
        }
        // delete any file system or rdd data if persist nothing
        String outputPath = sparkConfiguration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION);
        if (null != outputPath && this.persist.equals(GraphComputer.Persist.NOTHING)) {
            if (graknGraphRDD.outputToHDFS) {
                graknGraphRDD.fileSystemStorage.rm(outputPath);
            }
            if (graknGraphRDD.outputToSpark) {
                graknGraphRDD.sparkContextStorage.rm(outputPath);
            }
        }
        // update runtime and return the newly computed graph
        finalMemory.setRuntime(System.currentTimeMillis() - startTime);
        return new DefaultComputerResult(
                InputOutputHelper.getOutputGraph(apacheConfiguration, this.resultGraph, this.persist),
                finalMemory.asImmutable());
    }, exec);
}

From source file:de.ks.text.AsciiDocEditor.java

protected void searchForText() {
    String searchKey = searchField.textProperty().getValueSafe().toLowerCase(Locale.ROOT);
    String editorContent = editor.textProperty().getValueSafe().toLowerCase(Locale.ROOT);
    searchField.setDisable(true);
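    // run the text scan off the JavaFX thread, resuming after the previous match when possible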
    CompletableFuture<Integer> search = CompletableFuture.supplyAsync(() -> {
        if (lastSearch != null && lastSearch.matches(searchKey, editorContent)) {
            int startPoint = lastSearch.getPosition() + searchKey.length();
            int newPosition;
            if (startPoint >= editorContent.length()) {
                newPosition = -1;
            } else {
                newPosition = editorContent.substring(startPoint).indexOf(searchKey);
                if (newPosition >= 0) {
                    newPosition += lastSearch.getPosition() + searchKey.length();
                }
            }

            if (newPosition == -1) {
                newPosition = editorContent.indexOf(searchKey);
            }
            lastSearch.setPosition(newPosition);
            return newPosition;
        } else {
            int newPosition = editorContent.indexOf(searchKey);
            lastSearch = new LastSearch(searchKey, editorContent).setPosition(newPosition);
            return newPosition;
        }
    }, controller.getExecutorService());

    search.thenAcceptAsync(index -> {
        searchField.setDisable(false);
        if (index >= 0) {
            editor.positionCaret(index);
            editor.requestFocus();
        }
    }, controller.getJavaFXExecutor());
}

From source file:ai.grakn.engine.controller.TasksController.java

private CompletableFuture<List<Json>> saveTasksInQueue(List<TaskStateWithConfiguration> taskStates) {
    // Put the tasks in a persistent queue
    List<CompletableFuture<Json>> futures = taskStates.stream()
            .map(taskStateWithConfiguration -> CompletableFuture
                    .supplyAsync(() -> addTaskToManager(taskStateWithConfiguration), executor))
            .collect(toList());
    return all(futures);
}

From source file:io.pravega.controller.server.eventProcessor.ControllerEventProcessors.java

private CompletableFuture<Void> handleOrphanedReaders(
        final EventProcessorGroup<? extends ControllerEvent> group, final Supplier<Set<String>> processes) {
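    // fetch the readers registered in the checkpoint store (with retries), compare them with the currently
    // active processes, and notify the group about every registered process that is no longer alive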
    return withRetriesAsync(() -> CompletableFuture.supplyAsync(() -> {
        try {
            return group.getProcesses();
        } catch (CheckpointStoreException e) {
            if (e.getType().equals(CheckpointStoreException.Type.NoNode)) {
                return Collections.<String>emptySet();
            }
            throw new CompletionException(e);
        }
    }, executor), RETRYABLE_PREDICATE, Integer.MAX_VALUE, executor)
            .thenComposeAsync(groupProcesses -> withRetriesAsync(() -> CompletableFuture.supplyAsync(() -> {
                try {
                    return new ImmutablePair<>(processes.get(), groupProcesses);
                } catch (Exception e) {
                    log.error(String.format("Error fetching current processes%s", group.toString()), e);
                    throw new CompletionException(e);
                }
            }, executor), RETRYABLE_PREDICATE, Integer.MAX_VALUE, executor)).thenComposeAsync(pair -> {
                Set<String> activeProcesses = pair.getLeft();
                Set<String> registeredProcesses = pair.getRight();

                if (registeredProcesses == null || registeredProcesses.isEmpty()) {
                    return CompletableFuture.completedFuture(null);
                }

                if (activeProcesses != null) {
                    registeredProcesses.removeAll(activeProcesses);
                }

                List<CompletableFuture<Void>> futureList = new ArrayList<>();
                for (String process : registeredProcesses) {
                    futureList.add(withRetriesAsync(() -> CompletableFuture.runAsync(() -> {
                        try {
                            group.notifyProcessFailure(process);
                        } catch (CheckpointStoreException e) {
                            log.error(String.format(
                                    "Error notifying failure of process=%s in event processor group %s",
                                    process, group.toString()), e);
                            throw new CompletionException(e);
                        }
                    }, executor), RETRYABLE_PREDICATE, Integer.MAX_VALUE, executor));
                }

                return FutureHelpers.allOf(futureList);
            });
}

From source file:net.sourceforge.pmd.docs.DeadLinksChecker.java

private CompletableFuture<Integer> getCachedFutureResponse(String url) {
    if (urlResponseCache.containsKey(url)) {
        LOG.info("response: HTTP " + urlResponseCache.get(url) + " (CACHED) on " + url);
        return urlResponseCache.get(url);
    } else {
        // process asynchronously
        CompletableFuture<Integer> futureResponse = CompletableFuture
                .supplyAsync(() -> computeHttpResponse(url), executorService);
        urlResponseCache.put(url, futureResponse);
        return futureResponse;
    }
}

From source file:org.apache.flink.runtime.blob.BlobCacheGetTest.java

/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 * @param cacheAccessesHAStore
 *       whether the cache has access to the {@link BlobServer}'s HA store or not
 */
private void testConcurrentGetOperations(final JobID jobId, final BlobKey.BlobType blobType,
        final boolean cacheAccessesHAStore) throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    final byte[] data = { 1, 2, 3, 4, 99, 42 };

    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (final BlobServer server = new BlobServer(config, blobStoreServer);
            final BlobCacheService cache = new BlobCacheService(config,
                    cacheAccessesHAStore ? blobStoreServer : blobStoreCache,
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA store!)
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation = CompletableFuture.supplyAsync(() -> {
                try {
                    File file = get(cache, jobId, blobKey);
                    // check that we have read the right data
                    validateGetAndClose(new FileInputStream(file), data);
                    return file;
                } catch (IOException e) {
                    throw new CompletionException(
                            new FlinkException("Could not read blob for key " + blobKey + '.', e));
                }
            }, executor);

            getOperations.add(getOperation);
        }

        FutureUtils.ConjunctFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);

        if (blobType == PERMANENT_BLOB) {
            // wait until all operations have completed and check that no exception was thrown
            filesFuture.get();
        } else {
            // wait for all futures to complete (do not abort on expected exceptions) and check
            // that at least one succeeded
            int completedSuccessfully = 0;
            for (CompletableFuture<File> op : getOperations) {
                try {
                    op.get();
                    ++completedSuccessfully;
                } catch (Throwable t) {
                    // transient BLOBs get deleted upon first access, so only one request can succeed;
                    // the others are expected to fail with an IOException caused by a FileNotFoundException
                    if (!(ExceptionUtils.getRootCause(t) instanceof FileNotFoundException)) {
                        // anything other than the expected FileNotFoundException is rethrown
                        org.apache.flink.util.ExceptionUtils.rethrowIOException(t);
                    }
                }
            }
            // multiple clients may have accessed the BLOB successfully before it was
            // deleted, but always at least one:
            assertThat(completedSuccessfully, greaterThanOrEqualTo(1));
        }
    } finally {
        executor.shutdownNow();
    }
}

From source file:org.apache.flink.runtime.blob.BlobCachePutTest.java

/**
 * [FLINK-6020]
 * Tests that concurrent put operations will only upload the file once to the {@link BlobStore}
 * and that the files are not corrupt at any time.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStoreServer = mock(BlobStore.class);
    final BlobStore blobStoreCache = mock(BlobStore.class);

    int concurrentPutOperations = 2;
    int dataSize = 1024;

    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];

    final List<Path> jars;
    if (blobType == PERMANENT_BLOB) {
        // implement via JAR file upload instead:
        File tmpFile = temporaryFolder.newFile();
        FileUtils.writeByteArrayToFile(tmpFile, data);
        jars = Collections.singletonList(new Path(tmpFile.getAbsolutePath()));
    } else {
        jars = null;
    }

    Collection<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);

    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);

    try (final BlobServer server = new BlobServer(config, blobStoreServer);
            final BlobCacheService cache = new BlobCacheService(config, blobStoreCache,
                    new InetSocketAddress("localhost", server.getPort()))) {

        server.start();

        // for highAvailability
        final InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
        // uploading HA BLOBs works on BlobServer only (and, for now, via the BlobClient)

        for (int i = 0; i < concurrentPutOperations; i++) {
            final Supplier<BlobKey> callable;
            if (blobType == PERMANENT_BLOB) {
                // cannot use a blocking stream here (upload only possible via files)
                callable = () -> {
                    try {
                        List<PermanentBlobKey> keys = BlobClient.uploadFiles(serverAddress, config, jobId,
                                jars);
                        assertEquals(1, keys.size());
                        BlobKey uploadedKey = keys.get(0);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };

            } else {
                callable = () -> {
                    try {
                        BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                        BlobKey uploadedKey = put(cache, jobId, inputStream, blobType);
                        // check the uploaded file's contents (concurrently)
                        verifyContents(server, jobId, uploadedKey, data);
                        return uploadedKey;
                    } catch (IOException e) {
                        throw new CompletionException(new FlinkException("Could not upload blob.", e));
                    }
                };
            }
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(callable, executor);

            allFutures.add(putFuture);
        }

        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);

        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();

        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();

        assertTrue(blobKeyIterator.hasNext());

        BlobKey blobKey = blobKeyIterator.next();

        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            // check for unique BlobKey, but should have same hash
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }

        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);

        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStoreServer, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put operations should
            // work and not corrupt files
            verify(blobStoreServer, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }
        // caches must not access the blob store (they are not allowed to write there)
        verify(blobStoreCache, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
    } finally {
        executor.shutdownNow();
    }
}

From source file:org.apache.flink.runtime.blob.BlobServerGetTest.java

/**
 * [FLINK-6020] Tests that concurrent get operations don't concurrently access the BlobStore to
 * download a blob.
 *
 * @param jobId
 *       job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *       whether the BLOB should become permanent or transient
 */
private void testConcurrentGetOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    final BlobStore blobStore = mock(BlobStore.class);

    final int numberConcurrentGetOperations = 3;
    final List<CompletableFuture<File>> getOperations = new ArrayList<>(numberConcurrentGetOperations);

    final byte[] data = { 1, 2, 3, 4, 99, 42 };

    doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            File targetFile = (File) invocation.getArguments()[2];

            FileUtils.writeByteArrayToFile(targetFile, data);

            return null;
        }
    }).when(blobStore).get(any(JobID.class), any(BlobKey.class), any(File.class));

    final ExecutorService executor = Executors.newFixedThreadPool(numberConcurrentGetOperations);

    try (final BlobServer server = new BlobServer(config, blobStore)) {

        server.start();

        // upload data first
        final BlobKey blobKey = put(server, jobId, data, blobType);

        // now try accessing it concurrently (only HA mode will be able to retrieve it from HA store!)
        if (blobType == PERMANENT_BLOB) {
            // remove local copy so that a transfer from HA store takes place
            assertTrue(server.getStorageLocation(jobId, blobKey).delete());
        }
        for (int i = 0; i < numberConcurrentGetOperations; i++) {
            CompletableFuture<File> getOperation = CompletableFuture.supplyAsync(() -> {
                try {
                    File file = get(server, jobId, blobKey);
                    // check that we have read the right data
                    validateGetAndClose(new FileInputStream(file), data);
                    return file;
                } catch (IOException e) {
                    throw new CompletionException(
                            new FlinkException("Could not read blob for key " + blobKey + '.', e));
                }
            }, executor);

            getOperations.add(getOperation);
        }

        CompletableFuture<Collection<File>> filesFuture = FutureUtils.combineAll(getOperations);
        filesFuture.get();
    } finally {
        executor.shutdownNow();
    }
}