Example usage for java.util.concurrent.CompletableFuture CompletableFuture()

Introduction

On this page you can find usage examples for the java.util.concurrent.CompletableFuture no-argument constructor, CompletableFuture().

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
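
All of the examples below follow the same promise pattern: construct an incomplete future, hand it to asynchronous code, and complete it later with either a value (complete) or a failure (completeExceptionally). Here is a minimal self-contained sketch of that pattern; the class name, executor, and placeholder work are illustrative assumptions, not taken from any of the sources below.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class PromiseSketch {
    static CompletableFuture<String> fetchAsync(ExecutorService executor) {
        // Create a new incomplete CompletableFuture to act as the promise.
        final CompletableFuture<String> promise = new CompletableFuture<>();
        executor.submit(() -> {
            try {
                // ... do the actual work, then complete the promise with the result.
                promise.complete("result");
            } catch (Exception e) {
                // On failure, fail the promise instead of throwing.
                promise.completeExceptionally(e);
            }
        });
        return promise;
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        System.out.println(fetchAsync(executor).get()); // prints "result"
        executor.shutdown();
    }
}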

Usage

From source file:org.apache.distributedlog.BKLogHandler.java
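
The new CompletableFuture acts as a promise bridging two levels of callback-style FutureEventListener calls: it is failed if the log is missing, empty, or a metadata read fails, and is otherwise handed to asyncGetLastLogRecord for completion.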

public CompletableFuture<LogRecordWithDLSN> getLastLogRecordAsync(final boolean recover,
        final boolean includeEndOfStream) {
    final CompletableFuture<LogRecordWithDLSN> promise = new CompletableFuture<LogRecordWithDLSN>();
    streamMetadataStore.logExists(logMetadata.getUri(), logMetadata.getLogName())
            .whenComplete(new FutureEventListener<Void>() {
                @Override
                public void onSuccess(Void value) {
                    readLogSegmentsFromStore(LogSegmentMetadata.DESC_COMPARATOR,
                            LogSegmentFilter.DEFAULT_FILTER, null).whenComplete(
                                    new FutureEventListener<Versioned<List<LogSegmentMetadata>>>() {

                                        @Override
                                        public void onSuccess(Versioned<List<LogSegmentMetadata>> ledgerList) {
                                            if (ledgerList.getValue().isEmpty()) {
                                                promise.completeExceptionally(new LogEmptyException(
                                                        "Log " + getFullyQualifiedName() + " has no records"));
                                                return;
                                            }
                                            asyncGetLastLogRecord(ledgerList.getValue().iterator(), promise,
                                                    recover, false, includeEndOfStream);
                                        }

                                        @Override
                                        public void onFailure(Throwable cause) {
                                            promise.completeExceptionally(cause);
                                        }
                                    });
                }

                @Override
                public void onFailure(Throwable cause) {
                    promise.completeExceptionally(cause);
                }
            });
    return promise;
}

From source file:com.ikanow.aleph2.aleph2_rest_utils.DataStoreCrudService.java
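
Here the incomplete future is used only on the error path: a caught exception is wrapped via completeExceptionally so the method can return an already-failed future instead of throwing.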

@Override
public CompletableFuture<Cursor<FileDescriptor>> getObjectsBySpec(QueryComponent<FileDescriptor> spec) {
    try {
        return CompletableFuture
                .completedFuture(new DataStoreCursor(getFolderFilenames(output_directory, fileContext)));
    } catch (IllegalArgumentException | IOException e) {
        final CompletableFuture<Cursor<FileDescriptor>> fut = new CompletableFuture<Cursor<FileDescriptor>>();
        fut.completeExceptionally(e);
        return fut;
    }
}

From source file:com.devicehive.service.DeviceClassService.java
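
A fresh future is handed to an RPC callback (ResponseConsumer) for completion, and the method chains thenApply on it to unwrap the device class list from the response body.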

public CompletableFuture<List<DeviceClassWithEquipmentVO>> list(String name, String namePattern,
        String sortField, Boolean sortOrderAsc, Integer take, Integer skip) {
    ListDeviceClassRequest request = new ListDeviceClassRequest();
    request.setName(name);
    request.setNamePattern(namePattern);
    request.setSortField(sortField);
    request.setSortOrderAsc(sortOrderAsc);
    request.setTake(take);
    request.setSkip(skip);

    CompletableFuture<com.devicehive.shim.api.Response> future = new CompletableFuture<>();

    rpcClient.call(Request.newBuilder().withBody(request).build(), new ResponseConsumer(future));

    return future.thenApply(r -> ((ListDeviceClassResponse) r.getBody()).getDeviceClasses());
}

From source file:org.apache.hadoop.hbase.client.AsyncHBaseAdmin.java
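
The outer future aggregates a batch of per-table operations: CompletableFuture.allOf waits for all of them, after which the outer future is completed with the array of tables that failed.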

private CompletableFuture<HTableDescriptor[]> batchTableOperations(Pattern pattern, TableOperator operator,
        String operationType) {
    CompletableFuture<HTableDescriptor[]> future = new CompletableFuture<>();
    List<HTableDescriptor> failed = new LinkedList<>();
    listTables(pattern, false).whenComplete((tables, error) -> {
        if (error != null) {
            future.completeExceptionally(error);
            return;
        }
        CompletableFuture[] futures = Arrays.stream(tables)
                .map((table) -> operator.operate(table.getTableName()).whenComplete((v, ex) -> {
                    if (ex != null) {
                        LOG.info("Failed to " + operationType + " table " + table.getTableName(), ex);
                        failed.add(table);
                    }
                })).toArray(size -> new CompletableFuture[size]);
        CompletableFuture.allOf(futures).thenAccept((v) -> {
            future.complete(failed.toArray(new HTableDescriptor[failed.size()]));
        });
    });
    return future;
}

From source file:org.onosproject.segmentrouting.pwaas.L2TunnelHandler.java
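
Six incomplete futures sequence the stages of a pseudo wire update: each stage's callback fires the next teardown or deployment step once the previous objective has finished.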

/**
 * Helper function to update a pseudo wire.
 *
 * @param oldPw the pseudo wire to remove
 * @param newPw the pseudo wire to add
 */
private void updatePw(DefaultL2TunnelDescription oldPw, DefaultL2TunnelDescription newPw) {
    long tunnelId = oldPw.l2Tunnel().tunnelId();
    // The async futures used to orchestrate the next-objective
    // and forwarding updates.
    CompletableFuture<ObjectiveError> fwdInitNextFuture = new CompletableFuture<>();
    CompletableFuture<ObjectiveError> revInitNextFuture = new CompletableFuture<>();
    CompletableFuture<ObjectiveError> fwdTermNextFuture = new CompletableFuture<>();
    CompletableFuture<ObjectiveError> revTermNextFuture = new CompletableFuture<>();
    CompletableFuture<ObjectiveError> fwdPwFuture = new CompletableFuture<>();
    CompletableFuture<ObjectiveError> revPwFuture = new CompletableFuture<>();

    Result result = verifyPseudoWire(newPw);
    if (result != SUCCESS) {
        return;
    }
    // First we remove both policies.
    log.debug("Start deleting fwd policy for {}", tunnelId);
    deletePolicy(tunnelId, oldPw.l2TunnelPolicy().cP1(), oldPw.l2TunnelPolicy().cP1InnerTag(),
            oldPw.l2TunnelPolicy().cP1OuterTag(), fwdInitNextFuture, FWD);
    log.debug("Start deleting rev policy for {}", tunnelId);
    deletePolicy(tunnelId, oldPw.l2TunnelPolicy().cP2(), oldPw.l2TunnelPolicy().cP2InnerTag(),
            oldPw.l2TunnelPolicy().cP2OuterTag(), revInitNextFuture, REV);
    // Next we remove both tunnels.
    fwdInitNextFuture.thenAcceptAsync(status -> {
        if (status == null) {
            log.debug("Fwd policy removed. Now remove fwd {} for {}", INITIATION, tunnelId);
            tearDownPseudoWireInit(tunnelId, oldPw.l2TunnelPolicy().cP1(), fwdTermNextFuture, FWD);
        }
    });
    revInitNextFuture.thenAcceptAsync(status -> {
        if (status == null) {
            log.debug("Rev policy removed. Now remove rev {} for {}", INITIATION, tunnelId);
            tearDownPseudoWireInit(tunnelId, oldPw.l2TunnelPolicy().cP2(), revTermNextFuture, REV);

        }
    });
    fwdTermNextFuture.thenAcceptAsync(status -> {
        if (status == null) {
            log.debug("Fwd {} removed. Now remove fwd {} for {}", INITIATION, TERMINATION, tunnelId);
            tearDownPseudoWireTerm(oldPw.l2Tunnel(), oldPw.l2TunnelPolicy().cP2(), fwdPwFuture, FWD);
        }
    });
    revTermNextFuture.thenAcceptAsync(status -> {
        if (status == null) {
            log.debug("Rev {} removed. Now remove rev {} for {}", INITIATION, TERMINATION, tunnelId);
            tearDownPseudoWireTerm(oldPw.l2Tunnel(), oldPw.l2TunnelPolicy().cP1(), revPwFuture, REV);
        }
    });
    // At the end we install the new pw.
    fwdPwFuture.thenAcceptAsync(status -> {
        if (status == null) {
            log.debug("Deploying new fwd pw for {}", tunnelId);
            Result lamdaResult = deployPseudoWireInit(newPw.l2Tunnel(), newPw.l2TunnelPolicy().cP1(),
                    newPw.l2TunnelPolicy().cP2(), FWD);
            if (lamdaResult != SUCCESS) {
                return;
            }
            lamdaResult = deployPolicy(tunnelId, newPw.l2TunnelPolicy().cP1(),
                    newPw.l2TunnelPolicy().cP1InnerTag(), newPw.l2TunnelPolicy().cP1OuterTag(),
                    lamdaResult.nextId);
            if (lamdaResult != SUCCESS) {
                return;
            }
            deployPseudoWireTerm(newPw.l2Tunnel(), newPw.l2TunnelPolicy().cP2(),
                    newPw.l2TunnelPolicy().cP2OuterTag(), FWD);

        }
    });
    revPwFuture.thenAcceptAsync(status -> {
        if (status == null) {
            log.debug("Deploying new rev pw for {}", tunnelId);
            Result lamdaResult = deployPseudoWireInit(newPw.l2Tunnel(), newPw.l2TunnelPolicy().cP2(),
                    newPw.l2TunnelPolicy().cP1(), REV);
            if (lamdaResult != SUCCESS) {
                return;
            }
            lamdaResult = deployPolicy(tunnelId, newPw.l2TunnelPolicy().cP2(),
                    newPw.l2TunnelPolicy().cP2InnerTag(), newPw.l2TunnelPolicy().cP2OuterTag(),
                    lamdaResult.nextId);
            if (lamdaResult != SUCCESS) {
                return;
            }
            deployPseudoWireTerm(newPw.l2Tunnel(), newPw.l2TunnelPolicy().cP1(),
                    newPw.l2TunnelPolicy().cP1OuterTag(), REV);
        }
    });
}

From source file:org.apache.servicecomb.foundation.vertx.http.TestVertxServerResponseToHttpServletResponse.java
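
In this test a fresh incomplete future is returned from a mocked method so the assertion can verify that sendPart passes the future through unchanged.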

@Test
public void sendPart_ReadStreamPart(@Mocked ReadStreamPart part) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    new MockUp<PumpFromPart>() {
        @Mock
        CompletableFuture<Void> toWriteStream(WriteStream<Buffer> writeStream) {
            return future;
        }
    };

    Assert.assertSame(future, response.sendPart(part));
}

From source file:com.ikanow.aleph2.analytics.storm.utils.StormControllerUtil.java
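
Incomplete futures are created on each error path of the submission retry loop so failures are reported through completeExceptionally rather than thrown to the caller.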

/**
 * Starts up a storm job.
 *
 * 1. Gets the storm instance from the yarn config
 * 2. Makes a mega jar consisting of:
 *    A. Underlying artefacts (system libs)
 *    B. User supplied libraries
 * 3. Submits the mega jar to storm with the job name of the bucket id
 * 
 * @param storm_controller
 * @param bucket
 * @param sub_job
 * @param underlying_artefacts
 * @param user_lib_paths
 * @param topology
 * @param config
 * @param cached_jar_dir
 * @return
 */
public static CompletableFuture<BasicMessageBean> startJob(final IStormController storm_controller,
        final DataBucketBean bucket, final Optional<String> sub_job,
        final Collection<Object> underlying_artefacts, final Collection<String> user_lib_paths,
        final StormTopology topology, final Map<String, String> config, final String cached_jar_dir) {
    if (null == topology) {
        return CompletableFuture.completedFuture(ErrorUtils.buildErrorMessage(StormControllerUtil.class,
                "startJob", ErrorUtils.TOPOLOGY_NULL_ERROR, bucket.full_name()));
    }

    _logger.info("Retrieved user Storm config topology: spouts=" + topology.get_spouts_size() + " bolts="
            + topology.get_bolts_size() + " configs=" + config.toString());

    final Set<String> jars_to_merge = new TreeSet<String>();

    final CompletableFuture<String> jar_future = Lambdas.get(() -> {
        if (RemoteStormController.class.isAssignableFrom(storm_controller.getClass())) {
            // (This is only necessary in the remote case)

            jars_to_merge.addAll(underlying_artefacts.stream()
                    .map(artefact -> LiveInjector.findPathJar(artefact.getClass(), ""))
                    .filter(f -> !f.equals("")).collect(Collectors.toSet()));

            if (jars_to_merge.isEmpty()) { // special case: no aleph2 libs found, this is almost certainly because this is being run from eclipse...
                final GlobalPropertiesBean globals = ModuleUtils.getGlobalProperties();
                _logger.warn(
                        "WARNING: no library files found, probably because this is running from an IDE - instead taking all JARs from: "
                                + (globals.local_root_dir() + "/lib/"));
                try {
                    //... and LiveInjector doesn't work on classes ... as a backup just copy everything from "<LOCAL_ALEPH2_HOME>/lib"
                    jars_to_merge
                            .addAll(FileUtils
                                    .listFiles(new File(globals.local_root_dir() + "/lib/"),
                                            new String[] { "jar" }, false)
                                    .stream().map(File::toString).collect(Collectors.toList()));
                } catch (Exception e) {
                    throw new RuntimeException("In eclipse/IDE mode, directory not found: "
                            + (globals.local_root_dir() + "/lib/"));
                }
            }
            //add in the user libs
            jars_to_merge.addAll(user_lib_paths);

            //create jar
            return buildOrReturnCachedStormTopologyJar(jars_to_merge, cached_jar_dir);
        } else {
            return CompletableFuture.completedFuture("/unused/dummy.jar");
        }
    });

    //submit to storm
    @SuppressWarnings("unchecked")
    final CompletableFuture<BasicMessageBean> submit_future = Lambdas.get(() -> {
        long retries = 0;
        while (retries < MAX_RETRIES) {
            try {
                _logger.debug("Trying to submit job, try: " + retries + " of " + MAX_RETRIES);
                final String jar_file_location = jar_future.get();
                return storm_controller.submitJob(bucketPathToTopologyName(bucket, sub_job), jar_file_location,
                        topology, (Map<String, Object>) (Map<String, ?>) config);
            } catch (Exception ex) {
                if (ex instanceof AlreadyAliveException) {
                    retries++;
                    // sleep 1s; about 2s of sleep was typically needed before the job successfully submitted on restart
                    try {
                        Thread.sleep(1000);
                    } catch (Exception e) {
                        final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
                        error_future.completeExceptionally(e);
                        return error_future;
                    }
                } else {
                    retries = MAX_RETRIES; //we threw some other exception, bail out
                    final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
                    error_future.completeExceptionally(ex);
                    return error_future;
                }
            }
        }
        //we maxed out our retries, throw failure
        final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
        error_future.completeExceptionally(new Exception(
                "Error submitting job, ran out of retries (previous (same name) job is probably still alive)"));
        return error_future;
    });
    return submit_future;
}

From source file:org.apache.tinkerpop.gremlin.server.GremlinServer.java
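
The serverStopped future is created once and completed from a dedicated shutdown thread, so repeated calls to stop() all receive the same future.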

/**
 * Stop Gremlin Server and free the port binding. Note that multiple calls to this method will return the
 * same instance of the {@link java.util.concurrent.CompletableFuture}.
 */
public synchronized CompletableFuture<Void> stop() {
    if (serverStopped != null) {
        // shutdown has started so don't fire it off again
        return serverStopped;
    }

    serverStopped = new CompletableFuture<>();
    final CountDownLatch servicesLeftToShutdown = new CountDownLatch(3);

    // release resources in the OpProcessors (e.g. kill sessions)
    OpLoader.getProcessors().entrySet().forEach(kv -> {
        logger.info("Shutting down OpProcessor[{}]", kv.getKey());
        try {
            kv.getValue().close();
        } catch (Exception ex) {
            logger.warn(
                    "Shutdown will continue but, there was an error encountered while closing " + kv.getKey(),
                    ex);
        }
    });

    // it's possible that a channel might not be initialized in the first place if bind() fails because
    // of port conflict.  in that case, there's no need to wait for the channel to close.
    if (null == ch)
        servicesLeftToShutdown.countDown();
    else
        ch.close().addListener(f -> servicesLeftToShutdown.countDown());

    logger.info("Shutting down thread pools.");

    try {
        gremlinExecutorService.shutdown();
    } finally {
        logger.debug("Shutdown Gremlin thread pool.");
    }

    try {
        workerGroup.shutdownGracefully()
                .addListener((GenericFutureListener) f -> servicesLeftToShutdown.countDown());
    } finally {
        logger.debug("Shutdown Worker thread pool.");
    }
    try {
        bossGroup.shutdownGracefully()
                .addListener((GenericFutureListener) f -> servicesLeftToShutdown.countDown());
    } finally {
        logger.debug("Shutdown Boss thread pool.");
    }

    // channel is shutdown as are the thread pools - time to kill graphs as nothing else should be acting on them
    new Thread(() -> {
        serverGremlinExecutor.getHooks().forEach(hook -> {
            logger.info("Executing shutdown {}", LifeCycleHook.class.getSimpleName());
            try {
                hook.onShutDown(new LifeCycleHook.Context(logger));
            } catch (UnsupportedOperationException | UndeclaredThrowableException uoe) {
                // if the user doesn't implement onShutDown the scriptengine will throw
                // this exception.  it can safely be ignored.
            }
        });

        try {
            gremlinExecutorService.awaitTermination(30000, TimeUnit.MILLISECONDS);
        } catch (InterruptedException ie) {
            logger.warn(
                    "Timeout waiting for Gremlin thread pool to shutdown - continuing with shutdown process.");
        }

        try {
            servicesLeftToShutdown.await(30000, TimeUnit.MILLISECONDS);
        } catch (InterruptedException ie) {
            logger.warn(
                    "Timeout waiting for boss/worker thread pools to shutdown - continuing with shutdown process.");
        }

        serverGremlinExecutor.getGraphManager().getGraphs().forEach((k, v) -> {
            logger.debug("Closing Graph instance [{}]", k);
            try {
                v.close();
            } catch (Exception ex) {
                logger.warn(String.format("Exception while closing Graph instance [%s]", k), ex);
            } finally {
                logger.info("Closed Graph instance [{}]", k);
            }
        });

        logger.info("Gremlin Server - shutdown complete");
        serverStopped.complete(null);
    }, SERVER_THREAD_PREFIX + "stop").start();

    return serverStopped;
}

From source file:org.jaqpot.core.service.client.jpdi.JPDIClientImpl.java
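
futureDataset is completed, failed, or cancelled from inside the asynchronous HTTP callbacks, adapting the callback-based HTTP client to a Future-returning prediction API.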

@Override
public Future<Dataset> predict(Dataset inputDataset, Model model, MetaInfo datasetMeta, String taskId) {

    CompletableFuture<Dataset> futureDataset = new CompletableFuture<>();

    Dataset dataset = DatasetFactory.copy(inputDataset);
    Dataset tempWithDependentFeatures = DatasetFactory.copy(dataset,
            new HashSet<>(model.getDependentFeatures()));

    dataset.getDataEntry().parallelStream().forEach(dataEntry -> {
        dataEntry.getValues().keySet().retainAll(model.getIndependentFeatures());
    });
    PredictionRequest predictionRequest = new PredictionRequest();
    predictionRequest.setDataset(dataset);
    predictionRequest.setRawModel(model.getActualModel());
    predictionRequest.setAdditionalInfo(model.getAdditionalInfo());

    final HttpPost request = new HttpPost(model.getAlgorithm().getPredictionService());
    request.addHeader("Accept", "application/json");
    request.addHeader("Content-Type", "application/json");

    PipedOutputStream out = new PipedOutputStream();
    PipedInputStream in;
    try {
        in = new PipedInputStream(out);
    } catch (IOException ex) {
        futureDataset.completeExceptionally(ex);
        return futureDataset;
    }
    request.setEntity(new InputStreamEntity(in, ContentType.APPLICATION_JSON));

    Future futureResponse = client.execute(request, new FutureCallback<HttpResponse>() {

        @Override
        public void completed(final HttpResponse response) {
            futureMap.remove(taskId);
            int status = response.getStatusLine().getStatusCode();
            try {
                InputStream responseStream = response.getEntity().getContent();

                switch (status) {
                case 200:
                case 201:
                    try {
                        PredictionResponse predictionResponse = serializer.parse(responseStream,
                                PredictionResponse.class);

                        List<LinkedHashMap<String, Object>> predictions = predictionResponse.getPredictions();
                        if (dataset.getDataEntry().isEmpty()) {
                            DatasetFactory.addEmptyRows(dataset, predictions.size());
                        }
                        List<Feature> features = featureHandler
                                .findBySource("algorithm/" + model.getAlgorithm().getId());
                        IntStream.range(0, dataset.getDataEntry().size())
                                // .parallel()
                                .forEach(i -> {
                                    Map<String, Object> row = predictions.get(i);
                                    DataEntry dataEntry = dataset.getDataEntry().get(i);
                                    if (model.getAlgorithm().getOntologicalClasses().contains("ot:Scaling")
                                            || model.getAlgorithm().getOntologicalClasses()
                                                    .contains("ot:Transformation")) {
                                        dataEntry.getValues().clear();
                                        dataset.getFeatures().clear();
                                    }
                                    row.entrySet().stream().forEach(entry -> {
                                        //                                                    Feature feature = featureHandler.findByTitleAndSource(entry.getKey(), "algorithm/" + model.getAlgorithm().getId());
                                        Feature feature = features.stream()
                                                .filter(f -> f.getMeta().getTitles().contains(entry.getKey()))
                                                .findFirst().orElse(null);
                                        if (feature == null) {
                                            return;
                                        }
                                        dataEntry.getValues().put(baseURI + "feature/" + feature.getId(),
                                                entry.getValue());
                                        FeatureInfo featInfo = new FeatureInfo(
                                                baseURI + "feature/" + feature.getId(),
                                                feature.getMeta().getTitles().stream().findFirst().get());
                                        featInfo.setCategory(Dataset.DescriptorCategory.PREDICTED);
                                        dataset.getFeatures().add(featInfo);
                                    });
                                });
                        dataset.setId(randomStringGenerator.nextString(20));
                        dataset.setTotalRows(dataset.getDataEntry().size());
                        dataset.setMeta(datasetMeta);
                        futureDataset.complete(DatasetFactory.mergeColumns(dataset, tempWithDependentFeatures));
                    } catch (Exception ex) {
                        futureDataset.completeExceptionally(ex);
                    }
                    break;
                case 400:
                    String message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureDataset.completeExceptionally(new BadRequestException(message));
                    break;
                case 404:
                    message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureDataset.completeExceptionally(new NotFoundException(message));
                    break;
                case 500:
                    message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureDataset.completeExceptionally(new InternalServerErrorException(message));
                    break;
                default:
                    message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureDataset.completeExceptionally(new InternalServerErrorException(message));
                }
            } catch (IOException | UnsupportedOperationException ex) {
                futureDataset.completeExceptionally(ex);
            }
        }

        @Override
        public void failed(final Exception ex) {
            futureMap.remove(taskId);
            futureDataset.completeExceptionally(new InternalServerErrorException(ex));
        }

        @Override
        public void cancelled() {
            futureMap.remove(taskId);
            futureDataset.cancel(true);
        }
    });
    serializer.write(predictionRequest, out);
    try {
        out.close();
    } catch (IOException ex) {
        futureDataset.completeExceptionally(ex);
    }
    futureMap.put(taskId, futureResponse);
    return futureDataset;
}

From source file:org.apache.pulsar.functions.runtime.ProcessRuntime.java
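
retval adapts the Guava ListenableFuture returned by the gRPC stub to a CompletableFuture by completing it from the onSuccess and onFailure callbacks.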

@Override
public CompletableFuture<InstanceCommunication.MetricsData> getMetrics(int instanceId) {
    CompletableFuture<InstanceCommunication.MetricsData> retval = new CompletableFuture<>();
    if (stub == null) {
        retval.completeExceptionally(new RuntimeException("Not alive"));
        return retval;
    }
    ListenableFuture<InstanceCommunication.MetricsData> response = stub
            .withDeadlineAfter(GRPC_TIMEOUT_SECS, TimeUnit.SECONDS).getMetrics(Empty.newBuilder().build());
    Futures.addCallback(response, new FutureCallback<InstanceCommunication.MetricsData>() {
        @Override
        public void onFailure(Throwable throwable) {
            retval.completeExceptionally(throwable);
        }

        @Override
        public void onSuccess(InstanceCommunication.MetricsData t) {
            retval.complete(t);
        }
    });
    return retval;
}