Example usage for java.util.concurrent CompletableFuture runAsync

Introduction

This page lists example usages of java.util.concurrent.CompletableFuture.runAsync.

Prototype

public static CompletableFuture<Void> runAsync(Runnable runnable) 

Document

Returns a new CompletableFuture that is asynchronously completed by a task running in the ForkJoinPool.commonPool() after it runs the given action.
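
A minimal, self-contained sketch of this overload (illustrative only, not taken from any of the projects below):

import java.util.concurrent.CompletableFuture;

public class RunAsyncDemo {
    public static void main(String[] args) {
        // schedule a fire-and-forget action on the common ForkJoinPool
        CompletableFuture<Void> future = CompletableFuture.runAsync(
                () -> System.out.println("running on " + Thread.currentThread().getName()));
        future.join(); // block until the action has finished
    }
}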

Usage

From source file:ch.bender.evacuate.Runner.java

/**
 * Registers the given directory as an evacuation candidate and, unless this
 * is a dry run, asynchronously prepares the trash chain for an existing
 * evacuation target.
 * <p>
 * @param aDir the directory (under the backup root) to evacuate
 */
private void evacuate(Path aDir) {
    Path subDirToBackupRoot = myBackupDir.relativize(aDir);
    Path evacuateTarget = myEvacuateDir.resolve(subDirToBackupRoot);

    myEvacuateCandidates.put(aDir, evacuateTarget);

    if (myDryRun) {
        return;
    }

    if (Files.exists(evacuateTarget)) {
        myLog.debug("adding Future (trash chain preparation): " + evacuateTarget.toString());
        CompletableFuture<Void> future = CompletableFuture.runAsync(
                () -> Helper.prepareTrashChain(evacuateTarget, MAX_TRASH_VERSIONS, myFailedChainPreparations));
        myFutures.add(future);
    }

}
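
The method above only collects the futures into myFutures; the source shown here never joins them, so this is a hedged sketch of how such a collection is typically awaited elsewhere:

    // assumption: myFutures is a List<CompletableFuture<Void>>; join all of them
    CompletableFuture.allOf(myFutures.toArray(new CompletableFuture[0])).join();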

From source file:com.ikanow.aleph2.analytics.spark.utils.SparkTechnologyUtils.java

/** Optional utility to respect the test spec's timeout
 * @param maybe_test_spec
 * @param on_timeout - mainly for testing
 */
public static void registerTestTimeout(final Optional<ProcessingTestSpecBean> maybe_test_spec,
        Runnable on_timeout) {

    maybe_test_spec.map(test_spec -> test_spec.max_run_time_secs()).ifPresent(max_run_time -> {
        CompletableFuture.runAsync(Lambdas.wrap_runnable_u(() -> {
            Thread.sleep(1500L * max_run_time); // (seconds, *1.5 for safety)
            System.out.println("Test timeout - exiting");
            on_timeout.run();
        }));
    });
}
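
Sleeping on the common pool works here, but the same watchdog can be expressed with a scheduler, which also lets the caller revoke the timeout if the test finishes early. A sketch under that assumption (scheduler and watchdog are hypothetical names):

    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    // schedule the timeout action instead of sleeping on a pooled thread
    // (1500 ms per second, i.e. *1.5 for safety, as in the original)
    ScheduledFuture<?> watchdog = scheduler.schedule(on_timeout, 1500L * max_run_time, TimeUnit.MILLISECONDS);
    // ... and if the test completes first:
    watchdog.cancel(false);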

From source file:io.fabric8.vertx.maven.plugin.mojos.AbstractRunMojo.java

/**
 * This method triggers the launch of the application as non-forked, running in the same JVM as Maven.
 *
 * @param argsList - the arguments to be passed to the vertx launcher
 * @throws MojoExecutionException - any error that might occur while starting the process
 */
protected void run(List<String> argsList) throws MojoExecutionException {
    JavaProcessExecutor vertxExecutor = new JavaProcessExecutor()
            .withJvmOpts(redeploy ? Collections.emptyList() : jvmArgs).withArgs(argsList)
            .withClassPath(getClassPathUrls()).withLogger(getLog()).withWaitFor(true);
    try {

        //When redeploy is enabled spin up the Incremental builder in background
        if (redeploy
                && !(VERTX_COMMAND_START.equals(vertxCommand) || VERTX_COMMAND_STOP.equals(vertxCommand))) {
            getLog().debug("Collected mojos: " + MojoSpy.MOJOS);

            Set<Path> inclDirs = Collections.singleton(new File(project.getBasedir(), "src/main").toPath());

            //TODO - handle exceptions effectively
            // TODO - Not sure about the runAsync here, it uses the default fork join pool
            CompletableFuture.runAsync(() -> {
                List<Callable<Void>> chain = computeExecutionChain();
                IncrementalBuilder incrementalBuilder = new IncrementalBuilder(inclDirs, chain, getLog(),
                        redeployScanPeriod);
                incrementalBuilder.run();
            });

        }

        vertxExecutor.execute();

    } catch (Exception e) {
        throw new MojoExecutionException("Unable to launch incremental builder", e);
    }
}
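
The TODO above notes that single-argument runAsync always uses the common ForkJoinPool. runAsync also has a two-argument overload that takes an explicit Executor; a hedged sketch of what addressing the TODO might look like, reusing the names from the method above (builderPool is hypothetical):

    // run the long-lived incremental builder on its own thread rather than
    // tying up a shared common-pool worker
    ExecutorService builderPool = Executors.newSingleThreadExecutor();
    CompletableFuture.runAsync(() -> {
        List<Callable<Void>> chain = computeExecutionChain();
        new IncrementalBuilder(inclDirs, chain, getLog(), redeployScanPeriod).run();
    }, builderPool);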

From source file:com.ikanow.aleph2.graph.titan.services.TitanGraphService.java

/** Deletes a bucket
 * @param bucket
 * @param secondary_buffer
 * @param bucket_or_buffer_getting_deleted
 * @return
 */
private CompletableFuture<BasicMessageBean> handleBucketDeletionRequest_internal(DataBucketBean bucket,
        Optional<String> secondary_buffer, boolean bucket_or_buffer_getting_deleted) {

    //TODO (ALEPH-15): check if the indexes exist - just return if so

    if (secondary_buffer.isPresent()) {
        return CompletableFuture.completedFuture(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                "handleBucketDeletionRequest", ErrorUtils.BUFFERS_NOT_SUPPORTED, bucket.full_name()));
    }

    //TODO (ALEPH-15): At some point services need to be able to (optionally) request batch enrichment jobs - eg it would be much nicer to fire this off as a distributed job

    return CompletableFuture.runAsync(() -> {

        try {
            Thread.sleep(1000L); // just check the indexes have refreshed...
        } catch (Exception e) {
            // ignore
        }

        final TitanTransaction tx = _titan.buildTransaction().start();

        //DEBUG
        //final com.fasterxml.jackson.databind.ObjectMapper titan_mapper = _titan.io(org.apache.tinkerpop.gremlin.structure.io.IoCore.graphson()).mapper().create().createMapper();

        @SuppressWarnings("unchecked")
        final Stream<TitanVertex> vertices_to_check = Optionals.<TitanVertex>streamOf(
                tx.query().has(GraphAnnotationBean.a2_p, bucket.full_name()).vertices(), false);
        vertices_to_check.forEach(v -> {
            {
                final Iterator<VertexProperty<String>> props = v.<String>properties(GraphAnnotationBean.a2_p);
                while (props.hasNext()) {
                    final VertexProperty<String> prop = props.next();
                    if (bucket.full_name().equals(prop.value())) {
                        prop.remove();
                    }
                }
            }
            {
                final Iterator<VertexProperty<String>> props = v.<String>properties(GraphAnnotationBean.a2_p);
                if (!props.hasNext()) { // can delete this bucket
                    v.remove();
                }
            }
        });
        @SuppressWarnings("unchecked")
        final Stream<TitanEdge> edges_to_check = Optionals.<TitanEdge>streamOf(
                tx.query().has(GraphAnnotationBean.a2_p, bucket.full_name()).edges(), false);
        edges_to_check.forEach(e -> {
            e.remove(); // (can only have one edge so delete it)
        });

        tx.commit();
    }).thenApply(__ -> ErrorUtils.buildSuccessMessage(this.getClass().getSimpleName(),
            "handleBucketDeletionRequest", "Completed", "handleBucketDeletionRequest"))
            .exceptionally(t -> ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "handleBucketDeletionRequest", ErrorUtils.getLongForm("{0}", t),
                    "handleBucketDeletionRequest"));

}
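
Distilled, the pattern this example relies on is a Void-valued runAsync stage followed by thenApply and exceptionally; a minimal sketch with hypothetical doWork/successResult/errorResult stand-ins:

    CompletableFuture
            .runAsync(() -> doWork())              // Void-valued async stage
            .thenApply(ignored -> successResult()) // map the Void to a result
            .exceptionally(t -> errorResult(t));   // recover if doWork threw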

From source file:com.ikanow.aleph2.distributed_services.services.CoreDistributedServices.java

@Override
public CompletableFuture<Void> runOnAkkaJoin(Runnable task) {
    synchronized (_joined_akka_cluster) {
        if (_joined_akka_cluster.isDone()) {
            return CompletableFuture.runAsync(task);
        } else {
            final CompletableFuture<Void> on_complete = new CompletableFuture<>();
            _post_join_task_list.add(Tuples._2T(on_complete, task));
            return on_complete;
        }
    }
}
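
The queued futures must be completed by hand once the cluster joins; that drain step is not shown above, so this is a hedged guess at it, using plain Map.Entry pairs in place of the project's Tuples helper:

    // hypothetical drain, run after the akka cluster join completes
    for (Map.Entry<CompletableFuture<Void>, Runnable> entry : pending) {
        try {
            entry.getValue().run();        // run the deferred task
            entry.getKey().complete(null); // then complete the caller's future
        } catch (Throwable t) {
            entry.getKey().completeExceptionally(t);
        }
    }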

From source file:org.apache.bookkeeper.tests.containers.ChaosContainer.java

public void tailContainerLog() {
    CompletableFuture.runAsync(() -> {
        while (null == containerId) {
            try {
                TimeUnit.MILLISECONDS.sleep(100);
            } catch (InterruptedException e) {
                return;
            }
        }

        LogContainerCmd logContainerCmd = this.dockerClient.logContainerCmd(containerId);
        logContainerCmd.withStdOut(true).withStdErr(true).withFollowStream(true);
        logContainerCmd.exec(new LogContainerResultCallback() {
            @Override
            public void onNext(Frame item) {
                log.info(new String(item.getPayload(), UTF_8));
            }
        });
    });
}

From source file:org.apache.geode.internal.cache.BackupDUnitTest.java

/**
 * Test for bug 42419
 */
@Test
public void testBackupWhileBucketIsCreated() throws Throwable {
    Host host = Host.getHost(0);
    vm0 = host.getVM(0);
    vm1 = host.getVM(1);
    final VM vm2 = host.getVM(2);

    logger.info("Creating region in VM0");
    createPersistentRegion(vm0);

    // create a bucket on vm0
    createData(vm0, 0, 1, "A", "region1");

    // create the pr on vm1, which won't have any buckets
    logger.info("Creating region in VM1");
    createPersistentRegion(vm1);

    CompletableFuture<BackupStatus> backupStatusFuture = CompletableFuture.supplyAsync(() -> backup(vm2));
    CompletableFuture<Void> createDataFuture = CompletableFuture
            .runAsync(() -> createData(vm0, 1, 5, "A", "region1"));
    CompletableFuture.allOf(backupStatusFuture, createDataFuture).get(); // actually wait for both tasks

    BackupStatus status = backupStatusFuture.get();
    assertEquals(2, status.getBackedUpDiskStores().size());
    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());

    validateBackupComplete();

    createData(vm0, 0, 5, "C", "region1");

    assertEquals(2, status.getBackedUpDiskStores().size());
    assertEquals(Collections.emptySet(), status.getOfflineDiskStores());

    closeCache(vm0);
    closeCache(vm1);

    // Destroy the current data
    Invoke.invokeInEveryVM(new SerializableRunnable("Clean disk dirs") {
        public void run() {
            try {
                cleanDiskDirs();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    });

    restoreBackup(2);

    createPersistentRegionsAsync();

    checkData(vm0, 0, 1, "A", "region1");
}

From source file:org.apache.geode.internal.cache.BackupJUnitTest.java

@Test
public void testCompactionDuringBackup() throws IOException, InterruptedException {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setDiskDirs(diskDirs);
    dsf.setMaxOplogSize(1);
    dsf.setAutoCompact(false);
    dsf.setAllowForceCompaction(true);
    dsf.setCompactionThreshold(20);
    DiskStoreImpl ds = (DiskStoreImpl) dsf.create(DISK_STORE_NAME);

    Region<Object, Object> region = createRegion();

    // Put enough data to roll some oplogs
    for (int i = 0; i < 1024; i++) {
        region.put(i, getBytes(i));
    }

    BackupManager backupManager = cache
            .startBackup(cache.getInternalDistributedSystem().getDistributedMember());
    backupManager.validateRequestingAdmin();
    backupManager.prepareForBackup();
    final Region theRegion = region;
    final DiskStore theDiskStore = ds;
    CompletableFuture.runAsync(() -> destroyAndCompact(theRegion, theDiskStore));
    backupManager.doBackup(backupDir, null, false);

    cache.close();
    destroyDiskDirs();
    restoreBackup(false);
    createCache();
    createDiskStore();
    region = createRegion();
    validateEntriesExist(region, 0, 1024);

    assertNull(region.get("A"));
}

From source file:org.apache.storm.messaging.netty.NettyTest.java

private void doTestServerDelayed(Map<String, Object> stormConf) throws Exception {
    LOG.info("4. test server delayed");
    String reqMessage = "0123456789abcdefghijklmnopqrstuvwxyz";
    IContext context = TransportFactory.makeContext(stormConf);
    try {
        AtomicReference<TaskMessage> response = new AtomicReference<>();
        int port = Utils.getAvailablePort(6700);
        try (IConnection client = context.connect(null, "localhost", port, remoteBpStatus)) {
            AtomicReference<IConnection> server = new AtomicReference<>();
            try {
                CompletableFuture<?> serverStart = CompletableFuture.runAsync(() -> {
                    try {
                        Thread.sleep(100);
                        server.set(context.bind(null, port));
                        server.get().registerRecv(mkConnectionCallback(response::set));
                        waitUntilReady(client, server.get());
                    } catch (Exception e) {
                        throw Utils.wrapInRuntime(e);
                    }
                });
                serverStart.get(Testing.TEST_TIMEOUT_MS, TimeUnit.MILLISECONDS);
                byte[] messageBytes = reqMessage.getBytes(StandardCharsets.UTF_8);

                send(client, taskId, messageBytes);

                waitForNotNull(response);
                TaskMessage responseMessage = response.get();
                assertThat(responseMessage.task(), is(taskId));
                assertThat(responseMessage.message(), is(messageBytes));
            } finally {
                if (server.get() != null) {
                    server.get().close();
                }
            }
        }
    } finally {
        context.term();
    }
}
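
The serverStart.get(...) call above is the general recipe for putting a bound on a runAsync task; stripped down (setUpServer is hypothetical):

    CompletableFuture<?> task = CompletableFuture.runAsync(() -> setUpServer());
    task.get(30, TimeUnit.SECONDS); // throws TimeoutException if setup hangs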

From source file:org.dhatim.fastexcel.Correctness.java

@Test
public void multipleWorksheets() throws Exception {
    int numWs = 10;
    int numRows = 5000;
    int numCols = 6;
    byte[] data = writeWorkbook(wb -> {
        @SuppressWarnings("unchecked")
        CompletableFuture<Void>[] cfs = new CompletableFuture[numWs];
        for (int i = 0; i < cfs.length; ++i) {
            Worksheet ws = wb.newWorksheet("Sheet " + i);
            CompletableFuture<Void> cf = CompletableFuture.runAsync(() -> {
                for (int j = 0; j < numCols; ++j) {
                    ws.value(0, j, "Column " + j);
                    ws.style(0, j).bold().fontSize(12).fillColor(Color.GRAY2).set();
                    for (int k = 1; k <= numRows; ++k) {
                        switch (j) {
                        case 0:
                            ws.value(k, j, "String value " + k);
                            break;
                        case 1:
                            ws.value(k, j, 2);
                            break;
                        case 2:
                            ws.value(k, j, 3L);
                            break;
                        case 3:
                            ws.value(k, j, 0.123);
                            break;
                        case 4:
                            ws.value(k, j, new Date());
                            ws.style(k, j).format("yyyy-MM-dd HH:mm:ss").set();
                            break;
                        case 5:
                            ws.value(k, j, LocalDate.now());
                            ws.style(k, j).format("yyyy-MM-dd").set();
                            break;
                        default:
                            throw new IllegalArgumentException();
                        }
                    }
                }
                ws.formula(numRows + 1, 1, "=SUM(" + ws.range(1, 1, numRows, 1).toString() + ")");
                ws.formula(numRows + 1, 2, "=SUM(" + ws.range(1, 2, numRows, 2).toString() + ")");
                ws.formula(numRows + 1, 3, "=SUM(" + ws.range(1, 3, numRows, 3).toString() + ")");
                ws.formula(numRows + 1, 4, "=AVERAGE(" + ws.range(1, 4, numRows, 4).toString() + ")");
                ws.style(numRows + 1, 4).format("yyyy-MM-dd HH:mm:ss").set();
                ws.formula(numRows + 1, 5, "=AVERAGE(" + ws.range(1, 5, numRows, 5).toString() + ")");
                ws.style(numRows + 1, 5).format("yyyy-MM-dd").bold().italic().fontColor(Color.RED)
                        .fontName("Garamond").fontSize(new BigDecimal("14.5")).horizontalAlignment("center")
                        .verticalAlignment("top").wrapText(true).set();
                ws.range(1, 0, numRows, numCols - 1).style().borderColor(Color.RED).borderStyle("thick")
                        .shadeAlternateRows(Color.RED).set();
            });
            cfs[i] = cf;
        }
        try {
            CompletableFuture.allOf(cfs).get();
        } catch (InterruptedException | ExecutionException ex) {
            throw new RuntimeException(ex);
        }
    });

    // Check generated workbook with Apache POI
    XSSFWorkbook xwb = new XSSFWorkbook(new ByteArrayInputStream(data));
    assertThat(xwb.getActiveSheetIndex()).isEqualTo(0);
    assertThat(xwb.getNumberOfSheets()).isEqualTo(numWs);
    for (int i = 0; i < numWs; ++i) {
        assertThat(xwb.getSheetName(i)).isEqualTo("Sheet " + i);
        XSSFSheet xws = xwb.getSheetAt(i);
        assertThat(xws.getLastRowNum()).isEqualTo(numRows + 1);
        for (int j = 1; j <= numRows; ++j) {
            assertThat(xws.getRow(j).getCell(0).getStringCellValue()).isEqualTo("String value " + j);
        }
    }

}