Example usage for java.util.concurrent ExecutorService submit

Introduction

This page lists example usages of java.util.concurrent ExecutorService.submit, collected from open-source projects.

Prototype

Future<?> submit(Runnable task);

Document

Submits a Runnable task for execution and returns a Future representing that task.
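
Before the real-world usages below, here is a minimal, self-contained sketch of this overload (the class and variable names are ours): the task runs asynchronously, and the returned Future<?> yields null from get() once the Runnable completes.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitRunnableSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();

        // submit(Runnable) schedules the task and returns a handle to it
        Future<?> future = pool.submit(() -> System.out.println("task running"));

        // get() blocks until the Runnable finishes; for this overload it returns null
        System.out.println("result: " + future.get());

        pool.shutdown();
    }
}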

Usage

From source file:eu.freme.bpt.service.AbstractService.java

public void run(final FailurePolicy failurePolicy, final int nrThreads, final Callback callback) {
    logger.info("Running service {}", this.getClass().getName());
    ExecutorService executorService = Executors.newFixedThreadPool(nrThreads);
    Unirest.setTimeouts(30000, 300000); // TODO: configurable?
    while (ioIterator.hasNext()) {
        final IO io = ioIterator.next();

        executorService.submit(() -> {
            try (final InputStream inputStream = io.getInputStream();
                    final OutputStream outputStream = io.getOutputStream()) {
                byte[] input = IOUtils.toByteArray(inputStream);
                HttpResponse<InputStream> response = Unirest.post(endpoint).headers(headers)
                        .queryString(parameters).body(input).asBinary();
                if (response.getStatus() == 200) {
                    logger.debug("Request alright.");
                    try (InputStream responseInput = response.getBody()) {
                        IOUtils.copy(responseInput, outputStream);
                        callback.onTaskComplete(io.getInputFile(), io.getOutputFile());
                    } catch (IOException e) {
                        logger.error("Error while writing response.", e);
                        callback.onTaskFails(io.getInputFile(), io.getOutputFile(),
                                "Error while writing response. " + e.getMessage());
                        if (!failurePolicy.check()) {
                            System.exit(3);
                        }
                    }
                } else {
                    String body = IOUtils.toString(response.getBody());
                    String msg = "Error response from service " + endpoint + ": Status " + response.getStatus()
                            + ": " + response.getStatusText() + " - " + body;
                    logger.error(msg);
                    callback.onTaskFails(io.getInputFile(), io.getOutputFile(), msg);
                    if (!failurePolicy.check()) {
                        System.exit(3);
                    }
                }
            } catch (Exception e) {
                logger.error("Request to {} failed." + endpoint, e);
                callback.onTaskFails(io.getInputFile(), io.getOutputFile(),
                        "Request to " + endpoint + " failed. " + e.getMessage());
                if (!failurePolicy.check()) {
                    System.exit(3);
                }
            }
        });
    }
    executorService.shutdown();
    try {
        executorService.awaitTermination(1, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        logger.warn("Waiting on termination interrupted.");
    }
    callback.onBatchComplete();
}
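
Note that the submitted lambda above catches Exception itself and reports failures through the callback. That is deliberate: with submit(), an exception thrown inside the task does not reach any uncaught-exception handler; it is stored in the returned Future and only resurfaces from get(), which this code never calls. A minimal sketch of the pitfall (the pool and task here are ours):

ExecutorService pool = Executors.newSingleThreadExecutor();
Runnable task = () -> {
    throw new RuntimeException("boom"); // never reaches a logger on its own
};
Future<?> f = pool.submit(task);
try {
    f.get(); // the failure only surfaces here, wrapped in an ExecutionException
} catch (InterruptedException | ExecutionException e) {
    System.err.println("task failed: " + e.getCause());
}
pool.shutdown();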

From source file:gobblin.util.HadoopUtils.java

/**
 * This method is an additive implementation of the {@link FileSystem#rename(Path, Path)} method. It moves all the
 * files/directories under 'from' path to the 'to' path without overwriting existing directories in the 'to' path.
 *
 * <p>
 * The rename operation happens at the first non-existent sub-directory. If a directory at destination path already
 * exists, it recursively tries to move sub-directories. If all the sub-directories also exist at the destination,
 * a file-level move is done.
 * </p>
 *
 * @param fileSystem on which the data needs to be moved
 * @param from path of the data to be moved
 * @param to destination path for the data
 */
public static void renameRecursively(FileSystem fileSystem, Path from, Path to) throws IOException {

    log.info(String.format("Recursively renaming %s in %s to %s.", from, fileSystem.getUri(), to));

    FileSystem throttledFS = getOptionallyThrottledFileSystem(fileSystem, 10000);

    ExecutorService executorService = ScalingThreadPoolExecutor.newScalingThreadPool(1, 100, 100,
            ExecutorsUtils.newThreadFactory(Optional.of(log), Optional.of("rename-thread-%d")));
    Queue<Future<?>> futures = Queues.newConcurrentLinkedQueue();

    try {
        if (!fileSystem.exists(from)) {
            throw new IOException("Trying to rename a path that does not exist! " + from);
        }

        futures.add(executorService.submit(new RenameRecursively(throttledFS, fileSystem.getFileStatus(from),
                to, executorService, futures)));
        int futuresUsed = 0;
        while (!futures.isEmpty()) {
            try {
                futures.poll().get();
                futuresUsed++;
            } catch (ExecutionException | InterruptedException ee) {
                throw new IOException(ee.getCause());
            }
        }

        log.info(String.format("Recursive renaming of %s to %s. (details: used %d futures)", from, to,
                futuresUsed));

    } finally {
        ExecutorsUtils.shutdownExecutorService(executorService, Optional.of(log), 1, TimeUnit.SECONDS);
    }
}
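
A call-site sketch, assuming a configured Hadoop FileSystem (the paths here are hypothetical): the method blocks until every queued rename future has completed, so the caller needs no extra synchronization.

FileSystem fs = FileSystem.get(new Configuration());
// hypothetical source and destination paths
HadoopUtils.renameRecursively(fs, new Path("/data/staging"), new Path("/data/final"));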

From source file:aos.camel.JavaFutureTest.java

@Test
public void testFutureWithDone() throws Exception {
    // this is the task we want to execute async
    // usually the task is something that takes
    // some time to do
    Callable<String> task = new Callable<String>() {
        public String call() throws Exception {
            // do something that takes some time
            LOG.info("Starting to process task");
            Thread.sleep(5000);
            LOG.info("Task is now done");
            return "Camel rocks";
        }
    };

    // this is the thread pool we will use
    ExecutorService executor = Executors.newCachedThreadPool();

    // now submit the task to the thread pool
    // and get the Future handle back so we can later get the result
    LOG.info("Submitting task to ExecutorService");
    Future<String> future = executor.submit(task);
    LOG.info("Task submitted and we got a Future handle");

    // test when we are done
    boolean done = false;
    while (!done) {
        done = future.isDone();
        LOG.info("Is the task done? " + done);
        if (!done) {
            Thread.sleep(2000);
        }
    }

    // and get the answer
    String answer = future.get();
    LOG.info("The answer is: " + answer);
}
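
The polling loop above demonstrates isDone(), but in practice a bounded blocking wait is usually simpler. An equivalent sketch using the same task and executor:

Future<String> future = executor.submit(task);
// block up to 10 seconds; throws TimeoutException if the task is not done in time
String answer = future.get(10, TimeUnit.SECONDS);
LOG.info("The answer is: " + answer);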

From source file:com.netflix.curator.framework.recipes.barriers.TestDistributedBarrier.java

@Test
public void testMultiClient() throws Exception {
    CuratorFramework client1 = null;
    CuratorFramework client2 = null;
    try {
        {
            CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                    new RetryOneTime(1));
            try {
                client.start();
                DistributedBarrier barrier = new DistributedBarrier(client, "/barrier");
                barrier.setBarrier();
            } finally {
                IOUtils.closeQuietly(client);
            }
        }

        client1 = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
        client2 = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));

        List<Future<Object>> futures = Lists.newArrayList();
        ExecutorService service = Executors.newCachedThreadPool();
        for (final CuratorFramework c : new CuratorFramework[] { client1, client2 }) {
            Future<Object> future = service.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    c.start();
                    DistributedBarrier barrier = new DistributedBarrier(c, "/barrier");
                    barrier.waitOnBarrier(10, TimeUnit.MILLISECONDS);
                    return null;
                }
            });
            futures.add(future);
        }

        Thread.sleep(1000);
        {
            CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                    new RetryOneTime(1));
            try {
                client.start();
                DistributedBarrier barrier = new DistributedBarrier(client, "/barrier");
                barrier.removeBarrier();
            } finally {
                IOUtils.closeQuietly(client);
            }
        }

        for (Future<Object> f : futures) {
            f.get();
        }
    } finally {
        IOUtils.closeQuietly(client1);
        IOUtils.closeQuietly(client2);
    }
}

From source file:bear.ssh.MyStreamCopier.java

public Future<TaskResult<?>> spawn(ExecutorService service, final long finishAtMs) {
    this.finishAtMs = finishAtMs;
    return service.submit(new CatchyCallable<TaskResult<?>>(new Callable<TaskResult<?>>() {
        @Override
        public TaskResult<?> call() {
            boolean interrupted = false;

            while (!stopFlag) {
                interrupted = Thread.currentThread().isInterrupted();

                if (interrupted)
                    break;

                try {
                    if (nonBlockingCopy() == -1) {
                        break;
                    }

                    if (MyStreamCopier.this.finishAtMs != -1
                            && MyStreamCopier.this.finishAtMs < System.currentTimeMillis()) {
                        break;
                    }

                    listener.reportProgress(count, null, -1);

                    if (!(periodMs <= 0 && periodNano <= 0)) {
                        Thread.sleep(periodMs, periodNano);
                    }
                } catch (Exception e) {
                    if (e instanceof InterruptedIOException) {
                        GlobalContext.AwareThread t = (GlobalContext.AwareThread) Thread.currentThread();
                        log.error("interrupted by: {}, at: {}, I am at {}", t.getInterruptedBy(),
                                Throwables.getStackTraceAsString(t.getInterruptedAt()),
                                Throwables.getStackTraceAsString(e));
                    } else {
                        log.error("", e);
                    }
                    return TaskResult.of(e);
                }
            }

            try {
                nonBlockingCopy();

                // try one more time as it's buggy
                // they asked us to stop but did not interrupt, let's have one more chance
                // todo: remove this
                /*
                if (stopFlag) {
                    try {
                        Thread.sleep(periodMs, periodNano);
                    } catch (InterruptedException e) {
                        // they are interrupting our attempt to wait;
                        // we cancel and try to instantly copy...
                        nonBlockingCopy();
                    }
                }
                */
            } catch (Exception e) {
                log.error("", e);

                return TaskResult.of(e);
            } finally {
                if (stopFlag || interrupted) {
                    IOUtils.closeQuietly(in);
                }
            }

            finished = true;

            return TaskResult.OK;
        }
    }));
}

From source file:com.ottogroup.bi.spqr.pipeline.component.queue.chronicle.DefaultStreamingMessageQueueTest.java

/**
 * Inserts a configurable number of messages into a {@link Chronicle} and measures the
 * duration it takes to read the content from it using the {@link DefaultStreamingMessageQueue} implementation
 */
//   @Test
public void testNext_performanceTest() throws Exception {

    Properties props = new Properties();
    props.put(DefaultStreamingMessageQueue.CFG_CHRONICLE_QUEUE_DELETE_ON_EXIT, "true");
    props.put(DefaultStreamingMessageQueue.CFG_CHRONICLE_QUEUE_PATH, System.getProperty("java.io.tmpdir"));
    final DefaultStreamingMessageQueue inbox = new DefaultStreamingMessageQueue();
    inbox.setId("testNext_performanceTest");
    inbox.initialize(props);

    final StreamingMessageQueueProducer producer = inbox.getProducer();
    final StreamingMessageQueueConsumer consumer = inbox.getConsumer();

    final CountDownLatch latch = new CountDownLatch(numberOfMessagesPerfTest);

    ExecutorService svc = Executors.newCachedThreadPool();

    Future<Integer> producerDurationFuture = svc.submit(new Callable<Integer>() {

        public Integer call() {
            StreamingDataMessage object = new StreamingDataMessage(new byte[] { 01, 2, 3, 4, 5, 6, 7, 9 },
                    System.currentTimeMillis());
            long s1 = System.nanoTime();
            for (int i = 0; i < numberOfMessagesPerfTest; i++) {
                producer.insert(object);
            }
            long s2 = System.nanoTime();
            return (int) (s2 - s1);
        }
    });

    Future<Integer> durationFuture = svc.submit(new Callable<Integer>() {
        public Integer call() {
            StreamingDataMessage msg = null;
            long start = System.nanoTime();
            while (true) {
                msg = consumer.next();
                if (msg != null) {
                    latch.countDown();
                    if (latch.getCount() == 0)
                        break;
                } else {
                    LockSupport.parkNanos(1);
                }

            }
            long end = System.nanoTime();
            return (int) (end - start);
        }
    });

    try {
        Assert.assertTrue("Failed to receive expected number of messages", latch.await(10, TimeUnit.SECONDS));
    } catch (InterruptedException e) {
        Assert.fail("Failed to receive expected number of messages");
    }

    int producerDuration = producerDurationFuture.get();
    int duration = durationFuture.get();

    double messagesPerNano = ((double) numberOfMessagesPerfTest / (double) duration);
    double messagesPerNanoRounded = (double) Math.round(messagesPerNano * 10000) / 10000;

    double messagesPerMilli = messagesPerNano * 1000000;
    messagesPerMilli = (double) Math.round(messagesPerMilli * 100) / 100;

    long messagesPerSecondTmps = Math.round(messagesPerNano * 1000000 * 1000);
    double messagesPerSecond = (double) messagesPerSecondTmps;

    double nanosPerMessage = ((double) duration / (double) numberOfMessagesPerfTest);
    nanosPerMessage = (double) Math.round(nanosPerMessage * 100) / 100;

    logger.info("message count: " + numberOfMessagesPerfTest);
    logger.info(
            "message producing: " + producerDuration + "ns, " + TimeUnit.NANOSECONDS.toMillis(producerDuration)
                    + "ms, " + TimeUnit.NANOSECONDS.toSeconds(producerDuration) + "s");
    logger.info("message consumption: " + duration + "ns, " + TimeUnit.NANOSECONDS.toMillis(duration) + "ms, "
            + TimeUnit.NANOSECONDS.toSeconds(duration) + "s");
    logger.info("message throughput: " + messagesPerNanoRounded + " msgs/ns, " + messagesPerMilli + " msgs/ms, "
            + messagesPerSecond + " msgs/s");

    svc.shutdownNow();
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessReadWriteLock.java

@Test
public void testBasic() throws Exception {
    final int CONCURRENCY = 8;
    final int ITERATIONS = 100;

    final Random random = new Random();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    final AtomicInteger maxConcurrentCount = new AtomicInteger(0);
    final AtomicInteger writeCount = new AtomicInteger(0);
    final AtomicInteger readCount = new AtomicInteger(0);

    List<Future<Void>> futures = Lists.newArrayList();
    ExecutorService service = Executors.newCachedThreadPool();
    for (int i = 0; i < CONCURRENCY; ++i) {
        Future<Void> future = service.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                        new RetryOneTime(1));
                client.start();
                try {
                    InterProcessReadWriteLock lock = new InterProcessReadWriteLock(client, "/lock");
                    for (int i = 0; i < ITERATIONS; ++i) {
                        if (random.nextInt(100) < 10) {
                            doLocking(lock.writeLock(), concurrentCount, maxConcurrentCount, random, 1);
                            writeCount.incrementAndGet();
                        } else {
                            doLocking(lock.readLock(), concurrentCount, maxConcurrentCount, random,
                                    Integer.MAX_VALUE);
                            readCount.incrementAndGet();
                        }
                    }
                } finally {
                    IOUtils.closeQuietly(client);
                }
                return null;
            }
        });
        futures.add(future);
    }

    for (Future<Void> future : futures) {
        future.get();
    }

    System.out.println("Writes: " + writeCount.get() + " - Reads: " + readCount.get() + " - Max Reads: "
            + maxConcurrentCount.get());

    Assert.assertTrue(writeCount.get() > 0);
    Assert.assertTrue(readCount.get() > 0);
    Assert.assertTrue(maxConcurrentCount.get() > 1);
}

From source file:com.blacklocus.jres.request.index.JresUpdateDocumentTest.java

@Test(expected = ExecutionException.class)
public void testRetryOnConflictExpectError() throws InterruptedException, ExecutionException {
    final String index = "JresUpdateDocumentTest.testRetryOnConflictExpectError".toLowerCase();
    final String type = "test";
    final String id = "warzone";

    final AtomicReference<String> error = new AtomicReference<String>();
    final int numThreads = 16, numIterations = 100;

    ExecutorService x = Executors.newFixedThreadPool(numThreads);
    List<Future<?>> futures = new ArrayList<Future<?>>(numThreads);
    for (int i = 0; i < numThreads; i++) {
        futures.add(x.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                for (int j = 0; j < numIterations; j++) {
                    jres.quest(new JresUpdateDocument(index, type, id, ImmutableMap.of("value", 0)));
                }
                return null;
            }
        }));
    }
    x.shutdown();
    x.awaitTermination(1, TimeUnit.MINUTES);

    for (Future<?> future : futures) {
        // expecting a conflict exception from ElasticSearch
        future.get();
    }
}
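
The expected ExecutionException illustrates a general part of the submit() contract: Future.get() rethrows anything the Callable threw, wrapped so the original failure is available via getCause(). A sketch of the unwrapping, written against one of the futures above:

try {
    future.get();
} catch (ExecutionException e) {
    Throwable cause = e.getCause(); // e.g. the version-conflict error from Elasticsearch
    System.err.println("task failed: " + cause);
}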

From source file:io.wcm.caravan.pipeline.impl.JsonPipelineMultipleSubscriptionsTest.java

@Test
public void subscribeConcurrentlyToPlainPipelineOutputs() throws InterruptedException, JSONException {
    firstStep = newPipelineWithResponseBody("{id:123}");

    // use a synchronized set to collect the pipeline output from multiple threads
    Set<JsonPipelineOutput> distinctOutputs = Collections.synchronizedSet(new HashSet<JsonPipelineOutput>());

    // create multiple simultaneous threads that subscribe to the same pipeline output
    // and use a CountDownLatch to delay the subscription until all threads have been started
    ExecutorService executorService = Executors.newCachedThreadPool();
    CountDownLatch countDown = new CountDownLatch(100);
    while (countDown.getCount() > 0) {

        executorService.submit(() -> {

            countDown.await();
            distinctOutputs.add(firstStep.getOutput().toBlocking().single());

            return null; // this is required for the lambda to be considered a Callable<Void> and therefore be allowed to throw exceptions
        });

        countDown.countDown();
    }

    executorService.shutdown();
    executorService.awaitTermination(1, TimeUnit.MINUTES);

    // ensure all threads received the same JsonPipelineOutput instance with the expected JSON output
    assertEquals(1, distinctOutputs.size());
    JSONAssert.assertEquals("{id: 123}", firstStep.getStringOutput().toBlocking().first(),
            JSONCompareMode.STRICT);
}
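
The return null statement above is what makes the lambda a Callable rather than a Runnable, which decides both which submit overload is chosen and whether checked exceptions are allowed in the body. A standalone sketch (class and variable names are ours):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitOverloadSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();

        // void body -> submit(Runnable): checked exceptions are not allowed
        pool.submit(() -> System.out.println("runnable"));

        // returning a value (even null) -> submit(Callable): checked exceptions are allowed
        Future<Object> f = pool.submit(() -> {
            Thread.sleep(10); // would be a compile error in the Runnable variant
            return null;
        });
        f.get();

        pool.shutdown();
    }
}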

From source file:learn.jersey.services.BufferedMutatorExample.java

@Override
public int run(String[] args) throws InterruptedException, ExecutionException, TimeoutException {

    /** a callback invoked when an asynchronous write fails. */
    final BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() {
        @Override
        public void onException(RetriesExhaustedWithDetailsException e, BufferedMutator mutator) {
            for (int i = 0; i < e.getNumExceptions(); i++) {
                LOG.info("Failed to sent put " + e.getRow(i) + ".");
            }
        }
    };
    BufferedMutatorParams params = new BufferedMutatorParams(TABLE).listener(listener);

    //
    // step 1: create a single Connection and a BufferedMutator, shared by
    // all worker threads.
    //
    try (final Connection conn = ConnectionFactory.createConnection(getConf());
            final BufferedMutator mutator = conn.getBufferedMutator(params)) {

        /** worker pool that operates on BufferedTable instances */
        final ExecutorService workerPool = Executors.newFixedThreadPool(POOL_SIZE);
        List<Future<Void>> futures = new ArrayList<>(TASK_COUNT);

        for (int i = 0; i < TASK_COUNT; i++) {
            futures.add(workerPool.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    //
                    // step 2: each worker sends edits to the shared
                    // BufferedMutator instance. They all use
                    // the same backing buffer, call-back "listener", and
                    // RPC executor pool.
                    //
                    Put p = new Put(Bytes.toBytes("someRow"));
                    p.addColumn(FAMILY, Bytes.toBytes("someQualifier"), Bytes.toBytes("some value"));
                    mutator.mutate(p);
                    // do work... maybe you want to call mutator.flush()
                    // after many edits to ensure any of
                    // this worker's edits are sent before exiting the
                    // Callable
                    return null;
                }
            }));
        }

        //
        // step 3: clean up the worker pool, shut down.
        //
        for (Future<Void> f : futures) {
            f.get(5, TimeUnit.MINUTES);
        }
        workerPool.shutdown();
    } catch (IOException e) {
        // exception while creating/destroying Connection or BufferedMutator
        LOG.info("exception while creating/destroying Connection or BufferedMutator", e);
    } // BufferedMutator.close() ensures all work is flushed; the custom
      // listener may be invoked from here.
    return 0;
}
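
A pattern that recurs throughout these examples is the orderly shutdown sequence: shutdown() to stop accepting new tasks, awaitTermination() to wait a bounded time for running ones, and shutdownNow() only as a last resort. A minimal sketch, where pool stands for any of the executors above:

pool.shutdown(); // no new tasks are accepted; already-submitted tasks keep running
if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
    pool.shutdownNow(); // interrupt whatever is still running
}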