Example usage for java.util.concurrent ExecutorService submit

Introduction

This page lists example usages of java.util.concurrent ExecutorService.submit(Runnable), drawn from the source files shown below.

Prototype

Future<?> submit(Runnable task);

Document

Submits a Runnable task for execution and returns a Future representing that task.
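
A minimal, self-contained sketch of the call itself (class name and message text are illustrative, not taken from the examples below): submit a Runnable, block on the returned Future<?> until the task finishes, then shut the executor down.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class SubmitRunnableSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // submit(Runnable) returns a Future<?> whose get() yields null on successful completion.
        Future<?> future = executor.submit(new Runnable() {
            @Override
            public void run() {
                System.out.println("task running on " + Thread.currentThread().getName());
            }
        });
        // Blocks until the task completes; a failure inside run() surfaces as an ExecutionException.
        future.get();
        executor.shutdown();
        executor.awaitTermination(10L, TimeUnit.SECONDS);
    }
}

If the task needs to return a value, the submit(Callable<T>) overload is used instead, as several of the examples below do.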

Usage

From source file:com.raycloud.cobarclient.mybatis.spring.MySqlSessionTemplate.java

/**
 * TODO Multiple Thread Transaction
 *
 * @param statement
 * @param collection
 * @param <T>
 * @return
 */
private final <T extends Object> int batchAsync(final String statement, Collection<T> collection) {
    Map<Shard, List<T>> classifiedEntities = classify(statement, collection);
    final CountDownLatch latch = new CountDownLatch(classifiedEntities.size());
    List<Future<Integer>> futures = new ArrayList<Future<Integer>>();
    final MultipleCauseException throwables = new MultipleCauseException();
    ExecutorService _executor = MtContextExecutors.getMtcExecutorService(executor);
    SqlSessionHolder holder = SqlSessionUtils
            .currentSqlSessionHolder(MySqlSessionTemplate.this.sqlSessionFactory);
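    // Submit one batch task per shard; the latch (sized to the number of shards) tracks their completion.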
    for (final Map.Entry<Shard, List<T>> entry : classifiedEntities.entrySet()) {
        futures.add(_executor.submit(new BatchAsyncCallable(entry, statement, latch, throwables, holder)));
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        throw new ConcurrencyFailureException("interrupted when processing data access request in concurrency",
                e);
    }
    if (!throwables.getCauses().isEmpty()) {
        throw new TransientDataAccessResourceException(
                "one or more errors when performing data access operations" + " against multiple shards",
                throwables);
    }
    return counter(getFutureResults(futures));
}

From source file:com.google.cloud.hadoop.gcsio.GoogleCloudStorageIntegrationTest.java

/**
 * Creates objects with the given names in the given bucket.
 */
private void createObjects(final String bucketName, String[] objectNames) throws IOException {

    final ExecutorService threadPool = Executors.newCachedThreadPool();
    final CountDownLatch counter = new CountDownLatch(objectNames.length);
    List<Future<?>> futures = new ArrayList<>();
    // Do each creation asynchronously.
    for (final String objectName : objectNames) {
        Future<?> future = threadPool.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    if (objectName.endsWith(GoogleCloudStorage.PATH_DELIMITER)) {
                        mkdir(bucketName, objectName);
                    } else {
                        // Just use objectName as file contents.
                        writeTextFile(bucketName, objectName, objectName);
                    }
                } catch (Throwable ioe) {
                    throw new RuntimeException(
                            String.format("Exception creating %s/%s", bucketName, objectName), ioe);
                } finally {
                    counter.countDown();
                }
            }
        });
        futures.add(future);
    }

    try {
        counter.await();
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted while awaiting object creation!", ie);
    } finally {
        threadPool.shutdown();
        try {
            if (!threadPool.awaitTermination(10L, TimeUnit.SECONDS)) {
                System.err.println("Failed to awaitTermination! Forcing executor shutdown.");
                threadPool.shutdownNow();
            }
        } catch (InterruptedException ie) {
            throw new IOException("Interrupted while shutting down threadpool!", ie);
        }
    }

    for (int i = 0; i < futures.size(); i++) {
        try {
            // We should already be done.
            futures.get(i).get(10, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            throw new IOException(
                    String.format("Creation of file %s failed with exception", objectNames[i]), e);
        }
    }
}

From source file:com.cloudera.oryx.als.common.factorizer.als.AlternatingLeastSquares.java

private void addWorkers(LongObjectMap<LongFloatMap> R, LongObjectMap<float[]> M, RealMatrix MTM,
        LongObjectMap<float[]> MTags, ExecutorService executor, Collection<Future<?>> futures) {
    if (R != null) {
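        // Batch the rows of R into fixed-size work units and submit each one to the executor.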
        List<Pair<Long, LongFloatMap>> workUnit = Lists.newArrayListWithCapacity(WORK_UNIT_SIZE);
        for (LongObjectMap.MapEntry<LongFloatMap> entry : R.entrySet()) {
            workUnit.add(new Pair<Long, LongFloatMap>(entry.getKey(), entry.getValue()));
            if (workUnit.size() == WORK_UNIT_SIZE) {
                futures.add(executor.submit(new Worker(features, M, MTM, MTags, workUnit)));
                workUnit = Lists.newArrayListWithCapacity(WORK_UNIT_SIZE);
            }
        }
        if (!workUnit.isEmpty()) {
            futures.add(executor.submit(new Worker(features, M, MTM, MTags, workUnit)));
        }
    }
}

From source file:com.iyonger.apm.web.model.AgentManager.java

public void running(final GrinderProperties grinderProperties, SingleConsole singleConsole,
        Set<AgentIdentity> availables) {
    ExecutorService execService = null;
    try {
        // Make the agents connect to console.
        grinderProperties.setInt(GrinderProperties.CONSOLE_PORT, singleConsole.getConsolePort());
        execService = ExecutorFactory.createThreadPool("agentStarter", NUMBER_OF_THREAD);

        for (final AgentIdentity eachAgentIdentity : availables) {
            execService.submit(new Runnable() {
                @Override
                public void run() {
                    agentControllerServerDaemon.startAgent(grinderProperties, eachAgentIdentity);
                }
            });
        }

    } finally {
        if (execService != null) {
            execService.shutdown();
        }
    }
}

From source file:net.myrrix.online.factorizer.als.AlternatingLeastSquares.java

private void addWorkers(FastByIDMap<FastByIDFloatMap> R, FastByIDMap<float[]> M, RealMatrix MTM,
        FastByIDMap<float[]> MTags, ExecutorService executor, Collection<Future<?>> futures) {
    if (R != null) {
        List<Pair<Long, FastByIDFloatMap>> workUnit = Lists.newArrayListWithCapacity(WORK_UNIT_SIZE);
        for (FastByIDMap.MapEntry<FastByIDFloatMap> entry : R.entrySet()) {
            workUnit.add(new Pair<Long, FastByIDFloatMap>(entry.getKey(), entry.getValue()));
            if (workUnit.size() == WORK_UNIT_SIZE) {
                futures.add(executor.submit(new Worker(features, M, MTM, MTags, workUnit)));
                workUnit = Lists.newArrayListWithCapacity(WORK_UNIT_SIZE);
            }
        }
        if (!workUnit.isEmpty()) {
            futures.add(executor.submit(new Worker(features, M, MTM, MTags, workUnit)));
        }
    }
}

From source file:com.netflix.curator.framework.recipes.locks.TestReaper.java

@Test
public void testSparseUseNoReap() throws Exception {
    final int THRESHOLD = 3000;

    Timing timing = new Timing();
    Reaper reaper = null;
    Future<Void> watcher = null;
    CuratorFramework client = makeClient(timing, null);
    try {
        client.start();
        client.create().creatingParentsIfNeeded().forPath("/one/two/three");

        Assert.assertNotNull(client.checkExists().forPath("/one/two/three"));

        final Queue<Reaper.PathHolder> holders = new ConcurrentLinkedQueue<Reaper.PathHolder>();
        final ExecutorService pool = Executors.newCachedThreadPool();
        ScheduledExecutorService service = new ScheduledThreadPoolExecutor(1) {
            @Override
            public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
                final Reaper.PathHolder pathHolder = (Reaper.PathHolder) command;
                holders.add(pathHolder);
                final ScheduledFuture<?> f = super.schedule(command, delay, unit);
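                // After scheduling, wait on the shared pool for the command to finish, then drop its holder from the queue.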
                pool.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {
                        f.get();
                        holders.remove(pathHolder);
                        return null;
                    }
                });
                return f;
            }
        };

        reaper = new Reaper(client, service, THRESHOLD);
        reaper.start();
        reaper.addPath("/one/two/three");

        long start = System.currentTimeMillis();
        boolean emptyCountIsCorrect = false;
        while (((System.currentTimeMillis() - start) < timing.forWaiting().milliseconds())
                && !emptyCountIsCorrect) // need to loop as the Holder can go in/out of the Reaper's DelayQueue
        {
            for (Reaper.PathHolder holder : holders) {
                if (holder.path.endsWith("/one/two/three")) {
                    emptyCountIsCorrect = (holder.emptyCount > 0);
                    break;
                }
            }
            Thread.sleep(1);
        }
        Assert.assertTrue(emptyCountIsCorrect);

        client.create().forPath("/one/two/three/foo");

        Thread.sleep(2 * (THRESHOLD / Reaper.EMPTY_COUNT_THRESHOLD));
        Assert.assertNotNull(client.checkExists().forPath("/one/two/three"));
        client.delete().forPath("/one/two/three/foo");

        Thread.sleep(THRESHOLD);
        timing.sleepABit();

        Assert.assertNull(client.checkExists().forPath("/one/two/three"));
    } finally {
        if (watcher != null) {
            watcher.cancel(true);
        }
        IOUtils.closeQuietly(reaper);
        IOUtils.closeQuietly(client);
    }
}

From source file:com.github.lindenb.mscheduler.MScheduler.java

protected int updateJobStatus(final Task task) {
    final StatusChecker call = createStatusChecker(task);
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final Future<Integer> future = executor.submit(call);
    int return_status = -1;

    try {
        //allow 10 seconds to get status
        return_status = future.get(10, TimeUnit.SECONDS);

        return return_status;
    } catch (TimeoutException e) {
        future.cancel(true);
        LOG.error("Timeout for gettting job status and " + task);
        return -1;
    } catch (Exception e) {
        future.cancel(true);
        LOG.error("Failure:", e);
        return -1;
    } finally {
        executor.shutdown();
    }
}

From source file:com.amazonaws.services.kinesis.aggregators.datastore.DynamoQueryEngine.java

public List<TableKeyStructure> parallelQueryKeys(QueryKeyScope scope, int threads) throws Exception {
    List<ParallelKeyScanWorker> workers = new ArrayList<>();
    Collection<Future<?>> workerStatus = new ArrayList<>();
    List<TableKeyStructure> output = new ArrayList<>();
    int totalResultsProcessed = 0;

    // set up the executor thread pool
    ExecutorService executor = Executors.newFixedThreadPool(threads);

    // create workers for each segment that we need to do queries against
    for (int i = 0; i < threads; i++) {
        ParallelKeyScanWorker worker = new ParallelKeyScanWorker(this.tableName, i, threads, scope,
                this.labelAttribute, this.dateAttribute);
        workers.add(worker);
        workerStatus.add(executor.submit(worker));
    }

    for (Future<?> f : workerStatus) {
        f.get();
    }
    executor.shutdown();

    for (ParallelKeyScanWorker w : workers) {
        // throw any exceptions the worker incurred
        w.throwExceptions();

        if (w.getResultCount() > 0) {
            output.addAll(w.getOutput());
        }

        totalResultsProcessed += w.getResultsProcessed();
    }

    LOG.info(String.format("Key Extraction Complete - Processed %s Key Items", totalResultsProcessed));

    return output;
}

From source file:org.piraso.server.service.ResponseLoggerServiceImplTest.java

@Test
public void testWaitAndStop() throws Exception {
    final AtomicBoolean fail = new AtomicBoolean(false);
    ExecutorService executor = Executors.newFixedThreadPool(2);

    Runnable startServiceRunnable = new Runnable() {
        public void run() {
            try {
                service.start();
            } catch (Exception e) {
                fail.set(true);
                e.printStackTrace();
            }
        }
    };

    Runnable logMessagesRunnable = new Runnable() {
        public void run() {
            try {

                service.stopAndWait(3000l);
            } catch (Exception e) {
                fail.set(true);
                e.printStackTrace();
            }
        }
    };

    Future future = executor.submit(startServiceRunnable);
    executor.submit(logMessagesRunnable);

    future.get();
    executor.shutdown();

    if (fail.get()) {
        fail("failure see exception trace.");
    }

    // no harm invoking it again
    service.stopAndWait(1000l);

    assertFalse(service.isAlive());
}

From source file:com.netflix.curator.framework.recipes.queue.TestBoundedDistributedQueue.java

@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
@Test
public void testMulti() throws Exception {
    final String PATH = "/queue";
    final int CLIENT_QTY = 4;
    final int MAX_ITEMS = 10;
    final int ADD_ITEMS = MAX_ITEMS * 100;
    final int SLOP_FACTOR = 2;

    final QueueConsumer<String> consumer = new QueueConsumer<String>() {
        @Override
        public void consumeMessage(String message) throws Exception {
            Thread.sleep(10);
        }

        @Override
        public void stateChanged(CuratorFramework client, ConnectionState newState) {
        }
    };

    final Timing timing = new Timing();
    final ExecutorService executor = Executors.newCachedThreadPool();
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executor);

    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            timing.session(), timing.connection(), new RetryOneTime(1));
    try {
        client.start();
        client.create().forPath(PATH);

        final CountDownLatch isWaitingLatch = new CountDownLatch(1);
        final AtomicBoolean isDone = new AtomicBoolean(false);
        final List<Integer> counts = new CopyOnWriteArrayList<Integer>();
        final Object lock = new Object();
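        // Background task: watch the queue path and record its child count each time the watcher fires.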
        executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Watcher watcher = new Watcher() {
                    @Override
                    public void process(WatchedEvent event) {
                        synchronized (lock) {
                            lock.notifyAll();
                        }
                    }
                };

                while (!Thread.currentThread().isInterrupted() && client.isStarted() && !isDone.get()) {
                    synchronized (lock) {
                        int size = client.getChildren().usingWatcher(watcher).forPath(PATH).size();
                        counts.add(size);
                        isWaitingLatch.countDown();
                        lock.wait();
                    }
                }
                return null;
            }
        });
        isWaitingLatch.await();

        for (int i = 0; i < CLIENT_QTY; ++i) {
            final int index = i;
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = null;
                    DistributedQueue<String> queue = null;

                    try {
                        client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
                                timing.connection(), new RetryOneTime(1));
                        client.start();
                        queue = QueueBuilder.builder(client, consumer, serializer, PATH).executor(executor)
                                .maxItems(MAX_ITEMS).putInBackground(false).lockPath("/locks").buildQueue();
                        queue.start();

                        for (int i = 0; i < ADD_ITEMS; ++i) {
                            queue.put("" + index + "-" + i);
                        }
                    } finally {
                        IOUtils.closeQuietly(queue);
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.take().get();
        }

        isDone.set(true);
        synchronized (lock) {
            lock.notifyAll();
        }

        for (int count : counts) {
            Assert.assertTrue(counts.toString(), count <= (MAX_ITEMS * SLOP_FACTOR));
        }
    } finally {
        executor.shutdownNow();
        IOUtils.closeQuietly(client);
    }
}