Example usage for java.util.concurrent ExecutorService invokeAll

List of usage examples for java.util.concurrent ExecutorService invokeAll

Introduction

On this page you can find example usage for java.util.concurrent ExecutorService invokeAll.

Prototype

<T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException;

Document

Executes the given tasks, returning a list of Futures holding their status and results when all complete.
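
For orientation, here is a minimal, self-contained sketch of invokeAll (the class name InvokeAllSketch and the squaring tasks are illustrative, not taken from the examples below). Because invokeAll blocks until every task has completed, each returned Future is already done, and get() returns immediately or rethrows the task's failure wrapped in an ExecutionException.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class InvokeAllSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService pool = Executors.newFixedThreadPool(3);
        try {
            List<Callable<Integer>> tasks = new ArrayList<>();
            for (int i = 1; i <= 3; i++) {
                final int n = i;
                tasks.add(() -> n * n); // each task computes a small independent result
            }
            // Blocks until all tasks have completed; every returned Future is done
            List<Future<Integer>> futures = pool.invokeAll(tasks);
            for (Future<Integer> f : futures) {
                System.out.println(f.get()); // returns immediately or rethrows the task's exception
            }
        } finally {
            pool.shutdown();
        }
    }
}

A timed overload, invokeAll(tasks, timeout, unit), is also available; tasks still unfinished when the timeout elapses are cancelled.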

Usage

From source file:org.omnaest.utils.operation.foreach.ForEach.java

/**
 * @see #map(Operation)
 * @param operation
 * @param executorService
 * @return the {@link IterationResult} holding the mapped results
 * @throws ExecutionException
 */
public <R> IterationResult<R> map(final Operation<R, E> operation, ExecutorService executorService)
        throws ExecutionException {
    IterationResult<R> retval = null;

    List<Callable<R>> callableList = new ArrayList<Callable<R>>();
    for (final E element : this.iterable) {
        callableList.add(new Callable<R>() {
            @Override
            public R call() throws Exception {
                return operation.execute(element);
            }
        });
    }

    List<R> resultList = new ArrayList<R>();
    {
        try {
            List<Future<R>> futureList = executorService.invokeAll(callableList);
            for (Future<R> future : futureList) {
                do {
                    try {
                        resultList.add(future.get());
                    } catch (InterruptedException e) {
                        // interrupted while waiting; keep retrying until the future is done
                    }
                } while (!future.isDone());
            }
        } catch (InterruptedException e) {
            // invokeAll was interrupted before the tasks completed; fall through with an empty result
        }
    }
    retval = new IterationResult<R>(resultList);

    return retval;
}
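
The empty catch blocks above swallow interruption and poll isDone() in a busy loop. Since invokeAll only returns once every task has finished, each future is already done when get() is called; a more conventional variant of the same loop (a sketch, not part of the original project) collects the results in one pass and restores the interrupt flag:

    List<R> resultList = new ArrayList<R>();
    try {
        // invokeAll blocks until all tasks complete, so every future is already done
        for (Future<R> future : executorService.invokeAll(callableList)) {
            resultList.add(future.get()); // ExecutionException propagates; the method already declares it
        }
    } catch (InterruptedException e) {
        // restore the interrupt flag instead of swallowing it
        Thread.currentThread().interrupt();
    }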

From source file:org.apache.stratos.integration.tests.application.MetadataServiceTestCase.java

@Test(timeOut = DEFAULT_APPLICATION_TEST_TIMEOUT, description = "Application startup, activation and metadata service concurrency test", priority = 2)
public void metadataConcurrencyTest() throws Exception {
    log.info("Running MetadataServiceTestCase.metadataConcurrencyTest test method...");
    log.info("Starting multiple clients to add properties");
    ExecutorService taskExecutor = Executors.newFixedThreadPool(5);
    List<Callable<Void>> tasks = new ArrayList<>();
    tasks.add(getInstanceOfCallable(APPLICATION_1_ID, "t1"));
    tasks.add(getInstanceOfCallable(APPLICATION_1_ID, "t2"));
    tasks.add(getInstanceOfCallable(APPLICATION_1_ID, "t3"));
    taskExecutor.invokeAll(tasks);

    for (int i = 0; i < 50; i++) {
        PropertyBean propertyBean = restClient.getApplicationProperty(APPLICATION_1_ID, Integer.toString(i),
                app1AccessToken);
        log.info("Retrieved metadata property: " + gson.toJson(propertyBean));
        List<String> addedValues = new ArrayList<>(Arrays.asList("t1", "t2", "t3"));
        boolean hasPropertiesAdded = propertyBean.getValues().containsAll(addedValues);
        Assert.assertTrue(hasPropertiesAdded,
                String.format("Property values have not been added for [key] %d", i));
    }
    log.info("Metadata service concurrency test completed successfully");
}
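
A design note on the example above: taskExecutor is never shut down. Because invokeAll blocks until all three callables have completed, calling taskExecutor.shutdown() right after invokeAll returns would be enough to release the pool threads once the test finishes.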

From source file:org.broadinstitute.gatk.utils.fasta.CachingIndexedFastaSequenceFileUnitTest.java

@Test(dataProvider = "ParallelFastaTest", enabled = true && !DEBUG, timeOut = 60000)
public void testCachingIndexedFastaReaderParallel(final File fasta, final int cacheSize, final int querySize,
        final int nt) throws FileNotFoundException, InterruptedException {
    final CachingIndexedFastaSequenceFile caching = new CachingIndexedFastaSequenceFile(fasta,
            getCacheSize(cacheSize), true, false);

    logger.warn(String.format("Parallel caching index fasta reader test cacheSize %d querySize %d nt %d",
            caching.getCacheSize(), querySize, nt));
    for (int iterations = 0; iterations < 1; iterations++) {
        final ExecutorService executor = Executors.newFixedThreadPool(nt);
        final Collection<Callable<Object>> tasks = new ArrayList<Callable<Object>>(nt);
        for (int i = 0; i < nt; i++)
            tasks.add(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    testSequential(caching, fasta, querySize);
                    return null;
                }
            });
        executor.invokeAll(tasks);
        executor.shutdownNow();
    }
}

From source file:com.concursive.connect.web.modules.common.social.images.jobs.ImageResizerJob.java

public void execute(JobExecutionContext context) throws JobExecutionException {
    LOG.debug("Starting...");
    SchedulerContext schedulerContext = null;
    Connection db = null;

    // Initial setup
    try {
        schedulerContext = context.getScheduler().getContext();
    } catch (Exception e) {
        LOG.error("ImageResizerJob Exception due to scheduler", e);
        throw new JobExecutionException(e);
    }

    // Process the arrays
    Vector exportList = (Vector) schedulerContext.get(IMAGE_RESIZER_ARRAY);

    while (exportList.size() > 0) {

        // Holds the transactions to be threaded
        List<TransactionTask> renderTasks = new ArrayList<TransactionTask>();

        // Pre-process the files using a database connection
        try {
            db = SchedulerUtils.getConnection(schedulerContext);
            // The imageResizerBean contains the image handle to be processed
            ImageResizerBean bean = (ImageResizerBean) exportList.remove(0);

            LOG.debug("Preparing thumbnails for FileItem (" + bean.getFileItemId() + ")... " + bean.getWidth()
                    + "x" + bean.getHeight());
            // Load the fileItem
            FileItem fileItem = new FileItem(db, bean.getFileItemId());
            if (bean.getWidth() > 0 || bean.getHeight() > 0) {
                // A specific size needs to be rendered
                renderTasks.add(new TransactionTask(bean, fileItem, bean.getWidth(), bean.getHeight(), false));
            } else {
                // No specific size given, so generate several standard sizes of the image
                renderTasks.add(new TransactionTask(bean, fileItem, 640, 480, false));
                renderTasks.add(new TransactionTask(bean, fileItem, 210, 150, false));
                renderTasks.add(new TransactionTask(bean, fileItem, 200, 200, false));
                renderTasks.add(new TransactionTask(bean, fileItem, 133, 133, true));
                renderTasks.add(new TransactionTask(bean, fileItem, 100, 100, false));
                renderTasks.add(new TransactionTask(bean, fileItem, 75, 75, false));
                renderTasks.add(new TransactionTask(bean, fileItem, 50, 50, false));
                renderTasks.add(new TransactionTask(bean, fileItem, 45, 45, false));
                renderTasks.add(new TransactionTask(bean, fileItem, 30, 30, false));
            }
        } catch (Exception e) {
            LOG.error("ImageResizerJob Exception", e);
            continue;
        } finally {
            SchedulerUtils.freeConnection(schedulerContext, db);
        }

        int threads = 2;
        // Process the files
        ExecutorService executor = null;
        List<Future<Thumbnail>> futures = null;
        try {
            executor = Executors.newFixedThreadPool(threads);
            // NOTE: this wrapper fix is for Java 1.5
            final Collection<Callable<Thumbnail>> wrapper = Collections
                    .<Callable<Thumbnail>>unmodifiableCollection(renderTasks);
            LOG.debug("Generating thumbnails... " + renderTasks.size());
            futures = executor.invokeAll(wrapper);
        } catch (InterruptedException e) {
            LOG.error("ImageResizerJob executor exception", e);
            if (executor != null) {
                executor.shutdown();
            }
            throw new JobExecutionException(e);
        }

        // Insert the thumbnails using the database connection
        try {
            db = SchedulerUtils.getConnection(schedulerContext);
            LOG.debug("Inserting thumbnails into database... " + futures.size());
            // Process the executor results
            for (Future<Thumbnail> f : futures) {
                Thumbnail thumbnail = f.get();
                thumbnail.insert(db);
            }
        } catch (Exception e) {
            LOG.error("ImageResizerJob insert thumbnails into database exception", e);
            throw new JobExecutionException(e);
        } finally {
            SchedulerUtils.freeConnection(schedulerContext, db);
            if (executor != null) {
                executor.shutdown();
            }
        }
    }
}

From source file:org.apache.hadoop.yarn.util.TestFSDownload.java

@Test(timeout = 60000)
public void testDownloadPublicWithStatCache()
        throws IOException, URISyntaxException, InterruptedException, ExecutionException {
    final Configuration conf = new Configuration();
    FileContext files = FileContext.getLocalFSFileContext(conf);
    Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));

    // if test directory doesn't have ancestor permission, skip this test
    FileSystem f = basedir.getFileSystem(conf);
    assumeTrue(FSDownload.ancestorsHaveExecutePermissions(f, basedir, null));

    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());

    int size = 512;

    final ConcurrentMap<Path, AtomicInteger> counts = new ConcurrentHashMap<Path, AtomicInteger>();
    final CacheLoader<Path, Future<FileStatus>> loader = FSDownload.createStatusCacheLoader(conf);
    final LoadingCache<Path, Future<FileStatus>> statCache = CacheBuilder.newBuilder()
            .build(new CacheLoader<Path, Future<FileStatus>>() {
                public Future<FileStatus> load(Path path) throws Exception {
                    // increment the count
                    AtomicInteger count = counts.get(path);
                    if (count == null) {
                        count = new AtomicInteger(0);
                        AtomicInteger existing = counts.putIfAbsent(path, count);
                        if (existing != null) {
                            count = existing;
                        }
                    }
                    count.incrementAndGet();

                    // use the default loader
                    return loader.load(path);
                }
            });

    // test FSDownload.isPublic() concurrently
    final int fileCount = 3;
    List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
    for (int i = 0; i < fileCount; i++) {
        Random rand = new Random();
        long sharedSeed = rand.nextLong();
        rand.setSeed(sharedSeed);
        System.out.println("SEED: " + sharedSeed);
        final Path path = new Path(basedir, "test-file-" + i);
        createFile(files, path, size, rand);
        final FileSystem fs = path.getFileSystem(conf);
        final FileStatus sStat = fs.getFileStatus(path);
        tasks.add(new Callable<Boolean>() {
            public Boolean call() throws IOException {
                return FSDownload.isPublic(fs, path, sStat, statCache);
            }
        });
    }

    ExecutorService exec = Executors.newFixedThreadPool(fileCount);
    try {
        List<Future<Boolean>> futures = exec.invokeAll(tasks);
        // files should be public
        for (Future<Boolean> future : futures) {
            assertTrue(future.get());
        }
        // for each path exactly one file status call should be made
        for (AtomicInteger count : counts.values()) {
            assertSame(count.get(), 1);
        }
    } finally {
        exec.shutdown();
    }
}

From source file:org.apache.sysml.runtime.io.ReaderTextCSVParallel.java

private MatrixBlock computeCSVSizeAndCreateOutputMatrixBlock(InputSplit[] splits, Path path, JobConf job,
        boolean hasHeader, String delim, long estnnz) throws IOException, DMLRuntimeException {
    int nrow = 0;
    int ncol = 0;

    FileInputFormat.addInputPath(job, path);
    TextInputFormat informat = new TextInputFormat();
    informat.configure(job);

    // count the number of columns in the first row
    LongWritable key = new LongWritable();
    Text oneLine = new Text();
    RecordReader<LongWritable, Text> reader = informat.getRecordReader(splits[0], job, Reporter.NULL);
    try {
        if (reader.next(key, oneLine)) {
            String cellStr = oneLine.toString().trim();
            ncol = StringUtils.countMatches(cellStr, delim) + 1;
        }
    } finally {
        IOUtilFunctions.closeSilently(reader);
    }

    // count rows in parallel per split
    try {
        ExecutorService pool = Executors.newFixedThreadPool(_numThreads);
        ArrayList<CountRowsTask> tasks = new ArrayList<CountRowsTask>();
        for (InputSplit split : splits) {
            tasks.add(new CountRowsTask(split, informat, job, hasHeader));
            hasHeader = false;
        }
        pool.invokeAll(tasks);
        pool.shutdown();

        // collect row counts for offset computation
        // fail fast if any task was unsuccessful
        _offsets = new SplitOffsetInfos(tasks.size());
        for (CountRowsTask rt : tasks) {
            if (!rt.getReturnCode())
                throw new IOException("Count task for csv input failed: " + rt.getErrMsg());
            _offsets.setOffsetPerSplit(tasks.indexOf(rt), nrow);
            _offsets.setLenghtPerSplit(tasks.indexOf(rt), rt.getRowCount());
            nrow = nrow + rt.getRowCount();
        }
    } catch (Exception e) {
        throw new IOException("Threadpool Error " + e.getMessage(), e);
    }

    // allocate target matrix block based on given size;
    // the sparse representation must be allocated as well, since inserts into the target are lock-free
    return createOutputMatrixBlock(nrow, ncol, nrow, ncol, estnnz, true, true);
}
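
Note that this example discards the futures returned by invokeAll and records success on the CountRowsTask objects instead, so each task must capture its own errors. A minimal sketch of the alternative, surfacing failures through the futures (buildCountTasks is a hypothetical helper, and the tasks are assumed to be Callable<Long>):

    ExecutorService pool = Executors.newFixedThreadPool(_numThreads);
    List<Callable<Long>> tasks = buildCountTasks(splits); // hypothetical: one row-count callable per split
    List<Future<Long>> futures = pool.invokeAll(tasks);
    pool.shutdown();
    long nrow = 0;
    for (Future<Long> f : futures) {
        try {
            nrow += f.get(); // rethrows a task's failure wrapped in ExecutionException
        } catch (ExecutionException e) {
            throw new IOException("Count task for csv input failed", e.getCause());
        }
    }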

From source file:org.apache.carbondata.sdk.file.ConcurrentSdkReaderTest.java

@Test
public void testReadParallely() throws IOException, InterruptedException {
    int numFiles = 10;
    int numRowsPerFile = 10;
    short numThreads = 4;
    writeDataMultipleFiles(numFiles, numRowsPerFile);
    long count;

    // Sequential Reading
    CarbonReader reader = CarbonReader.builder(dataDir).build();
    try {
        count = 0;
        long start = System.currentTimeMillis();
        while (reader.hasNext()) {
            reader.readNextRow();
            count += 1;
        }
        long end = System.currentTimeMillis();
        System.out.println("[Sequential read] Time: " + (end - start) + " ms");
        Assert.assertEquals(numFiles * numRowsPerFile, count);
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    } finally {
        reader.close();
    }

    // Concurrent Reading
    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    try {
        CarbonReader reader2 = CarbonReader.builder(dataDir).build();
        List<CarbonReader> multipleReaders = reader2.split(numThreads);
        try {
            List<ReadLogic> tasks = new ArrayList<>();
            List<Future<Long>> results;
            count = 0;

            for (CarbonReader reader_i : multipleReaders) {
                tasks.add(new ReadLogic(reader_i));
            }
            long start = System.currentTimeMillis();
            results = executorService.invokeAll(tasks);
            for (Future result_i : results) {
                count += (long) result_i.get();
            }
            long end = System.currentTimeMillis();
            System.out.println("[Parallel read] Time: " + (end - start) + " ms");
            Assert.assertEquals(numFiles * numRowsPerFile, count);
        } catch (Exception e) {
            e.printStackTrace();
            Assert.fail(e.getMessage());
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    } finally {
        executorService.shutdown();
        executorService.awaitTermination(10, TimeUnit.MINUTES);
    }
}

From source file:com.mikebl71.android.websms.connector.cabbage.CabbageConnector.java

/**
 * Called to update balance. Updates the sub-connectors' balances concurrently.
 */
@Override
protected void doUpdate(final Context context, final Intent intent) {
    final ConnectorSpec cs = this.getSpec(context);
    final int subCount = cs.getSubConnectorCount();
    final SubConnectorSpec[] subs = cs.getSubConnectors();

    final List<Callable<Void>> tasks = new ArrayList<Callable<Void>>(subCount);
    for (SubConnectorSpec sub : subs) {
        final String subId = sub.getID();

        tasks.add(new Callable<Void>() {
            public Void call() throws Exception {
                // clone intent and assign it to this sub connector
                final Intent subIntent = new Intent(intent);
                ConnectorCommand cmd = new ConnectorCommand(subIntent);
                cmd.setSelectedSubConnector(subId);
                cmd.setToIntent(subIntent);
                // update balance for this subconnector
                sendData(context, new ConnectorCommand(subIntent));
                return null;
            }
        });
    }

    try {
        final ExecutorService executor = Executors.newFixedThreadPool(subCount);
        // execute all updates in parallel and wait till all are complete
        final List<Future<Void>> results = executor.invokeAll(tasks);
        executor.shutdownNow();

        // if any of the updates failed then re-throw the first exception
        // (which will then be returned to WebSMS)
        for (int idx = 0; idx < results.size(); idx++) {
            Future<Void> result = results.get(idx);
            try {
                result.get();
            } catch (ExecutionException ex) {
                String subName = subs[idx].getName();
                throw new WebSMSException(
                        subName + ": " + ConnectorSpec.convertErrorMessage(context, ex.getCause()));
            }
        }
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
    }
}

From source file:org.red5.net.websocket.WebSocketServerTest.java

@SuppressWarnings("unused")
@Test
public void testMultiThreaded() throws Throwable {
    log.info("testMultiThreaded enter");
    // create the server instance
    Thread server = new Thread() {
        @Override
        public void run() {
            log.debug("Server thread run");
            try {
                WSServer.main(null);
            } catch (Exception e) {
                log.error("Error in server thread", e);
            }
            log.debug("Server thread exit");
        }
    };
    server.setDaemon(true);
    server.start();
    // add plugin to the registry
    WebSocketPlugin plugin = new WebSocketPlugin();
    PluginRegistry.register(plugin);
    // start plugin
    plugin.doStart();
    // create a scope for the manager
    IScope appScope = new GlobalScope();
    // create an app
    MultiThreadedApplicationAdapter app = new MultiThreadedApplicationAdapter();
    app.setScope(appScope);
    // add the app
    plugin.setApplication(app);
    // get the manager
    WebSocketScopeManager manager = plugin.getManager(appScope);
    manager.setApplication(appScope);
    // wait for server
    while (!WSServer.isListening()) {
        Thread.sleep(10L);
    }
    // how many threads
    int threads = 1;
    List<Worker> tasks = new ArrayList<Worker>(threads);
    for (int t = 0; t < threads; t++) {
        tasks.add(new Worker());
    }
    ExecutorService executorService = Executors.newFixedThreadPool(threads);
    // invokeAll() blocks until all tasks have run...
    long start = System.nanoTime();
    List<Future<Object>> futures = executorService.invokeAll(tasks);
    log.info("Runtime: {} ns", (System.nanoTime() - start));
    for (Worker r : tasks) {
        // loop through and check results

    }
    Thread.sleep(2000L);
    // stop server
    server.interrupt();
    WSServer.stop();
    // stop plugin
    PluginRegistry.shutdown();
    log.info("testMultiThreaded exit");
}

From source file:com.aliyun.odps.ship.upload.DshipUpload.java

private void uploadBlock() throws IOException, TunnelException, ParseException {
    int threads = Integer.valueOf(DshipContext.INSTANCE.get(Constants.THREADS));
    ExecutorService executors = Executors.newFixedThreadPool(threads);
    ArrayList<Callable<Long>> callList = new ArrayList<Callable<Long>>();
    for (BlockInfo block : blockIndex) {
        final BlockUploader uploader = new BlockUploader(block, tunnelUploadSession, sessionHistory);
        Callable<Long> call = new Callable<Long>() {
            @Override
            public Long call() throws Exception {
                uploader.upload();
                return 0L;
            }
        };

        callList.add(call);
    }

    try {
        List<Future<Long>> futures = executors.invokeAll(callList);
        ArrayList<String> failedBlock = new ArrayList<String>();
        for (int i = 0; i < futures.size(); ++i) {
            try {
                futures.get(i).get();
            } catch (ExecutionException e) {
                e.printStackTrace();
                failedBlock.add(String.valueOf(i));
            }
        }
        if (!failedBlock.isEmpty()) {
            throw new TunnelException("Block ID:" + StringUtils.join(failedBlock, ",") + " Failed.");
        }
    } catch (InterruptedException e) {
        throw new UserInterruptException(e.getMessage());
    }
}