Example usage for java.util.concurrent ExecutorService invokeAll

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService invokeAll.

Prototype

<T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException;

Document

Executes the given tasks, returning a list of Futures holding their status and results when all complete.
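
Before the collected examples under Usage, here is a minimal, self-contained sketch of the typical invokeAll pattern: submit a batch of Callables, block until every task has completed, then read each Future. The pool size and the task bodies below are illustrative assumptions only; they are not taken from any of the projects quoted in the Usage section.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class InvokeAllSketch {
    public static void main(String[] args) throws Exception {
        // Minimal sketch: the pool size and the squaring tasks are illustrative only.
        ExecutorService executor = Executors.newFixedThreadPool(4);
        try {
            // Build a batch of independent tasks.
            List<Callable<Integer>> tasks = new ArrayList<>();
            for (int i = 1; i <= 10; i++) {
                final int n = i;
                tasks.add(() -> n * n);
            }

            // invokeAll blocks until every task has completed (or the calling
            // thread is interrupted) and returns the Futures in the same order
            // as the iterator of the task collection.
            List<Future<Integer>> results = executor.invokeAll(tasks);

            int sum = 0;
            for (Future<Integer> result : results) {
                // get() does not block here because all tasks are already done,
                // but it rethrows a task's exception wrapped in an ExecutionException.
                sum += result.get();
            }
            System.out.println("Sum of squares: " + sum);
        } finally {
            executor.shutdown();
        }
    }
}

Note that invokeAll itself waits for completion, so every returned Future is already done; polling isDone afterwards (as some of the examples below do) is not strictly necessary.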

Usage

From source file:org.silverpeas.core.util.DBUtilIntegrationTest.java

@Test
public void nextUniqueIdUpdateForAnExistingTablesShouldWorkAndConcurrency() throws Exception {
    long startTime = System.currentTimeMillis();
    try {
        final int nbProcesses = 150;
        final int nbSetOfThreadsPerProcess = 15;
        final List<Object> count = new ArrayList<>();
        final List<Callable<org.apache.commons.lang3.tuple.Pair<String, Integer>>> listsOfGetNextId = new ArrayList<>(
                nbProcesses);
        for (int i = 0; i < nbProcesses; i++) {
            listsOfGetNextId.add(() -> {
                String tableName;
                synchronized (count) {
                    count.add("");
                    tableName = "User_" + count.size() + "_table";
                }
                return org.apache.commons.lang3.tuple.Pair.of(tableName,
                        nextUniqueIdUpdateForAnExistingTableShouldWorkAndConcurrency(tableName,
                                nbSetOfThreadsPerProcess));
            });
        }
        ExecutorService executorService = Executors.newFixedThreadPool(10);
        List<org.apache.commons.lang3.tuple.Pair<String, Integer>> tableNextIdsInError = new ArrayList<>();
        try {
            for (Future<org.apache.commons.lang3.tuple.Pair<String, Integer>> aTreatment : executorService
                    .invokeAll(listsOfGetNextId)) {
                org.apache.commons.lang3.tuple.Pair<String, Integer> tableIdValue = aTreatment.get();
                if (tableIdValue.getRight() != nbSetOfThreadsPerProcess) {
                    if (tableNextIdsInError.isEmpty()) {
                        Logger.getAnonymousLogger().severe("Some errors...");
                    }
                    tableNextIdsInError.add(tableIdValue);
                    Logger.getAnonymousLogger().severe("Next id value must be " + nbSetOfThreadsPerProcess
                            + " for table " + tableIdValue.getLeft() + ", but was " + tableIdValue.getRight());
                }
            }
        } finally {
            executorService.shutdown();
        }
        if (!tableNextIdsInError.isEmpty()) {
            fail("The next id of " + tableNextIdsInError.size() + " tables is in error.");
        }
    } finally {
        Logger.getAnonymousLogger()
                .info("Test duration of " + (System.currentTimeMillis() - startTime) + " ms");
    }
}

From source file:io.ecarf.core.cloud.task.processor.dictionary.AssembleDictionaryTask.java

@Override
public void run() throws IOException {

    log.info("Assembling dictionary, memory usage: " + Utils.getMemoryUsageInGB() + "GB");

    Stopwatch stopwatch = Stopwatch.createStarted();

    List<StorageObject> objects = this.cloudService.listCloudStorageObjects(bucket);

    //Set<String> files = new HashSet<>();

    List<Item> items = new ArrayList<>();

    for (StorageObject object : objects) {

        String filename = object.getName();

        if (filename.endsWith(FilenameUtils.KRYO_SERIALIZED_EXT)) {
            //files.add(filename);
            items.add(new Item(filename, object.getSize().longValue()));
        }
    }

    log.info("Found " + items.size() + ", serialized files");

    int processors = Runtime.getRuntime().availableProcessors();

    BinPackingPartition function = new BinPackingPartition(items);
    function.setMaxBinItems((long) processors);
    List<Partition> partitions = function.partition();

    TermDictionary dictionary = TermDictionary.populateRDFOWLData(new TermDictionaryConcurrent());

    List<Callable<Void>> tasks = getSubTasks(partitions, dictionary);

    try {

        // check if we only have one file to process
        if (tasks.size() == 1) {

            tasks.get(0).call();

        } else if (processors == 1) {
            // only one process then process synchronously

            for (Callable<Void> task : tasks) {
                task.call();
            }

        } else {

            // multiple cores
            ExecutorService executor = Utils.createFixedThreadPool(processors);

            try {

                executor.invokeAll(tasks);

            } finally {
                executor.shutdown();
            }
        }

        tasks = null;

    } catch (Exception e) {
        log.error("Failed to process multiple files", e);
        throw new IOException(e);

    }

    int dicSize = dictionary.size();

    log.info("Successfully assembled dictionary with size: " + dicSize + ", max resourceId: "
            + dictionary.getLargestResourceId() + ", memory usage: " + Utils.getMemoryUsageInGB() + "GB"
            + ", timer: " + stopwatch);

    // extract the terms and encode the schema if needed
    if (StringUtils.isNotBlank(this.schemaFile) && StringUtils.isNotBlank(this.schemaBucket)) {
        this.encodeSchema(dictionary);
    }

    // encode the term stats file if needed
    if (StringUtils.isNotBlank(this.termStatsFile) && StringUtils.isNotBlank(this.encodedTermStatsFile)) {
        this.encodeTermsStats(dictionary);
    }

    // if no name provided for the dictionary file then create a default
    if (StringUtils.isBlank(this.dictionaryFile)) {
        this.dictionaryFile = this.cloudService.getInstanceId() + '_'
                + FilenameUtils.getSerializedGZipedDictionaryFilename();
    }

    this.dictionaryFile = FilenameUtils.getLocalFilePath(this.dictionaryFile);

    dictionary = ((ConcurrentDictionary) dictionary).getNonConcurrentDictionary();

    log.info("Successfully created non concurrent dictionary for serialization, memory usage: "
            + Utils.getMemoryUsageInGB() + "GB" + ", timer: " + stopwatch);

    dictionary.toFile(dictionaryFile, true);

    dictionary = null;

    log.info("Successfully serialized dictionary with size: " + dicSize + ", memory usage: "
            + Utils.getMemoryUsageInGB() + "GB" + ", timer: " + stopwatch);

    if (StringUtils.isBlank(this.targetBucket)) {
        this.targetBucket = bucket;
    }

    this.cloudService.uploadFileToCloudStorage(dictionaryFile, this.targetBucket);

    log.info("Successfully assembled, serialized and uploaded dictionary, memory usage: "
            + Utils.getMemoryUsageInGB() + "GB" + ", timer: " + stopwatch);

}

From source file:org.apache.storm.grouping.LoadAwareShuffleGroupingTest.java

private void runMultithreadedBenchmark(LoadAwareCustomStreamGrouping grouper, List<Integer> availableTaskIds,
        LoadMapping loadMapping, int numThreads) throws InterruptedException, ExecutionException {
    // Task Id not used, so just pick a static value
    final int inputTaskId = 100;

    final WorkerTopologyContext context = mockContext(availableTaskIds);

    // Call prepare with our available taskIds
    grouper.prepare(context, null, availableTaskIds);

    // periodically calls refreshLoad in 1 sec to simulate worker load update timer
    ScheduledExecutorService refreshService = MoreExecutors
            .getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1));
    refreshService.scheduleAtFixedRate(() -> grouper.refreshLoad(loadMapping), 1, 1, TimeUnit.SECONDS);

    long current = System.currentTimeMillis();
    int idx = 0;
    while (true) {
        grouper.chooseTasks(inputTaskId, Lists.newArrayList());

        idx++;
        if (idx % 100000 == 0) {
            // warm up 60 seconds
            if (System.currentTimeMillis() - current >= 60_000) {
                break;
            }
        }
    }

    final int groupingExecutionsPerThread = 2_000_000_000;

    List<Callable<Long>> threadTasks = Lists.newArrayList();
    for (int x = 0; x < numThreads; x++) {
        Callable<Long> threadTask = new Callable<Long>() {
            @Override
            public Long call() throws Exception {
                long current = System.currentTimeMillis();
                for (int i = 1; i <= groupingExecutionsPerThread; i++) {
                    grouper.chooseTasks(inputTaskId, Lists.newArrayList());
                }
                return System.currentTimeMillis() - current;
            }
        };

        // Add to our collection.
        threadTasks.add(threadTask);
    }

    ExecutorService executor = Executors.newFixedThreadPool(threadTasks.size());
    List<Future<Long>> taskResults = executor.invokeAll(threadTasks);

    // Wait for all tasks to complete
    Long maxDurationMillis = 0L;
    for (Future taskResult : taskResults) {
        while (!taskResult.isDone()) {
            Thread.sleep(100);
        }
        Long durationMillis = (Long) taskResult.get();
        if (maxDurationMillis < durationMillis) {
            maxDurationMillis = durationMillis;
        }
    }

    LOG.info("Max duration among threads is : {} ms", maxDurationMillis);

    refreshService.shutdownNow();
}

From source file:org.jasig.cas.ticket.registry.support.JpaLockingStrategyTests.java

private void testConcurrency(final ExecutorService executor, final LockingStrategy[] locks) throws Exception {
    final List<Locker> lockers = new ArrayList<Locker>(locks.length);
    for (int i = 0; i < locks.length; i++) {
        lockers.add(new Locker(locks[i]));
    }

    int lockCount = 0;
    for (Future<Boolean> result : executor.invokeAll(lockers)) {
        if (result.get()) {
            lockCount++;
        }
    }
    assertTrue("Lock count should be <= 1 but was " + lockCount, lockCount <= 1);

    final List<Releaser> releasers = new ArrayList<Releaser>(locks.length);
    for (int i = 0; i < locks.length; i++) {
        releasers.add(new Releaser(locks[i]));
    }
    int releaseCount = 0;
    for (Future<Boolean> result : executor.invokeAll(releasers)) {
        if (result.get()) {
            releaseCount++;
        }
    }
    assertTrue("Release count should be <= 1 but was " + releaseCount, releaseCount <= 1);
}

From source file:org.apache.sling.maven.slingstart.run.StartMojo.java

/**
 * @see org.apache.maven.plugin.Mojo#execute()
 */
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    if (this.skipLaunchpad) {
        this.getLog().info("Execution of the start launchpad mojo is disabled by configuration.");
        return;
    }

    // delete properties
    if (systemPropertiesFile != null && systemPropertiesFile.exists()) {
        FileUtils.deleteQuietly(this.systemPropertiesFile);
    }

    // get configurations
    final Collection<ServerConfiguration> configurations = getLaunchpadConfigurations();

    // create the common environment
    final LaunchpadEnvironment env = new LaunchpadEnvironment(this.findLaunchpadJar(),
            this.cleanWorkingDirectory, !this.keepLaunchpadRunning, this.launchpadReadyTimeOutSec, this.debug);

    // create callables
    final Collection<LauncherCallable> tasks = new LinkedList<LauncherCallable>();

    for (final ServerConfiguration launchpadConfiguration : configurations) {
        validateConfiguration(launchpadConfiguration);

        tasks.add(createTask(launchpadConfiguration, env));
    }

    // create the launchpad runner properties
    this.createLaunchpadRunnerProperties(configurations);

    if (parallelExecution) {
        // ExecutorService for starting launchpad instances in parallel
        final ExecutorService executor = Executors.newCachedThreadPool();
        try {
            final List<Future<ProcessDescription>> resultsCollector = executor.invokeAll(tasks);
            for (final Future<ProcessDescription> future : resultsCollector) {
                try {
                    if (null == future.get()) {
                        throw new MojoExecutionException("Cannot start all the instances");
                    }
                } catch (final ExecutionException e) {
                    throw new MojoExecutionException(e.getLocalizedMessage(), e);
                }
            }
        } catch (final InterruptedException e) {
            throw new MojoExecutionException(e.getLocalizedMessage(), e);
        }
    } else {
        for (final LauncherCallable task : tasks) {
            try {
                if (null == task.call()) {
                    throw new MojoExecutionException("Cannot start all the instances");
                }
            } catch (final Exception e) {
                throw new MojoExecutionException(e.getLocalizedMessage(), e);
            }
        }
    }
    if (this.keepLaunchpadRunning) {
        getLog().info("Press CTRL-C to stop launchpad instance(s)...");
        while (this.isRunning(tasks)) {
            try {
                Thread.sleep(5000);
            } catch (final InterruptedException ie) {
                break;
            }
        }
    }
}

From source file:org.apache.storm.grouping.LoadAwareShuffleGroupingTest.java

@Test
public void testLoadAwareShuffleGroupingWithEvenLoadMultiThreaded()
        throws InterruptedException, ExecutionException {
    final int numTasks = 7;

    final LoadAwareShuffleGrouping grouper = new LoadAwareShuffleGrouping();

    // Task Id not used, so just pick a static value
    final int inputTaskId = 100;
    // Define our taskIds - the test expects these to be incrementing by one up from zero
    final List<Integer> availableTaskIds = getAvailableTaskIds(numTasks);
    final LoadMapping loadMapping = buildLocalTasksEvenLoadMapping(availableTaskIds);

    final WorkerTopologyContext context = mockContext(availableTaskIds);
    grouper.prepare(context, null, availableTaskIds);

    // force triggers building ring
    grouper.refreshLoad(loadMapping);

    // calls to chooseTasks should finish before the ring is refreshed;
    // adjusting groupingExecutionsPerThread might be needed on a really slow machine.
    // A race between refreshing the ring and choosing tasks is tolerated, so the
    // distribution will not be exactly even, though the difference is expected to
    // be small. If all threadTasks finish before the ring is refreshed, the
    // distribution should be exactly even.
    final int groupingExecutionsPerThread = numTasks * 5000;
    final int numThreads = 10;
    int totalEmits = groupingExecutionsPerThread * numThreads;

    List<Callable<int[]>> threadTasks = Lists.newArrayList();
    for (int x = 0; x < numThreads; x++) {
        Callable<int[]> threadTask = new Callable<int[]>() {
            @Override
            public int[] call() throws Exception {
                int[] taskCounts = new int[availableTaskIds.size()];
                for (int i = 1; i <= groupingExecutionsPerThread; i++) {
                    List<Integer> taskIds = grouper.chooseTasks(inputTaskId, Lists.newArrayList());

                    // Validate a single task id return
                    assertNotNull("Not null taskId list returned", taskIds);
                    assertEquals("Single task Id returned", 1, taskIds.size());

                    int taskId = taskIds.get(0);

                    assertTrue("TaskId should exist", taskId >= 0 && taskId < availableTaskIds.size());
                    taskCounts[taskId]++;
                }
                return taskCounts;
            }
        };

        // Add to our collection.
        threadTasks.add(threadTask);
    }

    ExecutorService executor = Executors.newFixedThreadPool(threadTasks.size());
    List<Future<int[]>> taskResults = executor.invokeAll(threadTasks);

    // Wait for all tasks to complete
    int[] taskIdTotals = new int[numTasks];
    for (Future taskResult : taskResults) {
        while (!taskResult.isDone()) {
            Thread.sleep(1000);
        }
        int[] taskDistributions = (int[]) taskResult.get();
        for (int i = 0; i < taskDistributions.length; i++) {
            taskIdTotals[i] += taskDistributions[i];
        }
    }

    int minPrCount = (int) (totalEmits * ((1.0 / numTasks) - ACCEPTABLE_MARGIN));
    int maxPrCount = (int) (totalEmits * ((1.0 / numTasks) + ACCEPTABLE_MARGIN));

    for (int i = 0; i < numTasks; i++) {
        assertTrue("Distribution should be even for all nodes with small delta",
                taskIdTotals[i] >= minPrCount && taskIdTotals[i] <= maxPrCount);
    }
}

From source file:org.apache.ctakes.ytex.kernel.evaluator.CorpusKernelEvaluatorImpl.java

@Override
public void evaluateKernelOnCorpus(Map<Long, Node> instanceIDMap, int nMod, boolean evalTest)
        throws InterruptedException {
    ExecutorService svc = Executors.newFixedThreadPool(nMod);
    List<Callable<Object>> taskList = new ArrayList<Callable<Object>>(nMod);
    for (int nSlice = 1; nSlice <= nMod; nSlice++) {
        taskList.add(new SliceEvaluator(instanceIDMap, nMod, nSlice, evalTest));
    }
    svc.invokeAll(taskList);
    svc.shutdown();
    svc.awaitTermination(60 * 4, TimeUnit.MINUTES);
}

From source file:org.apache.hadoop.fs.FCStatisticsBaseTest.java

@Test(timeout = 70000)
public void testStatisticsThreadLocalDataCleanUp() throws Exception {
    final Statistics stats = new Statistics("test");
    // create a small thread pool to test the statistics
    final int size = 2;
    ExecutorService es = Executors.newFixedThreadPool(size);
    List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>(size);
    for (int i = 0; i < size; i++) {
        tasks.add(new Callable<Boolean>() {
            public Boolean call() {
                // this populates the data set in statistics
                stats.incrementReadOps(1);
                return true;
            }
        });
    }
    // run the threads
    es.invokeAll(tasks);
    // assert that the data size is exactly the number of threads
    final AtomicInteger allDataSize = new AtomicInteger(0);
    allDataSize.set(stats.getAllThreadLocalDataSize());
    Assert.assertEquals(size, allDataSize.get());
    Assert.assertEquals(size, stats.getReadOps());
    // force the GC to collect the threads by shutting down the thread pool
    es.shutdownNow();
    es.awaitTermination(1, TimeUnit.MINUTES);
    es = null;
    System.gc(); // force GC to garbage collect threads

    // wait for up to 60 seconds
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            int size = stats.getAllThreadLocalDataSize();
            allDataSize.set(size);
            if (size == 0) {
                return true;
            }
            LOG.warn(
                    "not all references have been cleaned up; still " + allDataSize.get() + " references left");
            LOG.warn("triggering another GC");
            System.gc();
            return false;
        }
    }, 500, 60 * 1000);
    Assert.assertEquals(0, allDataSize.get());
    Assert.assertEquals(size, stats.getReadOps());
}

From source file:com.googlecode.jcasockets.perf.Client.java

public void execute() throws InterruptedException, ExecutionException {
    int numberOfThreads = clientOptions.getNumberOfThreads();
    String ipAddress = clientOptions.getIpAddress();
    List<Integer> ports = clientOptions.getPorts();
    ExecutorService executorService = Executors.newFixedThreadPool(numberOfThreads);
    try {
        Collection<Callable<ExecutionStatistics>> senderTestRunners = new ArrayList<Callable<ExecutionStatistics>>(
                numberOfThreads);
        for (Integer port : ports) {
            for (int i = 0; i < numberOfThreads; i++) {
                SocketSender socketSender = socketSenderFactory.createSocketSender(ipAddress, port);
                senderTestRunners.add(new SenderTestRunner(clientOptions, socketSender));
            }
        }
        List<Future<ExecutionStatistics>> executionStatisticsFutures = executorService
                .invokeAll(senderTestRunners);
        executionStatistics = new ExecutionStatistics(null);
        for (Future<ExecutionStatistics> future : executionStatisticsFutures) {
            ExecutionStatistics that = future.get();
            executionStatistics.combine(that);
        }
    } finally {
        executorService.shutdown();
    }
}

From source file:com.facebook.presto.accumulo.tools.RewriteMetricsTask.java

public int exec() throws Exception {
    // Validate the required parameters have been set
    int numErrors = checkParam(config, "config");
    numErrors += checkParam(schema, "schema");
    numErrors += checkParam(tableName, "tableName");
    if (numErrors > 0) {
        return 1;
    }

    // Create the instance and the connector
    Instance inst = new ZooKeeperInstance(config.getInstance(), config.getZooKeepers());
    Connector connector = inst.getConnector(config.getUsername(), new PasswordToken(config.getPassword()));

    if (auths == null) {
        auths = connector.securityOperations().getUserAuthorizations(config.getUsername());
    }

    // Fetch the table metadata
    ZooKeeperMetadataManager manager = new ZooKeeperMetadataManager(config, new TypeRegistry());

    LOG.info("Scanning Presto metadata for tables...");
    AccumuloTable table = manager.getTable(new SchemaTableName(schema, tableName));

    if (table == null) {
        LOG.error("Table is null, does it exist?");
        return 1;
    }

    reconfigureIterators(connector, table);

    if (!dryRun) {
        LOG.info("Truncating metrics table " + table.getIndexTableName() + "_metrics");
        connector.tableOperations().deleteRows(table.getIndexTableName() + "_metrics", null, null);
    } else {
        LOG.info("Would have truncated metrics table " + table.getIndexTableName() + "_metrics");
    }

    long start = System.currentTimeMillis();

    ExecutorService service = MoreExecutors.getExitingExecutorService(
            new ThreadPoolExecutor(2, 2, 0, TimeUnit.MILLISECONDS, new SynchronousQueue<>()));

    List<Future<Void>> tasks = service.invokeAll(ImmutableList.of(() -> {
        rewriteMetrics(connector, table, start);
        return null;
    }, () -> {
        rewriteNumRows(connector, table, start);
        return null;
    }));

    for (Future<Void> task : tasks) {
        task.get();
    }

    LOG.info("Finished");
    return 0;
}