Example usage for java.util.concurrent ExecutorService invokeAll

Introduction

This page collects usage examples for the java.util.concurrent ExecutorService.invokeAll method, drawn from open-source projects.

Prototype

<T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException;
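
The interface also declares a timed overload, which cancels any task that has not finished when the timeout elapses:

<T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException;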

Document

Executes the given tasks, returning a list of Futures holding their status and results when all complete.
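
Before the project examples below, here is a minimal self-contained sketch of the basic pattern: build a batch of Callables, hand them to invokeAll, and read each already-completed Future. The class name and task bodies are illustrative, not taken from any of the quoted projects.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class InvokeAllDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
            // Build a batch of independent tasks.
            List<Callable<Integer>> tasks = new ArrayList<>();
            for (int i = 1; i <= 8; i++) {
                final int n = i;
                tasks.add(() -> n * n);
            }
            // invokeAll blocks until every task has completed (or the calling
            // thread is interrupted), so every Future returned is already done.
            List<Future<Integer>> futures = pool.invokeAll(tasks);
            for (Future<Integer> future : futures) {
                try {
                    System.out.println(future.get()); // does not block here
                } catch (ExecutionException e) {
                    System.err.println("task failed: " + e.getCause());
                }
            }
        } finally {
            pool.shutdown();
        }
    }
}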

Usage

From source file:demo.vmware.commands.CommandGetAllRegions.java

/**
 * Retrieve the contents for all regions, one region per task executor
 */
@Override
public CommandResult run(ConfigurableApplicationContext mainContext, List<String> parameters) {
    // Using the templates means we can only fetch the full contents of regions we have a template for.
    // CommandGetCounts goes straight to the cache, so it may list more regions.
    Map<String, GemfireTemplate> allRegionTemplates = CommandRegionUtils.getAllGemfireTemplates(mainContext);

    // use the Java executor service because of its invokeAll method.
    ExecutorService taskExecutor = Executors.newFixedThreadPool(allRegionTemplates.size());
    Collection<RegionFetcher> tasks = new ArrayList<>();

    CommandTimer timer = new CommandTimer();
    for (String key : allRegionTemplates.keySet()) {
        GemfireTemplate oneTemplate = allRegionTemplates.get(key);
        if (parallelFetch) {
            tasks.add(new RegionFetcher(oneTemplate.getRegion().getName(), 0, oneTemplate));
        } else {
            // don't write anything out and don't capture results
            fetchOneRegion(oneTemplate.getRegion().getName(), 5, oneTemplate);
        }
    }
    if (parallelFetch) {
        // invokeAll() returns when all tasks are complete
        try {
            List<? extends Future<?>> futures = taskExecutor.invokeAll(tasks);
            taskExecutor.shutdown();
            LOG.info("Fetched " + futures.size() + " regions in threads");
            // at this point the futures hold the results: futures.get(i).get()
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt status instead of swallowing it
            e.printStackTrace();
        }
    }
    timer.stop();
    return new CommandResult(null, "Loading all regions took " + timer.getTimeDiffInSeconds() + " seconds");
}
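
As the comment in the example above notes, the single-argument invokeAll() returns only when all tasks are complete. When an upper bound on the total wait is needed, the timed overload cancels whatever has not finished by the deadline. A minimal sketch follows; the class name and task bodies are illustrative:

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class InvokeAllTimeoutDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        try {
            List<Callable<String>> tasks = List.of(
                    () -> "fast",
                    () -> { Thread.sleep(10_000); return "slow"; });
            // Waits at most one second; tasks still running at the deadline
            // are cancelled and their futures report isCancelled() == true.
            List<Future<String>> futures = pool.invokeAll(tasks, 1, TimeUnit.SECONDS);
            for (Future<String> future : futures) {
                if (future.isCancelled()) {
                    System.out.println("cancelled at the deadline");
                } else {
                    try {
                        System.out.println(future.get());
                    } catch (ExecutionException e) {
                        System.err.println("task failed: " + e.getCause());
                    }
                }
            }
        } finally {
            pool.shutdown();
        }
    }
}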

From source file:com.heliosphere.demeter.base.runner.AbstractRunner.java

@SuppressWarnings("nls")
@Override
public void start() throws RunnerException {
    log.info(String.format("Runner started: dispatching [%1d] context(s) across [%2d] thread(s).",
            contexts.size(), threadCount));
    log.info(" ");

    ExecutorService executor = Executors.newFixedThreadPool(this.threadCount);
    for (IContext context : contexts) {
        callables.add(context.getProcessor());
    }

    try {
        futures = executor.invokeAll(callables);
    } catch (InterruptedException e) {
        throw new RunnerException("An error occurred due to: " + e.getMessage(), e);
    }

    log.info(
            "*********************************************************************************************************");
    log.info("EXECUTION SUMMARY:");
    log.info(" ");
    log.info(String.format(" Thread pool size..: [%1d]", threadCount));
    log.info(String.format(" Configuration file: [%1s]", configuration.getResource().getFile().getName()));
    log.info(String.format(" Execution file....: [%1s]", execution.getResource().getFile().getName()));
    log.info(String.format("        Description: %1s", execution.getHeader().getDescription()));
    log.info("       Parameter(s):");
    IParameterConfiguration parameterConfiguration = null; // renamed to avoid shadowing the configuration field used above
    for (IParameterExecution p : execution.getContent().getElements()) {
        parameterConfiguration = p.getConfiguration();
        log.info(String.format("               type:[%1s], name:[%2s], value:[%3s], description:[%4s]",
                p.getType(), p.getName(), p.getValue(), parameterConfiguration.getDescription()));
    }
    log.info(" ");

    for (Future<IExecutionResult> future : futures) {
        try {
            IExecutionResult result = future.get();

            // Dump the execution result of each processor.
            String message = String.format("Context name:[%1s], status:[%2s], execution:[%4s]",
                    StringUtils.abbreviateMiddle(result.getName(), "...", 50), result.getStatus().toString(),
                    result.getElapsed());
            log.info(message); // a summary line, so log at info rather than error level

            // If process has failed, then dump the exceptions!
            if (result.getStatus() == ExecutionStatusType.FAILED) {
                for (Exception exception : result.getExceptions()) {
                    log.error(String.format("   Exception caught -> %1s", exception.getMessage()), exception);
                }
            }
        } catch (InterruptedException | ExecutionException e) {
            throw new RunnerException("An error occurred due to: " + e.getMessage(), e);
        }
    }

    executor.shutdown();
    watch.stop();

    log.info(" ");
    log.info(String.format("Runner finished processing: [%1d] context(s) in a total of: [%2s]", contexts.size(),
            watch.toString()));
    log.info(
            "*********************************************************************************************************");
}

From source file:com.ibm.bi.dml.runtime.io.ReaderTextCSVParallel.java

/**
 * Reads a CSV matrix from HDFS in parallel, using one read task per input split.
 * 
 * @param splits input splits to read
 * @param path input path
 * @param job job configuration
 * @param dest destination matrix block
 * @param rlen number of rows
 * @param clen number of columns
 * @param brlen number of rows per block
 * @param bclen number of columns per block
 * @param hasHeader whether the input has a header row
 * @param delim field delimiter
 * @param fill whether missing values should be filled
 * @param fillValue the value used for filling
 * @throws IOException
 */
private void readCSVMatrixFromHDFS(InputSplit[] splits, Path path, JobConf job, MatrixBlock dest, long rlen,
        long clen, int brlen, int bclen, boolean hasHeader, String delim, boolean fill, double fillValue)
        throws IOException {
    FileInputFormat.addInputPath(job, path);
    TextInputFormat informat = new TextInputFormat();
    informat.configure(job);

    ExecutorService pool = Executors.newFixedThreadPool(_numThreads);

    try {
        // create read tasks for all splits
        ArrayList<CSVReadTask> tasks = new ArrayList<CSVReadTask>();
        int splitCount = 0;
        for (InputSplit split : splits) {
            tasks.add(new CSVReadTask(split, _offsets, informat, job, dest, rlen, clen, hasHeader, delim, fill,
                    fillValue, splitCount++));
        }
        pool.invokeAll(tasks);
        pool.shutdown();

        // check return codes and aggregate nnz
        long lnnz = 0;
        for (CSVReadTask rt : tasks) {
            lnnz += rt.getPartialNnz();
            if (!rt.getReturnCode()) {
                Exception err = rt.getException();
                throw new IOException("Read task for csv input failed: " + err.toString(), err);
            }
        }
        dest.setNonZeros(lnnz);
    } catch (Exception e) {
        throw new IOException("Thread pool issue during parallel read.", e);
    }
}

From source file:com.dateofrock.simpledbmapper.SimpleDBMapper.java

/**
 * Saves the given object to SimpleDB.
 * 
 * @param object
 *            a POJO annotated with {@link SimpleDBDomain}. If the class also
 *            carries a {@link SimpleDBVersionAttribute}, a <a href=
 *            "http://docs.amazonwebservices.com/AmazonSimpleDB/latest/DeveloperGuide/ConditionalPut.html"
 *            >Conditional Put</a> is issued so that conflicting concurrent updates are rejected.
 */
public <T> void save(T object) {
    Class<?> clazz = object.getClass();
    String domainName = getDomainName(clazz);

    Field itemNameField = this.reflector.findItemNameField(clazz);
    if (itemNameField == null) {
        throw new SimpleDBMapperException(object + " has no @SimpleDBItemName field");
    }

    String itemName = this.reflector.encodeItemNameAsSimpleDBFormat(object, itemNameField);

    Set<Field> allFields = this.reflector.listAllFields(clazz);
    Map<String, Object> attributeMap = new HashMap<String, Object>();
    List<S3BlobReference> blobList = new ArrayList<S3BlobReference>();
    for (Field field : allFields) {
        try {
            String attributeName = this.reflector.getAttributeName(field);
            if (attributeName != null) {
                if (this.reflector.isAttributeField(field)) {
                    attributeMap.put(attributeName, field.get(object));
                } else if (this.reflector.isBlobField(field)) {
                    String s3BucketName = this.reflector.getS3BucketName(clazz);
                    String s3KeyPrefix = this.reflector.getS3KeyPrefix(clazz);
                    String s3ContentType = this.reflector.getS3ContentType(field);
                    // FIXME
                    S3BlobReference s3BlobRef = new S3BlobReference(attributeName, s3BucketName, s3KeyPrefix,
                            s3ContentType, field.get(object));
                    blobList.add(s3BlobRef);
                }
            }
        } catch (Exception e) {
            throw new SimpleDBMapperException(e);
        }
    }

    List<String> nullKeys = new ArrayList<String>();
    List<ReplaceableAttribute> replacableAttrs = new ArrayList<ReplaceableAttribute>();

    // SimpleDBAttribute
    for (Map.Entry<String, Object> entry : attributeMap.entrySet()) {
        String sdbAttributeName = entry.getKey();
        Object sdbValue = entry.getValue();
        if (sdbValue == null) {
            nullKeys.add(sdbAttributeName); // null values are deleted afterwards
        } else if (sdbValue instanceof Set) { // multi-valued attribute
            Set<?> c = (Set<?>) sdbValue;
            for (Object val : c) {
                replacableAttrs.add(new ReplaceableAttribute(sdbAttributeName,
                        this.reflector.encodeObjectAsSimpleDBFormat(val), true));
            }
        } else {
            replacableAttrs.add(new ReplaceableAttribute(sdbAttributeName,
                    this.reflector.encodeObjectAsSimpleDBFormat(sdbValue), true));
        }
    }

    // SimpleDBBlob
    // collect the blobs that have to be uploaded to S3
    List<S3Task> uploadTasks = new ArrayList<S3Task>();
    for (S3BlobReference s3BlobRef : blobList) {
        String bucketName = s3BlobRef.getS3BucketName();
        if (bucketName == null) {
            throw new SimpleDBMapperException("no s3BucketName is specified for the Blob field");
        }

        StringBuilder s3Key = new StringBuilder();
        String prefix = s3BlobRef.getPrefix();
        if (prefix == null) {
            throw new SimpleDBMapperException("the Blob prefix must not be null");
        }
        prefix = prefix.trim();
        s3Key.append(prefix);
        if (!prefix.isEmpty() && !prefix.endsWith("/")) {
            s3Key.append("/");
        }
        s3Key.append(itemName).append("/").append(s3BlobRef.getAttributeName());

        Object blobObject = s3BlobRef.getObject();
        if (blobObject == null) {
            nullKeys.add(s3BlobRef.getAttributeName());
            // the blob is null, so delete the corresponding S3 object
            // FIXME should the SDB attribute be deleted here as well?
            this.s3.deleteObject(bucketName, s3Key.toString());
        } else {
            // upload the blob contents to S3; only String and byte[] are supported
            InputStream input = null;
            if (blobObject instanceof String) {
                // the blob is a String
                // FIXME make the encoding configurable?
                input = new ByteArrayInputStream(((String) blobObject).getBytes(Charset.forName("UTF-8")));
            } else if (blobObject instanceof byte[]) {
                // the blob is a byte array
                input = new ByteArrayInputStream((byte[]) blobObject);
            } else {
                throw new SimpleDBMapperException(
                        "a Blob field must be either a String or a byte[]");
            }
            S3Task uploadTask = new S3Task(this.s3, s3BlobRef.getAttributeName(), input, bucketName,
                    s3Key.toString(), s3BlobRef.getContentType());
            uploadTasks.add(uploadTask);
        }
    }

    // PutAttribute
    PutAttributesRequest req = new PutAttributesRequest();
    req.setDomainName(domainName);
    req.setItemName(itemName);

    // If the object carries a version attribute, use its current value for a Conditional PUT.
    Long nowVersion = System.currentTimeMillis();
    Field versionField = this.reflector.findVersionAttributeField(clazz);
    if (versionField != null) {
        try {
            Object versionObject = versionField.get(object);
            String versionAttributeName = versionField.getAnnotation(SimpleDBVersionAttribute.class)
                    .attributeName();
            if (versionObject != null) {
                if (versionObject instanceof Long) {
                    Long currentVersion = (Long) versionObject;
                    UpdateCondition expected = new UpdateCondition();
                    expected.setName(versionAttributeName);
                    expected.setValue(currentVersion.toString());
                    req.setExpected(expected);
                } else {
                    throw new SimpleDBMapperException(
                            "the version attribute must be of type Long: " + versionField);
                }
            }

            replacableAttrs.add(new ReplaceableAttribute(versionAttributeName, nowVersion.toString(), true));
        } catch (Exception e) {
            throw new SimpleDBMapperException("failed to read the version attribute from: " + object, e);
        }
    }

    // Upload the blobs to S3 in parallel.
    List<S3TaskResult> taskFailures = new ArrayList<S3TaskResult>();
    ExecutorService executor = Executors.newFixedThreadPool(this.config.geS3AccessThreadPoolSize());
    try {
        List<Future<S3TaskResult>> futures = executor.invokeAll(uploadTasks);
        for (Future<S3TaskResult> future : futures) {
            S3TaskResult result = future.get();
            // record the S3 location of the blob as a SimpleDB attribute
            replacableAttrs.add(new ReplaceableAttribute(result.getSimpleDBAttributeName(),
                    result.toSimpleDBAttributeValue(), true));
            if (!result.isSuccess()) {
                // remember the failed upload so the save can be aborted below
                taskFailures.add(result);
            }
        }
    } catch (Exception e) {
        throw new SimpleDBMapperS3HandleException("S3 access failed", e);
    } finally {
        executor.shutdown(); // release the worker threads
    }

    // Abort if any of the upload tasks failed.
    if (!taskFailures.isEmpty()) {
        throw new SimpleDBMapperS3HandleException(taskFailures);
    }

    // PUT the attributes into SimpleDB.
    req.setAttributes(replacableAttrs);
    this.sdb.putAttributes(req);

    // Write the new version back into the object.
    if (versionField != null) {
        try {
            versionField.set(object, nowVersion);
        } catch (Exception ignore) {
            throw new SimpleDBMapperException("failed to update the version attribute", ignore);
        }
    }

    // Delete the attributes whose values were null.
    if (!nullKeys.isEmpty()) {
        DeleteAttributesRequest delReq = new DeleteAttributesRequest();
        delReq.setDomainName(domainName);
        delReq.setItemName(itemName);
        Collection<Attribute> delAttrs = new ArrayList<Attribute>(nullKeys.size());
        for (String nullKey : nullKeys) {
            delAttrs.add(new Attribute(nullKey, null));
        }
        delReq.setAttributes(delAttrs);
        this.sdb.deleteAttributes(delReq);
    }

}

From source file:edu.odu.cs.cs350.yellow1.jar.JarExecutor.java

/**
 * Concurrently runs every jar created from the test suite.
 * <br>For each mutant, an {@link ExecuteJar} task is created; the tasks are executed in
 * parallel and their results are gathered and made available through other methods of this class.
 * @return true
 */
public boolean start() {
    ExecutorService threadPool = Executors
            .newFixedThreadPool(Math.max(2, Runtime.getRuntime().availableProcessors() / 2));
    List<Callable<ExecutionResults>> tasks = new ArrayList<>();

    //since the only operations on the shared files are reads it 
    //is relatively safe to do so in parallel 
    File testDir = new File(pathToTestSuit);
    List<File> tests = Arrays.asList(testDir.listFiles());
    numTests = tests.size();
    while (!jarsToExecute.isEmpty()) {
        File executeJar = jarsToExecute.poll();
        tasks.add(new ExecuteJar(executeJar, pathToGold, pathToLogDir, tests));
    }

    try {
        List<Future<ExecutionResults>> results = threadPool.invokeAll(tasks);
        for (Future<ExecutionResults> rets : results) {
            ExecutionResults result = rets.get();
            mutationTestingResults.add(result);
            System.out.println(result.toString());
            if (result.isKilled()) {
                ++mutantsKilled;
            } else
                ++mutantsNotKilled;

            logsCreated += result.getStandardErrOutput().size() + result.getStandardOutput().size();
        }
    } catch (InterruptedException | ExecutionException e) {
        logger.error(e.getMessage(), e); // log the full stack trace, not only the message
    }

    return true;
}

From source file:org.apache.nutch.storage.TestGoraStorage.java

/**
 * Tests multiple threads reading and writing to the same store. This should be
 * no problem because {@link DataStore} implementations claim to be thread
 * safe.
 * 
 * @throws Exception
 */
@Test
@Ignore("Temporarily disabled until NUTCH-1572 is addressed.")
public void testMultithreaded() throws Exception {
    // create a fixed thread pool
    int numThreads = 8;
    ExecutorService pool = Executors.newFixedThreadPool(numThreads);

    // define a list of tasks
    Collection<Callable<Integer>> tasks = new ArrayList<Callable<Integer>>();
    for (int i = 0; i < numThreads; i++) {
        tasks.add(new Callable<Integer>() {
            @Override
            public Integer call() {
                try {
                    // run a sequence
                    readWrite(Thread.currentThread().getName(), webPageStore);
                    // everything ok, return 0
                    return 0;
                } catch (Exception e) {
                    e.printStackTrace();
                    // this will fail the test
                    return 1;
                }
            }
        });
    }

    // submit them at once
    List<Future<Integer>> results = pool.invokeAll(tasks);

    // check results
    for (Future<Integer> result : results) {
        assertEquals(0, (int) result.get());
    }
}

From source file:org.sonarqube.tests.analysis.IssuesModeTest.java

private void runConcurrentIssues(final String workDirPath) throws Exception {
    // Install sonar-runner in advance to avoid concurrent unzip issues
    FileSystem fileSystem = orchestrator.getConfiguration().fileSystem();
    new SonarScannerInstaller(fileSystem).install(Version.create(SonarScanner.DEFAULT_SCANNER_VERSION),
            fileSystem.workspace(), true);
    final int nThreads = 3;
    ExecutorService executorService = Executors.newFixedThreadPool(nThreads);
    List<Callable<BuildResult>> tasks = new ArrayList<>();
    final File homeDir = temp.newFolder();
    for (int i = 0; i < nThreads; i++) {
        tasks.add(() -> {
            SonarScanner scanner = configureScannerIssues("shared/xoo-sample", homeDir,
                    "sonar.it.enableWaitingSensor", "true", "sonar.working.directory", workDirPath);
            return orchestrator.executeBuild(scanner);
        });
    }

    boolean expectedError = false;
    for (Future<BuildResult> result : executorService.invokeAll(tasks)) {
        try {
            result.get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof BuildFailureException) {
                BuildFailureException bfe = (BuildFailureException) e.getCause();
                assertThat(bfe.getResult().getLogs())
                        .contains("Another SonarQube analysis is already in progress for this project");
                expectedError = true;
            } else {
                throw e;
            }
        }
    }
    if (!expectedError) {
        fail("At least one of the threads should have failed");
    }
}

From source file:org.openspaces.rest.space.SpaceTaskAPIController.java

private ModelAndView execute(String spaceName, String locators, final SpaceTaskRequest request) {
    GigaSpace space = ControllerUtils.xapCache.get(spaceName, locators);
    final int instanceCount = ControllerUtils.xapCache.getInstances(spaceName);
    ExecutorService svc = Executors.newFixedThreadPool(instanceCount);
    int instances = 0;

    log.fine("request.target=" + request.target);
    if (request.target != null && !request.target.equals("all")) {
        instances = 1;
    } else {
        instances = instanceCount;
    }

    log.fine("instances=" + instances);

    List<Callable<Object>> tasks = new ArrayList<Callable<Object>>(instances);
    for (int i = 0; i < instances; i++) {
        Object routing = 0;
        if (request.target != null && request.target.equals("all")) {
            routing = i;
        } else {
            routing = request.target;
        }
        tasks.add(new ScriptCallable(space, request, routing));
    }

    ModelAndView mv = new ModelAndView("jsonView");
    List<Object> model = new ArrayList<Object>(instances);
    try {
        List<Future<Object>> results = svc.invokeAll(tasks);

        for (Future<Object> fut : results) {
            Object value = fut.get(); // fetch the result once instead of calling get() twice
            if (value != null)
                model.add(value);
        }
        mv.addObject("results", model);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        svc.shutdownNow();
    }
    return mv;
}

From source file:org.apache.sysml.runtime.compress.CompressedMatrixBlock.java

private static CompressedSizeInfo[] computeCompressedSizeInfos(CompressedSizeEstimator estim, int clen, int k)
        throws DMLRuntimeException {
    try {
        ExecutorService pool = Executors.newFixedThreadPool(k);
        ArrayList<SizeEstimTask> tasks = new ArrayList<SizeEstimTask>();
        for (int col = 0; col < clen; col++)
            tasks.add(new SizeEstimTask(estim, col));
        List<Future<CompressedSizeInfo>> rtask = pool.invokeAll(tasks);
        ArrayList<CompressedSizeInfo> ret = new ArrayList<CompressedSizeInfo>();
        for (Future<CompressedSizeInfo> lrtask : rtask)
            ret.add(lrtask.get());
        pool.shutdown();
        return ret.toArray(new CompressedSizeInfo[0]);
    } catch (Exception ex) {
        throw new DMLRuntimeException(ex);
    }
}

From source file:it.analysis.IssuesModeTest.java

private void runConcurrentIssues(final String workDirPath) throws Exception {
    // Install sonar-runner in advance to avoid concurrent unzip issues
    FileSystem fileSystem = orchestrator.getConfiguration().fileSystem();
    new SonarScannerInstaller(fileSystem).install(Version.create(SonarScanner.DEFAULT_SCANNER_VERSION),
            fileSystem.workspace(), true);
    final int nThreads = 3;
    ExecutorService executorService = Executors.newFixedThreadPool(nThreads);
    List<Callable<BuildResult>> tasks = new ArrayList<>();
    final File homeDir = temp.newFolder();
    for (int i = 0; i < nThreads; i++) {
        tasks.add(new Callable<BuildResult>() {

            public BuildResult call() throws Exception {
                SonarScanner runner = configureRunnerIssues("shared/xoo-sample", homeDir,
                        "sonar.it.enableWaitingSensor", "true", "sonar.working.directory", workDirPath);
                return orchestrator.executeBuild(runner);
            }
        });
    }

    boolean expectedError = false;
    for (Future<BuildResult> result : executorService.invokeAll(tasks)) {
        try {
            result.get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof BuildFailureException) {
                BuildFailureException bfe = (BuildFailureException) e.getCause();
                assertThat(bfe.getResult().getLogs())
                        .contains("Another SonarQube analysis is already in progress for this project");
                expectedError = true;
            } else {
                throw e;
            }
        }
    }
    if (!expectedError) {
        fail("At least one of the threads should have failed");
    }
}