Example usage for java.util.concurrent CompletionService submit

List of usage examples for java.util.concurrent CompletionService submit

Introduction

This page collects usage examples for the java.util.concurrent CompletionService.submit method, drawn from open-source projects.

Prototype

Future<V> submit(Callable<V> task);

Document

Submits a value-returning task for execution and returns a Future representing the pending results of the task.
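
Before the project examples below, here is a minimal, self-contained sketch of the typical pattern (the class name and the squaring workload are illustrative only, not taken from any project on this page): value-returning tasks are submitted through the CompletionService, and their Futures are then retrieved with take() in completion order rather than submission order.

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CompletionServiceSubmitExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        CompletionService<Integer> completionService = new ExecutorCompletionService<>(pool);

        // Submit value-returning tasks; each submit(Callable) returns a Future immediately.
        int taskCount = 10;
        for (int i = 0; i < taskCount; i++) {
            final int n = i;
            completionService.submit(() -> n * n);
        }

        // take() blocks until some task finishes and returns its Future,
        // so results arrive in completion order, not submission order.
        int sum = 0;
        for (int i = 0; i < taskCount; i++) {
            Future<Integer> done = completionService.take();
            sum += done.get();
        }
        pool.shutdown();
        System.out.println("Sum of squares: " + sum);
    }
}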

Usage

From source file:com.facebook.presto.accumulo.index.ColumnCardinalityCache.java
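This example submits one cardinality lookup per column constraint, then polls the completion service on a fixed interval so it can return early once the smallest observed cardinality drops below a threshold.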

/**
 * Gets the cardinality for each {@link AccumuloColumnConstraint}.
 * Given constraints are expected to be indexed! Who knows what would happen if they weren't!
 *
 * @param schema Schema name
 * @param table Table name
 * @param auths Scan authorizations
 * @param idxConstraintRangePairs Mapping of all ranges for a given constraint
 * @param earlyReturnThreshold Smallest acceptable cardinality to return early while other tasks complete
 * @param pollingDuration Duration for polling the cardinality completion service
 * @return An immutable multimap of cardinality to column constraint, sorted by cardinality from smallest to largest
 * @throws TableNotFoundException If the metrics table does not exist
 * @throws ExecutionException If another error occurs; I really don't even know anymore.
 */
public Multimap<Long, AccumuloColumnConstraint> getCardinalities(String schema, String table,
        Authorizations auths, Multimap<AccumuloColumnConstraint, Range> idxConstraintRangePairs,
        long earlyReturnThreshold, Duration pollingDuration) throws ExecutionException, TableNotFoundException {
    // Submit tasks to the executor to fetch column cardinality, adding it to the Guava cache if necessary
    CompletionService<Pair<Long, AccumuloColumnConstraint>> executor = new ExecutorCompletionService<>(
            executorService);
    idxConstraintRangePairs.asMap().forEach((key, value) -> executor.submit(() -> {
        long cardinality = getColumnCardinality(schema, table, auths, key.getFamily(), key.getQualifier(),
                value);
        LOG.debug("Cardinality for column %s is %s", key.getName(), cardinality);
        return Pair.of(cardinality, key);
    }));

    // Create a multi map sorted by cardinality
    ListMultimap<Long, AccumuloColumnConstraint> cardinalityToConstraints = MultimapBuilder.treeKeys()
            .arrayListValues().build();
    try {
        boolean earlyReturn = false;
        int numTasks = idxConstraintRangePairs.asMap().entrySet().size();
        do {
            // Sleep for the polling duration to allow concurrent tasks to run for this time
            Thread.sleep(pollingDuration.toMillis());

            // Poll each task, retrieving the result if it is done
            for (int i = 0; i < numTasks; ++i) {
                Future<Pair<Long, AccumuloColumnConstraint>> futureCardinality = executor.poll();
                if (futureCardinality != null && futureCardinality.isDone()) {
                    Pair<Long, AccumuloColumnConstraint> columnCardinality = futureCardinality.get();
                    cardinalityToConstraints.put(columnCardinality.getLeft(), columnCardinality.getRight());
                }
            }

            // If the smallest cardinality is present and below the threshold, set the earlyReturn flag
            Optional<Entry<Long, AccumuloColumnConstraint>> smallestCardinality = cardinalityToConstraints
                    .entries().stream().findFirst();
            if (smallestCardinality.isPresent()) {
                if (smallestCardinality.get().getKey() <= earlyReturnThreshold) {
                    LOG.info("Cardinality %s, is below threshold. Returning early while other tasks finish",
                            smallestCardinality);
                    earlyReturn = true;
                }
            }
        } while (!earlyReturn && cardinalityToConstraints.entries().size() < numTasks);
    } catch (ExecutionException | InterruptedException e) {
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Exception when getting cardinality", e);
    }

    // Create a copy of the cardinalities
    return ImmutableMultimap.copyOf(cardinalityToConstraints);
}

From source file:org.apache.phoenix.execute.UpsertSelectOverlappingBatchesIT.java
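This test submits one long-running UPSERT SELECT plus four overlapping ones, then uses take() to collect each result as it completes and asserts that all of them succeed.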

@Test
public void testUpsertSelectSameBatchConcurrently() throws Exception {
    try (Connection conn = driver.connect(url, props)) {
        int numUpsertSelectRunners = 5;
        ExecutorService exec = Executors.newFixedThreadPool(numUpsertSelectRunners);
        CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(exec);
        List<Future<Boolean>> futures = Lists.newArrayListWithExpectedSize(numUpsertSelectRunners);
        // run one UPSERT SELECT for 100 rows (that locks the rows for a long time)
        futures.add(completionService.submit(new UpsertSelectRunner(dataTable, 0, 105, 1)));
        // run four UPSERT SELECTS for 5 rows (that overlap with slow running UPSERT SELECT)
        for (int i = 0; i < 100; i += 25) {
            futures.add(completionService.submit(new UpsertSelectRunner(dataTable, i, i + 25, 5)));
        }
        int received = 0;
        while (received < futures.size()) {
            Future<Boolean> resultFuture = completionService.take();
            Boolean result = resultFuture.get();
            received++;
            assertTrue(result);
        }
        exec.shutdownNow();
    }
}

From source file:org.pentaho.platform.plugin.services.metadata.PentahoMetadataDomainRepositoryConcurrencyTest.java
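This test harness submits a list of Callable actors, drains the completion service with take(), collects any non-null results as errors, and fails the test if any occurred.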

private void runTest(final List<? extends Callable<String>> actors) throws Exception {
    List<String> errors = new ArrayList<String>();
    ExecutorService executorService = Executors.newFixedThreadPool(actors.size());
    try {
        CompletionService<String> completionService = new ExecutorCompletionService<String>(executorService);
        for (Callable<String> reader : actors) {
            completionService.submit(reader);
        }

        for (int i = 0; i < actors.size(); i++) {
            Future<String> take = completionService.take();
            String result;
            try {
                result = take.get();
            } catch (ExecutionException e) {
                result = "Execution exception: " + e.getMessage();
            }
            if (result != null) {
                errors.add(result);
            }
        }
    } finally {
        executorService.shutdown();
    }

    if (!errors.isEmpty()) {
        StringBuilder builder = new StringBuilder();
        builder.append("The following errors occurred: \n");
        for (String error : errors) {
            builder.append(error).append('\n');
        }
        fail(builder.toString());
    }
}

From source file:com.laudandjolynn.mytv.crawler.CrawlerGroup.java
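This example fans one crawl task out per crawler on a bounded thread pool, then takes the results one by one, logging and skipping any task that was interrupted or failed.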

@Override
public List<TvStation> crawlAllTvStation() {
    List<TvStation> resultList = new ArrayList<TvStation>();
    int size = crawlers.size();
    int maxThreadNum = Constant.CPU_PROCESSOR_NUM;
    ThreadFactory threadFactory = new BasicThreadFactory.Builder().namingPattern("Mytv_CrawlerGroup_%d")
            .build();
    ExecutorService executorService = Executors.newFixedThreadPool(size > maxThreadNum ? maxThreadNum : size,
            threadFactory);
    CompletionService<List<TvStation>> completionService = new ExecutorCompletionService<List<TvStation>>(
            executorService);
    for (final Crawler crawler : crawlers) {
        Callable<List<TvStation>> task = new Callable<List<TvStation>>() {
            @Override
            public List<TvStation> call() throws Exception {
                return crawler.crawlAllTvStation();
            }
        };
        completionService.submit(task);
    }
    executorService.shutdown();
    int count = 0;
    while (count < size) {
        try {
            List<TvStation> stationList = completionService.take().get();
            if (stationList != null) {
                resultList.addAll(stationList);
            }
        } catch (InterruptedException e) {
            logger.error("crawl task of all tv station interrupted.", e);
        } catch (ExecutionException e) {
            logger.error("crawl task of all tv station executed fail.", e);
        }
        count++;
    }

    for (CrawlEventListener listener : listeners) {
        listener.crawlEnd(new AllTvStationCrawlEndEvent(this, resultList));
    }
    return resultList;
}

From source file:com.appdynamics.monitors.hadoop.communicator.AmbariCommunicator.java
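This example submits one HTTP request per included cluster, counts the submissions, and then collects the responses with take() before parsing their metrics.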

/**
 * Populates <code>metrics</code> Map with all numeric Ambari clusters metrics.
 * @see #getClusterMetrics(java.io.Reader)
 *
 * @param metrics
 */
public void populate(Map<String, Object> metrics) {
    this.metrics = metrics;
    try {
        Reader response = (new Response("http://" + host + ":" + port + "/api/v1/clusters")).call();

        Map<String, Object> json = (Map<String, Object>) parser.parse(response, simpleContainer);
        try {
            List<Map> clusters = (ArrayList<Map>) json.get("items");

            CompletionService<Reader> threadPool = new ExecutorCompletionService<Reader>(executor);
            int count = 0;
            for (Map cluster : clusters) {
                if (xmlParser.isIncludeCluster((String) ((Map) cluster.get("Clusters")).get("cluster_name"))) {
                    threadPool.submit(new Response(cluster.get("href") + CLUSTER_FIELDS));
                    count++;
                }
            }
            for (; count > 0; count--) {
                getClusterMetrics(threadPool.take().get());
            }
        } catch (Exception e) {
            logger.error("Failed to parse cluster names: " + stackTraceToString(e));
        }
    } catch (Exception e) {
        logger.error("Failed to get response for cluster names: " + stackTraceToString(e));
    }
    executor.shutdown();
}

From source file:org.apache.drill.optiq.EnumerableDrill.java
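This example returns the Future produced by submit() directly, letting the caller decide when to wait for the plan's outcome.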

/** Runs the plan as a background task. */
Future<Collection<RunOutcome>> runPlan(CompletionService<Collection<RunOutcome>> service) {
    IteratorRegistry ir = new IteratorRegistry();
    DrillConfig config = DrillConfig.create();
    config.setSinkQueues(0, queue);
    final ReferenceInterpreter i = new ReferenceInterpreter(plan, ir, new BasicEvaluatorFactory(ir),
            new RSERegistry(config));
    try {
        i.setup();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    return service.submit(new Callable<Collection<RunOutcome>>() {
        @Override
        public Collection<RunOutcome> call() throws Exception {
            Collection<RunOutcome> outcomes = i.run();

            for (RunOutcome outcome : outcomes) {
                System.out.println("============");
                System.out.println(outcome);
                if (outcome.outcome == RunOutcome.OutcomeType.FAILED && outcome.exception != null) {
                    outcome.exception.printStackTrace();
                }
            }
            return outcomes;
        }
    });
}

From source file:com.appdynamics.monitors.hadoop.communicator.AmbariCommunicator.java
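This example reuses a single completion service for two rounds of requests, first for services and then for hosts, counting submissions so it knows how many results to take.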

/**
 * Parses a JSON Reader object as cluster metrics and collect service and host metrics.
 * @see #getServiceMetrics(java.io.Reader, String)
 * @see #getHostMetrics(java.io.Reader, String)
 *
 * @param response
 */
private void getClusterMetrics(Reader response) {
    try {
        Map<String, Object> json = (Map<String, Object>) parser.parse(response, simpleContainer);
        try {
            String clusterName = (String) ((Map) json.get("Clusters")).get("cluster_name");
            List<Map> services = (ArrayList<Map>) json.get("services");
            List<Map> hosts = (ArrayList<Map>) json.get("hosts");

            CompletionService<Reader> threadPool = new ExecutorCompletionService<Reader>(executor);
            int count = 0;
            for (Map service : services) {
                if (xmlParser
                        .isIncludeService((String) ((Map) service.get("ServiceInfo")).get("service_name"))) {
                    threadPool.submit(new Response(service.get("href") + SERVICE_FIELDS));
                    count++;
                }
            }
            for (; count > 0; count--) {
                getServiceMetrics(threadPool.take().get(), clusterName + "|services");
            }

            for (Map host : hosts) {
                if (xmlParser.isIncludeHost((String) ((Map) host.get("Hosts")).get("host_name"))) {
                    threadPool.submit(new Response(host.get("href") + HOST_FIELDS));
                    count++;
                }
            }
            for (; count > 0; count--) {
                getHostMetrics(threadPool.take().get(), clusterName + "|hosts");
            }
        } catch (Exception e) {
            logger.error("Failed to parse cluster metrics: " + stackTraceToString(e));
        }
    } catch (Exception e) {
        logger.error("Failed to get response for cluster metrics: " + stackTraceToString(e));
    }
}

From source file:org.opoo.press.maven.wagon.github.GitHub.java
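This example submits one tree-entry creation task per path and takes exactly paths.length results, replacing an earlier take()-until-null loop (commented out below) that could block forever.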

private List<TreeEntry> createEntriesInThreads(List<TreeEntry> entries, final String prefix,
        final String[] paths, final DataService service, final RepositoryId repository,
        final File outputDirectory, int numThreads) throws GitHubException {
    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);//.newCachedThreadPool();  
    CompletionService<TreeEntry> cs = new ExecutorCompletionService<TreeEntry>(threadPool);

    for (final String path : paths) {
        cs.submit(new Callable<TreeEntry>() {
            @Override
            public TreeEntry call() throws Exception {
                return createEntry(prefix, path, service, repository, outputDirectory);
            }
        });
    }

    try {
        //BUG: wait for ever??
        //         Future<TreeEntry> future = cs.take();
        //         while(future != null){
        //            entries.add(future.get());
        //            future = cs.take();
        //         }

        for (int i = 0; i < paths.length; i++) {
            entries.add(cs.take().get());
        }
        log.info("All entries created: " + paths.length);
    } catch (InterruptedException e) {
        throw new GitHubException("", e);
    } catch (ExecutionException e) {
        throw new GitHubException("", e);
    }
    return entries;
}

From source file:com.tascape.qa.th.SuiteRunner.java
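This example submits a batch of test runners, pairs every submission with a take(), and counts failures from the returned results.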

public int startExecution() throws IOException, InterruptedException, SQLException, XMLStreamException {
    File dir = SYS_CONFIG.getLogPath().resolve(execId).toFile();
    LOG.info("Create suite execution log directory {}", dir);
    if (!dir.exists() && !dir.mkdirs()) {
        throw new IOException("Cannot create directory " + dir);
    }
    this.logAppProperties(dir);

    int threadCount = SYS_CONFIG.getExecutionThreadCount();
    LOG.info("Start execution engine with {} thread(s)", threadCount);
    int len = (threadCount + "").length();
    ThreadFactory namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("t%0" + len + "d").build();
    ExecutorService executorService = Executors.newFixedThreadPool(threadCount, namedThreadFactory);
    CompletionService<TestResult> completionService = new ExecutorCompletionService<>(executorService);

    LOG.info("Start to acquire test cases to execute");
    int numberOfFailures = 0;
    try {
        List<TestResult> tcrs = this.filter(this.db.getQueuedTestCaseResults(this.execId, 100));
        while (!tcrs.isEmpty()) {
            List<Future<TestResult>> futures = new ArrayList<>();

            for (TestResult tcr : tcrs) {
                LOG.info("Submit test case {}", tcr.getTestCase().format());
                futures.add(completionService.submit(new TestRunnerJUnit4(db, tcr)));
            }
            LOG.debug("Total {} test cases submitted", futures.size());

            for (Future<TestResult> f : futures) {
                try {
                    Future<TestResult> future = completionService.take();
                    TestResult tcr = future.get();
                    if (tcr == null) {
                        continue;
                    }
                    String result = tcr.getResult().result();
                    LOG.info("Get result of test case {} - {}", tcr.getTestCase().format(), result);
                    if (!ExecutionResult.PASS.name().equals(result) && !result.endsWith("/0")) {
                        numberOfFailures++;
                    }
                } catch (Throwable ex) {
                    LOG.error("Error executing test thread", ex);
                    numberOfFailures++;
                }
            }

            tcrs = this.filter(this.db.getQueuedTestCaseResults(this.execId, 100));
        }
    } finally {
        AbstractSuite.getSuites().stream().forEach((suite) -> {
            try {
                suite.tearDown();
            } catch (Exception ex) {
                LOG.warn("Error tearing down suite {} -  {}", suite.getClass(), ex.getMessage());
            }
        });
    }
    executorService.shutdown();

    LOG.info("No more test case to run on this host, updating suite execution result");
    this.db.updateSuiteExecutionResult(this.execId);
    this.db.saveJunitXml(this.execId);
    return numberOfFailures;
}

From source file:com.threadswarm.imagefeedarchiver.driver.CommandLineDriver.java
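This example submits one download task per filtered RSS item and takes each result as it finishes, logging the item's download status.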

@Override
public void run() {
    //setup filters
    List<RssItemFilter> filterList = new LinkedList<RssItemFilter>();
    filterList.add(new PreviouslyDownloadedItemFilter(processedRssItemDAO));
    RssItemFilter chainedItemFilter = new ChainedRssItemFilter(filterList);

    RssChannel rssChannel = null;
    try {
        rssChannel = fetchRssChannel(rssFeedUri);
    } catch (IOException | FeedParserException e) {
        LOGGER.error(
                "An Exception was thrown while attempting to download and parse the target RSS feed.. exiting",
                e);
        System.exit(1);
    }

    List<RssItem> filteredItemList = new LinkedList<RssItem>();
    if (rssChannel != null && rssChannel.getItems() != null) {
        for (RssItem rssItem : rssChannel.getItems()) {
            rssItem = chainedItemFilter.filter(rssItem);
            if (rssItem != null)
                filteredItemList.add(rssItem);
        }
    }

    if (!filteredItemList.isEmpty()) {
        //create list of headers to be used when downloading images
        List<Header> headerList = new ArrayList<Header>(2);
        if (doNotTrackRequested) {
            LOGGER.debug("Adding 'DNT' header to worker requests");
            headerList.add(DNT_HEADER);
        }
        headerList.add(new BasicHeader(HttpHeaders.REFERER, rssFeedUri.toString()));
        headerList = Collections.unmodifiableList(headerList);

        ExecutorService executorService = null;
        try {
            executorService = Executors.newFixedThreadPool(threadCount);
            CompletionService<ProcessedRssItem> completionService = new ExecutorCompletionService<ProcessedRssItem>(
                    executorService);
            Set<URI> processedURISet = new ConcurrentSkipListSet<URI>();
            int itemCount = 0;
            for (RssItem rssItem : filteredItemList) {
                completionService.submit(new RssItemProcessor(httpClient, rssItem, processedRssItemDAO,
                        outputDirectory, headerList, processedURISet, downloadDelay, forceHttps));
                itemCount++;
            }

            LOGGER.info("{} jobs submitted for execution", itemCount);

            for (int x = 0; x < itemCount; x++) {
                ProcessedRssItem processedItem = completionService.take().get();
                LOGGER.info("Item status: {} --> [{}]", processedItem.getRssItem().getTitle(),
                        processedItem.getDownloadStatus());
            }
        } catch (InterruptedException e) {
            LOGGER.warn("Thread interrupted while blocking", e);
            Thread.currentThread().interrupt(); // restore interrupt
        } catch (ExecutionException e) {
            LOGGER.error("An Exception was thrown during worker execution and subsequently propagated", e);
            e.printStackTrace();
        } finally {
            executorService.shutdown();
            try {
                executorService.awaitTermination(10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                LOGGER.warn("Thread interrupted while blocking", e);
                Thread.currentThread().interrupt(); // restore interrupt
            }
            httpClient.getConnectionManager().shutdown();
        }
    }
}