Example usage for the java.util.concurrent ExecutorCompletionService(Executor) constructor

Introduction

On this page you can find example usages of the java.util.concurrent ExecutorCompletionService(Executor) constructor, collected from open source projects.

Prototype

public ExecutorCompletionService(Executor executor) 

Document

Creates an ExecutorCompletionService using the supplied executor for base task execution and a LinkedBlockingQueue as a completion queue.
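
Before the project examples, here is a minimal self-contained sketch (class and variable names are hypothetical, not taken from any of the projects below) of the pattern this constructor enables: submit tasks through the completion service, then consume results in completion order rather than submission order.

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServiceSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        try {
            // The constructor accepts any Executor; completed tasks are queued
            // internally on a LinkedBlockingQueue.
            CompletionService<Integer> ecs = new ExecutorCompletionService<>(executor);
            for (int i = 1; i <= 4; i++) {
                final int n = i;
                ecs.submit(() -> n * n); // Callable<Integer>
            }
            // take() blocks until some task completes, whichever finishes first.
            for (int i = 0; i < 4; i++) {
                System.out.println("completed: " + ecs.take().get());
            }
        } finally {
            executor.shutdown();
        }
    }
}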

Usage

From source file:org.paxle.crawler.proxy.impl.ProxyDataProvider.java
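
The Paxle proxy data provider builds the completion service over a freshly created cached thread pool in its constructor, so crawler tasks can later hand back ICrawlerDocument results in completion order.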

public ProxyDataProvider(Properties props, ICommandTracker cmdTracker,
        Map<String, IDocumentFactory> docFactories, ICrawlerTools crawlerTools) {
    singleton = this;
    this.commandTracker = cmdTracker;
    this.docFactories = docFactories;
    this.crawlerTools = crawlerTools;

    // init threadpool
    // XXX should we set the thread-pool size? 
    this.execService = Executors.newCachedThreadPool();
    this.execCompletionService = new ExecutorCompletionService<ICrawlerDocument>(this.execService);

    // read preferences
    if (props != null) {
        this.props = props;
        this.commandProfileID = Integer.parseInt(props.getProperty(PREF_PROFILE_ID, "-1"));
    }

    // starting up the thread
    this.setName(this.getClass().getSimpleName());
    this.start();
}

From source file:com.alibaba.otter.manager.biz.monitor.impl.GlobalMonitor.java
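
Otter's global monitor fans one Callable per rule group out over the completion service, then takes exactly as many completions as it submitted, collecting any InterruptedException or ExecutionException and rethrowing them in a combined IllegalStateException.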

private void concurrentProcess(Map<Long, List<AlarmRule>> rules) {
    ExecutorCompletionService<Object> completionExecutor = new ExecutorCompletionService<>(executor);
    List<Future<Object>> futures = new ArrayList<>();
    for (Entry<Long, List<AlarmRule>> entry : rules.entrySet()) {
        final List<AlarmRule> alarmRules = entry.getValue();
        futures.add(completionExecutor.submit(new Callable<Object>() {

            @Override
            public Object call() throws Exception {
                pipelineMonitor.explore(alarmRules);
                return null;
            }
        }));
    }

    List<Throwable> exceptions = new ArrayList<Throwable>();
    int index = 0;
    int size = futures.size();
    while (index < size) {
        try {
            Future<?> future = completionExecutor.take();
            future.get();
        } catch (InterruptedException e) {
            exceptions.add(e);
        } catch (ExecutionException e) {
            exceptions.add(e);
        }
        index++;
    }

    if (!exceptions.isEmpty()) {
        StringBuilder sb = new StringBuilder(exceptions.size() + " exception happens in global monitor\n");
        sb.append("exception stack start :\n");
        for (Throwable t : exceptions) {
            sb.append(ExceptionUtils.getStackTrace(t));
        }
        sb.append("exception stack end \n");
        throw new IllegalStateException(sb.toString());
    }
}

From source file:org.apache.hadoop.hdfs.server.datanode.TestBatchIbr.java
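
This HDFS test chains two completion services on the same executor: file-creation tasks feed createService, and each resulting path is handed to verifyService as soon as it completes.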

static void runIbrTest(final long ibrInterval) throws Exception {
    final ExecutorService executor = createExecutor();
    final Random ran = new Random();

    final Configuration conf = newConf(ibrInterval);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();

    try {
        final String dirPathString = "/dir";
        final Path dir = new Path(dirPathString);
        dfs.mkdirs(dir);

        // start testing
        final long testStartTime = Time.monotonicNow();
        final ExecutorCompletionService<Path> createService = new ExecutorCompletionService<>(executor);
        final AtomicLong createFileTime = new AtomicLong();
        final AtomicInteger numBlockCreated = new AtomicInteger();

        // create files
        for (int i = 0; i < NUM_FILES; i++) {
            createService.submit(new Callable<Path>() {
                @Override
                public Path call() throws Exception {
                    final long start = Time.monotonicNow();
                    try {
                        final long seed = ran.nextLong();
                        final int numBlocks = ran.nextInt(MAX_BLOCK_NUM) + 1;
                        numBlockCreated.addAndGet(numBlocks);
                        return createFile(dir, numBlocks, seed, dfs);
                    } finally {
                        createFileTime.addAndGet(Time.monotonicNow() - start);
                    }
                }
            });
        }

        // verify files
        final ExecutorCompletionService<Boolean> verifyService = new ExecutorCompletionService<>(executor);
        final AtomicLong verifyFileTime = new AtomicLong();
        for (int i = 0; i < NUM_FILES; i++) {
            final Path file = createService.take().get();
            verifyService.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    final long start = Time.monotonicNow();
                    try {
                        return verifyFile(file, dfs);
                    } finally {
                        verifyFileTime.addAndGet(Time.monotonicNow() - start);
                    }
                }
            });
        }
        for (int i = 0; i < NUM_FILES; i++) {
            Assert.assertTrue(verifyService.take().get());
        }
        final long testEndTime = Time.monotonicNow();

        LOG.info("ibrInterval=" + ibrInterval + " ("
                + toConfString(DFS_BLOCKREPORT_INCREMENTAL_INTERVAL_MSEC_KEY, conf) + "), numBlockCreated="
                + numBlockCreated);
        LOG.info("duration=" + toSecondString(testEndTime - testStartTime) + ", createFileTime="
                + toSecondString(createFileTime.get()) + ", verifyFileTime="
                + toSecondString(verifyFileTime.get()));
        LOG.info("NUM_FILES=" + NUM_FILES + ", MAX_BLOCK_NUM=" + MAX_BLOCK_NUM + ", BLOCK_SIZE=" + BLOCK_SIZE
                + ", NUM_THREADS=" + NUM_THREADS + ", NUM_DATANODES=" + NUM_DATANODES);
        logIbrCounts(cluster.getDataNodes());
    } finally {
        executor.shutdown();
        cluster.shutdown();
    }
}

From source file:org.dllearner.algorithms.qtl.experiments.SPARQLLearningProblemsGenerator.java
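
Here the completion service drains batches of PathDetectionTask results with take(), stopping early once enough paths have been collected for the current depth and cancelling the remaining futures in the finally block.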

public void generateBenchmark(int nrOfSPARQLQueries, final int minDepth, final int maxDepth,
        int minNrOfExamples) {
    Collection<OWLClass> classes = getClasses();
    ArrayList<OWLClass> classesList = new ArrayList<>(classes);
    Collections.shuffle(classesList, new Random(123));
    classes = classesList;

    List<Path> allPaths = new ArrayList<>();

    ExecutorService tp = Executors.newFixedThreadPool(threadCount);

    CompletionService<List<Path>> ecs = new ExecutorCompletionService<List<Path>>(tp);

    JDKRandomGenerator rndGen = new JDKRandomGenerator();
    rndGen.setSeed(123);

    int nrOfQueriesPerDepth = nrOfSPARQLQueries / (maxDepth - minDepth + 1);

    // for each depth <= maxDepth
    for (int depth = minDepth; depth <= maxDepth; depth++) {
        System.out.println("Generating " + nrOfQueriesPerDepth + " queries for depth " + depth);

        Iterator<OWLClass> iterator = classes.iterator();

        // generate paths of depths <= maxDepth
        List<Path> pathsForDepth = new ArrayList<>();

        while (pathsForDepth.size() < nrOfQueriesPerDepth && iterator.hasNext()) {

            Collection<Future<List<Path>>> futures = new ArrayList<>();

            try {
                int cnt = 0;
                while (iterator.hasNext() && (pathsForDepth.size() + ++cnt < nrOfQueriesPerDepth)) {
                    // pick next class
                    OWLClass cls = iterator.next();

                    Future<List<Path>> future = ecs
                            .submit(new PathDetectionTask(dataDir, ks, schema, cls, depth, minNrOfExamples));
                    futures.add(future);
                }

                int n = futures.size();
                try {
                    for (int i = 0; i < n; ++i) {
                        Future<List<Path>> f = ecs.take();
                        if (!f.isCancelled()) {
                            List<Path> paths = f.get();

                            if (paths != null) {
                                for (int j = 0; j < Math.min(paths.size(), maxPathsPerClassAndDepth); j++) {
                                    pathsForDepth.add(paths.get(j));
                                }
                            }

                            if (pathsForDepth.size() >= nrOfQueriesPerDepth) {
                                break;
                            }
                        }
                    }
                } catch (InterruptedException | ExecutionException e) {
                    e.printStackTrace();
                }
            } finally {
                for (Future<List<Path>> f : futures) {
                    f.cancel(true);
                }
            }
        }

        allPaths.addAll(pathsForDepth);
    }

    tp.shutdownNow();
    try {
        tp.awaitTermination(1, TimeUnit.HOURS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    // write queries to disk
    String queries = "";
    for (Path path : allPaths) {
        System.out.println(path);
        queries += path.asSPARQLQuery(Var.alloc("s")) + "\n";
    }
    File file = new File(benchmarkDirectory,
            "queries_" + nrOfSPARQLQueries + "_" + minDepth + "-" + maxDepth + "_" + minNrOfExamples + ".txt");
    try {
        Files.write(queries, file, Charsets.UTF_8);
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:org.apache.hadoop.hbase.util.TestIdReadWriteLock.java
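
A straightforward fan-out/fan-in: NUM_THREADS lock-test clients are submitted, and the matching number of take() calls asserts that every client succeeded.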

@Test(timeout = 60000)
public void testMultipleClients() throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
    try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);
        for (int i = 0; i < NUM_THREADS; ++i)
            ecs.submit(new IdLockTestThread("client_" + i));
        for (int i = 0; i < NUM_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
        }
        // make sure the entry pool will be cleared after GC and purge call
        int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
        LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
        assertEquals(0, entryPoolSize);
    } finally {
        exec.shutdown();
        exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
    }
}

From source file:org.apache.solr.update.SolrCmdDistributor.java
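
Solr's command distributor simply wires the completion service to a shared update executor in its constructor and keeps it for later submissions.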

public SolrCmdDistributor(StreamingSolrClients clients, int maxRetriesOnForward, int retryPause) {
    this.clients = clients;
    this.maxRetriesOnForward = maxRetriesOnForward;
    this.retryPause = retryPause;
    this.updateExecutor = clients.getUpdateExecutor();
    completionService = new ExecutorCompletionService<>(updateExecutor);
}

From source file:org.springframework.batch.item.database.IbatisPagingItemReaderAsyncTests.java
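
This Spring Batch test lets THREAD_COUNT threads drain the same paging reader concurrently; the completion service gathers each thread's list of items so the total count and uniqueness can be asserted.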

/**
 * @throws Exception
 * @throws InterruptedException
 * @throws ExecutionException
 */
private void doTest() throws Exception, InterruptedException, ExecutionException {
    final IbatisPagingItemReader<Foo> reader = getItemReader();
    reader.setDataSource(dataSource);
    CompletionService<List<Foo>> completionService = new ExecutorCompletionService<List<Foo>>(
            Executors.newFixedThreadPool(THREAD_COUNT));
    for (int i = 0; i < THREAD_COUNT; i++) {
        completionService.submit(new Callable<List<Foo>>() {
            @Override
            public List<Foo> call() throws Exception {
                List<Foo> list = new ArrayList<Foo>();
                Foo next = null;
                do {
                    next = reader.read();
                    Thread.sleep(10L); // try to make it fairer
                    logger.debug("Reading item: " + next);
                    if (next != null) {
                        list.add(next);
                    }
                } while (next != null);
                return list;
            }
        });
    }
    int count = 0;
    Set<Foo> results = new HashSet<Foo>();
    for (int i = 0; i < THREAD_COUNT; i++) {
        List<Foo> items = completionService.take().get();
        count += items.size();
        logger.debug("Finished items count: " + items.size());
        logger.debug("Finished items: " + items);
        assertNotNull(items);
        results.addAll(items);
    }
    assertEquals(ITEM_COUNT, count);
    assertEquals(ITEM_COUNT, results.size());
    reader.close();
}

From source file:org.springframework.batch.item.database.JdbcPagingItemReaderAsyncTests.java
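
The same concurrent-reader pattern as above, applied to the JDBC paging item reader.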

/**
 * @throws Exception
 * @throws InterruptedException
 * @throws ExecutionException
 */
private void doTest() throws Exception, InterruptedException, ExecutionException {
    final ItemReader<Foo> reader = getItemReader();
    CompletionService<List<Foo>> completionService = new ExecutorCompletionService<List<Foo>>(
            Executors.newFixedThreadPool(THREAD_COUNT));
    for (int i = 0; i < THREAD_COUNT; i++) {
        completionService.submit(new Callable<List<Foo>>() {
            @Override
            public List<Foo> call() throws Exception {
                List<Foo> list = new ArrayList<Foo>();
                Foo next = null;
                do {
                    next = reader.read();
                    Thread.sleep(10L);
                    logger.debug("Reading item: " + next);
                    if (next != null) {
                        list.add(next);
                    }
                } while (next != null);
                return list;
            }
        });
    }
    int count = 0;
    Set<Foo> results = new HashSet<Foo>();
    for (int i = 0; i < THREAD_COUNT; i++) {
        List<Foo> items = completionService.take().get();
        count += items.size();
        logger.debug("Finished items count: " + items.size());
        logger.debug("Finished items: " + items);
        assertNotNull(items);
        results.addAll(items);
    }
    assertEquals(ITEM_COUNT, count);
    assertEquals(ITEM_COUNT, results.size());
}

From source file:org.springframework.integration.store.MessageGroupQueueTests.java
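
Producer and consumer Callables are submitted in pairs against the MessageGroupQueue, and 2 * concurrency take() calls confirm that every task reported success.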

private void doTestConcurrentAccess(int concurrency, final int maxPerTask, final Set<String> set)
        throws Exception {

    SimpleMessageStore messageGroupStore = new SimpleMessageStore();
    final MessageGroupQueue queue = new MessageGroupQueue(messageGroupStore, "FOO");
    ExecutorService executorService = Executors.newCachedThreadPool();
    CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(executorService);

    for (int i = 0; i < concurrency; i++) {

        final int big = i;

        completionService.submit(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                boolean result = true;
                for (int j = 0; j < maxPerTask; j++) {
                    result &= queue.add(new GenericMessage<String>("count=" + big + ":" + j));
                    if (!result) {
                        logger.warn("Failed to add");
                    }
                }
                return result;
            }
        });

        completionService.submit(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                boolean result = true;
                for (int j = 0; j < maxPerTask; j++) {
                    @SuppressWarnings("unchecked")
                    Message<String> item = (Message<String>) queue.poll(10, TimeUnit.SECONDS);
                    result &= item != null;
                    if (!result) {
                        logger.warn("Failed to poll");
                    } else if (set != null) {
                        synchronized (set) {
                            set.add(item.getPayload());
                        }
                    }
                }
                return result;
            }
        });

        messageGroupStore.expireMessageGroups(-10000);

    }

    for (int j = 0; j < 2 * concurrency; j++) {
        assertTrue(completionService.take().get());
    }

    if (set != null) {
        // Ensure all items polled are unique
        assertEquals(concurrency * maxPerTask, set.size());
    }

    assertEquals(0, queue.size());
    messageGroupStore.expireMessageGroups(-10000);
    assertEquals(Integer.MAX_VALUE, queue.remainingCapacity());

    executorService.shutdown();

}

From source file:org.tinymediamanager.scraper.imdb.ImdbMovieParser.java
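
tinyMediaManager uses two typed completion services over one shared executor to fetch the IMDB reference, plot-summary, and release-info pages plus optional TMDB metadata in parallel; note that results are joined via the individual Futures rather than take().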

MediaMetadata getMovieMetadata(MediaScrapeOptions options) throws Exception {
    MediaMetadata md = new MediaMetadata(providerInfo.getId());

    // check if there is a md in the result
    if (options.getResult() != null && options.getResult().getMediaMetadata() != null) {
        LOGGER.debug("IMDB: getMetadata from cache: " + options.getResult());
        return options.getResult().getMediaMetadata();
    }

    String imdbId = "";

    // imdbId from searchResult
    if (options.getResult() != null) {
        imdbId = options.getResult().getIMDBId();
    }

    // imdbid from scraper option
    if (!MetadataUtil.isValidImdbId(imdbId)) {
        imdbId = options.getImdbId();
    }

    if (!MetadataUtil.isValidImdbId(imdbId)) {
        return md;
    }

    LOGGER.debug("IMDB: getMetadata(imdbId): " + imdbId);
    md.setId(providerInfo.getId(), imdbId);

    ExecutorCompletionService<Document> compSvcImdb = new ExecutorCompletionService<>(executor);
    ExecutorCompletionService<MediaMetadata> compSvcTmdb = new ExecutorCompletionService<>(executor);

    // worker for imdb request (/reference) (always fetched from www.imdb.com)
    StringBuilder sb = new StringBuilder(ImdbSiteDefinition.IMDB_COM.getSite());
    sb.append("title/");
    sb.append(imdbId);
    sb.append("/reference");
    Callable<Document> worker = new ImdbWorker(sb.toString(), options.getLanguage().getLanguage(),
            options.getCountry().getAlpha2(), imdbSite);
    Future<Document> futureReference = compSvcImdb.submit(worker);

    // worker for imdb request (/plotsummary) (from chosen site)
    Future<Document> futurePlotsummary;
    sb = new StringBuilder(imdbSite.getSite());
    sb.append("title/");
    sb.append(imdbId);
    sb.append("/plotsummary");

    worker = new ImdbWorker(sb.toString(), options.getLanguage().getLanguage(),
            options.getCountry().getAlpha2(), imdbSite);
    futurePlotsummary = compSvcImdb.submit(worker);

    // worker for tmdb request
    Future<MediaMetadata> futureTmdb = null;
    if (ImdbMetadataProvider.providerInfo.getConfig().getValueAsBool("useTmdb")
            || ImdbMetadataProvider.providerInfo.getConfig().getValueAsBool("scrapeCollectionInfo")) {
        Callable<MediaMetadata> worker2 = new TmdbWorker(imdbId, options.getLanguage(), options.getCountry());
        futureTmdb = compSvcTmdb.submit(worker2);
    }

    Document doc;
    doc = futureReference.get();
    parseReferencePage(doc, options, md);

    // plot from /plotsummary
    doc = futurePlotsummary.get();
    parsePlotsummaryPage(doc, options, md);

    // title also from chosen site if we are not scraping akas.imdb.com
    if (imdbSite != ImdbSiteDefinition.IMDB_COM) {
        Element title = doc.getElementById("tn15title");
        if (title != null) {
            Element element;
            // title
            Elements elements = title.getElementsByClass("main");
            if (elements.size() > 0) {
                element = elements.first();
                String movieTitle = cleanString(element.ownText());
                md.setTitle(movieTitle);
            }
        }
    }

    // get the release info page
    Future<Document> futureReleaseinfo;
    sb = new StringBuilder(imdbSite.getSite());
    sb.append("title/");
    sb.append(imdbId);
    sb.append("/releaseinfo");
    worker = new ImdbWorker(sb.toString(), options.getLanguage().getLanguage(),
            options.getCountry().getAlpha2(), imdbSite);
    futureReleaseinfo = compSvcImdb.submit(worker);
    doc = futureReleaseinfo.get();
    // parse original title here!!
    parseReleaseinfoPageAKAs(doc, options, md);

    // did we get a release date?
    if (md.getReleaseDate() == null
            || ImdbMetadataProvider.providerInfo.getConfig().getValueAsBool("localReleaseDate")) {
        parseReleaseinfoPage(doc, options, md);
    }

    // get data from tmdb?
    if (futureTmdb != null && (ImdbMetadataProvider.providerInfo.getConfig().getValueAsBool("useTmdb")
            || ImdbMetadataProvider.providerInfo.getConfig().getValueAsBool("scrapeCollectionInfo"))) {
        try {
            MediaMetadata tmdbMd = futureTmdb.get();
            if (ImdbMetadataProvider.providerInfo.getConfig().getValueAsBool("useTmdb") && tmdbMd != null) {
                // tmdbid
                md.setId(MediaMetadata.TMDB, tmdbMd.getId(MediaMetadata.TMDB));
                // title
                if (StringUtils.isNotBlank(tmdbMd.getTitle())) {
                    md.setTitle(tmdbMd.getTitle());
                }
                // original title
                if (StringUtils.isNotBlank(tmdbMd.getOriginalTitle())) {
                    md.setOriginalTitle(tmdbMd.getOriginalTitle());
                }
                // tagline
                if (StringUtils.isNotBlank(tmdbMd.getTagline())) {
                    md.setTagline(tmdbMd.getTagline());
                }
                // plot
                if (StringUtils.isNotBlank(tmdbMd.getPlot())) {
                    md.setPlot(tmdbMd.getPlot());
                }
                // collection info
                if (StringUtils.isNotBlank(tmdbMd.getCollectionName())) {
                    md.setCollectionName(tmdbMd.getCollectionName());
                    md.setId(MediaMetadata.TMDB_SET, tmdbMd.getId(MediaMetadata.TMDB_SET));
                }
            }
            if (ImdbMetadataProvider.providerInfo.getConfig().getValueAsBool("scrapeCollectionInfo")
                    && tmdbMd != null) {
                md.setId(MediaMetadata.TMDB_SET, tmdbMd.getId(MediaMetadata.TMDB_SET));
                md.setCollectionName(tmdbMd.getCollectionName());
            }
            md.setId(tmdbMd.getProviderId(), tmdbMd.getId(tmdbMd.getProviderId()));
        } catch (Exception ignored) {
        }
    }

    // if we have still no original title, take the title
    if (StringUtils.isBlank(md.getOriginalTitle())) {
        md.setOriginalTitle(md.getTitle());
    }

    // populate id
    md.setId(ImdbMetadataProvider.providerInfo.getId(), imdbId);

    return md;
}