Example usage for java.util.concurrent CompletionService take

List of usage examples for java.util.concurrent CompletionService take

Introduction

This page collects example usages of java.util.concurrent CompletionService.take().

Prototype

Future<V> take() throws InterruptedException;

Document

Retrieves and removes the Future representing the next completed task, waiting if none are yet present.
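
Before the real-world examples, here is a minimal, self-contained sketch of the pattern they all follow: submit a batch of tasks to an ExecutorCompletionService and call take() once per submitted task, so results are consumed in completion order rather than submission order. The pool size and task bodies below are illustrative only.

import java.util.concurrent.*;

public class CompletionServiceTakeExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        CompletionService<Integer> completionService = new ExecutorCompletionService<>(pool);

        int taskCount = 10;
        for (int i = 0; i < taskCount; i++) {
            final int n = i;
            // A trivial task; real code would do real work here.
            completionService.submit(() -> n * n);
        }

        // take() blocks until some task has finished and hands back its Future.
        for (int i = 0; i < taskCount; i++) {
            Future<Integer> done = completionService.take();
            System.out.println("completed: " + done.get());
        }

        pool.shutdown();
    }
}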

Usage

From source file:org.codice.ddf.spatial.ogc.csw.catalog.transformer.CswQueryResponseTransformer.java

private String multiThreadedMarshal(List<Result> results, String recordSchema,
        final Map<String, Serializable> arguments) throws CatalogTransformerException {

    CompletionService<BinaryContent> completionService = new ExecutorCompletionService<>(queryExecutor);

    try {
        for (Result result : results) {
            final Metacard mc = result.getMetacard();

            final MetacardTransformer transformer = metacardTransformerManager
                    .getTransformerBySchema(recordSchema);

            if (transformer == null) {
                throw new CatalogTransformerException("Cannot find transformer for schema: " + recordSchema);
            }

            // the "current" thread will run submitted task when queueSize exceeded; effectively
            // blocking enqueue of more tasks.
            completionService.submit(new Callable<BinaryContent>() {
                @Override
                public BinaryContent call() throws Exception {
                    return transformer.transform(mc, arguments);
                }
            });
        }

        int metacardCount = results.size();
        CharArrayWriter accum = new CharArrayWriter(ACCUM_INITIAL_SIZE);
        for (int i = 0; i < metacardCount; i++) {
            Future<BinaryContent> binaryContentFuture = completionService.take(); // blocks
            BinaryContent binaryContent = binaryContentFuture.get();
            IOUtils.copy(binaryContent.getInputStream(), accum);
        }

        return accum.toString();

    } catch (IOException | InterruptedException | ExecutionException xe) {
        throw new CatalogTransformerException(xe);
    }

}
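
The comment above about the "current" thread running the submitted task describes the standard ThreadPoolExecutor.CallerRunsPolicy rejection handler. The queryExecutor backing the CompletionService is not shown in this excerpt; below is a minimal sketch of how such an executor might be configured. The pool size, queue capacity, and helper class are assumptions, not taken from the original source.

import java.util.concurrent.*;

class QueryExecutorFactory {
    // Hypothetical helper, not part of CswQueryResponseTransformer.
    static ExecutorService createQueryExecutor() {
        int poolSize = 8;    // assumed value
        int queueSize = 64;  // assumed value
        return new ThreadPoolExecutor(
                poolSize, poolSize,
                0L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(queueSize),
                // When the queue is full, the submitting ("current") thread runs the
                // task itself, which effectively blocks further submissions.
                new ThreadPoolExecutor.CallerRunsPolicy());
    }
}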

From source file:com.mgmtp.jfunk.core.JFunk.java

/**
 * Executes the jFunk test. A thread pool ({@link ExecutorService}) is created with the
 * configured number of threads, which handles concurrent script execution.
 */
@Override
protected void doExecute() throws Exception {
    ExecutorService execService = createExecutorService();
    CompletionService<Boolean> completionService = new ExecutorCompletionService<>(execService);

    for (final File script : scripts) {
        completionService.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() {
                boolean success = false;
                StopWatch stopWatch = new StopWatch();
                stopWatch.start();

                RESULT_LOG.info("Thread " + Thread.currentThread().getName() + ": starting execution of script "
                        + script.getName());

                try {
                    success = scriptExecutor.executeScript(script, scriptProperties);
                } catch (Throwable th) {
                    LOG.error(th.getMessage(), th);
                } finally {

                    LOG.info("SCRIPT EXECUTION " + (success ? "SUCCESSFUL" : "FAILED") + " (" + script + ")");

                    RESULT_LOG.info(
                            "Thread " + Thread.currentThread().getName() + ": finished execution of script "
                                    + script.getName() + " (took " + stopWatch + " H:mm:ss.SSS)");
                }
                return success;
            }
        });
    }

    boolean overallResult = true;
    for (int i = 0, size = scripts.size(); i < size; ++i) {
        if (!completionService.take().get()) {
            overallResult = false;
        }
    }

    shutDownExecutorService(execService);

    if (!overallResult) {
        throw new JFunkExecutionException();
    }
}

From source file:com.palantir.atlasdb.transaction.impl.SnapshotTransactionTest.java

@Test
public void testConcurrentWriteWriteConflicts() throws InterruptedException, ExecutionException {
    CompletionService<Void> executor = new ExecutorCompletionService<Void>(PTExecutors.newFixedThreadPool(8));
    final Cell cell = Cell.create("row1".getBytes(), "column1".getBytes());
    Transaction t1 = txManager.createNewTransaction();
    t1.put(TABLE, ImmutableMap.of(cell, EncodingUtils.encodeVarLong(0L)));
    t1.commit();
    for (int i = 0; i < 1000; i++) {
        executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                txManager.runTaskWithRetry(new TxTask() {
                    @Override
                    public Void execute(Transaction t) throws RuntimeException {
                        long prev = EncodingUtils
                                .decodeVarLong(t.get(TABLE, ImmutableSet.of(cell)).values().iterator().next());
                        t.put(TABLE, ImmutableMap.of(cell, EncodingUtils.encodeVarLong(prev + 1)));
                        return null;
                    }
                });
                return null;
            }
        });
    }
    for (int i = 0; i < 1000; i++) {
        Future<Void> future = executor.take();
        future.get();
    }
    t1 = txManager.createNewTransaction();
    long val = EncodingUtils.decodeVarLong(t1.get(TABLE, ImmutableSet.of(cell)).values().iterator().next());
    assertEquals(1000, val);
}

From source file:com.palantir.atlasdb.transaction.impl.SnapshotTransactionTest.java

@Test
public void testConcurrentWriteChangedConflicts() throws InterruptedException, ExecutionException {
    conflictDetectionManager.setConflictDetectionMode(TABLE, ConflictHandler.RETRY_ON_VALUE_CHANGED);
    CompletionService<Void> executor = new ExecutorCompletionService<Void>(PTExecutors.newFixedThreadPool(8));
    final Cell cell = Cell.create("row1".getBytes(), "column1".getBytes());
    Transaction t1 = txManager.createNewTransaction();
    t1.put(TABLE, ImmutableMap.of(cell, EncodingUtils.encodeVarLong(0L)));
    t1.commit();
    for (int i = 0; i < 1000; i++) {
        executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                txManager.runTaskWithRetry(new TxTask() {
                    @Override
                    public Void execute(Transaction t) throws RuntimeException {
                        long prev = EncodingUtils
                                .decodeVarLong(t.get(TABLE, ImmutableSet.of(cell)).values().iterator().next());
                        t.put(TABLE, ImmutableMap.of(cell, EncodingUtils.encodeVarLong(prev + 1)));
                        return null;
                    }
                });
                return null;
            }
        });
    }
    for (int i = 0; i < 1000; i++) {
        Future<Void> future = executor.take();
        future.get();
    }
    t1 = txManager.createNewTransaction();
    long val = EncodingUtils.decodeVarLong(t1.get(TABLE, ImmutableSet.of(cell)).values().iterator().next());
    assertEquals(1000, val);
}

From source file:org.dllearner.algorithms.qtl.experiments.SPARQLLearningProblemsGenerator.java

public void generateBenchmark(int nrOfSPARQLQueries, final int minDepth, final int maxDepth,
        int minNrOfExamples) {
    Collection<OWLClass> classes = getClasses();
    ArrayList<OWLClass> classesList = new ArrayList<>(classes);
    Collections.shuffle(classesList, new Random(123));
    classes = classesList;
    //      classes = Sets.newHashSet(new OWLClassImpl(IRI.create("http://semantics.crl.ibm.com/univ-bench-dl.owl#TennisFan")));

    //      ExecutorService tp = Executors.newFixedThreadPool(threadCount);
    List<Path> allPaths = new ArrayList<>();

    //      ThreadPoolExecutor tp = new CustomFutureReturningExecutor(
    //            threadCount, threadCount,
    //                5000L, TimeUnit.MILLISECONDS,
    //                new ArrayBlockingQueue<Runnable>(classes.size(), true));

    ExecutorService tp = Executors.newFixedThreadPool(threadCount);

    CompletionService<List<Path>> ecs = new ExecutorCompletionService<List<Path>>(tp);

    JDKRandomGenerator rndGen = new JDKRandomGenerator();
    rndGen.setSeed(123);

    int nrOfQueriesPerDepth = nrOfSPARQLQueries / (maxDepth - minDepth + 1);

    // for each depth <= maxDepth
    for (int depth = minDepth; depth <= maxDepth; depth++) {
        System.out.println("Generating " + nrOfQueriesPerDepth + " queries for depth " + depth);

        Iterator<OWLClass> iterator = classes.iterator();

        // generate paths of depths <= maxDepth
        List<Path> pathsForDepth = new ArrayList<>();

        while (pathsForDepth.size() < nrOfQueriesPerDepth && iterator.hasNext()) {

            Collection<Future<List<Path>>> futures = new ArrayList<>();

            try {
                int cnt = 0;
                while (iterator.hasNext() && (pathsForDepth.size() + ++cnt < nrOfQueriesPerDepth)) {
                    // pick next class
                    OWLClass cls = iterator.next();

                    //            int depth = rndGen.nextInt(maxDepth) + 1;

                    Future<List<Path>> future = ecs
                            .submit(new PathDetectionTask(dataDir, ks, schema, cls, depth, minNrOfExamples));
                    futures.add(future);
                }

                int n = futures.size();
                try {
                    for (int i = 0; i < n; ++i) {
                        Future<List<Path>> f = ecs.take();
                        if (!f.isCancelled()) {
                            List<Path> paths = f.get();

                            if (paths != null) {
                                for (int j = 0; j < Math.min(paths.size(), maxPathsPerClassAndDepth); j++) {
                                    pathsForDepth.add(paths.get(j));
                                }
                            }
                            //                        System.out.println("#Paths: " + paths.size());
                            //                        paths.forEach(p -> System.out.println(p));

                            if (pathsForDepth.size() >= nrOfQueriesPerDepth) {
                                break;
                            }
                        }
                    }
                } catch (InterruptedException | ExecutionException e) {
                    e.printStackTrace();
                }
            } finally {
                for (Future<List<Path>> f : futures) {
                    f.cancel(true);
                }
            }
        }

        allPaths.addAll(pathsForDepth);
    }

    //      for (Future<Path> future : futures) {
    //            try {
    //               Path path = future.get();
    //               if(path != null) {
    //                  paths.add(path);
    //               }
    //               if(paths.size() == nrOfSPARQLQueries) {
    //                  System.err.println("Benchmark generation finished. Stopping all running threads.");
    //                  tp.shutdownNow();
    //               }
    //         } catch (InterruptedException | ExecutionException e) {
    //            e.printStackTrace();
    //         }
    //            if(paths.size() == nrOfSPARQLQueries) {
    //               System.err.println("Benchmark generation finished. Stopping all running threads.");
    //               tp.shutdownNow();
    //            }
    //      }

    tp.shutdownNow();
    try {
        tp.awaitTermination(1, TimeUnit.HOURS);
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    //      try {
    //         tp.awaitTermination(1, TimeUnit.DAYS);
    //      } catch (InterruptedException e) {
    //         e.printStackTrace();
    //      }

    // write queries to disk
    String queries = "";
    for (Path path : allPaths) {
        System.out.println(path);
        queries += path.asSPARQLQuery(Var.alloc("s")) + "\n";
    }
    File file = new File(benchmarkDirectory,
            "queries_" + nrOfSPARQLQueries + "_" + minDepth + "-" + maxDepth + "_" + minNrOfExamples + ".txt");
    try {
        Files.write(queries, file, Charsets.UTF_8);
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:org.springframework.integration.store.MessageGroupQueueTests.java

private void doTestConcurrentAccess(int concurrency, final int maxPerTask, final Set<String> set)
        throws Exception {

    SimpleMessageStore messageGroupStore = new SimpleMessageStore();
    final MessageGroupQueue queue = new MessageGroupQueue(messageGroupStore, "FOO");
    ExecutorService executorService = Executors.newCachedThreadPool();
    CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(executorService);

    for (int i = 0; i < concurrency; i++) {

        final int big = i;

        completionService.submit(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                boolean result = true;
                for (int j = 0; j < maxPerTask; j++) {
                    result &= queue.add(new GenericMessage<String>("count=" + big + ":" + j));
                    if (!result) {
                        logger.warn("Failed to add");
                    }
                }
                return result;
            }
        });

        completionService.submit(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                boolean result = true;
                for (int j = 0; j < maxPerTask; j++) {
                    @SuppressWarnings("unchecked")
                    Message<String> item = (Message<String>) queue.poll(10, TimeUnit.SECONDS);
                    result &= item != null;
                    if (!result) {
                        logger.warn("Failed to poll");
                    } else if (set != null) {
                        synchronized (set) {
                            set.add(item.getPayload());
                        }
                    }
                }
                return result;
            }
        });

        messageGroupStore.expireMessageGroups(-10000);

    }

    for (int j = 0; j < 2 * concurrency; j++) {
        assertTrue(completionService.take().get());
    }

    if (set != null) {
        // Ensure all items polled are unique
        assertEquals(concurrency * maxPerTask, set.size());
    }

    assertEquals(0, queue.size());
    messageGroupStore.expireMessageGroups(-10000);
    assertEquals(Integer.MAX_VALUE, queue.remainingCapacity());

    executorService.shutdown();

}

From source file:org.apache.hadoop.hdfs.DFSInputStream.java

private ByteBuffer getFirstToComplete(CompletionService<ByteBuffer> hedgedService,
        ArrayList<Future<ByteBuffer>> futures) throws InterruptedException {
    if (futures.isEmpty()) {
        throw new InterruptedException("let's retry");
    }
    Future<ByteBuffer> future = null;
    try {
        future = hedgedService.take();
        ByteBuffer bb = future.get();
        futures.remove(future);
        return bb;
    } catch (ExecutionException e) {
        // already logged in the Callable
        futures.remove(future);
    } catch (CancellationException ce) {
        // already logged in the Callable
        futures.remove(future);
    }
    throw new InterruptedException("let's retry");
}

From source file:com.mellanox.r4h.DFSInputStream.java

private ByteBuffer getFirstToComplete(CompletionService<ByteBuffer> hedgedService,
        ArrayList<Future<ByteBuffer>> futures) throws InterruptedException {
    if (futures.isEmpty()) {
        throw new InterruptedException("let's retry");
    }
    Future<ByteBuffer> future = null;
    try {
        future = hedgedService.take();
        ByteBuffer bb = future.get();
        futures.remove(future);
        return bb;
    } catch (ExecutionException e) {
        // already logged in the Callable
        futures.remove(future);
    } catch (CancellationException ce) {
        // already logged in the Callable
        futures.remove(future);
    }

    throw new InterruptedException("let's retry");
}

From source file:com.ibm.jaggr.service.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 * 
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        cacheBust = aggregator.getOptions().getCacheBust();
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && rawConfig.equals(cached.rawConfig) && !validateDeps && !clean) {
        depMap = cached.depMap;
        return;
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode(PathUtil.getModuleName(path));
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one 
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (nodes with no children or dependency lists)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
}

From source file:com.laudandjolynn.mytv.crawler.tvmao.TvMaoCrawler.java

/**
 * Crawls all TV stations from the given HTML files.
 *
 * @param files the files to parse
 * @return the list of TV stations parsed from the files
 */
private List<TvStation> crawlAllTvStationFromFile(File[] files) {
    logger.info("crawl all tv station from files.");
    List<TvStation> resultList = new ArrayList<TvStation>();
    ThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("Mytv_Crawl_All_TV_Station_Of_TvMao_%d").build();
    ExecutorService executorService = Executors.newFixedThreadPool(2, threadFactory);
    CompletionService<List<TvStation>> completionService = new ExecutorCompletionService<List<TvStation>>(
            executorService);
    int size = files == null ? 0 : files.length;
    for (int i = 0; i < size; i++) {
        final File file = files[i];
        Callable<List<TvStation>> task = new Callable<List<TvStation>>() {
            @Override
            public List<TvStation> call() throws Exception {
                String filePath = file.getPath();
                String classifyEnds = filePath.substring(0, filePath.lastIndexOf(Constant.UNDERLINE));
                String city = classifyEnds.substring(classifyEnds.lastIndexOf(Constant.UNDERLINE) + 1);
                String html = null;
                try {
                    logger.debug("parse tv station file: " + filePath);
                    html = MyTvUtils.readAsHtml(filePath);
                } catch (IOException e) {
                    logger.error("read as xml error: " + filePath, e);
                    return null;
                }
                return parseTvStation(city, html);
            }
        };
        completionService.submit(task);
    }
    executorService.shutdown();
    int count = 0;
    while (count < size) {
        try {
            List<TvStation> stationList = completionService.take().get();
            if (stationList != null) {
                resultList.addAll(stationList);
            }
        } catch (InterruptedException e) {
            logger.error("crawl all tv station task interrupted.", e);
        } catch (ExecutionException e) {
            logger.error("crawl all tv station task executed fail.", e);
        }
        count++;
    }
    return resultList;
}