Example usage for java.util.concurrent CompletionService submit

Introduction

This page lists example usages of the java.util.concurrent CompletionService submit method.

Prototype

Future<V> submit(Callable<V> task);

Document

Submits a value-returning task for execution and returns a Future representing the pending results of the task.
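
Before the project-specific examples below, here is a minimal, self-contained sketch of the usual pattern: wrap an ExecutorService in an ExecutorCompletionService, submit value-returning tasks, and consume the results in completion order with take(). The class name and the squaring tasks are illustrative only and are not taken from any of the sources on this page.

import java.util.concurrent.*;

public class CompletionServiceSubmitSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        // The completion service hands back futures in the order the tasks finish.
        CompletionService<Integer> ecs = new ExecutorCompletionService<>(pool);

        int taskCount = 10;
        for (int i = 0; i < taskCount; i++) {
            final int n = i;
            // submit(Callable) returns a Future<Integer> representing the pending result.
            ecs.submit(() -> n * n);
        }

        try {
            for (int i = 0; i < taskCount; i++) {
                // take() blocks until some submitted task completes; get() returns its result.
                Integer result = ecs.take().get();
                System.out.println("completed: " + result);
            }
        } catch (ExecutionException e) {
            e.printStackTrace();
        } finally {
            pool.shutdown();
        }
    }
}

Unlike polling individual Future objects, take() always yields whichever submitted task finished first, which is the pattern most of the examples below rely on.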

Usage

From source file:org.dllearner.algorithms.qtl.experiments.SPARQLLearningProblemsGenerator.java

public void generateBenchmark(int nrOfSPARQLQueries, final int minDepth, final int maxDepth,
        int minNrOfExamples) {
    Collection<OWLClass> classes = getClasses();
    ArrayList<OWLClass> classesList = new ArrayList<>(classes);
    Collections.shuffle(classesList, new Random(123));
    classes = classesList;
    //      classes = Sets.newHashSet(new OWLClassImpl(IRI.create("http://semantics.crl.ibm.com/univ-bench-dl.owl#TennisFan")));

    //      ExecutorService tp = Executors.newFixedThreadPool(threadCount);
    List<Path> allPaths = new ArrayList<>();

    //      ThreadPoolExecutor tp = new CustomFutureReturningExecutor(
    //            threadCount, threadCount,
    //                5000L, TimeUnit.MILLISECONDS,
    //                new ArrayBlockingQueue<Runnable>(classes.size(), true));

    ExecutorService tp = Executors.newFixedThreadPool(threadCount);

    CompletionService<List<Path>> ecs = new ExecutorCompletionService<List<Path>>(tp);

    JDKRandomGenerator rndGen = new JDKRandomGenerator();
    rndGen.setSeed(123);

    int nrOfQueriesPerDepth = nrOfSPARQLQueries / (maxDepth - minDepth + 1);

    // for each depth <= maxDepth
    for (int depth = minDepth; depth <= maxDepth; depth++) {
        System.out.println("Generating " + nrOfQueriesPerDepth + " queries for depth " + depth);

        Iterator<OWLClass> iterator = classes.iterator();

        // generate paths of depths <= maxDepth
        List<Path> pathsForDepth = new ArrayList<>();

        while (pathsForDepth.size() < nrOfQueriesPerDepth && iterator.hasNext()) {

            Collection<Future<List<Path>>> futures = new ArrayList<>();

            try {
                int cnt = 0;
                while (iterator.hasNext() && (pathsForDepth.size() + ++cnt < nrOfQueriesPerDepth)) {
                    // pick next class
                    OWLClass cls = iterator.next();

                    //            int depth = rndGen.nextInt(maxDepth) + 1;

                    Future<List<Path>> future = ecs
                            .submit(new PathDetectionTask(dataDir, ks, schema, cls, depth, minNrOfExamples));
                    futures.add(future);
                }

                int n = futures.size();
                try {
                    for (int i = 0; i < n; ++i) {
                        Future<List<Path>> f = ecs.take();
                        if (!f.isCancelled()) {
                            List<Path> paths = f.get();

                            if (paths != null) {
                                for (int j = 0; j < Math.min(paths.size(), maxPathsPerClassAndDepth); j++) {
                                    pathsForDepth.add(paths.get(j));
                                }
                            }
                            //                        System.out.println("#Paths: " + paths.size());
                            //                        paths.forEach(p -> System.out.println(p));

                            if (pathsForDepth.size() >= nrOfQueriesPerDepth) {
                                break;
                            }
                        }
                    }
                } catch (InterruptedException | ExecutionException e) {
                    e.printStackTrace();
                }
            } finally {
                for (Future<List<Path>> f : futures) {
                    f.cancel(true);
                }
            }
        }

        allPaths.addAll(pathsForDepth);
    }

    //      for (Future<Path> future : futures) {
    //            try {
    //               Path path = future.get();
    //               if(path != null) {
    //                  paths.add(path);
    //               }
    //               if(paths.size() == nrOfSPARQLQueries) {
    //                  System.err.println("Benchmark generation finished. Stopping all running threads.");
    //                  tp.shutdownNow();
    //               }
    //         } catch (InterruptedException | ExecutionException e) {
    //            e.printStackTrace();
    //         }
    //            if(paths.size() == nrOfSPARQLQueries) {
    //               System.err.println("Benchmark generation finished. Stopping all running threads.");
    //               tp.shutdownNow();
    //            }
    //      }

    tp.shutdownNow();
    try {
        tp.awaitTermination(1, TimeUnit.HOURS);
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    //      try {
    //         tp.awaitTermination(1, TimeUnit.DAYS);
    //      } catch (InterruptedException e) {
    //         e.printStackTrace();
    //      }

    // write queries to disk
    String queries = "";
    for (Path path : allPaths) {
        System.out.println(path);
        queries += path.asSPARQLQuery(Var.alloc("s")) + "\n";
    }
    File file = new File(benchmarkDirectory,
            "queries_" + nrOfSPARQLQueries + "_" + minDepth + "-" + maxDepth + "_" + minNrOfExamples + ".txt");
    try {
        Files.write(queries, file, Charsets.UTF_8);
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:com.ibm.jaggr.service.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 * 
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization 
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        cacheBust = aggregator.getOptions().getCacheBust();
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && rawConfig.equals(cached.rawConfig) && !validateDeps && !clean) {
        depMap = cached.depMap;
        return;
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode(PathUtil.getModuleName(path));
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one 
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (nodes with no children or dependency lists)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
}

From source file:ddf.catalog.cache.solr.impl.CachingFederationStrategy.java

private QueryResponse sourceFederate(List<Source> sources, final QueryRequest queryRequest) {
    if (LOGGER.isDebugEnabled()) {
        for (Source source : sources) {
            if (source != null) {
                LOGGER.debug("source to query: {}", source.getId());
            }
        }
    }

    Query originalQuery = queryRequest.getQuery();

    int offset = originalQuery.getStartIndex();
    final int pageSize = originalQuery.getPageSize();

    // limit offset to max value
    if (offset > this.maxStartIndex) {
        offset = this.maxStartIndex;
    }

    final QueryResponseImpl queryResponseQueue = new QueryResponseImpl(queryRequest, null);

    Map<Future<SourceResponse>, QueryRequest> futures = new HashMap<>();

    Query modifiedQuery = getModifiedQuery(originalQuery, sources.size(), offset, pageSize);
    QueryRequest modifiedQueryRequest = new QueryRequestImpl(modifiedQuery, queryRequest.isEnterprise(),
            queryRequest.getSourceIds(), queryRequest.getProperties());

    CompletionService<SourceResponse> queryCompletion = new ExecutorCompletionService<>(queryExecutorService);

    // Do NOT call source.isAvailable() when checking sources
    for (final Source source : sources) {
        if (source != null) {
            LOGGER.debug("running query on source: {}", source.getId());

            QueryRequest sourceQueryRequest = new QueryRequestImpl(modifiedQuery, queryRequest.isEnterprise(),
                    Collections.singleton(source.getId()), new HashMap<>(queryRequest.getProperties()));
            try {
                for (PreFederatedQueryPlugin service : preQuery) {
                    try {
                        sourceQueryRequest = service.process(source, sourceQueryRequest);
                    } catch (PluginExecutionException e) {
                        LOGGER.info("Error executing PreFederatedQueryPlugin", e);
                    }
                }
            } catch (StopProcessingException e) {
                LOGGER.info("Plugin stopped processing", e);
            }

            futures.put(queryCompletion.submit(new CallableSourceResponse(source, sourceQueryRequest)),
                    sourceQueryRequest);
        }
    }

    QueryResponseImpl offsetResults = null;
    // If there are offsets and more than one source, we have to get all the
    // results back and then
    // transfer them into a different Queue. That is what the
    // OffsetResultHandler does.
    if (offset > 1 && sources.size() > 1) {
        offsetResults = new QueryResponseImpl(queryRequest, null);
        queryExecutorService
                .submit(new OffsetResultHandler(queryResponseQueue, offsetResults, pageSize, offset));
    }

    queryExecutorService.submit(sortedQueryMonitorFactory.createMonitor(queryCompletion, futures,
            queryResponseQueue, modifiedQueryRequest, postQuery));

    QueryResponse queryResponse;
    if (offset > 1 && sources.size() > 1) {
        queryResponse = offsetResults;
        LOGGER.debug("returning offsetResults");
    } else {
        queryResponse = queryResponseQueue;
        LOGGER.debug("returning returnResults: {}", queryResponse);
    }

    LOGGER.debug("returning Query Results: {}", queryResponse);
    return queryResponse;
}

From source file:com.ibm.jaggr.core.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 *
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    final String sourceMethod = "<ctor>"; //$NON-NLS-1$
    boolean isTraceLogging = log.isLoggable(Level.FINER);
    if (isTraceLogging) {
        log.entering(DepTree.class.getName(), sourceMethod,
                new Object[] { paths, aggregator, stamp, clean, validateDeps });
    }
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();
    cacheBust = AggregatorUtil.getCacheBust(aggregator);

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                if (isTraceLogging) {
                    log.finer("Attempting to read cached dependencies from " + cacheFile.toString()); //$NON-NLS-1$
                }
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (isTraceLogging) {
                log.finer("Current cacheBust = " + cacheBust + ", cached cacheBust = " + cached.cacheBust); //$NON-NLS-1$//$NON-NLS-2$
            }
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
        if (cached != null && !StringUtils.equals(rawConfig, cached.rawConfig)) {
            if (isTraceLogging) {
                log.finer("Current config = " + rawConfig); //$NON-NLS-1$
                log.finer("Cached config = " + cached.rawConfig); //$NON-NLS-1$
            }
            validateDeps = true;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && !validateDeps && !clean) {
        depMap = cached.depMap;
        fromCache = true;
        return;
    } else if (isTraceLogging) {
        log.finer("Building/validating deps: cached = " + cached + ", validateDeps = " + validateDeps //$NON-NLS-1$//$NON-NLS-2$
                + ", clean = " + clean); //$NON-NLS-1$
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    Set<String> nonJSExtensions = Collections.unmodifiableSet(getNonJSExtensions(aggregator));
    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode("", path); //$NON-NLS-1$
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode, nonJSExtensions));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (folder nodes with no children)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        if (isTraceLogging) {
            log.finer("Writing cached dependencies to " + cacheFile.toString()); //$NON-NLS-1$
        }
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    if (isTraceLogging) {
        log.exiting(DepTree.class.getName(), sourceMethod);
    }
}

From source file:com.laudandjolynn.mytv.crawler.tvmao.TvMaoCrawler.java

/**
 * Crawls all TV stations from the given files.
 * 
 * @param files the files to parse
 * @return the list of TV stations parsed from the files
 */
private List<TvStation> crawlAllTvStationFromFile(File[] files) {
    logger.info("crawl all tv station from files.");
    List<TvStation> resultList = new ArrayList<TvStation>();
    ThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("Mytv_Crawl_All_TV_Station_Of_TvMao_%d").build();
    ExecutorService executorService = Executors.newFixedThreadPool(2, threadFactory);
    CompletionService<List<TvStation>> completionService = new ExecutorCompletionService<List<TvStation>>(
            executorService);
    int size = files == null ? 0 : files.length;
    for (int i = 0; i < size; i++) {
        final File file = files[i];
        Callable<List<TvStation>> task = new Callable<List<TvStation>>() {
            @Override
            public List<TvStation> call() throws Exception {
                String filePath = file.getPath();
                String classifyEnds = filePath.substring(0, filePath.lastIndexOf(Constant.UNDERLINE));
                String city = classifyEnds.substring(classifyEnds.lastIndexOf(Constant.UNDERLINE) + 1);
                String html = null;
                try {
                    logger.debug("parse tv station file: " + filePath);
                    html = MyTvUtils.readAsHtml(filePath);
                } catch (IOException e) {
                    logger.error("read as xml error: " + filePath, e);
                    return null;
                }
                return parseTvStation(city, html);
            }
        };
        completionService.submit(task);
    }
    executorService.shutdown();
    int count = 0;
    while (count < size) {
        try {
            List<TvStation> stationList = completionService.take().get();
            if (stationList != null) {
                resultList.addAll(stationList);
            }
        } catch (InterruptedException e) {
            logger.error("crawl all tv station task interrupted.", e);
        } catch (ExecutionException e) {
            logger.error("crawl all tv station task executed fail.", e);
        }
        count++;
    }
    return resultList;
}

From source file:com.alibaba.wasp.fserver.EntityGroup.java

private synchronized void commitTransaction(WALEdit edit) throws IOException {
    Transaction t = edit.getT();
    if (LOG.isDebugEnabled()) {
        LOG.debug("EntityGroup commitTransaction:" + t.getTransactionID());
    }
    List<Mutate> mutates = t.getEdits();
    CompletionService<InsureStatus> completionService = new ExecutorCompletionService<InsureStatus>(
            this.services.getThreadPool());

    for (Mutate mutate : mutates) {
        String tableName = mutate.getTableName();
        try {
            if (mutate.getMutateType() == Mutate.MutateType.PUT) {
                Put put = ProtobufUtil.toPut(mutate, t.getTransactionID());
                completionService.submit(new InsurePut(tableName, put));
            } else if (mutate.getMutateType() == Mutate.MutateType.DELETE) {
                Delete delete = ProtobufUtil.toDelete(mutate);
                completionService.submit(new InsureDelete(tableName, delete));
            }
        } catch (DoNotRetryIOException e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("convert mutate to Put or Delete error.", e);
            }
        }
    }

    int errors = 0;
    for (int i = 0; i < mutates.size(); i++) {
        try {
            Future<InsureStatus> result = completionService.take();
            if (InsureStatus.SUCCESS == result.get()) {
                // nothing, this operation succeeded.
            } else if (InsureStatus.FAILED == result.get()) {
                errors++;
            } else {
                LOG.warn("What happened?");
                errors++;
            }
        } catch (InterruptedException e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("transaction execute error", e);
            }
        } catch (ExecutionException e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("transaction execute error", e);
            }
        }
    }
    if (errors != 0) {
        String message = "transaction id=" + t.getTransactionID() + " process occur " + errors + " errors";
        LOG.warn(message);
        throw new IOException(message);
    }

    try {
        redo.commit(edit);
    } catch (AlreadyCommitTransactionException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error("the transaction id=" + t.getTransactionID() + " has all ready commited", e);
        }
    } catch (NotInitlizedRedoException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error("the transaction id=" + t.getTransactionID()
                    + " commited failed as a result of the redo log has a error ", e);
        }
    } catch (RedoLogNotServingException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error("the transaction id=" + t.getTransactionID()
                    + " commited failed as a result of the redo log has been closed ", e);
        }
    }
    Primary primary = edit.getAction();
    if (primary != null) {
        this.releaseRowLock(primary.getCombinedPrimaryKey());
    }
}

From source file:org.apache.nifi.cluster.manager.impl.HttpRequestReplicatorImpl.java

private Set<NodeResponse> replicateHelper(final Set<NodeIdentifier> nodeIds, final String method,
        final String scheme, final String path, final Map<String, List<String>> parameters, final Object entity,
        final Map<String, String> headers) throws UriConstructionException {

    if (nodeIds.isEmpty()) {
        return new HashSet<>(); // return quickly for trivial case
    }

    final CompletionService<NodeResponse> completionService = new ExecutorCompletionService<>(executorService);

    // keeps track of future requests so that failed requests can be tied back to the failing node
    final Collection<NodeHttpRequestFutureWrapper> futureNodeHttpRequests = new ArrayList<>();

    // construct the URIs for the nodes
    final Map<NodeIdentifier, URI> uriMap = new HashMap<>();
    try {
        for (final NodeIdentifier nodeId : nodeIds) {
            final URI nodeUri = new URI(scheme, null, nodeId.getApiAddress(), nodeId.getApiPort(), path,
                    /* query */ null, /* fragment */ null);
            uriMap.put(nodeId, nodeUri);
        }
    } catch (final URISyntaxException use) {
        throw new UriConstructionException(use);
    }

    // submit the requests to the nodes
    final String requestId = UUID.randomUUID().toString();
    headers.put(WebClusterManager.REQUEST_ID_HEADER, requestId);
    for (final Map.Entry<NodeIdentifier, URI> entry : uriMap.entrySet()) {
        final NodeIdentifier nodeId = entry.getKey();
        final URI nodeUri = entry.getValue();
        final NodeHttpRequestCallable callable = (entity == null)
                ? new NodeHttpRequestCallable(nodeId, method, nodeUri, parameters, headers)
                : new NodeHttpRequestCallable(nodeId, method, nodeUri, entity, headers);
        futureNodeHttpRequests.add(
                new NodeHttpRequestFutureWrapper(nodeId, method, nodeUri, completionService.submit(callable)));
    }

    // get the node responses
    final Set<NodeResponse> result = new HashSet<>();
    for (int i = 0; i < nodeIds.size(); i++) {

        // keeps track of the original request information in case we receive an exception
        NodeHttpRequestFutureWrapper futureNodeHttpRequest = null;
        try {

            // get the future resource response for the node
            final Future<NodeResponse> futureNodeResourceResponse = completionService.take();

            // find the original request by comparing the submitted future with the future returned by the completion service
            for (final NodeHttpRequestFutureWrapper futureNodeHttpRequestElem : futureNodeHttpRequests) {
                if (futureNodeHttpRequestElem.getFuture() == futureNodeResourceResponse) {
                    futureNodeHttpRequest = futureNodeHttpRequestElem;
                }
            }

            // try to retrieve the node response and add to result
            final NodeResponse nodeResponse = futureNodeResourceResponse.get();
            result.add(nodeResponse);

        } catch (final InterruptedException | ExecutionException ex) {

            logger.warn(
                    "Node request for " + futureNodeHttpRequest.getNodeId() + " encountered exception: " + ex,
                    ex);

            // create node response with the thrown exception and add to result
            final NodeResponse nodeResponse = new NodeResponse(futureNodeHttpRequest.getNodeId(),
                    futureNodeHttpRequest.getHttpMethod(), futureNodeHttpRequest.getRequestUri(), ex);
            result.add(nodeResponse);

        }
    }

    if (logger.isDebugEnabled()) {
        NodeResponse min = null;
        NodeResponse max = null;
        long nanosSum = 0L;
        int nanosAdded = 0;

        for (final NodeResponse response : result) {
            final long requestNanos = response.getRequestDuration(TimeUnit.NANOSECONDS);
            final long minNanos = (min == null) ? -1 : min.getRequestDuration(TimeUnit.NANOSECONDS);
            final long maxNanos = (max == null) ? -1 : max.getRequestDuration(TimeUnit.NANOSECONDS);

            if (requestNanos < minNanos || minNanos < 0L) {
                min = response;
            }

            if (requestNanos > maxNanos || maxNanos < 0L) {
                max = response;
            }

            if (requestNanos >= 0L) {
                nanosSum += requestNanos;
                nanosAdded++;
            }
        }

        final StringBuilder sb = new StringBuilder();
        sb.append("Node Responses for ").append(method).append(" ").append(path).append(" (Request ID ")
                .append(requestId).append("):\n");
        for (final NodeResponse response : result) {
            sb.append(response).append("\n");
        }

        final long averageNanos = (nanosAdded == 0) ? -1L : nanosSum / nanosAdded;
        final long averageMillis = (averageNanos < 0) ? averageNanos
                : TimeUnit.MILLISECONDS.convert(averageNanos, TimeUnit.NANOSECONDS);
        logger.debug("For {} {} (Request ID {}), minimum response time = {}, max = {}, average = {} ms", method,
                path, requestId, min, max, averageMillis);
        logger.debug(sb.toString());
    }

    return result;
}

From source file:org.apache.hadoop.hbase.regionserver.HStore.java

@Override
public ImmutableCollection<StoreFile> close() throws IOException {
    this.lock.writeLock().lock();
    try {
        // Clear so metrics doesn't find them.
        ImmutableCollection<StoreFile> result = storeEngine.getStoreFileManager().clearFiles();

        if (!result.isEmpty()) {
            // initialize the thread pool for closing store files in parallel.
            ThreadPoolExecutor storeFileCloserThreadPool = this.region
                    .getStoreFileOpenAndCloseThreadPool("StoreFileCloserThread-" + this.getColumnFamilyName());

            // close each store file in parallel
            CompletionService<Void> completionService = new ExecutorCompletionService<Void>(
                    storeFileCloserThreadPool);
            for (final StoreFile f : result) {
                completionService.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws IOException {
                        f.closeReader(true);
                        return null;
                    }
                });
            }

            IOException ioe = null;
            try {
                for (int i = 0; i < result.size(); i++) {
                    try {
                        Future<Void> future = completionService.take();
                        future.get();
                    } catch (InterruptedException e) {
                        if (ioe == null) {
                            ioe = new InterruptedIOException();
                            ioe.initCause(e);
                        }
                    } catch (ExecutionException e) {
                        if (ioe == null)
                            ioe = new IOException(e.getCause());
                    }
                }
            } finally {
                storeFileCloserThreadPool.shutdownNow();
            }
            if (ioe != null)
                throw ioe;
        }
        LOG.info("Closed " + this);
        return result;
    } finally {
        this.lock.writeLock().unlock();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.HStore.java

/**
 * Creates an unsorted list of StoreFile loaded in parallel
 * from the given directory.
 * @throws IOException
 */
private List<StoreFile> loadStoreFiles() throws IOException {
    Collection<StoreFileInfo> files = fs.getStoreFiles(getColumnFamilyName());
    if (files == null || files.size() == 0) {
        return new ArrayList<StoreFile>();
    }

    // initialize the thread pool for opening store files in parallel..
    ThreadPoolExecutor storeFileOpenerThreadPool = this.region
            .getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" + this.getColumnFamilyName());
    CompletionService<StoreFile> completionService = new ExecutorCompletionService<StoreFile>(
            storeFileOpenerThreadPool);

    int totalValidStoreFile = 0;
    for (final StoreFileInfo storeFileInfo : files) {
        // open each store file in parallel
        completionService.submit(new Callable<StoreFile>() {
            @Override
            public StoreFile call() throws IOException {
                StoreFile storeFile = createStoreFileAndReader(storeFileInfo);
                return storeFile;
            }
        });
        totalValidStoreFile++;
    }

    ArrayList<StoreFile> results = new ArrayList<StoreFile>(files.size());
    IOException ioe = null;
    try {
        for (int i = 0; i < totalValidStoreFile; i++) {
            try {
                Future<StoreFile> future = completionService.take();
                StoreFile storeFile = future.get();
                long length = storeFile.getReader().length();
                this.storeSize += length;
                this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("loaded " + storeFile.toStringDetailed());
                }
                results.add(storeFile);
            } catch (InterruptedException e) {
                if (ioe == null)
                    ioe = new InterruptedIOException(e.getMessage());
            } catch (ExecutionException e) {
                if (ioe == null)
                    ioe = new IOException(e.getCause());
            }
        }
    } finally {
        storeFileOpenerThreadPool.shutdownNow();
    }
    if (ioe != null) {
        // close StoreFile readers
        for (StoreFile file : results) {
            try {
                if (file != null)
                    file.closeReader(true);
            } catch (IOException e) {
                LOG.warn(e.getMessage());
            }
        }
        throw ioe;
    }

    return results;
}

From source file:test.java.com.spotify.docker.client.DefaultDockerClientTest.java

@Test(expected = DockerTimeoutException.class)
public void testConnectionRequestTimeout() throws Exception {
    final int connectionPoolSize = 1;
    final int callableCount = connectionPoolSize * 100;

    final ExecutorService executor = Executors.newCachedThreadPool();
    final CompletionService<ContainerExit> completion = new ExecutorCompletionService<ContainerExit>(executor);

    // Spawn and wait on many more containers than the connection pool size.
    // This should cause a timeout once the connection pool is exhausted.

    final DockerClient dockerClient = DefaultDockerClient.fromEnv().connectionPoolSize(connectionPoolSize)
            .build();
    try {
        // Create container
        final ContainerConfig config = ContainerConfig.builder().image(BUSYBOX_LATEST)
                .cmd("sh", "-c", "while :; do sleep 1; done").build();
        final String name = randomName();
        final ContainerCreation creation = dockerClient.createContainer(config, name);
        final String id = creation.id();

        // Start the container
        dockerClient.startContainer(id);

        // Submit a bunch of waitContainer requests
        for (int i = 0; i < callableCount; i++) {
            completion.submit(new Callable<ContainerExit>() {
                @Override
                public ContainerExit call() throws Exception {
                    return dockerClient.waitContainer(id);
                }
            });
        }

        // Wait for the requests to complete or throw expected exception
        for (int i = 0; i < callableCount; i++) {
            try {
                completion.take().get();
            } catch (ExecutionException e) {
                Throwables.propagateIfInstanceOf(e.getCause(), DockerTimeoutException.class);
                throw e;
            }
        }
    } finally {
        executor.shutdown();
        dockerClient.close();
    }
}