Example usage for java.util.concurrent ExecutorCompletionService ExecutorCompletionService(Executor)

Introduction

On this page you can find example usage for the java.util.concurrent ExecutorCompletionService(Executor) constructor.

Prototype

public ExecutorCompletionService(Executor executor) 

Document

Creates an ExecutorCompletionService using the supplied executor for base task execution and a LinkedBlockingQueue as a completion queue.
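
Before the real-world examples below, here is a minimal, self-contained sketch of this constructor in use (the class and task names are illustrative): tasks are submitted through the completion service, and take() returns results in completion order rather than submission order.

import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServiceExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        // wraps the executor and collects finished tasks in a LinkedBlockingQueue
        CompletionService<Integer> completionService = new ExecutorCompletionService<Integer>(executor);

        final int taskCount = 10;
        for (int i = 0; i < taskCount; i++) {
            final int n = i;
            completionService.submit(new Callable<Integer>() {
                @Override
                public Integer call() throws Exception {
                    Thread.sleep((long) (Math.random() * 100)); // simulate variable workload
                    return n * n;
                }
            });
        }

        // take() blocks until the next task finishes; results arrive in
        // completion order, not submission order
        for (int i = 0; i < taskCount; i++) {
            System.out.println("Completed: " + completionService.take().get());
        }
        executor.shutdown();
    }
}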

Usage

From source file:com.alibaba.otter.node.etl.load.loader.db.FileLoadAction.java

/**
 * Moves the given files in parallel, failing fast: on the first error all
 * unfinished tasks are cancelled.
 */
private void moveFiles(FileLoadContext context, List<FileData> fileDatas, File rootDir) {
    Exception exception = null;
    adjustPoolSize(context);
    ExecutorCompletionService<Exception> completionService = new ExecutorCompletionService<Exception>(
            executor);

    List<Future<Exception>> results = new ArrayList<Future<Exception>>();
    for (FileData fileData : fileDatas) {
        Future<Exception> future = completionService.submit(new FileLoadWorker(context, rootDir, fileData));
        results.add(future);

        // fast fail
        if (future.isDone()) { // task may have run inline in the caller thread (e.g. with CallerRunsPolicy)
            try {
                exception = future.get();
            } catch (Exception e) {
                exception = e;
            }
            if (exception != null) {
                for (Future<Exception> result : results) {
                    if (!result.isDone() && !result.isCancelled()) {
                        result.cancel(true);
                    }
                }
                throw exception instanceof LoadException ? (LoadException) exception
                        : new LoadException(exception);
            }
        }

    }

    int resultSize = results.size();
    int cursor = 0;
    while (cursor < resultSize) {
        try {
            Future<Exception> result = completionService.take();
            exception = result.get();
        } catch (Exception e) {
            exception = e;
            break;
        }
        cursor++;
    }

    if (cursor != resultSize) { // an exception occurred; cancel all unfinished tasks
        for (Future<Exception> future : results) {
            if (!future.isDone() && !future.isCancelled()) {
                future.cancel(true);
            }
        }

    }

    if (exception != null) {
        throw exception instanceof LoadException ? (LoadException) exception : new LoadException(exception);
    }
}

From source file:org.opoo.press.maven.wagon.github.GitHub.java

private List<TreeEntry> createEntriesInThreads(List<TreeEntry> entries, final String prefix,
        final String[] paths, final DataService service, final RepositoryId repository,
        final File outputDirectory, int numThreads) throws GitHubException {
    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
    CompletionService<TreeEntry> cs = new ExecutorCompletionService<TreeEntry>(threadPool);

    for (final String path : paths) {
        cs.submit(new Callable<TreeEntry>() {
            @Override
            public TreeEntry call() throws Exception {
                return createEntry(prefix, path, service, repository, outputDirectory);
            }
        });
    }

    try {
        // BUG (fixed below): an unbounded loop on cs.take() would block forever
        // once all tasks had completed, since take() waits for the next result:
        //         Future<TreeEntry> future = cs.take();
        //         while (future != null) {
        //            entries.add(future.get());
        //            future = cs.take();
        //         }

        for (int i = 0; i < paths.length; i++) {
            entries.add(cs.take().get());
        }
        log.info("All entries created: " + paths.length);
    } catch (InterruptedException e) {
        throw new GitHubException("", e);
    } catch (ExecutionException e) {
        throw new GitHubException("", e);
    }
    return entries;
}

From source file:org.apache.camel.processor.MulticastProcessor.java

protected void doProcessParallel(final Exchange original, final AtomicExchange result,
        final Iterable<ProcessorExchangePair> pairs, final boolean streaming, final AsyncCallback callback)
        throws Exception {

    ObjectHelper.notNull(executorService, "ExecutorService", this);
    ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

    final CompletionService<Exchange> completion;
    if (streaming) {
        // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
        completion = new ExecutorCompletionService<Exchange>(executorService);
    } else {
        // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
        completion = new SubmitOrderedCompletionService<Exchange>(executorService);
    }

    // when parallel then aggregate on the fly
    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicInteger total = new AtomicInteger(0);
    final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
    final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
    final AtomicException executionException = new AtomicException();

    final Iterator<ProcessorExchangePair> it = pairs.iterator();

    if (it.hasNext()) {
        // issue task to execute in separate thread so it can aggregate on-the-fly
        // while we submit new tasks, and those tasks complete concurrently
        // this allows us to optimize work and reduce memory consumption
        AggregateOnTheFlyTask task = new AggregateOnTheFlyTask(result, original, total, completion, running,
                aggregationOnTheFlyDone, allTasksSubmitted, executionException);

        // and start the aggregation task so we can aggregate on-the-fly
        aggregateExecutorService.submit(task);
    }

    LOG.trace("Starting to submit parallel tasks");

    while (it.hasNext()) {
        final ProcessorExchangePair pair = it.next();
        final Exchange subExchange = pair.getExchange();
        updateNewExchange(subExchange, total.intValue(), pairs, it);

        completion.submit(new Callable<Exchange>() {
            public Exchange call() throws Exception {
                if (!running.get()) {
                    // do not start processing the task if we are not running
                    return subExchange;
                }

                try {
                    doProcessParallel(pair);
                } catch (Throwable e) {
                    subExchange.setException(e);
                }

                // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                Integer number = getExchangeIndex(subExchange);
                boolean continueProcessing = PipelineHelper.continueProcessing(subExchange,
                        "Parallel processing failed for number " + number, LOG);
                if (stopOnException && !continueProcessing) {
                    // signal to stop running
                    running.set(false);
                    // throw caused exception
                    if (subExchange.getException() != null) {
                        // wrap in exception to explain where it failed
                        throw new CamelExchangeException("Parallel processing failed for number " + number,
                                subExchange, subExchange.getException());
                    }
                }

                if (LOG.isTraceEnabled()) {
                    LOG.trace("Parallel processing complete for exchange: " + subExchange);
                }
                return subExchange;
            }
        });

        total.incrementAndGet();
    }

    // signal that all tasks have been submitted
    if (LOG.isTraceEnabled()) {
        LOG.trace("Signaling that all " + total.get() + " tasks have been submitted.");
    }
    }
    allTasksSubmitted.set(true);

    // it's too hard to do parallel async routing, so we let the caller thread run synchronously
    // and have it pick up the replies and do the aggregation (e.g. we use a latch to wait)
    // wait for the aggregation to be done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Waiting for on-the-fly aggregation to complete aggregating " + total.get() + " responses.");
    }
    aggregationOnTheFlyDone.await();

    // if we failed for whatever reason, throw the caused exception
    if (executionException.get() != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Parallel processing failed due to " + executionException.get().getMessage());
        }
        throw executionException.get();
    }

    // now everything is okay, so we are done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Done parallel processing " + total + " exchanges");
    }
}

From source file:com.ibm.jaggr.service.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 * 
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        cacheBust = aggregator.getOptions().getCacheBust();
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && rawConfig.equals(cached.rawConfig) && !validateDeps && !clean) {
        depMap = cached.depMap;
        return;
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode(PathUtil.getModuleName(path));
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one 
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (nodes with no children or dependency lists)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists() && !cacheDir.mkdirs()) {
        throw new IOException(
                MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
    }

    // Serialize the map to the cache directory
    ObjectOutputStream os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
}

From source file:com.liferay.sync.engine.lan.session.LanSession.java

protected SyncLanClientQueryResult findSyncLanClient(SyncFile syncFile) throws Exception {

    SyncAccount syncAccount = SyncAccountService.fetchSyncAccount(syncFile.getSyncAccountId());

    List<String> syncLanClientUuids = SyncLanEndpointService
            .findSyncLanClientUuids(syncAccount.getLanServerUuid(), syncFile.getRepositoryId());

    if (syncLanClientUuids.isEmpty()) {
        return null;
    }

    final List<Callable<SyncLanClientQueryResult>> syncLanClientQueryResultCallables = Collections
            .synchronizedList(new ArrayList<Callable<SyncLanClientQueryResult>>(syncLanClientUuids.size()));

    for (String syncLanClientUuid : syncLanClientUuids) {
        SyncLanClient syncLanClient = SyncLanClientService.fetchSyncLanClient(syncLanClientUuid);

        syncLanClientQueryResultCallables.add(createSyncLanClientQueryResultCallable(syncLanClient, syncFile));
    }

    int queryPoolSize = Math.min(syncLanClientUuids.size(), PropsValues.SYNC_LAN_SESSION_QUERY_POOL_MAX_SIZE);

    List<Future<SyncLanClientQueryResult>> pendingSyncLanClientQueryResults = new ArrayList<>(queryPoolSize);

    ExecutorCompletionService<SyncLanClientQueryResult> executorCompletionService = new ExecutorCompletionService<>(
            getExecutorService());

    for (int i = 0; i < queryPoolSize; i++) {
        Callable<SyncLanClientQueryResult> callable = new Callable<SyncLanClientQueryResult>() {

            @Override
            public synchronized SyncLanClientQueryResult call() throws Exception {

                if (syncLanClientQueryResultCallables.isEmpty()) {
                    return null;
                }

                Callable<SyncLanClientQueryResult> syncLanClientQueryResultCallable = syncLanClientQueryResultCallables
                        .remove(0);

                try {
                    return syncLanClientQueryResultCallable.call();
                } catch (Exception e) {
                    return call();
                }
            }

        };

        pendingSyncLanClientQueryResults.add(executorCompletionService.submit(callable));
    }

    List<Future<SyncLanClientQueryResult>> completedSyncLanClientQueryResult = new ArrayList<>(queryPoolSize);

    long timeout = PropsValues.SYNC_LAN_SESSION_QUERY_TOTAL_TIMEOUT;

    long endTime = System.currentTimeMillis() + timeout;

    for (int i = 0; i < queryPoolSize; i++) {
        Future<SyncLanClientQueryResult> future = executorCompletionService.poll(timeout,
                TimeUnit.MILLISECONDS);

        if (future == null) {
            for (Future<SyncLanClientQueryResult> pendingSyncLanClientQueryResult : pendingSyncLanClientQueryResults) {

                if (!pendingSyncLanClientQueryResult.isDone()) {
                    pendingSyncLanClientQueryResult.cancel(true);
                }
            }

            break;
        }

        completedSyncLanClientQueryResult.add(future);

        timeout = endTime - System.currentTimeMillis();
    }

    SyncLanClientQueryResult candidateSyncLanClientQueryResult = null;
    int candidateDownloadRatePerConnection = 0;

    for (Future<SyncLanClientQueryResult> completedFuture : completedSyncLanClientQueryResult) {

        SyncLanClientQueryResult syncLanClientQueryResult = null;

        try {
            syncLanClientQueryResult = completedFuture.get();
        } catch (Exception e) {
            continue;
        }

        if (syncLanClientQueryResult == null) {
            continue;
        }

        if (syncLanClientQueryResult.getConnectionsCount() >= syncLanClientQueryResult.getMaxConnections()) {

            if (candidateSyncLanClientQueryResult == null) {
                candidateSyncLanClientQueryResult = syncLanClientQueryResult;
            }

            continue;
        }

        if (syncLanClientQueryResult.getConnectionsCount() == 0) {
            return syncLanClientQueryResult;
        }

        int downloadRatePerConnection = syncLanClientQueryResult.getDownloadRate()
                / (syncLanClientQueryResult.getConnectionsCount() + 1);

        if (downloadRatePerConnection >= candidateDownloadRatePerConnection) {

            candidateDownloadRatePerConnection = downloadRatePerConnection;
            candidateSyncLanClientQueryResult = syncLanClientQueryResult;
        }
    }

    return candidateSyncLanClientQueryResult;
}

From source file:org.codice.ddf.spatial.ogc.csw.catalog.transformer.CswQueryResponseTransformer.java

private String multiThreadedMarshal(List<Result> results, String recordSchema,
        final Map<String, Serializable> arguments) throws CatalogTransformerException {

    CompletionService<BinaryContent> completionService = new ExecutorCompletionService<>(queryExecutor);

    try {
        for (Result result : results) {
            final Metacard mc = result.getMetacard();

            final MetacardTransformer transformer = metacardTransformerManager
                    .getTransformerBySchema(recordSchema);

            if (transformer == null) {
                throw new CatalogTransformerException("Cannot find transformer for schema: " + recordSchema);
            }

            // the "current" thread will run the submitted task when the queue size is
            // exceeded, effectively blocking the enqueueing of further tasks.
            completionService.submit(new Callable<BinaryContent>() {
                @Override
                public BinaryContent call() throws Exception {
                    BinaryContent content = transformer.transform(mc, arguments);
                    return content;
                }
            });
        }

        int metacardCount = results.size();
        CharArrayWriter accum = new CharArrayWriter(ACCUM_INITIAL_SIZE);
        for (int i = 0; i < metacardCount; i++) {
            Future<BinaryContent> binaryContentFuture = completionService.take(); // blocks
            BinaryContent binaryContent = binaryContentFuture.get();
            IOUtils.copy(binaryContent.getInputStream(), accum);
        }

        return accum.toString();

    } catch (IOException | InterruptedException | ExecutionException xe) {
        throw new CatalogTransformerException(xe);
    }

}

From source file:org.apache.tez.runtime.LogicalIOProcessorRuntimeTask.java

public LogicalIOProcessorRuntimeTask(TaskSpec taskSpec, int appAttemptNumber, Configuration tezConf,
        String[] localDirs, TezUmbilical tezUmbilical, Map<String, ByteBuffer> serviceConsumerMetadata,
        Map<String, String> envMap, Multimap<String, String> startedInputsMap, ObjectRegistry objectRegistry,
        String pid, ExecutionContext ExecutionContext, long memAvailable, boolean updateSysCounters,
        HadoopShim hadoopShim, TezExecutors sharedExecutor) throws IOException {
    // Note: If adding any fields here, make sure they're cleaned up in the cleanupContext method.
    // TODO Remove jobToken from here post TEZ-421
    super(taskSpec, tezConf, tezUmbilical, pid, updateSysCounters);
    LOG.info("Initializing LogicalIOProcessorRuntimeTask with TaskSpec: " + taskSpec);
    int numInputs = taskSpec.getInputs().size();
    int numOutputs = taskSpec.getOutputs().size();
    this.localDirs = localDirs;
    this.inputSpecs = taskSpec.getInputs();
    this.inputsMap = new ConcurrentHashMap<String, LogicalInput>(numInputs);
    this.inputContextMap = new ConcurrentHashMap<String, InputContext>(numInputs);
    this.outputSpecs = taskSpec.getOutputs();
    this.outputsMap = new ConcurrentHashMap<String, LogicalOutput>(numOutputs);
    this.outputContextMap = new ConcurrentHashMap<String, OutputContext>(numOutputs);

    this.runInputMap = new LinkedHashMap<String, LogicalInput>();
    this.runOutputMap = new LinkedHashMap<String, LogicalOutput>();

    this.initializedInputs = new ConcurrentHashMap<String, LogicalInput>();
    this.initializedOutputs = new ConcurrentHashMap<String, LogicalOutput>();

    this.processorDescriptor = taskSpec.getProcessorDescriptor();
    this.serviceConsumerMetadata = serviceConsumerMetadata;
    this.envMap = envMap;
    this.eventsToBeProcessed = new LinkedBlockingQueue<TezEvent>();
    this.state.set(State.NEW);
    this.appAttemptNumber = appAttemptNumber;
    this.initializeProcessorFirst = tezConf.getBoolean(TezConfiguration.TEZ_TASK_INITIALIZE_PROCESSOR_FIRST,
            TezConfiguration.TEZ_TASK_INITIALIZE_PROCESSOR_FIRST_DEFAULT);
    this.initializeProcessorIOSerially = tezConf.getBoolean(
            TezConfiguration.TEZ_TASK_INITIALIZE_PROCESSOR_IO_SERIALLY,
            TezConfiguration.TEZ_TASK_INITIALIZE_PROCESSOR_IO_SERIALLY_DEFAULT);
    int numInitializers = numInputs + numOutputs; // Processor is initialized in the main thread.
    numInitializers = (numInitializers == 0 ? 1 : numInitializers);
    if (initializeProcessorIOSerially) {
        numInitializers = 1;
    }
    this.initializerExecutor = Executors.newFixedThreadPool(numInitializers,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("I/O Setup %d").build());
    this.initializerCompletionService = new ExecutorCompletionService<Void>(this.initializerExecutor);
    this.groupInputSpecs = taskSpec.getGroupInputs();
    initialMemoryDistributor = new MemoryDistributor(numInputs, numOutputs, tezConf);
    this.startedInputsMap = startedInputsMap;
    this.inputReadyTracker = new InputReadyTracker();
    this.objectRegistry = objectRegistry;
    this.ExecutionContext = ExecutionContext;
    this.memAvailable = memAvailable;
    this.hadoopShim = hadoopShim;
    this.maxEventBacklog = tezConf.getInt(TezConfiguration.TEZ_TASK_MAX_EVENT_BACKLOG,
            TezConfiguration.TEZ_TASK_MAX_EVENT_BACKLOG_DEFAULT);
    this.sharedExecutor = sharedExecutor;
}

From source file:org.ugent.caagt.genestacker.search.bb.BranchAndBound.java

@Override
public ParetoFrontier runSearch(long runtimeLimit, int numThreads) throws GenestackerException {

    // create list to store previously generated schemes
    previousSchemes = new ArrayList<>();
    // create set to store previously generated scheme alternatives
    previousSchemeAlternatives = new HashSet<>();
    // create queue for schemes to be considered
    schemeQueue = new LinkedList<>();

    // reset ids
    SeedLotNode.resetIDs();
    PlantNode.resetIDs();
    CrossingNode.resetIDs();
    CrossingSchemeAlternatives.resetIDs();

    // create thread pool and completion service for scheme extension

    // inform user about number of cross workers used (verbose)
    logger.info(VERBOSE, "Number of threads used for extending partial schemes: {}", numThreads);
    ExecutorService extPool = Executors.newFixedThreadPool(numThreads);
    CompletionService<List<CrossingSchemeAlternatives>> extCompletionService = new ExecutorCompletionService<>(
            extPool);

    // initialize solution manager
    BranchAndBoundSolutionManager solutionManager = new BranchAndBoundSolutionManager(dominatesRelation,
            ideotype, popSizeTools, maxNumSeedsPerCrossing, constraints, heuristics, seedLotFilters,
            homozygousIdeotypeParents);
    // set initial Pareto frontier, if any
    if (initialFrontier != null) {
        solutionManager.setFrontier(initialFrontier);
    }

    // apply initial plant filter, if any
    if (initialPlantFilter != null) {

        // verbose
        logger.info(VERBOSE, "Filtering initial plants ...");

        initialPlants = initialPlantFilter.filter(initialPlants);

        //verbose
        logger.info(VERBOSE, "Retained {} initial plants (see below)", initialPlants.size());
        for (Plant p : initialPlants) {
            logger.info(VERBOSE, "\n{}", p);
        }

    }

    // create initial partial schemes from initial plants
    List<CrossingSchemeAlternatives> initialParentSchemes = new ArrayList<>();
    for (Plant p : initialPlants) {
        // create uniform seed lot
        SeedLot sl = new SeedLot(p.getGenotype());
        // create seedlot node
        SeedLotNode sln = new SeedLotNode(sl, 0);
        // create and attach plant node
        PlantNode pn = new PlantNode(p, 0, sln);
        // create partial crossing scheme
        CrossingScheme s = new CrossingScheme(popSizeTools, pn);
        initialParentSchemes.add(new CrossingSchemeAlternatives(s));
    }
    registerNewSchemes(initialParentSchemes, solutionManager);

    // now iteratively cross schemes with previous schemes to create larger schemes,
    // until all solutions have been inspected or pruned
    while (!runtimeLimitExceeded() && !schemeQueue.isEmpty()) {

        // get next scheme from queue
        CrossingSchemeAlternatives cur = schemeQueue.poll();

        // fire progression message (verbose)
        logger.info(VERBOSE, "num solutions: {} ### prog: {} ({}) ### cur scheme: {} - T = {}",
                solutionManager.getFrontier().getNumSchemes(), previousSchemes.size(), schemeQueue.size(), cur,
                TimeFormatting.formatTime(System.currentTimeMillis() - getStart()));
        // debug: create diagram of current scheme (all alternatives)
        if (logger.isDebugEnabled()) {
            for (int i = 0; i < cur.nrOfAlternatives(); i++) {
                logger.debug("Cur scheme (alternative {}): {}", i + 1,
                        writeDiagram(cur.getAlternatives().get(i)));
            }
            // wait for enter
            DebugUtils.waitForEnter();
        }

        // delete possible pruned alternatives
        Iterator<CrossingScheme> it = cur.iterator();
        int numForCrossing = 0;
        int numForSelfing = 0;
        while (it.hasNext()) {
            CrossingScheme alt = it.next();
            // check if alternative should be removed
            if (previousSchemeAlternatives.contains(alt)) {
                // equivalent scheme alternative generated before, delete current alternative
                it.remove();
            } else if (solutionManager.pruneDequeueScheme(alt)) {
                // prune dequeued scheme (e.g. by the optimal subscheme heuristic)
                it.remove();
            } else {
                // check pruning for crossing/selfing
                boolean pruneCross = solutionManager.pruneCrossCurrentScheme(alt);
                boolean pruneSelf = solutionManager.pruneSelfCurrentScheme(alt);
                if (pruneCross && pruneSelf) {
                    // alternative not useful anymore
                    it.remove();
                } else {
                    // count nr of alternatives useful for crossing or selfing
                    if (!pruneCross) {
                        numForCrossing++;
                    }
                    if (!pruneSelf) {
                        numForSelfing++;
                    }
                }
            }
        }

        if (cur.nrOfAlternatives() > 0) {

            // if useful, self current scheme
            if (numForSelfing > 0) {
                registerNewSchemes(selfScheme(cur, map, solutionManager), solutionManager);
            }

            // if useful, cross with previous schemes
            if (numForCrossing > 0) {
                // launch workers to combine with previous schemes
                Iterator<CrossingSchemeAlternatives> previousSchemesIterator = previousSchemes.iterator();
                for (int w = 0; w < numThreads; w++) {
                    // submit worker
                    extCompletionService
                            .submit(new CrossWorker(previousSchemesIterator, cur, solutionManager, map));
                    // very verbose
                    logger.info(VERY_VERBOSE, "Launched cross worker {} of {}", w + 1, numThreads);
                }
                // handle results of completed workers in the order in which they complete
                for (int w = 0; w < numThreads; w++) {
                    try {
                        // wait for next worker to complete and register its solutions
                        registerNewSchemes(extCompletionService.take().get(), solutionManager);
                        // very verbose
                        logger.info(VERY_VERBOSE, "{}/{} cross workers finished", w + 1, numThreads);
                    } catch (InterruptedException | ExecutionException ex) {
                        // something went wrong with the cross workers
                        throw new SearchException("An error occurred while extending the current scheme.", ex);
                    }
                }
            }

            // put the scheme in the sorted set with previously considered schemes (only done if useful for later crossings)
            previousSchemes.add(cur);
            // register scheme alternatives
            previousSchemeAlternatives.addAll(cur.getAlternatives());
        }
    }

    if (runtimeLimitExceeded()) {
        // info
        logger.info("Runtime limit exceeded");
    }

    // shutdown thread pool
    extPool.shutdownNow();

    return solutionManager.getFrontier();
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphore.java

@Test
public void testReleaseInChunks() throws Exception {
    final int MAX_LEASES = 11;
    final int THREADS = 100;

    final CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    client.start();
    try {
        final Stepper latch = new Stepper();
        final Random random = new Random();
        final Counter counter = new Counter();
        ExecutorService service = Executors.newCachedThreadPool();
        ExecutorCompletionService<Object> completionService = new ExecutorCompletionService<Object>(service);
        for (int i = 0; i < THREADS; ++i) {
            completionService.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, "/test",
                            MAX_LEASES);
                    Lease lease = semaphore.acquire(10, TimeUnit.SECONDS);
                    if (lease == null) {
                        throw new Exception("timed out");
                    }
                    try {
                        synchronized (counter) {
                            ++counter.currentCount;
                            if (counter.currentCount > counter.maxCount) {
                                counter.maxCount = counter.currentCount;
                            }
                            counter.notifyAll();
                        }

                        latch.await();
                    } finally {
                        synchronized (counter) {
                            --counter.currentCount;
                        }
                        semaphore.returnLease(lease);
                    }
                    return null;
                }
            });
        }

        int remaining = THREADS;
        while (remaining > 0) {
            int times = Math.min(random.nextInt(5) + 1, remaining);
            latch.countDown(times);
            remaining -= times;
            Thread.sleep(random.nextInt(100) + 1);
        }

        for (int i = 0; i < THREADS; ++i) {
            completionService.take();
        }

        synchronized (counter) {
            Assert.assertTrue(counter.currentCount == 0);
            Assert.assertTrue(counter.maxCount > 0);
            Assert.assertTrue(counter.maxCount <= MAX_LEASES);
            System.out.println(counter.maxCount);
        }
    } finally {
        client.close();
    }
}

From source file:com.threadswarm.imagefeedarchiver.driver.CommandLineDriver.java

@Override
public void run() {
    //setup filters
    List<RssItemFilter> filterList = new LinkedList<RssItemFilter>();
    filterList.add(new PreviouslyDownloadedItemFilter(processedRssItemDAO));
    RssItemFilter chainedItemFilter = new ChainedRssItemFilter(filterList);

    RssChannel rssChannel = null;
    try {
        rssChannel = fetchRssChannel(rssFeedUri);
    } catch (IOException | FeedParserException e) {
        LOGGER.error(
                "An Exception was thrown while attempting to download and parse the target RSS feed.. exiting",
                e);
        System.exit(1);
    }

    List<RssItem> filteredItemList = new LinkedList<RssItem>();
    if (rssChannel != null && rssChannel.getItems() != null) {
        for (RssItem rssItem : rssChannel.getItems()) {
            rssItem = chainedItemFilter.filter(rssItem);
            if (rssItem != null)
                filteredItemList.add(rssItem);
        }
    }

    if (!filteredItemList.isEmpty()) {
        //create list of headers to be used when downloading images
        List<Header> headerList = new ArrayList<Header>(2);
        if (doNotTrackRequested) {
            LOGGER.debug("Adding 'DNT' header to worker requests");
            headerList.add(DNT_HEADER);
        }
        headerList.add(new BasicHeader(HttpHeaders.REFERER, rssFeedUri.toString()));
        headerList = Collections.unmodifiableList(headerList);

        ExecutorService executorService = null;
        try {
            executorService = Executors.newFixedThreadPool(threadCount);
            CompletionService<ProcessedRssItem> completionService = new ExecutorCompletionService<ProcessedRssItem>(
                    executorService);
            Set<URI> processedURISet = new ConcurrentSkipListSet<URI>();
            int itemCount = 0;
            for (RssItem rssItem : filteredItemList) {
                completionService.submit(new RssItemProcessor(httpClient, rssItem, processedRssItemDAO,
                        outputDirectory, headerList, processedURISet, downloadDelay, forceHttps));
                itemCount++;
            }

            LOGGER.info("{} jobs submitted for execution", itemCount);

            for (int x = 0; x < itemCount; x++) {
                ProcessedRssItem processedItem = completionService.take().get();
                LOGGER.info("Item status: {} --> [{}]", processedItem.getRssItem().getTitle(),
                        processedItem.getDownloadStatus());
            }
        } catch (InterruptedException e) {
            LOGGER.warn("Thread interrupted while blocking", e);
            Thread.currentThread().interrupt(); // restore interrupt
        } catch (ExecutionException e) {
            LOGGER.error("An Exception was thrown during worker execution and subsequently propagated", e);
            e.printStackTrace();
        } finally {
            executorService.shutdown();
            try {
                executorService.awaitTermination(10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                LOGGER.warn("Thread interrupted while blocking", e);
                Thread.currentThread().interrupt(); // restore interrupt
            }
            httpClient.getConnectionManager().shutdown();
        }
    }
}