Example usage for java.util.concurrent CompletionService take

Introduction

On this page you can find example usage for java.util.concurrent CompletionService take.

Prototype

Future<V> take() throws InterruptedException;

Document

Retrieves and removes the Future representing the next completed task, waiting if none are yet present.
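
The examples below all follow the same shape: submit a known number of tasks to a CompletionService backed by an executor, then call take() exactly that many times. Each take() blocks until some task finishes and hands back futures in completion order, not submission order. As a minimal, self-contained sketch of that pattern (the pool size and the work inside the callables are placeholders):

import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CompletionServiceTakeExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        CompletionService<Integer> completionService =
                new ExecutorCompletionService<Integer>(executor);

        // Submit a known number of tasks...
        int submitted = 0;
        for (int i = 0; i < 10; i++) {
            final int value = i;
            completionService.submit(new Callable<Integer>() {
                @Override
                public Integer call() {
                    return value * value; // placeholder work
                }
            });
            submitted++;
        }

        // ...then take() exactly that many results. take() blocks until a task
        // completes and returns futures in completion order, not submission order.
        for (int i = 0; i < submitted; i++) {
            Future<Integer> completed = completionService.take();
            System.out.println(completed.get()); // get() does not block here: the task is already done
        }

        executor.shutdown();
    }
}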

Usage

From source file:org.springframework.integration.jdbc.store.channel.AbstractTxTimeoutMessageStoreTests.java

public void testInt2993IdCacheConcurrency() throws InterruptedException, ExecutionException {
    final String groupId = "testInt2993Group";
    for (int i = 0; i < 100; i++) {
        this.jdbcChannelMessageStore.addMessageToGroup(groupId,
                new GenericMessage<String>("testInt2993Message"));
    }

    ExecutorService executorService = Executors.newCachedThreadPool();
    CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(executorService);

    final int concurrency = 5;

    final TransactionTemplate transactionTemplate = new TransactionTemplate(transactionManager);

    for (int i = 0; i < concurrency; i++) {
        completionService.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                for (int i = 0; i < 100; i++) {
                    boolean result = transactionTemplate.execute(new TransactionCallback<Boolean>() {
                        @Override
                        public Boolean doInTransaction(TransactionStatus status) {
                            Message<?> message = null;
                            try {
                                message = jdbcChannelMessageStore.pollMessageFromGroup(groupId);
                            } catch (Exception e) {
                                log.error("IdCache race condition.", e);
                                return false;
                            }
                            try {
                                Thread.sleep(10);
                            } catch (InterruptedException e) {
                                log.error(e);
                            }
                            if (message != null) {
                                jdbcChannelMessageStore
                                        .removeFromIdCache(message.getHeaders().getId().toString());
                            }
                            return true;
                        }
                    });
                    if (!result)
                        return false;
                }

                return true;
            }
        });
    }

    for (int j = 0; j < concurrency; j++) {
        assertTrue(completionService.take().get());
    }

    executorService.shutdown();
    assertTrue(executorService.awaitTermination(5, TimeUnit.SECONDS));
}

From source file:com.ibm.jaggr.core.impl.deps.DepTree.java

/**
 * Object constructor. Attempts to de-serialize the cached dependency lists
 * from disk and then validates the dependency lists based on last-modified
 * dates, looking for any new or removed files. If the cached dependency
 * list data cannot be de-serialized, new lists are constructed. Once the
 * dependency lists have been validated, the list data is serialized back
 * out to disk.
 *
 * @param paths
 *            Collection of URIs which specify the target resources
 *            to be scanned for javascript files.
 * @param aggregator
 *            The servlet instance for this object
 * @param stamp
 *            timestamp associated with external override/customization
 *            resources that are checked on every server restart
 * @param clean
 *            If true, then the dependency lists are generated from scratch
 *            rather than by de-serializing and then validating the cached
 *            dependency lists.
 * @param validateDeps
 *            If true, then validate existing cached dependencies using
 *            file last-modified times.
 * @throws IOException
 */
public DepTree(Collection<URI> paths, IAggregator aggregator, long stamp, boolean clean, boolean validateDeps)
        throws IOException {
    final String sourceMethod = "<ctor>"; //$NON-NLS-1$
    boolean isTraceLogging = log.isLoggable(Level.FINER);
    if (isTraceLogging) {
        log.entering(DepTree.class.getName(), sourceMethod,
                new Object[] { paths, aggregator, stamp, clean, validateDeps });
    }
    this.stamp = stamp;
    IConfig config = aggregator.getConfig();
    rawConfig = config.toString();
    cacheBust = AggregatorUtil.getCacheBust(aggregator);

    File cacheDir = new File(aggregator.getWorkingDirectory(), DEPCACHE_DIRNAME);
    File cacheFile = new File(cacheDir, CACHE_FILE);

    /*
     * The de-serialized dependency map. If we have a cached dependency map,
     * then it will be validated against the last-modified dates of the
     * current files and only the files that have changed will need to be
     * re-parsed to update the dependency lists.
     */
    DepTree cached = null;

    if (!clean) {
        // If we're not starting clean, try to de-serialize the map from
        // cache
        try {
            ObjectInputStream is = new ObjectInputStream(new FileInputStream(cacheFile));
            try {
                if (isTraceLogging) {
                    log.finer("Attempting to read cached dependencies from " + cacheFile.toString()); //$NON-NLS-1$
                }
                cached = (DepTree) is.readObject();
            } finally {
                try {
                    is.close();
                } catch (Exception ignore) {
                }
            }
        } catch (FileNotFoundException e) {
            /*
             * Not an error. Just means that the cache file hasn't been
             * written yet or else it's been deleted.
             */
            if (log.isLoggable(Level.INFO))
                log.log(Level.INFO, Messages.DepTree_1);
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
        }
    }

    // If the cacheBust config param has changed, then do a clean build
    // of the dependencies.
    if (cached != null) {
        if (stamp == 0) {
            // no init stamp provided.  Preserve the cached one.
            stamp = cached.stamp;
        }
        if (stamp > cached.stamp) {
            // init stamp has been updated.  Validate dependencies.
            validateDeps = true;
        }
        if (!StringUtils.equals(cacheBust, cached.cacheBust)) {
            if (isTraceLogging) {
                log.finer("Current cacheBust = " + cacheBust + ", cached cacheBust = " + cached.cacheBust); //$NON-NLS-1$//$NON-NLS-2$
            }
            if (log.isLoggable(Level.INFO)) {
                log.info(Messages.DepTree_2);
            }
            cached = null;
        }
        if (cached != null && !StringUtils.equals(rawConfig, cached.rawConfig)) {
            if (isTraceLogging) {
                log.finer("Current config = " + rawConfig); //$NON-NLS-1$
                log.finer("Cached config = " + cached.rawConfig); //$NON-NLS-1$
            }
            validateDeps = true;
        }
    }

    /*
     * If we de-serialized a previously saved dependency map, then go with
     * that.
     */
    if (cached != null && !validateDeps && !clean) {
        depMap = cached.depMap;
        fromCache = true;
        return;
    } else if (isTraceLogging) {
        log.finer("Building/validating deps: cached = " + cached + ", validateDeps = " + validateDeps //$NON-NLS-1$//$NON-NLS-2$
                + ", clean = " + clean); //$NON-NLS-1$
    }

    // Initialize the dependency map
    depMap = new ConcurrentHashMap<URI, DepTreeNode>();

    // This can take a while, so print something to the console
    String msg = MessageFormat.format(Messages.DepTree_3, new Object[] { aggregator.getName() });

    ConsoleService cs = new ConsoleService();
    cs.println(msg);

    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    // Make sure that all the paths are unique and orthogonal
    paths = DepUtils.removeRedundantPaths(paths);

    /*
     * Create the thread pools, one for the tree builders and one for the
     * parsers. Since a tree builder thread will wait for all the outstanding
     * parser threads started by that builder to complete, we need to use two
     * independent thread pools to guard against the possibility of deadlock
     * caused by all the threads in the pool being consumed by tree builders
     * and leaving none available to service the parsers.
     */
    final ThreadGroup treeBuilderTG = new ThreadGroup(TREEBUILDER_TGNAME),
            parserTG = new ThreadGroup(JSPARSER_TGNAME);
    ExecutorService treeBuilderExc = Executors.newFixedThreadPool(10, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(treeBuilderTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { treeBuilderTG.getName(), treeBuilderTG.activeCount() }));
        }
    }), parserExc = Executors.newFixedThreadPool(20, new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(parserTG, r, MessageFormat.format(THREADNAME,
                    new Object[] { parserTG.getName(), parserTG.activeCount() }));
        }
    });

    // Counter to keep track of number of tree builder threads started
    AtomicInteger treeBuilderCount = new AtomicInteger(0);

    // The completion services for the thread pools
    final CompletionService<URI> parserCs = new ExecutorCompletionService<URI>(parserExc);
    CompletionService<DepTreeBuilder.Result> treeBuilderCs = new ExecutorCompletionService<DepTreeBuilder.Result>(
            treeBuilderExc);

    Set<String> nonJSExtensions = Collections.unmodifiableSet(getNonJSExtensions(aggregator));
    // Start the tree builder threads to process the paths
    for (final URI path : paths) {
        /*
         * Create or get from cache the root node for this path and
         * add it to the new map.
         */
        DepTreeNode root = new DepTreeNode("", path); //$NON-NLS-1$
        DepTreeNode cachedNode = null;
        if (cached != null) {
            cachedNode = cached.depMap.get(path);
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_4, new Object[] { path }));
            }
        } else {
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_5, new Object[] { path }));
            }
        }
        depMap.put(path, root);

        treeBuilderCount.incrementAndGet();
        treeBuilderCs.submit(new DepTreeBuilder(aggregator, parserCs, path, root, cachedNode, nonJSExtensions));
    }

    // List of parser exceptions
    LinkedList<Exception> parserExceptions = new LinkedList<Exception>();

    /*
     * Pull the completed tree builder tasks from the completion queue until
     * all the paths have been processed
     */
    while (treeBuilderCount.decrementAndGet() >= 0) {
        try {
            DepTreeBuilder.Result result = treeBuilderCs.take().get();
            if (log.isLoggable(Level.INFO)) {
                log.info(MessageFormat.format(Messages.DepTree_6,
                        new Object[] { result.parseCount, result.dirName }));
            }
        } catch (Exception e) {
            if (log.isLoggable(Level.SEVERE))
                log.log(Level.SEVERE, e.getMessage(), e);
            parserExceptions.add(e);
        }
    }

    // shutdown the thread pools now that we're done with them
    parserExc.shutdown();
    treeBuilderExc.shutdown();

    // If parser exceptions occurred, then rethrow the first one
    if (parserExceptions.size() > 0) {
        throw new RuntimeException(parserExceptions.get(0));
    }

    // Prune dead nodes (folder nodes with no children)
    for (Map.Entry<URI, DepTreeNode> entry : depMap.entrySet()) {
        entry.getValue().prune();
    }

    /*
     * Make sure the cache directory exists before we try to serialize the
     * dependency map.
     */
    if (!cacheDir.exists())
        if (!cacheDir.mkdirs()) {
            throw new IOException(
                    MessageFormat.format(Messages.DepTree_0, new Object[] { cacheDir.getAbsolutePath() }));
        }

    // Serialize the map to the cache directory
    ObjectOutputStream os;
    os = new ObjectOutputStream(new FileOutputStream(cacheFile));
    try {
        if (isTraceLogging) {
            log.finer("Writing cached dependencies to " + cacheFile.toString()); //$NON-NLS-1$
        }
        os.writeObject(this);
    } finally {
        try {
            os.close();
        } catch (Exception ignore) {
        }
    }
    msg = MessageFormat.format(Messages.DepTree_7, new Object[] { aggregator.getName() });

    // Output that we're done.
    cs.println(msg);
    if (log.isLoggable(Level.INFO)) {
        log.info(msg);
    }
    if (isTraceLogging) {
        log.exiting(DepTree.class.getName(), sourceMethod);
    }
}
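
The block comment in the constructor above explains the reason for two independent thread pools: each tree builder blocks on take() for the parser tasks it spawned, so if builders and parsers shared one pool, every thread could end up occupied by a waiting builder and the parsers would never run. A condensed, hypothetical sketch of that shape (the method name, pool sizes and task bodies are placeholders; JDK imports are elided as in the snippets on this page):

public void twoPoolSketch() throws Exception {
    // Builders wait on parser results, so each kind of task gets its own pool;
    // sharing one pool could fill every thread with a waiting builder and deadlock.
    ExecutorService builderPool = Executors.newFixedThreadPool(10);
    ExecutorService parserPool = Executors.newFixedThreadPool(20);

    final CompletionService<String> parserCs = new ExecutorCompletionService<String>(parserPool);
    CompletionService<Integer> builderCs = new ExecutorCompletionService<Integer>(builderPool);

    builderCs.submit(new Callable<Integer>() {
        @Override
        public Integer call() throws Exception {
            int parserTasks = 0;
            for (int i = 0; i < 5; i++) {
                final int n = i;
                parserCs.submit(new Callable<String>() {
                    @Override
                    public String call() {
                        return "parsed-" + n; // placeholder parse work
                    }
                });
                parserTasks++;
            }
            // This wait is safe because the parsers run on their own pool.
            for (int i = 0; i < parserTasks; i++) {
                parserCs.take().get();
            }
            return parserTasks;
        }
    });

    System.out.println("builder consumed " + builderCs.take().get() + " parser results");
    builderPool.shutdown();
    parserPool.shutdown();
}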

From source file:org.geowebcache.sqlite.MbtilesBlobStore.java

@Override
public boolean delete(TileRange tileRange) throws StorageException {
    // getting the files associated with this tile range
    Map<File, List<long[]>> files = fileManager.getFiles(tileRange);
    if (files.isEmpty()) {
        // no files so nothing to do
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Nothing to do.");
        }
        return false;
    }
    // let's delete the tiles
    CompletionService completionService = new ExecutorCompletionService(executorService);
    int tasks = 0;
    for (Map.Entry<File, List<long[]>> entry : files.entrySet()) {
        // FIXME: should we tell something to the listeners ?
        File file = entry.getKey();
        if (!file.exists()) {
            // this database file doesn't exist, so nothing to do
            continue;
        }
        if (eagerDelete) {
            // we delete the whole file avoiding fragmentation on the database
            completionService.submit(() -> connectionManager.delete(file), true);
        } else {
            // we need to delete all tiles that belong to the tiles range and are stored in the current file
            for (long[] range : entry.getValue()) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug(String.format(
                            "Deleting tiles range [minx=%d, miny=%d, maxx=%d, maxxy=%d, zoom=%d] in file '%s'.",
                            range[0], range[1], range[2], range[3], range[4], file));
                }
                completionService.submit(() -> connectionManager.executeSql(file,
                        "DELETE FROM tiles WHERE zoom_level = ? AND tile_column BETWEEN ? AND ? AND tile_row BETWEEN ? AND ?;",
                        range[4], range[0], range[2], range[1], range[3]), true);
            }
        }
        tasks++;
    }
    // let's wait for the tasks to finish
    for (int i = 0; i < tasks; i++) {
        try {
            completionService.take().get();
        } catch (Exception exception) {
            throw Utils.exception(exception, "Something bad happened while deleting the tile range.");
        }
    }
    return true;
}
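
The example above uses the two-argument submit(Runnable task, V result) overload: the task itself returns nothing, so a fixed value (true) is handed back by Future.get() once the task completes. A minimal sketch of that overload under hypothetical names (JDK imports elided):

public void submitWithFixedResultSketch() {
    ExecutorService executor = Executors.newFixedThreadPool(2);
    CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(executor);

    int tasks = 0;
    for (final String name : new String[] { "a.sqlite", "b.sqlite" }) { // placeholder file names
        completionService.submit(new Runnable() {
            @Override
            public void run() {
                System.out.println("deleting " + name); // placeholder for the real delete
            }
        }, Boolean.TRUE); // the value handed back by Future.get() once this task completes
        tasks++;
    }

    // Take exactly as many results as tasks were submitted; get() rethrows
    // any task failure wrapped in an ExecutionException.
    for (int i = 0; i < tasks; i++) {
        try {
            completionService.take().get();
        } catch (Exception exception) {
            throw new RuntimeException("Deletion task failed.", exception);
        }
    }
    executor.shutdown();
}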

From source file:com.elixsr.portforwarder.forwarding.ForwardingService.java

/**
 * Starts forwarding based on rules found in database.
 *
 * Acquires an instance of the Forwarding Manager to turn forwarding flag on.
 *
 * Creates a list of callbacks for each forward thread, and handles exceptions as they come.
 *
 * If an exception is thrown, the service immediately stops, and the #onDestroy method is
 * called.
 *
 * @param intent
 */
@Override
protected void onHandleIntent(Intent intent) {

    // Gets data from the incoming Intent
    //        String dataString = intent.getDataString();

    Log.i(TAG, "Ran the service");

    ForwardingManager.getInstance().enableForwarding();

    runService = true;

    /*
     * Creates a new Intent containing a Uri object
     * BROADCAST_ACTION is a custom Intent action
     */
    Intent localIntent = new Intent(BROADCAST_ACTION)
            // Puts the status into the Intent
            .putExtra(PORT_FORWARD_SERVICE_STATE, ForwardingManager.getInstance().isEnabled());
    // Broadcasts the Intent to receivers in this app.
    LocalBroadcastManager.getInstance(this).sendBroadcast(localIntent);

    showForwardingEnabledNotification();

    //load the rules from the datastore
    //TODO: inject the rules as extras
    RuleDao ruleDao = new RuleDao(new RuleDbHelper(this));
    List<RuleModel> ruleModels = ruleDao.getAllRuleModels();

    InetSocketAddress from;

    Forwarder forwarder = null;

    /*
     Sourced from: http://stackoverflow.com/questions/19348248/waiting-on-a-list-of-future
     */
    CompletionService<Void> completionService = new ExecutorCompletionService<>(executorService);

    // how many futures there are to check
    int remainingFutures = 0;

    for (RuleModel ruleModel : ruleModels) {

        try {
            from = generateFromIpUsingInterface(ruleModel.getFromInterfaceName(), ruleModel.getFromPort());

            if (ruleModel.isTcp()) {
                completionService.submit(new TcpForwarder(from, ruleModel.getTarget(), ruleModel.getName()));
                remainingFutures++;
            }

            if (ruleModel.isUdp()) {
                completionService.submit(new UdpForwarder(from, ruleModel.getTarget(), ruleModel.getName()));
                remainingFutures++;
            }

        } catch (SocketException | ObjectNotFoundException e) {
            Log.e(TAG, "Error generating IP Address for FROM interface with rule '" + ruleModel.getName() + "'",
                    e);

            // graceful UI exception handling - broadcast this to the UI - it will deal with displaying something to the user, e.g. a Toast
            localIntent = new Intent(BROADCAST_ACTION)
                    // Puts the status into the Intent
                    .putExtra(PORT_FORWARD_SERVICE_ERROR_MESSAGE,
                            "Error while trying to start rule '" + ruleModel.getName() + "'");
            // Broadcasts the Intent to receivers in this app.
            LocalBroadcastManager.getInstance(this).sendBroadcast(localIntent);
        }
    }

    // Build and send an Event.
    tracker.send(new HitBuilders.EventBuilder().setCategory(CATEGORY_FORWARDING)
            .setAction(ACTION_START_FORWARDING).setLabel(ruleModels.size() + " rules").build());

    Future<?> completedFuture;

    // loop through each callback, and handle an exception
    while (remainingFutures > 0) {

        // block until a callable completes
        try {
            completedFuture = completionService.take();
            remainingFutures--;

            completedFuture.get();
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();

            Log.e(TAG, "Error when forwarding port.", e);
            localIntent = new Intent(BROADCAST_ACTION)
                    // Puts the status into the Intent
                    .putExtra(PORT_FORWARD_SERVICE_ERROR_MESSAGE, e.getCause().getMessage());
            // Broadcasts the Intent to receivers in this app.
            LocalBroadcastManager.getInstance(this).sendBroadcast(localIntent);

            break;
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
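
As the javadoc above notes, the service stops as soon as any forwarder fails: remainingFutures counts down with each take(), and the loop breaks on the first ExecutionException. A condensed, hypothetical sketch of that fail-fast loop with placeholder tasks (JDK imports elided):

public void failFastSketch() {
    ExecutorService executor = Executors.newCachedThreadPool();
    CompletionService<Void> completionService = new ExecutorCompletionService<Void>(executor);

    int remainingFutures = 0;
    for (int i = 0; i < 3; i++) {
        final int taskId = i;
        completionService.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                if (taskId == 1) {
                    throw new IllegalStateException("placeholder failure in task " + taskId);
                }
                Thread.sleep(100); // placeholder for long-running forwarding work
                return null;
            }
        });
        remainingFutures++;
    }

    // Block on take() until any task completes; stop waiting on the first failure.
    while (remainingFutures > 0) {
        try {
            Future<Void> completedFuture = completionService.take();
            remainingFutures--;
            completedFuture.get(); // throws ExecutionException if the task failed
        } catch (ExecutionException e) {
            System.err.println("First failure, stopping: " + e.getCause());
            break;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
    executor.shutdownNow(); // abandon any tasks still running or queued
}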

From source file:org.apache.nifi.cluster.manager.impl.HttpRequestReplicatorImpl.java

private Set<NodeResponse> replicateHelper(final Set<NodeIdentifier> nodeIds, final String method,
        final String scheme, final String path, final Map<String, List<String>> parameters, final Object entity,
        final Map<String, String> headers) throws UriConstructionException {

    if (nodeIds.isEmpty()) {
        return new HashSet<>(); // return quickly for trivial case
    }

    final CompletionService<NodeResponse> completionService = new ExecutorCompletionService<>(executorService);

    // keeps track of future requests so that failed requests can be tied back to the failing node
    final Collection<NodeHttpRequestFutureWrapper> futureNodeHttpRequests = new ArrayList<>();

    // construct the URIs for the nodes
    final Map<NodeIdentifier, URI> uriMap = new HashMap<>();
    try {
        for (final NodeIdentifier nodeId : nodeIds) {
            final URI nodeUri = new URI(scheme, null, nodeId.getApiAddress(), nodeId.getApiPort(), path,
                    /* query */ null, /* fragment */ null);
            uriMap.put(nodeId, nodeUri);
        }
    } catch (final URISyntaxException use) {
        throw new UriConstructionException(use);
    }

    // submit the requests to the nodes
    final String requestId = UUID.randomUUID().toString();
    headers.put(WebClusterManager.REQUEST_ID_HEADER, requestId);
    for (final Map.Entry<NodeIdentifier, URI> entry : uriMap.entrySet()) {
        final NodeIdentifier nodeId = entry.getKey();
        final URI nodeUri = entry.getValue();
        final NodeHttpRequestCallable callable = (entity == null)
                ? new NodeHttpRequestCallable(nodeId, method, nodeUri, parameters, headers)
                : new NodeHttpRequestCallable(nodeId, method, nodeUri, entity, headers);
        futureNodeHttpRequests.add(
                new NodeHttpRequestFutureWrapper(nodeId, method, nodeUri, completionService.submit(callable)));
    }

    // get the node responses
    final Set<NodeResponse> result = new HashSet<>();
    for (int i = 0; i < nodeIds.size(); i++) {

        // keeps track of the original request information in case we receive an exception
        NodeHttpRequestFutureWrapper futureNodeHttpRequest = null;
        try {

            // get the future resource response for the node
            final Future<NodeResponse> futureNodeResourceResponse = completionService.take();

            // find the original request by comparing the submitted future with the future returned by the completion service
            for (final NodeHttpRequestFutureWrapper futureNodeHttpRequestElem : futureNodeHttpRequests) {
                if (futureNodeHttpRequestElem.getFuture() == futureNodeResourceResponse) {
                    futureNodeHttpRequest = futureNodeHttpRequestElem;
                }
            }

            // try to retrieve the node response and add to result
            final NodeResponse nodeResponse = futureNodeResourceResponse.get();
            result.add(nodeResponse);

        } catch (final InterruptedException | ExecutionException ex) {

            logger.warn(
                    "Node request for " + futureNodeHttpRequest.getNodeId() + " encountered exception: " + ex,
                    ex);

            // create node response with the thrown exception and add to result
            final NodeResponse nodeResponse = new NodeResponse(futureNodeHttpRequest.getNodeId(),
                    futureNodeHttpRequest.getHttpMethod(), futureNodeHttpRequest.getRequestUri(), ex);
            result.add(nodeResponse);

        }
    }

    if (logger.isDebugEnabled()) {
        NodeResponse min = null;
        NodeResponse max = null;
        long nanosSum = 0L;
        int nanosAdded = 0;

        for (final NodeResponse response : result) {
            final long requestNanos = response.getRequestDuration(TimeUnit.NANOSECONDS);
            final long minNanos = (min == null) ? -1 : min.getRequestDuration(TimeUnit.NANOSECONDS);
            final long maxNanos = (max == null) ? -1 : max.getRequestDuration(TimeUnit.NANOSECONDS);

            if (requestNanos < minNanos || minNanos < 0L) {
                min = response;
            }

            if (requestNanos > maxNanos || maxNanos < 0L) {
                max = response;
            }

            if (requestNanos >= 0L) {
                nanosSum += requestNanos;
                nanosAdded++;
            }
        }

        final StringBuilder sb = new StringBuilder();
        sb.append("Node Responses for ").append(method).append(" ").append(path).append(" (Request ID ")
                .append(requestId).append("):\n");
        for (final NodeResponse response : result) {
            sb.append(response).append("\n");
        }

        final long averageNanos = (nanosAdded == 0) ? -1L : nanosSum / nanosAdded;
        final long averageMillis = (averageNanos < 0) ? averageNanos
                : TimeUnit.MILLISECONDS.convert(averageNanos, TimeUnit.NANOSECONDS);
        logger.debug("For {} {} (Request ID {}), minimum response time = {}, max = {}, average = {} ms", method,
                path, requestId, min, max, averageMillis);
        logger.debug(sb.toString());
    }

    return result;
}
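
The loop above ties each completed Future back to the node that produced it by comparing it with the Futures originally returned by submit(); ExecutorCompletionService hands those same Future instances back from take(), which is what the identity comparison relies on. A small sketch of the same idea using an IdentityHashMap instead of a linear scan (hypothetical node names; JDK imports and InterruptedException handling elided):

public void futureIdentitySketch() throws InterruptedException {
    ExecutorService executor = Executors.newFixedThreadPool(3);
    CompletionService<String> completionService = new ExecutorCompletionService<String>(executor);

    // Remember each submitted Future together with the context it belongs to.
    Map<Future<String>, String> contextByFuture = new IdentityHashMap<Future<String>, String>();
    for (final String node : new String[] { "node-1", "node-2", "node-3" }) { // placeholder nodes
        Future<String> future = completionService.submit(new Callable<String>() {
            @Override
            public String call() {
                return "response from " + node; // placeholder request
            }
        });
        contextByFuture.put(future, node);
    }

    for (int i = 0; i < contextByFuture.size(); i++) {
        Future<String> completed = completionService.take();
        String node = contextByFuture.get(completed); // take() returns the same instances submit() did
        try {
            System.out.println(node + " -> " + completed.get());
        } catch (ExecutionException e) {
            System.err.println("request to " + node + " failed: " + e.getCause());
        }
    }
    executor.shutdown();
}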

From source file:org.apache.hadoop.hbase.regionserver.HStore.java

@Override
public ImmutableCollection<StoreFile> close() throws IOException {
    this.lock.writeLock().lock();
    try {
        // Clear so metrics doesn't find them.
        ImmutableCollection<StoreFile> result = storeEngine.getStoreFileManager().clearFiles();

        if (!result.isEmpty()) {
            // initialize the thread pool for closing store files in parallel.
            ThreadPoolExecutor storeFileCloserThreadPool = this.region
                    .getStoreFileOpenAndCloseThreadPool("StoreFileCloserThread-" + this.getColumnFamilyName());

            // close each store file in parallel
            CompletionService<Void> completionService = new ExecutorCompletionService<Void>(
                    storeFileCloserThreadPool);
            for (final StoreFile f : result) {
                completionService.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws IOException {
                        f.closeReader(true);
                        return null;
                    }
                });
            }

            IOException ioe = null;
            try {
                for (int i = 0; i < result.size(); i++) {
                    try {
                        Future<Void> future = completionService.take();
                        future.get();
                    } catch (InterruptedException e) {
                        if (ioe == null) {
                            ioe = new InterruptedIOException();
                            ioe.initCause(e);
                        }
                    } catch (ExecutionException e) {
                        if (ioe == null)
                            ioe = new IOException(e.getCause());
                    }
                }
            } finally {
                storeFileCloserThreadPool.shutdownNow();
            }
            if (ioe != null)
                throw ioe;
        }
        LOG.info("Closed " + this);
        return result;
    } finally {
        this.lock.writeLock().unlock();
    }
}
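
A design note on the loop above: take() is called exactly result.size() times so every close task is accounted for even when some fail, an InterruptedException is converted to an InterruptedIOException while an ExecutionException surfaces its cause, only the first failure is kept and rethrown after the loop, and the pool is shut down in a finally block either way so no threads leak.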

From source file:nl.umcg.westrah.binarymetaanalyzer.BinaryMetaAnalysis.java

public void run() throws IOException {
    initialize();
    loadProbeAnnotation();

    String outdir = settings.getOutput();
    if (usetmp) {
        outdir = tempDir;
    }

    System.out.println("Placing output here: " + outdir);
    outdir = Gpio.formatAsDirectory(outdir);
    Gpio.createDir(outdir);

    System.out.println(
            "Permutations: " + settings.getStartPermutations() + " until " + settings.getNrPermutations());

    String zscoretableheader = null;
    if (settings.isMakezscoretable()) {
        StringBuilder builder = new StringBuilder();
        builder.append("SNP\tAlleles\tAlleleAssessed");
        for (int t = 0; t < traitList.length; t++) {
            builder.append("\t").append(traitList[t].getMetaTraitName()).append("_")
                    .append(traitList[t].getAnnotation());
        }
        zscoretableheader = builder.toString();
    }

    int availableProcessors = Runtime.getRuntime().availableProcessors();
    int cores = settings.getNrThreads();
    if (cores < 1) {
        cores = 1;
    } else if (cores > availableProcessors) {
        cores = availableProcessors;
    }

    System.out.println("Will try to make use of " + cores + " CPU cores");
    System.out.println();

    HashSet<QTLPair> prevSet = null;
    for (int permutation = settings.getStartPermutations(); permutation <= settings
            .getNrPermutations(); permutation++) {
        // load probe annotation and index
        // this particular probe annotation can take multiple probes for a single location into account.

        HashSet<QTLPair> set = new HashSet<>();

        Descriptives.initializeZScoreToPValue();

        // re-initialize for each permutation, just to be sure
        if (permutation > settings.getStartPermutations()) {
            initialize();
            System.out.println("Loading probe annotation from: " + settings.getProbetranslationfile());
            loadProbeAnnotation();
            if (traitList.length == 0) {
                System.err.println("Error: no annotation loaded.");
                System.exit(-1);
            }
        }
        //         clearResultsBuffer();

        // create dataset objects
        System.out.println("Running permutation " + permutation);
        datasets = new BinaryMetaAnalysisDataset[settings.getDatasetlocations().size()];

        System.out.println("Loading datasets");
        for (int d = 0; d < datasets.length; d++) {
            datasets[d] = new BinaryMetaAnalysisDataset(settings.getDatasetlocations().get(d),
                    settings.getDatasetnames().get(d), settings.getDatasetPrefix().get(d), permutation,
                    settings.getDatasetannotations().get(d), probeAnnotation,
                    settings.getFeatureOccuranceScaleMaps().get(d));
        }

        System.out.println("Loaded " + datasets.length + " datasets");

        // create meta-analysis SNP index. have to recreate this every permutation,
        // since the order of SNPs is generated at random.
        System.out.println("Creating SNP index");
        createSNPIndex(outdir);
        System.out.println("Total of " + snpIndex.length + " SNPs");

        System.out.println("Creating probe index");
        createProbeIndex(outdir);
        System.out.println("Total of " + probeIndex.length + " probes");

        // make index of snp/probe combinations, if any specified
        createSNPProbeCombos(outdir);

        // load SNP annotation for SNPs present in dataset
        //         if (snpChr == null) {
        System.out.println("Loading SNP annotation from " + settings.getSNPAnnotationFile());
        loadSNPAnnotation();
        //         }

        // run analysis
        System.out.println("Type of analysis: " + settings.getAnalysisType());
        System.out.println("Cis-window: " + settings.getCisdistance());
        System.out.println("Trans-window: " + settings.getTransdistance());

        TextFile zscoreTableTf = null;
        TextFile zscoreTableTfNrSamples = null;

        if (settings.isMakezscoretable()) {

            String tableoutfile = outdir + "ZScoreMatrix-Permutation" + permutation + ".txt.gz";
            String tableoutfileNrSamples = outdir + "ZScoreMatrixNrSamples-Permutation" + permutation
                    + ".txt.gz";
            if (permutation == 0) {
                tableoutfile = outdir + "ZScoreMatrix.txt.gz";
                tableoutfileNrSamples = outdir + "ZScoreMatrixNrSamples.txt.gz";
            }
            System.out.println("Writing z-score table: " + tableoutfile);
            zscoreTableTf = new TextFile(tableoutfile, TextFile.W, 10 * 1048576);
            zscoreTableTfNrSamples = new TextFile(tableoutfileNrSamples, TextFile.W, 10 * 1048576);

            // write header
            zscoreTableTf.writeln(zscoretableheader);
            zscoreTableTfNrSamples.writeln(zscoretableheader);
        }

        ExecutorService threadPool = Executors.newFixedThreadPool(cores);
        CompletionService<Triple<ArrayList<QTL>, String, String>> pool = new ExecutorCompletionService<Triple<ArrayList<QTL>, String, String>>(
                threadPool);

        maxSavedPvalue = -Double.MAX_VALUE;
        locationToStoreResult = 0;
        bufferHasOverFlown = false;
        System.out.println("Max P: " + maxSavedPvalue + "\tLocationToStoreResult: " + locationToStoreResult);

        System.out.println("Starting meta-analysis");
        ProgressBar pb = new ProgressBar(snpList.length);
        int returned = 0;
        ArrayList<Future> futures = new ArrayList<>();
        for (int snp = 0; snp < snpList.length; snp++) {
            // this can go in different threads..
            boolean outputallzscores = true;
            if (permutation > 0) {
                outputallzscores = fullpermutationoutput;
            }
            BinaryMetaAnalysisTask t = new BinaryMetaAnalysisTask(settings, probeAnnotation, datasets, snpIndex,
                    snpList, snpChr, snpPositions, probeIndex, snpprobeCombos, traitMap, traitList, snp, DEBUG,
                    outputallzscores);
            futures.add(pool.submit(t));
        }

        // give the threadpool the signal to shutdown
        threadPool.shutdown();

        int addcalled = 0;
        while (returned < snpList.length) {
            try {
                Future<Triple<ArrayList<QTL>, String, String>> threadfuture = pool.take();
                if (threadfuture != null) {
                    Triple<ArrayList<QTL>, String, String> result = threadfuture.get();

                    for (QTL q : result.getLeft()) {
                        if (!DEBUG) {
                            addEQTL(q);
                        } else {

                            //                        int snpid = q.getSNPId();
                            //                        MetaQTL4MetaTrait trait = q.getMetaTrait();

                            //                        QTLPair combo = new QTLPair();
                            //                        combo.snpid = snpid;
                            //                        combo.trait = trait;
                            //                        set.add(combo);

                        }

                        addcalled++;
                    }
                    if (settings.isMakezscoretable()) {
                        zscoreTableTf.writeln(result.getMiddle());

                        zscoreTableTfNrSamples.writeln(result.getRight());
                    }
                    result = null;
                    returned++;
                    pb.iterate();
                }
                threadfuture = null;
            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        }
        pb.close();

        if (DEBUG) {
            if (prevSet != null) {
                // compare sets
                TextFile tf = new TextFile(outdir + "debug-p" + permutation + ".txt", TextFile.W);
                for (QTLPair p : prevSet) {
                    if (!set.contains(p)) {
                        tf.writeln(snpList[p.snpid] + "\t" + p.trait.getMetaTraitName());
                    }
                }
                tf.close();
            }
            prevSet = set;
        }

        System.out.println("Snps returned: " + returned + "\tNr of snps submitted: " + snpList.length
                + "\tNr of eQTLs evaluated: " + addcalled);
        System.out.println("Max P: " + maxSavedPvalue + "\tLocationToStoreResult: " + locationToStoreResult);

        if (settings.isMakezscoretable()) {
            zscoreTableTf.close();
            zscoreTableTfNrSamples.close();

            if (usetmp) {

                String filename = "ZScoreMatrix-Permutation" + permutation + ".txt.gz";
                if (permutation == 0) {
                    filename = "ZScoreMatrix.txt.gz";
                }
                File source = new File(tempDir + filename);
                File dest = new File(settings.getOutput() + filename);
                if (dest.exists()) {
                    System.out.println(
                            "Destination file: " + dest.getAbsolutePath() + " exists already.. Deleting!");
                    dest.delete();
                }
                System.out.println(
                        "Moving file: " + tempDir + filename + " --> " + settings.getOutput() + filename);
                FileUtils.moveFile(source, dest);

                filename = "ZScoreMatrixNrSamples-Permutation" + permutation + ".txt.gz";
                if (permutation == 0) {
                    filename = "ZScoreMatrixNrSamples.txt.gz";
                }
                source = new File(tempDir + filename);
                dest = new File(settings.getOutput() + filename);
                if (dest.exists()) {
                    System.out.println(
                            "Destination file: " + dest.getAbsolutePath() + " exists already.. Deleting!");
                    dest.delete();
                }
                System.out.println(
                        "Moving file: " + tempDir + filename + " --> " + settings.getOutput() + filename);
                FileUtils.moveFile(source, dest);
            }
        }

        for (BinaryMetaAnalysisDataset dataset : datasets) {
            dataset.close();
        }

        if (!DEBUG) {
            writeBuffer(outdir, permutation);

        }
    }
    if (usetmp) {
        // move remaining contents of tmp dir to final directory
        File source = new File(tempDir);
        File dest = new File(settings.getOutput());
        FileUtils.copyDirectory(source, dest);
        FileUtils.cleanDirectory(source);
    }
}
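
Note the ordering above: threadPool.shutdown() is called immediately after the submission loop. shutdown() only stops new submissions, so the tasks already queued still run, the take() loop continues to receive every result, and the pool winds itself down once the last task finishes. Results are also consumed as they arrive (written to the z-score tables) instead of being collected into one list first.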

From source file:com.alibaba.wasp.fserver.EntityGroup.java

private synchronized void commitTransaction(WALEdit edit) throws IOException {
    Transaction t = edit.getT();
    if (LOG.isDebugEnabled()) {
        LOG.debug("EntityGroup commitTransaction:" + t.getTransactionID());
    }
    List<Mutate> mutates = t.getEdits();
    CompletionService<InsureStatus> completionService = new ExecutorCompletionService<InsureStatus>(
            this.services.getThreadPool());

    for (Mutate mutate : mutates) {
        String tableName = mutate.getTableName();
        try {
            if (mutate.getMutateType() == Mutate.MutateType.PUT) {
                Put put = ProtobufUtil.toPut(mutate, t.getTransactionID());
                completionService.submit(new InsurePut(tableName, put));
            } else if (mutate.getMutateType() == Mutate.MutateType.DELETE) {
                Delete delete = ProtobufUtil.toDelete(mutate);
                completionService.submit(new InsureDelete(tableName, delete));
            }
        } catch (DoNotRetryIOException e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("convert mutate to Put or Delete error.", e);
            }
        }
    }

    int errors = 0;
    for (int i = 0; i < mutates.size(); i++) {
        try {
            Future<InsureStatus> result = completionService.take();
            if (InsureStatus.SUCCESS == result.get()) {
                // nothing to do, this operation was successful.
            } else if (InsureStatus.FAILED == result.get()) {
                errors++;
            } else {
                LOG.warn("What happened?");
                errors++;
            }
        } catch (InterruptedException e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("transaction execute error", e);
            }
        } catch (ExecutionException e) {
            if (LOG.isErrorEnabled()) {
                LOG.error("transaction execute error", e);
            }
        }
    }
    if (errors != 0) {
        String message = "transaction id=" + t.getTransactionID() + " processing encountered " + errors + " errors";
        LOG.warn(message);
        throw new IOException(message);
    }

    try {
        redo.commit(edit);
    } catch (AlreadyCommitTransactionException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error("the transaction id=" + t.getTransactionID() + " has all ready commited", e);
        }
    } catch (NotInitlizedRedoException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error("the transaction id=" + t.getTransactionID()
                    + " commited failed as a result of the redo log has a error ", e);
        }
    } catch (RedoLogNotServingException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error("the transaction id=" + t.getTransactionID()
                    + " commited failed as a result of the redo log has been closed ", e);
        }
    }
    Primary primary = edit.getAction();
    if (primary != null) {
        this.releaseRowLock(primary.getCombinedPrimaryKey());
    }
}

From source file:org.apache.hadoop.hbase.regionserver.Store.java

/**
 * Close all the readers.
 *
 * We don't need to worry about subsequent requests because the HRegion holds
 * a write lock that will prevent any more reads or writes.
 *
 * @throws IOException
 */
ImmutableList<StoreFile> close() throws IOException {
    this.lock.writeLock().lock();
    try {
        ImmutableList<StoreFile> result = storefiles;

        // Clear so metrics doesn't find them.
        storefiles = ImmutableList.of();

        if (!result.isEmpty()) {
            // initialize the thread pool for closing store files in parallel.
            ThreadPoolExecutor storeFileCloserThreadPool = this.region.getStoreFileOpenAndCloseThreadPool(
                    "StoreFileCloserThread-" + this.family.getNameAsString());

            // close each store file in parallel
            CompletionService<Void> completionService = new ExecutorCompletionService<Void>(
                    storeFileCloserThreadPool);
            for (final StoreFile f : result) {
                completionService.submit(new Callable<Void>() {
                    public Void call() throws IOException {
                        f.closeReader(true);
                        return null;
                    }
                });
            }

            try {
                for (int i = 0; i < result.size(); i++) {
                    Future<Void> future = completionService.take();
                    future.get();
                }
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e.getCause());
            } finally {
                storeFileCloserThreadPool.shutdownNow();
            }
        }
        LOG.info("Closed " + this);
        return result;
    } finally {
        this.lock.writeLock().unlock();
    }
}

From source file:test.java.com.spotify.docker.client.DefaultDockerClientTest.java

@Test(expected = DockerTimeoutException.class)
public void testConnectionRequestTimeout() throws Exception {
    final int connectionPoolSize = 1;
    final int callableCount = connectionPoolSize * 100;

    final ExecutorService executor = Executors.newCachedThreadPool();
    final CompletionService completion = new ExecutorCompletionService(executor);

    // Spawn and wait on many more containers than the connection pool size.
    // This should cause a timeout once the connection pool is exhausted.

    final DockerClient dockerClient = DefaultDockerClient.fromEnv().connectionPoolSize(connectionPoolSize)
            .build();
    try {
        // Create container
        final ContainerConfig config = ContainerConfig.builder().image(BUSYBOX_LATEST)
                .cmd("sh", "-c", "while :; do sleep 1; done").build();
        final String name = randomName();
        final ContainerCreation creation = dockerClient.createContainer(config, name);
        final String id = creation.id();

        // Start the container
        dockerClient.startContainer(id);

        // Submit a bunch of waitContainer requests
        for (int i = 0; i < callableCount; i++) {
            completion.submit(new Callable<ContainerExit>() {
                @Override
                public ContainerExit call() throws Exception {
                    return dockerClient.waitContainer(id);
                }
            });
        }

        // Wait for the requests to complete or throw expected exception
        for (int i = 0; i < callableCount; i++) {
            try {
                completion.take().get();
            } catch (ExecutionException e) {
                Throwables.propagateIfInstanceOf(e.getCause(), DockerTimeoutException.class);
                throw e;
            }
        }
    } finally {
        executor.shutdown();
        dockerClient.close();
    }
}
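
The catch block above uses Guava's Throwables.propagateIfInstanceOf to rethrow the expected DockerTimeoutException out of its ExecutionException wrapper. As a sketch of what that call does, the same unwrapping step written against the plain JDK (reusing the completion service from the test above):

try {
    completion.take().get();
} catch (ExecutionException e) {
    // Rethrow the cause as-is when it has the expected type,
    // otherwise keep the ExecutionException wrapper.
    if (e.getCause() instanceof DockerTimeoutException) {
        throw (DockerTimeoutException) e.getCause();
    }
    throw e;
}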