Example usage for java.util.concurrent CompletionService submit

Introduction

This page collects example usages of java.util.concurrent CompletionService.submit.

Prototype

Future<V> submit(Callable<V> task);

Document

Submits a value-returning task for execution and returns a Future representing the pending results of the task.
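
Before the source-file examples below, here is a minimal, self-contained sketch of the pattern they all share: submit n value-returning tasks, then call take()/get() n times to drain the results in completion order rather than submission order. The class name, pool size, and task body are illustrative only.

import java.util.concurrent.*;

public class CompletionServiceSubmitExample {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4); // pool size is illustrative
        CompletionService<Integer> completionService = new ExecutorCompletionService<Integer>(executor);
        try {
            int taskCount = 8;
            for (int i = 0; i < taskCount; i++) {
                final int n = i;
                // submit() schedules the Callable; its Future is queued internally once it completes
                completionService.submit(new Callable<Integer>() {
                    public Integer call() {
                        return n * n; // placeholder work
                    }
                });
            }
            for (int i = 0; i < taskCount; i++) {
                // take() blocks until some task finishes; get() rethrows a task
                // failure as ExecutionException
                Future<Integer> future = completionService.take();
                System.out.println("completed: " + future.get());
            }
        } finally {
            executor.shutdown();
        }
    }
}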

Usage

From source file:org.apache.hadoop.hbase.regionserver.Store.java
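
Here, HBase's Store.close() submits one Callable<Void> per store file so the readers are closed in parallel, then drains the completion queue with take()/get(), converting any failure into an IOException.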

/**
 * Close all the readers.
 *
 * We don't need to worry about subsequent requests because the HRegion holds
 * a write lock that will prevent any more reads or writes.
 *
 * @throws IOException
 */
ImmutableList<StoreFile> close() throws IOException {
    this.lock.writeLock().lock();
    try {
        ImmutableList<StoreFile> result = storefiles;

        // Clear so metrics don't find them.
        storefiles = ImmutableList.of();

        if (!result.isEmpty()) {
            // initialize the thread pool for closing store files in parallel.
            ThreadPoolExecutor storeFileCloserThreadPool = this.region.getStoreFileOpenAndCloseThreadPool(
                    "StoreFileCloserThread-" + this.family.getNameAsString());

            // close each store file in parallel
            CompletionService<Void> completionService = new ExecutorCompletionService<Void>(
                    storeFileCloserThreadPool);
            for (final StoreFile f : result) {
                completionService.submit(new Callable<Void>() {
                    public Void call() throws IOException {
                        f.closeReader(true);
                        return null;
                    }
                });
            }

            try {
                for (int i = 0; i < result.size(); i++) {
                    Future<Void> future = completionService.take();
                    future.get();
                }
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e.getCause());
            } finally {
                storeFileCloserThreadPool.shutdownNow();
            }
        }
        LOG.info("Closed " + this);
        return result;
    } finally {
        this.lock.writeLock().unlock();
    }
}

From source file:nl.umcg.westrah.binarymetaanalyzer.BinaryMetaAnalysis.java
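
This meta-analysis driver submits one BinaryMetaAnalysisTask per SNP for every permutation, then consumes the Triple results in completion order, buffering QTLs and writing z-score tables as they arrive.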

public void run() throws IOException {
    initialize();
    loadProbeAnnotation();

    String outdir = settings.getOutput();
    if (usetmp) {
        outdir = tempDir;
    }

    System.out.println("Placing output here: " + outdir);
    outdir = Gpio.formatAsDirectory(outdir);
    Gpio.createDir(outdir);

    System.out.println(
            "Permutations: " + settings.getStartPermutations() + " until " + settings.getNrPermutations());

    String zscoretableheader = null;
    if (settings.isMakezscoretable()) {
        StringBuilder builder = new StringBuilder();
        builder.append("SNP\tAlleles\tAlleleAssessed");
        for (int t = 0; t < traitList.length; t++) {
            builder.append("\t").append(traitList[t].getMetaTraitName()).append("_")
                    .append(traitList[t].getAnnotation());
        }
        zscoretableheader = builder.toString();
    }

    int availableProcessors = Runtime.getRuntime().availableProcessors();
    int cores = settings.getNrThreads();
    if (cores < 1) {
        cores = 1;
    } else if (cores > availableProcessors) {
        cores = availableProcessors;
    }

    System.out.println("Will try to make use of " + cores + " CPU cores");
    System.out.println();

    HashSet<QTLPair> prevSet = null;
    for (int permutation = settings.getStartPermutations(); permutation <= settings
            .getNrPermutations(); permutation++) {
        // load probe annotation and index
        // this particular probe annotation can take multiple probes for a single location into account.

        HashSet<QTLPair> set = new HashSet<>();

        Descriptives.initializeZScoreToPValue();

        // re-initialize for each permutation, just to be sure
        if (permutation > settings.getStartPermutations()) {
            initialize();
            System.out.println("Loading probe annotation from: " + settings.getProbetranslationfile());
            loadProbeAnnotation();
            if (traitList.length == 0) {
                System.err.println("Error: no annotation loaded.");
                System.exit(-1);
            }
        }
        //         clearResultsBuffer();

        // create dataset objects
        System.out.println("Running permutation " + permutation);
        datasets = new BinaryMetaAnalysisDataset[settings.getDatasetlocations().size()];

        System.out.println("Loading datasets");
        for (int d = 0; d < datasets.length; d++) {
            datasets[d] = new BinaryMetaAnalysisDataset(settings.getDatasetlocations().get(d),
                    settings.getDatasetnames().get(d), settings.getDatasetPrefix().get(d), permutation,
                    settings.getDatasetannotations().get(d), probeAnnotation,
                    settings.getFeatureOccuranceScaleMaps().get(d));
        }

        System.out.println("Loaded " + datasets.length + " datasets");

        // create meta-analysis SNP index. have to recreate this every permutation,
        // since the order of SNPs is generated at random.
        System.out.println("Creating SNP index");
        createSNPIndex(outdir);
        System.out.println("Total of " + snpIndex.length + " SNPs");

        System.out.println("Creating probe index");
        createProbeIndex(outdir);
        System.out.println("Total of " + probeIndex.length + " probes");

        // make index of snp/probe combinations, if any specified
        createSNPProbeCombos(outdir);

        // load SNP annotation for SNPs present in dataset
        //         if (snpChr == null) {
        System.out.println("Loading SNP annotation from " + settings.getSNPAnnotationFile());
        loadSNPAnnotation();
        //         }

        // run analysis
        System.out.println("Type of analysis: " + settings.getAnalysisType());
        System.out.println("Cis-window: " + settings.getCisdistance());
        System.out.println("Trans-window: " + settings.getTransdistance());

        TextFile zscoreTableTf = null;
        TextFile zscoreTableTfNrSamples = null;

        if (settings.isMakezscoretable()) {

            String tableoutfile = outdir + "ZScoreMatrix-Permutation" + permutation + ".txt.gz";
            String tableoutfileNrSamples = outdir + "ZScoreMatrixNrSamples-Permutation" + permutation
                    + ".txt.gz";
            if (permutation == 0) {
                tableoutfile = outdir + "ZScoreMatrix.txt.gz";
                tableoutfileNrSamples = outdir + "ZScoreMatrixNrSamples.txt.gz";
            }
            System.out.println("Writing z-score table: " + tableoutfile);
            zscoreTableTf = new TextFile(tableoutfile, TextFile.W, 10 * 1048576);
            zscoreTableTfNrSamples = new TextFile(tableoutfileNrSamples, TextFile.W, 10 * 1048576);

            // write header
            zscoreTableTf.writeln(zscoretableheader);
            zscoreTableTfNrSamples.writeln(zscoretableheader);
        }

        ExecutorService threadPool = Executors.newFixedThreadPool(cores);
        CompletionService<Triple<ArrayList<QTL>, String, String>> pool = new ExecutorCompletionService<Triple<ArrayList<QTL>, String, String>>(
                threadPool);

        maxSavedPvalue = -Double.MAX_VALUE;
        locationToStoreResult = 0;
        bufferHasOverFlown = false;
        System.out.println("Max P: " + maxSavedPvalue + "\tLocationToStoreResult: " + locationToStoreResult);

        System.out.println("Starting meta-analysis");
        ProgressBar pb = new ProgressBar(snpList.length);
        int returned = 0;
        ArrayList<Future> futures = new ArrayList<>();
        for (int snp = 0; snp < snpList.length; snp++) {
            // this can go in different threads..
            boolean outputallzscores = true;
            if (permutation > 0) {
                outputallzscores = fullpermutationoutput;
            }
            BinaryMetaAnalysisTask t = new BinaryMetaAnalysisTask(settings, probeAnnotation, datasets, snpIndex,
                    snpList, snpChr, snpPositions, probeIndex, snpprobeCombos, traitMap, traitList, snp, DEBUG,
                    outputallzscores);
            futures.add(pool.submit(t));
        }

        // give the threadpool the signal to shutdown
        threadPool.shutdown();

        int addcalled = 0;
        while (returned < snpList.length) {
            try {
                Future<Triple<ArrayList<QTL>, String, String>> threadfuture = pool.take();
                if (threadfuture != null) {
                    Triple<ArrayList<QTL>, String, String> result = threadfuture.get();

                    for (QTL q : result.getLeft()) {
                        if (!DEBUG) {
                            addEQTL(q);
                        } else {

                            //                        int snpid = q.getSNPId();
                            //                        MetaQTL4MetaTrait trait = q.getMetaTrait();

                            //                        QTLPair combo = new QTLPair();
                            //                        combo.snpid = snpid;
                            //                        combo.trait = trait;
                            //                        set.add(combo);

                        }

                        addcalled++;
                    }
                    if (settings.isMakezscoretable()) {
                        zscoreTableTf.writeln(result.getMiddle());

                        zscoreTableTfNrSamples.writeln(result.getRight());
                    }
                    result = null;
                    returned++;
                    pb.iterate();
                }
                threadfuture = null;
            } catch (InterruptedException e) {
                e.printStackTrace();
            } catch (ExecutionException e) {
                e.printStackTrace();
            }
        }
        pb.close();

        if (DEBUG) {
            if (prevSet != null) {
                // compare sets
                TextFile tf = new TextFile(outdir + "debug-p" + permutation + ".txt", TextFile.W);
                for (QTLPair p : prevSet) {
                    if (!set.contains(p)) {
                        tf.writeln(snpList[p.snpid] + "\t" + p.trait.getMetaTraitName());
                    }
                }
                tf.close();
            }
            prevSet = set;
        }

        System.out.println("Snps returned: " + returned + "\tNr of snps submitted: " + snpList.length
                + "\tNr of eQTLs evaluated: " + addcalled);
        System.out.println("Max P: " + maxSavedPvalue + "\tLocationToStoreResult: " + locationToStoreResult);

        if (settings.isMakezscoretable()) {
            zscoreTableTf.close();
            zscoreTableTfNrSamples.close();

            if (usetmp) {

                String filename = "ZScoreMatrix-Permutation" + permutation + ".txt.gz";
                if (permutation == 0) {
                    filename = "ZScoreMatrix.txt.gz";
                }
                File source = new File(tempDir + filename);
                File dest = new File(settings.getOutput() + filename);
                if (dest.exists()) {
                    System.out.println(
                            "Destination file: " + dest.getAbsolutePath() + " exists already.. Deleting!");
                    dest.delete();
                }
                System.out.println(
                        "Moving file: " + tempDir + filename + " --> " + settings.getOutput() + filename);
                FileUtils.moveFile(source, dest);

                filename = "ZScoreMatrixNrSamples-Permutation" + permutation + ".txt.gz";
                if (permutation == 0) {
                    filename = "ZScoreMatrixNrSamples.txt.gz";
                }
                source = new File(tempDir + filename);
                dest = new File(settings.getOutput() + filename);
                if (dest.exists()) {
                    System.out.println(
                            "Destination file: " + dest.getAbsolutePath() + " exists already.. Deleting!");
                    dest.delete();
                }
                System.out.println(
                        "Moving file: " + tempDir + filename + " --> " + settings.getOutput() + filename);
                FileUtils.moveFile(source, dest);
            }
        }

        for (BinaryMetaAnalysisDataset dataset : datasets) {
            dataset.close();
        }

        if (!DEBUG) {
            writeBuffer(outdir, permutation);

        }
    }
    if (usetmp) {
        // move remaining contents of tmp dir to final directory
        File source = new File(tempDir);
        File dest = new File(settings.getOutput());
        FileUtils.copyDirectory(source, dest);
        FileUtils.cleanDirectory(source);
    }
}

From source file:nl.systemsgenetics.eqtlinteractionanalyser.eqtlinteractionanalyser.TestEQTLDatasetForInteractions.java
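
Here, one PerformInteractionAnalysisPermutationTask is submitted per covariate with nonzero standard deviation; results are taken in completion order to fill a z-score matrix and track the covariate with the largest chi-square sum.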

public final String performInteractionAnalysis(String[] covsToCorrect, String[] covsToCorrect2,
        TextFile outputTopCovs, File snpsToSwapFile, HashMultimap<String, String> qtlProbeSnpMultiMap,
        String[] covariatesToTest, HashMap hashSamples, int numThreads, final TIntHashSet snpsToTest,
        boolean skipNormalization, boolean skipCovariateNormalization,
        HashMultimap<String, String> qtlProbeSnpMultiMapCovariates) throws IOException, Exception {

    //hashSamples = excludeOutliers(hashSamples);

    HashMap<String, Integer> covariatesToLoad = new HashMap();
    if (covariatesToTest != null) {
        for (String c : covariatesToTest) {
            covariatesToLoad.put(c, null);
        }
        for (String c : covsToCorrect) {
            covariatesToLoad.put(c, null);
        }
        for (String c : covsToCorrect2) {
            covariatesToLoad.put(c, null);
        }
        for (int i = 1; i <= 50; ++i) {
            covariatesToLoad.put("Comp" + i, null);
        }
    } else {
        covariatesToLoad = null;
    }

    ExpressionDataset datasetExpression = new ExpressionDataset(
            inputDir + "/bigTableLude.txt.Expression.binary", '\t', null, hashSamples);
    ExpressionDataset datasetCovariates = new ExpressionDataset(
            inputDir + "/covariateTableLude.txt.Covariates.binary", '\t', covariatesToLoad, hashSamples);

    org.apache.commons.math3.stat.regression.OLSMultipleLinearRegression regression = new org.apache.commons.math3.stat.regression.OLSMultipleLinearRegression();
    int nrSamples = datasetGenotypes.nrSamples;

    correctDosageDirectionForQtl(snpsToSwapFile, datasetGenotypes, datasetExpression);

    if (!skipNormalization) {
        correctExpressionData(covsToCorrect2, datasetGenotypes, datasetCovariates, datasetExpression);
    }

    ExpressionDataset datasetCovariatesPCAForceNormal = new ExpressionDataset(
            inputDir + "/covariateTableLude.txt.Covariates.binary", '\t', covariatesToLoad, hashSamples);

    if (!skipNormalization && !skipCovariateNormalization) {
        correctCovariateDataPCA(covsToCorrect2, covsToCorrect, datasetGenotypes,
                datasetCovariatesPCAForceNormal);
    }

    if (1 == 1) {

        if (!skipNormalization && !skipCovariateNormalization && covsToCorrect2.length != 0
                && covsToCorrect.length != 0) {
            correctCovariateData(covsToCorrect2, covsToCorrect, datasetGenotypes, datasetCovariates);
        }

        if (!skipNormalization && !skipCovariateNormalization && !qtlProbeSnpMultiMapCovariates.isEmpty()) {
            correctCovariatesForQtls(datasetCovariates, datasetGenotypes, qtlProbeSnpMultiMapCovariates);
        }

        if (1 == 2) {
            saveCorrectedCovariates(datasetCovariates);
        }

        if (1 == 2) {
            icaCovariates(datasetCovariates);
        }
        if (!skipNormalization) {
            forceNormalCovariates(datasetCovariates, datasetGenotypes);
        }

    }

    ExpressionDataset datasetExpressionBeforeEQTLCorrection = new ExpressionDataset(datasetExpression.nrProbes,
            datasetExpression.nrSamples);
    for (int p = 0; p < datasetExpression.nrProbes; p++) {
        for (int s = 0; s < datasetExpression.nrSamples; s++) {
            datasetExpressionBeforeEQTLCorrection.rawData[p][s] = datasetExpression.rawData[p][s];
        }
    }

    if (!skipNormalization && covsToCorrect.length != 0) {
        correctExpressionDataForInteractions(covsToCorrect, datasetCovariates, datasetGenotypes, nrSamples,
                datasetExpression, regression, qtlProbeSnpMultiMap);
    }

    if (!skipNormalization) {
        forceNormalExpressionData(datasetExpression);
    }

    datasetExpression.save(outputDir + "/expressionDataRound_" + covsToCorrect.length + ".txt");
    datasetExpression.save(outputDir + "/expressionDataRound_" + covsToCorrect.length + ".binary");
    datasetCovariates.save(outputDir + "/covariateData_" + covsToCorrect.length + ".binary");

    if (1 == 1) {

        ExpressionDataset datasetZScores = new ExpressionDataset(datasetCovariates.nrProbes,
                datasetExpression.nrProbes);
        datasetZScores.probeNames = datasetCovariates.probeNames;

        datasetZScores.sampleNames = new String[datasetGenotypes.probeNames.length];
        for (int i = 0; i < datasetGenotypes.probeNames.length; ++i) {
            datasetZScores.sampleNames[i] = datasetGenotypes.probeNames[i] + datasetExpression.probeNames[i]
                    .substring(datasetExpression.probeNames[i].lastIndexOf('_'));
        }

        datasetZScores.recalculateHashMaps();

        SkippedInteractionWriter skippedWriter = new SkippedInteractionWriter(
                new File(outputDir + "/skippedInteractionsRound_" + covsToCorrect.length + ".txt"));

        java.util.concurrent.ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
        CompletionService<DoubleArrayIntegerObject> pool = new ExecutorCompletionService<DoubleArrayIntegerObject>(
                threadPool);
        int nrTasks = 0;
        for (int cov = 0; cov < datasetCovariates.nrProbes; cov++) {
            double stdev = JSci.maths.ArrayMath.standardDeviation(datasetCovariates.rawData[cov]);
            if (stdev > 0) {
                PerformInteractionAnalysisPermutationTask task = new PerformInteractionAnalysisPermutationTask(
                        datasetGenotypes, datasetExpression, datasetCovariates, datasetCovariatesPCAForceNormal,
                        cov, skippedWriter, snpsToTest);
                pool.submit(task);
                nrTasks++;
            }
        }

        String maxChi2Cov = "";
        int maxChi2CovI = 0;
        double maxChi2 = 0;
        try {
            // If gene annotation provided, for chi2sum calculation use only genes that are 1mb apart
            //if (geneDistanceMap != null) {
            for (int task = 0; task < nrTasks; task++) {
                try {
                    //System.out.println("Waiting on thread for: " + datasetCovariates.probeNames[cov]);
                    DoubleArrayIntegerObject result = pool.take().get();
                    int cov = result.intValue;
                    double chi2Sum = 0;
                    double[] covZ = datasetZScores.rawData[cov];
                    for (int snp = 0; snp < datasetGenotypes.nrProbes; snp++) {
                        //if (genesFarAway(datasetZScores.sampleNames[snp], datasetZScores.probeNames[cov])) {
                        double z = result.doubleArray[snp];
                        covZ[snp] = z;
                        if (!Double.isNaN(z)) {
                            chi2Sum += z * z;
                        }
                        //}
                    }

                    if (chi2Sum > maxChi2 && !datasetCovariates.probeNames[cov].startsWith("Comp")
                            && !datasetCovariates.probeNames[cov].equals("LLS")
                            && !datasetCovariates.probeNames[cov].equals("LLdeep")
                            && !datasetCovariates.probeNames[cov].equals("RS")
                            && !datasetCovariates.probeNames[cov].equals("CODAM")) {
                        maxChi2 = chi2Sum;
                        maxChi2CovI = cov;
                        maxChi2Cov = datasetCovariates.probeNames[cov];
                    }
                    //System.out.println(covsToCorrect.length + "\t" + cov + "\t" + datasetCovariates.probeNames[cov] + "\t" + chi2Sum);
                    if ((task + 1) % 512 == 0) {
                        System.out.println(task + 1 + " tasks processed");
                    }
                } catch (ExecutionException ex) {
                    Logger.getLogger(PerformInteractionAnalysisPermutationTask.class.getName())
                            .log(Level.SEVERE, null, ex);
                }
            }
            /*} //If gene annotation not provided, use all gene pairs
             else {
             for (int task = 0; task < nrTasks; task++) {
             try {
             DoubleArrayIntegerObject result = pool.take().get();
             int cov = result.intValue;
             double chi2Sum = 0;
             double[] covZ = datasetZScores.rawData[cov];
             for (int snp = 0; snp < datasetGenotypes.nrProbes; snp++) {
             double z = result.doubleArray[snp];
             covZ[snp] = z;
             if (!Double.isNaN(z)) {
             chi2Sum += z * z;
             }
             }
             if (chi2Sum > maxChi2) {
             maxChi2 = chi2Sum;
             maxChi2Cov = datasetCovariates.probeNames[cov];
             }
             //System.out.println(covsToCorrect.length + "\t" + cov + "\t" + datasetCovariates.probeNames[cov] + "\t" + chi2Sum);
             if ((task + 1) % 512 == 0) {
             System.out.println(task + 1 + " tasks processed");
             }
             } catch (ExecutionException ex) {
             Logger.getLogger(PerformInteractionAnalysisPermutationTask.class.getName()).log(Level.SEVERE, null, ex);
             }
             }
             }*/
            threadPool.shutdown();
        } catch (Exception e) {
            e.printStackTrace();
            System.out.println(e.getMessage());
        }

        System.out.println("Top covariate:\t" + maxChi2 + "\t" + maxChi2Cov);
        outputTopCovs.writeln("Top covariate:\t" + maxChi2 + "\t" + maxChi2Cov);
        outputTopCovs.flush();
        skippedWriter.close();
        datasetZScores.save(outputDir + "/InteractionZScoresMatrix-" + covsToCorrect.length + "Covariates.txt");

        BufferedWriter writer = new BufferedWriter(
                new FileWriter(outputDir + "/" + "topCov" + maxChi2Cov + "_expression.txt"));
        double[] topCovExpression = datasetCovariates.rawData[maxChi2CovI];
        for (int i = 0; i < topCovExpression.length; ++i) {
            writer.append(datasetCovariates.sampleNames[i]);
            writer.append('\t');
            writer.append(String.valueOf(topCovExpression[i]));
            writer.append('\n');
        }
        writer.close();

        return maxChi2Cov;
    }

    return null;
}

From source file:org.openspaces.admin.internal.admin.DefaultAdmin.java
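
This example fans a dump request out to every DumpProvider, notifying a listener as each dump is generated and collecting the results with take().get(); individual failures are silently skipped.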

@Override
public DumpResult generateDump(final Set<DumpProvider> dumpProviders, final DumpGeneratedListener listener,
        final String cause, final Map<String, Object> context, final String... processor)
        throws AdminException {
    CompoundDumpResult dumpResult = new CompoundDumpResult();

    ExecutorService es = Executors.newFixedThreadPool(dumpProviders.size());
    CompletionService<DumpResult> cs = new ExecutorCompletionService<DumpResult>(es);

    final AtomicInteger counter = new AtomicInteger();
    for (final DumpProvider dumpProvider : dumpProviders) {
        cs.submit(new Callable<DumpResult>() {
            @Override
            public DumpResult call() throws Exception {
                DumpResult result = dumpProvider.generateDump(cause, context, processor);
                synchronized (listener) {
                    listener.onGenerated(dumpProvider, result, counter.incrementAndGet(), dumpProviders.size());
                }
                return result;
            }
        });
    }

    for (int i = 0; i < dumpProviders.size(); i++) {
        try {
            dumpResult.add(cs.take().get());
        } catch (Exception e) {
            // ignore it for now
        }
    }

    es.shutdown();

    return dumpResult;
}

From source file:com.spotify.docker.client.DefaultDockerClientTest.java
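
This test submits far more waitContainer calls than the connection pool can serve, expecting the pool's exhaustion to surface as a DockerTimeoutException from one of the futures.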

@Test(expected = DockerTimeoutException.class)
public void testConnectionRequestTimeout() throws Exception {
    final int connectionPoolSize = 1;
    final int callableCount = connectionPoolSize * 100;

    final ExecutorService executor = Executors.newCachedThreadPool();
    final CompletionService completion = new ExecutorCompletionService(executor);

    // Spawn and wait on many more containers than the connection pool size.
    // This should cause a timeout once the connection pool is exhausted.

    try (final DockerClient dockerClient = DefaultDockerClient.fromEnv().connectionPoolSize(connectionPoolSize)
            .build()) {
        // Create container
        final ContainerConfig config = ContainerConfig.builder().image(BUSYBOX_LATEST)
                .cmd("sh", "-c", "while :; do sleep 1; done").build();
        final String name = randomName();
        final ContainerCreation creation = dockerClient.createContainer(config, name);
        final String id = creation.id();

        // Start the container
        dockerClient.startContainer(id);

        // Submit a bunch of waitContainer requests
        for (int i = 0; i < callableCount; i++) {
            //noinspection unchecked
            completion.submit(new Callable<ContainerExit>() {
                @Override
                public ContainerExit call() throws Exception {
                    return dockerClient.waitContainer(id);
                }
            });
        }

        // Wait for the requests to complete or throw expected exception
        for (int i = 0; i < callableCount; i++) {
            try {
                completion.take().get();
            } catch (ExecutionException e) {
                Throwables.propagateIfInstanceOf(e.getCause(), DockerTimeoutException.class);
                throw e;
            }
        }
    } finally {
        executor.shutdown();
    }
}

From source file:org.apache.hadoop.hdfs.DFSInputStream.java
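
Here, submit() issues the first datanode read; if it has not completed within the hedged-read timeout, additional 'hedged' reads are submitted and the fastest result wins.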

/**
 * Like {@link #fetchBlockByteRange(LocatedBlock, long, long, byte[],
 * int, Map)} except we start up a second, parallel, 'hedged' read
 * if the first read is taking longer than the configured amount of
 * time. We then wait on whichever read returns first.
 *
 * @param block
 * @param start
 * @param end
 * @param buf
 * @param offset
 * @param corruptedBlockMap
 * @throws IOException
 */
private void hedgedFetchBlockByteRange(long blockStartOffset, long start, long end, byte[] buf, int offset,
        Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap) throws IOException {
    ArrayList<Future<ByteBuffer>> futures = new ArrayList<Future<ByteBuffer>>();
    CompletionService<ByteBuffer> hedgedService = new ExecutorCompletionService<ByteBuffer>(
            dfsClient.getHedgedReadsThreadPool());
    ArrayList<DatanodeInfo> ignored = new ArrayList<DatanodeInfo>();
    ByteBuffer bb = null;
    int len = (int) (end - start + 1);
    int hedgedReadId = 0;
    LocatedBlock block = getBlockAt(blockStartOffset);
    while (true) {
        // see HDFS-6591, this metric is used to verify/catch unnecessary loops
        hedgedReadOpsLoopNumForTesting++;
        DNAddrPair chosenNode = null;
        // there is no request already executing.
        if (futures.isEmpty()) {
            // chooseDataNode is a commitment. If no node, we go to
            // the NN to reget block locations. Only go here on first read.
            chosenNode = chooseDataNode(block, ignored);
            bb = ByteBuffer.wrap(buf, offset, len);
            Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode,
                    block.getStartOffset(), start, end, bb, corruptedBlockMap, hedgedReadId++);
            Future<ByteBuffer> firstRequest = hedgedService.submit(getFromDataNodeCallable);
            futures.add(firstRequest);
            try {
                Future<ByteBuffer> future = hedgedService.poll(dfsClient.getHedgedReadTimeout(),
                        TimeUnit.MILLISECONDS);
                if (future != null) {
                    future.get();
                    return;
                }
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Waited " + dfsClient.getHedgedReadTimeout() + "ms to read from "
                            + chosenNode.info + "; spawning hedged read");
                }
                // Ignore this node on next go around.
                ignored.add(chosenNode.info);
                dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                continue; // no need to refresh block locations
            } catch (InterruptedException e) {
                // Ignore
            } catch (ExecutionException e) {
                // Ignore already logged in the call.
            }
        } else {
            // We are starting up a 'hedged' read. We have a read already
            // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.
            // If no nodes to do hedged reads against, pass.
            try {
                try {
                    chosenNode = getBestNodeDNAddrPair(block.getLocations(), ignored);
                } catch (IOException ioe) {
                    chosenNode = chooseDataNode(block, ignored);
                }
                bb = ByteBuffer.allocate(len);
                Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode,
                        block.getStartOffset(), start, end, bb, corruptedBlockMap, hedgedReadId++);
                Future<ByteBuffer> oneMoreRequest = hedgedService.submit(getFromDataNodeCallable);
                futures.add(oneMoreRequest);
            } catch (IOException ioe) {
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Failed getting node for hedged read: " + ioe.getMessage());
                }
            }
            // If not succeeded, submit callables for each datanode in a loop, wait
            // for a fixed interval and get the result from the fastest one.
            try {
                ByteBuffer result = getFirstToComplete(hedgedService, futures);
                // cancel the rest.
                cancelAll(futures);
                if (result.array() != buf) { // compare the array pointers
                    dfsClient.getHedgedReadMetrics().incHedgedReadWins();
                    System.arraycopy(result.array(), result.position(), buf, offset, len);
                } else {
                    dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                }
                return;
            } catch (InterruptedException ie) {
                // Ignore and retry
            }
            // We got here on exception. Ignore this node on next go around IFF
            // we found a chosenNode to hedge read against.
            if (chosenNode != null && chosenNode.info != null) {
                ignored.add(chosenNode.info);
            }
        }
    }
}

From source file:com.mellanox.r4h.DFSInputStream.java
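
A close variant of the HDFS hedged-read loop above, differing mainly in how the block and the fallback datanode are resolved.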

/**
 * Like {@link #fetchBlockByteRange(LocatedBlock, long, long, byte[], int, Map)} except we start up a second, parallel, 'hedged' read
 * if the first read is taking longer than the configured amount of
 * time. We then wait on whichever read returns first.
 */
private void hedgedFetchBlockByteRange(LocatedBlock block, long start, long end, byte[] buf, int offset,
        Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap) throws IOException {
    ArrayList<Future<ByteBuffer>> futures = new ArrayList<Future<ByteBuffer>>();
    CompletionService<ByteBuffer> hedgedService = new ExecutorCompletionService<ByteBuffer>(
            dfsClient.getHedgedReadsThreadPool());
    ArrayList<DatanodeInfo> ignored = new ArrayList<DatanodeInfo>();
    ByteBuffer bb = null;
    int len = (int) (end - start + 1);
    int hedgedReadId = 0;
    block = getBlockAt(block.getStartOffset());
    while (true) {
        // see HDFS-6591, this metric is used to verify/catch unnecessary loops
        hedgedReadOpsLoopNumForTesting++;
        DNAddrPair chosenNode = null;
        // there is no request already executing.
        if (futures.isEmpty()) {
            // chooseDataNode is a commitment. If no node, we go to
            // the NN to reget block locations. Only go here on first read.
            chosenNode = chooseDataNode(block, ignored);
            bb = ByteBuffer.wrap(buf, offset, len);
            Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end, bb,
                    corruptedBlockMap, hedgedReadId++);
            Future<ByteBuffer> firstRequest = hedgedService.submit(getFromDataNodeCallable);
            futures.add(firstRequest);
            try {
                Future<ByteBuffer> future = hedgedService.poll(dfsClient.getHedgedReadTimeout(),
                        TimeUnit.MILLISECONDS);
                if (future != null) {
                    future.get();
                    return;
                }
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Waited " + dfsClient.getHedgedReadTimeout() + "ms to read from "
                            + chosenNode.info + "; spawning hedged read");
                }
                // Ignore this node on next go around.
                ignored.add(chosenNode.info);
                dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                continue; // no need to refresh block locations
            } catch (InterruptedException e) {
                // Ignore
            } catch (ExecutionException e) {
                // Ignore already logged in the call.
            }
        } else {
            // We are starting up a 'hedged' read. We have a read already
            // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.
            // If no nodes to do hedged reads against, pass.
            try {
                try {
                    chosenNode = getBestNodeDNAddrPair(block, ignored);
                } catch (IOException ioe) {
                    chosenNode = chooseDataNode(block, ignored);
                }
                bb = ByteBuffer.allocate(len);
                Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(chosenNode, block, start, end,
                        bb, corruptedBlockMap, hedgedReadId++);
                Future<ByteBuffer> oneMoreRequest = hedgedService.submit(getFromDataNodeCallable);
                futures.add(oneMoreRequest);
            } catch (IOException ioe) {
                if (DFSClient.LOG.isDebugEnabled()) {
                    DFSClient.LOG.debug("Failed getting node for hedged read: " + ioe.getMessage());
                }
            }
            // If not succeeded, submit callables for each datanode in a loop, wait
            // for a fixed interval and get the result from the fastest one.
            try {
                ByteBuffer result = getFirstToComplete(hedgedService, futures);
                // cancel the rest.
                cancelAll(futures);
                if (result.array() != buf) { // compare the array pointers
                    dfsClient.getHedgedReadMetrics().incHedgedReadWins();
                    System.arraycopy(result.array(), result.position(), buf, offset, len);
                } else {
                    dfsClient.getHedgedReadMetrics().incHedgedReadOps();
                }
                return;
            } catch (InterruptedException ie) {
                // Ignore and retry
            }
            // We got here on exception. Ignore this node on next go around IFF
            // we found a chosenNode to hedge read against.
            if (chosenNode != null && chosenNode.info != null) {
                ignored.add(chosenNode.info);
            }
        }
    }
}

From source file:org.apache.camel.processor.MulticastProcessor.java
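
Camel chooses between an ExecutorCompletionService (aggregate in completion order, for streaming) and a SubmitOrderedCompletionService (aggregate in submission order), then submits one Callable per exchange while a separate task aggregates the replies on the fly.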

protected void doProcessParallel(final Exchange original, final AtomicExchange result,
        final Iterable<ProcessorExchangePair> pairs, final boolean streaming, final AsyncCallback callback)
        throws Exception {

    ObjectHelper.notNull(executorService, "ExecutorService", this);
    ObjectHelper.notNull(aggregateExecutorService, "AggregateExecutorService", this);

    final CompletionService<Exchange> completion;
    if (streaming) {
        // execute tasks in parallel+streaming and aggregate in the order they are finished (out of order sequence)
        completion = new ExecutorCompletionService<Exchange>(executorService);
    } else {
        // execute tasks in parallel and aggregate in the order the tasks are submitted (in order sequence)
        completion = new SubmitOrderedCompletionService<Exchange>(executorService);
    }

    // when parallel then aggregate on the fly
    final AtomicBoolean running = new AtomicBoolean(true);
    final AtomicInteger total = new AtomicInteger(0);
    final AtomicBoolean allTasksSubmitted = new AtomicBoolean();
    final CountDownLatch aggregationOnTheFlyDone = new CountDownLatch(1);
    final AtomicException executionException = new AtomicException();

    final Iterator<ProcessorExchangePair> it = pairs.iterator();

    if (it.hasNext()) {
        // issue task to execute in separate thread so it can aggregate on-the-fly
        // while we submit new tasks, and those tasks complete concurrently
        // this allows us to optimize work and reduce memory consumption
        AggregateOnTheFlyTask task = new AggregateOnTheFlyTask(result, original, total, completion, running,
                aggregationOnTheFlyDone, allTasksSubmitted, executionException);

        // and start the aggregation task so we can aggregate on-the-fly
        aggregateExecutorService.submit(task);
    }

    LOG.trace("Starting to submit parallel tasks");

    while (it.hasNext()) {
        final ProcessorExchangePair pair = it.next();
        final Exchange subExchange = pair.getExchange();
        updateNewExchange(subExchange, total.intValue(), pairs, it);

        completion.submit(new Callable<Exchange>() {
            public Exchange call() throws Exception {
                if (!running.get()) {
                    // do not start processing the task if we are not running
                    return subExchange;
                }

                try {
                    doProcessParallel(pair);
                } catch (Throwable e) {
                    subExchange.setException(e);
                }

                // Decide whether to continue with the multicast or not; similar logic to the Pipeline
                Integer number = getExchangeIndex(subExchange);
                boolean continueProcessing = PipelineHelper.continueProcessing(subExchange,
                        "Parallel processing failed for number " + number, LOG);
                if (stopOnException && !continueProcessing) {
                    // signal to stop running
                    running.set(false);
                    // throw caused exception
                    if (subExchange.getException() != null) {
                        // wrap in exception to explain where it failed
                        throw new CamelExchangeException("Parallel processing failed for number " + number,
                                subExchange, subExchange.getException());
                    }
                }

                if (LOG.isTraceEnabled()) {
                    LOG.trace("Parallel processing complete for exchange: " + subExchange);
                }
                return subExchange;
            }
        });

        total.incrementAndGet();
    }

    // signal that all tasks have been submitted
    if (LOG.isTraceEnabled()) {
        LOG.trace("Signaling that all " + total.get() + " tasks has been submitted.");
    }
    allTasksSubmitted.set(true);

    // it's too hard to do parallel async routing, so we let the caller thread run synchronously
    // and have it pick up the replies and do the aggregation (e.g. we use a latch to wait)
    // wait for aggregation to be done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Waiting for on-the-fly aggregation to complete aggregating " + total.get() + " responses.");
    }
    aggregationOnTheFlyDone.await();

    // did we fail for whatever reason, if so throw that caused exception
    if (executionException.get() != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Parallel processing failed due " + executionException.get().getMessage());
        }
        throw executionException.get();
    }

    // now everything is okay, so we are done
    if (LOG.isDebugEnabled()) {
        LOG.debug("Done parallel processing " + total + " exchanges");
    }
}

From source file:org.apache.hadoop.hbase.regionserver.Store.java
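
The counterpart of the close() example above: one Callable<StoreFile> per HFile opens readers in parallel, and take()/get() collects the opened files while accumulating store size metrics.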

/**
 * Creates an unsorted list of StoreFile loaded in parallel
 * from the given directory.
 * @throws IOException
 */
private List<StoreFile> loadStoreFiles() throws IOException {
    ArrayList<StoreFile> results = new ArrayList<StoreFile>();
    FileStatus files[] = getStoreFiles();

    if (files == null || files.length == 0) {
        return results;
    }
    // initialize the thread pool for opening store files in parallel..
    ThreadPoolExecutor storeFileOpenerThreadPool = this.region
            .getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" + this.family.getNameAsString());
    CompletionService<StoreFile> completionService = new ExecutorCompletionService<StoreFile>(
            storeFileOpenerThreadPool);

    int totalValidStoreFile = 0;
    for (int i = 0; i < files.length; i++) {
        // Skip directories.
        if (files[i].isDir()) {
            continue;
        }
        final Path p = files[i].getPath();
        // Check for empty hfile. Should never be the case but can happen
        // after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
        // NOTE: that the HFileLink is just a name, so it's an empty file.
        if (!HFileLink.isHFileLink(p) && this.fs.getFileStatus(p).getLen() <= 0) {
            LOG.warn("Skipping " + p + " because its empty. HBASE-646 DATA LOSS?");
            continue;
        }

        // open each store file in parallel
        completionService.submit(new Callable<StoreFile>() {
            public StoreFile call() throws IOException {
                StoreFile storeFile = new StoreFile(fs, p, conf, cacheConf, family.getBloomFilterType(),
                        dataBlockEncoder, isAssistant());
                passSchemaMetricsTo(storeFile);
                storeFile.createReader();
                return storeFile;
            }
        });
        totalValidStoreFile++;
    }

    try {
        for (int i = 0; i < totalValidStoreFile; i++) {
            Future<StoreFile> future = completionService.take();
            StoreFile storeFile = future.get();
            long length = storeFile.getReader().length();
            this.storeSize += length;
            this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes();
            if (LOG.isDebugEnabled()) {
                LOG.debug("loaded " + storeFile.toStringDetailed());
            }
            results.add(storeFile);
        }
    } catch (InterruptedException e) {
        throw new IOException(e);
    } catch (ExecutionException e) {
        throw new IOException(e.getCause());
    } finally {
        storeFileOpenerThreadPool.shutdownNow();
    }

    return results;
}

From source file:com.github.NearestNeighbors.java
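
This recommender submits one distance calculation per candidate repository and merges the resulting maps as take().get() delivers them.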

public Map<String, Map<String, Collection<Float>>> evaluate(final Collection<Watcher> test_instances)
        throws IOException, InterruptedException, ExecutionException {
    log.info("knn-evaluate: Loading watchers.");

    log.debug(String.format("knn-evaluate: Total unique test watchers: %d", test_instances.size()));

    final Map<String, Map<String, Collection<Float>>> results = new HashMap<String, Map<String, Collection<Float>>>();

    final ExecutorService pool = Executors.newFixedThreadPool(THREAD_POOL_SIZE);

    // For each watcher in the test set . . .
    log.info("knn-evaluate: Starting evaluations");
    int test_watcher_count = 0;
    for (final Watcher watcher : test_instances) {
        test_watcher_count++;
        log.info(String.format("Processing watcher (%d/%d)", test_watcher_count, test_instances.size()));

        results.put(watcher.id, new HashMap<String, Collection<Float>>());

        // See if we have any training instances for the watcher.  If not, we really can't guess anything.
        final Watcher training_watcher = training_watchers.get(watcher.id);
        if (training_watcher == null) {
            continue;
        }

        /***********************************
         *** Handling repository regions ***
         ***********************************/

        // Calculate the distance between the repository regions we know the test watcher is in, to every other
        // region in the training data.
        final Set<NeighborRegion> test_regions = watchers_to_regions.get(watcher.id);

        /*
        final List<NeighborRegion> related_regions = find_regions_with_most_cutpoints(watcher, test_regions);
        for (final NeighborRegion related_region : related_regions)
        {
          storeDistance(results, watcher, related_region.most_popular, 0.0f);
          storeDistance(results, watcher, related_region.most_forked, 0.0f);
        }
        */

        /*
          also_owned_counts = {}
          training_watcher.repositories.each do |repo_id|
            repo = @training_repositories[repo_id]
                
            also_owned_counts[repo.owner] ||= 0
            also_owned_counts[repo.owner] += 1
          end
                
          also_owned_counts.each do |owner, count|
            # If 5% or more of the test watcher's repositories are owned by the same person, look at the owner's other repositories.
            if (also_owned_repos.size.to_f / training_watcher.repositories.size) > 0.05 || (also_owned_repos.size.to_f / @owners_to_repositories[owner].size) > 0.3
              repositories_to_check.merge(@owners_to_repositories[owner].collect {|r| r.id})
            end
          end
          */

        // Add in the most forked regions from similar watchers.
        /*
        final Set<NeighborRegion> related_regions = find_regions_containing_fellow_watchers(test_regions);
        for (final NeighborRegion region : related_regions)
        {
          repositories_to_check.add(region.most_forked);
        }
        */

        /*************************************
         **** Begin distance calculations ****
         *************************************/
        int test_region_count = 0;

        for (final NeighborRegion test_region : test_regions) {
            test_region_count++;

            final CompletionService<Map<Repository, Float>> cs = new ExecutorCompletionService<Map<Repository, Float>>(
                    pool);
            int training_region_count = 0;

            final Set<Repository> repositories_to_check = new HashSet<Repository>();

            // Add in the most forked repositories from each region we know the test watcher is in.
            for (final NeighborRegion region : test_regions) {
                repositories_to_check.add(region.most_forked);
            }

            for (final Repository repo : training_watcher.repositories) {
                if (repo.parent != null) {
                    repositories_to_check.add(repo.parent);
                }
            }

            /********************************************************************
             *** Handling repositories owned by owners we're already watching ***
             ********************************************************************/
            if (training_watcher.owner_counts.get(test_region.most_forked.owner) != null
                    && (((training_watcher.owner_counts.get(test_region.most_forked.owner).floatValue()
                            / owners_to_repositories.get(test_region.most_forked.owner).size()) > 0.25)
                            || (training_watcher.owner_distribution(test_region.most_forked.owner) > 0.25))) {
                for (final Repository also_owned : owners_to_repositories.get(test_region.most_forked.owner)) {
                    {
                        // Only add repos that are the most forked in their respective regions.
                        if (also_owned.region.most_forked.equals(also_owned)) {
                            repositories_to_check.add(also_owned);
                        }
                    }
                }
            }

            for (final Repository training_repository : repositories_to_check) {
                training_region_count++;

                if (log.isDebugEnabled()) {
                    log.debug(String.format("Processing watcher (%d/%d) - (%d/%d):(%d/%d)", test_watcher_count,
                            test_instances.size(), test_region_count, test_regions.size(),
                            training_region_count, repositories_to_check.size()));
                }

                // Submit distance calculation task if the test watcher isn't already watching the repository.
                cs.submit(new Callable<Map<Repository, Float>>() {

                    public Map<Repository, Float> call() throws Exception {
                        final Map<Repository, Float> ret = new HashMap<Repository, Float>();

                        if (!training_repository.watchers.contains(training_watcher)) {
                            float distance = euclidian_distance(training_watcher, test_region.most_forked,
                                    training_repository);

                            ret.put(training_repository, Float.valueOf(distance));
                        }

                        return ret;
                    }

                });
            }

            // Process the distance calculation results.
            for (int i = 0; i < repositories_to_check.size(); i++) {
                final Map<Repository, Float> distance = cs.take().get();

                for (final Map.Entry<Repository, Float> pair : distance.entrySet()) {
                    storeDistance(results, watcher, pair.getKey(), pair.getValue().floatValue());
                }
            }
        }
    }

    /*
            
            
    =begin
      # Find a set of repositories from fellow watchers that happen to watch a lot of same repositories as the test watcher.
      repositories_to_check.merge find_repositories_containing_fellow_watchers(test_regions)
            
      # Add in the most popular and most forked regions we know the test watcher is in.
      related_regions = find_regions_containing_fellow_watchers(test_regions)
      related_regions.each do |region|
        repositories_to_check << region.most_popular.id
        repositories_to_check << region.most_forked.id
      end
            
      $LOG.info "Added regions from fellow watchers for watcher #{watcher.id} -- new size #{repositories_to_check.size} (+ #{repositories_to_check.size - old_size})"
      old_size = repositories_to_check.size
            
      $LOG.info "Added similarly owned for watcher #{watcher.id} -- new size #{repositories_to_check.size} (+ #{repositories_to_check.size - old_size})"
      old_size = repositories_to_check.size
    =end
            
            
            
            
    =begin
            
    end
            
    results
     */

    return results;
}