Example usage for java.util.concurrent ExecutorCompletionService submit

Introduction

This page lists example usages of java.util.concurrent.ExecutorCompletionService.submit, collected from open source projects.

Prototype

public Future<V> submit(Callable<V> task) 
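
A minimal, self-contained sketch of the typical pattern (class and variable names here are illustrative, not taken from the projects below): tasks are submitted through the completion service, and their results are then retrieved with take() in completion order rather than submission order.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SubmitExample {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
            // Wrap the executor so completed results can be consumed
            // in completion order rather than submission order.
            ExecutorCompletionService<Integer> ecs = new ExecutorCompletionService<Integer>(pool);
            for (int i = 0; i < 10; i++) {
                final int n = i;
                ecs.submit(new Callable<Integer>() {
                    @Override
                    public Integer call() {
                        return n * n; // result made available through a Future
                    }
                });
            }
            // take() blocks until the next task completes; get() returns its
            // result or rethrows the task's exception as an ExecutionException.
            int sum = 0;
            for (int i = 0; i < 10; i++) {
                sum += ecs.take().get();
            }
            System.out.println("Sum of squares: " + sum);
        } finally {
            pool.shutdown();
        }
    }
}

The examples below follow the same submit/take structure; the companion overload submit(Runnable task, V result) works identically for tasks that do not compute a value.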

Usage

From source file: org.apache.hadoop.hbase.util.ModifyRegionUtils.java

/**
 * Execute the task on the specified set of regions.
 *
 * @param exec Thread Pool Executor
 * @param regions {@link HRegionInfo} that describes the regions to edit
 * @param task {@link RegionEditTask} custom code to edit the region
 * @throws IOException
 */
public static void editRegions(final ThreadPoolExecutor exec, final Collection<HRegionInfo> regions,
        final RegionEditTask task) throws IOException {
    final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(exec);
    for (final HRegionInfo hri : regions) {
        completionService.submit(new Callable<Void>() {
            @Override
            public Void call() throws IOException {
                task.editRegion(hri);
                return null;
            }
        });
    }

    try {
        for (HRegionInfo hri : regions) {
            completionService.take().get();
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
}

From source file: org.apache.hadoop.hbase.util.TestIdLock.java

@Test
public void testMultipleClients() throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
    try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);
        for (int i = 0; i < NUM_THREADS; ++i)
            ecs.submit(new IdLockTestThread("client_" + i));
        for (int i = 0; i < NUM_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
        }
        idLock.assertMapEmpty();
    } finally {
        exec.shutdown();
        exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
    }
}

From source file: org.apache.hadoop.hbase.util.TestIdReadWriteLock.java

@Test(timeout = 60000)
public void testMultipleClients() throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
    try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);
        for (int i = 0; i < NUM_THREADS; ++i)
            ecs.submit(new IdLockTestThread("client_" + i));
        for (int i = 0; i < NUM_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
        }
        // make sure the entry pool will be cleared after GC and purge call
        int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
        LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
        assertEquals(0, entryPoolSize);
    } finally {
        exec.shutdown();
        exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
    }
}

From source file: org.apache.hadoop.hdfs.server.datanode.TestBatchIbr.java

static ExecutorService createExecutor() throws Exception {
    final ExecutorService executor = Executors.newFixedThreadPool(NUM_THREADS);
    final ExecutorCompletionService<Path> completion = new ExecutorCompletionService<>(executor);

    // initialize all threads and buffers
    for (int i = 0; i < NUM_THREADS; i++) {
        completion.submit(new Callable<Path>() {
            @Override
            public Path call() throws Exception {
                IO_BUF.get();
                VERIFY_BUF.get();
                return null;
            }
        });
    }
    for (int i = 0; i < NUM_THREADS; i++) {
        completion.take().get();
    }
    return executor;
}

From source file: org.apache.hadoop.hdfs.server.datanode.TestBatchIbr.java

static void runIbrTest(final long ibrInterval) throws Exception {
    final ExecutorService executor = createExecutor();
    final Random ran = new Random();

    final Configuration conf = newConf(ibrInterval);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();

    try {
        final String dirPathString = "/dir";
        final Path dir = new Path(dirPathString);
        dfs.mkdirs(dir);

        // start testing
        final long testStartTime = Time.monotonicNow();
        final ExecutorCompletionService<Path> createService = new ExecutorCompletionService<>(executor);
        final AtomicLong createFileTime = new AtomicLong();
        final AtomicInteger numBlockCreated = new AtomicInteger();

        // create files
        for (int i = 0; i < NUM_FILES; i++) {
            createService.submit(new Callable<Path>() {
                @Override
                public Path call() throws Exception {
                    final long start = Time.monotonicNow();
                    try {
                        final long seed = ran.nextLong();
                        final int numBlocks = ran.nextInt(MAX_BLOCK_NUM) + 1;
                        numBlockCreated.addAndGet(numBlocks);
                        return createFile(dir, numBlocks, seed, dfs);
                    } finally {
                        createFileTime.addAndGet(Time.monotonicNow() - start);
                    }
                }
            });
        }

        // verify files
        final ExecutorCompletionService<Boolean> verifyService = new ExecutorCompletionService<>(executor);
        final AtomicLong verifyFileTime = new AtomicLong();
        for (int i = 0; i < NUM_FILES; i++) {
            final Path file = createService.take().get();
            verifyService.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    final long start = Time.monotonicNow();
                    try {
                        return verifyFile(file, dfs);
                    } finally {
                        verifyFileTime.addAndGet(Time.monotonicNow() - start);
                    }
                }
            });
        }
        for (int i = 0; i < NUM_FILES; i++) {
            Assert.assertTrue(verifyService.take().get());
        }
        final long testEndTime = Time.monotonicNow();

        LOG.info("ibrInterval=" + ibrInterval + " ("
                + toConfString(DFS_BLOCKREPORT_INCREMENTAL_INTERVAL_MSEC_KEY, conf) + "), numBlockCreated="
                + numBlockCreated);
        LOG.info("duration=" + toSecondString(testEndTime - testStartTime) + ", createFileTime="
                + toSecondString(createFileTime.get()) + ", verifyFileTime="
                + toSecondString(verifyFileTime.get()));
        LOG.info("NUM_FILES=" + NUM_FILES + ", MAX_BLOCK_NUM=" + MAX_BLOCK_NUM + ", BLOCK_SIZE=" + BLOCK_SIZE
                + ", NUM_THREADS=" + NUM_THREADS + ", NUM_DATANODES=" + NUM_DATANODES);
        logIbrCounts(cluster.getDataNodes());
    } finally {
        executor.shutdown();
        cluster.shutdown();
    }
}

From source file: org.apache.hadoop.yarn.server.nodemanager.amrmproxy.BaseAMRMProxyTest.java

/**
 * This helper method invokes the specified function in parallel for each
 * end point in the specified list using a thread pool and returns the
 * responses received from the function. It implements the logic required for
 * dispatching requests in parallel and waiting for the responses. If a
 * function call fails or times out, it is ignored and processing proceeds
 * with the rest, so the number of responses returned can be less than the
 * number of end points specified.
 *
 * @param testContexts the list of test contexts to dispatch requests for
 * @param func the function to invoke for each test context
 * @return the responses received from the successful invocations
 */
protected <T, R> List<R> runInParallel(List<T> testContexts, final Function<T, R> func) {
    ExecutorCompletionService<R> completionService = new ExecutorCompletionService<R>(this.getThreadPool());
    LOG.info("Sending requests to endpoints asynchronously. Number of test contexts=" + testContexts.size());
    for (int index = 0; index < testContexts.size(); index++) {
        final T testContext = testContexts.get(index);

        LOG.info("Adding request to threadpool for test context: " + testContext.toString());

        completionService.submit(new Callable<R>() {
            @Override
            public R call() throws Exception {
                LOG.info("Sending request. Test context:" + testContext.toString());

                R response = null;
                try {
                    response = func.invoke(testContext);
                    LOG.info("Successfully sent request for context: " + testContext.toString());
                } catch (Throwable ex) {
                    LOG.error("Failed to process request for context: " + testContext);
                    response = null;
                }

                return response;
            }
        });
    }

    ArrayList<R> responseList = new ArrayList<R>();
    LOG.info("Waiting for responses from endpoints. Number of contexts=" + testContexts.size());
    for (int i = 0; i < testContexts.size(); ++i) {
        try {
            final Future<R> future = completionService.take();
            final R response = future.get(3000, TimeUnit.MILLISECONDS);
            responseList.add(response);
        } catch (Throwable e) {
            LOG.error("Failed to process request " + e.getMessage());
        }
    }

    return responseList;
}

From source file: org.apache.hama.bsp.LocalBSPRunner.java

@Override
public JobStatus submitJob(BSPJobID jobID, String jobFile) throws IOException {
    this.jobFile = jobFile;

    if (fs == null) {
        this.fs = FileSystem.get(conf);
    }

    // Add the resource to the current configuration, because adding the
    // resource in the HamaConfiguration constructor (ID, FILE) does not take
    // local->HDFS connections into account. That would lead to the
    // configuration not being serialized, which results in failure.
    conf.addResource(fs.open(new Path(jobFile)));

    conf.setClass(MessageManagerFactory.MESSAGE_MANAGER_CLASS, LocalMessageManager.class, MessageManager.class);
    conf.setClass(SyncServiceFactory.SYNC_PEER_CLASS, LocalSyncClient.class, SyncClient.class);

    BSPJob job = new BSPJob(new HamaConfiguration(conf), jobID);
    currentJobStatus = new JobStatus(jobID, System.getProperty("user.name"), 0L, JobStatus.RUNNING,
            globalCounters);

    int numBspTask = job.getNumBspTask();

    String jobSplit = conf.get("bsp.job.split.file");

    BSPJobClient.RawSplit[] splits = null;
    if (jobSplit != null) {

        DataInputStream splitFile = fs.open(new Path(jobSplit));

        try {
            splits = BSPJobClient.readSplitFile(splitFile);
        } finally {
            splitFile.close();
        }
    }

    threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(numBspTask);
    @SuppressWarnings("rawtypes")
    ExecutorCompletionService<BSPPeerImpl> completionService = new ExecutorCompletionService<BSPPeerImpl>(
            threadPool);

    peerNames = new String[numBspTask];
    for (int i = 0; i < numBspTask; i++) {
        peerNames[i] = "local:" + i;
        completionService.submit(new BSPRunner(new HamaConfiguration(conf), job, i, splits));
        globalCounters.incrCounter(JobInProgress.JobCounter.LAUNCHED_TASKS, 1L);
    }

    new Thread(new ThreadObserver(numBspTask, completionService)).start();
    return currentJobStatus;
}

From source file: org.apache.tika.batch.fs.strawman.StrawManTikaAppDriver.java

public static void main(String[] args) {
    long start = new Date().getTime();
    if (args.length < 6) {
        System.err.println(StrawManTikaAppDriver.usage());
        System.exit(1);
    }
    Path inputDir = Paths.get(args[0]);
    Path outputDir = Paths.get(args[1]);
    int totalThreads = Integer.parseInt(args[2]);

    List<String> commandLine = new ArrayList<>();
    commandLine.addAll(Arrays.asList(args).subList(3, args.length));
    totalThreads = (totalThreads < 1) ? 1 : totalThreads;
    ExecutorService ex = Executors.newFixedThreadPool(totalThreads);
    ExecutorCompletionService<Integer> completionService = new ExecutorCompletionService<>(ex);

    for (int i = 0; i < totalThreads; i++) {
        StrawManTikaAppDriver driver = new StrawManTikaAppDriver(inputDir, outputDir, totalThreads,
                commandLine.toArray(new String[commandLine.size()]));
        completionService.submit(driver);
    }

    int totalFilesProcessed = 0;
    for (int i = 0; i < totalThreads; i++) {
        try {
            Future<Integer> future = completionService.take();
            if (future != null) {
                totalFilesProcessed += future.get();
            }
        } catch (InterruptedException | ExecutionException e) {
            LOG.error(e.getMessage(), e);
        }
    }
    double elapsedSeconds = (double) (new Date().getTime() - start) / (double) 1000;
    LOG.info("Processed {} in {} seconds", totalFilesProcessed, elapsedSeconds);
}

From source file: org.commonjava.util.partyline.ManyReadersWithPreExistingWriterTest.java

private void executeTestIteration() throws Exception {
    ThreadContext.getContext(true);

    ExecutorCompletionService<String> completionService = new ExecutorCompletionService<String>(executor);

    final AtomicBoolean readFlag = new AtomicBoolean(false);
    final AtomicBoolean writeFlag = new AtomicBoolean(false);

    completionService.submit(writer(writeFlag, readFlag));
    for (int i = 0; i < THREADS; i++) {
        completionService.submit(reader(readFlag));
    }

    writeFlag.set(true);

    for (int i = 0; i < (THREADS + 1); i++) {
        String error = completionService.take().get();
        if (error != null) {
            logger.info(error);
        }
        assertThat(error, nullValue());
    }

    ThreadContext.clearContext();
}

From source file: org.geowebcache.layer.wms.WMSLayerTest.java

private List<ConveyorTile> getTiles(StorageBroker storageBroker, TileRange tr, final WMSLayer tl)
        throws Exception {
    final String layerName = tl.getName();
    // define the meta tile size to 1,1 so we hit all the tiles
    final TileRangeIterator trIter = new TileRangeIterator(tr, new int[] { 1, 1 });

    long[] gridLoc = trIter.nextMetaGridLocation(new long[3]);

    // six concurrent requests max
    ExecutorService requests = Executors.newFixedThreadPool(6);
    ExecutorCompletionService<ConveyorTile> completer = new ExecutorCompletionService<ConveyorTile>(requests);

    List<Future<ConveyorTile>> futures = new ArrayList<Future<ConveyorTile>>();
    while (gridLoc != null) {
        Map<String, String> fullParameters = tr.getParameters();

        final ConveyorTile tile = new ConveyorTile(storageBroker, layerName, tr.getGridSetId(), gridLoc,
                tr.getMimeType(), fullParameters, null, null);
        futures.add(completer.submit(new Callable<ConveyorTile>() {

            public ConveyorTile call() throws Exception {
                try {
                    return tl.getTile(tile);
                } catch (OutsideCoverageException oce) {
                    return null;
                }
            }
        }));

        gridLoc = trIter.nextMetaGridLocation(gridLoc);
    }

    // these assertions could be externalized
    List<ConveyorTile> results = new ArrayList<ConveyorTile>();
    for (int i = 0; i < futures.size(); i++) {
        ConveyorTile get = futures.get(i).get();
        if (get != null) {
            results.add(get);
        }
    }

    requests.shutdown();

    return results;
}