Example usage for java.util.concurrent ThreadPoolExecutor execute

Introduction

This page collects example usages of java.util.concurrent ThreadPoolExecutor execute, drawn from open-source projects.

Prototype

public void execute(Runnable command) 

Document

Executes the given task sometime in the future.
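
Before the project examples below, here is a minimal, self-contained sketch of execute; the pool sizes and queue capacity are illustrative choices, not taken from any particular project.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecuteDemo {
    public static void main(String[] args) throws InterruptedException {
        // Two core/maximum threads; additional tasks wait in the bounded queue.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 2, 0, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(100));
        for (int i = 0; i < 5; i++) {
            final int id = i;
            pool.execute(new Runnable() {
                @Override
                public void run() {
                    System.out.println("task " + id + " ran on " + Thread.currentThread().getName());
                }
            });
        }
        pool.shutdown();                             // stop accepting new tasks
        pool.awaitTermination(10, TimeUnit.SECONDS); // wait for submitted tasks to finish
    }
}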

Usage

From source file: org.apache.activemq.usecases.RequestReplyToTopicViaThreeNetworkHopsTest.java

@Test
public void runWithTempTopicReplyTo() throws Exception {
    EchoService echo_svc;
    TopicTrafficGenerator traffic_gen;
    Thread start1;
    Thread start2;
    Thread start3;
    Thread start4;
    ThreadPoolExecutor clientExecPool;
    final CountDownLatch clientCompletionLatch;
    int iter;

    fatalTestError = false;
    testError = false;

    //
    // Execute up to 20 clients at a time to simulate that load.
    //

    clientExecPool = new ThreadPoolExecutor(CONCURRENT_CLIENT_COUNT, CONCURRENT_CLIENT_COUNT, 0,
            TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(10000));
    clientCompletionLatch = new CountDownLatch(TOTAL_CLIENT_ITER);

    // Use threads to avoid startup deadlock since the first broker started waits until
    // it knows the name of the remote broker before finishing its startup, which means
    // the remote must already be running.

    start1 = new Thread() {
        @Override
        public void run() {
            try {
                edge1.start();
            } catch (Exception ex) {
                LOG.error(null, ex);
            }
        }
    };

    start2 = new Thread() {
        @Override
        public void run() {
            try {
                edge2.start();
            } catch (Exception ex) {
                LOG.error(null, ex);
            }
        }
    };

    start3 = new Thread() {
        @Override
        public void run() {
            try {
                core1.start();
            } catch (Exception ex) {
                LOG.error(null, ex);
            }
        }
    };

    start4 = new Thread() {
        @Override
        public void run() {
            try {
                core2.start();
            } catch (Exception ex) {
                LOG.error(null, ex);
            }
        }
    };

    start1.start();
    start2.start();
    start3.start();
    start4.start();

    start1.join();
    start2.join();
    start3.join();
    start4.join();

    traffic_gen = new TopicTrafficGenerator(edge1.getConnectionUrl(), edge2.getConnectionUrl());
    traffic_gen.start();

    //
    // Now start the echo service with that queue.
    //
    echo_svc = new EchoService("echo", edge1.getConnectionUrl());
    echo_svc.start();

    //
    // Run the tests on Temp Topics.
    //

    LOG.info("** STARTING TEMP TOPIC TESTS");
    iter = 0;
    while ((iter < TOTAL_CLIENT_ITER) && (!fatalTestError)) {
        clientExecPool.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    RequestReplyToTopicViaThreeNetworkHopsTest.this.testTempTopic(edge1.getConnectionUrl(),
                            edge2.getConnectionUrl());
                } catch (Exception exc) {
                    LOG.error("test exception", exc);
                    fatalTestError = true;
                    testError = true;
                }

                clientCompletionLatch.countDown();
            }
        });

        iter++;
    }

    boolean allDoneOnTime = clientCompletionLatch.await(20, TimeUnit.MINUTES);

    LOG.info("** FINISHED TEMP TOPIC TESTS AFTER " + iter + " ITERATIONS, testError:" + testError + ", fatal: "
            + fatalTestError + ", onTime:" + allDoneOnTime);

    Thread.sleep(100);

    echo_svc.shutdown();
    traffic_gen.shutdown();

    shutdown();

    assertTrue("test completed in time", allDoneOnTime);
    assertTrue("no errors", !testError);
}

From source file: org.apache.cxf.systest.jaxrs.JAXRSContinuationsTest.java

private void doTestContinuation(String pathSegment) throws Exception {
    ThreadPoolExecutor executor = new ThreadPoolExecutor(5, 5, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(10));
    CountDownLatch startSignal = new CountDownLatch(1);
    CountDownLatch doneSignal = new CountDownLatch(5);

    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/1", "1",
            "CXF in Action1", startSignal, doneSignal));
    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/2", "2",
            "CXF in Action2", startSignal, doneSignal));
    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/3", "3",
            "CXF in Action3", startSignal, doneSignal));
    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/4", "4",
            "CXF in Action4", startSignal, doneSignal));
    executor.execute(new BookWorker("http://localhost:" + PORT + "/bookstore/" + pathSegment + "/5", "5",
            "CXF in Action5", startSignal, doneSignal));

    startSignal.countDown();
    doneSignal.await(60, TimeUnit.SECONDS);
    executor.shutdownNow();
    assertEquals("Not all invocations have completed", 0, doneSignal.getCount());
}
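
The test above relies on a common two-latch pattern: every worker blocks on startSignal, so all five requests fire at once, and each counts down doneSignal when finished. A minimal, hypothetical worker in the style of BookWorker (the class name and body here are illustrative, not the CXF test's actual code):

import java.util.concurrent.CountDownLatch;

class Worker implements Runnable {
    private final CountDownLatch startSignal;
    private final CountDownLatch doneSignal;

    Worker(CountDownLatch startSignal, CountDownLatch doneSignal) {
        this.startSignal = startSignal;
        this.doneSignal = doneSignal;
    }

    @Override
    public void run() {
        try {
            startSignal.await();    // block until the test releases all workers at once
            // ... issue the HTTP request and verify the response here ...
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            doneSignal.countDown(); // always report completion
        }
    }
}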

From source file: org.apache.hadoop.hbase.regionserver.CompactSplit.java

/**
 * @param r region store belongs to
 * @param s Store to request compaction on
 * @param why Why compaction requested -- used in debug messages
 * @param priority override the default priority (NO_PRIORITY == decide)
 * @param request custom compaction request. Can be <tt>null</tt> in which case a simple
 *          compaction will be used.
 */
private synchronized CompactionRequest requestCompactionInternal(final Region r, final Store s,
        final String why, int priority, CompactionRequest request, boolean selectNow, User user)
        throws IOException {
    if (this.server.isStopped()
            || (r.getTableDescriptor() != null && !r.getTableDescriptor().isCompactionEnabled())) {
        return null;
    }

    CompactionContext compaction = null;
    if (selectNow) {
        compaction = selectCompaction(r, s, priority, request, user);
        if (compaction == null)
            return null; // message logged inside
    }

    final RegionServerSpaceQuotaManager spaceQuotaManager = this.server.getRegionServerSpaceQuotaManager();
    if (spaceQuotaManager != null
            && spaceQuotaManager.areCompactionsDisabled(r.getTableDescriptor().getTableName())) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Ignoring compaction request for " + r + " as an active space quota violation "
                    + " policy disallows compactions.");
        }
        return null;
    }

    // We assume that most compactions are small. So, put system compactions into small
    // pool; we will do selection there, and move to large pool if necessary.
    ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(compaction.getRequest().getSize()))
            ? longCompactions
            : shortCompactions;
    pool.execute(new CompactionRunner(s, r, compaction, pool, user));
    if (LOG.isDebugEnabled()) {
        String type = (pool == shortCompactions) ? "Small " : "Large ";
        LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system")
                + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
    }
    return selectNow ? compaction.getRequest() : null;
}

From source file: org.apache.hadoop.hbase.regionserver.CompactSplitThread.java

/**
 * @param r HRegion store belongs to
 * @param s Store to request compaction on
 * @param why Why compaction requested -- used in debug messages
 * @param priority override the default priority (NO_PRIORITY == decide)
 * @param request custom compaction request. Can be <tt>null</tt> in which case a simple
 *          compaction will be used.
 */
private synchronized CompactionRequest requestCompactionInternal(final HRegion r, final Store s,
        final String why, int priority, CompactionRequest request, boolean selectNow) throws IOException {
    if (this.server.isStopped() || (r.getTableDesc() != null && !r.getTableDesc().isCompactionEnabled())) {
        return null;
    }

    CompactionContext compaction = null;
    if (selectNow) {
        compaction = selectCompaction(r, s, priority, request);
        if (compaction == null)
            return null; // message logged inside
    }

    // We assume that most compactions are small. So, put system compactions into small
    // pool; we will do selection there, and move to large pool if necessary.
    long size = selectNow ? compaction.getRequest().getSize() : 0;
    ThreadPoolExecutor pool = (selectNow && s.throttleCompaction(size)) ? longCompactions : shortCompactions;
    pool.execute(new CompactionRunner(s, r, compaction, pool));
    if (LOG.isDebugEnabled()) {
        String type = (pool == shortCompactions) ? "Small " : "Large ";
        LOG.debug(type + "Compaction requested: " + (selectNow ? compaction.toString() : "system")
                + (why != null && !why.isEmpty() ? "; Because: " + why : "") + "; " + this);
    }
    return selectNow ? compaction.getRequest() : null;
}

From source file: org.apache.hadoop.hbase.util.FSUtils.java

/**
 * This function is to scan the root path of the file system to get either the
 * mapping between the region name and its best locality region server or the
 * degree of locality of each region on each of the servers having at least
 * one block of that region. The output map parameters are both optional.
 *
 * @param conf
 *          the configuration to use
 * @param desiredTable
 *          the table you wish to scan locality for
 * @param threadPoolSize
 *          the thread pool size to use
 * @param regionToBestLocalityRSMapping
 *          the map into which to put the best locality mapping or null
 * @param regionDegreeLocalityMapping
 *          the map into which to put the locality degree mapping or null,
 *          must be a thread-safe implementation
 * @throws IOException
 *           in case of file system errors or interrupts
 */
private static void getRegionLocalityMappingFromFS(final Configuration conf, final String desiredTable,
        int threadPoolSize, Map<String, String> regionToBestLocalityRSMapping,
        Map<String, Map<String, Float>> regionDegreeLocalityMapping) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path rootPath = FSUtils.getRootDir(conf);
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    Path queryPath;
    // The table files are in ${hbase.rootdir}/data/<namespace>/<table>/*
    if (null == desiredTable) {
        queryPath = new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/");
    } else {
        queryPath = new Path(FSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/");
    }

    // reject all paths that are not appropriate
    PathFilter pathFilter = new PathFilter() {
        @Override
        public boolean accept(Path path) {
            // this is the region name; it may get some noise data
            if (null == path) {
                return false;
            }

            // no parent?
            Path parent = path.getParent();
            if (null == parent) {
                return false;
            }

            String regionName = path.getName();
            if (null == regionName) {
                return false;
            }

            if (!regionName.toLowerCase().matches("[0-9a-f]+")) {
                return false;
            }
            return true;
        }
    };

    FileStatus[] statusList = fs.globStatus(queryPath, pathFilter);

    if (null == statusList) {
        return;
    } else {
        LOG.debug("Query Path: " + queryPath + " ; # list of files: " + statusList.length);
    }

    // lower the number of threads in case we have very few expected regions
    threadPoolSize = Math.min(threadPoolSize, statusList.length);

    // run in multiple threads
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadPoolSize, threadPoolSize, 60, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(statusList.length));
    try {
        // ignore all file status items that are not of interest
        for (FileStatus regionStatus : statusList) {
            if (null == regionStatus) {
                continue;
            }

            if (!regionStatus.isDirectory()) {
                continue;
            }

            Path regionPath = regionStatus.getPath();
            if (null == regionPath) {
                continue;
            }

            tpe.execute(new FSRegionScanner(fs, regionPath, regionToBestLocalityRSMapping,
                    regionDegreeLocalityMapping));
        }
    } finally {
        tpe.shutdown();
        int threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 60 * 1000);
        try {
            // here we wait until TPE terminates, which is either naturally or by
            // exceptions in the execution of the threads
            while (!tpe.awaitTermination(threadWakeFrequency, TimeUnit.MILLISECONDS)) {
                // printing out rough estimate, so as to not introduce
                // AtomicInteger
                LOG.info("Locality checking is underway: { Scanned Regions : " + tpe.getCompletedTaskCount()
                        + "/" + tpe.getTaskCount() + " }");
            }
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }

    long overhead = EnvironmentEdgeManager.currentTimeMillis() - startTime;
    String overheadMsg = "Scan DFS for locality info takes " + overhead + " ms";

    LOG.info(overheadMsg);
}

From source file: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService.java

/**
 * Execute the task sometime in the future, using ThreadPools.
 */
synchronized void execute(File root, Runnable task) {
    if (executors == null) {
        throw new RuntimeException("AsyncDiskService is already shutdown");
    }
    ThreadPoolExecutor executor = executors.get(root);
    if (executor == null) {
        throw new RuntimeException("Cannot find root " + root + " for execution of task " + task);
    } else {
        executor.execute(task);
    }
}

From source file: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskAsyncLazyPersistService.java

/**
 * Execute the task sometime in the future, using ThreadPools.
 */
synchronized void execute(File root, Runnable task) {
    if (executors == null) {
        throw new RuntimeException("AsyncLazyPersistService is already shutdown");
    }
    ThreadPoolExecutor executor = executors.get(root);
    if (executor == null) {
        throw new RuntimeException("Cannot find root " + root + " for execution of task " + task);
    } else {
        executor.execute(task);
    }
}

From source file: org.apache.hadoop.util.AsyncDiskService.java

/**
 * Execute the task sometime in the future, using ThreadPools.
 */
public synchronized void execute(String root, Runnable task) {
    ThreadPoolExecutor executor = executors.get(root);
    if (executor == null) {
        throw new RuntimeException("Cannot find root " + root + " for execution of task " + task);
    } else {
        executor.execute(task);
    }
}
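
The three services above share one design: a dedicated executor per disk root, looked up at submission time, so slow I/O on one volume cannot starve tasks bound for another. A minimal, hypothetical dispatcher in that style (the class and its single-thread-per-root sizing are illustrative, not Hadoop's actual configuration):

import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class PerRootExecutors {
    // One pool per root: tasks for different volumes run independently,
    // while tasks for the same volume are serialized by its single thread.
    private final Map<File, ThreadPoolExecutor> executors = new HashMap<File, ThreadPoolExecutor>();

    synchronized void addRoot(File root) {
        executors.put(root,
                new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>()));
    }

    synchronized void execute(File root, Runnable task) {
        ThreadPoolExecutor executor = executors.get(root);
        if (executor == null) {
            throw new RuntimeException("Cannot find root " + root + " for execution of task " + task);
        }
        executor.execute(task);
    }

    synchronized void shutdown() {
        for (ThreadPoolExecutor executor : executors.values()) {
            executor.shutdown();
        }
    }
}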

From source file: org.apache.hama.graph.GraphJobRunner.java

/**
 * Do the main logic of a superstep, namely checking if vertices are active,
 * feeding compute with messages and controlling combiners/aggregators. We
 * iterate over our messages and vertices in sorted order. That means that we
 * need to seek the first vertex that has the same ID as the iterated message.
 */
@SuppressWarnings("unchecked")
private void doSuperstep(GraphJobMessage currentMessage,
        BSPPeer<Writable, Writable, Writable, Writable, GraphJobMessage> peer) throws IOException {
    this.errorCount.set(0);
    long startTime = System.currentTimeMillis();

    this.changedVertexCnt = 0;
    vertices.startSuperstep();

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setMaximumPoolSize(conf.getInt(DEFAULT_THREAD_POOL_SIZE, 64));
    executor.setRejectedExecutionHandler(retryHandler);

    long loopStartTime = System.currentTimeMillis();
    while (currentMessage != null) {
        executor.execute(new ComputeRunnable(currentMessage));

        currentMessage = peer.getCurrentMessage();
    }
    LOG.info("Total time spent for superstep-" + peer.getSuperstepCount() + " looping: "
            + (System.currentTimeMillis() - loopStartTime) + " ms");

    executor.shutdown();
    try {
        executor.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    if (errorCount.get() > 0) {
        throw new IOException("there were " + errorCount + " exceptions during compute vertices.");
    }

    Iterator it = vertices.iterator();
    while (it.hasNext()) {
        Vertex<V, E, M> vertex = (Vertex<V, E, M>) it.next();
        if (!vertex.isHalted() && !vertex.isComputed()) {
            vertex.compute(Collections.<M>emptyList());
            vertices.finishVertexComputation(vertex);
        }
    }

    getAggregationRunner().sendAggregatorValues(peer, vertices.getActiveVerticesNum(), this.changedVertexCnt);
    this.iteration++;

    LOG.info("Total time spent for superstep-" + peer.getSuperstepCount() + " computing vertices: "
            + (System.currentTimeMillis() - startTime) + " ms");

    startTime = System.currentTimeMillis();
    finishSuperstep();
    LOG.info("Total time spent for superstep-" + peer.getSuperstepCount() + " synchronizing: "
            + (System.currentTimeMillis() - startTime) + " ms");
}

From source file: org.apache.hama.graph.GraphJobRunner.java

/**
 * Seed the vertices first with their own values in compute. This is the first
 * superstep after the vertices have been loaded.
 */
private void doInitialSuperstep(BSPPeer<Writable, Writable, Writable, Writable, GraphJobMessage> peer)
        throws IOException {
    this.changedVertexCnt = 0;
    this.errorCount.set(0);
    vertices.startSuperstep();

    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setMaximumPoolSize(conf.getInt(DEFAULT_THREAD_POOL_SIZE, 64));
    executor.setRejectedExecutionHandler(retryHandler);

    for (V v : vertices.keySet()) {
        executor.execute(new ComputeRunnable(v));
    }

    executor.shutdown();
    try {
        executor.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    if (errorCount.get() > 0) {
        throw new IOException("there were " + errorCount + " exceptions during compute vertices.");
    }

    getAggregationRunner().sendAggregatorValues(peer, 1, this.changedVertexCnt);
    iteration++;
    finishSuperstep();
}
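
Both GraphJobRunner methods above tune the pool after construction rather than building a ThreadPoolExecutor directly: Executors.newCachedThreadPool() is backed by a ThreadPoolExecutor, so the cast exposes its setters. A minimal sketch of that tuning step (CallerRunsPolicy is an illustrative stand-in for the runner's retryHandler, which is not shown in the excerpts):

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

public class TunedPool {
    static ThreadPoolExecutor newTunedPool(int maxThreads) {
        // The cached pool starts with zero threads and grows on demand;
        // capping its maximum bounds the number of concurrent tasks.
        ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
        executor.setMaximumPoolSize(maxThreads);
        // When the pool rejects a task, run it on the submitting thread instead.
        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
        return executor;
    }
}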