Example usage for the java.util.concurrent ThreadPoolExecutor constructor

List of usage examples for the java.util.concurrent ThreadPoolExecutor constructor

Introduction

On this page you can find example usages of the java.util.concurrent ThreadPoolExecutor constructor.

Prototype

public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
        BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, RejectedExecutionHandler handler) 

Document

Creates a new ThreadPoolExecutor with the given initial parameters.
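
Before the real-world examples below, here is a minimal, self-contained sketch (all names are illustrative, not taken from any of the projects cited) that wires up every parameter of this constructor:

import java.util.concurrent.*;

public class SevenArgSketch {
    public static void main(String[] args) throws InterruptedException {
        ThreadFactory factory = r -> {
            Thread t = new Thread(r, "sketch-worker");
            t.setDaemon(true);
            return t;
        };
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2,                             // corePoolSize: threads kept even when idle
                4,                             // maximumPoolSize: upper bound once the queue is full
                30L, TimeUnit.SECONDS,         // keepAliveTime/unit for threads above the core size
                new ArrayBlockingQueue<>(100), // bounded workQueue
                factory,                       // threadFactory used to create workers
                new ThreadPoolExecutor.CallerRunsPolicy()); // handler for rejected tasks
        pool.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}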

Usage

From source file:org.apache.ambari.server.state.services.AlertNoticeDispatchService.java

/**
 * Constructor.
 */
public AlertNoticeDispatchService() {
    m_executor = new ThreadPoolExecutor(0, 2, 5L, TimeUnit.MINUTES, new LinkedBlockingQueue<Runnable>(),
            new AlertDispatchThreadFactory(), new ThreadPoolExecutor.CallerRunsPolicy());

    GsonBuilder gsonBuilder = new GsonBuilder();
    gsonBuilder.registerTypeAdapter(AlertTargetProperties.class, new AlertTargetPropertyDeserializer());

    m_gson = gsonBuilder.create();
}
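
One detail worth noting in this example: because the LinkedBlockingQueue is unbounded, offers to the queue never fail, so the maximumPoolSize of 2 is never reached; the pool runs with at most one live thread, which times out after five idle minutes (corePoolSize is 0). A small illustrative sketch makes that visible:

import java.util.concurrent.*;

public class UnboundedQueueSketch {
    public static void main(String[] args) throws Exception {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(0, 2, 5L, TimeUnit.MINUTES,
                new LinkedBlockingQueue<>(), Executors.defaultThreadFactory(),
                new ThreadPoolExecutor.CallerRunsPolicy());
        for (int i = 0; i < 10; i++) {
            pool.execute(() -> {
                try { Thread.sleep(100); } catch (InterruptedException ignored) { }
            });
        }
        Thread.sleep(200);
        // The unbounded queue never rejects an offer, so no thread
        // beyond the first is ever created; this prints 1.
        System.out.println("pool size: " + pool.getPoolSize());
        pool.shutdown();
    }
}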

From source file:com.ebay.pulsar.metriccalculator.processor.MetricCassandraCollector.java

private void init() {
    workQueue = new LinkedBlockingQueue<Runnable>(m_workingQueueSize);
    worker = new ThreadPoolExecutor(m_workerThreadSize, m_workerThreadSize, 30, TimeUnit.SECONDS, workQueue,
            new NamedThreadFactory("CassandraRequestWorker"), new ThreadPoolExecutor.CallerRunsPolicy());

    timer = MCScheduler.getMCScheduler();
    timer.scheduleWithFixedDelay(new CassandraChecker(), ONE_MINUTE, ONE_MINUTE, TimeUnit.MILLISECONDS);
}
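
NamedThreadFactory above is a project helper, not a JDK class. A typical implementation (a hypothetical sketch, not Pulsar's actual code) simply stamps a prefix and a counter onto each thread name:

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical stand-in for the project's NamedThreadFactory helper.
public class NamedThreadFactory implements ThreadFactory {
    private final String prefix;
    private final AtomicInteger counter = new AtomicInteger(1);

    public NamedThreadFactory(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public Thread newThread(Runnable r) {
        return new Thread(r, prefix + "-" + counter.getAndIncrement());
    }
}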

From source file:org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor.java

/**
 * Creates the BlockingQueue and the ThreadPoolExecutor.
 * @see #createQueue
 */
public void initialize() {
    if (logger.isInfoEnabled()) {
        logger.info(
                "Initializing ThreadPoolExecutor" + (this.beanName != null ? " '" + this.beanName + "'" : ""));
    }
    BlockingQueue<Runnable> queue = createQueue(this.queueCapacity);
    this.threadPoolExecutor = new ThreadPoolExecutor(this.corePoolSize, this.maxPoolSize, this.keepAliveSeconds,
            TimeUnit.SECONDS, queue, this.threadFactory, this.rejectedExecutionHandler);
}
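
createQueue here decides between buffering and direct hand-off: in Spring's ThreadPoolTaskExecutor, a positive capacity yields a bounded LinkedBlockingQueue and anything else a SynchronousQueue. A sketch of that selection logic:

import java.util.concurrent.*;

public class QueueSelectionSketch {
    // Mirrors the documented behavior of ThreadPoolTaskExecutor#createQueue:
    // buffer up to queueCapacity tasks, or hand tasks directly to threads.
    static BlockingQueue<Runnable> createQueue(int queueCapacity) {
        if (queueCapacity > 0) {
            return new LinkedBlockingQueue<>(queueCapacity);
        }
        return new SynchronousQueue<>();
    }

    public static void main(String[] args) {
        System.out.println(createQueue(100).getClass().getSimpleName()); // LinkedBlockingQueue
        System.out.println(createQueue(0).getClass().getSimpleName());   // SynchronousQueue
    }
}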

From source file:org.opendedup.collections.ProgressiveFileBasedCSMap.java

@Override
public synchronized long claimRecords(SDFSEvent evt, LargeBloomFilter bf) throws IOException {
    if (this.isClosed())
        throw new IOException("Hashtable " + this.fileName + " is close");
    executor = new ThreadPoolExecutor(Main.writeThreads + 1, Main.writeThreads + 1, 10, TimeUnit.SECONDS,
            worksQueue, new ProcessPriorityThreadFactory(Thread.MIN_PRIORITY), executionHandler);
    csz = new AtomicLong(0);

    try {
        Lock l = this.gcLock.writeLock();
        l.lock();
        this.runningGC = true;
        try {
            File _fs = new File(fileName);
            lbf = new LargeBloomFilter(_fs.getParentFile(), maxSz, .01, true, true, false);
        } finally {
            l.unlock();
        }
        SDFSLogger.getLog().info("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]");
        SDFSEvent tEvt = SDFSEvent
                .claimInfoEvent("Claiming Records [" + this.getSize() + "] from [" + this.fileName + "]", evt);
        tEvt.maxCt = this.maps.size();
        Iterator<AbstractShard> iter = maps.iterator();
        ArrayList<ClaimShard> excs = new ArrayList<ClaimShard>();
        while (iter.hasNext()) {
            tEvt.curCt++;
            AbstractShard m = null;
            try {
                m = iter.next();
                ClaimShard cms = new ClaimShard(m, bf, lbf, csz);
                excs.add(cms);
                executor.execute(cms);
            } catch (Exception e) {
                tEvt.endEvent("Unable to claim records for " + m + " because : [" + e.toString() + "]",
                        SDFSEvent.ERROR);
                SDFSLogger.getLog().error("Unable to claim records for " + m, e);
                throw new IOException(e);
            }
        }
        executor.shutdown();
        try {
            while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                SDFSLogger.getLog().debug("Awaiting fdisk completion of threads.");
            }
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
        for (ClaimShard cms : excs) {
            if (cms.ex != null)
                throw new IOException(cms.ex);
        }
        this.kSz.getAndAdd(-1 * csz.get());
        tEvt.endEvent("removed [" + csz.get() + "] records");
        SDFSLogger.getLog().info("removed [" + csz.get() + "] records");
        iter = maps.iterator();
        while (iter.hasNext()) {
            AbstractShard m = null;
            try {
                m = iter.next();
                if (!m.isFull() && !m.isActive()) {

                    // SDFSLogger.getLog().info("deleting " +
                    // m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        ProgressiveFileByteArrayLongMap _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value);
                            this.lbf.put(p.key);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {
                            // Write map was full; p is not advanced, so the next
                            // iteration retries the pair with the map returned
                            // by getWriteMap().
                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                } else if (m.isMaxed()) {
                    SDFSLogger.getLog().info("deleting maxed " + m.toString());
                    m.iterInit();
                    KVPair p = m.nextKeyValue();
                    while (p != null) {
                        ProgressiveFileByteArrayLongMap _m = this.getWriteMap();
                        try {
                            _m.put(p.key, p.value);
                            p = m.nextKeyValue();
                        } catch (HashtableFullException e) {
                            // Write map was full; p is not advanced, so the next
                            // iteration retries the pair with the map returned
                            // by getWriteMap().
                        }

                    }
                    int mapsz = maps.size();
                    l = this.gcLock.writeLock();
                    l.lock();
                    try {
                        maps.remove(m);
                    } finally {
                        l.unlock();
                    }
                    mapsz = mapsz - maps.size();
                    SDFSLogger.getLog()
                            .info("removing map " + m.toString() + " sz=" + maps.size() + " rm=" + mapsz);
                    m.vanish();

                    m = null;
                }
            } catch (Exception e) {
                tEvt.endEvent("Unable to compact " + m + " because : [" + e.toString() + "]", SDFSEvent.ERROR);
                SDFSLogger.getLog().error("to compact " + m, e);
                throw new IOException(e);
            }
        }
        l.lock();
        try {
            this.runningGC = false;
        } finally {
            l.unlock();
        }
        return csz.get();
    } finally {
        executor = null;
    }
}
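
The claim phase above uses the standard shutdown-and-drain idiom: submit all shards, call shutdown(), then loop on awaitTermination() until the pool empties, and only afterwards inspect the tasks for stored exceptions. The same idiom in isolation (illustrative names):

import java.util.concurrent.*;

public class ShutdownDrainSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 16; i++) {
            final int shard = i;
            executor.execute(() -> System.out.println("claimed shard " + shard));
        }
        executor.shutdown(); // stop accepting new work; queued tasks still run
        while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            System.out.println("Awaiting completion of claim threads.");
        }
    }
}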

From source file:org.apache.hadoop.hbase.client.crosssite.CrossSiteHTable.java

public static ThreadPoolExecutor getDefaultExecutor(Configuration conf) {
    int maxThreads = conf.getInt("hbase.crosssite.table.threads.max", Integer.MAX_VALUE);
    if (maxThreads <= 0) {
        maxThreads = Integer.MAX_VALUE;
    }
    final SynchronousQueue<Runnable> blockingQueue = new SynchronousQueue<Runnable>();
    RejectedExecutionHandler rejectHandler = new RejectedExecutionHandler() {

        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                blockingQueue.put(r);
            } catch (InterruptedException e) {
                throw new RejectedExecutionException(e);
            }
        }
    };
    long keepAliveTime = conf.getLong("hbase.table.threads.keepalivetime", 60);
    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            blockingQueue, Threads.newDaemonThreadFactory("crosssite-hbase-table"), rejectHandler);
    pool.allowCoreThreadTimeOut(true);
    return pool;
}
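
The rejection handler above is a deliberate pattern: a SynchronousQueue rejects any offer when no worker is free, which grows the pool up to maxThreads; once the thread cap is hit, the handler blocks the submitter on put() instead of throwing, turning saturation into backpressure. The same pattern, compressed (illustrative names):

import java.util.concurrent.*;

public class BlockingHandoffSketch {
    static ThreadPoolExecutor create(int maxThreads, long keepAliveSec) {
        final SynchronousQueue<Runnable> handoff = new SynchronousQueue<>();
        RejectedExecutionHandler blockOnSaturation = (r, executor) -> {
            try {
                handoff.put(r); // park the submitter until a worker takes the task
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RejectedExecutionException(e);
            }
        };
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveSec,
                TimeUnit.SECONDS, handoff, Executors.defaultThreadFactory(), blockOnSaturation);
        pool.allowCoreThreadTimeOut(true); // let even the core thread expire when idle
        return pool;
    }
}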

From source file:org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java

public CrossSiteHBaseAdmin(Configuration conf) throws IOException, KeeperException {
    //    super();
    // create the connection to the global zk of the CrossSiteHBaseAdmin
    Configuration crossSiteZKConf = new Configuration(conf);
    ZKUtil.applyClusterKeyToConf(crossSiteZKConf, conf.get(CrossSiteConstants.CROSS_SITE_ZOOKEEPER));
    this.conf = crossSiteZKConf;
    zkw = new ZooKeeperWatcher(this.conf, "connection to global zookeeper", this, false);
    znodes = new CrossSiteZNodes(zkw);
    this.numRetries = this.conf.getInt("hbase.crosssite.client.retries.number", 5);
    this.retryLongerMultiplier = this.conf.getInt("hbase.crosssite.client.retries.longer.multiplier", 2);
    this.pause = this.conf.getLong("hbase.crosssite.client.pause", 1000);

    int poolSize = this.conf.getInt("hbase.crosssite.admin.pool.size", Integer.MAX_VALUE);
    if (poolSize <= 0) {
        poolSize = Integer.MAX_VALUE;
    }
    final SynchronousQueue<Runnable> blockingQueue = new SynchronousQueue<Runnable>();
    RejectedExecutionHandler rejectHandler = new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                blockingQueue.put(r);
            } catch (InterruptedException e) {
                throw new RejectedExecutionException(e);
            }
        }
    };
    pool = new ThreadPoolExecutor(1, poolSize, 60, TimeUnit.SECONDS, blockingQueue,
            Threads.newDaemonThreadFactory("crosssite-hbase-admin-"), rejectHandler);
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
}

From source file:com.coinblesk.server.service.WalletService.java

/***
 * Adds a listener that fires when the confidence of a transaction we are
 * watching changes due to a new block.
 *
 * After the transaction is {bitcoin.minconf} blocks deep, we remove the tx
 * from the database, as it is considered safe.
 *
 * This method should only be called after the blockchain has been fully
 * downloaded, since the handler is called for every block and transaction
 * we are watching, which would result in high CPU and memory consumption
 * and might exceed the JVM memory limit. Once the download is complete,
 * blocks arrive only sporadically and this is not a problem.
 */
private void addConficenceChangedHandler() {
    // Use a custom thread pool to speed up the processing of transactions.
    // Queue is blocking and limited to 10'000
    // to avoid memory exhaustion. After threshold is reached, the
    // CallerRunsPolicy() forces blocking behavior.
    ContextPropagatingThreadFactory factory = new ContextPropagatingThreadFactory("listenerFactory");
    Executor listenerExecutor = new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(),
            Runtime.getRuntime().availableProcessors(), 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(10000), factory, new ThreadPoolExecutor.CallerRunsPolicy());

    wallet.addTransactionConfidenceEventListener(listenerExecutor, (wallet, tx) -> {
        if (tx.getConfidence().getDepthInBlocks() >= appConfig.getMinConf()
                && !removed.contains(tx.getHash())) {
            LOG.debug("remove tx we got from the network {}", tx);

            try {
                transactionService.removeTransaction(tx);
            } catch (EmptyResultDataAccessException e) {
                LOG.debug("tx was not in tx table {}", tx);
            }

            try {
                txQueueService.removeTx(tx);
            } catch (EmptyResultDataAccessException e) {
                LOG.debug("tx was not in txqueue table {}", tx);
            }

            removed.add(tx.getHash());
        }
    });
}
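
The comment in this example describes the backpressure mechanism precisely: once the 10,000-slot queue fills, CallerRunsPolicy runs the task on the submitting thread, which naturally throttles the producer. A minimal demonstration of that effect (illustrative, with a deliberately tiny queue):

import java.util.concurrent.*;

public class CallerRunsSketch {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<>(1), // one slot, so saturation is immediate
                Executors.defaultThreadFactory(), new ThreadPoolExecutor.CallerRunsPolicy());
        for (int i = 0; i < 4; i++) {
            pool.execute(() -> {
                try { Thread.sleep(50); } catch (InterruptedException ignored) { }
                System.out.println("ran on " + Thread.currentThread().getName());
            });
        }
        // With the worker and the single queue slot occupied, at least one
        // of the later tasks prints "main": the caller executed it itself.
        pool.shutdown();
    }
}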

From source file:com.amazon.mws.shared.MwsConnection.java

/**
 * Get the shared executor service that is used by async calls if no
 * executor is supplied.
 * 
 * @return The shared executor service.
 */
private ExecutorService getSharedES() {
    synchronized (this.getClass()) {
        if (sharedES != null) {
            return sharedES;
        }
        sharedES = new ThreadPoolExecutor(maxAsyncThreads / 10, maxAsyncThreads, 60L, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(maxAsyncQueueSize), new ThreadFactory() {
                    private final AtomicInteger threadNumber = new AtomicInteger(1);

                    public Thread newThread(Runnable task) {
                        Thread thread = new Thread(task, "MWSClient-" + threadNumber.getAndIncrement());
                        thread.setDaemon(true);
                        thread.setPriority(Thread.NORM_PRIORITY);
                        return thread;
                    }
                }, new RejectedExecutionHandler() {
                    public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) {
                        if (!executor.isShutdown()) {
                            log.warn("MWSClient async queue full, running on calling thread.");
                            task.run();
                        } else {
                            throw new RejectedExecutionException();
                        }
                    }
                });
        return sharedES;
    }
}
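
The inline rejection handler here is essentially ThreadPoolExecutor.CallerRunsPolicy with two additions: a warning log line, and an explicit RejectedExecutionException when the pool is already shut down (the built-in policy silently discards the task in that case). If neither addition is needed, the same pool collapses to this simplified variant (illustrative, not the library's actual code):

import java.util.concurrent.*;

public class SharedPoolSketch {
    // Same pool shape as getSharedES(), but using the built-in
    // CallerRunsPolicy instead of the hand-rolled handler.
    static ExecutorService create(int maxAsyncThreads, int maxAsyncQueueSize) {
        return new ThreadPoolExecutor(maxAsyncThreads / 10, maxAsyncThreads,
                60L, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(maxAsyncQueueSize),
                Executors.defaultThreadFactory(), new ThreadPoolExecutor.CallerRunsPolicy());
    }
}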

From source file:com.amazonservices.mws.client.MwsConnection.java

/**
 * Get the shared executor service that is used by async calls if no
 * executor is supplied.
 * 
 * @return The shared executor service.
 */
private ExecutorService getSharedES() {
    synchronized (this.getClass()) {
        if (sharedES != null) {
            return sharedES;
        }
        sharedES = new ThreadPoolExecutor(maxAsyncThreads / 10, maxAsyncThreads, 60L, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(maxAsyncQueueSize), new ThreadFactory() {
                    private final AtomicInteger threadNumber = new AtomicInteger(1);

                    @Override
                    public Thread newThread(Runnable task) {
                        Thread thread = new Thread(task, "MWSClient-" + threadNumber.getAndIncrement());
                        thread.setDaemon(true);
                        thread.setPriority(Thread.NORM_PRIORITY);
                        return thread;
                    }
                }, new RejectedExecutionHandler() {
                    @Override
                    public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) {
                        if (!executor.isShutdown()) {
                            log.warn("MWSClient async queue full, running on calling thread.");
                            task.run();
                        } else {
                            throw new RejectedExecutionException();
                        }
                    }
                });
        return sharedES;
    }
}