List of usage examples for java.util.concurrent ThreadPoolExecutor allowCoreThreadTimeOut
public void allowCoreThreadTimeOut(boolean value): sets the policy governing whether core threads may time out and terminate if no tasks arrive within the keep-alive time, being replaced if needed when new tasks arrive.
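As background for the examples below, here is a minimal, self-contained sketch of what the flag does (not taken from any of the listed projects; the class name and timing values are illustrative): once allowCoreThreadTimeOut(true) is set, core threads also terminate after the keep-alive period when the pool is idle, so an idle pool can shrink to zero threads. Note that the keep-alive time must be greater than zero, otherwise the call throws IllegalArgumentException.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CoreThreadTimeOutDemo {
    public static void main(String[] args) throws InterruptedException {
        // 4 core threads with a 1-second keep-alive; the keep-alive must be > 0
        // before allowCoreThreadTimeOut(true) is legal.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(4, 4, 1, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());
        executor.allowCoreThreadTimeOut(true);

        for (int i = 0; i < 4; i++) {
            executor.execute(() -> { /* short-lived task */ });
        }
        // Typically reports 4: one worker was started per submitted task, up to the core size.
        System.out.println("Pool size after submitting tasks: " + executor.getPoolSize());

        Thread.sleep(3000); // idle for longer than the keep-alive time
        // Reports 0: with allowCoreThreadTimeOut(true), even core threads time out.
        System.out.println("Pool size after idling: " + executor.getPoolSize());

        executor.shutdown();
    }
}

The HBase and HDFS examples below rely on this same behavior to keep per-volume and per-connection pools from pinning idle threads.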
From source file:com.opengamma.language.context.DefaultGlobalContextEventHandler.java
public DefaultGlobalContextEventHandler() {
    final int cores = Runtime.getRuntime().availableProcessors();
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(cores, cores, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
    executor.allowCoreThreadTimeOut(true);
    executor.setThreadFactory(new NamedThreadPoolFactory("S-Worker"));
    setSaturatingExecutor(executor);
}
From source file:com.techcavern.pircbotz.hooks.managers.ThreadedListenerManager.java
/**
 * Configures with the default cached thread pool.
 */
public ThreadedListenerManager() {
    managerNumber = MANAGER_COUNT.getAndIncrement();
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("listenerPool" + managerNumber + "-thread%d").daemon(true).build();
    ThreadPoolExecutor defaultPool = (ThreadPoolExecutor) Executors.newCachedThreadPool(factory);
    defaultPool.allowCoreThreadTimeOut(true);
    this.pool = defaultPool;
}
From source file:org.lizardirc.beancounter.Beancounter.java
private ExecutorService constructExecutorService() {
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("primaryListenerPool-thread%d").daemon(true).build();
    ThreadPoolExecutor ret = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<>(), factory);
    ret.allowCoreThreadTimeOut(true);
    return ret;
}
From source file:org.apache.hadoop.hbase.util.MultiHConnection.java
private void createBatchPool(Configuration conf) {
    // Use the same config for keep alive as in ConnectionImplementation.getBatchPool();
    int maxThreads = conf.getInt("hbase.multihconnection.threads.max", 256);
    int coreThreads = conf.getInt("hbase.multihconnection.threads.core", 256);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60);
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(maxThreads
            * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            workQueue, Threads.newDaemonThreadFactory("MultiHConnection" + "-shared-"));
    tpe.allowCoreThreadTimeOut(true);
    this.batchPool = tpe;
}
From source file:org.apache.hadoop.util.AsyncDiskService.java
/**
 * Create an AsyncDiskService with a set of volumes (specified by their
 * root directories).
 *
 * The AsyncDiskService uses one ThreadPool per volume to do the async
 * disk operations.
 *
 * @param volumes The roots of the file system volumes.
 */
public AsyncDiskService(String[] volumes) {
    threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(threadGroup, r);
        }
    };

    // Create one ThreadPool per volume
    for (int v = 0; v < volumes.length; v++) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(CORE_THREADS_PER_VOLUME,
                MAXIMUM_THREADS_PER_VOLUME, THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), threadFactory);
        // This can reduce the number of running threads
        executor.allowCoreThreadTimeOut(true);
        executors.put(volumes[v], executor);
    }
}
From source file:org.green.code.async.executor.ThreadPoolTaskExecutor.java
protected ExecutorService initializeExecutor(ThreadFactory threadFactory,
        RejectedExecutionHandler rejectedExecutionHandler) {
    BlockingQueue<Runnable> queue = createQueue(this.queueCapacity);
    // I am using my custom ThreadPoolExecutor
    ThreadPoolExecutor executor = new ThreadPoolExecutor(this.corePoolSize, this.maxPoolSize,
            this.keepAliveSeconds, TimeUnit.SECONDS, queue, threadFactory, rejectedExecutionHandler);
    if (this.allowCoreThreadTimeOut) {
        executor.allowCoreThreadTimeOut(true);
    }
    this.threadPoolExecutor = executor;
    return executor;
}
From source file:org.apache.hadoop.hdfs.server.datanode.FSDatasetAsyncDiskService.java
/**
 * Create an FSDatasetAsyncDiskService with a set of volumes (specified by
 * their root directories).
 *
 * The service uses one ThreadPool per volume to do the async disk
 * operations.
 *
 * @param volumes The roots of the data volumes.
 */
FSDatasetAsyncDiskService(File[] volumes) {
    threadFactory = new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(threadGroup, r);
        }
    };

    // Create one ThreadPool per volume
    for (int v = 0; v < volumes.length; v++) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(CORE_THREADS_PER_VOLUME,
                MAXIMUM_THREADS_PER_VOLUME, THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), threadFactory);
        // This can reduce the number of running threads
        executor.allowCoreThreadTimeOut(true);
        executors.put(volumes[v], executor);
    }
}
From source file:org.apache.hadoop.hbase.ipc.TestFifoRpcScheduler.java
private ThreadPoolExecutor disableHandlers(RpcScheduler scheduler) {
    ThreadPoolExecutor rpcExecutor = null;
    try {
        Field ExecutorField = scheduler.getClass().getDeclaredField("executor");
        ExecutorField.setAccessible(true);

        scheduler.start();
        rpcExecutor = (ThreadPoolExecutor) ExecutorField.get(scheduler);

        rpcExecutor.setMaximumPoolSize(1);
        rpcExecutor.allowCoreThreadTimeOut(true);
        rpcExecutor.setCorePoolSize(0);
        rpcExecutor.setKeepAliveTime(1, TimeUnit.MICROSECONDS);

        // Wait for 2 seconds, so that idle threads will die
        Thread.sleep(2000);
    } catch (NoSuchFieldException e) {
        LOG.error("No such field exception:" + e);
    } catch (IllegalAccessException e) {
        LOG.error("Illegal access exception:" + e);
    } catch (InterruptedException e) {
        LOG.error("Interrupted exception:" + e);
    }
    return rpcExecutor;
}
From source file:org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskAsyncLazyPersistService.java
private void addExecutorForVolume(final File volume) {
    ThreadFactory threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(threadGroup, r);
            t.setName("Async RamDisk lazy persist worker for volume " + volume);
            return t;
        }
    };

    ThreadPoolExecutor executor = new ThreadPoolExecutor(CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
            THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory);

    // This can reduce the number of running threads
    executor.allowCoreThreadTimeOut(true);
    executors.put(volume, executor);
}
From source file:org.apache.hadoop.hbase.AcidGuaranteesTestTool.java
private ExecutorService createThreadPool() {
    int maxThreads = 256;
    int coreThreads = 128;
    long keepAliveTime = 60;
    BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(
            maxThreads * HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS);
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            workQueue, Threads.newDaemonThreadFactory(toString() + "-shared"));
    tpe.allowCoreThreadTimeOut(true);
    return tpe;
}