List of usage examples for the java.util.concurrent.ThreadPoolExecutor constructor
public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler)
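For reference, a minimal standalone sketch of this constructor overload; the pool sizes, queue capacity, class name, and task are illustrative values chosen for the example, not taken from any of the projects below:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolExample {
    public static void main(String[] args) {
        // 2 core threads, at most 4 threads total, idle extra threads retired after 30s.
        // The bounded queue holds up to 100 pending tasks; CallerRunsPolicy makes the
        // submitting thread run a task itself when the queue is full (backpressure).
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(100),
                new ThreadPoolExecutor.CallerRunsPolicy());

        pool.execute(() -> System.out.println("task ran on " + Thread.currentThread().getName()));

        pool.shutdown();
    }
}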
From source file:org.apache.hadoop.hbase.backup.regionserver.LogRollBackupSubprocedurePool.java
public LogRollBackupSubprocedurePool(String name, Configuration conf) {
    // Configure the executor service
    long keepAlive = conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY,
            LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT);
    int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS);
    this.name = name;
    executor = new ThreadPoolExecutor(1, threads, keepAlive, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new DaemonThreadFactory("rs(" + name + ")-backup-pool"));
    taskPool = new ExecutorCompletionService<Void>(executor);
}
From source file:org.apache.hadoop.hbase.procedure.ProcedureMember.java
/**
 * Default thread pool for the procedure.
 *
 * @param memberName      name of the procedure member, used in the pool's thread names
 * @param procThreads     the maximum number of threads to allow in the pool
 * @param keepAliveMillis the maximum time (ms) that excess idle threads will wait for new tasks
 */
public static ThreadPoolExecutor defaultPool(String memberName, int procThreads, long keepAliveMillis) {
    return new ThreadPoolExecutor(1, procThreads, keepAliveMillis, TimeUnit.MILLISECONDS,
            new SynchronousQueue<Runnable>(),
            new DaemonThreadFactory("member: '" + memberName + "' subprocedure-pool"));
}
From source file:bigbird.benchmark.HttpBenchmark.java
public void execute() {
    params = getHttpParams(socketTimeout, useHttp1_0);
    for (RequestGenerator g : requestGenerators) {
        g.setParameters(params);
    }
    host = new HttpHost(url.getHost(), url.getPort(), url.getProtocol());

    ThreadPoolExecutor workerPool = new ThreadPoolExecutor(threads, threads, 5, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {
                public Thread newThread(Runnable r) {
                    return new Thread(r, "ClientPool");
                }
            });
    workerPool.prestartAllCoreThreads();

    BenchmarkWorker[] workers = new BenchmarkWorker[threads];
    for (int i = 0; i < threads; i++) {
        workers[i] = new BenchmarkWorker(params, verbosity, requestGenerators[i], host, requests, keepAlive);
        workerPool.execute(workers[i]);
    }

    while (workerPool.getCompletedTaskCount() < threads) {
        Thread.yield();
        try {
            Thread.sleep(1000);
        } catch (InterruptedException ignore) {
        }
    }

    workerPool.shutdown();
    ResultProcessor.printResults(workers, host, url.toString(), contentLength);
}
From source file:com.optimizely.ab.event.AsyncEventHandler.java
public AsyncEventHandler(int queueCapacity, int numWorkers, int maxConnections, int connectionsPerRoute,
        int validateAfter, long closeTimeout, TimeUnit closeTimeoutUnit) {
    if (queueCapacity <= 0) {
        throw new IllegalArgumentException("queue capacity must be > 0");
    }

    this.httpClient = OptimizelyHttpClient.builder()
            .withMaxTotalConnections(maxConnections)
            .withMaxPerRoute(connectionsPerRoute)
            .withValidateAfterInactivity(validateAfter)
            .build();
    this.workerExecutor = new ThreadPoolExecutor(numWorkers, numWorkers, 0L, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(queueCapacity),
            new NamedThreadFactory("optimizely-event-dispatcher-thread-%s", true));
    this.closeTimeout = closeTimeout;
    this.closeTimeoutUnit = closeTimeoutUnit;
}
From source file:com.amazonaws.mobileconnectors.pinpoint.internal.event.EventRecorder.java
public static EventRecorder newInstance(final PinpointContext pinpointContext, final PinpointDBUtil dbUtil) {
    final ExecutorService submissionRunnableQueue = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(MAX_EVENT_OPERATIONS),
            new ThreadPoolExecutor.DiscardPolicy());
    return new EventRecorder(pinpointContext, dbUtil, submissionRunnableQueue);
}
From source file:com.pinterest.terrapin.controller.TerrapinControllerServiceImpl.java
public TerrapinControllerServiceImpl(PropertiesConfiguration configuration, ZooKeeperManager zkManager,
        DFSClient hdfsClient, HelixAdmin helixAdmin, String clusterName) {
    this.configuration = configuration;
    this.zkManager = zkManager;
    this.hdfsClient = hdfsClient;
    this.helixAdmin = helixAdmin;
    this.clusterName = clusterName;

    ExecutorService threadPool = new ThreadPoolExecutor(100, 100, 0, TimeUnit.SECONDS,
            new LinkedBlockingDeque<Runnable>(1000),
            new ThreadFactoryBuilder().setDaemon(false).setNameFormat("controller-pool-%d").build());
    this.futurePool = new ExecutorServiceFuturePool(threadPool);
}
From source file:org.apache.hadoop.util.AsyncDiskService.java
/**
 * Create an AsyncDiskService with a set of volumes (specified by their
 * root directories).
 *
 * The AsyncDiskService uses one ThreadPool per volume to do the async
 * disk operations.
 *
 * @param volumes The roots of the file system volumes.
 */
public AsyncDiskService(String[] volumes) {
    threadFactory = new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(threadGroup, r);
        }
    };

    // Create one ThreadPool per volume
    for (int v = 0; v < volumes.length; v++) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(CORE_THREADS_PER_VOLUME,
                MAXIMUM_THREADS_PER_VOLUME, THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), threadFactory);

        // This can reduce the number of running threads
        executor.allowCoreThreadTimeOut(true);
        executors.put(volumes[v], executor);
    }
}
From source file:org.eclipse.ecr.core.event.impl.AsyncEventExecutor.java
public AsyncEventExecutor(int poolSize, int maxPoolSize, int keepAliveTime, int queueSize) {
    queue = new LinkedBlockingQueue<Runnable>(queueSize);
    mono_queue = new LinkedBlockingQueue<Runnable>(queueSize);
    NamedThreadFactory threadFactory = new NamedThreadFactory("Nuxeo Async Events");
    executor = new ThreadPoolExecutor(poolSize, maxPoolSize, keepAliveTime, TimeUnit.SECONDS,
            queue, threadFactory);
    mono_executor = new ThreadPoolExecutor(1, 1, keepAliveTime, TimeUnit.SECONDS,
            mono_queue, threadFactory);
}
From source file:com.roncoo.pay.app.settlement.utils.SettThreadPoolExecutor.java
public void init() {
    if (workQueueSize < 1) {
        workQueueSize = 1000;
    }
    if (this.keepAliveTime < 1) {
        this.keepAliveTime = 1000;
    }
    int coreSize = 0;
    if (this.corePoolSize < 1) {
        coreSize = Runtime.getRuntime().availableProcessors();
        maxPoolSize = Math.round(((float) (coreSize * notifyRadio)) / 10);
        corePoolSize = coreSize / 4;
        if (corePoolSize < 1) {
            corePoolSize = 1;
        }
    }

    // NOTICE: maxPoolSize must not be smaller than corePoolSize
    if (maxPoolSize < corePoolSize) {
        maxPoolSize = corePoolSize;
    }

    // ThreadPoolExecutor takes its tasks from a BlockingQueue; idle worker threads block on workQueue.take()
    BlockingQueue<Runnable> notifyWorkQueue = new ArrayBlockingQueue<Runnable>(workQueueSize);
    executor = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime, TimeUnit.SECONDS,
            notifyWorkQueue, new ThreadPoolExecutor.CallerRunsPolicy());

    LOG.info("NotifyExecutor Info : CPU = " + coreSize + " | corePoolSize = " + corePoolSize
            + " | maxPoolSize = " + maxPoolSize + " | workQueueSize = " + workQueueSize);
}
From source file:org.apache.hadoop.fs.nfs.stream.NFSBufferedOutputStream.java
public NFSBufferedOutputStream(Configuration configuration, FileHandle handle, Path path,
        NFSv3FileSystemStore store, Credentials credentials, boolean append) throws IOException {

    this.handle = handle;
    this.credentials = credentials;
    this.path = path;
    this.pathString = path.toUri().getPath();
    this.statistics = new StreamStatistics(NFSBufferedInputStream.class + pathString,
            streamId.getAndIncrement(), false);
    this.store = store;
    this.blockSizeBits = store.getWriteSizeBits();
    this.currentBlock = null;
    this.closed = new AtomicBoolean(false);

    assert (blockSizeBits >= 0 && blockSizeBits <= 22);

    // Create the task queues
    executors = new ThreadPoolExecutor(DEFAULT_WRITEBACK_POOL_SIZE, MAX_WRITEBACK_POOL_SIZE, 5,
            TimeUnit.SECONDS, new LinkedBlockingDeque<Runnable>(1024),
            new ThreadPoolExecutor.CallerRunsPolicy());
    ongoing = new LinkedList<>();

    // Set file offset to 0 or file length
    if (append) {
        Nfs3FileAttributes attributes = store.getFileAttributes(handle, credentials);
        if (attributes != null) {
            fileOffset = attributes.getSize();
            LOG.info("Appending to file so starting at offset = " + fileOffset);
        } else {
            throw new IOException("Could not get file length");
        }
    } else {
        fileOffset = 0L;
    }
}