List of usage examples for the java.util.concurrent.ThreadPoolExecutor constructor
public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue)
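Before the project-specific examples below, here is a minimal, self-contained sketch of how the five constructor arguments fit together (core pool size, maximum pool size, keep-alive time, time unit, and work queue). The concrete values (2 core threads, 4 maximum, 30-second keep-alive, a queue of 100) are illustrative assumptions, not taken from any of the examples that follow.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ThreadPoolExecutorExample {
    public static void main(String[] args) throws InterruptedException {
        // Bounded work queue: at most 100 tasks may wait for a free thread.
        BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(100);

        // 2 core threads are kept alive; up to 4 threads are created under load,
        // and the non-core threads are retired after 30 seconds of idleness.
        ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 4, 30L, TimeUnit.SECONDS, workQueue);

        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            executor.execute(() ->
                    System.out.println("task " + taskId + " on " + Thread.currentThread().getName()));
        }

        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}

Note that setting core and maximum pool size to the same value (as several examples below do) produces a fixed-size pool, and that tasks are only rejected when both the pool and a bounded queue are full.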
From source file:org.apache.hadoop.mapred.lib.MultithreadedMapRunner.java
@SuppressWarnings("unchecked")
public void configure(JobConf jobConf) {
    int numberOfThreads = jobConf.getInt("mapred.map.multithreadedrunner.threads", 10);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Configuring jobConf " + jobConf.getJobName() + " to use " + numberOfThreads + " threads");
    }

    this.job = jobConf;
    // Increment the processed counter only if the skipping feature is enabled.
    this.incrProcCount = SkipBadRecords.getMapperMaxSkipRecords(job) > 0
            && SkipBadRecords.getAutoIncrMapperProcCount(job);
    this.mapper = ReflectionUtils.newInstance(jobConf.getMapperClass(), jobConf);

    // Create a thread pool of the configured size to execute the Mapper's
    // map method in parallel.
    executorService = new ThreadPoolExecutor(numberOfThreads, numberOfThreads, 0L, TimeUnit.MILLISECONDS,
            new BlockingArrayQueue(numberOfThreads));
}
From source file:org.sourceopen.hadoop.hbase.replication.consumer.FileChannelManager.java
public void init() throws Exception {
    if (LOG.isInfoEnabled()) {
        LOG.info("FileChannelManager is pending to start.");
    }

    conf.addResource(ConsumerConstants.COMMON_CONFIG_FILE);
    conf.addResource(ConsumerConstants.CONSUMER_CONFIG_FILE);
    adapter = ProtocolAdapter.getAdapter(conf);

    fileChannelPool = new ThreadPoolExecutor(
            conf.getInt(ConsumerConstants.CONFKEY_REP_FILE_CHANNEL_POOL_SIZE, 10),
            conf.getInt(ConsumerConstants.CONFKEY_REP_FILE_CHANNEL_POOL_SIZE, 10),
            conf.getInt(ConsumerConstants.CONFKEY_THREADPOOL_KEEPALIVE_TIME, 100), TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(conf.getInt(ConsumerConstants.CONFKEY_THREADPOOL_SIZE, 100)));

    fs = FileSystem.get(URI.create(conf.get(HDFSFileAdapter.CONFKEY_HDFS_FS)), conf);
    zoo = HRepConfigUtil.createAdvZooKeeperByHBaseConfig(conf, new NothingZookeeperWatch());
    root = ZNodeFactory.createZNode(zoo,
            conf.get(ConsumerConstants.CONFKEY_ROOT_ZOO, ConsumerConstants.ROOT_ZOO), true);

    if (LOG.isInfoEnabled()) {
        LOG.info("FileChannelManager init.");
    }
}
From source file:org.duracloud.mill.workman.TaskWorkerManager.java
public void init() {
    this.defaultMinWaitTime = new Long(
            System.getProperty(MIN_WAIT_BEFORE_TAKE_KEY, DEFAULT_MIN_WAIT_BEFORE_TAKE + ""));

    Integer maxThreadCount = new Integer(
            System.getProperty(MAX_WORKER_PROPERTY_KEY, String.valueOf(DEFAULT_MAX_WORKERS)));

    // With a bounded pool and an unbounded queue, rejection should never occur.
    this.executor = new ThreadPoolExecutor(maxThreadCount, maxThreadCount, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>());
    this.executor.setRejectedExecutionHandler(new ThreadPoolExecutor.DiscardPolicy());

    new Thread(new Runnable() {
        @Override
        public void run() {
            runManager();
        }
    }).start();

    timer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            List<String> queueStats = new LinkedList<String>();
            for (TaskQueue queue : taskQueues) {
                queueStats.add(formatQueueStat(queue));
            }
            queueStats.add(formatQueueStat(deadLetterQueue));
            log.info("Status: max_workers={} running_workers={} completed_workers={} {}",
                    new Object[] { getMaxWorkers(), executor.getActiveCount(),
                            executor.getCompletedTaskCount(), StringUtils.join(queueStats, " ") });
        }

        private String formatQueueStat(TaskQueue queue) {
            return queue.getName() + "_q_size=" + queue.size();
        }
    }, new Date(), 1 * 60 * 1000);
}
From source file:com.cloudant.sync.replication.BasicPullStrategy.java
public BasicPullStrategy(PullReplication pullReplication, ExecutorService executorService,
        PullConfiguration config) {
    Preconditions.checkNotNull(pullReplication, "PullReplication must not be null.");

    if (executorService == null) {
        executorService = new ThreadPoolExecutor(4, 4, 1, TimeUnit.MINUTES,
                new LinkedBlockingQueue<Runnable>());
    }

    if (config == null) {
        config = new PullConfiguration();
    }

    this.executor = executorService;
    this.config = config;
    this.filter = pullReplication.filter;

    String dbName = pullReplication.getSourceDbName();
    CouchConfig couchConfig = pullReplication.getCouchConfig();
    this.sourceDb = new CouchClientWrapper(dbName, couchConfig);
    this.targetDb = new DatastoreWrapper((DatastoreExtended) pullReplication.target);
    this.name = String.format("%s [%s]", LOG_TAG, pullReplication.getReplicatorName());
}
From source file:com.pinterest.rocksplicator.controller.DispatcherTest.java
@Test
public void testSingleTaskLifeCycle() throws Exception {
    // Assume there is only one task in the queue.
    PowerMockito.when(taskQueue.dequeueTask(anyString()))
            .thenReturn(getSleepIncrementTaskFromQueue())
            .thenReturn(null);

    Semaphore idleWorkersSemaphore = new Semaphore(1);
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(1));
    WorkerPool workerPool = new WorkerPool(threadPoolExecutor, idleWorkersSemaphore, taskQueue);
    TaskDispatcher dispatcher = new TaskDispatcher(2, idleWorkersSemaphore, workerPool, taskQueue);
    dispatcher.start();

    // Wait for the first task to be done.
    synchronized (SleepIncrementTask.notifyObject) {
        SleepIncrementTask.notifyObject.wait();
    }
    verify(taskQueue, atLeastOnce()).dequeueTask(anyString());
    Assert.assertEquals(1, SleepIncrementTask.executionCounter.intValue());
    Assert.assertEquals(1, idleWorkersSemaphore.availablePermits());
    dispatcher.stop();
}
From source file:org.paxle.crawler.urlRedirector.impl.UrlRedirectorServer.java
protected void activate(Dictionary<String, Object> props) throws UnknownHostException, IOException {
    Integer port = (Integer) props.get(PORT);
    if (port == null)
        port = Integer.valueOf(8090);

    // Init the thread pool.
    this.execService = new ThreadPoolExecutor(5, 20, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>());

    // Create the server.
    this.srv = new Server(port, new UrlRedirectorHandler(this));

    // Start it.
    this.srv.start();
}
From source file:com.isthari.spring.cloud.config.cassandra.CassandraEnvironmentRepository.java
public CassandraEnvironmentRepository(ConfigurableEnvironment environment, String hostnames, String username,
        String password, Boolean createSchema) {
    // Create the cluster object.
    Cluster.Builder builder = Cluster.builder();

    // Add contact points.
    for (String hostname : hostnames.split(",")) {
        builder.addContactPoints(hostname);
    }

    if (username != null && password != null) {
        builder.withCredentials(username, password);
    }

    Cluster cluster = builder.build();
    if (createSchema) {
        this.createSchema(cluster);
    }

    // Connect.
    session = cluster.connect("cloud_config");
    this.stmtGetVersion = session.prepare(
            "select version from application_label_version where application=? and label=? and profile=? limit 1");
    this.stmtGetSnapshot = session
            .prepare("select parameters from configuration_snapshot where application=? and version=?");

    // Executor for async tasks.
    BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(10);
    executor = new ThreadPoolExecutor(4, 10, 1, TimeUnit.DAYS, workQueue);
}
From source file:org.kaaproject.kaa.server.verifiers.gplus.verifier.GplusUserVerifier.java
@Override
public void start() {
    LOG.info("user verifier started");
    threadPool = new ThreadPoolExecutor(configuration.getMinParallelConnections(),
            configuration.getMaxParallelConnections(), configuration.getKeepAliveTimeMilliseconds(),
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
    PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
    connectionManager.setMaxTotal(configuration.getMaxParallelConnections());
    httpClient = HttpClients.custom().setConnectionManager(connectionManager).build();
}
From source file:com.amazonaws.services.simpleworkflow.flow.worker.GenericActivityWorker.java
@Override
protected TaskPoller createPoller() {
    ThreadPoolExecutor tasksExecutor = new ThreadPoolExecutor(1, taskExecutorThreadPoolSize, 1,
            TimeUnit.MINUTES, new SynchronousQueue<Runnable>());
    tasksExecutor.setThreadFactory(
            new ExecutorThreadFactory(ACTIVITY_THREAD_NAME_PREFIX + " " + getTaskListToPoll() + " "));
    tasksExecutor.setRejectedExecutionHandler(new BlockCallerPolicy());
    return new ActivityTaskPoller(service, domain, getTaskListToPoll(), activityImplementationFactory,
            tasksExecutor);
}
From source file:com.espertech.esper.example.stockticker.TestStockTickerMultithreaded.java
public void performTest(int numberOfThreads, int numberOfTicksToSend, int ratioPriceOutOfLimit,
        int numberOfSecondsWaitForCompletion) {
    final int totalNumTicks = numberOfTicksToSend + 2 * TestStockTickerGenerator.NUM_STOCK_NAMES;

    log.info(".performTest Generating data, numberOfTicksToSend=" + numberOfTicksToSend
            + " ratioPriceOutOfLimit=" + ratioPriceOutOfLimit);

    StockTickerEventGenerator generator = new StockTickerEventGenerator();
    LinkedList stream = generator.makeEventStream(numberOfTicksToSend, ratioPriceOutOfLimit,
            TestStockTickerGenerator.NUM_STOCK_NAMES,
            StockTickerRegressionConstants.PRICE_LIMIT_PCT_LOWER_LIMIT,
            StockTickerRegressionConstants.PRICE_LIMIT_PCT_UPPER_LIMIT,
            StockTickerRegressionConstants.PRICE_LOWER_LIMIT,
            StockTickerRegressionConstants.PRICE_UPPER_LIMIT, true);

    log.info(".performTest Send limit and initial tick events - singlethreaded");
    for (int i = 0; i < TestStockTickerGenerator.NUM_STOCK_NAMES * 2; i++) {
        Object theEvent = stream.removeFirst();
        epService.getEPRuntime().sendEvent(theEvent);
    }

    log.info(".performTest Loading thread pool work queue, numberOfRunnables=" + stream.size());
    ThreadPoolExecutor pool = new ThreadPoolExecutor(0, numberOfThreads, 99999, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
    for (Object theEvent : stream) {
        SendEventRunnable runnable = new SendEventRunnable(epService, theEvent);
        pool.execute(runnable);
    }

    log.info(".performTest Starting thread pool, threads=" + numberOfThreads);
    pool.setCorePoolSize(numberOfThreads);

    log.info(".performTest Listening for completion");
    EPRuntimeUtil.awaitCompletion(epService.getEPRuntime(), totalNumTicks, numberOfSecondsWaitForCompletion,
            1, 10);

    pool.shutdown();

    // Check results: make sure the given ratio of out-of-limit stock prices was reported.
    int expectedNumEmitted = (numberOfTicksToSend / ratioPriceOutOfLimit) + 1;
    assertTrue(listener.getSize() == expectedNumEmitted);

    log.info(".performTest Done test");
}