List of usage examples for java.util.concurrent.SynchronousQueue
public SynchronousQueue()
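The no-argument constructor creates a queue with no internal capacity: every put(E) blocks until another thread is ready to take(), and vice versa. A minimal illustrative sketch of this hand-off (a hypothetical example, not taken from the sources below):

import java.util.concurrent.SynchronousQueue;

public class HandOffDemo {
    public static void main(String[] args) throws InterruptedException {
        SynchronousQueue<String> queue = new SynchronousQueue<>();

        Thread consumer = new Thread(() -> {
            try {
                // take() blocks until a producer hands an element over
                System.out.println("Received: " + queue.take());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.start();

        // put() blocks until the consumer is ready to take
        queue.put("hello");
        consumer.join();
    }
}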
From source file:com.cloud.agent.Agent.java
public Agent(final IAgentShell shell) {
    _shell = shell;
    _link = null;
    _connection = new NioClient("Agent", _shell.getHost(), _shell.getPort(), _shell.getWorkers(), this);

    Runtime.getRuntime().addShutdownHook(new ShutdownThread(this));

    _ugentTaskPool = new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10,
            TimeUnit.MINUTES, new SynchronousQueue<Runnable>(), new NamedThreadFactory("UgentTask"));

    _executor = new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS,
            new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("agentRequest-Handler"));
}
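Pairing a ThreadPoolExecutor with a SynchronousQueue, as the urgent-task pool above does, gives direct hand-off semantics: each submitted task goes straight to a thread, the pool grows up to its maximum size, and once every thread is busy further submissions are rejected under the default AbortPolicy. A hypothetical sketch of that behavior:

import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DirectHandOffDemo {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                1, 2, 10, TimeUnit.SECONDS, new SynchronousQueue<Runnable>());

        Runnable sleeper = () -> {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };

        pool.execute(sleeper); // handed directly to the core thread
        pool.execute(sleeper); // no idle thread, so the pool grows to maximumPoolSize
        try {
            pool.execute(sleeper); // no idle thread, no queue capacity, pool at max -> rejected
        } catch (RejectedExecutionException expected) {
            System.out.println("Third task rejected: " + expected);
        }
        pool.shutdown();
    }
}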
From source file:org.xwiki.job.internal.DefaultJobStatusStore.java
@Override
public void initialize() throws InitializationException {
    try {
        this.serializer = new JobStatusSerializer();
        repair();
    } catch (Exception e) {
        this.logger.error("Failed to load jobs", e);
    }

    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("Job status serializer")
            .daemon(true)
            .priority(Thread.MIN_PRIORITY)
            .build();
    this.executorService = new ThreadPoolExecutor(0, 10, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), threadFactory);

    this.cache = Collections.synchronizedMap(new Cache(this.configuration.getJobStatusCacheSize()));
}
From source file:org.green.code.async.executor.ThreadPoolTaskExecutor.java
/**
 * Create the BlockingQueue to use for the ThreadPoolExecutor.
 * <p>
 * A LinkedBlockingQueue instance will be created for a positive capacity
 * value; a SynchronousQueue otherwise.
 *
 * @param queueCapacity the specified queue capacity
 * @return the BlockingQueue instance
 * @see java.util.concurrent.LinkedBlockingQueue
 * @see java.util.concurrent.SynchronousQueue
 */
protected BlockingQueue<Runnable> createQueue(int queueCapacity) {
    if (queueCapacity > 0) {
        return new LinkedBlockingQueue<Runnable>(queueCapacity);
    } else {
        return new SynchronousQueue<Runnable>();
    }
}
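The zero-capacity branch mirrors the JDK's own cached thread pool, which pairs a SynchronousQueue with an effectively unbounded pool; Executors.newCachedThreadPool() is defined as:

public static ExecutorService newCachedThreadPool() {
    return new ThreadPoolExecutor(0, Integer.MAX_VALUE,
                                  60L, TimeUnit.SECONDS,
                                  new SynchronousQueue<Runnable>());
}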
From source file:com.taobao.metamorphosis.gregor.slave.OrderedThreadPoolExecutor.java
/**
 * Creates a new instance of an OrderedThreadPoolExecutor.
 *
 * @param corePoolSize
 *            The initial pool size
 * @param maximumPoolSize
 *            The maximum pool size
 * @param keepAliveTime
 *            Default keep-alive duration for a thread
 * @param unit
 *            Time unit used for the keepAlive value
 * @param threadFactory
 *            The factory used to create threads
 */
public OrderedThreadPoolExecutor(final int corePoolSize, final int maximumPoolSize, final long keepAliveTime,
        final TimeUnit unit, final ThreadFactory threadFactory) {
    // We have to initialize the pool with default values (0 and 1) in order to
    // handle the exception in a better way. We can't add a try {} catch() {}
    // around the super() call.
    super(DEFAULT_INITIAL_THREAD_POOL_SIZE, 1, keepAliveTime, unit, new SynchronousQueue<Runnable>(),
            threadFactory, new AbortPolicy());

    if (corePoolSize < DEFAULT_INITIAL_THREAD_POOL_SIZE) {
        throw new IllegalArgumentException("corePoolSize: " + corePoolSize);
    }

    if (maximumPoolSize == 0 || maximumPoolSize < corePoolSize) {
        throw new IllegalArgumentException("maximumPoolSize: " + maximumPoolSize);
    }

    // Now, we can set up the pool sizes
    super.setCorePoolSize(corePoolSize);
    super.setMaximumPoolSize(maximumPoolSize);
}
From source file:org.apache.hadoop.hbase.client.TestHCM.java
@Test
public void testClusterConnection() throws IOException {
    ThreadPoolExecutor otherPool = new ThreadPoolExecutor(1, 1, 5, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("test-hcm"));

    HConnection con1 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
    HConnection con2 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration(), otherPool);
    // make sure the internally created ExecutorService is the one passed
    assertTrue(otherPool == ((HConnectionImplementation) con2).getCurrentBatchPool());

    String tableName = "testClusterConnection";
    TEST_UTIL.createTable(tableName.getBytes(), FAM_NAM).close();
    HTable t = (HTable) con1.getTable(tableName, otherPool);
    // make sure passing a pool to getTable does not trigger creation of an internal pool
    assertNull("Internal Thread pool should be null",
            ((HConnectionImplementation) con1).getCurrentBatchPool());
    // table should use the pool passed
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(tableName);
    // table should use the connection's internal pool
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(Bytes.toBytes(tableName)); // try other API too
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con2.getTable(TableName.valueOf(tableName)); // try other API too
    assertTrue(otherPool == t.getPool());
    t.close();

    t = (HTable) con1.getTable(tableName);
    ExecutorService pool = ((HConnectionImplementation) con1).getCurrentBatchPool();
    // make sure an internal pool was created
    assertNotNull("An internal Thread pool should have been created", pool);
    // and that the table is using it
    assertTrue(t.getPool() == pool);
    t.close();

    t = (HTable) con1.getTable(tableName);
    // still using the *same* internal pool
    assertTrue(t.getPool() == pool);
    t.close();

    con1.close();
    // if the pool was created on demand it should be closed upon connection close
    assertTrue(pool.isShutdown());

    con2.close();
    // if the pool is passed, it is not closed
    assertFalse(otherPool.isShutdown());
    otherPool.shutdownNow();
}
From source file:org.sapia.soto.util.concurrent.ExecutorService.java
/**
 * Internal factory method that creates the executor instance. Subclasses may override this method to
 * change the actual executor implementation used.
 *
 * @param aThreadFactory The thread factory to use for the executor.
 * @return The created thread pool executor instance.
 */
protected ThreadPoolExecutor createExecutor(ThreadFactory aThreadFactory) {
    // Create the queue of pending tasks
    BlockingQueue queue;
    if (_taskQueueSize == 0) {
        queue = new SynchronousQueue();
    } else {
        queue = new ArrayBlockingQueue(_taskQueueSize);
    }

    return new ThreadPoolExecutor(_coreThreadPoolSize, _maximumThreadPoolSize, _threadKeepAliveTime,
            TimeUnit.MILLISECONDS, queue, aThreadFactory, _rejectedExecutionHandler);
}
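Note that when _taskQueueSize is 0 the pool's queue reports zero capacity to non-blocking operations: offer() succeeds only if a consumer is already waiting in take(), and poll() succeeds only if a producer is waiting. A small hypothetical demo of those semantics:

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;

public class NonBlockingDemo {
    public static void main(String[] args) throws InterruptedException {
        SynchronousQueue<Integer> queue = new SynchronousQueue<>();

        System.out.println(queue.offer(1)); // false: no consumer is waiting
        System.out.println(queue.poll());   // null: no producer is waiting
        System.out.println(queue.size());   // always 0: the queue holds nothing
        System.out.println(queue.poll(100, TimeUnit.MILLISECONDS)); // null after the timeout
    }
}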
From source file:org.apache.hadoop.hbase.util.TestHBaseFsck.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.handler.count", 2);
    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.metahandler.count", 2);
    TEST_UTIL.startMiniCluster(3);
    executorService = new ThreadPoolExecutor(1, Integer.MAX_VALUE, 60, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("testhbck"));

    AssignmentManager assignmentManager = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    regionStates = assignmentManager.getRegionStates();
    TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, true);
}
From source file:com.splicemachine.derby.stream.control.ControlDataSet.java
@Override
public DataSet<V> union(DataSet<V> dataSet) {
    ThreadPoolExecutor tpe = null;
    try {
        ThreadFactory factory = new ThreadFactoryBuilder().setNameFormat("union-begin-query-%d")
                .setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
                    @Override
                    public void uncaughtException(Thread t, Throwable e) {
                        e.printStackTrace();
                    }
                }).build();
        tpe = new ThreadPoolExecutor(2, 2, 60, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), factory,
                new ThreadPoolExecutor.CallerRunsPolicy());
        tpe.allowCoreThreadTimeOut(false);
        tpe.prestartAllCoreThreads();
        Future<Iterator<V>> leftSideFuture = tpe.submit(new NonLazy(iterator));
        Future<Iterator<V>> rightSideFuture = tpe.submit(new NonLazy(((ControlDataSet<V>) dataSet).iterator));
        return new ControlDataSet<>(Iterators.concat(leftSideFuture.get(), rightSideFuture.get()));
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (tpe != null)
            tpe.shutdown();
    }
}
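Prestarting both core threads means two workers are already blocked in the SynchronousQueue when the submit() calls arrive, presumably so both sides of the union start materializing in parallel immediately; CallerRunsPolicy serves as a fallback that runs a task on the submitting thread, rather than rejecting it, should no worker be free.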
From source file:com.facebook.presto.accumulo.tools.RewriteMetricsTask.java
public int exec() throws Exception {
    // Validate the required parameters have been set
    int numErrors = checkParam(config, "config");
    numErrors += checkParam(schema, "schema");
    numErrors += checkParam(tableName, "tableName");
    if (numErrors > 0) {
        return 1;
    }

    // Create the instance and the connector
    Instance inst = new ZooKeeperInstance(config.getInstance(), config.getZooKeepers());
    Connector connector = inst.getConnector(config.getUsername(), new PasswordToken(config.getPassword()));

    if (auths == null) {
        auths = connector.securityOperations().getUserAuthorizations(config.getUsername());
    }

    // Fetch the table metadata
    ZooKeeperMetadataManager manager = new ZooKeeperMetadataManager(config, new TypeRegistry());

    LOG.info("Scanning Presto metadata for tables...");
    AccumuloTable table = manager.getTable(new SchemaTableName(schema, tableName));

    if (table == null) {
        LOG.error("Table is null, does it exist?");
        return 1;
    }

    reconfigureIterators(connector, table);

    if (!dryRun) {
        LOG.info("Truncating metrics table " + table.getIndexTableName() + "_metrics");
        connector.tableOperations().deleteRows(table.getIndexTableName() + "_metrics", null, null);
    } else {
        LOG.info("Would have truncated metrics table " + table.getIndexTableName() + "_metrics");
    }

    long start = System.currentTimeMillis();
    ExecutorService service = MoreExecutors.getExitingExecutorService(
            new ThreadPoolExecutor(2, 2, 0, TimeUnit.MILLISECONDS, new SynchronousQueue<>()));
    List<Future<Void>> tasks = service.invokeAll(ImmutableList.of(() -> {
        rewriteMetrics(connector, table, start);
        return null;
    }, () -> {
        rewriteNumRows(connector, table, start);
        return null;
    }));

    for (Future<Void> task : tasks) {
        task.get();
    }

    LOG.info("Finished");
    return 0;
}
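Guava's MoreExecutors.getExitingExecutorService wraps the pool so its threads are daemon threads and cannot keep the JVM alive on exit. The zero-capacity SynchronousQueue is never actually offered to here: both tasks submitted by invokeAll are handed straight to the two core threads.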
From source file:org.apache.solr.cloud.OverseerTaskProcessor.java
@Override
public void run() {
    log.debug("Process current queue of overseer operations");
    LeaderStatus isLeader = amILeader();
    while (isLeader == LeaderStatus.DONT_KNOW) {
        log.debug("am_i_leader unclear {}", isLeader);
        isLeader = amILeader(); // not a no, not a yes, try asking again
    }

    String oldestItemInWorkQueue = null;
    // hasLeftOverItems - used for avoiding re-execution of async tasks that were processed by a previous Overseer.
    // This variable is set in case there's any task found on the workQueue when the OCP starts up and
    // the id for the queue tail is used as a marker to check for the task in the completed/failed map in zk.
    // Beyond the marker, all tasks can safely be assumed to have never been executed.
    boolean hasLeftOverItems = true;

    try {
        oldestItemInWorkQueue = workQueue.getTailId();
    } catch (KeeperException e) {
        // We don't need to handle this. This is just a fail-safe which comes in handy in skipping already
        // processed async calls.
        SolrException.log(log, "", e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }

    if (oldestItemInWorkQueue == null)
        hasLeftOverItems = false;
    else
        log.debug("Found already existing elements in the work-queue. Last element: {}", oldestItemInWorkQueue);

    try {
        prioritizer.prioritizeOverseerNodes(myId);
    } catch (Exception e) {
        if (!zkStateReader.getZkClient().isClosed()) {
            log.error("Unable to prioritize overseer ", e);
        }
    }

    // TODO: Make maxThreads configurable.
    this.tpe = new ExecutorUtil.MDCAwareThreadPoolExecutor(5, MAX_PARALLEL_TASKS, 0L, TimeUnit.MILLISECONDS,
            new SynchronousQueue<Runnable>(), new DefaultSolrThreadFactory("OverseerThreadFactory"));

    try {
        while (!this.isClosed) {
            try {
                isLeader = amILeader();
                if (LeaderStatus.NO == isLeader) {
                    break;
                } else if (LeaderStatus.YES != isLeader) {
                    log.debug("am_i_leader unclear {}", isLeader);
                    continue; // not a no, not a yes, try asking again
                }

                log.debug("Cleaning up work-queue. #Running tasks: {}", runningTasks.size());
                cleanUpWorkQueue();

                printTrackingMaps();

                boolean waited = false;

                while (runningTasks.size() > MAX_PARALLEL_TASKS) {
                    synchronized (waitLock) {
                        waitLock.wait(100); // wait for 100 ms or till a task is complete
                    }
                    waited = true;
                }

                if (waited)
                    cleanUpWorkQueue();

                ArrayList<QueueEvent> heads = new ArrayList<>(blockedTasks.size() + MAX_PARALLEL_TASKS);
                heads.addAll(blockedTasks.values());

                // If we have enough items in the blocked tasks already, it makes
                // no sense to read more items from the work queue. It makes sense
                // to clear out at least a few items in the queue before we read more items.
                if (heads.size() < MAX_BLOCKED_TASKS) {
                    // instead of always reading MAX_PARALLEL_TASKS items, we should only fetch as much as we can execute
                    int toFetch = Math.min(MAX_BLOCKED_TASKS - heads.size(),
                            MAX_PARALLEL_TASKS - runningTasks.size());
                    List<QueueEvent> newTasks = workQueue.peekTopN(toFetch, excludedTasks, 2000L);
                    log.debug("Got {} tasks from work-queue : [{}]", newTasks.size(), newTasks);
                    heads.addAll(newTasks);
                } else {
                    // Prevent free-spinning this loop.
                    Thread.sleep(1000);
                }

                if (isClosed) break;

                if (heads.isEmpty()) {
                    continue;
                }

                blockedTasks.clear(); // clear it now; may get refilled below.

                taskBatch.batchId++;
                boolean tooManyTasks = false;
                for (QueueEvent head : heads) {
                    if (!tooManyTasks) {
                        synchronized (runningTasks) {
                            tooManyTasks = runningTasks.size() >= MAX_PARALLEL_TASKS;
                        }
                    }
                    if (tooManyTasks) {
                        // Too many tasks are running, just shove the rest into the "blocked" queue.
                        if (blockedTasks.size() < MAX_BLOCKED_TASKS)
                            blockedTasks.put(head.getId(), head);
                        continue;
                    }

                    if (runningZKTasks.contains(head.getId())) continue;

                    final ZkNodeProps message = ZkNodeProps.load(head.getBytes());
                    OverseerMessageHandler messageHandler = selector.selectOverseerMessageHandler(message);
                    final String asyncId = message.getStr(ASYNC);
                    if (hasLeftOverItems) {
                        if (head.getId().equals(oldestItemInWorkQueue))
                            hasLeftOverItems = false;
                        if (asyncId != null && (completedMap.contains(asyncId) || failureMap.contains(asyncId))) {
                            log.debug("Found already processed task in workQueue, cleaning up. AsyncId [{}]",
                                    asyncId);
                            workQueue.remove(head);
                            continue;
                        }
                    }
                    String operation = message.getStr(Overseer.QUEUE_OPERATION);
                    OverseerMessageHandler.Lock lock = messageHandler.lockTask(message, taskBatch);
                    if (lock == null) {
                        log.debug("Exclusivity check failed for [{}]", message.toString());
                        // we may end up crossing the size of MAX_BLOCKED_TASKS. They are fine
                        if (blockedTasks.size() < MAX_BLOCKED_TASKS)
                            blockedTasks.put(head.getId(), head);
                        continue;
                    }
                    try {
                        markTaskAsRunning(head, asyncId);
                        log.debug("Marked task [{}] as running", head.getId());
                    } catch (KeeperException.NodeExistsException e) {
                        lock.unlock();
                        // This should never happen
                        log.error("Tried to pick up task [{}] when it was already running!", head.getId());
                        continue;
                    } catch (InterruptedException e) {
                        lock.unlock();
                        log.error("Thread interrupted while trying to pick task [{}] for execution.", head.getId());
                        Thread.currentThread().interrupt();
                        continue;
                    }

                    log.debug(messageHandler.getName() + ": Get the message id:" + head.getId() + " message:"
                            + message.toString());
                    Runner runner = new Runner(messageHandler, message, operation, head, lock);
                    tpe.execute(runner);
                }

            } catch (KeeperException e) {
                if (e.code() == KeeperException.Code.SESSIONEXPIRED) {
                    log.warn("Overseer cannot talk to ZK");
                    return;
                }
                SolrException.log(log, "", e);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            } catch (Exception e) {
                SolrException.log(log, "", e);
            }
        }
    } finally {
        this.close();
    }
}
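Here the SynchronousQueue means tpe.execute(runner) always hands a task directly to a thread, growing the pool from 5 core threads up to MAX_PARALLEL_TASKS; the loop's runningTasks bookkeeping keeps submissions below that ceiling, so in practice the pool's default rejection policy should never fire.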