List of usage examples for java.util.concurrent.ThreadPoolExecutor.setMaximumPoolSize
public void setMaximumPoolSize(int maximumPoolSize)
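Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed sources; class name and pool sizes are hypothetical) of the common resize pattern around setMaximumPoolSize. Some JDK versions reject a core size larger than the current maximum, so when growing a pool it is safer to raise the maximum before the core size, and to do the reverse when shrinking:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolResizeSketch {
    public static void main(String[] args) {
        // hypothetical starting configuration: 2 core threads, 4 max, 60s keep-alive
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 4, 60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>());

        int newSize = 8; // hypothetical new target size
        if (newSize > pool.getMaximumPoolSize()) {
            // growing: raise the maximum first so the core size never exceeds it
            pool.setMaximumPoolSize(newSize);
            pool.setCorePoolSize(newSize);
        } else {
            // shrinking (or unchanged): lower the core size first, then the maximum
            pool.setCorePoolSize(newSize);
            pool.setMaximumPoolSize(newSize);
        }

        pool.shutdown();
    }
}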
From source file:com.alibaba.otter.node.etl.common.pipe.impl.http.archive.ArchiveBean.java
public void adjustPoolSize(int newPoolSize) {
    if (newPoolSize != poolSize) {
        poolSize = newPoolSize;
        if (executor instanceof ThreadPoolExecutor) {
            ThreadPoolExecutor pool = (ThreadPoolExecutor) executor;
            pool.setCorePoolSize(newPoolSize);
            pool.setMaximumPoolSize(newPoolSize);
        }
    }
}
From source file:com.alibaba.otter.node.etl.OtterController.java
public void setThreadPoolSize(int size) {
    if (executorService instanceof ThreadPoolExecutor) {
        ThreadPoolExecutor pool = (ThreadPoolExecutor) executorService;
        pool.setCorePoolSize(size);
        pool.setMaximumPoolSize(size);
    }
}
From source file:com.alibaba.otter.node.etl.load.loader.db.FileLoadAction.java
private void adjustPoolSize(FileLoadContext context) {
    Pipeline pipeline = context.getPipeline();
    int newPoolSize = pipeline.getParameters().getFileLoadPoolSize();
    if (newPoolSize != poolSize) {
        poolSize = newPoolSize;
        if (executor instanceof ThreadPoolExecutor) {
            ThreadPoolExecutor pool = (ThreadPoolExecutor) executor;
            pool.setCorePoolSize(newPoolSize);
            pool.setMaximumPoolSize(newPoolSize);
        }
    }
}
From source file:com.redhat.example.rules.runtimestats.DefaultRuleSimulator.java
public void setJittingThreads(int jittingThreads) {
    this.jittingThreads = jittingThreads;
    int n = jittingThreads > 0 ? jittingThreads : Runtime.getRuntime().availableProcessors();
    ThreadPoolExecutor ex = (ThreadPoolExecutor) ExecutorProviderFactory.getExecutorProvider().getExecutor();
    if (n != ex.getCorePoolSize()) {
        ex.setCorePoolSize(n);
        ex.setMaximumPoolSize(n);
    }
}
From source file:com.alibaba.otter.node.etl.extract.extractor.DatabaseExtractor.java
private void adjustPoolSize(int newPoolSize) {
    if (newPoolSize != poolSize) {
        poolSize = newPoolSize;
        if (executor instanceof ThreadPoolExecutor) {
            ThreadPoolExecutor pool = (ThreadPoolExecutor) executor;
            pool.setCorePoolSize(newPoolSize);
            pool.setMaximumPoolSize(newPoolSize);
        }
    }
}
From source file:com.alibaba.otter.node.etl.load.loader.db.DbLoadAction.java
private void adjustPoolSize(DbLoadContext context) {
    Pipeline pipeline = context.getPipeline();
    int newPoolSize = pipeline.getParameters().getLoadPoolSize();
    if (newPoolSize != poolSize) {
        poolSize = newPoolSize;
        if (executor instanceof ThreadPoolExecutor) {
            ThreadPoolExecutor pool = (ThreadPoolExecutor) executor;
            pool.setCorePoolSize(newPoolSize);
            pool.setMaximumPoolSize(newPoolSize);
        }
    }
}
From source file:org.apache.accumulo.tserver.TabletServer.java
@Override
public void run() {
    SecurityUtil.serverLogin(SiteConfiguration.getInstance());

    // To make things easier on users/devs, and to avoid creating an upgrade path to 1.7,
    // we can just make the zookeeper paths before we try to use them.
    try {
        ZooKeeperInitialization.ensureZooKeeperInitialized(ZooReaderWriter.getInstance(),
                ZooUtil.getRoot(getInstance()));
    } catch (KeeperException | InterruptedException e) {
        log.error("Could not ensure that ZooKeeper is properly initialized", e);
        throw new RuntimeException(e);
    }

    Metrics tserverMetrics = metricsFactory.createTabletServerMetrics(this);

    // Register MBeans
    try {
        tserverMetrics.register();
        mincMetrics.register();
        scanMetrics.register();
        updateMetrics.register();
    } catch (Exception e) {
        log.error("Error registering with JMX", e);
    }

    if (null != authKeyWatcher) {
        log.info("Seeding ZooKeeper watcher for authentication keys");
        try {
            authKeyWatcher.updateAuthKeys();
        } catch (KeeperException | InterruptedException e) {
            // TODO Does there need to be a better check? What are the error conditions that we'd fall out here? AUTH_FAILURE?
            // If we get the error, do we just put it on a timer and retry the exists(String, Watcher) call?
            log.error("Failed to perform initial check for authentication tokens in ZooKeeper. Delegation token authentication will be unavailable.", e);
        }
    }

    try {
        clientAddress = startTabletClientService();
    } catch (UnknownHostException e1) {
        throw new RuntimeException("Failed to start the tablet client service", e1);
    }
    announceExistence();

    try {
        walMarker.initWalMarker(getTabletSession());
    } catch (Exception e) {
        log.error("Unable to create WAL marker node in zookeeper", e);
        throw new RuntimeException(e);
    }

    ThreadPoolExecutor distWorkQThreadPool = new SimpleThreadPool(
            getConfiguration().getCount(Property.TSERV_WORKQ_THREADS), "distributed work queue");

    bulkFailedCopyQ = new DistributedWorkQueue(ZooUtil.getRoot(getInstance()) + Constants.ZBULK_FAILED_COPYQ,
            getConfiguration());
    try {
        bulkFailedCopyQ.startProcessing(new BulkFailedCopyProcessor(), distWorkQThreadPool);
    } catch (Exception e1) {
        throw new RuntimeException("Failed to start distributed work queue for copying ", e1);
    }

    try {
        logSorter.startWatchingForRecoveryLogs(distWorkQThreadPool);
    } catch (Exception ex) {
        log.error("Error setting watches for recoveries");
        throw new RuntimeException(ex);
    }

    // Start the thrift service listening for incoming replication requests
    try {
        replicationAddress = startReplicationService();
    } catch (UnknownHostException e) {
        throw new RuntimeException("Failed to start replication service", e);
    }

    // Start the pool to handle outgoing replications
    final ThreadPoolExecutor replicationThreadPool = new SimpleThreadPool(
            getConfiguration().getCount(Property.REPLICATION_WORKER_THREADS), "replication task");
    replWorker.setExecutor(replicationThreadPool);
    replWorker.run();

    // Check the configuration value for the size of the pool and, if changed, resize the pool periodically
    final AccumuloConfiguration aconf = getConfiguration();
    Runnable replicationWorkThreadPoolResizer = new Runnable() {
        @Override
        public void run() {
            int maxPoolSize = aconf.getCount(Property.REPLICATION_WORKER_THREADS);
            if (replicationThreadPool.getMaximumPoolSize() != maxPoolSize) {
                log.info("Resizing thread pool for sending replication work from "
                        + replicationThreadPool.getMaximumPoolSize() + " to " + maxPoolSize);
                replicationThreadPool.setMaximumPoolSize(maxPoolSize);
            }
        }
    };
    SimpleTimer.getInstance(aconf).schedule(replicationWorkThreadPoolResizer, 10000, 30000);

    final long CLEANUP_BULK_LOADED_CACHE_MILLIS = 15 * 60 * 1000;
    SimpleTimer.getInstance(aconf).schedule(new BulkImportCacheCleaner(this), CLEANUP_BULK_LOADED_CACHE_MILLIS,
            CLEANUP_BULK_LOADED_CACHE_MILLIS);

    HostAndPort masterHost;
    while (!serverStopRequested) {
        // send all of the pending messages
        try {
            MasterMessage mm = null;
            MasterClientService.Client iface = null;
            try {
                // wait until a message is ready to send, or a server stop was requested
                while (mm == null && !serverStopRequested) {
                    mm = masterMessages.poll(1000, TimeUnit.MILLISECONDS);
                }
                // have a message to send to the master, so grab a connection
                masterHost = getMasterAddress();
                iface = masterConnection(masterHost);
                TServiceClient client = iface;
                // if the while loop does not execute at all and mm != null,
                // then the finally block should place mm back on the queue
                while (!serverStopRequested && mm != null && client != null
                        && client.getOutputProtocol() != null
                        && client.getOutputProtocol().getTransport() != null
                        && client.getOutputProtocol().getTransport().isOpen()) {
                    try {
                        mm.send(rpcCreds(), getClientAddressString(), iface);
                        mm = null;
                    } catch (TException ex) {
                        log.warn("Error sending message: queuing message again");
                        masterMessages.putFirst(mm);
                        mm = null;
                        throw ex;
                    }
                    // if any messages are immediately available, grab them and send them
                    mm = masterMessages.poll();
                }
            } finally {
                if (mm != null) {
                    masterMessages.putFirst(mm);
                }
                returnMasterConnection(iface);
                sleepUninterruptibly(1, TimeUnit.SECONDS);
            }
        } catch (InterruptedException e) {
            log.info("Interrupt Exception received, shutting down");
            serverStopRequested = true;
        } catch (Exception e) {
            // may have lost connection with the master;
            // loop back to the beginning and wait for a new one
            // this way we survive master failures
            log.error(getClientAddressString() + ": TServerInfo: Exception. Master down?", e);
        }
    }

    // wait for shutdown
    // if the main thread exits before the master listener, the JVM will
    // kill the other threads and finalize objects. We want the shutdown that is
    // running in the master listener thread to complete before this happens.
    // consider making other threads daemon threads so that objects don't
    // get prematurely finalized
    synchronized (this) {
        while (shutdownComplete == false) {
            try {
                this.wait(1000);
            } catch (InterruptedException e) {
                log.error(e.toString());
            }
        }
    }

    log.debug("Stopping Replication Server");
    TServerUtils.stopTServer(this.replServer);
    log.debug("Stopping Thrift Servers");
    TServerUtils.stopTServer(server);

    try {
        log.debug("Closing filesystem");
        fs.close();
    } catch (IOException e) {
        log.warn("Failed to close filesystem : {}", e.getMessage(), e);
    }

    gcLogger.logGCInfo(getConfiguration());
    log.info("TServerInfo: stop requested. exiting ... ");

    try {
        tabletServerLock.unlock();
    } catch (Exception e) {
        log.warn("Failed to release tablet server lock", e);
    }
}
From source file:org.apache.hadoop.hbase.ipc.TestFifoRpcScheduler.java
private ThreadPoolExecutor disableHandlers(RpcScheduler scheduler) {
    ThreadPoolExecutor rpcExecutor = null;
    try {
        Field ExecutorField = scheduler.getClass().getDeclaredField("executor");
        ExecutorField.setAccessible(true);
        scheduler.start();
        rpcExecutor = (ThreadPoolExecutor) ExecutorField.get(scheduler);
        rpcExecutor.setMaximumPoolSize(1);
        rpcExecutor.allowCoreThreadTimeOut(true);
        rpcExecutor.setCorePoolSize(0);
        rpcExecutor.setKeepAliveTime(1, TimeUnit.MICROSECONDS);
        // Wait for 2 seconds, so that idle threads will die
        Thread.sleep(2000);
    } catch (NoSuchFieldException e) {
        LOG.error("No such field exception:" + e);
    } catch (IllegalAccessException e) {
        LOG.error("Illegal access exception:" + e);
    } catch (InterruptedException e) {
        LOG.error("Interrupted exception:" + e);
    }
    return rpcExecutor;
}
From source file:org.apache.hama.graph.GraphJobRunner.java
/**
 * Do the main logic of a superstep, namely checking if vertices are active,
 * feeding compute with messages and controlling combiners/aggregators. We
 * iterate over our messages and vertices in sorted order. That means that we
 * need to seek the first vertex that has the same ID as the iterated message.
 */
@SuppressWarnings("unchecked")
private void doSuperstep(GraphJobMessage currentMessage,
        BSPPeer<Writable, Writable, Writable, Writable, GraphJobMessage> peer) throws IOException {
    this.errorCount.set(0);
    long startTime = System.currentTimeMillis();
    this.changedVertexCnt = 0;
    vertices.startSuperstep();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setMaximumPoolSize(conf.getInt(DEFAULT_THREAD_POOL_SIZE, 64));
    executor.setRejectedExecutionHandler(retryHandler);
    long loopStartTime = System.currentTimeMillis();
    while (currentMessage != null) {
        executor.execute(new ComputeRunnable(currentMessage));
        currentMessage = peer.getCurrentMessage();
    }
    LOG.info("Total time spent for superstep-" + peer.getSuperstepCount() + " looping: "
            + (System.currentTimeMillis() - loopStartTime) + " ms");
    executor.shutdown();
    try {
        executor.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
    if (errorCount.get() > 0) {
        throw new IOException("there were " + errorCount + " exceptions during compute vertices.");
    }
    Iterator it = vertices.iterator();
    while (it.hasNext()) {
        Vertex<V, E, M> vertex = (Vertex<V, E, M>) it.next();
        if (!vertex.isHalted() && !vertex.isComputed()) {
            vertex.compute(Collections.<M>emptyList());
            vertices.finishVertexComputation(vertex);
        }
    }
    getAggregationRunner().sendAggregatorValues(peer, vertices.getActiveVerticesNum(), this.changedVertexCnt);
    this.iteration++;
    LOG.info("Total time spent for superstep-" + peer.getSuperstepCount() + " computing vertices: "
            + (System.currentTimeMillis() - startTime) + " ms");
    startTime = System.currentTimeMillis();
    finishSuperstep();
    LOG.info("Total time spent for superstep-" + peer.getSuperstepCount() + " synchronizing: "
            + (System.currentTimeMillis() - startTime) + " ms");
}
From source file:org.apache.hama.graph.GraphJobRunner.java
/**
 * Seed the vertices first with their own values in compute. This is the first
 * superstep after the vertices have been loaded.
 */
private void doInitialSuperstep(BSPPeer<Writable, Writable, Writable, Writable, GraphJobMessage> peer)
        throws IOException {
    this.changedVertexCnt = 0;
    this.errorCount.set(0);
    vertices.startSuperstep();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();
    executor.setMaximumPoolSize(conf.getInt(DEFAULT_THREAD_POOL_SIZE, 64));
    executor.setRejectedExecutionHandler(retryHandler);
    for (V v : vertices.keySet()) {
        executor.execute(new ComputeRunnable(v));
    }
    executor.shutdown();
    try {
        executor.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
    if (errorCount.get() > 0) {
        throw new IOException("there were " + errorCount + " exceptions during compute vertices.");
    }
    getAggregationRunner().sendAggregatorValues(peer, 1, this.changedVertexCnt);
    iteration++;
    finishSuperstep();
}