List of usage examples for the java.util.concurrent.ThreadPoolExecutor constructor
public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, RejectedExecutionHandler handler)
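Before the real-world examples below, here is a minimal, self-contained sketch of this overload. The pool sizes, queue capacity, and saturation policy are illustrative choices, not taken from any of the source files that follow:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolExample {
    public static void main(String[] args) throws InterruptedException {
        // 2 core threads; up to 4 threads when the queue is full;
        // idle non-core threads are reclaimed after 30 seconds.
        // With a bounded queue, CallerRunsPolicy makes the submitting
        // thread run the task itself instead of throwing
        // RejectedExecutionException, which throttles submission.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2, 4, 30, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(16),
                new ThreadPoolExecutor.CallerRunsPolicy());

        for (int i = 0; i < 100; i++) {
            final int task = i;
            pool.execute(() -> System.out.println(
                    "task " + task + " on " + Thread.currentThread().getName()));
        }

        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}

CallerRunsPolicy is one of the four RejectedExecutionHandler implementations shipped with ThreadPoolExecutor (alongside AbortPolicy, DiscardPolicy, and DiscardOldestPolicy). Several of the examples below instead supply a custom handler, or pass an unbounded LinkedBlockingQueue so the handler is never invoked.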
From source file:org.wso2.carbon.analytics.iots.smarthomeanalytics.SensorAgentService.java
public static void initialization() {
    try {
        dataPublisher = new DataPublisher(
                Constants.TRASPORT_LEVEL_PROTOCOL + Constants.SERVER_IP_ADDRESS + ":" + Constants.SERVER_PORT,
                Constants.USERNAME, Constants.PASSWORD);
        if (executorService == null) {
            RejectedExecutionHandler rejectedExecutionHandler = new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        executor.getQueue().put(r);
                    } catch (InterruptedException e) {
                        log.error("Exception while adding event to executor queue : " + e.getMessage(), e);
                    }
                }
            };
            executorService = new ThreadPoolExecutor(Constants.ADAPTER_MIN_THREAD_POOL_SIZE,
                    Constants.ADAPTER_MAX_THREAD_POOL_SIZE, Constants.DEFAULT_KEEP_ALIVE_TIME_IN_MILLS,
                    TimeUnit.MILLISECONDS,
                    new LinkedBlockingQueue<Runnable>(Constants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE),
                    rejectedExecutionHandler);
        }
    } catch (DataEndpointConfigurationException e) {
        log.error("Required fields are missing in Data endpoint configuration file ", e);
    } catch (DataEndpointException e) {
        log.error("Error while connecting to configured endpoint ", e);
    } catch (DataEndpointAgentConfigurationException e) {
        log.error("Required fields are missing in Data endpoint agent configuration file ", e);
    } catch (TransportException e) {
        log.error("Error while connecting to server through Thrift", e);
    } catch (DataEndpointAuthenticationException e) {
        log.error("Please check whether user name and password is correct,", e);
    }
}
From source file:org.apache.hadoop.hbase.util.MultiHConnection.java
private void createBatchPool(Configuration conf) {
    // Use the same config for keep alive as in ConnectionImplementation.getBatchPool();
    int maxThreads = conf.getInt("hbase.multihconnection.threads.max", 256);
    int coreThreads = conf.getInt("hbase.multihconnection.threads.core", 256);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong("hbase.multihconnection.threads.keepalivetime", 60);
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>(maxThreads
            * conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
    ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            workQueue, Threads.newDaemonThreadFactory("MultiHConnection" + "-shared-"));
    tpe.allowCoreThreadTimeOut(true);
    this.batchPool = tpe;
}
From source file:org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler.java
@Override
protected void serviceStart() throws Exception {
    ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder().setNameFormat("CommitterEvent Processor #%d");
    if (jobClassLoader != null) {
        // if the job classloader is enabled, we need to use the job classloader
        // as the thread context classloader (TCCL) of these threads in case the
        // committer needs to load another class via TCCL
        ThreadFactory backingTf = new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread thread = new Thread(r);
                thread.setContextClassLoader(jobClassLoader);
                return thread;
            }
        };
        tfBuilder.setThreadFactory(backingTf);
    }
    ThreadFactory tf = tfBuilder.build();
    launcherPool = new ThreadPoolExecutor(5, 5, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventHandlingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            CommitterEvent event = null;
            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = eventQueue.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, interrupted : " + e);
                    }
                    return;
                }
                // the events from the queue are handled in parallel
                // using a thread pool
                launcherPool.execute(new EventProcessor(event));
            }
        }
    });
    eventHandlingThread.setName("CommitterEvent Handler");
    eventHandlingThread.start();
    super.serviceStart();
}
From source file:com.cloud.agent.Agent.java
public Agent(final IAgentShell shell) {
    _shell = shell;
    _link = null;

    _connection = new NioClient("Agent", _shell.getHost(), _shell.getPort(), _shell.getWorkers(), this);

    Runtime.getRuntime().addShutdownHook(new ShutdownThread(this));

    _ugentTaskPool = new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10,
            TimeUnit.MINUTES, new SynchronousQueue<Runnable>(), new NamedThreadFactory("UgentTask"));

    _executor = new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS,
            new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("agentRequest-Handler"));
}
From source file:codeswarm.code_swarm.java
/**
 * Initialisation
 */
public void setup() {
    width = cfg.getIntProperty(CodeSwarmConfig.WIDTH_KEY, 640);
    if (width <= 0) {
        width = 640;
    }

    height = cfg.getIntProperty(CodeSwarmConfig.HEIGHT_KEY, 480);
    if (height <= 0) {
        height = 480;
    }

    maxBackgroundThreads = cfg.getIntProperty(CodeSwarmConfig.MAX_THREADS_KEY, 4);
    if (maxBackgroundThreads <= 0) {
        maxBackgroundThreads = 4;
    }
    backgroundExecutor = new ThreadPoolExecutor(1, maxBackgroundThreads, Long.MAX_VALUE, TimeUnit.NANOSECONDS,
            new ArrayBlockingQueue<Runnable>(4 * maxBackgroundThreads),
            new ThreadPoolExecutor.CallerRunsPolicy());

    if (cfg.getBooleanProperty(CodeSwarmConfig.USE_OPEN_GL, false)) {
        size(width, height, OPENGL);
    } else {
        size(width, height);
    }

    showLegend = cfg.getBooleanProperty(CodeSwarmConfig.SHOW_LEGEND, false);
    showHistogram = cfg.getBooleanProperty(CodeSwarmConfig.SHOW_HISTORY, false);
    showDate = cfg.getBooleanProperty(CodeSwarmConfig.SHOW_DATE, false);
    showEdges = cfg.getBooleanProperty(CodeSwarmConfig.SHOW_EDGES, false);
    showDebug = cfg.getBooleanProperty(CodeSwarmConfig.SHOW_DEBUG, false);
    takeSnapshots = cfg.getBooleanProperty(CodeSwarmConfig.TAKE_SNAPSHOTS_KEY, false);
    drawNamesSharp = cfg.getBooleanProperty(CodeSwarmConfig.DRAW_NAMES_SHARP, true);
    drawNamesHalos = cfg.getBooleanProperty(CodeSwarmConfig.DRAW_NAMES_HALOS, false);
    drawFilesSharp = cfg.getBooleanProperty(CodeSwarmConfig.DRAW_FILES_SHARP, false);
    drawFilesFuzzy = cfg.getBooleanProperty(CodeSwarmConfig.DRAW_FILES_FUZZY, true);
    drawFilesJelly = cfg.getBooleanProperty(CodeSwarmConfig.DRAW_FILES_JELLY, false);
    background = cfg.getBackground().getRGB();

    UPDATE_DELTA = cfg.getIntProperty(CodeSwarmConfig.MSEC_PER_FRAME_KEY, -1);
    if (UPDATE_DELTA == -1) {
        int framesperday = cfg.getIntProperty(CodeSwarmConfig.FRAMES_PER_DAY_KEY, 4);
        if (framesperday > 0) {
            UPDATE_DELTA = (86400000 / framesperday);
        }
    }
    if (UPDATE_DELTA <= 0) {
        // Default to 4 frames per day.
        UPDATE_DELTA = 21600000;
    }

    isInputSorted = cfg.getBooleanProperty(CodeSwarmConfig.IS_INPUT_SORTED_KEY, false);

    /**
     * This section loads config files and calls the setup method for all physics engines.
     */
    physicsEngineConfigDir = cfg.getStringProperty(CodeSwarmConfig.PHYSICS_ENGINE_CONF_DIR, "physics_engine");
    File f = new File(physicsEngineConfigDir);
    String[] configFiles = null;
    if (f.exists() && f.isDirectory()) {
        configFiles = f.list();
    }
    for (int i = 0; configFiles != null && i < configFiles.length; i++) {
        if (configFiles[i].endsWith(".config")) {
            Properties p = new Properties();
            String ConfigPath = physicsEngineConfigDir + System.getProperty("file.separator") + configFiles[i];
            try {
                p.load(new FileInputStream(ConfigPath));
            } catch (FileNotFoundException e) {
                e.printStackTrace();
                System.exit(1);
            } catch (IOException e) {
                e.printStackTrace();
                System.exit(1);
            }
            String ClassName = p.getProperty("name", "__DEFAULT__");
            if (!ClassName.equals("__DEFAULT__")) {
                PhysicsEngine pe = getPhysicsEngine(ClassName);
                pe.setup(p);
                mPhysicsEngineChoices.add(pe);
            } else {
                logger.error("Skipping config file '" + ConfigPath
                        + "'. Must specify class name via the 'name' parameter.");
                System.exit(1);
            }
        }
    }
    if (mPhysicsEngineChoices.size() == 0) {
        logger.error("No physics engine config files found in '" + physicsEngineConfigDir + "'.");
        System.exit(1);
    }

    // Physics engine configuration and instantiation
    physicsEngineSelection = cfg.getStringProperty(CodeSwarmConfig.PHYSICS_ENGINE_SELECTION,
            PHYSICS_ENGINE_LEGACY);
    for (PhysicsEngine p : mPhysicsEngineChoices) {
        if (physicsEngineSelection.equals(p.getClass().getName())) {
            mPhysicsEngine = p;
        }
    }
    if (mPhysicsEngine == null) {
        logger.error("No physics engine matches your choice of '" + physicsEngineSelection + "'. Check '"
                + physicsEngineConfigDir + "' for options.");
        System.exit(1);
    }

    smooth();
    frameRate(FRAME_RATE);

    // init data structures
    nodes = new CopyOnWriteArrayList<FileNode>();
    edges = new CopyOnWriteArrayList<Edge>();
    people = new CopyOnWriteArrayList<PersonNode>();
    history = new LinkedList<ColorBins>();

    if (isInputSorted) {
        // If the input is sorted, we only need to store the next few events
        eventsQueue = new ArrayBlockingQueue<FileEvent>(5000);
    } else {
        // Otherwise we need to store them all at once in a data structure that will sort them
        eventsQueue = new PriorityBlockingQueue<FileEvent>();
    }

    // Init color map
    initColors();

    loadRepEvents(cfg.getStringProperty(CodeSwarmConfig.INPUT_FILE_KEY)); // event formatted (this is the standard)
    synchronized (this) {
        while (!finishedLoading && eventsQueue.isEmpty()) {
            try {
                wait();
            } catch (InterruptedException e) {
                logger.error("The ready-check thread was interrupted", e);
            }
        }
    }
    prevDate = eventsQueue.peek().getDate();

    SCREENSHOT_FILE = cfg.getStringProperty(CodeSwarmConfig.SNAPSHOT_LOCATION_KEY);
    maxFramesSaved = (int) Math.pow(10, SCREENSHOT_FILE.replaceAll("[^#]", "").length());

    // Create fonts
    String fontName = cfg.getStringProperty(CodeSwarmConfig.FONT_KEY, "SansSerif");
    String fontNameBold = cfg.getStringProperty(CodeSwarmConfig.FONT_KEY_BOLD, "SansSerif");
    Integer fontSize = cfg.getIntProperty(CodeSwarmConfig.FONT_SIZE, 10);
    Integer fontSizeBold = cfg.getIntProperty(CodeSwarmConfig.FONT_SIZE_BOLD, 14);
    font = createFont(fontName, fontSize);
    boldFont = createFont(fontNameBold, fontSizeBold);
    textFont(font);

    // Create the file particle image
    sprite = loadImage(cfg.getStringProperty(CodeSwarmConfig.SPRITE_FILE_KEY, "particle.png"));
    // Add translucency (using itself in this case)
    sprite.mask(sprite);
}
From source file:com.bittorrent.mpetazzoni.client.ConnectionHandler.java
/**
 * Start accepting new connections in a background thread.
 */
public void start() {
    if (this.channel == null) {
        throw new IllegalStateException("Connection handler cannot be recycled!");
    }

    this.stop = false;

    if (this.executor == null || this.executor.isShutdown()) {
        this.executor = new ThreadPoolExecutor(OUTBOUND_CONNECTIONS_POOL_SIZE, OUTBOUND_CONNECTIONS_POOL_SIZE,
                OUTBOUND_CONNECTIONS_THREAD_KEEP_ALIVE_SECS, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), new ConnectorThreadFactory());
    }

    if (this.thread == null || !this.thread.isAlive()) {
        this.thread = new Thread(this);
        this.thread.setName("bt-serve");
        this.thread.start();
    }
}
From source file:org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl.java
@Override
protected void serviceStart() throws Exception {
    client.start();

    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(this.getClass().getName() + " #%d")
            .setDaemon(true).build();

    // Start with a default core-pool size and change it dynamically.
    int initSize = Math.min(INITIAL_THREAD_POOL_SIZE, maxThreadPoolSize);
    threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
            new LinkedBlockingQueue<Runnable>(), tf);

    eventDispatcherThread = new Thread() {
        @Override
        public void run() {
            ContainerEvent event = null;
            Set<String> allNodes = new HashSet<String>();

            while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
                try {
                    event = events.take();
                } catch (InterruptedException e) {
                    if (!stopped.get()) {
                        LOG.error("Returning, thread interrupted", e);
                    }
                    return;
                }

                allNodes.add(event.getNodeId().toString());

                int threadPoolSize = threadPool.getCorePoolSize();

                // We can increase the pool size only if we haven't reached the
                // maximum limit yet.
                if (threadPoolSize != maxThreadPoolSize) {
                    // nodes where containers will run at *this* point of time. This is
                    // *not* the cluster size and doesn't need to be.
                    int nodeNum = allNodes.size();
                    int idealThreadPoolSize = Math.min(maxThreadPoolSize, nodeNum);

                    if (threadPoolSize < idealThreadPoolSize) {
                        // Bump up the pool size to idealThreadPoolSize +
                        // INITIAL_POOL_SIZE, the latter is just a buffer so we are not
                        // always increasing the pool-size
                        int newThreadPoolSize = Math.min(maxThreadPoolSize,
                                idealThreadPoolSize + INITIAL_THREAD_POOL_SIZE);
                        LOG.info("Set NMClientAsync thread pool size to " + newThreadPoolSize
                                + " as the number of nodes to talk to is " + nodeNum);
                        threadPool.setCorePoolSize(newThreadPoolSize);
                    }
                }

                // the events from the queue are handled in parallel with a thread
                // pool
                threadPool.execute(getContainerEventProcessor(event));

                // TODO: Group launching of multiple containers to a single
                // NodeManager into a single connection
            }
        }
    };
    eventDispatcherThread.setName("Container Event Dispatcher");
    eventDispatcherThread.setDaemon(false);
    eventDispatcherThread.start();

    super.serviceStart();
}
From source file:org.apache.solr.handler.component.AlfrescoHttpShardHandlerFactory.java
@Override
public void init(PluginInfo info) {
    NamedList args = info.initArgs;
    this.soTimeout = getParameter(args, HttpClientUtil.PROP_SO_TIMEOUT, soTimeout);
    this.scheme = getParameter(args, INIT_URL_SCHEME, null);
    if (StringUtils.endsWith(this.scheme, "://")) {
        this.scheme = StringUtils.removeEnd(this.scheme, "://");
    }
    this.connectionTimeout = getParameter(args, HttpClientUtil.PROP_CONNECTION_TIMEOUT, connectionTimeout);
    this.maxConnectionsPerHost = getParameter(args, HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST,
            maxConnectionsPerHost);
    this.corePoolSize = getParameter(args, INIT_CORE_POOL_SIZE, corePoolSize);
    this.maximumPoolSize = getParameter(args, INIT_MAX_POOL_SIZE, maximumPoolSize);
    this.keepAliveTime = getParameter(args, MAX_THREAD_IDLE_TIME, keepAliveTime);
    this.queueSize = getParameter(args, INIT_SIZE_OF_QUEUE, queueSize);
    this.accessPolicy = getParameter(args, INIT_FAIRNESS_POLICY, accessPolicy);

    // magic sysprop to make tests reproducible: set by SolrTestCaseJ4.
    String v = System.getProperty("tests.shardhandler.randomSeed");
    if (v != null) {
        r.setSeed(Long.parseLong(v));
    }

    BlockingQueue<Runnable> blockingQueue = (this.queueSize == -1)
            ? new SynchronousQueue<Runnable>(this.accessPolicy)
            : new ArrayBlockingQueue<Runnable>(this.queueSize, this.accessPolicy);

    this.commExecutor = new ThreadPoolExecutor(this.corePoolSize, this.maximumPoolSize, this.keepAliveTime,
            TimeUnit.SECONDS, blockingQueue, new DefaultSolrThreadFactory("httpShardExecutor"));

    ModifiableSolrParams clientParams = new ModifiableSolrParams();
    clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, maxConnectionsPerHost);
    clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 10000);
    clientParams.set(HttpClientUtil.PROP_SO_TIMEOUT, soTimeout);
    clientParams.set(HttpClientUtil.PROP_CONNECTION_TIMEOUT, connectionTimeout);
    clientParams.set(HttpClientUtil.PROP_USE_RETRY, false);
    this.defaultClient = HttpClientUtil.createClient(clientParams);
    this.loadbalancer = createLoadbalancer(defaultClient);
}
From source file:org.apache.hadoop.hbase.thrift.IncrementCoalescer.java
@SuppressWarnings("deprecation") public IncrementCoalescer(HBaseHandler hand) { this.handler = hand; LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(); pool = new ThreadPoolExecutor(CORE_POOL_SIZE, CORE_POOL_SIZE, 50, TimeUnit.MILLISECONDS, queue, Threads.newDaemonThreadFactory("IncrementCoalescer")); MBeanUtil.registerMBean("thrift", "Thrift", this); }