List of usage examples for java.util.concurrent Executors newCachedThreadPool
public static ExecutorService newCachedThreadPool(ThreadFactory threadFactory)
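Before the project examples below, a minimal self-contained sketch of the call itself: newCachedThreadPool(ThreadFactory) behaves like the no-argument variant (an unbounded pool that reuses idle threads and reclaims them after 60 seconds of inactivity) but creates every worker thread through the supplied factory, which is typically used to set thread names and the daemon flag. The class name, thread-name prefix ("demo-cached-"), and tasks in this sketch are illustrative assumptions, not taken from any of the projects listed below.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class CachedPoolDemo {
    public static void main(String[] args) throws InterruptedException {
        // Factory that names each worker thread and marks it as a daemon,
        // so the pool never keeps the JVM alive on its own.
        ThreadFactory factory = new ThreadFactory() {
            private final AtomicLong count = new AtomicLong();

            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r);
                t.setName("demo-cached-" + count.getAndIncrement());
                t.setDaemon(true);
                return t;
            }
        };

        ExecutorService pool = Executors.newCachedThreadPool(factory);
        for (int i = 0; i < 5; i++) {
            final int task = i;
            pool.submit(() -> System.out.println(Thread.currentThread().getName() + " ran task " + task));
        }
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}

Most of the examples below follow the same pattern: a factory that names (and sometimes daemonizes) threads, submitted tasks, then an explicit shutdown.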
From source file:com.adaptris.core.services.splitter.ServiceWorkerPool.java
/**
 * @deprecated since 3.8.3 switch to commons-pool2 instead.
 */
@Deprecated
@Removal(version = "3.9.0", message = "use commons-pool2 variant instead")
public void warmup(final org.apache.commons.pool.impl.GenericObjectPool<Worker> objectPool) throws CoreException {
    logDeprecationWarning();
    ExecutorService populator = Executors
            .newCachedThreadPool(new ManagedThreadFactory(this.getClass().getSimpleName()));
    try {
        log.trace("Warming up {} service-workers", maxThreads);
        final List<Future<Worker>> futures = new ArrayList<>(maxThreads);
        for (int i = 0; i < maxThreads; i++) {
            futures.add(populator.submit(new Callable<Worker>() {
                @Override
                public Worker call() throws Exception {
                    return objectPool.borrowObject();
                }
            }));
        }
        for (Worker w : waitFor(futures)) {
            objectPool.returnObject(w);
        }
        log.trace("ObjectPool contains {} (active) of {} objects", objectPool.getNumActive(),
                objectPool.getNumIdle());
    } catch (Exception e) {
        throw ExceptionHelper.wrapCoreException(e);
    } finally {
        populator.shutdownNow();
    }
}
From source file:com.datatorrent.lib.io.WebSocketInputOperator.java
@Override
public void run() {
    try {
        connectionClosed = false;
        AsyncHttpClientConfigBean config = new AsyncHttpClientConfigBean();
        config.setIoThreadMultiplier(ioThreadMultiplier);
        config.setApplicationThreadPool(Executors.newCachedThreadPool(new ThreadFactory() {
            private long count = 0;

            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r);
                t.setName(ClassUtils.getShortClassName(this.getClass()) + "-AsyncHttpClient-" + count++);
                return t;
            }
        }));
        if (client != null) {
            client.closeAsynchronously();
        }
        client = new AsyncHttpClient(config);
        connection = client.prepareGet(uri.toString()).execute(
                new WebSocketUpgradeHandler.Builder().addWebSocketListener(new WebSocketTextListener() {
                    @Override
                    public void onMessage(String string) {
                        LOG.debug("Got: " + string);
                        try {
                            T o = convertMessage(string);
                            if (!(skipNull && o == null)) {
                                outputPort.emit(o);
                            }
                        } catch (IOException ex) {
                            LOG.error("Got exception: ", ex);
                        }
                    }

                    @Override
                    public void onOpen(WebSocket ws) {
                        LOG.debug("Connection opened");
                    }

                    @Override
                    public void onClose(WebSocket ws) {
                        LOG.debug("Connection closed.");
                        connectionClosed = true;
                    }

                    @Override
                    public void onError(Throwable t) {
                        LOG.error("Caught exception", t);
                    }
                }).build()).get(5, TimeUnit.SECONDS);
    } catch (Exception ex) {
        LOG.error("Error reading from " + uri, ex);
        if (client != null) {
            client.close();
        }
        connectionClosed = true;
    }
}
From source file:com.twitter.aurora.scheduler.state.CronJobManager.java
@Inject
CronJobManager(StateManager stateManager, Storage storage, CronScheduler cron,
        ShutdownRegistry shutdownRegistry) {
    this(stateManager, storage, cron, shutdownRegistry, Executors.newCachedThreadPool(
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("CronDelay-%d").build()));
}
From source file:com.datatorrent.lib.io.WebSocketOutputOperator.java
private void openConnection() throws IOException, ExecutionException, InterruptedException, TimeoutException {
    final AsyncHttpClientConfigBean config = new AsyncHttpClientConfigBean();
    config.setIoThreadMultiplier(ioThreadMultiplier);
    config.setApplicationThreadPool(Executors.newCachedThreadPool(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName(ClassUtils.getShortClassName(this.getClass()) + "-AsyncHttpClient-" + count++);
            return t;
        }
    }));
    client = new AsyncHttpClient(config);
    uri = URI.create(uri.toString()); // force reparse after deserialization
    LOG.info("Opening URL: {}", uri);
    connection = client.prepareGet(uri.toString())
            .execute(new WebSocketUpgradeHandler.Builder().addWebSocketListener(new WebSocketTextListener() {
                @Override
                public void onMessage(String string) {
                }

                @Override
                public void onOpen(WebSocket ws) {
                    LOG.debug("Connection opened");
                }

                @Override
                public void onClose(WebSocket ws) {
                    LOG.debug("Connection closed.");
                }

                @Override
                public void onError(Throwable t) {
                    LOG.error("Caught exception", t);
                }
            }).build()).get(5, TimeUnit.SECONDS);
}
From source file:com.flipkart.phantom.runtime.impl.server.oio.UDSOIOServer.java
/**
 * Interface method implementation. Creates worker thread pool if required and then calls
 * {@link #afterPropertiesSet()} on the super class
 * @see org.springframework.beans.factory.InitializingBean#afterPropertiesSet()
 */
public void afterPropertiesSet() throws Exception {
    File[] junixDirectories = FileLocator.findDirectories(this.junixNativeLibDirectoryName, null);
    if (junixDirectories == null || junixDirectories.length == 0) {
        throw new RuntimeException("Did not find junixDirectory: " + junixNativeLibDirectoryName);
    }
    LOGGER.info("Found junixDirectory: " + junixDirectories[0].getAbsolutePath());
    System.setProperty(JUNIX_LIB_SYSTEM_PROPERTY, junixDirectories[0].getAbsolutePath());

    // Required properties
    Assert.notNull(this.socketDir, "socketDir is a required property for UDSNetworkServer");
    Assert.notNull(this.socketName, "socketName is a required property for UDSNetworkServer");

    // Create the socket file
    this.socketFile = new File(new File(this.socketDir), this.socketName);

    // Create socket address
    LOGGER.info("Socket file: " + this.socketFile.getAbsolutePath());
    try {
        this.socketAddress = new AFUNIXSocketAddress(this.socketFile);
        this.socket = AFUNIXServerSocket.newInstance();
        this.socket.bind(this.socketAddress);
    } catch (IOException e) {
        throw new RuntimeException("Error creating Socket Address. ", e);
    }

    if (this.getWorkerExecutors() == null) { // no executors have been set for workers
        if (this.getWorkerPoolSize() != UDSOIOServer.INVALID_POOL_SIZE) {
            // thread pool size has been set. create and use a fixed thread pool
            this.setWorkerExecutors(Executors.newFixedThreadPool(this.getWorkerPoolSize(),
                    new NamedThreadFactory("UDSOIOServer-Worker")));
        } else {
            // default behavior of creating and using a cached thread pool
            this.setWorkerExecutors(
                    Executors.newCachedThreadPool(new NamedThreadFactory("UDSOIOServer-Worker")));
        }
    }
    super.afterPropertiesSet();
    LOGGER.info("UDS Server startup complete");
}
From source file:ee.ria.xroad.proxy.clientproxy.ClientMessageProcessor.java
private static ExecutorService createSoapHandlerExecutor() {
    return Executors.newCachedThreadPool(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread handlerThread = new Thread(r);
            handlerThread.setName(Thread.currentThread().getName() + "-soap");
            return handlerThread;
        }
    });
}
From source file:com.ebay.jetstream.event.processor.esper.raw.EsperTest.java
@Ignore
public void multithreadingTest() {
    Configuration configuration = new Configuration();
    configuration.configure(
            new File("src/test/java/com/ebay/jetstream/event/processor/esper/raw/EsperTestConfig.xml"));
    EPServiceProvider epService = EPServiceProviderManager.getProvider("EsperTest", configuration);
    EsperTestStatement esperStmt = new EsperTestStatement(epService.getEPAdministrator());

    EsperTestSubscriber subscriber = new EsperTestSubscriber();
    EsperTestListener listener = new EsperTestListener();
    esperStmt.setSubscriber(subscriber);
    esperStmt.addListener(listener);

    ExecutorService threadPool = Executors.newCachedThreadPool(new EsperTestThreadFactory());
    EsperTestRunnable runnables[] = new EsperTestRunnable[THREADS_NUM];
    try {
        for (int i = 0; i < THREADS_NUM; i++) {
            runnables[i] = new EsperTestRunnable(epService, i);
            threadPool.submit(runnables[i]);
        }
        threadPool.shutdown();
        threadPool.awaitTermination(200, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        fail("InterruptedException: " + e.getMessage());
    }
    assertTrue("ExecutorService failed to shut down properly", threadPool.isShutdown());

    log.info("[" + subscriber.getIds().first() + "," + subscriber.getIds().last() + "]");
    assertEquals(THREADS_NUM, subscriber.getCount());
    log.info("[" + listener.getIds().first() + "," + listener.getIds().last() + "]");
    assertEquals(THREADS_NUM, listener.getCount());
    assertEquals(THREADS_NUM, listener.getNewCount());
    assertEquals(0, listener.getOldCount());
}
From source file:com.scaleoutsoftware.soss.hserver.hadoop.DistributedCacheManager.java
/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 * @param conf job configuration
 * @throws IOException
 */
public void setup(Configuration conf) throws IOException {
    // If we are not the 0th worker, wait for the 0th worker to set up the cache
    if (InvocationWorker.getIgWorkerIndex() > 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().waitForComplete(ACTION_NAME, SYNCHRONIZATION_WAIT_MS,
                    WAIT_GRANULARITY_MS);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return;
    }

    File workDir = new File(System.getProperty("user.dir"));

    // Generate YARN local resources objects corresponding to the distributed
    // cache configuration
    Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();
    MRApps.setupDistributedCache(conf, localResources);

    // CODE CHANGE FROM ORIGINAL FILE:
    // We need to clear the resources from jar files, since they are distributed through the IG.
    Iterator<Map.Entry<String, LocalResource>> iterator = localResources.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, LocalResource> entry = iterator.next();
        if (entry.getKey().endsWith(".jar")) {
            iterator.remove();
        }
    }

    // Generating unique numbers for FSDownload.
    AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());

    // Find which resources are to be put on the local classpath
    Map<String, Path> classpaths = new HashMap<String, Path>();
    Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
    if (archiveClassPaths != null) {
        for (Path p : archiveClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }
    Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
    if (fileClassPaths != null) {
        for (Path p : fileClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    // Localize the resources
    LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    FileContext localFSFileContext = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    ExecutorService exec = null;
    try {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
        exec = Executors.newCachedThreadPool(tf);
        Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
        Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
        for (LocalResource resource : localResources.values()) {
            Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
                    new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())), resource);
            Future<Path> future = exec.submit(download);
            resourcesToPaths.put(resource, future);
        }
        for (Entry<String, LocalResource> entry : localResources.entrySet()) {
            LocalResource resource = entry.getValue();
            Path path;
            try {
                path = resourcesToPaths.get(resource).get();
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e);
            }
            String pathString = path.toUri().toString();
            String link = entry.getKey();
            String target = new File(path.toUri()).getPath();
            symlink(workDir, target, link);

            if (resource.getType() == LocalResourceType.ARCHIVE) {
                localArchives.add(pathString);
            } else if (resource.getType() == LocalResourceType.FILE) {
                localFiles.add(pathString);
            } else if (resource.getType() == LocalResourceType.PATTERN) {
                // PATTERN is not currently used in local mode
                throw new IllegalArgumentException(
                        "Resource type PATTERN is not " + "implemented yet. " + resource.getResource());
            }
            Path resourcePath;
            try {
                resourcePath = ConverterUtils.getPathFromYarnURL(resource.getResource());
            } catch (URISyntaxException e) {
                throw new IOException(e);
            }
            LOG.info(String.format("Localized %s as %s", resourcePath, path));
            String cp = resourcePath.toUri().getPath();
            if (classpaths.keySet().contains(cp)) {
                localClasspaths.add(path.toUri().getPath().toString());
            }
        }
    } finally {
        if (exec != null) {
            exec.shutdown();
        }
    }

    // Update the configuration object with localized data.
    if (!localArchives.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
                StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
    }
    if (!localFiles.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALFILES,
                StringUtils.arrayToString(localFiles.toArray(new String[localFiles.size()])));
    }
    setupCalled = true;

    // If we are the 0th worker, signal action complete
    if (InvocationWorker.getIgWorkerIndex() == 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().signalComplete(ACTION_NAME);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
From source file:com.adaptris.core.services.splitter.ServiceWorkerPool.java
public void warmup(final GenericObjectPool<Worker> objectPool) throws CoreException {
    ExecutorService populator = Executors
            .newCachedThreadPool(new ManagedThreadFactory(this.getClass().getSimpleName()));
    try {
        log.trace("Warming up {} service-workers", maxThreads);
        final List<Future<Worker>> futures = new ArrayList<>(maxThreads);
        for (int i = 0; i < maxThreads; i++) {
            futures.add(populator.submit(new Callable<Worker>() {
                @Override
                public Worker call() throws Exception {
                    return objectPool.borrowObject();
                }
            }));
        }
        for (Worker w : waitFor(futures)) {
            objectPool.returnObject(w);
        }
        log.trace("ObjectPool contains {} (active) of {} objects", objectPool.getNumActive(),
                objectPool.getNumIdle());
    } catch (Exception e) {
        throw ExceptionHelper.wrapCoreException(e);
    } finally {
        populator.shutdownNow();
    }
}
From source file:com.dattack.dbtools.drules.engine.DrulesEngine.java
private static SourceResultGroup getSourceResultsList(final List<SourceBean> sourceList)
        throws DrulesNestableException {
    final ExecutorService executorService = Executors.newCachedThreadPool(createThreadFactory());
    final List<Future<SourceResult>> futureList = new ArrayList<>();

    for (final SourceBean sourceBean : sourceList) {
        futureList.add(executorService.submit(new SourceExecutor(sourceBean,
                ConfigurationUtils.cloneConfiguration(ThreadContext.getInstance().getConfiguration()))));
    }

    final SourceResultGroup sourceResultList = new SourceResultGroup();
    for (final Future<SourceResult> future : futureList) {
        try {
            sourceResultList.add(future.get());
        } catch (InterruptedException | ExecutionException e) {
            throw new DrulesNestableException(e);
        }
    }
    executorService.shutdown();
    return sourceResultList;
}