List of usage examples for java.util.concurrent.ThreadFactory
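Before the individual examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: pass a custom ThreadFactory to an Executors factory method so that pool threads get a recognizable name and, usually, daemon status. The class name and the "worker-" prefix below are illustrative only and are not taken from any of the projects listed on this page.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class NamedDaemonThreadFactory implements ThreadFactory {
    private final String prefix;
    private final AtomicLong counter = new AtomicLong(0);

    public NamedDaemonThreadFactory(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public Thread newThread(Runnable r) {
        Thread t = new Thread(r, prefix + counter.getAndIncrement());
        t.setDaemon(true);                   // do not keep the JVM alive for pool threads
        t.setPriority(Thread.NORM_PRIORITY); // normalize any priority inherited from the caller
        return t;
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2, new NamedDaemonThreadFactory("worker-"));
        pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}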
From source file:org.hyperic.hq.measurement.agent.server.TopNScheduler.java
private void createScheduler() {
    scheduler = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        private final AtomicLong i = new AtomicLong(0);

        public Thread newThread(Runnable r) {
            return new Thread(r, "TopNScheduler" + i.getAndIncrement());
        }
    });
}
From source file:org.apache.hadoop.hbase.wal.LogRecoveredEditsOutputSink.java
/**
 * Close all of the output streams.
 * @return the list of paths written.
 */
List<Path> close() throws IOException {
    Preconditions.checkState(!closeAndCleanCompleted);

    final List<Path> paths = new ArrayList<>();
    final List<IOException> thrown = Lists.newArrayList();
    ThreadPoolExecutor closeThreadPool = Threads.getBoundedCachedThreadPool(numThreads, 30L, TimeUnit.SECONDS,
        new ThreadFactory() {
            private int count = 1;

            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "split-log-closeStream-" + count++);
                return t;
            }
        });
    CompletionService<Void> completionService = new ExecutorCompletionService<>(closeThreadPool);
    boolean progress_failed;
    try {
        progress_failed = executeCloseTask(completionService, thrown, paths);
    } catch (InterruptedException e) {
        IOException iie = new InterruptedIOException();
        iie.initCause(e);
        throw iie;
    } catch (ExecutionException e) {
        throw new IOException(e.getCause());
    } finally {
        closeThreadPool.shutdownNow();
    }
    if (!thrown.isEmpty()) {
        throw MultipleIOException.createIOException(thrown);
    }
    writersClosed = true;
    closeAndCleanCompleted = true;
    if (progress_failed) {
        return null;
    }
    return paths;
}
From source file:ee.ria.xroad.proxy.clientproxy.ClientMessageProcessor.java
private static ExecutorService createSoapHandlerExecutor() {
    return Executors.newCachedThreadPool(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread handlerThread = new Thread(r);
            handlerThread.setName(Thread.currentThread().getName() + "-soap");
            return handlerThread;
        }
    });
}
From source file:org.apache.bookkeeper.replication.Auditor.java
public Auditor(final String bookieIdentifier, ServerConfiguration conf, ZooKeeper zkc, StatsLogger statsLogger)
        throws UnavailableException {
    this.conf = conf;
    this.bookieIdentifier = bookieIdentifier;
    this.statsLogger = statsLogger;

    numUnderReplicatedLedger = this.statsLogger.getOpStatsLogger(ReplicationStats.NUM_UNDER_REPLICATED_LEDGERS);
    uRLPublishTimeForLostBookies = this.statsLogger
            .getOpStatsLogger(ReplicationStats.URL_PUBLISH_TIME_FOR_LOST_BOOKIE);
    bookieToLedgersMapCreationTime = this.statsLogger
            .getOpStatsLogger(ReplicationStats.BOOKIE_TO_LEDGERS_MAP_CREATION_TIME);
    checkAllLedgersTime = this.statsLogger.getOpStatsLogger(ReplicationStats.CHECK_ALL_LEDGERS_TIME);
    numLedgersChecked = this.statsLogger.getCounter(ReplicationStats.NUM_LEDGERS_CHECKED);
    numFragmentsPerLedger = statsLogger.getOpStatsLogger(ReplicationStats.NUM_FRAGMENTS_PER_LEDGER);
    numBookiesPerLedger = statsLogger.getOpStatsLogger(ReplicationStats.NUM_BOOKIES_PER_LEDGER);
    numBookieAuditsDelayed = this.statsLogger.getCounter(ReplicationStats.NUM_BOOKIE_AUDITS_DELAYED);
    numDelayedBookieAuditsCancelled = this.statsLogger
            .getCounter(ReplicationStats.NUM_DELAYED_BOOKIE_AUDITS_DELAYES_CANCELLED);

    initialize(conf, zkc);

    executor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "AuditorBookie-" + bookieIdentifier);
            t.setDaemon(true);
            return t;
        }
    });
}
From source file:org.apache.hc.core5.http.benchmark.HttpBenchmark.java
public Results doExecute() throws Exception {

    final URL url = config.getUrl();
    final long endTime = System.currentTimeMillis() + config.getTimeLimit() * 1000;
    final HttpHost host = new HttpHost(url.getHost(), url.getPort(), url.getProtocol());

    final ThreadPoolExecutor workerPool = new ThreadPoolExecutor(config.getThreads(), config.getThreads(), 5,
            TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {

                @Override
                public Thread newThread(final Runnable r) {
                    return new Thread(r, "ClientPool");
                }

            });
    workerPool.prestartAllCoreThreads();

    SocketFactory socketFactory = null;
    if ("https".equals(host.getSchemeName())) {
        final SSLContextBuilder sslContextBuilder = new SSLContextBuilder();
        sslContextBuilder.setProtocol("SSL");
        if (config.isDisableSSLVerification()) {
            sslContextBuilder.loadTrustMaterial(null, new TrustStrategy() {

                @Override
                public boolean isTrusted(final X509Certificate[] chain, final String authType)
                        throws CertificateException {
                    return true;
                }

            });
        } else if (config.getTrustStorePath() != null) {
            sslContextBuilder.loadTrustMaterial(new File(config.getTrustStorePath()),
                    config.getTrustStorePassword() != null ? config.getTrustStorePassword().toCharArray() : null);
        }
        if (config.getIdentityStorePath() != null) {
            sslContextBuilder.loadKeyMaterial(new File(config.getIdentityStorePath()),
                    config.getIdentityStorePassword() != null
                            ? config.getIdentityStorePassword().toCharArray() : null,
                    config.getIdentityStorePassword() != null
                            ? config.getIdentityStorePassword().toCharArray() : null);
        }
        final SSLContext sslContext = sslContextBuilder.build();
        socketFactory = sslContext.getSocketFactory();
    }

    final BenchmarkWorker[] workers = new BenchmarkWorker[config.getThreads()];
    for (int i = 0; i < workers.length; i++) {
        workers[i] = new BenchmarkWorker(host, createRequest(host), socketFactory, config);
        workerPool.execute(workers[i]);
    }

    while (workerPool.getCompletedTaskCount() < config.getThreads()) {
        Thread.yield();
        try {
            Thread.sleep(1000);
        } catch (final InterruptedException ignore) {
        }
        if (config.getTimeLimit() != -1 && System.currentTimeMillis() > endTime) {
            for (int i = 0; i < workers.length; i++) {
                workers[i].setShutdownSignal();
            }
        }
    }

    workerPool.shutdown();

    return ResultProcessor.collectResults(workers, host, config.getUrl().toString());
}
From source file:com.reactivetechnologies.analytics.core.IncrementalClassifierBean.java
@PostConstruct
void init() {
    loadAndInitializeModel();
    log.info((isUpdateable() ? "UPDATEABLE " : "NON-UPDATEABLE ") + "** Weka Classifier loaded [" + clazzifier
            + "] **");
    if (log.isDebugEnabled()) {
        log.debug("weka.classifier.tokenize? " + filterDataset);
        log.debug("weka.classifier.tokenize.options: " + filterOpts);
        log.debug("weka.classifier.build.batchSize: " + instanceBatchSize);
        log.debug("weka.classifier.build.intervalSecs: " + delay);
        log.debug("weka.classifier.build.maxIdleSecs: " + maxIdle);
    }
    worker = Executors.newSingleThreadExecutor(new ThreadFactory() {

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "RegressionBean.Worker.Thread");
            return t;
        }
    });
    worker.submit(new EventConsumer());

    timer = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "RegressionBean.Timer.Thread");
            t.setDaemon(true);
            return t;
        }
    });
    ((ScheduledExecutorService) timer).scheduleWithFixedDelay(new EventTimer(), delay, delay, TimeUnit.SECONDS);
}
From source file:org.apache.nifi.bootstrap.RunNiFi.java
public RunNiFi(final File bootstrapConfigFile, final boolean verbose) throws IOException {
    this.bootstrapConfigFile = bootstrapConfigFile;

    loggingExecutor = Executors.newFixedThreadPool(2, new ThreadFactory() {
        @Override
        public Thread newThread(final Runnable runnable) {
            final Thread t = Executors.defaultThreadFactory().newThread(runnable);
            t.setDaemon(true);
            t.setName("NiFi logging handler");
            return t;
        }
    });

    serviceManager = loadServices();
}
From source file:org.apache.http.benchmark.HttpBenchmark.java
public String execute() throws Exception {

    prepare();

    ThreadPoolExecutor workerPool = new ThreadPoolExecutor(config.getThreads(), config.getThreads(), 5,
            TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {

                public Thread newThread(Runnable r) {
                    return new Thread(r, "ClientPool");
                }

            });
    workerPool.prestartAllCoreThreads();

    BenchmarkWorker[] workers = new BenchmarkWorker[config.getThreads()];
    for (int i = 0; i < workers.length; i++) {
        workers[i] = new BenchmarkWorker(params, config.getVerbosity(), request[i], host, config.getRequests(),
                config.isKeepAlive(), config.isDisableSSLVerification(), config.getTrustStorePath(),
                config.getTrustStorePassword(), config.getIdentityStorePath(), config.getIdentityStorePassword());
        workerPool.execute(workers[i]);
    }

    while (workerPool.getCompletedTaskCount() < config.getThreads()) {
        Thread.yield();
        try {
            Thread.sleep(1000);
        } catch (InterruptedException ignore) {
        }
    }
    workerPool.shutdown();

    return ResultProcessor.printResults(workers, host, config.getUrl().toString(), contentLength);
}
From source file:org.archive.modules.postprocessor.KafkaCrawlLogFeed.java
protected KafkaProducer<String, byte[]> kafkaProducer() {
    if (kafkaProducer == null) {
        synchronized (this) {
            if (kafkaProducer == null) {
                final Properties props = new Properties();
                props.put("bootstrap.servers", getBrokerList());
                props.put("acks", "1");
                props.put("producer.type", "async");
                props.put("key.serializer", StringSerializer.class.getName());
                props.put("value.serializer", ByteArraySerializer.class.getName());

                /*
                 * XXX This mess here exists so that the kafka producer
                 * thread is in a thread group that is not the ToePool,
                 * so that it doesn't get interrupted at the end of the
                 * crawl in ToePool.cleanup().
                 */
                kafkaProducerThreads = new ThreadGroup(Thread.currentThread().getThreadGroup().getParent(),
                        "KafkaProducerThreads");
                ThreadFactory threadFactory = new ThreadFactory() {
                    public Thread newThread(Runnable r) {
                        return new Thread(kafkaProducerThreads, r);
                    }
                };
                Callable<KafkaProducer<String, byte[]>> task = new Callable<KafkaProducer<String, byte[]>>() {
                    public KafkaProducer<String, byte[]> call() throws InterruptedException {
                        return new KafkaProducer<String, byte[]>(props);
                    }
                };
                ExecutorService executorService = Executors.newFixedThreadPool(1, threadFactory);
                Future<KafkaProducer<String, byte[]>> future = executorService.submit(task);
                try {
                    kafkaProducer = future.get();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                } catch (ExecutionException e) {
                    throw new RuntimeException(e);
                } finally {
                    executorService.shutdown();
                }
            }
        }
    }
    return kafkaProducer;
}
From source file:com.ery.estorm.util.Threads.java
/**
 * Get a named {@link ThreadFactory} that just builds daemon threads.
 *
 * @param prefix
 *            name prefix for all threads created from the factory
 * @param handler
 *            uncaught exception handler to set for all threads
 * @return a thread factory that creates named, daemon threads with the supplied exception handler and normal priority
 */
public static ThreadFactory newDaemonThreadFactory(final String prefix, final UncaughtExceptionHandler handler) {
    final ThreadFactory namedFactory = getNamedThreadFactory(prefix);
    return new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = namedFactory.newThread(r);
            if (handler != null) {
                t.setUncaughtExceptionHandler(handler);
            }
            if (!t.isDaemon()) {
                t.setDaemon(true);
            }
            if (t.getPriority() != Thread.NORM_PRIORITY) {
                t.setPriority(Thread.NORM_PRIORITY);
            }
            return t;
        }
    };
}
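A hypothetical caller of the factory above might look like the fragment below (imports omitted; the "estorm-worker" prefix and the handler are illustrative, not taken from the source file):

// Build a pool whose threads are named, daemon, and report uncaught exceptions.
Thread.UncaughtExceptionHandler handler =
        (t, e) -> System.err.println("Uncaught exception in " + t.getName() + ": " + e);
ExecutorService pool = Executors.newFixedThreadPool(4,
        Threads.newDaemonThreadFactory("estorm-worker", handler));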