List of usage examples for java.util.concurrent Executors defaultThreadFactory
public static ThreadFactory defaultThreadFactory()
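The default factory creates non-daemon threads at normal priority with names of the form pool-N-thread-M; it is what the Executors.new*Pool methods use when no factory is supplied. The examples below all follow the same pattern: delegate thread creation to the default factory, then rename the returned thread and/or mark it as a daemon. A minimal self-contained sketch of that pattern (the "my-app-" prefix and pool size are illustrative choices, not taken from any of the projects below):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;

public class DefaultThreadFactoryDemo {
    public static void main(String[] args) throws Exception {
        // Delegate to the JDK default factory, then customize the thread it hands back.
        ThreadFactory factory = runnable -> {
            Thread t = Executors.defaultThreadFactory().newThread(runnable);
            t.setName("my-app-" + t.getName()); // e.g. "my-app-pool-1-thread-1"
            t.setDaemon(true);                  // the default factory returns non-daemon threads
            return t;
        };

        ExecutorService pool = Executors.newFixedThreadPool(2, factory);
        pool.submit(() -> System.out.println("running on " + Thread.currentThread().getName()));
        pool.shutdown();
    }
}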
From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java
SQLPaxosLogger(int id, String strID, String dbPath, PaxosMessenger<?> messenger) {
    super(id, dbPath, messenger);
    this.strID = strID;
    GC = Executors.newScheduledThreadPool(2, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread thread = Executors.defaultThreadFactory().newThread(r);
            thread.setName(SQLPaxosLogger.class.getSimpleName() + ":" + strID);
            return thread;
        }
    }); // new Timer(strID);
    addDerbyLogger(this);
    this.journaler = new Journaler(this.logDirectory, this.strID/* this.myID */);
    this.deleteTmpJournalFiles();

    this.mapDB = USE_MAP_DB
            ? new MapDBContainer(DBMaker.fileDB(new File(this.getLogIndexDBPrefix())).make(),
                    DBMaker.memoryDB().transactionDisable().make())
            : null;

    Diskable<String, LogIndex> disk = new Diskable<String, LogIndex>() {
        @Override
        public Set<String> commit(Map<String, LogIndex> toCommit) throws IOException {
            return SQLPaxosLogger.this.pauseLogIndex(toCommit);
        }

        @Override
        public LogIndex restore(String key) throws IOException {
            return SQLPaxosLogger.this.unpauseLogIndex(key);
        }

        public String toString() {
            return MessageLogDiskMap.class.getSimpleName() + SQLPaxosLogger.this.strID;
        }
    };

    this.messageLog = USE_MAP_DB
            ? new MessageLogMapDB(this.mapDB.inMemory, this.mapDB.onDisk, disk)
            : USE_DISK_MAP ? new MessageLogDiskMap(disk) : new MessageLogPausable(disk);

    // will set up db, connection, tables, etc. as needed
    if (!initialize(true))
        throw new RuntimeException(
                "Unable to initiate " + PaxosManager.class.getSimpleName() + " for " + id);
}
From source file:metlos.executors.batch.BatchExecutor.java
/**
 * @param corePoolSize
 * @param maximumPoolSize
 * @param keepAliveTime
 * @param unit
 */
public BatchExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit) {
    this(corePoolSize, maximumPoolSize, keepAliveTime, unit, Executors.defaultThreadFactory(),
            DEFAULT_REJECTED_EXECUTION_HANDLER, new TaskQueue<BatchReferringRunnable<?>>());
}
From source file:password.pwm.util.Helper.java
public static ThreadFactory makePwmThreadFactory(final String namePrefix, final boolean daemon) {
    return new ThreadFactory() {
        private final ThreadFactory realThreadFactory = Executors.defaultThreadFactory();

        @Override
        public Thread newThread(final Runnable r) {
            final Thread t = realThreadFactory.newThread(r);
            t.setDaemon(daemon);
            if (namePrefix != null) {
                final String newName = namePrefix + t.getName();
                t.setName(newName);
            }
            return t;
        }
    };
}
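A minimal usage sketch for the factory above, assuming password.pwm.util.Helper from this example is on the classpath; the "pwm-" prefix, pool size, and schedule are illustrative choices, not taken from the project:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import password.pwm.util.Helper;

public class PwmThreadFactoryUsage {
    public static void main(String[] args) throws Exception {
        // Daemon threads named "pwm-pool-N-thread-M" (the prefix is prepended to the default name).
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1,
                Helper.makePwmThreadFactory("pwm-", true));
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("running on " + Thread.currentThread().getName()),
                0, 30, TimeUnit.SECONDS);
        Thread.sleep(1000); // daemon threads will not keep the JVM alive once main() exits
    }
}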
From source file:edu.umass.cs.gigapaxos.PaxosManager.java
/**
 * @param id
 *            My node ID.
 * @param unstringer
 *            An instance of Stringifiable that can convert String to
 *            NodeIDType.
 * @param niot
 *            InterfaceNIOTransport or InterfaceMessenger object used for
 *            messaging.
 * @param pi
 *            InterfaceReplicable application controlled by gigapaxos.
 *            Currently, all paxos instances must correspond to a single
 *            umbrella application even though each createPaxosInstance
 *            method explicitly specifies the app and this information is
 *            stored explicitly inside a paxos instance. The reason for the
 *            single umbrella app restriction is that we won't have a
 *            pointer to the appropriate app upon recovery otherwise.
 * @param paxosLogFolder
 *            Paxos logging folder.
 * @param enableNullCheckpoints
 *            Whether null checkpoints are enabled. We need this flag to be
 *            enabled if we intend to reconfigure paxos groups managed by
 *            this PaxosManager. Otherwise, we can not distinguish between a
 *            null checkpoint and no checkpoint, so the next epoch members
 *            may be waiting forever for the previous epoch's final state
 *            (that happens to be null). This flag needs to be set at
 *            construction time and can not be changed thereafter.
 */
public PaxosManager(NodeIDType id, Stringifiable<NodeIDType> unstringer,
        InterfaceNIOTransport<NodeIDType, JSONObject> niot, Replicable pi, String paxosLogFolder,
        boolean enableNullCheckpoints) {
    this.myID = this.integerMap.put(id); // id.hashCode();
    this.executor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread thread = Executors.defaultThreadFactory().newThread(r);
            thread.setName(PaxosManager.class.getSimpleName() + myID);
            return thread;
        }
    });
    this.unstringer = unstringer;
    this.largeCheckpointer = new LargeCheckpointer(paxosLogFolder, this.myID + "");
    this.myApp = LargeCheckpointer.wrap(pi, largeCheckpointer);
    this.FD = new FailureDetection<NodeIDType>(id, niot, paxosLogFolder);
    this.pinstances = new MultiArrayMap<String, PaxosInstanceStateMachine>(
            Config.getGlobalInt(PC.PINSTANCES_CAPACITY));
    this.corpses = new HashMap<String, PaxosInstanceStateMachine>();
    // this.activePaxii = new HashMap<String, ActivePaxosState>();
    this.messenger = (new PaxosMessenger<NodeIDType>(niot, this.integerMap));
    this.paxosLogger = new SQLPaxosLogger(this.myID, id.toString(), paxosLogFolder,
            this.wrapMessenger(this.messenger));
    this.nullCheckpointsEnabled = enableNullCheckpoints;
    // periodically remove active state for idle paxii
    executor.scheduleWithFixedDelay(new Deactivator(), 0, Config.getGlobalInt(PC.DEACTIVATION_PERIOD),
            TimeUnit.MILLISECONDS);
    this.pendingDigests = new PendingDigests(this.outstanding.requests,
            Config.getGlobalInt(PC.NUM_MESSAGE_DIGESTS),
            new PendingDigests.PendingDigestCallback() {
                public void callback(AcceptPacket accept) {
                    PaxosManager.this.callbackDigestedAcceptTimeout(accept);
                }
            });
    this.initOutstandingMonitor();
    (this.requestBatcher = new RequestBatcher(this)).start();
    (this.ppBatcher = new PaxosPacketBatcher(this)).start();
    testingInitialization();
    // needed to unclose when testing multiple runs of open and close
    open();
    // so paxos packets will come to me before anyone else
    niot.precedePacketDemultiplexer(Config.getGlobalString(PC.JSON_LIBRARY).equals("org.json")
            ? new JSONDemultiplexer()
            : new FastDemultiplexer());
    initiateRecovery();
    if (!Config.getGlobalBoolean(PC.DELAY_PROFILER))
        DelayProfiler.disable();
}
From source file:org.jbpm.executor.impl.ExecutorImpl.java
protected ScheduledExecutorService getScheduledExecutorService() {
    ThreadFactory threadFactory = null;
    try {
        threadFactory = InitialContext.doLookup(threadFactoryLookup);
    } catch (Exception e) {
        threadFactory = Executors.defaultThreadFactory();
    }
    return new PrioritisedScheduledThreadPoolExecutor(threadPoolSize, threadFactory);
}
From source file:SwingWorker.java
/**
 * Returns the workersExecutorService.
 *
 * Returns the service stored in the appContext or creates it if
 * necessary. If it creates the service, it also triggers the
 * autoShutdown thread to get started.
 *
 * @return ExecutorService for the {@code SwingWorkers}
 * @see #startAutoShutdownThread
 */
private static synchronized ExecutorService getWorkersExecutorService() {
    if (executorService == null) {
        // this creates non-daemon threads.
        ThreadFactory threadFactory = new ThreadFactory() {
            final ThreadFactory defaultFactory = Executors.defaultThreadFactory();

            public Thread newThread(final Runnable r) {
                Thread thread = defaultFactory.newThread(r);
                thread.setName("SwingWorker-" + thread.getName());
                return thread;
            }
        };

        /*
         * We want to have no more than MAX_WORKER_THREADS
         * running threads.
         *
         * We want a worker thread to wait no longer than 1 second
         * for new tasks before terminating.
         */
        executorService = new ThreadPoolExecutor(0, MAX_WORKER_THREADS, 1L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), threadFactory) {

            private final ReentrantLock pauseLock = new ReentrantLock();
            private final Condition unpaused = pauseLock.newCondition();
            private boolean isPaused = false;
            private final ReentrantLock executeLock = new ReentrantLock();

            @Override
            public void execute(Runnable command) {
                /*
                 * ThreadPoolExecutor first tries to run the task
                 * in the corePool. If all threads are busy it
                 * tries to add the task to the waiting queue. If that
                 * fails it runs the task in the maximumPool.
                 *
                 * We want the corePool to be 0 and
                 * the maximumPool to be MAX_WORKER_THREADS.
                 * We need to change the order of the execution:
                 * first try the corePool, then the maximumPool,
                 * and only then store to the waiting queue. We can
                 * not do that because we would need access to the
                 * private methods.
                 *
                 * Instead we enlarge the corePool to
                 * MAX_WORKER_THREADS before the execution and
                 * shrink it back to 0 after.
                 * It does pretty much what we need.
                 *
                 * While we are changing the corePoolSize we need
                 * to stop running worker threads from accepting new
                 * tasks.
                 */

                // we need atomicity for the execute method.
                executeLock.lock();
                try {
                    pauseLock.lock();
                    try {
                        isPaused = true;
                    } finally {
                        pauseLock.unlock();
                    }

                    setCorePoolSize(MAX_WORKER_THREADS);
                    super.execute(command);
                    setCorePoolSize(0);

                    pauseLock.lock();
                    try {
                        isPaused = false;
                        unpaused.signalAll();
                    } finally {
                        pauseLock.unlock();
                    }
                } finally {
                    executeLock.unlock();
                }
            }

            @Override
            protected void afterExecute(Runnable r, Throwable t) {
                super.afterExecute(r, t);
                pauseLock.lock();
                try {
                    while (isPaused) {
                        unpaused.await();
                    }
                } catch (InterruptedException ignore) {
                } finally {
                    pauseLock.unlock();
                }
            }
        };
    }
    return executorService;
}
From source file:org.talend.commons.utils.threading.Locker.java
private void initThreadsPool() {
    treadsPool = Executors.newCachedThreadPool(new ThreadFactory() {

        @Override
        public Thread newThread(Runnable r) {
            Thread newThread = Executors.defaultThreadFactory().newThread(r);
            newThread.setName(newThread.getName() + "_" + Locker.class.getSimpleName()); //$NON-NLS-1$
            return newThread;
        }
    });
}
From source file:org.apache.hadoop.hive.metastore.cache.CachedStore.java
@VisibleForTesting
/**
 * This starts a background thread, which initially populates the SharedCache and later
 * periodically gets updates from the metastore db
 *
 * @param conf
 * @param runOnlyOnce
 * @param shouldRunPrewarm
 */
static synchronized void startCacheUpdateService(Configuration conf, boolean runOnlyOnce,
        boolean shouldRunPrewarm) {
    if (cacheUpdateMaster == null) {
        initBlackListWhiteList(conf);
        if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
            cacheRefreshPeriodMS = MetastoreConf.getTimeVar(conf,
                    ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS);
        }
        LOG.info("CachedStore: starting cache update service (run every {} ms", cacheRefreshPeriodMS);
        cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread t = Executors.defaultThreadFactory().newThread(r);
                t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId());
                t.setDaemon(true);
                return t;
            }
        });
        if (!runOnlyOnce) {
            cacheUpdateMaster.scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0,
                    cacheRefreshPeriodMS, TimeUnit.MILLISECONDS);
        }
    }
    if (runOnlyOnce) {
        // Some tests control the execution of the background update thread
        cacheUpdateMaster.schedule(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0,
                TimeUnit.MILLISECONDS);
    }
}
From source file:sx.blah.discord.api.internal.DiscordUtils.java
/**
 * This creates a {@link ThreadFactory} which produces threads which run as daemons.
 *
 * @param threadName The name of threads created by the returned factory.
 * @return The new daemon thread factory.
 */
public static ThreadFactory createDaemonThreadFactory(String threadName) {
    return (runnable) -> { // Ensures all threads are daemons
        Thread thread = Executors.defaultThreadFactory().newThread(runnable);
        if (threadName != null)
            thread.setName(threadName);
        thread.setDaemon(true);
        return thread;
    };
}
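A usage sketch for the lambda-based factory above, assuming sx.blah.discord.api.internal.DiscordUtils is available; the "Heartbeat" thread name, executor type, and period are illustrative choices, not taken from the project:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import sx.blah.discord.api.internal.DiscordUtils;

public class DaemonFactoryUsage {
    public static void main(String[] args) throws Exception {
        // Daemon threads will not keep the JVM alive once main() returns.
        ScheduledExecutorService heartbeat = Executors.newSingleThreadScheduledExecutor(
                DiscordUtils.createDaemonThreadFactory("Heartbeat"));
        heartbeat.scheduleAtFixedRate(
                () -> System.out.println("tick on " + Thread.currentThread().getName()),
                0, 1, TimeUnit.SECONDS);
        Thread.sleep(3000); // let a few ticks run before the JVM exits
    }
}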