List of usage examples for java.lang.Thread.setDaemon
public final void setDaemon(boolean on)
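setDaemon marks a thread as either a daemon or a user thread; the JVM exits once only daemon threads remain, and the method must be called before the thread is started or it throws IllegalThreadStateException. Before the project examples below, here is a minimal sketch illustrating that contract (not taken from any of the listed projects; the class and thread names are illustrative):

public class DaemonExample {
    public static void main(String[] args) throws InterruptedException {
        Thread heartbeat = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                System.out.println("heartbeat");
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e) {
                    return; // exit quietly when interrupted
                }
            }
        }, "heartbeat");

        heartbeat.setDaemon(true); // must be set before start(), otherwise IllegalThreadStateException
        heartbeat.start();

        Thread.sleep(1200);
        // main returns here; the JVM exits even though the daemon thread is still looping
    }
}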
From source file:com.srotya.monitoring.kafka.util.KafkaConsumerOffsetUtil.java
private KafkaConsumerOffsetUtil(KafkaMonitorConfiguration kafkaConfiguration, ZKClient zkClient,
        boolean enableHistory, StorageEngine server) {
    this.kafkaConfiguration = kafkaConfiguration;
    this.zkClient = zkClient;
    this.enableHistory = enableHistory;
    this.server = server;
    this.topics = new ConcurrentSkipListSet<>();
    brokerHosts = new ArrayBlockingQueue<>(kafkaConfiguration.getKafkaBroker().length);
    for (String broker : kafkaConfiguration.getKafkaBroker()) {
        brokerHosts.add(broker);
    }
    Thread th = new Thread(new KafkaNewConsumerOffsetThread(this));
    th.setDaemon(true);
    th.start();
}
From source file:com.nokia.dempsy.messagetransport.tcp.TcpReceiver.java
@PostConstruct
public synchronized void start() throws MessageTransportException {
    if (isStarted())
        return;

    // this sets the destination instance
    getDestination();

    // we need to call bind here in case the getDestination didn't do it
    // (which it wont if the port is not ephemeral)
    bind();

    // in case this is a restart, we want to reset the stopMe value.
    stopMe.set(false);

    serverThread = new Thread(new Runnable() {
        @Override
        public void run() {
            while (!stopMe.get()) {
                try {
                    // Wait for an event one of the registered channels
                    Socket clientSocket = serverSocket.accept();

                    // at the point we're committed to adding a new ClientThread to the set.
                    // So we need to lock it.
                    synchronized (clientThreads) {
                        // unless we're done.
                        if (!stopMe.get()) {
                            // This should come from a thread pool
                            ClientThread clientThread = new ClientThread(clientSocket);
                            Thread thread = new Thread(clientThread,
                                    "Client Handler for " + getClientDescription(clientSocket));
                            thread.setDaemon(true);
                            thread.start();

                            clientThreads.add(clientThread);
                        }
                    }
                }
                // This can happen if I rip the socket out from underneath the accept call.
                // Because accept doesn't exit with a Thread.interrupt call so closing the server
                // socket from another thread is the only way to make this happen.
                catch (SocketException se) {
                    // however, if we didn't explicitly stop the server, then there's another problem
                    if (!stopMe.get())
                        logger.error("Socket error on the server managing " + destination, se);
                } catch (Throwable th) {
                    logger.error("Major error on the server managing " + destination, th);
                }
            }

            // we're leaving so signal
            synchronized (eventLock) {
                eventSignaled = true;
                eventLock.notifyAll();
            }
        }
    }, "Server for " + destination);

    serverThread.start();
}
From source file:com.quinsoft.zeidon.utils.TimedLruCache.java
public TimedLruCache(long timeToLiveInSeconds, final long timerIntervalInSeconds, int maxItems) {
    this.timeToLiveInMillis = timeToLiveInSeconds * 1000;
    cacheMap = new LRUMap(maxItems);

    if (timeToLiveInMillis > 0 && timerIntervalInSeconds > 0) {
        Thread t = new Thread(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    try {
                        Thread.sleep(timerIntervalInSeconds * 1000);
                    } catch (InterruptedException ex) {
                    }

                    cleanup();
                    Thread.yield();
                }
            }
        });

        t.setDaemon(true);
        t.start();
    }
}
From source file:com.xpn.xwiki.plugin.lucene.IndexRebuilder.java
public synchronized int startIndex(Collection<String> wikis, String hqlFilter, boolean clearIndex,
        boolean onlyNew, XWikiContext context) {
    if (this.rebuildInProgress) {
        LOGGER.warn("Cannot launch rebuild because another rebuild is in progress");

        return LucenePluginApi.REBUILD_IN_PROGRESS;
    } else {
        if (clearIndex) {
            if (wikis == null) {
                this.indexUpdater.cleanIndex();
            } else {
                // TODO: clean wikis listed in wikis
            }
        }

        this.wikis = wikis != null ? new ArrayList<String>(wikis) : null;
        this.hqlFilter = hqlFilter;
        this.onlyNew = onlyNew;
        this.rebuildInProgress = true;

        Thread indexRebuilderThread = new Thread(this, "Lucene Index Rebuilder");
        // The JVM should be allowed to shutdown while this thread is running
        indexRebuilderThread.setDaemon(true);
        // Client requests are more important than indexing
        indexRebuilderThread.setPriority(3);
        // Finally, start the rebuild in the background
        indexRebuilderThread.start();

        // Too bad that now we can't tell how many items are there to be indexed...
        return 0;
    }
}
From source file:ai.grakn.graql.GraqlShell.java
private void start(Optional<List<String>> queryStrings) throws IOException {
    // Begin sending pings
    Thread thread = new Thread(() -> WebSocketPing.ping(session), "graql-shell-ping");
    thread.setDaemon(true);
    thread.start();

    if (queryStrings.isPresent()) {
        for (String queryString : queryStrings.get()) {
            executeQuery(queryString);
            commit();
        }
    } else {
        executeRepl();
    }
}
From source file:com.xpn.xwiki.plugin.lucene.internal.IndexRebuilder.java
public synchronized int startIndex(Collection<String> wikis, String hqlFilter, boolean clearIndex,
        boolean onlyNew, XWikiContext context) {
    if (this.rebuildInProgress) {
        LOGGER.warn("Cannot launch rebuild because another rebuild is in progress");

        return LucenePluginApi.REBUILD_IN_PROGRESS;
    } else {
        if (clearIndex) {
            if (wikis == null) {
                this.indexUpdater.cleanIndex();
            } else {
                try {
                    IndexWriter writer = this.indexUpdater.openWriter(false);
                    try {
                        for (String wiki : wikis) {
                            writer.deleteDocuments(new Term(IndexFields.DOCUMENT_WIKI, wiki));
                        }
                    } finally {
                        writer.close();
                    }
                } catch (IOException ex) {
                    LOGGER.warn("Failed to clean wiki index: {}", ex.getMessage());
                }
            }
        }

        this.wikis = wikis != null ? new ArrayList<String>(wikis) : null;
        this.hqlFilter = hqlFilter;
        this.onlyNew = onlyNew;
        this.rebuildInProgress = true;

        Thread indexRebuilderThread = new Thread(this, "Lucene Index Rebuilder");
        // The JVM should be allowed to shutdown while this thread is running
        indexRebuilderThread.setDaemon(true);
        // Client requests are more important than indexing
        indexRebuilderThread.setPriority(3);
        // Finally, start the rebuild in the background
        indexRebuilderThread.start();

        // Too bad that now we can't tell how many items are there to be indexed...
        return 0;
    }
}
From source file:com.horstmeier.java.tftp.TFTPBaseServer.java
public void run() {
    try {
        while (!shutdown_) {
            TFTPPacket tftpPacket;

            tftpPacket = serverTftp_.receive();

            TFTPTransfer tt = new TFTPTransfer(tftpPacket);

            synchronized (transfers_) {
                transfers_.add(tt);
            }

            Thread thread = new Thread(tt, "TFTPTransfer-" + tftpPacket.getAddress());
            thread.setDaemon(true);
            thread.start();
        }
    } catch (Exception e) {
        if (!shutdown_) {
            serverException = e;
            log.error("Unexpected Error in TFTP Server - Server shut down!", e);
        }
    } finally {
        shutdown_ = true; // set this to true, so the launching thread can check to see if it started.

        if (serverTftp_ != null && serverTftp_.isOpen()) {
            serverTftp_.close();
        }
    }
}
From source file:org.jdesktop.swingworker.AccumulativeRunnable.java
/**
 * Returns workersExecutorService.
 *
 * Returns the service stored in the appContext or creates it if
 * necessary. If it has to create one, it also triggers the
 * autoShutdown thread to get started.
 *
 * @return ExecutorService for the {@code SwingWorkers}
 * @see #startAutoShutdownThread
 */
private static synchronized ExecutorService getWorkersExecutorService() {
    if (executorService == null) {
        // this creates non-daemon threads.
        ThreadFactory threadFactory = new ThreadFactory() {
            final AtomicInteger threadNumber = new AtomicInteger(1);

            public Thread newThread(final Runnable r) {
                StringBuilder name = new StringBuilder("SwingWorker-pool-");
                name.append(System.identityHashCode(this));
                name.append("-thread-");
                name.append(threadNumber.getAndIncrement());

                Thread t = new Thread(r, name.toString());
                if (t.isDaemon())
                    t.setDaemon(false);
                if (t.getPriority() != Thread.NORM_PRIORITY)
                    t.setPriority(Thread.NORM_PRIORITY);
                return t;
            }
        };

        /*
         * We want to have no more than MAX_WORKER_THREADS
         * running threads.
         *
         * We want a worker thread to wait no longer than 5 seconds
         * for new tasks before terminating.
         */
        executorService = new ThreadPoolExecutor(0, MAX_WORKER_THREADS, 5L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), threadFactory) {

            private final ReentrantLock pauseLock = new ReentrantLock();
            private final Condition unpaused = pauseLock.newCondition();
            private boolean isPaused = false;
            private final ReentrantLock executeLock = new ReentrantLock();

            @Override
            public void execute(Runnable command) {
                /*
                 * ThreadPoolExecutor first tries to run the task
                 * in the corePool. If all threads are busy it
                 * tries to add the task to the waiting queue. If that
                 * fails it runs the task in the maximumPool.
                 *
                 * We want the corePool to be 0 and the
                 * maximumPool to be MAX_WORKER_THREADS, so we need to
                 * change the order of execution: first try the corePool,
                 * then the maximumPool, and only then store to the
                 * waiting queue. We cannot do that directly because we
                 * would need access to the private methods.
                 *
                 * Instead we enlarge the corePool to
                 * MAX_WORKER_THREADS before the execution and
                 * shrink it back to 0 after. It does pretty much
                 * what we need.
                 *
                 * While we are changing the corePoolSize we need
                 * to stop running worker threads from accepting new
                 * tasks.
                 */

                // we need atomicity for the execute method.
                executeLock.lock();
                try {
                    pauseLock.lock();
                    try {
                        isPaused = true;
                    } finally {
                        pauseLock.unlock();
                    }

                    setCorePoolSize(MAX_WORKER_THREADS);
                    super.execute(command);
                    setCorePoolSize(0);

                    pauseLock.lock();
                    try {
                        isPaused = false;
                        unpaused.signalAll();
                    } finally {
                        pauseLock.unlock();
                    }
                } finally {
                    executeLock.unlock();
                }
            }

            @Override
            protected void afterExecute(Runnable r, Throwable t) {
                super.afterExecute(r, t);
                pauseLock.lock();
                try {
                    while (isPaused) {
                        unpaused.await();
                    }
                } catch (InterruptedException ignore) {
                } finally {
                    pauseLock.unlock();
                }
            }
        };
    }
    return executorService;
}
From source file:com.yahoo.omid.tso.TSOHandler.java
public void start() {
    this.flushThread = new FlushThread();
    this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(Thread.currentThread().getThreadGroup(), r);
            t.setDaemon(true);
            t.setName("Flush Thread");
            return t;
        }
    });
    this.flushFuture = scheduledExecutor.schedule(flushThread, TSOState.FLUSH_TIMEOUT, TimeUnit.MILLISECONDS);
    this.executor = Executors.newSingleThreadExecutor();
}
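The last two examples call setDaemon inside a ThreadFactory so that every thread an executor pool creates is a daemon. A minimal, hypothetical sketch of that pattern (the factory method and thread-name prefix are illustrative, not taken from the projects above):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class DaemonThreadFactoryExample {
    // Factory that marks every created thread as a daemon before the pool starts it.
    static ThreadFactory daemonFactory(String prefix) {
        AtomicInteger counter = new AtomicInteger(1);
        return r -> {
            Thread t = new Thread(r, prefix + "-" + counter.getAndIncrement());
            t.setDaemon(true); // pool threads will not keep the JVM alive
            return t;
        };
    }

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler =
                Executors.newSingleThreadScheduledExecutor(daemonFactory("flush"));
        scheduler.scheduleAtFixedRate(() -> System.out.println("flush"), 0, 300, TimeUnit.MILLISECONDS);

        Thread.sleep(1000);
        // No shutdown() call is needed for the JVM to exit: the scheduler's only thread is a daemon.
    }
}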