List of usage examples for java.lang.Thread.getState()
public State getState()
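Per the Thread Javadoc, getState() returns the thread's current Thread.State and is designed for monitoring the system state, not for synchronization control. Before the project examples below, here is a minimal self-contained sketch (not taken from any of the listed source files; the class and thread names are illustrative) that polls a worker thread's state:

public class GetStateDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(200); // simulate some work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }, "worker");

        System.out.println(worker.getState()); // NEW: not yet started
        worker.start();

        // Poll until the worker terminates; in real code prefer join() for plain waiting.
        while (worker.getState() != Thread.State.TERMINATED) {
            Thread.sleep(50);
        }
        System.out.println(worker.getState()); // TERMINATED
    }
}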
From source file:org.sakaiproject.status.StatusServlet.java
protected void reportThreadDetails(HttpServletResponse response) throws Exception {
    PrintWriter pw = response.getWriter();
    for (Thread thread : findAllThreads()) {
        if (thread != null) {
            String threadLocation = "";
            try {
                StackTraceElement ste = thread.getStackTrace()[0];
                StackTraceElement ste2 = thread.getStackTrace()[1];
                threadLocation = ste.getClassName() + "." + ste.getMethodName() + "()," + ste.getFileName()
                        + ":" + ste.getLineNumber() + "," + ste2.getClassName() + "." + ste2.getMethodName()
                        + "()," + ste2.getFileName() + ":" + ste2.getLineNumber();
            } catch (Exception e) {
                threadLocation = "?,?,?,?";
            }
            pw.print(thread.getThreadGroup().getName() + "," + thread.getId() + "," + thread.getName() + ","
                    + thread.getPriority() + "," + thread.getState().name() + ","
                    + (thread.isAlive() ? "" : "notalive") + "," + (thread.isDaemon() ? "daemon" : "") + ","
                    + (thread.isInterrupted() ? "interrupted" : "") + "," + threadLocation + "\n");
        }
    }
}
From source file:it.evilsocket.dsploit.core.UpdateService.java
/**
 * Wait for a shell to terminate or for the user to cancel the notification.
 *
 * @param shell the Thread returned by {@link it.evilsocket.dsploit.core.Shell#async(String, it.evilsocket.dsploit.core.Shell.OutputReceiver, boolean)}
 * @param cancellationMessage the message of the CancellationException
 * @throws java.io.IOException when shell commands cannot be executed
 * @throws java.util.concurrent.CancellationException when the user cancels the notification
 */
private int execShell(Thread shell, String cancellationMessage)
        throws IOException, CancellationException, InterruptedException {
    if (!(shell instanceof Shell.StreamGobbler))
        throw new IOException("cannot execute shell commands");

    shell.start();

    while (mRunning && shell.getState() != Thread.State.TERMINATED)
        Thread.sleep(10);

    if (!mRunning) {
        shell.interrupt();
        throw new CancellationException(cancellationMessage);
    } else
        shell.join();

    int ret = ((Shell.StreamGobbler) shell).exitValue;

    if (ret != 0 && mErrorOutput.length() > 0)
        for (String line : mErrorOutput.toString().split("\n"))
            if (line.length() > 0)
                Logger.error(line);

    return ret;
}
From source file:com.datatorrent.stram.engine.StreamingContainer.java
public synchronized void deactivate() {
    ArrayList<Thread> activeThreads = new ArrayList<Thread>();
    ArrayList<Integer> activeOperators = new ArrayList<Integer>();

    for (Map.Entry<Integer, Node<?>> e : nodes.entrySet()) {
        Thread t = e.getValue().context.getThread();
        if (t == null || !t.isAlive()) {
            disconnectNode(e.getKey());
        } else {
            activeThreads.add(t);
            activeOperators.add(e.getKey());
            e.getValue().shutdown();
        }
    }

    try {
        Iterator<Integer> iterator = activeOperators.iterator();
        for (Thread t : activeThreads) {
            t.join(1000);
            if (!t.getState().equals(State.TERMINATED)) {
                t.interrupt();
            }
            disconnectNode(iterator.next());
        }
    } catch (InterruptedException ex) {
        logger.warn("Aborting wait for operators to get deactivated!", ex);
    }

    for (WindowGenerator wg : activeGenerators.keySet()) {
        wg.deactivate();
    }
    activeGenerators.clear();

    for (Stream stream : activeStreams.keySet()) {
        stream.deactivate();
    }
    activeStreams.clear();
}
From source file:com.datatorrent.stram.engine.StreamingContainer.java
private synchronized void undeploy(List<Integer> nodeList) {
    /*
     * make sure that all the operators which we are asked to undeploy are in this container.
     */
    HashMap<Integer, Node<?>> toUndeploy = new HashMap<Integer, Node<?>>();
    for (Integer operatorId : nodeList) {
        Node<?> node = nodes.get(operatorId);
        if (node == null) {
            throw new IllegalArgumentException("Node " + operatorId + " is not hosted in this container!");
        } else if (toUndeploy.containsKey(operatorId)) {
            throw new IllegalArgumentException(
                    "Node " + operatorId + " is requested to be undeployed more than once");
        } else {
            toUndeploy.put(operatorId, node);
        }
    }

    ArrayList<Thread> joinList = new ArrayList<Thread>();
    ArrayList<Integer> discoList = new ArrayList<Integer>();
    for (Integer operatorId : nodeList) {
        Thread t = nodes.get(operatorId).context.getThread();
        if (t == null || !t.isAlive()) {
            disconnectNode(operatorId);
        } else {
            joinList.add(t);
            discoList.add(operatorId);
            nodes.get(operatorId).shutdown();
        }
    }

    try {
        Iterator<Integer> iterator = discoList.iterator();
        for (Thread t : joinList) {
            t.join(1000);
            if (!t.getState().equals(State.TERMINATED)) {
                t.interrupt();
            }
            disconnectNode(iterator.next());
        }
        logger.info("Undeploy complete.");
    } catch (InterruptedException ex) {
        logger.warn("Aborting wait for operators to get deactivated!", ex);
    }

    for (Integer operatorId : nodeList) {
        nodes.remove(operatorId);
    }
}
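Both StreamingContainer methods above use the same shutdown idiom: join with a timeout, then consult getState() and interrupt any thread that is still not TERMINATED. A condensed sketch of that idiom (the class and helper names below are made up for illustration, they are not part of StreamingContainer):

final class ThreadShutdown {
    // Wait at most timeoutMillis for the thread to finish; if it is still
    // running afterwards, request interruption instead of waiting forever.
    static void joinOrInterrupt(Thread t, long timeoutMillis) throws InterruptedException {
        t.join(timeoutMillis);
        if (t.getState() != Thread.State.TERMINATED) {
            t.interrupt();
        }
    }
}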
From source file:edu.wisc.commons.httpclient.CleanShutdownPoolingClientConnectionManager.java
@Override
public void shutdown() {
    if (shutdownComplete.get() || !this.shutdownLock.tryLock()) {
        //Already shutdown or shutdown in progress
        return;
    }
    try {
        //Create Thread to call shutdown
        final Thread shutdownThread = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    logger.info("PoolingClientConnectionManager shutdown started");
                    CleanShutdownPoolingClientConnectionManager.super.shutdown();
                } finally {
                    shutdownComplete.set(true);
                    logger.info("PoolingClientConnectionManager shutdown complete");
                }
            }
        });
        shutdownThread.setName("PoolingClientConnectionManager Shutdown Monitor");
        shutdownThread.setDaemon(true);

        //start shutdown thread
        shutdownThread.start();

        //track initial shutdown start time and time spent by the shutdown thread waiting or blocked
        final long shutdownStart = System.nanoTime();
        long waitStart = shutdownStart;

        //Monitor the shutdown thread
        while (!shutdownComplete.get()) {
            final long now = System.nanoTime();
            final long shutdownTime = TimeUnit.NANOSECONDS.toMillis(now - shutdownStart);

            //if time spent shutting down is greater than kill time forcibly stop the shutdown thread
            if (shutdownTime > this.shutdownThreadKillTime) {
                final String stackTrace = getStackTrace(shutdownThread);
                logger.error("Shutdown thread " + shutdownThread.getName() + " has been stopping for "
                        + shutdownTime + "ms, killing it. THIS IS BAD. \n" + stackTrace);
                shutdownThread.stop();
                //break out of the monitoring loop
                break;
            }
            //if time spent shutting down is greater than max time immediately interrupt the thread
            else if (shutdownTime > this.shutdownThreadMaxTime) {
                logger.warn("Shutdown thread " + shutdownThread.getName() + " has been stopping for "
                        + shutdownTime + "ms, interrupting immediately");
                shutdownThread.interrupt();
            }
            //otherwise check the state of the thread
            else {
                //If the thread is blocked or waiting and has been for longer than the max wait time
                //interrupt the thread. If not in blocked or waiting state update the wait-start time
                final State state = shutdownThread.getState();
                switch (state) {
                    case BLOCKED:
                    case TIMED_WAITING:
                    case WAITING: {
                        final long waitTime = TimeUnit.NANOSECONDS.toMillis(now - waitStart);
                        if (waitTime > shutdownThreadMaxWaitTime) {
                            logger.info("Shutdown thread " + shutdownThread.getName() + " has been waiting for "
                                    + waitTime + "ms, interrupting");
                            shutdownThread.interrupt();
                        } else {
                            break;
                        }
                    }
                    default: {
                        waitStart = now;
                        break;
                    }
                }
            }

            //Sleep between state checks, don't want to overload anything
            try {
                Thread.sleep(shutdownThreadPollRate);
            } catch (InterruptedException e) {
                //ignore
            }
        }
    } finally {
        this.shutdownLock.unlock();
    }
}
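The connection-manager example distinguishes BLOCKED, WAITING, and TIMED_WAITING from the runnable states to decide whether its shutdown thread is stuck. A stripped-down sketch of that state check (the class and method names are illustrative, not part of the original class):

final class ThreadStates {
    // Returns true when the thread is parked on a monitor, lock, or wait call.
    static boolean isParked(Thread t) {
        switch (t.getState()) {
            case BLOCKED:       // waiting to enter a synchronized block
            case WAITING:       // e.g. Object.wait() or LockSupport.park()
            case TIMED_WAITING: // e.g. Thread.sleep() or a timed wait
                return true;
            default:            // NEW, RUNNABLE, or TERMINATED
                return false;
        }
    }
}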
From source file:com.datatorrent.stram.engine.GenericNodeTest.java
@Test
@SuppressWarnings("SleepWhileInLoop")
public void testSynchingLogic() throws InterruptedException {
    long sleeptime = 25L;
    final ArrayList<Object> list = new ArrayList<Object>();
    GenericOperator go = new GenericOperator();
    final GenericNode gn = new GenericNode(go,
            new com.datatorrent.stram.engine.OperatorContext(0, new DefaultAttributeMap(), null));
    gn.setId(1);
    AbstractReservoir reservoir1 = AbstractReservoir.newReservoir("ip1Res", 1024);
    AbstractReservoir reservoir2 = AbstractReservoir.newReservoir("ip2Res", 1024);

    Sink<Object> output = new Sink<Object>() {
        @Override
        public void put(Object tuple) {
            list.add(tuple);
        }

        @Override
        public int getCount(boolean reset) {
            return 0;
        }
    };
    gn.connectInputPort("ip1", reservoir1);
    gn.connectInputPort("ip2", reservoir2);
    gn.connectOutputPort("op", output);
    gn.firstWindowMillis = 0;
    gn.windowWidthMillis = 100;

    final AtomicBoolean ab = new AtomicBoolean(false);
    Thread t = new Thread() {
        @Override
        public void run() {
            ab.set(true);
            gn.activate();
            gn.run();
            gn.deactivate();
        }
    };
    t.start();

    do {
        Thread.sleep(sleeptime);
    } while (ab.get() == false);

    Tuple beginWindow1 = new Tuple(MessageType.BEGIN_WINDOW, 0x1L);
    reservoir1.add(beginWindow1);
    Thread.sleep(sleeptime);
    Assert.assertEquals(1, list.size());

    reservoir2.add(beginWindow1);
    Thread.sleep(sleeptime);
    Assert.assertEquals(1, list.size());

    Tuple endWindow1 = new EndWindowTuple(0x1L);
    reservoir1.add(endWindow1);
    Thread.sleep(sleeptime);
    Assert.assertEquals(1, list.size());

    Tuple beginWindow2 = new Tuple(MessageType.BEGIN_WINDOW, 0x2L);
    reservoir1.add(beginWindow2);
    Thread.sleep(sleeptime);
    Assert.assertEquals(1, list.size());

    reservoir2.add(endWindow1);
    Thread.sleep(sleeptime);
    Assert.assertEquals(3, list.size());

    reservoir2.add(beginWindow2);
    Thread.sleep(sleeptime);
    Assert.assertEquals(3, list.size());

    Tuple endWindow2 = new EndWindowTuple(0x2L);
    reservoir2.add(endWindow2);
    Thread.sleep(sleeptime);
    Assert.assertEquals(3, list.size());

    reservoir1.add(endWindow2);
    Thread.sleep(sleeptime);
    Assert.assertEquals(4, list.size());

    EndStreamTuple est = new EndStreamTuple(0L);
    reservoir1.add(est);
    Thread.sleep(sleeptime);
    Assert.assertEquals(4, list.size());

    Tuple beginWindow3 = new Tuple(MessageType.BEGIN_WINDOW, 0x3L);
    reservoir2.add(beginWindow3);
    Thread.sleep(sleeptime);
    Assert.assertEquals(5, list.size());

    Tuple endWindow3 = new EndWindowTuple(0x3L);
    reservoir2.add(endWindow3);
    Thread.sleep(sleeptime);
    Assert.assertEquals(6, list.size());

    Assert.assertNotSame(Thread.State.TERMINATED, t.getState());

    reservoir2.add(est);
    Thread.sleep(sleeptime);
    Assert.assertEquals(7, list.size());

    Thread.sleep(sleeptime);
    Assert.assertEquals(Thread.State.TERMINATED, t.getState());
}
From source file:org.apache.hadoop.mapreduce.v2.hs.TestUnnecessaryBlockingOnHistoryFileInfo.java
/**
 * This creates a test case in which two threads try to load two
 * different jobs of the same user under the intermediate directory.
 * One thread should not be blocked by the other thread that is loading
 * huge job files (simulated by hanging forever while parsing the job
 * files). The test fails by triggering the timeout if one thread is
 * blocked by the other while that other thread holds the lock on its
 * associated job files and hangs while parsing them.
 */
@Test(timeout = 600000)
public void testTwoThreadsQueryingDifferentJobOfSameUser() throws InterruptedException, IOException {
    final Configuration config = new Configuration();
    config.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR, INTERMEDIATE_DIR.getPath());
    config.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS, Long.MAX_VALUE);

    final JobId job1 = createJobId(0);
    final JobId job2 = createJobId(1);
    final HistoryFileManagerUnderContention historyFileManager = createHistoryFileManager(config, job1, job2);

    Thread webRequest1 = null;
    Thread webRequest2 = null;
    try {
        /*
         * create a dummy .jhist file for job1, and try to load/parse the job
         * files in one child thread.
         */
        createJhistFile(job1);
        webRequest1 = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    HistoryFileManager.HistoryFileInfo historyFileInfo = historyFileManager.getFileInfo(job1);
                    historyFileInfo.loadJob();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
        webRequest1.start();
        historyFileManager.waitUntilIntermediateDirIsScanned(job1);
        /*
         * At this point, thread webRequest1 has finished scanning the
         * intermediate directory and hangs while parsing the job files,
         * holding the lock on the associated HistoryFileInfo object.
         */

        /*
         * create a dummy .jhist file for job2 and try to load/parse the job files
         * in the other child thread. Because job files are not moved from the
         * intermediate directory to the done directory, thread webRequest2
         * will also see the job history files for job1.
         */
        createJhistFile(job2);
        webRequest2 = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    HistoryFileManager.HistoryFileInfo historyFileInfo = historyFileManager.getFileInfo(job2);
                    historyFileInfo.loadJob();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
        webRequest2.start();
        historyFileManager.waitUntilIntermediateDirIsScanned(job2);
        /*
         * If execution has gotten to this point, thread webRequest2 did not try
         * to acquire the lock of the HistoryFileInfo object associated with job1,
         * which is permanently held by thread webRequest1 (hanging while parsing
         * the job history files), so it was able to proceed with parsing the job
         * history files of job2.
         */
        Assert.assertTrue(
                "Thread 2 is blocked while it is trying to load job2 by Thread 1 which is loading job1.",
                webRequest2.getState() != Thread.State.BLOCKED);
    } finally {
        if (webRequest1 != null) {
            webRequest1.interrupt();
        }
        if (webRequest2 != null) {
            webRequest2.interrupt();
        }
    }
}
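Both test examples above assert on getState(): GenericNodeTest waits for TERMINATED, while the history-file test checks that a thread is not BLOCKED. A minimal JUnit 4 sketch of that assertion pattern (a hypothetical test, not taken from either project):

import org.junit.Assert;
import org.junit.Test;

public class ThreadStateAssertionTest {
    @Test
    public void workerTerminatesWithoutBlocking() throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(100); // stand-in for independent work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();
        Thread.sleep(50); // give the worker a moment to reach its sleep
        Assert.assertNotEquals(Thread.State.BLOCKED, worker.getState());
        worker.join();
        Assert.assertEquals(Thread.State.TERMINATED, worker.getState());
    }
}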