List of usage examples for java.lang.Thread.isAlive()
public final native boolean isAlive();
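isAlive() returns true if a thread has been started and has not yet died. Most of the examples below pair it with join(timeout) or a sleep loop so the caller's wait is bounded. As a quick orientation, here is a minimal sketch of that pattern; every name in it is illustrative rather than taken from any of the projects below.

public static void main(String[] args) throws InterruptedException {
    Thread worker = new Thread(() -> {
        try {
            Thread.sleep(500); // simulate some work
        } catch (InterruptedException ignored) {
            // exit promptly when asked to stop
        }
    }, "worker");
    worker.start();

    worker.join(1000); // bounded wait: returns when the thread dies or 1 s elapses

    if (worker.isAlive()) {
        worker.interrupt(); // still running past the deadline: request a cooperative stop
    }
}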
From source file:ffx.ui.MainPanel.java
/**
 * Converts a non-Force Field X data structure into an array of FFXSystem[].
 * Presently does not yet have support for array- or list-based data
 * structures, only singular objects.
 *
 * @param data Outside data structure
 * @param file Source file
 * @return An array of FFXSystem
 */
public synchronized FFXSystem[] convertWait(Object data, File file) {
    if (file == null) {
        try {
            file = PotentialsDataConverter.getDefaultFile(data);
        } catch (FileNotFoundException | IllegalArgumentException ex) {
            logger.warning(String.format(" Exception in finding file for data structure: %s", ex.toString()));
            return null;
        }
    }
    String name = file.getName();
    Thread thread = convert(data, file, null);
    // Poll isAlive() while the conversion thread runs; wait(1) releases this
    // object's monitor briefly so the converter can make progress.
    while (thread != null && thread.isAlive()) {
        try {
            wait(1);
        } catch (InterruptedException e) {
            String message = "Exception waiting for " + name + " to open.";
            logger.log(Level.WARNING, message, e);
            return null;
        }
    }
    MolecularAssembly[] systems = activeConvFilter.getMolecularAssemblys();
    if (systems != null) {
        int n = systems.length;
        FFXSystem[] ffxSystems = new FFXSystem[n];
        FFXSystem[] allSystems = getHierarchy().getSystems();
        int total = allSystems.length;
        for (int i = 0; i < n; i++) {
            ffxSystems[i] = allSystems[total - n + i];
        }
        return ffxSystems;
    } else {
        return null;
    }
}
From source file:gov.noaa.pfel.erddap.util.EDStatic.java
/**
 * This interrupts/kills all of the threads in runningThreads.
 * Erddap.destroy calls this when tomcat is stopped.
 */
public static void destroy() {
    long time = System.currentTimeMillis();
    try {
        String[] names = String2.toStringArray(runningThreads.keySet().toArray());
        String2.log("\nEDStatic.destroy will try to interrupt nThreads=" + names.length +
            "\n  threadNames=" + String2.toCSSVString(names));

        //shutdown Cassandra clusters/sessions
        EDDTableFromCassandra.shutdown();

        //interrupt all of them
        for (int i = 0; i < names.length; i++) {
            try {
                Thread thread = (Thread) runningThreads.get(names[i]);
                if (thread != null && thread.isAlive())
                    thread.interrupt();
                else
                    runningThreads.remove(names[i]);
            } catch (Throwable t) {
                String2.log(MustBe.throwableToString(t));
            }
        }

        //wait for threads to finish
        int waitedSeconds = 0;
        int maxSeconds = 600; //10 minutes
        while (true) {
            boolean allDone = true;
            for (int i = 0; i < names.length; i++) {
                try {
                    if (names[i] == null)
                        continue; //it has already stopped
                    Thread thread = (Thread) runningThreads.get(names[i]);
                    if (thread != null && thread.isAlive()) {
                        allDone = false;
                        if (waitedSeconds > maxSeconds) {
                            String2.log("  " + names[i] + " thread is being stop()ped!!!");
                            thread.stop();
                            runningThreads.remove(names[i]);
                            names[i] = null;
                        }
                    } else {
                        String2.log("  " + names[i] + " thread recognized the interrupt in " + waitedSeconds + " s");
                        runningThreads.remove(names[i]);
                        names[i] = null;
                    }
                } catch (Throwable t) {
                    String2.log(MustBe.throwableToString(t));
                    allDone = false;
                }
            }
            if (allDone) {
                String2.log("EDStatic.destroy successfully interrupted all threads in " + waitedSeconds + " s");
                break;
            }
            if (waitedSeconds > maxSeconds) {
                String2.log("!!! EDStatic.destroy is done, but it had to stop() some threads.");
                break;
            }
            Math2.sleep(2000);
            waitedSeconds += 2;
        }

        //finally
        if (useLuceneSearchEngine)
            String2.log("stopping lucene...");

        try {
            if (luceneIndexSearcher != null) luceneIndexSearcher.close();
        } catch (Throwable t) {}
        luceneIndexSearcher = null;

        try {
            if (luceneIndexReader != null) luceneIndexReader.close();
        } catch (Throwable t) {}
        luceneIndexReader = null;
        luceneDatasetIDFieldCache = null;

        try {
            if (luceneIndexWriter != null)
                //indices will be thrown away, so don't make pending changes
                luceneIndexWriter.close(false);
        } catch (Throwable t) {}
        luceneIndexWriter = null;
    } catch (Throwable t) {
        String2.log(MustBe.throwableToString(t));
    }
}
From source file:com.att.android.arodatacollector.utils.AROCollectorUtils.java
/**
 * Executes the specified linux command on the device shell.
 *
 * @param shellCommand
 *            A linux native shell command.
 *
 * @return The output from the linux native shell command.
 */
public String runCommand(String shellCommand) {
    String stdout;
    String sRet = "";
    String stderr;
    try {
        final Process m_process = Runtime.getRuntime().exec(shellCommand);
        final StringBuilder sbread = new StringBuilder();
        final Thread tout = new Thread(new Runnable() {
            public void run() {
                BufferedReader bufferedReader = new BufferedReader(
                        new InputStreamReader(m_process.getInputStream()), 8192);
                String ls_1 = null;
                try {
                    while ((ls_1 = bufferedReader.readLine()) != null) {
                        sbread.append(ls_1).append("\n");
                    }
                } catch (IOException e) {
                    AROLogger.e(TAG, "IOException in runCommand" + e);
                } finally {
                    try {
                        bufferedReader.close();
                    } catch (IOException e) {
                        AROLogger.e(TAG, "Exception in runCommand bufferedReader.close()" + e);
                    }
                }
            }
        });
        tout.start();

        final StringBuilder sberr = new StringBuilder();
        final Thread terr = new Thread(new Runnable() {
            public void run() {
                final BufferedReader bufferedReader = new BufferedReader(
                        new InputStreamReader(m_process.getErrorStream()), 8192);
                String ls_1 = null;
                try {
                    while ((ls_1 = bufferedReader.readLine()) != null) {
                        sberr.append(ls_1).append("\n");
                    }
                } catch (IOException e) {
                    AROLogger.e(TAG, "Exception in runCommand" + e);
                } finally {
                    try {
                        bufferedReader.close();
                    } catch (IOException e) {
                        AROLogger.e(TAG, "Exception in runCommand bufferedReader.close()" + e);
                    }
                }
            }
        });
        terr.start();

        // Poll until the stdout reader thread has drained its stream, then
        // interrupt the stderr reader if it is still running.
        while (tout.isAlive()) {
            Thread.sleep(50);
        }
        if (terr.isAlive())
            terr.interrupt();
        stdout = sbread.toString();
        stderr = sberr.toString();
        sRet = stdout + stderr;
    } catch (java.io.IOException ee) {
        AROLogger.e(TAG, "Exception in runCommand" + ee);
        return null;
    } catch (InterruptedException ie) {
        AROLogger.e(TAG, "Exception in runCommand" + ie);
        return null;
    }
    return sRet;
}
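A design note on runCommand: stdout and stderr are drained on separate threads because reading both streams from one thread can deadlock once a pipe buffer fills, and isAlive() is then polled until the stdout reader finishes. Where ProcessBuilder is available, merging stderr into stdout lets one reader replace the polling loop; a hedged sketch follows (shellCommand as above, I/O and interrupt exceptions left to the caller).

// Merge stderr into stdout so a single reader thread suffices; no isAlive() polling needed.
ProcessBuilder pb = new ProcessBuilder("sh", "-c", shellCommand);
pb.redirectErrorStream(true);
Process p = pb.start();
StringBuilder out = new StringBuilder();
try (BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
    String line;
    while ((line = r.readLine()) != null) {
        out.append(line).append('\n');
    }
}
int exitCode = p.waitFor(); // the stream is already drained, so this returns promptly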
From source file:com.android.exchange.ExchangeService.java
private long checkMailboxes() {
    // First, see if any running mailboxes have been deleted
    ArrayList<Long> deletedMailboxes = new ArrayList<Long>();
    synchronized (sSyncLock) {
        for (long mailboxId : mServiceMap.keySet()) {
            Mailbox m = Mailbox.restoreMailboxWithId(this, mailboxId);
            if (m == null) {
                deletedMailboxes.add(mailboxId);
            }
        }
        // If so, stop them or remove them from the map
        for (Long mailboxId : deletedMailboxes) {
            AbstractSyncService svc = mServiceMap.get(mailboxId);
            if (svc == null || svc.mThread == null) {
                releaseMailbox(mailboxId);
                continue;
            } else {
                boolean alive = svc.mThread.isAlive();
                log("Deleted mailbox: " + svc.mMailboxName);
                if (alive) {
                    stopManualSync(mailboxId);
                } else {
                    log("Removing from serviceMap");
                    releaseMailbox(mailboxId);
                }
            }
        }
    }

    long nextWait = EXCHANGE_SERVICE_HEARTBEAT_TIME;
    long now = System.currentTimeMillis();

    // Start up threads that need it; use a query which finds eas mailboxes where
    // the sync interval is not "never". This is the set of mailboxes that we control
    if (mAccountObserver == null) {
        log("mAccountObserver null; service died??");
        return nextWait;
    }
    Cursor c = getContentResolver().query(Mailbox.CONTENT_URI, Mailbox.CONTENT_PROJECTION,
            mAccountObserver.getSyncableEasMailboxWhere(), null, null);
    if (c == null)
        throw new ProviderUnavailableException();
    try {
        while (c.moveToNext()) {
            long mailboxId = c.getLong(Mailbox.CONTENT_ID_COLUMN);
            AbstractSyncService service = null;
            synchronized (sSyncLock) {
                service = mServiceMap.get(mailboxId);
            }
            if (service == null) {
                // Get the cached account
                Account account = getAccountById(c.getInt(Mailbox.CONTENT_ACCOUNT_KEY_COLUMN));
                if (account == null)
                    continue;

                // We handle a few types of mailboxes specially
                int mailboxType = c.getInt(Mailbox.CONTENT_TYPE_COLUMN);
                if (!isMailboxSyncable(account, mailboxType)) {
                    continue;
                }

                // Check whether we're in a hold (temporary or permanent)
                SyncError syncError = mSyncErrorMap.get(mailboxId);
                if (syncError != null) {
                    // Nothing we can do about fatal errors
                    if (syncError.fatal)
                        continue;
                    if (now < syncError.holdEndTime) {
                        // If release time is earlier than next wait time,
                        // move next wait time up to the release time
                        if (syncError.holdEndTime < now + nextWait) {
                            nextWait = syncError.holdEndTime - now;
                            mNextWaitReason = "Release hold";
                        }
                        continue;
                    } else {
                        // Keep the error around, but clear the end time
                        syncError.holdEndTime = 0;
                    }
                }

                // Otherwise, we use the sync interval
                long syncInterval = c.getInt(Mailbox.CONTENT_SYNC_INTERVAL_COLUMN);
                if (syncInterval == Mailbox.CHECK_INTERVAL_PUSH) {
                    Mailbox m = EmailContent.getContent(c, Mailbox.class);
                    requestSync(m, SYNC_PUSH, null);
                } else if (mailboxType == Mailbox.TYPE_OUTBOX) {
                    if (hasSendableMessages(c)) {
                        Mailbox m = EmailContent.getContent(c, Mailbox.class);
                        startServiceThread(new EasOutboxService(this, m), m);
                    }
                } else if (syncInterval > 0 && syncInterval <= ONE_DAY_MINUTES) {
                    long lastSync = c.getLong(Mailbox.CONTENT_SYNC_TIME_COLUMN);
                    long sinceLastSync = now - lastSync;
                    long toNextSync = syncInterval * MINUTES - sinceLastSync;
                    String name = c.getString(Mailbox.CONTENT_DISPLAY_NAME_COLUMN);
                    if (toNextSync <= 0) {
                        Mailbox m = EmailContent.getContent(c, Mailbox.class);
                        requestSync(m, SYNC_SCHEDULED, null);
                    } else if (toNextSync < nextWait) {
                        nextWait = toNextSync;
                        if (Eas.USER_LOG) {
                            log("Next sync for " + name + " in " + nextWait / 1000 + "s");
                        }
                        mNextWaitReason = "Scheduled sync, " + name;
                    } else if (Eas.USER_LOG) {
                        log("Next sync for " + name + " in " + toNextSync / 1000 + "s");
                    }
                }
            } else {
                Thread thread = service.mThread;
                // Look for threads that have died and remove them from the map
                if (thread != null && !thread.isAlive()) {
                    if (Eas.USER_LOG) {
                        log("Dead thread, mailbox released: " + c.getString(Mailbox.CONTENT_DISPLAY_NAME_COLUMN));
                    }
                    releaseMailbox(mailboxId);
                    // Restart this if necessary
                    if (nextWait > 3 * SECONDS) {
                        nextWait = 3 * SECONDS;
                        mNextWaitReason = "Clean up dead thread(s)";
                    }
                } else {
                    long requestTime = service.mRequestTime;
                    if (requestTime > 0) {
                        long timeToRequest = requestTime - now;
                        if (timeToRequest <= 0) {
                            service.mRequestTime = 0;
                            service.alarm();
                        } else if (requestTime > 0 && timeToRequest < nextWait) {
                            if (timeToRequest < 11 * MINUTES) {
                                nextWait = timeToRequest < 250 ? 250 : timeToRequest;
                                mNextWaitReason = "Sync data change";
                            } else {
                                log("Illegal timeToRequest: " + timeToRequest);
                            }
                        }
                    }
                }
            }
        }
    } finally {
        c.close();
    }
    return nextWait;
}
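The !thread.isAlive() branch above is the lightweight way to notice a worker that has exited, normally or by crash, without callbacks or futures: dead threads are simply reaped on the next pass. The same idea in a minimal standalone form (map and helper are illustrative, not part of ExchangeService):

import java.util.Iterator;
import java.util.Map;

/** Removes entries whose worker thread has died so the slots can be restarted. (Sketch.) */
static void reapDeadWorkers(Map<Long, Thread> workers) {
    for (Iterator<Map.Entry<Long, Thread>> it = workers.entrySet().iterator(); it.hasNext();) {
        if (!it.next().getValue().isAlive()) {
            it.remove(); // thread exited; release the slot
        }
    }
}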
From source file:org.apache.flink.test.recovery.AbstractTaskManagerProcessFailureRecoveryTest.java
@Test
public void testTaskManagerProcessFailure() {
    final StringWriter processOutput1 = new StringWriter();
    final StringWriter processOutput2 = new StringWriter();
    final StringWriter processOutput3 = new StringWriter();

    ActorSystem jmActorSystem = null;
    Process taskManagerProcess1 = null;
    Process taskManagerProcess2 = null;
    Process taskManagerProcess3 = null;

    File coordinateTempDir = null;

    try {
        // check that we run this test only if the java command
        // is available on this machine
        String javaCommand = getJavaCommandPath();
        if (javaCommand == null) {
            System.out.println("---- Skipping Process Failure test : Could not find java executable ----");
            return;
        }

        // create a logging file for the process
        File tempLogFile = File.createTempFile(getClass().getSimpleName() + "-", "-log4j.properties");
        tempLogFile.deleteOnExit();
        CommonTestUtils.printLog4jDebugConfig(tempLogFile);

        // coordination between the processes goes through a directory
        coordinateTempDir = CommonTestUtils.createTempDirectory();

        // find a free port to start the JobManager
        final int jobManagerPort = NetUtils.getAvailablePort();

        // start a JobManager
        Tuple2<String, Object> localAddress = new Tuple2<String, Object>("localhost", jobManagerPort);

        Configuration jmConfig = new Configuration();
        jmConfig.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_INTERVAL, "1000 ms");
        jmConfig.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_PAUSE, "6 s");
        jmConfig.setInteger(ConfigConstants.AKKA_WATCH_THRESHOLD, 9);
        jmConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "10 s");
        jmConfig.setString(ConfigConstants.AKKA_ASK_TIMEOUT, "100 s");

        jmActorSystem = AkkaUtils.createActorSystem(jmConfig, new Some<>(localAddress));
        ActorRef jmActor = JobManager.startJobManagerActors(jmConfig, jmActorSystem,
                TestingUtils.defaultExecutor(), TestingUtils.defaultExecutor(),
                JobManager.class, MemoryArchivist.class)._1();

        // the TaskManager java command
        String[] command = new String[] { javaCommand,
                "-Dlog.level=DEBUG",
                "-Dlog4j.configuration=file:" + tempLogFile.getAbsolutePath(),
                "-Xms80m", "-Xmx80m",
                "-classpath", getCurrentClasspath(),
                TaskManagerProcessEntryPoint.class.getName(),
                String.valueOf(jobManagerPort) };

        // start the first two TaskManager processes
        taskManagerProcess1 = new ProcessBuilder(command).start();
        new CommonTestUtils.PipeForwarder(taskManagerProcess1.getErrorStream(), processOutput1);
        taskManagerProcess2 = new ProcessBuilder(command).start();
        new CommonTestUtils.PipeForwarder(taskManagerProcess2.getErrorStream(), processOutput2);

        // we wait for the JobManager to have the two TaskManagers available
        // since some of the CI environments are very hostile, we need to give this a lot of time (2 minutes)
        waitUntilNumTaskManagersAreRegistered(jmActor, 2, 120000);

        // the program will set a marker file in each of its parallel tasks once they are ready, so that
        // this coordinating code is aware of this.
        // the program will very slowly consume elements until the marker file (later created by the
        // test driver code) is present
        final File coordinateDirClosure = coordinateTempDir;
        final AtomicReference<Throwable> errorRef = new AtomicReference<>();

        // we trigger program execution in a separate thread
        Thread programTrigger = new Thread("Program Trigger") {
            @Override
            public void run() {
                try {
                    testTaskManagerFailure(jobManagerPort, coordinateDirClosure);
                } catch (Throwable t) {
                    t.printStackTrace();
                    errorRef.set(t);
                }
            }
        };

        // start the test program
        programTrigger.start();

        // wait until all marker files are in place, indicating that all tasks have started (max 2 minutes)
        if (!waitForMarkerFiles(coordinateTempDir, READY_MARKER_FILE_PREFIX, PARALLELISM, 120000)) {
            // check if the program failed for some reason
            if (errorRef.get() != null) {
                Throwable error = errorRef.get();
                error.printStackTrace();
                fail("The program encountered a " + error.getClass().getSimpleName() + " : " + error.getMessage());
            } else {
                // no error occurred, simply a timeout
                fail("The tasks were not started within time (" + 120000 + "msecs)");
            }
        }

        // start the third TaskManager
        taskManagerProcess3 = new ProcessBuilder(command).start();
        new CommonTestUtils.PipeForwarder(taskManagerProcess3.getErrorStream(), processOutput3);

        // we wait for the third TaskManager to register
        // since some of the CI environments are very hostile, we need to give this a lot of time (2 minutes)
        waitUntilNumTaskManagersAreRegistered(jmActor, 3, 120000);

        // kill one of the previous TaskManagers, triggering a failure and recovery
        taskManagerProcess1.destroy();
        taskManagerProcess1 = null;

        // we create the marker file which signals the program functions tasks that they can complete
        touchFile(new File(coordinateTempDir, PROCEED_MARKER_FILE));

        // wait for at most 5 minutes for the program to complete
        programTrigger.join(300000);

        // check that the program really finished
        assertFalse("The program did not finish in time", programTrigger.isAlive());

        // check whether the program encountered an error
        if (errorRef.get() != null) {
            Throwable error = errorRef.get();
            error.printStackTrace();
            fail("The program encountered a " + error.getClass().getSimpleName() + " : " + error.getMessage());
        }

        // all seems well :-)
    } catch (Exception e) {
        e.printStackTrace();
        printProcessLog("TaskManager 1", processOutput1.toString());
        printProcessLog("TaskManager 2", processOutput2.toString());
        printProcessLog("TaskManager 3", processOutput3.toString());
        fail(e.getMessage());
    } catch (Error e) {
        e.printStackTrace();
        printProcessLog("TaskManager 1", processOutput1.toString());
        printProcessLog("TaskManager 2", processOutput2.toString());
        printProcessLog("TaskManager 3", processOutput3.toString());
        throw e;
    } finally {
        if (taskManagerProcess1 != null) {
            taskManagerProcess1.destroy();
        }
        if (taskManagerProcess2 != null) {
            taskManagerProcess2.destroy();
        }
        if (taskManagerProcess3 != null) {
            taskManagerProcess3.destroy();
        }
        if (jmActorSystem != null) {
            jmActorSystem.shutdown();
        }
        if (coordinateTempDir != null) {
            try {
                FileUtils.deleteDirectory(coordinateTempDir);
            } catch (Throwable t) {
                // we can ignore this
            }
        }
    }
}
From source file:azkaban.execapp.FlowRunnerTest2.java
/**
 * Tests a failure with the default FINISH_CURRENTLY_RUNNING.
 * After the first failure, every job that started should complete, and the
 * rest of the jobs should be skipped.
 *
 * @throws Exception
 */
@Ignore
@Test
public void testNormalFailure1() throws Exception {
    // Test propagation of KILLED status to embedded flows.
    EventCollectorListener eventCollector = new EventCollectorListener();
    FlowRunner runner = createFlowRunner(eventCollector, "jobf");
    ExecutableFlow flow = runner.getExecutableFlow();
    Map<String, Status> expectedStateMap = new HashMap<String, Status>();
    Map<String, ExecutableNode> nodeMap = new HashMap<String, ExecutableNode>();

    // 1. START FLOW
    createExpectedStateMap(flow, expectedStateMap, nodeMap);
    Thread thread = runFlowRunnerInThread(runner);
    pause(250);

    // After it starts up, only joba and joba1 should be running
    expectedStateMap.put("joba", Status.RUNNING);
    expectedStateMap.put("joba1", Status.RUNNING);
    compareStates(expectedStateMap, nodeMap);

    // 2. JOB A FAILS; jobs that depend on it should be cancelled
    InteractiveTestJob.getTestJob("joba").failJob();
    pause(250);
    Assert.assertEquals(Status.FAILED_FINISHING, flow.getStatus());
    expectedStateMap.put("joba", Status.FAILED);
    expectedStateMap.put("joba1", Status.RUNNING);
    expectedStateMap.put("jobb", Status.CANCELLED);
    expectedStateMap.put("jobc", Status.CANCELLED);
    expectedStateMap.put("jobd", Status.CANCELLED);
    expectedStateMap.put("jobd:innerJobA", Status.READY);
    expectedStateMap.put("jobd:innerFlow2", Status.READY);
    expectedStateMap.put("jobb:innerJobA", Status.READY);
    expectedStateMap.put("jobb:innerFlow", Status.READY);
    expectedStateMap.put("jobe", Status.CANCELLED);
    compareStates(expectedStateMap, nodeMap);

    // 3. The still-running joba1 succeeds; the flow can now finish as FAILED
    InteractiveTestJob.getTestJob("joba1").succeedJob();
    pause(250);
    expectedStateMap.put("jobf", Status.CANCELLED);
    Assert.assertEquals(Status.FAILED, flow.getStatus());
    // The runner thread should have exited once the flow finished
    Assert.assertFalse(thread.isAlive());
}
From source file:org.apache.flink.streaming.connectors.kafka.KafkaConsumerTestBase.java
protected String writeSequence(String baseTopicName, final int numElements, final int parallelism,
        final int replicationFactor) throws Exception {
    LOG.info("\n===================================\n"
            + "== Writing sequence of " + numElements + " into " + baseTopicName + " with p=" + parallelism + "\n"
            + "===================================");

    final TypeInformation<Tuple2<Integer, Integer>> resultType =
            TypeInformation.of(new TypeHint<Tuple2<Integer, Integer>>() {});

    final KeyedSerializationSchema<Tuple2<Integer, Integer>> serSchema = new KeyedSerializationSchemaWrapper<>(
            new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));

    final KeyedDeserializationSchema<Tuple2<Integer, Integer>> deserSchema = new KeyedDeserializationSchemaWrapper<>(
            new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));

    final int maxNumAttempts = 10;

    for (int attempt = 1; attempt <= maxNumAttempts; attempt++) {
        final String topicName = baseTopicName + '-' + attempt;
        LOG.info("Writing attempt #" + attempt);

        // -------- Write the Sequence --------

        createTestTopic(topicName, parallelism, replicationFactor);

        StreamExecutionEnvironment writeEnv =
                StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
        writeEnv.getConfig().setRestartStrategy(RestartStrategies.noRestart());
        writeEnv.getConfig().disableSysoutLogging();

        DataStream<Tuple2<Integer, Integer>> stream = writeEnv
                .addSource(new RichParallelSourceFunction<Tuple2<Integer, Integer>>() {

                    private boolean running = true;

                    @Override
                    public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
                        int cnt = 0;
                        int partition = getRuntimeContext().getIndexOfThisSubtask();

                        while (running && cnt < numElements) {
                            ctx.collect(new Tuple2<>(partition, cnt));
                            cnt++;
                        }
                    }

                    @Override
                    public void cancel() {
                        running = false;
                    }
                }).setParallelism(parallelism);

        // the producer must not produce duplicates
        Properties producerProperties =
                FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
        producerProperties.setProperty("retries", "0");
        producerProperties.putAll(secureProps);

        kafkaServer.produceIntoKafka(stream, topicName, serSchema, producerProperties,
                new Tuple2Partitioner(parallelism)).setParallelism(parallelism);

        try {
            writeEnv.execute("Write sequence");
        } catch (Exception e) {
            LOG.error("Write attempt failed, trying again", e);
            deleteTestTopic(topicName);
            JobManagerCommunicationUtils.waitUntilNoJobIsRunning(flink.getLeaderGateway(timeout));
            continue;
        }

        LOG.info("Finished writing sequence");

        // -------- Validate the Sequence --------

        // we need to validate the sequence, because kafka's producers are not exactly once
        LOG.info("Validating sequence");

        JobManagerCommunicationUtils.waitUntilNoJobIsRunning(flink.getLeaderGateway(timeout));

        final StreamExecutionEnvironment readEnv =
                StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
        readEnv.getConfig().setRestartStrategy(RestartStrategies.noRestart());
        readEnv.getConfig().disableSysoutLogging();
        readEnv.setParallelism(parallelism);

        Properties readProps = (Properties) standardProps.clone();
        readProps.setProperty("group.id", "flink-tests-validator");
        readProps.putAll(secureProps);
        FlinkKafkaConsumerBase<Tuple2<Integer, Integer>> consumer =
                kafkaServer.getConsumer(topicName, deserSchema, readProps);

        readEnv.addSource(consumer)
                .map(new RichMapFunction<Tuple2<Integer, Integer>, Tuple2<Integer, Integer>>() {

                    private final int totalCount = parallelism * numElements;
                    private int count = 0;

                    @Override
                    public Tuple2<Integer, Integer> map(Tuple2<Integer, Integer> value) throws Exception {
                        if (++count == totalCount) {
                            throw new SuccessException();
                        } else {
                            return value;
                        }
                    }
                }).setParallelism(1)
                .addSink(new DiscardingSink<Tuple2<Integer, Integer>>()).setParallelism(1);

        final AtomicReference<Throwable> errorRef = new AtomicReference<>();

        Thread runner = new Thread() {
            @Override
            public void run() {
                try {
                    tryExecute(readEnv, "sequence validation");
                } catch (Throwable t) {
                    errorRef.set(t);
                }
            }
        };
        runner.start();

        final long deadline = System.nanoTime() + 10_000_000_000L;
        long delay;
        while (runner.isAlive() && (delay = deadline - System.nanoTime()) > 0) {
            runner.join(delay / 1_000_000L);
        }

        boolean success;

        if (runner.isAlive()) {
            // did not finish in time, maybe the producer dropped one or more records and
            // the validation did not reach the exit point
            success = false;
            JobManagerCommunicationUtils.cancelCurrentJob(flink.getLeaderGateway(timeout));
        } else {
            Throwable error = errorRef.get();
            if (error != null) {
                success = false;
                LOG.info("Attempt " + attempt + " failed with exception", error);
            } else {
                success = true;
            }
        }

        JobManagerCommunicationUtils.waitUntilNoJobIsRunning(flink.getLeaderGateway(timeout));

        if (success) {
            // everything is good!
            return topicName;
        } else {
            deleteTestTopic(topicName);
            // fall through the loop
        }
    }

    throw new Exception("Could not write a valid sequence to Kafka after " + maxNumAttempts + " attempts");
}
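The deadline loop around runner.join(...) is a reusable idiom: wait for a thread until an absolute System.nanoTime() deadline, re-entering join() while the thread is alive and time remains. A standalone form might look like the sketch below (illustrative helper, not from the Flink test; it adds a guard the inline version leaves implicit, since join(0) would block indefinitely):

/** Waits for t until an absolute System.nanoTime() deadline; true if t finished. (Sketch.) */
static boolean joinUntil(Thread t, long deadlineNanos) throws InterruptedException {
    long remainingNanos;
    while (t.isAlive() && (remainingNanos = deadlineNanos - System.nanoTime()) > 0) {
        // round sub-millisecond remainders up to 1 ms, because join(0) waits forever
        t.join(Math.max(1L, remainingNanos / 1_000_000L));
    }
    return !t.isAlive();
}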
From source file:gov.noaa.pfel.erddap.util.EDStatic.java
/**
 * This interrupts the thread and waits up to maxSeconds for it to finish.
 * If it still isn't finished, it is stopped.
 */
public static void stopThread(Thread thread, int maxSeconds) {
    try {
        if (thread == null)
            return;
        String name = thread.getName();
        if (verbose)
            String2.log("stopThread(" + name + ")...");
        if (!thread.isAlive()) {
            if (verbose)
                String2.log("thread=" + name + " was already not alive.");
            return;
        }
        thread.interrupt();
        int waitSeconds = 0;
        while (thread.isAlive() && waitSeconds < maxSeconds) {
            waitSeconds += 2;
            Math2.sleep(2000);
        }
        if (thread.isAlive()) {
            if (verbose)
                String2.log("!!!Stopping thread=" + name + " after " + waitSeconds + " s");
            thread.stop();
        } else {
            if (verbose)
                String2.log("thread=" + name + " noticed interrupt in " + waitSeconds + " s");
        }
    } catch (Throwable t) {
        String2.log(MustBe.throwableToString(t));
    }
}
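A caveat on the stop() fallback above: Thread.stop() has been deprecated since Java 1.2 and on recent JDKs (Java 20 and later) it throws UnsupportedOperationException, so this pattern only works on the older runtimes this code targeted. A minimal cooperative replacement, sketched here with an illustrative helper name, reports whether the thread honored the interrupt instead of forcing termination:

/** Interrupt t, then wait up to maxSeconds; true if t terminated. (Illustrative sketch.) */
public static boolean interruptAndJoin(Thread t, int maxSeconds) throws InterruptedException {
    if (t == null || !t.isAlive()) {
        return true;
    }
    t.interrupt();               // ask the thread to stop
    t.join(maxSeconds * 1000L);  // wait, but no longer than the deadline
    return !t.isAlive();         // true if the thread noticed the interrupt in time
}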
From source file:net.sf.jasperreports.customvisualization.export.CVElementPhantomJSImageDataProvider.java
/**
 * Executes a command within the given timeout.
 *
 * @param args
 * @param currentDirectory
 * @param timeout
 */
private static void runCommand(String[] args, File currentDirectory, final int timeout) {
    Thread loggingThread = null;
    Thread interruptingThread = null;

    try {
        String cmd = "";
        for (String arg : args) {
            cmd += " " + arg;
        }
        if (log.isDebugEnabled()) {
            log.debug("Executing external command: " + cmd);
        }
        //System.out.println(cmd);

        ProcessBuilder pb = new ProcessBuilder(Arrays.asList(args));
        pb.directory(currentDirectory);

        final Process externalProcess = pb.start();
        final StringBuilder processOutput = new StringBuilder();

        final boolean[] success = new boolean[1];
        success[0] = false;

        loggingThread = new Thread(new Runnable() {
            @Override
            public void run() {
                BufferedReader br = null;
                try {
                    br = new BufferedReader(new InputStreamReader(externalProcess.getInputStream()));
                    String line;
                    while ((line = br.readLine()) != null) {
                        processOutput.append(line).append("\n");
                        if (line.indexOf("SCRIPT_SUCCESS") >= 0) {
                            success[0] = true;
                            killProcess(externalProcess, 100);
                        } else if (line.indexOf("SCRIPT_ERROR") >= 0) {
                            success[0] = false;
                            killProcess(externalProcess, 100);
                        }
                    }
                    if (log.isDebugEnabled()) {
                        log.debug("External process output:\n" + processOutput.toString());
                    }
                } catch (IOException e) {
                    if (log.isDebugEnabled()) {
                        log.debug(e.getMessage());
                    }
                } finally {
                    if (br != null) {
                        try {
                            br.close();
                        } catch (IOException e) {
                            if (log.isWarnEnabled()) {
                                log.warn("Failed to close phantomjs process' inputstream", e);
                            }
                        }
                    }
                }
            }
        });

        interruptingThread = new Thread(new Runnable() {
            @Override
            public void run() {
                if (killProcess(externalProcess, timeout)) {
                    success[0] = false;
                }
            }
        });

        loggingThread.start();
        interruptingThread.start();
        externalProcess.waitFor();

        // We should not care if the phantomjs process does not end on time if it succeeds
        // in producing the desired output.
        if (externalProcess.exitValue() != 0 && !success[0]) {
            // FIXME we should do loggingThread.join(millis) because the
            // process might end before its output is fully processed
            throw new JRRuntimeException("External process did not end properly; exit value: "
                    + externalProcess.exitValue()
                    + (processOutput.length() > 0 ? "; process output:\n" + processOutput + "\n" : "."));
        }
    } catch (IOException e) {
        throw new JRRuntimeException(e);
    } catch (InterruptedException e) {
        throw new JRRuntimeException(e);
    } finally {
        // Interrupt the helper threads if they are still alive so they do not linger.
        if (interruptingThread != null && interruptingThread.isAlive()) {
            try {
                interruptingThread.interrupt();
            } catch (Exception ex) {
            }
        }
        if (loggingThread != null && loggingThread.isAlive()) {
            try {
                loggingThread.interrupt();
            } catch (Exception ex) {
            }
        }
    }
}
From source file:org.apache.hadoop.hbase.rest.PerformanceEvaluation.java
private void doMultipleClients(final Class<? extends Test> cmd) throws IOException {
    final List<Thread> threads = new ArrayList<Thread>(this.N);
    final long[] timings = new long[this.N];
    final int perClientRows = R / N;
    final TableName tableName = this.tableName;
    final DataBlockEncoding encoding = this.blockEncoding;
    final boolean flushCommits = this.flushCommits;
    final Compression.Algorithm compression = this.compression;
    final boolean writeToWal = this.writeToWAL;
    final int preSplitRegions = this.presplitRegions;
    final boolean useTags = this.useTags;
    final int numTags = this.noOfTags;
    final HConnection connection = HConnectionManager.createConnection(getConf());
    for (int i = 0; i < this.N; i++) {
        final int index = i;
        Thread t = new Thread("TestClient-" + i) {
            @Override
            public void run() {
                super.run();
                PerformanceEvaluation pe = new PerformanceEvaluation(getConf());
                pe.tableName = tableName;
                pe.blockEncoding = encoding;
                pe.flushCommits = flushCommits;
                pe.compression = compression;
                pe.writeToWAL = writeToWal;
                pe.presplitRegions = preSplitRegions;
                pe.N = N;
                pe.connection = connection;
                pe.useTags = useTags;
                pe.noOfTags = numTags;
                try {
                    long elapsedTime = pe.runOneClient(cmd, index * perClientRows, perClientRows, R,
                            flushCommits, writeToWAL, useTags, noOfTags, connection, new Status() {
                                public void setStatus(final String msg) throws IOException {
                                    LOG.info("client-" + getName() + " " + msg);
                                }
                            });
                    timings[index] = elapsedTime;
                    LOG.info("Finished " + getName() + " in " + elapsedTime + "ms writing " + perClientRows + " rows");
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        };
        threads.add(t);
    }
    for (Thread t : threads) {
        t.start();
    }
    for (Thread t : threads) {
        // Keep joining until the thread is really dead; an interrupt alone
        // must not end the wait early.
        while (t.isAlive()) {
            try {
                t.join();
            } catch (InterruptedException e) {
                LOG.debug("Interrupted, continuing" + e.toString());
            }
        }
    }
    final String test = cmd.getSimpleName();
    LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(timings));
    Arrays.sort(timings);
    long total = 0;
    for (int i = 0; i < this.N; i++) {
        total += timings[i];
    }
    LOG.info("[" + test + "]" + "\tMin: " + timings[0] + "ms" + "\tMax: " + timings[this.N - 1] + "ms"
            + "\tAvg: " + (total / this.N) + "ms");
}
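The join loop at the end is the standard way to make joining robust against interruption: if join() is interrupted, the isAlive() check sends the caller straight back into join() until the worker really finishes. Extracted into a reusable helper (illustrative name, not from PerformanceEvaluation), the idiom also restores the interrupt flag for callers:

/** Joins t, retrying if interrupted; restores the interrupt status afterwards. (Sketch.) */
static void joinUninterruptibly(Thread t) {
    boolean interrupted = false;
    while (t.isAlive()) {
        try {
            t.join();
        } catch (InterruptedException e) {
            interrupted = true; // remember the interrupt and keep waiting
        }
    }
    if (interrupted) {
        Thread.currentThread().interrupt(); // let callers see the interrupt
    }
}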