List of usage examples for java.lang.Thread.isAlive()
public final native boolean isAlive();
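Thread.isAlive() returns true only while a thread has been started and has not yet terminated; it is false both before start() and after run() completes. Before the project examples, here is a minimal, self-contained sketch (not taken from any of the source files listed below; the class and variable names are illustrative) of the join-with-timeout / isAlive() / interrupt() pattern that most of these examples rely on:

public class IsAliveExample {
    public static void main(String[] args) throws InterruptedException {
        // Worker that simulates a long-running task.
        Thread worker = new Thread(new Runnable() {
            public void run() {
                try {
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and let run() return.
                    Thread.currentThread().interrupt();
                }
            }
        });

        System.out.println("Before start: " + worker.isAlive()); // false: not started yet
        worker.start();
        System.out.println("After start:  " + worker.isAlive()); // true: running

        worker.join(1000); // wait up to one second for the worker to finish
        if (worker.isAlive()) {
            // Still running after the timeout, so ask it to stop and wait for it to exit.
            worker.interrupt();
            worker.join();
        }
        System.out.println("After join:   " + worker.isAlive()); // false: terminated
    }
}

The same pattern appears below: join(timeout) bounds the wait, isAlive() tells the caller whether the thread actually finished, and interrupt() is the cooperative way to ask a still-running thread to stop.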
From source file:com.gargoylesoftware.htmlunit.WebClientTest.java
/**
 * @throws Exception if the test fails
 */
@Test
public void testJavaScriptTimeout() throws Exception {
    final WebClient client = getWebClient();
    final long timeout = 2000;
    final long oldTimeout = client.getJavaScriptTimeout();
    client.setJavaScriptTimeout(timeout);

    try {
        client.getOptions().setThrowExceptionOnScriptError(false);

        final String content = "<html><body><script>while(1) {}</script></body></html>";
        final MockWebConnection webConnection = new MockWebConnection();
        webConnection.setDefaultResponse(content);
        client.setWebConnection(webConnection);

        final Exception[] exceptions = { null };
        final Thread runner = new Thread() {
            @Override
            public void run() {
                try {
                    client.getPage(URL_FIRST);
                } catch (final Exception e) {
                    exceptions[0] = e;
                }
            }
        };
        runner.start();

        runner.join(timeout * 2);
        if (runner.isAlive()) {
            runner.interrupt();
            fail("Script was still running after timeout");
        }
        assertNull(exceptions[0]);
    } finally {
        client.setJavaScriptTimeout(oldTimeout);
    }
}
From source file:org.pentaho.di.job.entries.hadoopjobexecutor.JobEntryHadoopJobExecutor.java
public Result execute(final Result result, int arg1) throws KettleException {
    result.setNrErrors(0);

    Log4jFileAppender appender = null;
    String logFileName = "pdi-" + this.getName(); //$NON-NLS-1$
    try {
        appender = LogWriter.createFileAppender(logFileName, true, false);
        LogWriter.getInstance().addAppender(appender);
        log.setLogLevel(parentJob.getLogLevel());
    } catch (Exception e) {
        logError(BaseMessages.getString(PKG, "JobEntryHadoopJobExecutor.FailedToOpenLogFile", logFileName, //$NON-NLS-1$
                e.toString()));
        logError(Const.getStackTracker(e));
    }

    try {
        URL resolvedJarUrl = resolveJarUrl(jarUrl);
        if (log.isDetailed()) {
            logDetailed(BaseMessages.getString(PKG, "JobEntryHadoopJobExecutor.ResolvedJar",
                    resolvedJarUrl.toExternalForm()));
        }

        HadoopShim shim = getHadoopConfiguration().getHadoopShim();

        if (isSimple) {
            String simpleLoggingIntervalS = environmentSubstitute(getSimpleLoggingInterval());
            int simpleLogInt = 60;
            try {
                simpleLogInt = Integer.parseInt(simpleLoggingIntervalS, 10);
            } catch (NumberFormatException e) {
                logError(BaseMessages.getString(PKG, "ErrorParsingLogInterval", simpleLoggingIntervalS,
                        simpleLogInt));
            }

            final Class<?> mainClass = locateDriverClass(resolvedJarUrl, shim);
            if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobEntryHadoopJobExecutor.UsingDriverClass",
                        mainClass == null ? "null" : mainClass.getName()));
                logDetailed(BaseMessages.getString(PKG, "JobEntryHadoopJobExecutor.SimpleMode"));
            }

            final AtomicInteger threads = new AtomicInteger(1);
            final NoExitSecurityManager nesm = new NoExitSecurityManager(System.getSecurityManager());
            smStack.setSecurityManager(nesm);
            try {
                Runnable r = new Runnable() {
                    public void run() {
                        try {
                            try {
                                executeMainMethod(mainClass);
                            } finally {
                                restoreSecurityManager(threads, nesm);
                            }
                        } catch (NoExitSecurityManager.NoExitSecurityException ex) {
                            // Only log if we're blocking and waiting for this to complete
                            if (simpleBlocking) {
                                logExitStatus(result, mainClass, ex);
                            }
                        } catch (InvocationTargetException ex) {
                            if (ex.getTargetException() instanceof NoExitSecurityManager.NoExitSecurityException) {
                                // Only log if we're blocking and waiting for this to complete
                                if (simpleBlocking) {
                                    logExitStatus(result, mainClass,
                                            (NoExitSecurityManager.NoExitSecurityException) ex.getTargetException());
                                }
                            } else {
                                throw new RuntimeException(ex);
                            }
                        } catch (Exception ex) {
                            throw new RuntimeException(ex);
                        }
                    }
                };

                Thread t = new Thread(r);
                t.setDaemon(true);
                t.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
                    @Override
                    public void uncaughtException(Thread t, Throwable e) {
                        restoreSecurityManager(threads, nesm);
                        if (simpleBlocking) {
                            // Only log if we're blocking and waiting for this to complete
                            logError(BaseMessages.getString(JobEntryHadoopJobExecutor.class,
                                    "JobEntryHadoopJobExecutor.ErrorExecutingClass", mainClass.getName()), e);
                            result.setResult(false);
                        }
                    }
                });
                nesm.addBlockedThread(t);
                t.start();

                if (simpleBlocking) {
                    // wait until the thread is done
                    do {
                        logDetailed(BaseMessages.getString(JobEntryHadoopJobExecutor.class,
                                "JobEntryHadoopJobExecutor.Blocking", mainClass.getName()));
                        t.join(simpleLogInt * 1000);
                    } while (!parentJob.isStopped() && t.isAlive());
                    if (t.isAlive()) {
                        // Kill thread if it's still running. The job must have been stopped.
                        t.interrupt();
                    }
                }
            } finally {
                // If we're not performing simple blocking, spawn a watchdog thread to restore the
                // security manager when all threads are complete
                if (!simpleBlocking) {
                    Runnable threadWatchdog = new Runnable() {
                        @Override
                        public void run() {
                            while (threads.get() > 0) {
                                try {
                                    Thread.sleep(100);
                                } catch (InterruptedException e) {
                                    /* ignore */
                                }
                            }
                            restoreSecurityManager(threads, nesm);
                        }
                    };
                    Thread watchdog = new Thread(threadWatchdog);
                    watchdog.setDaemon(true);
                    watchdog.start();
                }
            }
        } else {
            if (log.isDetailed()) {
                logDetailed(BaseMessages.getString(PKG, "JobEntryHadoopJobExecutor.AdvancedMode"));
            }

            Configuration conf = shim.createConfiguration();
            FileSystem fs = shim.getFileSystem(conf);
            URL[] urls = new URL[] { resolvedJarUrl };
            URLClassLoader loader = new URLClassLoader(urls, shim.getClass().getClassLoader());
            String hadoopJobNameS = environmentSubstitute(hadoopJobName);
            conf.setJobName(hadoopJobNameS);

            String outputKeyClassS = environmentSubstitute(outputKeyClass);
            conf.setOutputKeyClass(loader.loadClass(outputKeyClassS));
            String outputValueClassS = environmentSubstitute(outputValueClass);
            conf.setOutputValueClass(loader.loadClass(outputValueClassS));

            if (mapperClass != null) {
                String mapperClassS = environmentSubstitute(mapperClass);
                Class<?> mapper = loader.loadClass(mapperClassS);
                conf.setMapperClass(mapper);
            }
            if (combinerClass != null) {
                String combinerClassS = environmentSubstitute(combinerClass);
                Class<?> combiner = loader.loadClass(combinerClassS);
                conf.setCombinerClass(combiner);
            }
            if (reducerClass != null) {
                String reducerClassS = environmentSubstitute(reducerClass);
                Class<?> reducer = loader.loadClass(reducerClassS);
                conf.setReducerClass(reducer);
            }
            if (inputFormatClass != null) {
                String inputFormatClassS = environmentSubstitute(inputFormatClass);
                Class<?> inputFormat = loader.loadClass(inputFormatClassS);
                conf.setInputFormat(inputFormat);
            }
            if (outputFormatClass != null) {
                String outputFormatClassS = environmentSubstitute(outputFormatClass);
                Class<?> outputFormat = loader.loadClass(outputFormatClassS);
                conf.setOutputFormat(outputFormat);
            }

            String hdfsHostnameS = environmentSubstitute(hdfsHostname);
            String hdfsPortS = environmentSubstitute(hdfsPort);
            String jobTrackerHostnameS = environmentSubstitute(jobTrackerHostname);
            String jobTrackerPortS = environmentSubstitute(jobTrackerPort);

            List<String> configMessages = new ArrayList<String>();
            shim.configureConnectionInformation(hdfsHostnameS, hdfsPortS, jobTrackerHostnameS, jobTrackerPortS,
                    conf, configMessages);
            for (String m : configMessages) {
                logBasic(m);
            }

            String inputPathS = environmentSubstitute(inputPath);
            String[] inputPathParts = inputPathS.split(",");
            List<Path> paths = new ArrayList<Path>();
            for (String path : inputPathParts) {
                paths.add(fs.asPath(conf.getDefaultFileSystemURL(), path));
            }
            Path[] finalPaths = paths.toArray(new Path[paths.size()]);
            conf.setInputPaths(finalPaths);

            String outputPathS = environmentSubstitute(outputPath);
            conf.setOutputPath(fs.asPath(conf.getDefaultFileSystemURL(), outputPathS));

            // process user defined values
            for (UserDefinedItem item : userDefined) {
                if (item.getName() != null && !"".equals(item.getName()) && item.getValue() != null
                        && !"".equals(item.getValue())) {
                    String nameS = environmentSubstitute(item.getName());
                    String valueS = environmentSubstitute(item.getValue());
                    conf.set(nameS, valueS);
                }
            }

            conf.setJar(environmentSubstitute(jarUrl));

            String numMapTasksS = environmentSubstitute(numMapTasks);
            String numReduceTasksS = environmentSubstitute(numReduceTasks);
            int numM = 1;
            try {
                numM = Integer.parseInt(numMapTasksS);
            } catch (NumberFormatException e) {
                logError("Can't parse number of map tasks '" + numMapTasksS + "'. Setting num map tasks to 1");
            }
            int numR = 1;
            try {
                numR = Integer.parseInt(numReduceTasksS);
            } catch (NumberFormatException e) {
                logError("Can't parse number of reduce tasks '" + numReduceTasksS + "'. Setting num reduce tasks to 1");
            }
            conf.setNumMapTasks(numM);
            conf.setNumReduceTasks(numR);

            RunningJob runningJob = shim.submitJob(conf);

            String loggingIntervalS = environmentSubstitute(getLoggingInterval());
            int logIntv = 60;
            try {
                logIntv = Integer.parseInt(loggingIntervalS);
            } catch (NumberFormatException e) {
                logError(BaseMessages.getString(PKG, "ErrorParsingLogInterval", loggingIntervalS, logIntv));
            }

            if (blocking) {
                try {
                    int taskCompletionEventIndex = 0;
                    while (!parentJob.isStopped() && !runningJob.isComplete()) {
                        if (logIntv >= 1) {
                            printJobStatus(runningJob);
                            taskCompletionEventIndex = logTaskMessages(runningJob, taskCompletionEventIndex);
                            Thread.sleep(logIntv * 1000);
                        } else {
                            Thread.sleep(60000);
                        }
                    }

                    if (parentJob.isStopped() && !runningJob.isComplete()) {
                        // We must stop the job running on Hadoop
                        runningJob.killJob();
                        // Indicate this job entry did not complete
                        result.setResult(false);
                    }

                    printJobStatus(runningJob);
                    // Log any messages we may have missed while polling
                    logTaskMessages(runningJob, taskCompletionEventIndex);
                } catch (InterruptedException ie) {
                    logError(ie.getMessage(), ie);
                }

                // Entry is successful if the MR job is successful overall
                result.setResult(runningJob.isSuccessful());
            }
        }
    } catch (Throwable t) {
        t.printStackTrace();
        result.setStopped(true);
        result.setNrErrors(1);
        result.setResult(false);
        logError(t.getMessage(), t);
    }

    if (appender != null) {
        LogWriter.getInstance().removeAppender(appender);
        appender.close();

        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, appender.getFile(),
                parentJob.getJobname(), getName());
        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
    }

    return result;
}
From source file:org.hyperic.hq.measurement.server.session.DataManagerImpl.java
private void waitForThreads(List<Thread> threads) {
    for (final Thread thread : threads) {
        while (thread.isAlive()) {
            try {
                thread.join();
            } catch (InterruptedException e) {
                log.debug(e, e);
            }
        }
    }
}
From source file:ffx.ui.MainPanel.java
/**
 * <p>
 * closeWait</p>
 */
public synchronized void closeWait() {
    FFXSystem active = hierarchy.getActive();
    if (active == null) {
        logger.log(Level.INFO, " No active system to close.");
        return;
    }
    Thread thread = close(active);
    while (thread != null && thread.isAlive()) {
        try {
            wait(1);
        } catch (InterruptedException e) {
            String message = "Exception waiting for " + active + " to close.";
            logger.log(Level.WARNING, message, e);
        }
    }
}
From source file:com.mwebster.exchange.SyncManager.java
private long checkMailboxes() {
    // First, see if any running mailboxes have been deleted
    ArrayList<Long> deletedMailboxes = new ArrayList<Long>();
    synchronized (sSyncLock) {
        for (long mailboxId : mServiceMap.keySet()) {
            Mailbox m = Mailbox.restoreMailboxWithId(this, mailboxId);
            if (m == null) {
                deletedMailboxes.add(mailboxId);
            }
        }
        // If so, stop them or remove them from the map
        for (Long mailboxId : deletedMailboxes) {
            AbstractSyncService svc = mServiceMap.get(mailboxId);
            if (svc == null || svc.mThread == null) {
                releaseMailbox(mailboxId);
                continue;
            } else {
                boolean alive = svc.mThread.isAlive();
                log("Deleted mailbox: " + svc.mMailboxName);
                if (alive) {
                    stopManualSync(mailboxId);
                } else {
                    log("Removing from serviceMap");
                    releaseMailbox(mailboxId);
                }
            }
        }
    }

    long nextWait = SYNC_MANAGER_HEARTBEAT_TIME;
    long now = System.currentTimeMillis();

    // Start up threads that need it; use a query which finds eas mailboxes where the
    // sync interval is not "never".  This is the set of mailboxes that we control
    if (mAccountObserver == null) {
        log("mAccountObserver null; service died??");
        return nextWait;
    }
    Cursor c = getContentResolver().query(Mailbox.CONTENT_URI, Mailbox.CONTENT_PROJECTION,
            mAccountObserver.getSyncableEasMailboxWhere(), null, null);

    // Contacts/Calendar obey this setting from ContentResolver
    // Mail is on its own schedule
    boolean masterAutoSync = ContentResolver.getMasterSyncAutomatically();
    try {
        while (c.moveToNext()) {
            long mid = c.getLong(Mailbox.CONTENT_ID_COLUMN);
            AbstractSyncService service = null;
            synchronized (sSyncLock) {
                service = mServiceMap.get(mid);
            }
            if (service == null) {
                // We handle a few types of mailboxes specially
                int type = c.getInt(Mailbox.CONTENT_TYPE_COLUMN);

                // If background data is off, we only sync Outbox
                // Manual syncs are initiated elsewhere, so they will continue to be respected
                if (!mBackgroundData && type != Mailbox.TYPE_OUTBOX) {
                    continue;
                }

                if (type == Mailbox.TYPE_CONTACTS || type == Mailbox.TYPE_CALENDAR) {
                    // We don't sync these automatically if master auto sync is off
                    if (!masterAutoSync) {
                        continue;
                    }
                    // Get the right authority for the mailbox
                    String authority;
                    Account account = getAccountById(c.getInt(Mailbox.CONTENT_ACCOUNT_KEY_COLUMN));
                    if (account != null) {
                        if (type == Mailbox.TYPE_CONTACTS) {
                            authority = ContactsContract.AUTHORITY;
                        } else {
                            authority = Calendar.AUTHORITY;
                            if (!mCalendarObservers.containsKey(account.mId)) {
                                // Make sure we have an observer for this Calendar, as
                                // we need to be able to detect sync state changes, sigh
                                registerCalendarObserver(account);
                            }
                        }
                        android.accounts.Account a = new android.accounts.Account(account.mEmailAddress,
                                Email.EXCHANGE_ACCOUNT_MANAGER_TYPE);
                        // See if "sync automatically" is set; if not, punt
                        if (!ContentResolver.getSyncAutomatically(a, authority)) {
                            continue;
                        // See if the calendar is enabled; if not, punt
                        } else if ((type == Mailbox.TYPE_CALENDAR) && !isCalendarEnabled(account.mId)) {
                            continue;
                        }
                    }
                } else if (type == Mailbox.TYPE_TRASH) {
                    continue;
                }

                // Check whether we're in a hold (temporary or permanent)
                SyncError syncError = mSyncErrorMap.get(mid);
                if (syncError != null) {
                    // Nothing we can do about fatal errors
                    if (syncError.fatal) continue;
                    if (now < syncError.holdEndTime) {
                        // If release time is earlier than next wait time,
                        // move next wait time up to the release time
                        if (syncError.holdEndTime < now + nextWait) {
                            nextWait = syncError.holdEndTime - now;
                            mNextWaitReason = "Release hold";
                        }
                        continue;
                    } else {
                        // Keep the error around, but clear the end time
                        syncError.holdEndTime = 0;
                    }
                }

                // Otherwise, we use the sync interval
                long interval = c.getInt(Mailbox.CONTENT_SYNC_INTERVAL_COLUMN);
                if (interval == Mailbox.CHECK_INTERVAL_PUSH) {
                    Mailbox m = EmailContent.getContent(c, Mailbox.class);
                    requestSync(m, SYNC_PUSH, null);
                } else if (type == Mailbox.TYPE_OUTBOX) {
                    int cnt = EmailContent.count(this, Message.CONTENT_URI,
                            EasOutboxService.MAILBOX_KEY_AND_NOT_SEND_FAILED,
                            new String[] { Long.toString(mid) });
                    if (cnt > 0) {
                        Mailbox m = EmailContent.getContent(c, Mailbox.class);
                        startServiceThread(new EasOutboxService(this, m), m);
                    }
                } else if (interval > 0 && interval <= ONE_DAY_MINUTES) {
                    long lastSync = c.getLong(Mailbox.CONTENT_SYNC_TIME_COLUMN);
                    long sinceLastSync = now - lastSync;
                    if (sinceLastSync < 0) {
                        log("WHOA! lastSync in the future for mailbox: " + mid);
                        sinceLastSync = interval * MINUTES;
                    }
                    long toNextSync = interval * MINUTES - sinceLastSync;
                    String name = c.getString(Mailbox.CONTENT_DISPLAY_NAME_COLUMN);
                    if (toNextSync <= 0) {
                        Mailbox m = EmailContent.getContent(c, Mailbox.class);
                        requestSync(m, SYNC_SCHEDULED, null);
                    } else if (toNextSync < nextWait) {
                        nextWait = toNextSync;
                        if (Eas.USER_LOG) {
                            log("Next sync for " + name + " in " + nextWait / 1000 + "s");
                        }
                        mNextWaitReason = "Scheduled sync, " + name;
                    } else if (Eas.USER_LOG) {
                        log("Next sync for " + name + " in " + toNextSync / 1000 + "s");
                    }
                }
            } else {
                Thread thread = service.mThread;
                // Look for threads that have died and remove them from the map
                if (thread != null && !thread.isAlive()) {
                    if (Eas.USER_LOG) {
                        log("Dead thread, mailbox released: "
                                + c.getString(Mailbox.CONTENT_DISPLAY_NAME_COLUMN));
                    }
                    releaseMailbox(mid);
                    // Restart this if necessary
                    if (nextWait > 3 * SECONDS) {
                        nextWait = 3 * SECONDS;
                        mNextWaitReason = "Clean up dead thread(s)";
                    }
                } else {
                    long requestTime = service.mRequestTime;
                    if (requestTime > 0) {
                        long timeToRequest = requestTime - now;
                        if (service instanceof AbstractSyncService && timeToRequest <= 0) {
                            service.mRequestTime = 0;
                            service.alarm();
                        } else if (requestTime > 0 && timeToRequest < nextWait) {
                            if (timeToRequest < 11 * MINUTES) {
                                nextWait = timeToRequest < 250 ? 250 : timeToRequest;
                                mNextWaitReason = "Sync data change";
                            } else {
                                log("Illegal timeToRequest: " + timeToRequest);
                            }
                        }
                    }
                }
            }
        }
    } finally {
        c.close();
    }
    return nextWait;
}
From source file:ffx.ui.MainPanel.java
/**
 * <p>
 * openWait</p>
 *
 * @param file a {@link java.lang.String} object.
 * @return an array of {@link ffx.ui.FFXSystem} objects.
 */
public synchronized FFXSystem[] openWait(String file) {
    Thread thread = open(file);
    while (thread != null && thread.isAlive()) {
        try {
            wait(1);
        } catch (InterruptedException e) {
            String message = "Exception waiting for " + file + " to open.";
            logger.log(Level.WARNING, message, e);
            return null;
        }
    }
    MolecularAssembly systems[] = activeFilter.getMolecularAssemblys();
    if (systems != null) {
        int n = systems.length;
        FFXSystem ffxSystems[] = new FFXSystem[n];
        FFXSystem allSystems[] = getHierarchy().getSystems();
        int total = allSystems.length;
        for (int i = 0; i < n; i++) {
            ffxSystems[i] = allSystems[total - n + i];
        }
        return ffxSystems;
    } else {
        return null;
    }
}
From source file:ffx.ui.MainPanel.java
/**
 * <p>
 * openWait</p>
 *
 * @param files an array of {@link java.lang.String} objects.
 * @return an array of {@link ffx.ui.FFXSystem} objects.
 */
public synchronized FFXSystem[] openWait(String files[]) {
    Thread thread = open(files);
    while (thread != null && thread.isAlive()) {
        try {
            wait(1);
        } catch (InterruptedException e) {
            String message = "Exception waiting for " + files[0] + " to open.";
            logger.log(Level.WARNING, message, e);
            return null;
        }
    }
    MolecularAssembly systems[] = activeFilter.getMolecularAssemblys();
    if (systems != null) {
        int n = systems.length;
        FFXSystem ffxSystems[] = new FFXSystem[n];
        FFXSystem allSystems[] = getHierarchy().getSystems();
        int total = allSystems.length;
        for (int i = 0; i < n; i++) {
            ffxSystems[i] = allSystems[total - n + i];
        }
        return ffxSystems;
    } else {
        return null;
    }
}
From source file:com.android.exchange.SyncManager.java
private long checkMailboxes() {
    // First, see if any running mailboxes have been deleted
    ArrayList<Long> deletedMailboxes = new ArrayList<Long>();
    synchronized (sSyncLock) {
        for (long mailboxId : mServiceMap.keySet()) {
            Mailbox m = Mailbox.restoreMailboxWithId(this, mailboxId);
            if (m == null) {
                deletedMailboxes.add(mailboxId);
            }
        }
        // If so, stop them or remove them from the map
        for (Long mailboxId : deletedMailboxes) {
            AbstractSyncService svc = mServiceMap.get(mailboxId);
            if (svc == null || svc.mThread == null) {
                releaseMailbox(mailboxId);
                continue;
            } else {
                boolean alive = svc.mThread.isAlive();
                log("Deleted mailbox: " + svc.mMailboxName);
                if (alive) {
                    stopManualSync(mailboxId);
                } else {
                    log("Removing from serviceMap");
                    releaseMailbox(mailboxId);
                }
            }
        }
    }

    long nextWait = SYNC_MANAGER_HEARTBEAT_TIME;
    long now = System.currentTimeMillis();

    // Start up threads that need it; use a query which finds eas mailboxes where the
    // sync interval is not "never".  This is the set of mailboxes that we control
    if (mAccountObserver == null) {
        log("mAccountObserver null; service died??");
        return nextWait;
    }
    Cursor c = getContentResolver().query(Mailbox.CONTENT_URI, Mailbox.CONTENT_PROJECTION,
            mAccountObserver.getSyncableEasMailboxWhere(), null, null);

    // Contacts/Calendar obey this setting from ContentResolver
    // Mail is on its own schedule
    boolean masterAutoSync = ContentResolver.getMasterSyncAutomatically();
    try {
        while (c.moveToNext()) {
            long mid = c.getLong(Mailbox.CONTENT_ID_COLUMN);
            AbstractSyncService service = null;
            synchronized (sSyncLock) {
                service = mServiceMap.get(mid);
            }
            if (service == null) {
                // We handle a few types of mailboxes specially
                int type = c.getInt(Mailbox.CONTENT_TYPE_COLUMN);

                // If background data is off, we only sync Outbox
                // Manual syncs are initiated elsewhere, so they will continue to be respected
                if (!mBackgroundData && type != Mailbox.TYPE_OUTBOX) {
                    continue;
                }

                if (type == Mailbox.TYPE_CONTACTS || type == Mailbox.TYPE_CALENDAR) {
                    // We don't sync these automatically if master auto sync is off
                    if (!masterAutoSync) {
                        continue;
                    }
                    // Get the right authority for the mailbox
                    String authority;
                    Account account = getAccountById(c.getInt(Mailbox.CONTENT_ACCOUNT_KEY_COLUMN));
                    if (account != null) {
                        if (type == Mailbox.TYPE_CONTACTS) {
                            authority = ContactsContract.AUTHORITY;
                        } else {
                            authority = Calendar.AUTHORITY;
                            if (!mCalendarObservers.containsKey(account.mId)) {
                                // Make sure we have an observer for this Calendar, as
                                // we need to be able to detect sync state changes, sigh
                                registerCalendarObserver(account);
                            }
                        }
                        android.accounts.Account a = new android.accounts.Account(account.mEmailAddress,
                                Email.EXCHANGE_ACCOUNT_MANAGER_TYPE);
                        // See if "sync automatically" is set; if not, punt
                        if (!ContentResolver.getSyncAutomatically(a, authority)) {
                            continue;
                        // See if the calendar is enabled; if not, punt
                        } else if ((type == Mailbox.TYPE_CALENDAR) && !isCalendarEnabled(account.mId)) {
                            continue;
                        }
                    }
                } else if (type == Mailbox.TYPE_TRASH) {
                    continue;
                }

                // Check whether we're in a hold (temporary or permanent)
                SyncError syncError = mSyncErrorMap.get(mid);
                if (syncError != null) {
                    // Nothing we can do about fatal errors
                    if (syncError.fatal) continue;
                    if (now < syncError.holdEndTime) {
                        // If release time is earlier than next wait time,
                        // move next wait time up to the release time
                        if (syncError.holdEndTime < now + nextWait) {
                            nextWait = syncError.holdEndTime - now;
                            mNextWaitReason = "Release hold";
                        }
                        continue;
                    } else {
                        // Keep the error around, but clear the end time
                        syncError.holdEndTime = 0;
                    }
                }

                // Otherwise, we use the sync interval
                long interval = c.getInt(Mailbox.CONTENT_SYNC_INTERVAL_COLUMN);
                if (interval == Mailbox.CHECK_INTERVAL_PUSH) {
                    Mailbox m = EmailContent.getContent(c, Mailbox.class);
                    requestSync(m, SYNC_PUSH, null);
                } else if (type == Mailbox.TYPE_OUTBOX) {
                    int cnt = EmailContent.count(this, Message.CONTENT_URI,
                            EasOutboxService.MAILBOX_KEY_AND_NOT_SEND_FAILED,
                            new String[] { Long.toString(mid) });
                    if (cnt > 0) {
                        Mailbox m = EmailContent.getContent(c, Mailbox.class);
                        startServiceThread(new EasOutboxService(this, m), m);
                    }
                } else if (interval > 0 && interval <= ONE_DAY_MINUTES) {
                    long lastSync = c.getLong(Mailbox.CONTENT_SYNC_TIME_COLUMN);
                    long sinceLastSync = now - lastSync;
                    if (sinceLastSync < 0) {
                        log("WHOA! lastSync in the future for mailbox: " + mid);
                        sinceLastSync = interval * MINUTES;
                    }
                    long toNextSync = interval * MINUTES - sinceLastSync;
                    String name = c.getString(Mailbox.CONTENT_DISPLAY_NAME_COLUMN);
                    if (toNextSync <= 0) {
                        Mailbox m = EmailContent.getContent(c, Mailbox.class);
                        requestSync(m, SYNC_SCHEDULED, null);
                    } else if (toNextSync < nextWait) {
                        nextWait = toNextSync;
                        if (Eas.USER_LOG) {
                            log("Next sync for " + name + " in " + nextWait / 1000 + "s");
                        }
                        mNextWaitReason = "Scheduled sync, " + name;
                    } else if (Eas.USER_LOG) {
                        log("Next sync for " + name + " in " + toNextSync / 1000 + "s");
                    }
                }
            } else {
                Thread thread = service.mThread;
                // Look for threads that have died and remove them from the map
                if (thread != null && !thread.isAlive()) {
                    if (Eas.USER_LOG) {
                        log("Dead thread, mailbox released: "
                                + c.getString(Mailbox.CONTENT_DISPLAY_NAME_COLUMN));
                    }
                    releaseMailbox(mid);
                    // Restart this if necessary
                    if (nextWait > 3 * SECONDS) {
                        nextWait = 3 * SECONDS;
                        mNextWaitReason = "Clean up dead thread(s)";
                    }
                } else {
                    long requestTime = service.mRequestTime;
                    if (requestTime > 0) {
                        long timeToRequest = requestTime - now;
                        if (timeToRequest <= 0) {
                            service.mRequestTime = 0;
                            service.alarm();
                        } else if (requestTime > 0 && timeToRequest < nextWait) {
                            if (timeToRequest < 11 * MINUTES) {
                                nextWait = timeToRequest < 250 ? 250 : timeToRequest;
                                mNextWaitReason = "Sync data change";
                            } else {
                                log("Illegal timeToRequest: " + timeToRequest);
                            }
                        }
                    }
                }
            }
        }
    } finally {
        c.close();
    }
    return nextWait;
}
From source file:ffx.ui.MainPanel.java
public synchronized MolecularAssembly[] openWaitUtils(String file) {
    UIFileOpener opener = openFromUtils(file);
    // Note: the Thread is constructed but never started here, so isAlive() is
    // immediately false and the wait loop below exits at once.
    Thread thread = new Thread(opener);
    while (thread != null && thread.isAlive()) {
        try {
            wait(1);
        } catch (InterruptedException e) {
            String message = "Exception waiting for " + file + " to open.";
            logger.log(Level.WARNING, message, e);
            return null;
        }
    }
    MolecularAssembly systems[] = activeFilter.getMolecularAssemblys();
    if (systems != null) {
        int n = systems.length;
        FFXSystem ffxSystems[] = new FFXSystem[n];
        FFXSystem allSystems[] = getHierarchy().getSystems();
        int total = allSystems.length;
        for (int i = 0; i < n; i++) {
            ffxSystems[i] = allSystems[total - n + i];
        }
        return ffxSystems;
    } else {
        return null;
    }
}
From source file:org.hyperic.hq.measurement.server.session.DataManagerImpl.java
private void waitForThreads(List<Thread> threads, int maxThreads) {
    if (threads.isEmpty() || (threads.size() < maxThreads)) {
        return;
    }
    int i = 0;
    while (threads.size() >= maxThreads) {
        i = ((i >= threads.size()) || (i < 0)) ? 0 : i;
        final Thread thread = threads.get(i);
        try {
            if (thread.isAlive()) {
                thread.join(100);
            }
            if (!thread.isAlive()) {
                threads.remove(i);
            } else {
                i++;
            }
        } catch (InterruptedException e) {
            log.debug(e, e);
        }
    }
}