List of usage examples for java.lang.Thread.getName()
public final String getName()
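Returns this thread's name. Before the project examples below, here is a minimal standalone sketch (class and thread names are illustrative only, not taken from the examples) showing getName() on the current thread and on an explicitly named worker thread:

public class GetNameDemo {
    public static void main(String[] args) throws InterruptedException {
        // The JVM names the initial thread "main"
        System.out.println("current thread: " + Thread.currentThread().getName());

        // A thread constructed with an explicit name reports that name via getName()
        Thread worker = new Thread(
                () -> System.out.println("running in: " + Thread.currentThread().getName()),
                "demo-worker");
        worker.start();
        worker.join();

        System.out.println("worker name seen from main: " + worker.getName());
    }
}

Threads created without an explicit name get an automatically generated one of the form "Thread-N", which is why several of the examples below parse, match, or temporarily rewrite thread names instead of relying on the defaults.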
From source file:org.lockss.db.DbManager.java
private synchronized List<String> getPendingUpdates() {
    final String DEBUG_HEADER = "getPendingUpdates(): ";
    List<String> result = new ArrayList<String>();

    for (Thread thread : threads) {
        if (log.isDebug3()) log.debug3(DEBUG_HEADER + "thread = '" + thread + "'");

        String name = thread.getName();
        if (log.isDebug3()) log.debug3(DEBUG_HEADER + "name = '" + name + "'");

        String from = name.substring(9, name.indexOf("To"));
        if (log.isDebug3()) log.debug3(DEBUG_HEADER + "from = '" + from + "'");

        String to = name.substring(name.indexOf("To") + 2, name.indexOf("Migrator"));
        if (log.isDebug3()) log.debug3(DEBUG_HEADER + "to = '" + to + "'");

        result.add(from + " -> " + to);
    }

    if (log.isDebug2()) log.debug2(DEBUG_HEADER + "result = " + result);
    return result;
}
From source file:org.lockss.db.DbManager.java
/**
 * Removes the record of a spawned thread. Useful to avoid ugly but harmless
 * exceptions when running tests.
 *
 * @param name A String with the name to be cleaned up.
 */
synchronized void cleanUpThread(String name) {
    final String DEBUG_HEADER = "cleanUpThread(): ";
    if (log.isDebug2()) log.debug2(DEBUG_HEADER + "name = '" + name + "'");

    Thread namedThread = null;

    for (Thread thread : threads) {
        if (log.isDebug3()) log.debug3(DEBUG_HEADER + "thread = '" + thread + "'");

        if (name.equals(thread.getName())) {
            namedThread = thread;
            if (log.isDebug3()) log.debug3(DEBUG_HEADER + "namedThread = '" + namedThread + "'");
            break;
        }
    }

    if (namedThread != null) {
        if (log.isDebug3()) log.debug3(DEBUG_HEADER + "Removing namedThread = '" + namedThread + "'...");
        threads.remove(namedThread);
        if (log.isDebug3()) log.debug3(DEBUG_HEADER + "Done.");
    }

    if (log.isDebug2()) log.debug2(DEBUG_HEADER + "Done.");
}
From source file:com.meltmedia.cadmium.servlets.ClassLoaderLeakPreventor.java
/**
 * Partially inspired by org.apache.catalina.loader.WebappClassLoader.clearReferencesThreads()
 */
@SuppressWarnings("deprecation")
protected void stopThreads() {
    final Class<?> workerClass = findClass("java.util.concurrent.ThreadPoolExecutor$Worker");

    final Field oracleTarget = findField(Thread.class, "target"); // Sun/Oracle JRE
    final Field ibmRunnable = findField(Thread.class, "runnable"); // IBM JRE

    for (Thread thread : getAllThreads()) {
        @SuppressWarnings("RedundantCast")
        final Runnable runnable = (oracleTarget != null)
                ? (Runnable) getFieldValue(oracleTarget, thread) // Sun/Oracle JRE
                : (Runnable) getFieldValue(ibmRunnable, thread); // IBM JRE

        if (thread != Thread.currentThread() && // Ignore current thread
                (isThreadInWebApplication(thread) || isLoadedInWebApplication(runnable))) {

            if (thread.getClass().getName().startsWith(JURT_ASYNCHRONOUS_FINALIZER)) {
                // Note, the thread group of this thread may be "system" if it is triggered by the Garbage Collector,
                // however if triggered by us in forceStartOpenOfficeJurtCleanup() it may depend on the application server
                if (stopThreads) {
                    info("Found JURT thread " + thread.getName() + "; starting " + JURTKiller.class.getSimpleName());
                    new JURTKiller(thread).start();
                } else
                    warn("JURT thread " + thread.getName() + " is still running in web app");
            } else if (thread.getThreadGroup() != null
                    && ("system".equals(thread.getThreadGroup().getName()) || // System thread
                        "RMI Runtime".equals(thread.getThreadGroup().getName()))) { // RMI thread (honestly, just copied from Tomcat)

                if ("Keep-Alive-Timer".equals(thread.getName())) {
                    thread.setContextClassLoader(getWebApplicationClassLoader().getParent());
                    debug("Changed contextClassLoader of HTTP keep alive thread");
                }
            } else if (thread.isAlive()) { // Non-system thread, running in web app
                if ("java.util.TimerThread".equals(thread.getClass().getName())) {
                    if (stopTimerThreads) {
                        warn("Stopping Timer thread running in classloader.");
                        stopTimerThread(thread);
                    } else {
                        info("Timer thread is running in classloader, but will not be stopped");
                    }
                } else {
                    // If the thread is running a java.util.concurrent.ThreadPoolExecutor.Worker, try shutting down the executor
                    if (workerClass != null && workerClass.isInstance(runnable)) {
                        if (stopThreads) {
                            warn("Shutting down " + ThreadPoolExecutor.class.getName() + " running within the classloader.");
                            try {
                                // java.util.concurrent.ThreadPoolExecutor, introduced in Java 1.5
                                final Field workerExecutor = findField(workerClass, "this$0");
                                final ThreadPoolExecutor executor = getFieldValue(workerExecutor, runnable);
                                executor.shutdownNow();
                            } catch (Exception ex) {
                                error(ex);
                            }
                        } else
                            info(ThreadPoolExecutor.class.getName() + " running within the classloader will not be shut down.");
                    }

                    final String displayString = "'" + thread + "' of type " + thread.getClass().getName();

                    if (stopThreads) {
                        final String waitString = (threadWaitMs > 0) ? "after " + threadWaitMs + " ms " : "";
                        warn("Stopping Thread " + displayString + " running in web app " + waitString);

                        if (threadWaitMs > 0) {
                            try {
                                thread.join(threadWaitMs); // Wait for thread to run
                            } catch (InterruptedException e) {
                                // Do nothing
                            }
                        }

                        // Normally threads should not be stopped (method is deprecated), since it may cause an inconsistent state.
                        // In this case however, the alternative is a classloader leak, which may or may not be considered worse.
                        if (thread.isAlive())
                            thread.stop();
                    } else {
                        warn("Thread " + displayString + " is still running in web app");
                    }
                }
            }
        }
    }
}
From source file:com.mirth.connect.donkey.server.channel.Channel.java
protected DispatchResult dispatchRawMessage(RawMessage rawMessage, boolean batch) throws ChannelException {
    // Allow messages to continue processing while the channel is stopping if they are part of an existing batch
    if ((currentState == DeployedState.STOPPING && !batch) || currentState == DeployedState.STOPPED) {
        throw new ChannelException(true);
    }

    Thread currentThread = Thread.currentThread();
    String originalThreadName = currentThread.getName();
    boolean lockAcquired = false;
    Long persistedMessageId = null;

    try {
        synchronized (dispatchThreads) {
            if (!shuttingDown) {
                dispatchThreads.add(currentThread);
            } else {
                throw new ChannelException(true);
            }
        }

        if (StringUtils.contains(originalThreadName, channelId)) {
            currentThread.setName("Channel Dispatch Thread < " + originalThreadName);
        } else {
            currentThread.setName(
                    "Channel Dispatch Thread on " + name + " (" + channelId + ") < " + originalThreadName);
        }

        DonkeyDao dao = null;
        Message processedMessage = null;
        Response response = null;
        String responseErrorMessage = null;
        DispatchResult dispatchResult = null;

        try {
            obtainProcessLock();
            lockAcquired = true;

            /*
             * TRANSACTION: Create Raw Message
             * - create a source connector message from the raw message and set the status as RECEIVED
             * - store attachments
             */
            dao = daoFactory.getDao();
            ConnectorMessage sourceMessage = createAndStoreSourceMessage(dao, rawMessage);
            ThreadUtils.checkInterruptedStatus();

            if (sourceConnector.isRespondAfterProcessing()) {
                dao.commit(storageSettings.isRawDurable());
                persistedMessageId = sourceMessage.getMessageId();
                dao.close();

                markDeletedQueuedMessages(rawMessage, persistedMessageId);

                processedMessage = process(sourceMessage, false);
            } else {
                // Block other threads from adding to the source queue until both the current commit and queue addition finishes
                synchronized (sourceQueue) {
                    dao.commit(storageSettings.isRawDurable());
                    persistedMessageId = sourceMessage.getMessageId();
                    dao.close();
                    queue(sourceMessage);
                }

                markDeletedQueuedMessages(rawMessage, persistedMessageId);
            }

            if (responseSelector.canRespond()) {
                try {
                    response = responseSelector.getResponse(sourceMessage, processedMessage);
                } catch (Exception e) {
                    responseErrorMessage = ExceptionUtils.getStackTrace(e);
                }
            }
        } catch (RuntimeException e) {
            // TODO determine behavior if this occurs.
            throw new ChannelException(true, e);
        } finally {
            if (lockAcquired && (!sourceConnector.isRespondAfterProcessing() || persistedMessageId == null
                    || Thread.currentThread().isInterrupted())) {
                // Release the process lock if an exception was thrown before a message was persisted
                // or if the thread was interrupted because no additional processing will be done.
                releaseProcessLock();
                lockAcquired = false;
            }

            if (dao != null && !dao.isClosed()) {
                dao.close();
            }

            // Create the DispatchResult at the very end because lockAcquired might have changed
            if (persistedMessageId != null) {
                dispatchResult = new DispatchResult(persistedMessageId, processedMessage, response,
                        sourceConnector.isRespondAfterProcessing(), lockAcquired);

                if (StringUtils.isNotBlank(responseErrorMessage)) {
                    dispatchResult.setResponseError(responseErrorMessage);
                }
            }
        }

        return dispatchResult;
    } catch (InterruptedException e) {
        // This exception should only ever be thrown during a halt.
        // It is impossible to know whether or not the message was persisted because the task will continue to run
        // even though we are no longer waiting for it. Furthermore it is possible the message was actually sent.
        // The best we can do is cancel the task and throw a channel exception.
        // If the message was not queued on the source connector, recovery should take care of it.
        // If the message was queued, the source of the message will be notified that the message was not persisted to be safe.
        // This could lead to a potential duplicate message being received/sent, but it is one of the consequences of using halt.
        throw new ChannelException(true, e);
    } catch (Throwable t) {
        Throwable cause = t.getCause();
        ChannelException channelException = null;

        if (cause instanceof InterruptedException) {
            channelException = new ChannelException(true, cause);
        } else if (cause instanceof ChannelException) {
            logger.error("Runtime error in channel " + name + " (" + channelId + ").", cause);
            channelException = (ChannelException) cause;
        } else {
            logger.error("Error processing message in channel " + name + " (" + channelId + ").", t);
            channelException = new ChannelException(false, t);
        }

        if (persistedMessageId == null) {
            throw channelException;
        }

        return new DispatchResult(persistedMessageId, null, null, false, lockAcquired, channelException);
    } finally {
        synchronized (dispatchThreads) {
            dispatchThreads.remove(currentThread);
        }
        currentThread.setName(originalThreadName);
    }
}
From source file:se.jiderhamn.classloader.leak.prevention.ClassLoaderLeakPreventor.java
/**
 * Partially inspired by org.apache.catalina.loader.WebappClassLoader.clearReferencesThreads()
 */
@SuppressWarnings("deprecation")
protected void stopThreads() {
    final Class<?> workerClass = findClass("java.util.concurrent.ThreadPoolExecutor$Worker");

    final Field oracleTarget = findField(Thread.class, "target"); // Sun/Oracle JRE
    final Field ibmRunnable = findField(Thread.class, "runnable"); // IBM JRE

    for (Thread thread : getAllThreads()) {
        final Runnable runnable = (oracleTarget != null)
                ? (Runnable) getFieldValue(oracleTarget, thread) // Sun/Oracle JRE
                : (Runnable) getFieldValue(ibmRunnable, thread); // IBM JRE

        if (thread != Thread.currentThread() && // Ignore current thread
                (isThreadInWebApplication(thread) || isLoadedInWebApplication(runnable))) {

            if (thread.getClass().getName().startsWith(JURT_ASYNCHRONOUS_FINALIZER)) {
                // Note, the thread group of this thread may be "system" if it is triggered by the Garbage Collector,
                // however if triggered by us in forceStartOpenOfficeJurtCleanup() it may depend on the application server
                if (stopThreads) {
                    info("Found JURT thread " + thread.getName() + "; starting " + JURTKiller.class.getSimpleName());
                    new JURTKiller(thread).start();
                } else
                    warn("JURT thread " + thread.getName() + " is still running in web app");
            } else if (thread.getThreadGroup() != null
                    && ("system".equals(thread.getThreadGroup().getName()) || // System thread
                        "RMI Runtime".equals(thread.getThreadGroup().getName()))) { // RMI thread (honestly, just copied from Tomcat)

                if ("Keep-Alive-Timer".equals(thread.getName())) {
                    thread.setContextClassLoader(getWebApplicationClassLoader().getParent());
                    debug("Changed contextClassLoader of HTTP keep alive thread");
                }
            } else if (thread.isAlive()) { // Non-system thread, running in web app
                if ("java.util.TimerThread".equals(thread.getClass().getName())) {
                    if (stopTimerThreads) {
                        warn("Stopping Timer thread running in classloader.");
                        stopTimerThread(thread);
                    } else {
                        info("Timer thread is running in classloader, but will not be stopped");
                    }
                } else {
                    // If the thread is running a java.util.concurrent.ThreadPoolExecutor.Worker, try shutting down the executor
                    if (workerClass != null && workerClass.isInstance(runnable)) {
                        if (stopThreads) {
                            warn("Shutting down " + ThreadPoolExecutor.class.getName() + " running within the classloader.");
                            try {
                                // java.util.concurrent.ThreadPoolExecutor, introduced in Java 1.5
                                final Field workerExecutor = findField(workerClass, "this$0");
                                final ThreadPoolExecutor executor = getFieldValue(workerExecutor, runnable);
                                executor.shutdownNow();
                            } catch (Exception ex) {
                                error(ex);
                            }
                        } else
                            info(ThreadPoolExecutor.class.getName() + " running within the classloader will not be shut down.");
                    }

                    final String displayString = "'" + thread + "' of type " + thread.getClass().getName();

                    if (stopThreads) {
                        final String waitString = (threadWaitMs > 0) ? "after " + threadWaitMs + " ms " : "";
                        warn("Stopping Thread " + displayString + " running in web app " + waitString);

                        if (threadWaitMs > 0) {
                            try {
                                thread.join(threadWaitMs); // Wait for thread to run
                            } catch (InterruptedException e) {
                                // Do nothing
                            }
                        }

                        // Normally threads should not be stopped (method is deprecated), since it may cause an inconsistent state.
                        // In this case however, the alternative is a classloader leak, which may or may not be considered worse.
                        if (thread.isAlive())
                            thread.stop();
                    } else {
                        warn("Thread " + displayString + " is still running in web app");
                    }
                }
            }
        }
    }
}
From source file:org.jvnet.hudson.test.JenkinsRule.java
public Statement apply(final Statement base, final Description description) {
    if (description.getAnnotation(WithoutJenkins.class) != null) {
        // request has been made to not create the instance for this test method
        return base;
    }

    return new Statement() {
        @Override
        public void evaluate() throws Throwable {
            testDescription = description;
            Thread t = Thread.currentThread();
            String o = t.getName();
            t.setName("Executing " + testDescription.getDisplayName());
            before();
            try {
                System.out.println("=== Starting " + testDescription.getDisplayName());
                // so that test code has all the access to the system
                ACL.impersonate(ACL.SYSTEM);
                try {
                    base.evaluate();
                } catch (Throwable th) {
                    // allow the late attachment of a debugger in case of a failure. Useful
                    // for diagnosing a rare failure
                    try {
                        throw new BreakException();
                    } catch (BreakException e) {
                    }

                    RandomlyFails rf = testDescription.getAnnotation(RandomlyFails.class);
                    if (rf != null) {
                        System.err.println("Note: known to randomly fail: " + rf.value());
                    }

                    throw th;
                }
            } finally {
                after();
                testDescription = null;
                t.setName(o);
            }
        }
    };
}
From source file:gov.noaa.pfel.erddap.util.EDStatic.java
/**
 * This interrupts the thread and waits up to maxSeconds for it to finish.
 * If it still isn't finished, it is stopped.
 */
public static void stopThread(Thread thread, int maxSeconds) {
    try {
        if (thread == null)
            return;
        String name = thread.getName();
        if (verbose)
            String2.log("stopThread(" + name + ")...");
        if (!thread.isAlive()) {
            if (verbose)
                String2.log("thread=" + name + " was already not alive.");
            return;
        }
        thread.interrupt();
        int waitSeconds = 0;
        while (thread.isAlive() && waitSeconds < maxSeconds) {
            waitSeconds += 2;
            Math2.sleep(2000);
        }
        if (thread.isAlive()) {
            if (verbose)
                String2.log("!!!Stopping thread=" + name + " after " + waitSeconds + " s");
            thread.stop();
        } else {
            if (verbose)
                String2.log("thread=" + name + " noticed interrupt in " + waitSeconds + " s");
        }
    } catch (Throwable t) {
        String2.log(MustBe.throwableToString(t));
    }
}
From source file:org.apache.hadoop.hbase.regionserver.HRegionServer.java
/**
 * Starts a HRegionServer at the default location
 * @param conf
 * @param csm implementation of CoordinatedStateManager to be used
 * @throws IOException
 * @throws InterruptedException
 */
public HRegionServer(Configuration conf, CoordinatedStateManager csm) throws IOException, InterruptedException {
    this.fsOk = true;
    this.conf = conf;
    checkCodecs(this.conf);
    this.online = new AtomicBoolean(false);
    this.userProvider = UserProvider.instantiate(conf);
    FSUtils.setupShortCircuitRead(this.conf);

    // Config'ed params
    this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
            HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
    this.threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
    this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000);

    this.sleeper = new Sleeper(this.msgInterval, this);

    boolean isNoncesEnabled = conf.getBoolean(HConstants.HBASE_RS_NONCES_ENABLED, true);
    this.nonceManager = isNoncesEnabled ? new ServerNonceManager(this.conf) : null;

    this.numRegionsToReport = conf.getInt("hbase.regionserver.numregionstoreport", 10);

    this.operationTimeout = conf.getInt(HConstants.HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY,
            HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT);

    this.abortRequested = false;
    this.stopped = false;

    rpcServices = createRpcServices();
    this.startcode = System.currentTimeMillis();
    String hostName = rpcServices.isa.getHostName();
    serverName = ServerName.valueOf(hostName, rpcServices.isa.getPort(), startcode);

    this.distributedLogReplay = HLogSplitter.isDistributedLogReplay(this.conf);

    // login the zookeeper client principal (if using security)
    ZKUtil.loginClient(this.conf, "hbase.zookeeper.client.keytab.file",
            "hbase.zookeeper.client.kerberos.principal", hostName);
    // login the server principal (if using secure Hadoop)
    login(userProvider, hostName);

    regionServerAccounting = new RegionServerAccounting();
    cacheConfig = new CacheConfig(conf);
    uncaughtExceptionHandler = new UncaughtExceptionHandler() {
        @Override
        public void uncaughtException(Thread t, Throwable e) {
            abort("Uncaught exception in service thread " + t.getName(), e);
        }
    };

    // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
    // underlying hadoop hdfs accessors will be going against wrong filesystem
    // (unless all is set to defaults).
    FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
    // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase
    // checksum verification enabled, then automatically switch off hdfs checksum verification.
    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
    this.fs = new HFileSystem(this.conf, useHBaseChecksum);
    this.rootDir = FSUtils.getRootDir(this.conf);
    this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, !canUpdateTableDescriptor());

    service = new ExecutorService(getServerName().toShortString());
    spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());

    // Some unit tests don't need a cluster, so no zookeeper at all
    if (!conf.getBoolean("hbase.testing.nocluster", false)) {
        // Open connection to zookeeper and set primary watcher
        zooKeeper = new ZooKeeperWatcher(conf, getProcessName() + ":" + rpcServices.isa.getPort(), this,
                canCreateBaseZNode());

        this.csm = csm;
        this.csm.initialize(this);
        this.csm.start();

        tableLockManager = TableLockManager.createTableLockManager(conf, zooKeeper, serverName);

        masterAddressTracker = new MasterAddressTracker(getZooKeeper(), this);
        masterAddressTracker.start();

        clusterStatusTracker = new ClusterStatusTracker(zooKeeper, this);
        clusterStatusTracker.start();
    }

    rpcServices.start();
    putUpWebUI();
}
From source file:org.hyperic.hq.agent.server.session.AgentSynchronizer.java
private void executeJob(final StatefulAgentDataTransferJob job) throws InterruptedException {
    final String name = Thread.currentThread().getName() + "-" + executorNum.getAndIncrement();
    final Thread thread = new Thread(name) {
        @Override
        public void run() {
            job.setLastRuntime();
            if (agentIsPingable(job)) {
                try {
                    job.execute();
                } catch (Throwable e) {
                    if (e instanceof InterruptedException) {
                        log.warn("jobdesc=" + job.getJobDescription() + " was interrupted: " + e);
                        log.debug(e, e);
                    } else {
                        log.error(e, e);
                    }
                }
                return;
            } else {
                log.warn("Could not ping agent in order to run job " + getJobInfo(job));
            }
        }
    };
    thread.start();
    thread.join(WAIT_TIME);

    // if the thread is alive just try to interrupt it and keep going
    final boolean threadIsAlive = thread.isAlive();
    final boolean jobWasSuccessful = job.wasSuccessful();
    final AvailabilityManager availabilityManager = ctx.getBean(AvailabilityManager.class);
    final boolean platformIsAvailable = availabilityManager.platformIsAvailableOrUnknown(job.getAgentId())
            || isInRestartState(job.getAgentId());

    if (jobWasSuccessful) {
        // do nothing, this is good!
        return;
    } else if (platformIsAvailable) {
        if (threadIsAlive) {
            thread.interrupt();
        }
        job.incrementFailures();
        if (log.isDebugEnabled()) {
            log.debug("executeJob, number of failures for execute job=" + getJobInfo(job) + " RuntimeId: "
                    + job.getRuntimeTime() + " " + job.getNumberOfFailures());
        }
        if (job.discardJob()) {
            job.onFailure(
                    "Too many failures on agent " + job.getAgentId() + " RuntimeId: " + job.getRuntimeTime());
        } else {
            reAddJob(job);
            if (threadIsAlive) {
                log.warn("AgentDataTransferJob=" + getJobInfo(job) + " has take more than " + WAIT_TIME / 1000 / 60
                        + " minutes to run. The agent appears alive so therefore the job was"
                        + " interrupted and requeued. Job threadName={" + thread.getName() + "}");
            } else {
                log.warn("AgentDataTransferJob=" + getJobInfo(job)
                        + " died and was not successful. The agent appears alive and"
                        + " therefore the job was requeued. " + " Job threadName={" + thread.getName() + "}"
                        + " RuntimeId: " + job.getRuntimeTime());
            }
        }
    } else {
        if (threadIsAlive) {
            thread.interrupt();
            log.warn("AgentDataTransferJob=" + getJobInfo(job) + " has take more than " + WAIT_TIME / 1000 / 60
                    + " minutes to run. Discarding job threadName={" + thread.getName() + "}");
        }
        // Can't ping agent and platform availability is down, therefore agent must be down
        job.onFailure("Platform associated with agent " + job.getAgentId() + " is not available");
    }
}
From source file:API.amazon.mws.feeds.service.MarketplaceWebServiceClient.java
/**
 * Constructs MarketplaceWebServiceClient with AWS Access Key ID, AWS Secret Key
 * and MarketplaceWebServiceConfig. Use MarketplaceWebServiceConfig to pass additional
 * configuration that affects how service is being called.
 *
 * @param awsAccessKeyId
 *          AWS Access Key ID
 * @param awsSecretAccessKey
 *          AWS Secret Access Key
 * @param config
 *          Additional configuration options
 */
@SuppressWarnings("serial")
public MarketplaceWebServiceClient(String awsAccessKeyId, String awsSecretAccessKey, String applicationName,
        String applicationVersion, MarketplaceWebServiceConfig config) {
    this.awsAccessKeyId = awsAccessKeyId;
    this.awsSecretAccessKey = awsSecretAccessKey;
    this.config = config;
    this.httpClient = configureHttpClient(applicationName, applicationVersion);

    this.asyncExecutor = new ThreadPoolExecutor(config.getMaxAsyncThreads(), config.getMaxAsyncThreads(), 60L,
            TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(config.getMaxAsyncQueueSize()) {

                @Override
                public boolean offer(Runnable task) {
                    log.debug("Maximum number of concurrent threads reached, queuing task...");
                    return super.offer(task);
                }

            }, new ThreadFactory() {

                private final AtomicInteger threadNumber = new AtomicInteger(1);

                public Thread newThread(Runnable task) {
                    Thread thread = new Thread(task,
                            "MarketplaceWebServiceClient-Thread-" + threadNumber.getAndIncrement());
                    thread.setDaemon(true);
                    if (thread.getPriority() != Thread.NORM_PRIORITY) {
                        thread.setPriority(Thread.NORM_PRIORITY);
                    }
                    log.debug("ThreadFactory created new thread: " + thread.getName());
                    return thread;
                }

            }, new RejectedExecutionHandler() {

                public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) {
                    log.debug("Maximum number of concurrent threads reached, and queue is full. "
                            + "Running task in the calling thread..." + Thread.currentThread().getName());
                    if (!executor.isShutdown()) {
                        task.run();
                    }
                }

            });
}