List of usage examples for java.lang.Thread.interrupted()
public static boolean interrupted()
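Thread.interrupted() reports whether the current thread has been interrupted and, unlike Thread.currentThread().isInterrupted(), clears the interrupted status as a side effect: a second call with no new interrupt in between returns false. Every example below relies on one of those two properties, either polling the flag as a loop condition or deliberately discarding a stale interrupt. A minimal sketch of the test-and-clear behavior (the class name is illustrative only):

public class InterruptedStatusDemo {
    public static void main(String[] args) {
        Thread.currentThread().interrupt();          // set our own interrupt flag

        System.out.println(Thread.interrupted());    // true  - flag was set, and is now cleared
        System.out.println(Thread.interrupted());    // false - cleared by the previous call
        System.out.println(Thread.currentThread().isInterrupted()); // false - still clear
    }
}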
From source file:org.eclipse.kura.deployment.agent.impl.DeploymentAgent.java
private void uninstaller() {
    do {
        try {
            try {
                while (this.m_uninstPackageNames.isEmpty()) {
                    synchronized (this.m_uninstPackageNames) {
                        this.m_uninstPackageNames.wait();
                    }
                }
                String name = this.m_uninstPackageNames.peek();
                if (name != null) {
                    s_logger.info("About to uninstall package {}", name);
                    DeploymentPackage dp = null;
                    boolean successful = false;
                    Exception ex = null;
                    try {
                        dp = this.m_deploymentAdmin.getDeploymentPackage(name);
                        if (dp != null) {
                            dp.uninstall();
                            String sUrl = this.m_deployedPackages.getProperty(name);
                            File dpFile = new File(new URL(sUrl).getPath());
                            if (!dpFile.delete()) {
                                s_logger.warn("Cannot delete file at URL: {}", sUrl);
                            }
                            successful = true;
                            removePackageFromConfFile(name);
                        }
                    } catch (Exception e) {
                        ex = e;
                        s_logger.error("Exception uninstalling package {}", name, e);
                    } finally {
                        s_logger.info("Posting UNINSTALLED event for package {}: {}", name,
                                successful ? "successful" : "unsuccessful");
                        this.m_uninstPackageNames.poll();
                        postUninstalledEvent(name, successful, ex);
                    }
                }
            } catch (InterruptedException e) {
                s_logger.info("Exiting...");
                Thread.interrupted(); // clear any residual interrupt flag before returning
                return;
            }
        } catch (Throwable t) {
            s_logger.error("Unexpected throwable", t);
        }
    } while (true);
}
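The Kura uninstaller shows the "clear and exit" shutdown pattern for a long-lived worker: the InterruptedException thrown out of wait() is the shutdown signal, and Thread.interrupted() discards any residual flag before the method returns, so a pooled thread is handed back clean. A distilled sketch of that shape (the work queue field and process() are placeholders, not Kura API):

private final Queue<String> work = new LinkedList<String>();

private void workerLoop() {
    while (true) {
        try {
            String item;
            synchronized (work) {
                while (work.isEmpty()) {
                    work.wait();       // InterruptedException here signals shutdown
                }
                item = work.poll();
            }
            process(item);             // placeholder for the real unit of work
        } catch (InterruptedException e) {
            Thread.interrupted();      // defensively clear any residual status
            return;                    // exit the worker loop
        }
    }
}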
From source file:com.mirth.connect.plugins.datapruner.DataPruner.java
@Override
public void run() {
    try {
        logger.debug("Executing pruner, started at "
                + new SimpleDateFormat("MM/dd/yyyy hh:mm aa").format(Calendar.getInstance().getTime()));

        if (pruneEvents) {
            pruneEvents();
        }

        String date = new SimpleDateFormat(MessageWriterFactory.ARCHIVE_DATE_PATTERN)
                .format(Calendar.getInstance().getTime());
        String archiveFolder = (archiveEnabled)
                ? archiverOptions.getRootFolder() + IOUtils.DIR_SEPARATOR + date
                : null;

        Queue<PrunerTask> taskQueue;

        try {
            taskQueue = buildTaskQueue();
        } catch (Exception e) {
            // the error should already be logged
            return;
        }

        logger.debug("Pruner task queue built, " + taskQueue.size() + " channels will be processed");

        Map<String, String> attributes = new HashMap<String, String>();

        if (taskQueue.isEmpty()) {
            attributes.put("No messages to prune.", "");
            eventController.dispatchEvent(new ServerEvent(serverId, DataPrunerService.PLUGINPOINT,
                    Level.INFORMATION, Outcome.SUCCESS, attributes));
        }

        while (!taskQueue.isEmpty()) {
            ThreadUtils.checkInterruptedStatus();
            PrunerTask task = taskQueue.poll();

            try {
                status.setCurrentChannelId(task.getChannelId());
                status.setCurrentChannelName(task.getChannelName());
                status.setTaskStartTime(Calendar.getInstance());

                PruneResult result = pruneChannel(task.getChannelId(), task.getChannelName(),
                        task.getMessageDateThreshold(), task.getContentDateThreshold(), archiveFolder,
                        task.isArchiveEnabled());

                status.getProcessedChannelIds().add(task.getChannelId());

                attributes.put("Channel ID", task.getChannelId());
                attributes.put("Channel Name", task.getChannelName());

                if (archiveEnabled && task.isArchiveEnabled()) {
                    attributes.put("Messages Archived", Long.toString(result.numMessagesArchived));
                }

                attributes.put("Messages Pruned", Long.toString(result.numMessagesPruned));
                attributes.put("Content Rows Pruned", Long.toString(result.numContentPruned));
                attributes.put("Time Elapsed", getTimeElapsed());
                eventController.dispatchEvent(new ServerEvent(serverId, DataPrunerService.PLUGINPOINT,
                        Level.INFORMATION, Outcome.SUCCESS, attributes));
            } catch (InterruptedException e) {
                throw e;
            } catch (Exception e) {
                status.getFailedChannelIds().add(task.getChannelId());

                attributes.put("channel", task.getChannelName());
                attributes.put("error", e.getMessage());
                attributes.put("trace", ExceptionUtils.getStackTrace(e));
                eventController.dispatchEvent(new ServerEvent(serverId, DataPrunerService.PLUGINPOINT,
                        Level.ERROR, Outcome.FAILURE, attributes));

                Throwable t = e;
                if (e instanceof DataPrunerException) {
                    t = e.getCause();
                }

                logger.error("Failed to prune messages for channel " + task.getChannelName() + " ("
                        + task.getChannelId() + ").", t);
            } finally {
                status.getPendingChannelIds().remove(task.getChannelId());
                status.setCurrentChannelId(null);
                status.setCurrentChannelName(null);
            }
        }

        logger.debug("Pruner job finished executing");
    } catch (InterruptedException e) {
        // We need to clear this thread's interrupted status, or else the EventController will fail to dispatch the event
        Thread.interrupted();

        ServerEvent event = new ServerEvent(serverId, DataPrunerService.PLUGINPOINT + " Halted");
        event.setLevel(Level.INFORMATION);
        event.setOutcome(Outcome.SUCCESS);
        eventController.dispatchEvent(event);
        logger.debug("Data Pruner halted");
    } catch (Throwable t) {
        logger.error("An error occurred while executing the data pruner", t);
    } finally {
        status.setEndTime(Calendar.getInstance());
        lastStatus = SerializationUtils.clone(status);
        running.set(false);
    }
}
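Note where the clearing happens in the pruner: inside the catch block, right before more work is done. Throwing InterruptedException normally clears the flag already, but as the inline comment records, the author found the status could still be set at this point, and a set flag would make the follow-up dispatch of the "Halted" event fail. A hedged sketch of that "clear, then finish cleanup" shape (all names besides Thread.interrupted() are illustrative):

try {
    doLongRunningJob();                 // may throw InterruptedException
} catch (InterruptedException e) {
    // the flag may have been re-set after the throw (e.g. by a second
    // interrupt); clear it so interruption-sensitive cleanup can run
    Thread.interrupted();
    dispatchHaltedEvent();              // hypothetical cleanup that must not see the flag
}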
From source file:com.btoddb.fastpersitentqueue.FpqIT.java
@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    fpq1.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    fpq1.setMaxMemorySegmentSizeInBytes(10000000);
    fpq1.setMaxJournalFileSize(10000000);
    fpq1.setMaxJournalDurationInMs(30000);
    fpq1.setFlushPeriodInMs(1000);
    fpq1.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    fpq1.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        fpq1.beginTransaction();
                        fpq1.push(bb.array());
                        fpq1.commit();
                        if ((x + 1) % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !fpq1.isEmpty()) {
                    try {
                        fpq1.beginTransaction();
                        try {
                            Collection<FpqEntry> entries = fpq1.pop(popBatchSize);
                            if (null == entries) {
                                Thread.sleep(100);
                                continue;
                            }

                            for (FpqEntry entry : entries) {
                                ByteBuffer bb = ByteBuffer.wrap(entry.getData());
                                popSum.addAndGet(bb.getLong());
                                if (entry.getId() % 500 == 0) {
                                    System.out.println("popped ID = " + entry.getId());
                                }
                            }
                            numPops.addAndGet(entries.size());
                            fpq1.commit();
                            entries.clear();
                        } finally {
                            if (fpq1.isTransactionActive()) {
                                fpq1.rollback();
                            }
                        }
                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(fpq1.getNumberOfEntries(), is(0L));
    assertThat(pushSum.get(), is(popSum.get()));
    assertThat(fpq1.getMemoryMgr().getNumberOfActiveSegments(), is(1));
    assertThat(fpq1.getMemoryMgr().getSegments(), hasSize(1));
    assertThat(fpq1.getJournalMgr().getJournalFiles().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(fpq1.getPagingDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            is(empty()));
    assertThat(FileUtils.listFiles(fpq1.getJournalDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            hasSize(1));
}
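The `while (!finished)` block at the end of the test is an inline uninterruptible wait: if f.get() is interrupted, the status is cleared and the gets are retried from the top. Guava packages this idea as Uninterruptibles.getUninterruptibly; a standalone sketch, with the conventional refinement of re-asserting the interrupt once the value is finally obtained instead of silently discarding it:

static <T> T getUninterruptibly(Future<T> future) throws ExecutionException {
    boolean interrupted = false;
    try {
        while (true) {
            try {
                return future.get();
            } catch (InterruptedException e) {
                interrupted = true;   // remember the interrupt, keep waiting
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();  // restore the status for the caller
        }
    }
}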
From source file:org.apache.hadoop.hbase.regionserver.SplitLogWorker.java
/**
 * Try to grab a 'lock' on the task zk node to own and execute the task.
 * <p>
 * @param path zk node for the task
 */
private void grabTask(String path) {
    Stat stat = new Stat();
    long t = -1;
    byte[] data;
    synchronized (grabTaskLock) {
        currentTask = path;
        workerInGrabTask = true;
        if (Thread.interrupted()) {
            return;
        }
    }
    try {
        try {
            if ((data = ZKUtil.getDataNoWatch(this.watcher, path, stat)) == null) {
                SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.incrementAndGet();
                return;
            }
        } catch (KeeperException e) {
            LOG.warn("Failed to get data for znode " + path, e);
            SplitLogCounters.tot_wkr_failed_to_grab_task_exception.incrementAndGet();
            return;
        }
        SplitLogTask slt;
        try {
            slt = SplitLogTask.parseFrom(data);
        } catch (DeserializationException e) {
            LOG.warn("Failed parse data for znode " + path, e);
            SplitLogCounters.tot_wkr_failed_to_grab_task_exception.incrementAndGet();
            return;
        }
        if (!slt.isUnassigned()) {
            SplitLogCounters.tot_wkr_failed_to_grab_task_owned.incrementAndGet();
            return;
        }

        currentVersion = attemptToOwnTask(true, watcher, serverName, path, stat.getVersion());
        if (currentVersion < 0) {
            SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.incrementAndGet();
            return;
        }

        if (ZKSplitLog.isRescanNode(watcher, currentTask)) {
            HLogSplitterHandler.endTask(watcher, new SplitLogTask.Done(this.serverName),
                    SplitLogCounters.tot_wkr_task_acquired_rescan, currentTask, currentVersion);
            return;
        }

        LOG.info("worker " + serverName + " acquired task " + path);
        SplitLogCounters.tot_wkr_task_acquired.incrementAndGet();
        getDataSetWatchAsync();

        submitTask(path, currentVersion, this.report_period);

        // after a successful submit, sleep a little bit to allow other RSs to grab the rest of the tasks
        try {
            int sleepTime = RandomUtils.nextInt(500) + 500;
            Thread.sleep(sleepTime);
        } catch (InterruptedException e) {
            LOG.warn("Interrupted while yielding for other region servers", e);
            Thread.currentThread().interrupt();
        }
    } finally {
        synchronized (grabTaskLock) {
            workerInGrabTask = false;
            // clear the interrupt from stopTask() otherwise the next task will suffer
            Thread.interrupted();
        }
    }
}
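grabTask() brackets the work with two Thread.interrupted() calls: an early return under grabTaskLock if stopTask() has already interrupted the worker, and a clearing call in the finally block so an interrupt aimed at this task cannot leak into the next one picked up by the same thread. The skeleton of that discipline (all names here are placeholders):

private void runOneTask(Runnable task) {
    if (Thread.interrupted()) {
        return;                 // cancelled before we started; flag is now clear
    }
    try {
        task.run();             // a stop request may interrupt us at any point
    } finally {
        // swallow any interrupt that targeted this task only, so the same
        // worker thread starts the next task with a clean flag
        Thread.interrupted();
    }
}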
From source file:org.apache.solr.util.TestSolrCLIRunExample.java
protected void testExample(String exampleName) throws Exception {
    File solrHomeDir = new File(ExternalPaths.SERVER_HOME);
    if (!solrHomeDir.isDirectory())
        fail(solrHomeDir.getAbsolutePath() + " not found and is required to run this test!");

    Path tmpDir = createTempDir();
    File solrExampleDir = tmpDir.toFile();
    File solrServerDir = solrHomeDir.getParentFile();

    // need a port to start the example server on
    int bindPort = -1;
    try (ServerSocket socket = new ServerSocket(0)) {
        bindPort = socket.getLocalPort();
    }

    log.info("Selected port " + bindPort + " to start " + exampleName + " example Solr instance on ...");

    String[] toolArgs = new String[] { "-e", exampleName, "-serverDir", solrServerDir.getAbsolutePath(),
            "-exampleDir", solrExampleDir.getAbsolutePath(), "-p", String.valueOf(bindPort) };

    // capture tool output to stdout
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintStream stdoutSim = new PrintStream(baos, true, StandardCharsets.UTF_8.name());

    RunExampleExecutor executor = new RunExampleExecutor(stdoutSim);
    closeables.add(executor);

    SolrCLI.RunExampleTool tool = new SolrCLI.RunExampleTool(executor, System.in, stdoutSim);
    try {
        tool.runTool(
                SolrCLI.processCommandLineArgs(SolrCLI.joinCommonAndToolOptions(tool.getOptions()), toolArgs));
    } catch (Exception e) {
        log.error("RunExampleTool failed due to: " + e + "; stdout from tool prior to failure: "
                + baos.toString(StandardCharsets.UTF_8.name()));
        throw e;
    }

    String toolOutput = baos.toString(StandardCharsets.UTF_8.name());

    // dump all the output written by the SolrCLI commands to stdout
    //System.out.println("\n\n"+toolOutput+"\n\n");

    File exampleSolrHomeDir = new File(solrExampleDir, exampleName + "/solr");
    assertTrue(exampleSolrHomeDir.getAbsolutePath() + " not found! run " + exampleName
            + " example failed; output: " + toolOutput, exampleSolrHomeDir.isDirectory());

    if ("techproducts".equals(exampleName)) {
        HttpSolrClient solrClient = getHttpSolrClient("http://localhost:" + bindPort + "/solr/" + exampleName);
        SolrQuery query = new SolrQuery("*:*");
        QueryResponse qr = solrClient.query(query);
        long numFound = qr.getResults().getNumFound();
        if (numFound == 0) {
            // brief wait in case of timing issue in getting the new docs committed
            log.warn("Going to wait for 1 second before re-trying query for techproduct example docs ...");
            try {
                Thread.sleep(1000);
            } catch (InterruptedException ignore) {
                Thread.interrupted();
            }
            numFound = solrClient.query(query).getResults().getNumFound();
        }
        assertTrue("expected 32 docs in the " + exampleName + " example but found " + numFound + ", output: "
                + toolOutput, numFound == 32);
    }

    // stop the test instance
    executor.execute(org.apache.commons.exec.CommandLine.parse("bin/solr stop -p " + bindPort));
}
From source file:com.intel.cosbench.driver.service.MissionHandler.java
private void abortAgents(boolean shutdownNow) {
    Thread.interrupted(); // clear interruption status
    executor.shutdown();
    try {
        // Wait a few seconds for existing tasks to terminate
        if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
            executor.shutdownNow();
            String id = missionContext.getId();
            if (!awaitTermination(5) && !awaitTermination(10) && !awaitTermination(30))
                LOGGER.warn("fail to abort agents for mission {}", id);
            else
                LOGGER.info("all agents have been aborted in mission {}", id);
            LOGGER.info("mission {} appears to be aborted", id); // agents aborted
        }
    } catch (InterruptedException ie) {
        executor.shutdownNow();
        Thread.currentThread().interrupt();
    }
}
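abortAgents() clears any stale interrupt before calling awaitTermination, since awaitTermination throws InterruptedException immediately if the flag is already set; and when a genuine interrupt arrives during the wait, it re-asserts the flag for the caller. That is essentially the shutdown idiom from the ExecutorService javadoc, sketched here with a short timeout:

void shutdownAndAwait(ExecutorService pool) {
    Thread.interrupted();                   // drop any stale interrupt from an earlier phase
    pool.shutdown();                        // stop accepting new tasks
    try {
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            pool.shutdownNow();             // cancel currently executing tasks
        }
    } catch (InterruptedException ie) {
        pool.shutdownNow();
        Thread.currentThread().interrupt(); // preserve the interrupt for our caller
    }
}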
From source file:com.mellanox.r4h.LeaseRenewer.java
/**
 * Periodically check in with the namenode and renew all the leases
 * when the lease period is half over.
 */
private void run(final int id) throws InterruptedException {
    for (long lastRenewed = Time.monotonicNow(); !Thread.interrupted(); Thread.sleep(getSleepPeriod())) {
        final long elapsed = Time.monotonicNow() - lastRenewed;
        if (elapsed >= getRenewalTime()) {
            try {
                renew();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                            + " executed");
                }
                lastRenewed = Time.monotonicNow();
            } catch (SocketTimeoutException ie) {
                LOG.warn("Failed to renew lease for " + clientsString() + " for " + (elapsed / 1000)
                        + " seconds. Aborting ...", ie);
                synchronized (this) {
                    while (!dfsclients.isEmpty()) {
                        dfsclients.get(0).abort();
                    }
                }
                break;
            } catch (IOException ie) {
                LOG.warn("Failed to renew lease for " + clientsString() + " for " + (elapsed / 1000)
                        + " seconds. Will retry shortly ...", ie);
            }
        }

        synchronized (this) {
            if (id != currentId || isRenewerExpired()) {
                if (LOG.isDebugEnabled()) {
                    if (id != currentId) {
                        LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                                + " is not current");
                    } else {
                        LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                                + " expired");
                    }
                }
                // no longer the current daemon or expired
                return;
            }

            // if no clients are in running state or there is no more clients
            // registered with this renewer, stop the daemon after the grace
            // period.
            if (!clientsRunning() && emptyTime == Long.MAX_VALUE) {
                emptyTime = Time.monotonicNow();
            }
        }
    }
}
From source file:org.apache.hadoop.hdfs.LeaseRenewer.java
/**
 * Periodically check in with the namenode and renew all the leases
 * when the lease period is half over.
 */
private void run(final int id) throws InterruptedException {
    for (long lastRenewed = Time.now(); !Thread.interrupted(); Thread.sleep(getSleepPeriod())) {
        final long elapsed = Time.now() - lastRenewed;
        if (elapsed >= getRenewalTime()) {
            try {
                renew();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                            + " executed");
                }
                lastRenewed = Time.now();
            } catch (SocketTimeoutException ie) {
                LOG.warn("Failed to renew lease for " + clientsString() + " for " + (elapsed / 1000)
                        + " seconds. Aborting ...", ie);
                synchronized (this) {
                    while (!dfsclients.isEmpty()) {
                        dfsclients.get(0).abort();
                    }
                }
                break;
            } catch (IOException ie) {
                LOG.warn("Failed to renew lease for " + clientsString() + " for " + (elapsed / 1000)
                        + " seconds. Will retry shortly ...", ie);
            }
        }

        synchronized (this) {
            if (id != currentId || isRenewerExpired()) {
                if (LOG.isDebugEnabled()) {
                    if (id != currentId) {
                        LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                                + " is not current");
                    } else {
                        LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                                + " expired");
                    }
                }
                //no longer the current daemon or expired
                return;
            }

            // if no clients are in running state or there is no more clients
            // registered with this renewer, stop the daemon after the grace
            // period.
            if (!clientsRunning() && emptyTime == Long.MAX_VALUE) {
                emptyTime = Time.now();
            }
        }
    }
}
From source file:net.sf.jasperreports.engine.export.JRHtmlExporter.java
/**
 *
 */
protected void exportReportToWriter() throws JRException, IOException {
    JRHtmlExporterConfiguration configuration = getCurrentConfiguration();

    if (configuration.isUsingImagesToAlign()) {
        loadPxImage();
    }

    String htmlHeader = configuration.getHtmlHeader();
    String betweenPagesHtml = configuration.getBetweenPagesHtml();
    String htmlFooter = configuration.getHtmlFooter();
    boolean flushOutput = configuration.isFlushOutput();

    if (htmlHeader == null) {
        String encoding = getExporterOutput().getEncoding();

        // no doctype because of bug 1430880
        // writer.write("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n");
        // writer.write("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n");
        writer.write("<html>\n");
        writer.write("<head>\n");
        writer.write(" <title></title>\n");
        writer.write(" <meta http-equiv=\"Content-Type\" content=\"text/html; charset=" + encoding + "\"/>\n");
        writer.write(" <style type=\"text/css\">\n");
        writer.write(" a {text-decoration: none}\n");
        writer.write(" </style>\n");
        writer.write("</head>\n");
        writer.write("<body text=\"#000000\" link=\"#000000\" alink=\"#000000\" vlink=\"#000000\">\n");
        writer.write("<table width=\"100%\" cellpadding=\"0\" cellspacing=\"0\" border=\"0\">\n");
        writer.write("<tr><td width=\"50%\"> </td><td align=\"center\">\n");
        writer.write("\n");
    } else {
        writer.write(htmlHeader);
    }

    List<ExporterInputItem> items = exporterInput.getItems();

    for (reportIndex = 0; reportIndex < items.size(); reportIndex++) {
        ExporterInputItem item = items.get(reportIndex);

        setCurrentExporterInputItem(item);

        List<JRPrintPage> pages = jasperPrint.getPages();
        if (pages != null && pages.size() > 0) {
            PageRange pageRange = getPageRange();
            int startPageIndex = (pageRange == null || pageRange.getStartPageIndex() == null) ? 0
                    : pageRange.getStartPageIndex();
            int endPageIndex = (pageRange == null || pageRange.getEndPageIndex() == null) ? (pages.size() - 1)
                    : pageRange.getEndPageIndex();

            JRPrintPage page = null;
            for (pageIndex = startPageIndex; pageIndex <= endPageIndex; pageIndex++) {
                if (Thread.interrupted()) {
                    throw new JRException("Current thread interrupted.");
                }

                page = pages.get(pageIndex);

                writer.write("<a name=\"" + JR_PAGE_ANCHOR_PREFIX + reportIndex + "_" + (pageIndex + 1)
                        + "\"></a>\n");

                /*   */
                exportPage(page);

                if (reportIndex < items.size() - 1 || pageIndex < endPageIndex) {
                    if (betweenPagesHtml == null) {
                        writer.write("<br/>\n<br/>\n");
                    } else {
                        writer.write(betweenPagesHtml);
                    }
                }

                writer.write("\n");
            }
        }
    }

    // when no fontHandler and/or resourceHandler, fonts are not processed
    if (fontsToProcess != null && fontsToProcess.size() > 0) {
        HtmlResourceHandler fontHandler = getExporterOutput().getFontHandler() == null
                ? getFontHandler()
                : getExporterOutput().getFontHandler();
        for (HtmlFont htmlFont : fontsToProcess.values()) {
            writer.write("<link class=\"jrWebFont\" rel=\"stylesheet\" href=\""
                    + fontHandler.getResourcePath(htmlFont.getId()) + "\">\n");
        }
    }

    // if (!isOutputResourcesToDir)
    {
        writer.write("<![if IE]>\n");
        writer.write("<script>\n");
        writer.write("var links = document.querySelectorAll('link.jrWebFont');\n");
        writer.write("setTimeout(function(){ if (links) { for (var i = 0; i < links.length; i++) { links.item(i).href = links.item(i).href; } } }, 0);\n");
        writer.write("</script>\n");
        writer.write("<![endif]>\n");
    }

    if (htmlFooter == null) {
        writer.write("</td><td width=\"50%\"> </td></tr>\n");
        writer.write("</table>\n");
        writer.write("</body>\n");
        writer.write("</html>\n");
    } else {
        writer.write(htmlFooter);
    }

    if (flushOutput) {
        writer.flush();
    }
}
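The page loop polls Thread.interrupted() once per page and converts a pending interrupt into an exporter-level exception. Because interrupted() clears the flag, the interrupt is consumed exactly once and surfaces as a single JRException rather than repeatedly aborting later operations. The general shape of that cooperative cancellation check (the loop body is a placeholder; only the exception message comes from the source above):

for (int pageIndex = first; pageIndex <= last; pageIndex++) {
    if (Thread.interrupted()) {
        // consume the pending interrupt and surface it as a domain exception
        throw new JRException("Current thread interrupted.");
    }
    exportPage(pageIndex);   // placeholder for the per-page work
}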
From source file:org.apache.hadoop.hdfs.client.impl.LeaseRenewer.java
/**
 * Periodically check in with the namenode and renew all the leases
 * when the lease period is half over.
 */
private void run(final int id) throws InterruptedException {
    for (long lastRenewed = Time.monotonicNow(); !Thread.interrupted(); Thread.sleep(getSleepPeriod())) {
        final long elapsed = Time.monotonicNow() - lastRenewed;
        if (elapsed >= getRenewalTime()) {
            try {
                renew();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                            + " executed");
                }
                lastRenewed = Time.monotonicNow();
            } catch (SocketTimeoutException ie) {
                LOG.warn("Failed to renew lease for " + clientsString() + " for " + (elapsed / 1000)
                        + " seconds. Aborting ...", ie);
                synchronized (this) {
                    while (!dfsclients.isEmpty()) {
                        DFSClient dfsClient = dfsclients.get(0);
                        dfsClient.closeAllFilesBeingWritten(true);
                        closeClient(dfsClient);
                    }
                    //Expire the current LeaseRenewer thread.
                    emptyTime = 0;
                }
                break;
            } catch (IOException ie) {
                LOG.warn("Failed to renew lease for " + clientsString() + " for " + (elapsed / 1000)
                        + " seconds. Will retry shortly ...", ie);
            }
        }

        synchronized (this) {
            if (id != currentId || isRenewerExpired()) {
                if (LOG.isDebugEnabled()) {
                    if (id != currentId) {
                        LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                                + " is not current");
                    } else {
                        LOG.debug("Lease renewer daemon for " + clientsString() + " with renew id " + id
                                + " expired");
                    }
                }
                //no longer the current daemon or expired
                return;
            }

            // if no clients are in running state or there is no more clients
            // registered with this renewer, stop the daemon after the grace
            // period.
            if (!clientsRunning() && emptyTime == Long.MAX_VALUE) {
                emptyTime = Time.monotonicNow();
            }
        }
    }
}
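All three LeaseRenewer variants above share the same loop header: for (...; !Thread.interrupted(); Thread.sleep(getSleepPeriod())). An interrupt that lands while the body runs is picked up, and consumed, by the condition on the next pass, ending the loop with a clean flag; an interrupt that lands during the sleep surfaces as the declared InterruptedException instead. Either way the daemon stops. A stripped-down sketch of that header (renewal details elided; renew() and renewalPeriodNanos() are hypothetical):

private void renewLoop() throws InterruptedException {
    // the condition consumes an interrupt that arrives while the body runs;
    // one that arrives during the sleep propagates as InterruptedException
    for (long last = System.nanoTime(); !Thread.interrupted(); Thread.sleep(500)) {
        if (System.nanoTime() - last >= renewalPeriodNanos()) { // hypothetical period accessor
            renew();                                            // hypothetical renewal call
            last = System.nanoTime();
        }
    }
}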