List of usage examples for java.lang.InterruptedException.getMessage()
public String getMessage()
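Before the collected real-world examples, here is a minimal, self-contained sketch of the common pattern they all share: catch InterruptedException from a blocking call, log getMessage(), and restore the thread's interrupted status. The queue, logger, and class names below are illustrative placeholders, not taken from any of the source files that follow; note that getMessage() is often null for InterruptedException unless a message was explicitly supplied.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.logging.Logger;

public class GetMessageExample {
    private static final Logger LOG = Logger.getLogger(GetMessageExample.class.getName());
    private final BlockingQueue<String> queue = new LinkedBlockingQueue<>();

    public String takeOrNull() {
        try {
            return queue.take(); // blocks; may throw InterruptedException
        } catch (InterruptedException e) {
            // getMessage() may be null here, so the log text guards against that being the only detail
            LOG.warning("Interrupted while waiting: " + e.getMessage());
            Thread.currentThread().interrupt(); // restore the interrupted status
            return null;
        }
    }
}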
From source file:org.atomserver.ThrottledAtomServer.java
/**
 * Execute the CallableTask on a ThreadPoolTaskExecutor. <br/>
 * NOTE: the standard Exception handling of AtomServer still happens in the AtomServer class.
 * Any Exception handling done here is for Exceptions that actually are thrown this far up
 * the food chain -- Exceptions that pertain directly to the TaskExecutor --
 * for example, TimeoutException or ExecutionException.
 *
 * @param request      The Abdera RequestContext
 * @param callableTask The CallableTask, which should just be a wrapped call to
 *                     the corresponding super task.
 * @return The Abdera ResponseContext
 */
private ResponseContext executePooledTask(final RequestContext request,
        final Callable<ResponseContext> callableTask) {
    ResponseContext response = null;
    Abdera abdera = request.getServiceContext().getAbdera();
    try {
        FutureTask<ResponseContext> futureTask = new FutureTask<>(callableTask);
        threadPool.execute(futureTask);
        try {
            logger.debug("starting to wait for the task to complete");
            response = futureTask.get(taskTimeout, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            // InterruptedException - if the current thread was interrupted while waiting
            // Re-assert the thread's interrupted status
            Thread.currentThread().interrupt();
            logger.error("InterruptedException in executePooledTask: Cause= " + e.getCause()
                    + " Message= " + e.getMessage(), e);
            return getAtomServer().servererror(abdera, request,
                    "InterruptedException occurred:: " + e.getCause(), e);
        } catch (ExecutionException e) {
            // ExecutionException - if the computation threw an exception
            // Because all Exception handling is done in the super class, AtomServer, we should never get this
            logger.error("ExecutionException in executePooledTask: Cause= " + e.getCause()
                    + " Message= " + e.getMessage(), e);
            return getAtomServer().servererror(abdera, request,
                    "ExecutionException occurred:: " + e.getCause(), e);
        } catch (TimeoutException e) {
            // TimeoutException - if the wait timed out
            logger.error("TimeoutException in executePooledTask: Cause= " + e.getCause()
                    + " Message= " + e.getMessage(), e);
            return getAtomServer().servererror(abdera, request,
                    "TimeoutException occurred:: " + e.getCause(), e);
        } catch (Exception e) {
            logger.error("Unknown Exception in executePooledTask: Cause= " + e.getCause()
                    + " Message= " + e.getMessage(), e);
            return getAtomServer().servererror(abdera, request,
                    "Unknown Exception occurred:: " + e.getCause(), e);
        } finally {
            // Best practice is to cancel tasks whose result is no longer needed
            // NOTE: task.cancel() is harmless if the task has already completed
            // Interrupt if running...
            futureTask.cancel(true);
            // Help out the garbage collector
            futureTask = null;
        }
    } finally {
        // Log all thread pool statistics at INFO level.
        // This information is very critical in understanding the effectiveness of the pool
        logThreadPoolStats();
    }
    return response;
}
From source file:org.openbaton.vnfm.MediaServerManager.java
@Override
public void onApplicationEvent(ContextClosedEvent event) {
    Set<Future<Boolean>> pendingTasks = new HashSet<>();
    for (ManagedVNFR managedVNFR : managedVnfrRepository.findAll()) {
        pendingTasks.add(elasticityManagement.deactivate(managedVNFR.getNsrId(), managedVNFR.getVnfrId()));
    }
    for (Future<Boolean> pendingTask : pendingTasks) {
        try {
            pendingTask.get(100, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            log.error(e.getMessage(), e);
        } catch (ExecutionException e) {
            log.error(e.getMessage(), e);
        } catch (TimeoutException e) {
            log.error(e.getMessage(), e);
        }
    }
    try {
        Thread.sleep(2500);
    } catch (InterruptedException e) {
        log.error(e.getMessage(), e);
    }
    destroyPlugins();
}
From source file:io.cloudslang.worker.execution.services.ExecutionServiceImpl.java
@Override
public Execution execute(Execution execution) throws InterruptedException {
    try {
        // handle flow cancellation
        if (handleCancelledFlow(execution)) {
            return execution;
        }
        ExecutionStep currStep = loadExecutionStep(execution);
        // Check if this execution was paused
        if (!isDebuggerMode(execution.getSystemContext()) && handlePausedFlow(execution)) {
            return null;
        }
        // dump bus events
        dumpBusEvents(execution);
        // Run the execution step
        executeStep(execution, currStep);
        // Run the navigation
        navigate(execution, currStep);
        // currently handles groups and jms optimizations
        postExecutionSettings(execution);
        // If execution was paused in language - to avoid delay of configuration
        if (execution.getSystemContext().isPaused()) {
            if (handlePausedFlowAfterStep(execution)) {
                return null;
            }
        }
        // dump bus events
        dumpBusEvents(execution);
        if (logger.isDebugEnabled()) {
            logger.debug("End of step: " + execution.getPosition() + " in execution id: "
                    + execution.getExecutionId());
        }
        return execution;
    } catch (InterruptedException ex) {
        throw ex;
    } catch (Exception ex) {
        logger.error("Error during execution: ", ex);
        execution.getSystemContext().setStepErrorKey(ex.getMessage()); // this is done only for reporting
        execution.getSystemContext().setFlowTerminationType(ExecutionStatus.SYSTEM_FAILURE);
        execution.setPosition(null); // this ends the flow!!!
        return execution;
    }
}
From source file:dk.dbc.opensearch.datadock.DatadockMain.java
/**
 * This method does the actual work of nudging the datadockmanager to get
 * on with processing data from the harvester. If any exceptions are thrown
 * from the manager, this method will issue a shutdown, and exit.
 *
 * @return the number of jobs that have been submitted for processing up until a shutdown request
 */
private int runServer() {
    int mainJobsSubmitted = 0;
    try {
        while (!isShutdownRequested()) {
            log.trace("DatadockMain calling datadockManager update");
            long timer = System.currentTimeMillis();
            int jobsSubmitted = datadockManager.update(this.maxToHarvest);
            log.debug(String.format("%s jobs submitted according to the DatadockManager", jobsSubmitted));
            timer = System.currentTimeMillis() - timer;
            mainJobsSubmitted += jobsSubmitted;
            if (jobsSubmitted > 0) {
                log.info(String.format("%1$d Jobs submitted in %2$d ms - %3$f jobs/s", jobsSubmitted, timer,
                        jobsSubmitted / (timer / 1000.0)));
            } else {
                log.info(String.format("%1$d Jobs submitted in %2$d ms - ", jobsSubmitted, timer));
                if (terminateOnZeroSubmitted) {
                    log.info("Program set to terminate on empty job queue. Shutting down now");
                    this.shutdown();
                } else {
                    Thread.sleep(this.pollTime);
                }
            }
        }
    } catch (HarvesterIOException hioe) {
        String fatal = String.format("A fatal error occurred in the communication with the database: %s",
                hioe.getMessage());
        log.fatal(fatal, hioe);
    } catch (InterruptedException ie) {
        log.fatal(String.format("InterruptedException caught in Main.runServer: %s", ie.getMessage()), ie);
    } catch (RuntimeException re) {
        log.fatal(String.format("RuntimeException caught in Main.runServer: %s", re.getMessage()), re);
    } catch (Exception e) {
        log.fatal(String.format("Exception caught in Main.runServer: %s", e.getMessage()), e);
    }
    // finally
    // {
    //     this.shutdown();
    // }
    log.debug(String.format("Total # jobs submitted to main: %s", mainJobsSubmitted));
    return mainJobsSubmitted;
}
From source file:edu.harvard.i2b2.workplace.ws.WorkplaceService.java
private OMElement execute(RequestHandler handler, long waitTime) throws I2B2Exception {
    // Do workplace processing inside a thread, so that the
    // service can send back a message with a timeout error.
    log.debug("In execute");
    OMElement returnElement = null;
    String unknownErrorMessage = "Error message delivered from the remote server \n"
            + "You may wish to retry your last action";
    ExecutorRunnable er = new ExecutorRunnable();
    er.setRequestHandler(handler);
    Thread t = new Thread(er);
    String workplaceDataResponse = null;
    synchronized (t) {
        t.start();
        // try {
        //     if (waitTime > 0) {
        //         t.wait(waitTime);
        //     } else {
        //         t.wait();
        //     }
        try {
            long startTime = System.currentTimeMillis();
            long deltaTime = -1;
            while ((er.isJobCompleteFlag() == false) && (deltaTime < waitTime)) {
                if (waitTime > 0) {
                    t.wait(waitTime - deltaTime);
                    deltaTime = System.currentTimeMillis() - startTime;
                } else {
                    t.wait();
                }
            }
            workplaceDataResponse = er.getOutputString();
            if (workplaceDataResponse == null) {
                if (er.getJobException() != null) {
                    log.error("er.jobException is " + er.getJobException().getMessage());
                    log.info("waitTime is " + waitTime);
                    ResponseMessageType responseMsgType = MessageFactory.doBuildErrorResponse(null,
                            unknownErrorMessage);
                    workplaceDataResponse = MessageFactory.convertToXMLString(responseMsgType);
                } else if (er.isJobCompleteFlag() == false) {
                    // <result_waittime_ms>5000</result_waittime_ms>
                    String timeOuterror = "Remote server timed out \n" + "Result waittime = " + waitTime
                            + " ms elapsed,\nPlease try again";
                    log.error(timeOuterror);
                    log.debug("workplace waited " + deltaTime + "ms for "
                            + er.getRequestHandler().getClass().getName());
                    ResponseMessageType responseMsgType = MessageFactory.doBuildErrorResponse(null,
                            timeOuterror);
                    workplaceDataResponse = MessageFactory.convertToXMLString(responseMsgType);
                } else {
                    log.error("workplace data response is null");
                    log.info("waitTime is " + waitTime);
                    log.debug("workplace waited " + deltaTime + "ms for "
                            + er.getRequestHandler().getClass().getName());
                    ResponseMessageType responseMsgType = MessageFactory.doBuildErrorResponse(null,
                            unknownErrorMessage);
                    workplaceDataResponse = MessageFactory.convertToXMLString(responseMsgType);
                }
            }
        } catch (InterruptedException e) {
            log.error(e.getMessage());
            throw new I2B2Exception("Thread error while running Workplace job ");
        } finally {
            t.interrupt();
            er = null;
            t = null;
        }
    }
    returnElement = MessageFactory.createResponseOMElementFromString(workplaceDataResponse);
    return returnElement;
}
From source file:com.greenline.guahao.biz.manager.image.codes.gif.GifEncoder.java
/**
 * Convenience constructor for class <CODE>GIFEncoder</CODE>. The argument
 * will be converted to an indexed color array. <B>This may take some
 * time.</B>
 *
 * @param image The image to encode. The image <B>must</B> be completely
 *            loaded.
 * @exception AWTException Will be thrown if the pixel grab fails. This can
 *                happen if Java runs out of memory. It may also indicate
 *                that the image contains more than 256 colors.
 */
public GifEncoder(Image image) throws AWTException {
    this.imageWidth = (short) image.getWidth(null);
    this.imageHeight = (short) image.getHeight(null);
    int values[] = new int[this.imageWidth * this.imageHeight];
    PixelGrabber grabber = new PixelGrabber(image, 0, 0, this.imageWidth, this.imageHeight, values, 0,
            this.imageWidth);
    try {
        if (grabber.grabPixels() != true) {
            log.error("GifEncoder#GifEncoder Grabber returned false: " + grabber.status());
            throw new AWTException("Grabber returned false: " + grabber.status());
        }
    } // ends try
    catch (InterruptedException ie) {
        log.error("GifEncoder#GifEncoder " + ie.getMessage(), ie);
    }
    byte[][] r = new byte[this.imageWidth][this.imageHeight];
    byte[][] g = new byte[this.imageWidth][this.imageHeight];
    byte[][] b = new byte[this.imageWidth][this.imageHeight];
    int index = 0;
    for (int y = 0; y < this.imageHeight; y++) {
        for (int x = 0; x < this.imageWidth; x++, index++) {
            r[x][y] = (byte) ((values[index] >> 16) & 0xFF);
            g[x][y] = (byte) ((values[index] >> 8) & 0xFF);
            b[x][y] = (byte) ((values[index]) & 0xFF);
        } // ends for
    } // ends for
    this.toIndexColor(r, g, b);
}
From source file:io.openvidu.server.kurento.core.KurentoParticipant.java
public PublisherEndpoint getPublisher() {
    try {
        if (!endPointLatch.await(KurentoSession.ASYNC_LATCH_TIMEOUT, TimeUnit.SECONDS)) {
            throw new OpenViduException(Code.MEDIA_ENDPOINT_ERROR_CODE,
                    "Timeout reached while waiting for publisher endpoint to be ready");
        }
    } catch (InterruptedException e) {
        throw new OpenViduException(Code.MEDIA_ENDPOINT_ERROR_CODE,
                "Interrupted while waiting for publisher endpoint to be ready: " + e.getMessage());
    }
    return this.publisher;
}
From source file:de.lmu.ifi.dbs.jfeaturelib.utils.Extractor.java
/**
 * Closes the thread pool and awaits termination.
 */
private void closePool() {
    log.debug("close pool");
    try {
        pool.shutdown();
        pool.awaitTermination(TERMINATION_TIMEOUT, TimeUnit.DAYS);
    } catch (InterruptedException ex) {
        log.warn(ex.getMessage(), ex);
        throw new IllegalStateException("error while shutting down pool");
    }
}
From source file:com.xpn.xwiki.plugin.lucene.IndexRebuilder.java
@Override
protected void runInternal() {
    LOGGER.debug("Starting lucene index rebuild");
    XWikiContext context = null;
    try {
        // The context must be cloned, as otherwise setDatabase() might affect the response to
        // the current request.
        // TODO This is not a good way to do this; ideally there would be a method that creates
        // a new context and copies only a few needed objects, as some objects are not supposed
        // to be used in 2 different contexts.
        // TODO This seems to work on a simple run:
        // context = new XWikiContext();
        // context.setWiki(this.context.getWiki());
        // context.setEngineContext(this.context.getEngineContext());
        // context.setMode(this.context.getMode());
        // context.setAction(this.context.getAction());
        // context.put("msg", this.context.get("msg"));
        // context.setMainXWiki(this.context.getMainXWiki());
        // context.setURLFactory(this.context.getURLFactory());
        // context.setLanguage(this.context.getLanguage());
        // context.setDatabase(this.context.getDatabase());
        // context.put("org.xwiki.component.manager.ComponentManager", this.context
        //     .get("org.xwiki.component.manager.ComponentManager"));
        context = getContext();
        // For example, we definitely don't want to use the same hibernate session...
        context.remove("hibsession");
        context.remove("hibtransaction");
        // This is also causing serious problems, as the same xcontext gets shared between
        // threads and causes the hibernate session to be shared in the end. The vcontext is
        // automatically recreated by the velocity renderer, if it isn't found in the xcontext.
        context.remove("vcontext");
        // The original request and response should not be used outside the actual request
        // processing thread, as they will be cleaned later by the container.
        context.setRequest(null);
        context.setResponse(null);
        rebuildIndex(context);
    } catch (InterruptedException e) {
        LOGGER.warn("The index rebuilder thread has been interrupted");
    } catch (Exception e) {
        LOGGER.error("Error in lucene rebuild thread: {}", e.getMessage(), e);
    } finally {
        this.rebuildInProgress = false;
        if (context != null) {
            context.getWiki().getStore().cleanUp(context);
        }
    }
    LOGGER.debug("Lucene index rebuild done");
}