List of usage examples for java.lang.Thread.interrupted()

public static boolean interrupted()

Tests whether the current thread has been interrupted and clears its interrupted status; a second call returns false unless the thread has been interrupted again in the meantime.
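Before the project examples, here is a minimal, self-contained sketch (illustrative only, not taken from any of the source files below; the class name is made up) of the polling pattern most of them use: a worker loop checks Thread.interrupted() on each iteration, and because the call clears the flag, the status reads false again once the loop has exited.

// Illustrative sketch of polling Thread.interrupted() in a worker loop.
public class InterruptPollingExample {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            // Thread.interrupted() returns the calling thread's interrupt status
            // and clears it, so the loop exits once after an interrupt.
            while (!Thread.interrupted()) {
                // simulate a unit of work
            }
            // false here: the poll above already cleared the flag
            System.out.println("still interrupted? " + Thread.currentThread().isInterrupted());
        });
        worker.start();
        Thread.sleep(100);
        worker.interrupt(); // sets the worker's interrupt status; the poll observes and clears it
        worker.join();
    }
}

The design point visible in several examples below: because the flag is cleared, the caller must either act on the interrupt immediately (stop the loop, throw InterruptedException) or restore it with Thread.currentThread().interrupt() so code further up the stack can still observe it.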
From source file:net.sf.jasperreports.engine.export.JsonExporter.java
protected void exportReportToWriter() throws JRException, IOException {
    writer.write("{\n");
    List<ExporterInputItem> items = exporterInput.getItems();
    for (reportIndex = 0; reportIndex < items.size(); reportIndex++) {
        ExporterInputItem item = items.get(reportIndex);
        setCurrentExporterInputItem(item);

        List<JRPrintPage> pages = jasperPrint.getPages();
        if (pages != null && pages.size() > 0) {
            PageRange pageRange = getPageRange();
            int startPageIndex = (pageRange == null || pageRange.getStartPageIndex() == null) ? 0
                    : pageRange.getStartPageIndex();
            int endPageIndex = (pageRange == null || pageRange.getEndPageIndex() == null) ? (pages.size() - 1)
                    : pageRange.getEndPageIndex();

            JRPrintPage page = null;
            for (pageIndex = startPageIndex; pageIndex <= endPageIndex; pageIndex++) {
                if (Thread.interrupted()) {
                    throw new ExportInterruptedException();
                }
                page = pages.get(pageIndex);
                exportPage(page);

                if (reportIndex < items.size() - 1 || pageIndex < endPageIndex) {
                    writer.write("\n");
                }
            }
        }
    }
    writer.write("\n}");

    boolean flushOutput = getCurrentConfiguration().isFlushOutput();
    if (flushOutput) {
        writer.flush();
    }
}
From source file:org.freemedsoftware.shim.MasterControlServlet.java
@SuppressWarnings("unchecked") public void launchWorkerThreads() { logger.info("Launching worker threads"); HashMap<String, Object> driverConfig = new HashMap<String, Object>(); Iterator<String> configKeys = config.getKeys(); while (configKeys.hasNext()) { String k = configKeys.next(); driverConfig.put(k, config.getString(k)); }/*from ww w .java 2 s .com*/ // Initialize signature pad if driver is defined String signatureDriver = config.getString("driver.signature"); if (signatureDriver != null) { logger.info("Initializing signature pad driver " + signatureDriver); try { logger.debug("instantiating driver"); signatureDeviceManager = new ShimDeviceManager<SignatureInterface>(signatureDriver); signatureDeviceManager.getDeviceInstance().configure(driverConfig); logger.debug("running init() for driver"); if (signatureDeviceManager == null) { logger.error("Signature manager is null!!"); } signatureDeviceManager.init(); } catch (Exception e) { logger.error(e); } } else { logger.warn("No signature pad driver specified, skipping."); } // Initialize signature pad if driver is defined String dosingPumpDriver = config.getString("driver.dosingpump"); if (dosingPumpDriver != null) { logger.info("Initializing dosing pump driver " + dosingPumpDriver); try { logger.debug("instantiating driver"); dosingPumpDeviceManager = new ShimDeviceManager<DosingPumpInterface>(dosingPumpDriver); dosingPumpDeviceManager.getDeviceInstance().configure(driverConfig); logger.debug("running init() for driver"); if (dosingPumpDeviceManager == null) { logger.error("Dosing pump manager is null!!"); } dosingPumpDeviceManager.init(); } catch (Exception e) { logger.error(e); } } else { logger.warn("No dosing pump driver specified, skipping."); } // Initialize label printer, if a driver is defined String labelPrinterDriver = config.getString("driver.labelprinter"); if (labelPrinterDriver != null) { logger.info("Initializing label printer driver " + labelPrinterDriver); try { logger.debug("instantiating driver"); labelPrinterDeviceManager = new ShimDeviceManager<LabelPrinterInterface>(labelPrinterDriver); labelPrinterDeviceManager.getDeviceInstance().configure(driverConfig); logger.debug("running init() for driver"); if (labelPrinterDeviceManager == null) { logger.error("Label printer manager is null!!"); } labelPrinterDeviceManager.init(); } catch (Exception e) { logger.error(e); } } else { logger.warn("No signature pad driver specified, skipping."); } logger.info("Launching job store scheduler timer"); timer = new Timer(); timer.schedule(new TimerTask() { @Override public void run() { while (!Thread.interrupted()) { try { Thread.sleep(THREAD_SLEEP_TIME); scanForJobs(); } catch (InterruptedException e) { logger.warn(e); } } } protected void scanForJobs() { JobStoreItem labelItem = null; JobStoreItem signatureItem = null; JobStoreItem vitalsItem = null; try { List<JobStoreItem> items = PersistentJobStoreDAO.unassignedJobs(); Iterator<JobStoreItem> iter = items.iterator(); while (iter.hasNext() && signatureItem == null && vitalsItem == null && labelItem == null) { JobStoreItem thisItem = iter.next(); if (thisItem.getDevice().equalsIgnoreCase(JobStoreItem.DEVICE_SIGNATURE) && signatureItem == null) { signatureItem = thisItem; } if (thisItem.getDevice().equalsIgnoreCase(JobStoreItem.DEVICE_LABEL) && labelItem == null) { labelItem = thisItem; } if (thisItem.getDevice().equalsIgnoreCase(JobStoreItem.DEVICE_VITALS) && signatureItem == null) { vitalsItem = thisItem; } } } catch (SqlJetException e) { logger.error(e); } // Process any new 
signature requests if (signatureItem != null && signatureDeviceManager != null) { logger.info("Found signature item to be processed (id = " + signatureItem.getId() + ")"); if (!signatureDeviceManager.getDeviceInstance().isProcessing()) { try { signatureDeviceManager.getDeviceInstance().initJobRequest(signatureItem); // Update with pending status signatureItem.setStatus(JobStoreItem.STATUS_PENDING); PersistentJobStoreDAO.update(signatureItem); } catch (Exception e) { logger.error(e); } } else { logger.warn("Device is processing, skipping new job load"); } } // Process any new label requests if (labelItem != null && labelPrinterDeviceManager != null) { logger.info("Found label item to be processed (id = " + labelItem.getId() + ")"); if (!labelPrinterDeviceManager.getDeviceInstance().isProcessing()) { try { labelPrinterDeviceManager.getDeviceInstance().initJobRequest(labelItem); // Update with pending status labelItem.setStatus(JobStoreItem.STATUS_PENDING); PersistentJobStoreDAO.update(labelItem); } catch (Exception e) { logger.error(e); } } else { logger.warn("Device is processing, skipping new job load"); } } } }, THREAD_SLEEP_TIME, THREAD_SLEEP_TIME); }
From source file:io.tilt.minka.business.impl.CoordinatorImpl.java
/**
 * Acquires a service permission, executes the given lambda, then releases the permission.
 * Retries in a loop when the priority requires blocking until the permission is granted.
 */
@SuppressWarnings("unchecked")
private <R> R runSynchronized(final Synchronized sync) {
    Validate.notNull(sync);
    final boolean untilGrant = sync.getPriority() == PriorityLock.MEDIUM_BLOCKING;
    if (sync.getPriority() == PriorityLock.HIGH_ISOLATED) {
        sync.execute();
        return (R) new Boolean(true);
    }

    int retries = 0;
    while (!Thread.interrupted()) {
        final Permission p = untilGrant ? acquireBlocking(sync.getAction()) : acquire(sync.getAction());
        if (logger.isDebugEnabled()) {
            logger.debug("{}: ({}) {} operation {} to {}", getClass().getSimpleName(), shardId,
                    sync.getAction(), p, sync.getTask().getClass().getSimpleName());
        }
        if (p == GRANTED) {
            try {
                if (sync.getTask() instanceof Runnable) {
                    sync.execute();
                    return (R) new Boolean(true);
                } else if (sync.getTask() instanceof Callable) {
                    // TODO
                    R call = ((Callable<R>) sync.getTask()).call();
                    return call;
                }
            } catch (Exception e) {
                logger.error("{}: ({}) Untrapped task's exception while executing: {} task: {}",
                        getClass().getSimpleName(), shardId, sync.getTask().getClass().getName(),
                        sync.getAction(), e);
            } finally {
                release(sync.getAction());
            }
            break;
        } else if (p == RETRY && untilGrant) {
            if (retries++ < Config.SEMAPHORE_UNLOCK_MAX_RETRIES) {
                if (logger.isDebugEnabled()) {
                    logger.debug("{}: Sleeping while waiting to acquire lock: {}", getClass().getSimpleName(),
                            shardId, sync.getAction());
                }
                // TODO: WTF -> LockSupport.parkUntil(Config.SEMAPHORE_UNLOCK_RETRY_DELAY_MS);
                try {
                    Thread.sleep(Config.SEMAPHORE_UNLOCK_RETRY_DELAY_MS);
                } catch (InterruptedException e) {
                    logger.error("{}: ({}) While sleeping for unlock delay", getClass().getSimpleName(),
                            shardId, e);
                }
            } else {
                logger.warn("{}: ({}) Coordination starved ({}) for action: {} too many retries ({})",
                        getClass().getSimpleName(), shardId, p, sync.getAction(), retries);
                /*throw new RuntimeException("Coordination starved for action: " + dispatch.getClass()
                        + " too many retries");*/
            }
        } else {
            break;
        }
    }
    return null;
}
From source file:org.fusesource.meshkeeper.distribution.provisioner.embedded.StreamPumper.java
private void waitForInput(InputStream is) throws IOException, InterruptedException {
    if (useAvailable) {
        while (!finish && is.available() == 0) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            synchronized (this) {
                this.wait(POLL_INTERVAL);
            }
        }
    }
}
From source file:eu.stratosphere.runtime.io.gates.InputGate.java
/**
 * Reads a record from one of the associated input channels. Channels are read such that one buffer from a channel is
 * consecutively consumed. The buffers in turn are consumed in the order in which they arrive.
 * Note that this method is not guaranteed to return a record, because the currently available channel data may not
 * always constitute an entire record, when events or partial records are part of the data.
 *
 * When called even though no data is available, this call will block until data is available, so this method should
 * be called when waiting is desired (such as when synchronously consuming a single gate) or only when it is known
 * that data is available (such as when reading a union of multiple input gates).
 *
 * @param target The record object into which to construct the complete record.
 * @return The result indicating whether a complete record is available, an event is available, only incomplete data
 *         is available (NONE), or the gate is exhausted.
 * @throws IOException Thrown when an error occurred in the network stack relating to this channel.
 * @throws InterruptedException Thrown when the thread working on this channel is interrupted.
 */
public InputChannelResult readRecord(T target) throws IOException, InterruptedException {
    if (this.channelToReadFrom == -1) {
        if (this.isClosed()) {
            return InputChannelResult.END_OF_STREAM;
        }

        if (Thread.interrupted()) {
            throw new InterruptedException();
        }

        this.channelToReadFrom = waitForAnyChannelToBecomeAvailable();
    }

    InputChannelResult result = this.getInputChannel(this.channelToReadFrom).readRecord(target);
    switch (result) {
    case INTERMEDIATE_RECORD_FROM_BUFFER: // full record and we can stay on the same channel
        return InputChannelResult.INTERMEDIATE_RECORD_FROM_BUFFER;

    case LAST_RECORD_FROM_BUFFER: // full record, but we must switch the channel afterwards
        this.channelToReadFrom = -1;
        return InputChannelResult.LAST_RECORD_FROM_BUFFER;

    case END_OF_SUPERSTEP:
        this.channelToReadFrom = -1;
        return InputChannelResult.END_OF_SUPERSTEP;

    case TASK_EVENT: // task event
        this.currentEvent = this.getInputChannel(this.channelToReadFrom).getCurrentEvent();
        this.channelToReadFrom = -1; // event always marks a unit as consumed
        return InputChannelResult.TASK_EVENT;

    case NONE: // internal event or an incomplete record that needs further chunks
        // the current unit is exhausted
        this.channelToReadFrom = -1;
        return InputChannelResult.NONE;

    case END_OF_STREAM: // channel is done
        this.channelToReadFrom = -1;
        return isClosed() ? InputChannelResult.END_OF_STREAM : InputChannelResult.NONE;

    default: // silence the compiler
        throw new RuntimeException();
    }
}
From source file:com.sillelien.dollar.api.types.DollarList.java
@NotNull
@Override
public ImmutableList<var> toVarList() {
    try {
        return ImmutableList.copyOf(executor
                .submit(() -> $stream(false).map(v -> v._fix(false)).collect(Collectors.toList())).get());
    } catch (InterruptedException e) {
        Thread.interrupted();
        return ImmutableList.of(DollarFactory.failure(ErrorType.INTERRUPTED, e, false));
    } catch (ExecutionException e) {
        return ImmutableList.of(DollarFactory.failure(ErrorType.EXECUTION_FAILURE, e, false));
    }
}
From source file:net.paoding.spdy.client.netty.ResponseFuture.java
private boolean await0(long timeoutNanos, boolean interruptable) throws InterruptedException {
    if (interruptable && Thread.interrupted()) {
        throw new InterruptedException();
    }

    long startTime = timeoutNanos <= 0 ? 0 : System.nanoTime();
    long waitTime = timeoutNanos;
    boolean interrupted = false;

    try {
        synchronized (this) {
            if (done) {
                return done;
            } else if (waitTime <= 0) {
                return done;
            }

            checkDeadLock();
            waiters++;
            try {
                for (;;) {
                    try {
                        this.wait(waitTime / 1000000, (int) (waitTime % 1000000));
                    } catch (InterruptedException e) {
                        if (interruptable) {
                            throw e;
                        } else {
                            interrupted = true;
                        }
                    }

                    if (done) {
                        return true;
                    } else {
                        waitTime = timeoutNanos - (System.nanoTime() - startTime);
                        if (waitTime <= 0) {
                            return done;
                        }
                    }
                }
            } finally {
                waiters--;
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
}
From source file:org.apache.nifi.toolkit.cli.impl.command.nifi.pg.PGDisableControllerServices.java
private void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        Thread.interrupted();
    }
}
From source file:be.fgov.kszbcss.rhq.websphere.config.ConfigObjectInvocationHandler.java
@Override
public String toString() {
    StringBuffer buffer = new StringBuffer("(");
    boolean first = true;
    for (ConfigObjectAttributeDesc desc : type.getAttributeDescriptors()) {
        if (first) {
            first = false;
        } else {
            buffer.append(',');
        }
        buffer.append(desc.getName());
        buffer.append('=');
        try {
            buffer.append(getAttributeValue(desc));
        } catch (JMException ex) {
            buffer.append("#ERROR#");
        } catch (ConnectorException ex) {
            buffer.append("#ERROR#");
        } catch (InterruptedException ex) {
            Thread.interrupted();
        }
    }
    return buffer.toString();
}
From source file:org.apache.hadoop.hdfs.server.datanode.BlockReceiver.java
/**
 * While writing to mirrorOut, failure to write to mirror should not
 * affect this datanode.
 */
private void handleMirrorOutError(IOException ioe) throws IOException {
    LOG.info(datanode.dnRegistration + ": Exception writing block " + block + " to mirror " + mirrorAddr + "\n"
            + StringUtils.stringifyException(ioe));
    if (Thread.interrupted()) {
        // shut down if the thread is interrupted
        throw ioe;
    } else {
        // encounter an error while writing to mirror
        // continue to run even if can not write to mirror
        // notify client of the error
        // and wait for the client to shut down the pipeline
        mirrorError = true;
    }
}