Usage examples for java.lang.Thread.interrupted()
public static boolean interrupted()
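Thread.interrupted() tests whether the current thread has been interrupted and, unlike Thread.isInterrupted(), clears the interrupt status as a side effect: two consecutive calls return true then false unless the thread is interrupted again in between. A minimal standalone sketch of that contract (class and variable names are illustrative only):

    public class InterruptedDemo {
        public static void main(String[] args) throws Exception {
            Thread worker = new Thread(() -> {
                while (!Thread.interrupted()) {
                    // spin until the interrupt flag is observed (and cleared)
                }
                // interrupted() cleared the flag, so the status is now false
                System.out.println("still interrupted? " + Thread.currentThread().isInterrupted());
            });
            worker.start();
            worker.interrupt(); // set the worker's interrupt status
            worker.join();      // prints "still interrupted? false"
        }
    }

The examples collected below show the main ways this side effect gets used in practice: swallowing an interrupt during shutdown, clearing a stale flag before further blocking calls, and remembering the flag so it can be restored later.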
From source file:org.apache.tomee.embedded.Container.java
@Override
public void close() {
    final CountDownLatch end = new CountDownLatch(1);
    final Container container = Container.this;
    new Thread() {
        {
            setName("tomee-embedded-await-" + hashCode());
        }

        @Override
        public void run() {
            try {
                container.await();
                end.countDown();
            } catch (final Exception e) {
                end.countDown();
                throw new IllegalStateException(e);
            }
        }
    }.start();

    try {
        stop();
    } catch (final Exception e) {
        throw new IllegalStateException("Failed to stop container", e);
    }
    try {
        end.await();
    } catch (final InterruptedException e) {
        Thread.interrupted();
    }
}
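A detail worth noting in the catch block above: when InterruptedException is thrown, the JVM has already cleared the interrupt status, so the Thread.interrupted() call returns false and mainly documents the decision to swallow the interrupt during shutdown. Code that must not lose the signal restores it instead; a minimal sketch of that alternative (reusing the latch from the example, purely for illustration):

    try {
        end.await();
    } catch (final InterruptedException e) {
        // restore the interrupt status so callers further up the stack still see it
        Thread.currentThread().interrupt();
    }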
From source file:net.yacy.peers.Protocol.java
private static void remoteSearchProcess(final SearchEvent event, final int count, final long time,
        final String wordhashes, final Seed target, final Blacklist blacklist, final SearchResult result)
        throws SpaceExceededException, InterruptedException {

    // create containers
    final int words = wordhashes.length() / Word.commonHashLength;
    assert words > 0 : "wordhashes = " + wordhashes;
    final List<ReferenceContainer<WordReference>> container =
            new ArrayList<ReferenceContainer<WordReference>>(words);
    for (int i = 0; i < words; i++) {
        container.add(ReferenceContainer.emptyContainer(Segment.wordReferenceFactory,
                ASCII.getBytes(wordhashes.substring(i * Word.commonHashLength, (i + 1) * Word.commonHashLength)),
                count)); // throws SpaceExceededException
    }

    // insert results to containers
    int term = count;
    Map<String, LinkedHashSet<String>> snip;
    if (event.addResultsToLocalIndex) {
        snip = null;
    } else {
        snip = new HashMap<String, LinkedHashSet<String>>(); // needed to display nodestack results
    }
    List<URIMetadataNode> storeDocs = new ArrayList<URIMetadataNode>(result.links.size());
    for (final URIMetadataNode urlEntry : result.links) {
        if (term-- <= 0) {
            break; // do not process more than requested (in case evil peers fill us up with rubbish)
        }
        // get one single search result
        if (urlEntry == null) {
            continue;
        }
        assert (urlEntry.hash().length == 12) : "urlEntry.hash() = " + ASCII.String(urlEntry.hash());
        if (urlEntry.hash().length != 12) {
            continue; // bad url hash
        }
        if (blacklist.isListed(BlacklistType.SEARCH, urlEntry.url())) {
            if (Network.log.isInfo()) {
                Network.log.info("remote search: filtered blacklisted url "
                        + urlEntry.url().toNormalform(true) + " from peer " + target.getName());
            }
            continue; // block with blacklist
        }
        final String urlRejectReason =
                Switchboard.getSwitchboard().crawlStacker.urlInAcceptedDomain(urlEntry.url());
        if (urlRejectReason != null) {
            if (Network.log.isInfo()) {
                Network.log.info("remote search: rejected url '" + urlEntry.url().toNormalform(true)
                        + "' (" + urlRejectReason + ") from peer " + target.getName());
            }
            continue; // reject url outside of our domain
        }
        // save the url entry
        final Reference entry = urlEntry.word();
        if (entry == null) {
            if (Network.log.isWarn()) {
                Network.log.warn("remote search: no word attached from peer " + target.getName()
                        + ", version " + target.getVersion());
            }
            continue; // no word attached
        }
        // the search-result-url transports all the attributes of word indexes
        if (!Base64Order.enhancedCoder.equal(entry.urlhash(), urlEntry.hash())) {
            Network.log.info("remote search: url-hash " + ASCII.String(urlEntry.hash())
                    + " does not belong to word-attached-hash " + ASCII.String(entry.urlhash())
                    + "; url = " + urlEntry.url().toNormalform(true) + " from peer " + target.getName());
            continue; // spammed
        }
        // passed all checks, store url
        storeDocs.add(urlEntry);
        ResultURLs.stack(ASCII.String(urlEntry.url().hash()), urlEntry.url().getHost(),
                event.peers.mySeed().hash.getBytes(), UTF8.getBytes(target.hash), EventOrigin.QUERIES);
        if (urlEntry.snippet() != null && urlEntry.snippet().length() > 0
                && !urlEntry.snippet().equals("null")) {
            // we don't store the snippets along the url entry, because they are search-specific.
            // instead, they are placed in a snippet-search cache.
            // System.out.println("--- RECEIVED SNIPPET '" + urlEntry.snippet() + "'");
            TextSnippet.snippetsCache.put(wordhashes, ASCII.String(urlEntry.hash()), urlEntry.snippet());
            // add snippet for snippet handling for nodestack entries (used if not stored to index)
            if (!event.addResultsToLocalIndex) {
                // TODO: must have a snippet even to get the snippetcache entry back when adding to nodestack
                LinkedHashSet<String> sniptxt = new LinkedHashSet<String>();
                sniptxt.add(urlEntry.snippet());
                snip.put(ASCII.String(urlEntry.hash()), sniptxt);
            }
        }
        // add the url entry to the word indexes
        for (final ReferenceContainer<WordReference> c : container) {
            try {
                c.add(entry);
            } catch (final SpaceExceededException e) {
                ConcurrentLog.logException(e);
                break;
            }
        }
    }

    // store remote result to local result container
    // insert one container into the search result buffer
    // one is enough, only the references are used, not the word
    if (event.addResultsToLocalIndex) {
        /*
         * Current thread might be interrupted by SearchEvent.cleanup()
         */
        if (Thread.interrupted()) {
            throw new InterruptedException("solrQuery interrupted");
        }
        WriteMetadataNodeToLocalIndexThread writerToLocalIndex =
                new WriteMetadataNodeToLocalIndexThread(event.query.getSegment(), storeDocs);
        writerToLocalIndex.start();
        try {
            writerToLocalIndex.join();
        } catch (InterruptedException e) {
            /*
             * Current thread interruption might happen while waiting
             * for writeToLocalIndexThread.
             */
            writerToLocalIndex.stopWriting();
            throw new InterruptedException("remoteProcess stopped!");
        }
        event.addRWIs(container.get(0), false, target.getName() + "/" + target.hash, result.totalCount, time);
    } else {
        // feed results as nodes (SolrQuery results) which carry metadata,
        // to prevent a call to getMetaData for RWI results, which would fail
        // (if no metadata in index and no display of these results)
        event.addNodes(storeDocs, null, snip, false, target.getName() + "/" + target.hash, count, true);
    }
    event.addFinalize();
    event.addExpectedRemoteReferences(-count);

    // insert the containers to the index
    for (final ReferenceContainer<WordReference> c : container) {
        try {
            event.query.getSegment().storeRWI(c);
        } catch (final Exception e) {
            ConcurrentLog.logException(e);
        }
    }

    // integrate remote top-words/topics
    if (result.references != null && result.references.length > 0) {
        Network.log.info("remote search: peer " + target.getName() + " sent "
                + result.references.length + " topics");
        // add references twice, so they can be counted (must have at least 2 entries)
        synchronized (event) {
            event.addTopic(result.references);
            event.addTopic(result.references);
        }
    }
    Network.log.info("remote search: peer " + target.getName() + " sent "
            + container.get(0).size() + "/" + result.totalCount + " references");
}
From source file:org.apache.hadoop.hdfs.server.datanode.DWRRBlockReceiver.java
void finalizeReceiveBlock() throws IOException {
    try {
        while (toBeWritten.size() > 0) {
            finalizeReceivePacket(toBeWritten.poll());
        }
        toBeWritten = null;

        // wait for all outstanding packet responses, then indicate the
        // responder to gracefully shut down.
        // Mark that responder has been closed for future processing
        if (responder != null) {
            ((PacketResponder) responder.getRunnable()).close();
            responderClosed = true;
        }

        // If this write is for a replication or transfer-RBW/Finalized,
        // then finalize block or convert temporary to RBW.
        // For client-writes, the block is finalized in the PacketResponder.
        if (isDatanode || isTransfer) {
            // close the block/crc files
            close();
            block.setNumBytes(replicaInfo.getNumBytes());

            if (stage == BlockConstructionStage.TRANSFER_RBW) {
                // for TRANSFER_RBW, convert temporary to RBW
                datanode.data.convertTemporaryToRbw(block);
            } else {
                // for isDatanode or TRANSFER_FINALIZED, finalize the block
                datanode.data.finalizeBlock(block);
            }
            datanode.metrics.incrBlocksWritten();
        }

        if (isClient) {
            close();
            final long endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
            block.setNumBytes(replicaInfo.getNumBytes());
            datanode.data.finalizeBlock(block);
            datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT, replicaInfo.getStorageUuid());
            if (ClientTraceLog.isInfoEnabled() && isClient) {
                long offset = 0;
                DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block.getBlockPoolId());
                ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr, myAddr,
                        block.getNumBytes(), "HDFS_WRITE", clientname, offset,
                        dnR.getDatanodeUuid(), block, endTime));
            } else {
                LOG.info("Received " + block + " size " + block.getNumBytes() + " from " + inAddr);
            }
        }
    } catch (IOException ioe) {
        LOG.error("CAMAMILLA " + this + " ioexception finalizeReceiveBlock " + ioe); // TODO log
        if (datanode.isRestarting()) {
            // Do not throw if shutting down for restart. Otherwise, it will cause
            // premature termination of responder.
            LOG.info("Shutting down for restart (" + block + ").");
        } else {
            LOG.info("Exception for " + block, ioe);
            throw ioe;
        }
    } catch (Exception e) {
        // TODO: this catch was not in the original (translated from Catalan)
        LOG.error("CAMAMILLA " + this + " exception finalizeReceiveBlock " + e); // TODO log
    } finally {
        // Clear the previous interrupt state of this thread.
        Thread.interrupted();

        // If a shutdown for restart was initiated, upstream needs to be notified.
        // There is no need to do anything special if the responder was closed normally.
        if (!responderClosed) { // Data transfer was not complete.
            LOG.info("CAMAMILLA " + this
                    + " finally finalizeReceiveBlock responderClosed " + responderClosed); // TODO log
            if (responder != null) {
                // In case this datanode is shutting down for quick restart,
                // send a special ack upstream.
                if (datanode.isRestarting() && isClient && !isTransfer) {
                    File blockFile = ((ReplicaInPipeline) replicaInfo).getBlockFile();
                    File restartMeta = new File(blockFile.getParent() + File.pathSeparator
                            + "." + blockFile.getName() + ".restart");
                    if (restartMeta.exists() && !restartMeta.delete()) {
                        LOG.warn("Failed to delete restart meta file: " + restartMeta.getPath());
                    }
                    try {
                        FileWriter out = new FileWriter(restartMeta);
                        // write out the current time.
                        out.write(Long.toString(Time.now() + restartBudget));
                        out.flush();
                        out.close();
                    } catch (IOException ioe) {
                        // The worst case is not recovering this RBW replica.
                        // Client will fall back to regular pipeline recovery.
                    }
                    LOG.info("CAMAMILLA " + this + " finally finalizeReceiveBlock send OOB Ack"); // TODO log
                    try {
                        ((PacketResponder) responder.getRunnable())
                                .sendOOBResponse(PipelineAck.getRestartOOBStatus());
                        // Even if the connection is closed after the ack packet is
                        // flushed, the client can react to the connection closure
                        // first. Insert a delay to lower the chance of the client
                        // missing the OOB ack.
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                        // It is already going down. Ignore this.
                    } catch (IOException ioe) {
                        LOG.info("Error sending OOB Ack.", ioe);
                    }
                }
                responder.interrupt();
            }
            IOUtils.closeStream(this);
            cleanupBlock();
        }
        if (responder != null) {
            try {
                responder.interrupt();
                // join() on the responder should time out a bit earlier than the
                // configured deadline. Otherwise, the join() on this thread will
                // likely time out as well.
                long joinTimeout = datanode.getDnConf().getXceiverStopTimeout();
                joinTimeout = joinTimeout > 1 ? joinTimeout * 8 / 10 : joinTimeout;
                responder.join(joinTimeout);
                if (responder.isAlive()) {
                    String msg = "Join on responder thread " + responder + " timed out";
                    LOG.warn(msg + "\n" + StringUtils.getStackTrace(responder));
                    throw new IOException(msg);
                }
            } catch (InterruptedException e) {
                responder.interrupt();
                // do not throw if shutting down for restart.
                if (!datanode.isRestarting()) {
                    throw new IOException("Interrupted receiveBlock");
                }
            }
            responder = null;
        }
    }
}
From source file:org.apache.geode.distributed.internal.InternalLocator.java
/**
 * Stop this locator.
 *
 * @param forcedDisconnect - whether this stop is caused by a forced disconnect
 * @param stopForReconnect - stopping for distributed system reconnect
 * @param waitForDisconnect - wait up to 60 seconds for the locator to completely stop
 */
public void stop(boolean forcedDisconnect, boolean stopForReconnect, boolean waitForDisconnect) {
    final boolean isDebugEnabled = logger.isDebugEnabled();

    this.stoppedForReconnect = stopForReconnect;
    this.forcedDisconnect = forcedDisconnect;

    if (this.server.isShuttingDown()) {
        // fix for bug 46156
        // If we are already shutting down don't do all of this again.
        // But, give the server a bit of time to shut down so a new
        // locator can be created, if desired, when this method returns
        if (!stopForReconnect && waitForDisconnect) {
            long endOfWait = System.currentTimeMillis() + 60000;
            if (isDebugEnabled && this.server.isAlive()) {
                logger.debug("sleeping to wait for the locator server to shut down...");
            }
            while (this.server.isAlive() && System.currentTimeMillis() < endOfWait) {
                try {
                    Thread.sleep(500);
                } catch (InterruptedException ignored) {
                    Thread.currentThread().interrupt();
                    return;
                }
            }
            if (isDebugEnabled) {
                if (this.server.isAlive()) {
                    logger.debug("60 seconds have elapsed waiting for the locator server to shut down"
                            + " - terminating wait and returning");
                } else {
                    logger.debug("the locator server has shut down");
                }
            }
        }
        return;
    }

    if (this.locatorDiscoverer != null) {
        this.locatorDiscoverer.stop();
        this.locatorDiscoverer = null;
    }

    if (this.server.isAlive()) {
        logger.info(LocalizedMessage.create(LocalizedStrings.InternalLocator_STOPPING__0, this));
        try {
            new TcpClient().stop(this.bindAddress, getPort());
        } catch (ConnectException ignore) {
            // must not be running
        }
        boolean interrupted = Thread.interrupted();
        try {
            this.server.join(TcpServer.SHUTDOWN_WAIT_TIME * 1000 + 10000);
        } catch (InterruptedException ex) {
            interrupted = true;
            logger.warn(LocalizedMessage.create(
                    LocalizedStrings.InternalLocator_INTERRUPTED_WHILE_STOPPING__0, this), ex);
            // Continue running -- doing our best to stop everything...
        } finally {
            if (interrupted) {
                Thread.currentThread().interrupt();
            }
        }
        if (this.server.isAlive()) {
            logger.fatal(LocalizedMessage.create(
                    LocalizedStrings.InternalLocator_COULD_NOT_STOP__0__IN_60_SECONDS, this));
        }
    }

    removeLocator(this);
    handleShutdown();
    logger.info(LocalizedMessage.create(LocalizedStrings.InternalLocator_0__IS_STOPPED, this));

    if (this.stoppedForReconnect) {
        if (this.myDs != null) {
            launchRestartThread();
        }
    }
}
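The `boolean interrupted = Thread.interrupted()` / restore-in-finally shape above is the classic way to wait without losing an interrupt: clear (and remember) any pending flag so the join can actually wait, record any interrupt that arrives mid-wait, and re-assert the status for the caller once the wait is over. The idiom condensed, with hypothetical names:

    // hypothetical helper: join a worker thread without swallowing interrupts
    static void joinPreservingInterrupt(Thread server, long waitMillis) {
        boolean interrupted = Thread.interrupted(); // clear and remember a pending interrupt
        try {
            server.join(waitMillis); // the join is no longer pre-empted by a stale flag
        } catch (InterruptedException ex) {
            interrupted = true; // an interrupt arrived during the wait; remember it
        } finally {
            if (interrupted) {
                Thread.currentThread().interrupt(); // restore the status for the caller
            }
        }
    }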
From source file:net.sf.jasperreports.engine.export.JRPdfExporter.java
/**
 *
 */
protected void exportReportToStream(OutputStream os) throws JRException {
    //ByteArrayOutputStream baos = new ByteArrayOutputStream();

    PdfExporterConfiguration configuration = getCurrentConfiguration();

    pageFormat = jasperPrint.getPageFormat(0);

    document = new Document(new Rectangle(pageFormat.getPageWidth(), pageFormat.getPageHeight()));

    imageTesterDocument = new Document(new Rectangle(10, //jasperPrint.getPageWidth(),
            10 //jasperPrint.getPageHeight()
    ));

    boolean closeDocuments = true;
    try {
        pdfWriter = PdfWriter.getInstance(document, os);
        pdfWriter.setCloseStream(false);

        tagHelper.setPdfWriter(pdfWriter);

        PdfVersionEnum pdfVersion = configuration.getPdfVersion();
        if (pdfVersion != null) {
            pdfWriter.setPdfVersion(pdfVersion.getName().charAt(0));
        }
        if (minimalVersion != null) {
            pdfWriter.setAtLeastPdfVersion(minimalVersion.getName().charAt(0));
        }
        if (configuration.isCompressed()) {
            pdfWriter.setFullCompression();
        }
        if (configuration.isEncrypted()) {
            int perms = configuration.isOverrideHints() == null || configuration.isOverrideHints()
                    ? (configuration.getPermissions() != null
                            ? (Integer) configuration.getPermissions() : permissions)
                    : (permissions != 0 ? permissions
                            : (configuration.getPermissions() != null
                                    ? (Integer) configuration.getPermissions() : 0));
            pdfWriter.setEncryption(PdfWriter.getISOBytes(configuration.getUserPassword()),
                    PdfWriter.getISOBytes(configuration.getOwnerPassword()), perms,
                    configuration.is128BitKey() ? PdfWriter.STANDARD_ENCRYPTION_128
                            : PdfWriter.STANDARD_ENCRYPTION_40);
        }

        PdfPrintScalingEnum printScaling = configuration.getPrintScaling();
        if (PdfPrintScalingEnum.DEFAULT == printScaling) {
            pdfWriter.addViewerPreference(PdfName.PRINTSCALING, PdfName.APPDEFAULT);
        } else if (PdfPrintScalingEnum.NONE == printScaling) {
            pdfWriter.addViewerPreference(PdfName.PRINTSCALING, PdfName.NONE);
        }

        boolean justifiedLetterSpacing = propertiesUtil.getBooleanProperty(jasperPrint,
                PdfExporterConfiguration.PROPERTY_JUSTIFIED_LETTER_SPACING, false);
        if (!justifiedLetterSpacing) {
            pdfWriter.setSpaceCharRatio(PdfWriter.NO_SPACE_CHAR_RATIO);
        }

        // Add meta-data parameters to generated PDF document
        // mtclough@users.sourceforge.net 2005-12-05
        String title = configuration.getMetadataTitle();
        if (title != null) {
            document.addTitle(title);
            if (configuration.isDisplayMetadataTitle()) {
                pdfWriter.addViewerPreference(PdfName.DISPLAYDOCTITLE, new PdfBoolean(true));
            }
        }
        String author = configuration.getMetadataAuthor();
        if (author != null) {
            document.addAuthor(author);
        }
        String subject = configuration.getMetadataSubject();
        if (subject != null) {
            document.addSubject(subject);
        }
        String keywords = configuration.getMetadataKeywords();
        if (keywords != null) {
            document.addKeywords(keywords);
        }
        String creator = configuration.getMetadataCreator();
        if (creator == null) {
            creator = "JasperReports Library version "
                    + Package.getPackage("net.sf.jasperreports.engine").getImplementationVersion();
        }
        document.addCreator(creator);

        // accessibility check: tab order follows the structure of the document
        pdfWriter.setTabs(PdfName.S);

        // accessibility check: setting the document primary language
        String language = configuration.getTagLanguage();
        if (language != null) {
            pdfWriter.getExtraCatalog().put(PdfName.LANG, new PdfString(language));
        }

        // BEGIN: PDF/A support
        PdfaConformanceEnum pdfaConformance = configuration.getPdfaConformance();
        boolean gotPdfa = false;
        if (PdfaConformanceEnum.PDFA_1A == pdfaConformance) {
            pdfWriter.setPDFXConformance(PdfWriter.PDFA1A);
            gotPdfa = true;
        } else if (PdfaConformanceEnum.PDFA_1B == pdfaConformance) {
            pdfWriter.setPDFXConformance(PdfWriter.PDFA1B);
            gotPdfa = true;
        }

        if (gotPdfa) {
            if (PdfXmpCreator.supported()) {
                byte[] metadata = PdfXmpCreator.createXmpMetadata(pdfWriter);
                pdfWriter.setXmpMetadata(metadata);
            } else {
                if ((title != null || subject != null || keywords != null) && log.isWarnEnabled()) {
                    // iText 2.1.7 does not properly write localized properties and keywords
                    log.warn("XMP metadata might be non conforming, include the Adobe XMP library to correct");
                }
                pdfWriter.createXmpMetadata();
            }
        } else {
            pdfWriter.setRgbTransparencyBlending(true);
        }
        // END: PDF/A support

        document.open();

        // BEGIN: PDF/A support
        if (gotPdfa) {
            String iccProfilePath = configuration.getIccProfilePath();
            if (iccProfilePath != null) {
                PdfDictionary pdfDictionary = new PdfDictionary(PdfName.OUTPUTINTENT);
                pdfDictionary.put(PdfName.OUTPUTCONDITIONIDENTIFIER, new PdfString("sRGB IEC61966-2.1"));
                pdfDictionary.put(PdfName.INFO, new PdfString("sRGB IEC61966-2.1"));
                pdfDictionary.put(PdfName.S, PdfName.GTS_PDFA1);
                InputStream iccIs = RepositoryUtil.getInstance(jasperReportsContext)
                        .getInputStreamFromLocation(iccProfilePath); //FIXME use getRepository?
                PdfICCBased pdfICCBased = new PdfICCBased(ICC_Profile.getInstance(iccIs));
                pdfICCBased.remove(PdfName.ALTERNATE);
                pdfDictionary.put(PdfName.DESTOUTPUTPROFILE,
                        pdfWriter.addToBody(pdfICCBased).getIndirectReference());
                pdfWriter.getExtraCatalog().put(PdfName.OUTPUTINTENTS, new PdfArray(pdfDictionary));
            } else {
                throw new JRPdfaIccProfileNotFoundException();
            }
        }
        // END: PDF/A support

        String pdfJavaScript = configuration.getPdfJavaScript();
        if (pdfJavaScript != null) {
            pdfWriter.addJavaScript(pdfJavaScript);
        }

        pdfContentByte = pdfWriter.getDirectContent();
        tagHelper.init(pdfContentByte);

        PdfWriter imageTesterPdfWriter = PdfWriter.getInstance(imageTesterDocument,
                new NullOutputStream() // discard the output
        );
        imageTesterDocument.open();
        imageTesterDocument.newPage();
        imageTesterPdfContentByte = imageTesterPdfWriter.getDirectContent();
        imageTesterPdfContentByte.setLiteral("\n");

        List<ExporterInputItem> items = exporterInput.getItems();

        initBookmarks(items);

        boolean isCreatingBatchModeBookmarks = configuration.isCreatingBatchModeBookmarks();

        for (reportIndex = 0; reportIndex < items.size(); reportIndex++) {
            ExporterInputItem item = items.get(reportIndex);

            setCurrentExporterInputItem(item);

            pageFormat = jasperPrint.getPageFormat(0);

            setPageSize(null);

            List<JRPrintPage> pages = jasperPrint.getPages();
            if (pages != null && pages.size() > 0) {
                if (items.size() > 1) {
                    document.newPage();

                    if (isCreatingBatchModeBookmarks) {
                        // add a new level to our outline for this report
                        addBookmark(0, jasperPrint.getName(), 0, 0);
                    }
                }

                PdfReportConfiguration lcItemConfiguration = getCurrentItemConfiguration();

                boolean sizePageToContent = lcItemConfiguration.isSizePageToContent();

                PrintPageFormat oldPageFormat = null;

                PageRange pageRange = getPageRange();
                int startPageIndex = (pageRange == null || pageRange.getStartPageIndex() == null)
                        ? 0 : pageRange.getStartPageIndex();
                int endPageIndex = (pageRange == null || pageRange.getEndPageIndex() == null)
                        ? (pages.size() - 1) : pageRange.getEndPageIndex();

                for (int pageIndex = startPageIndex; pageIndex <= endPageIndex; pageIndex++) {
                    if (Thread.interrupted()) {
                        throw new ExportInterruptedException();
                    }

                    JRPrintPage page = pages.get(pageIndex);

                    pageFormat = jasperPrint.getPageFormat(pageIndex);

                    if (sizePageToContent || oldPageFormat != pageFormat) {
                        setPageSize(sizePageToContent ? page : null);
                    }

                    document.newPage();

                    pdfContentByte = pdfWriter.getDirectContent();

                    pdfContentByte.setLineCap(2); //PdfContentByte.LINE_CAP_PROJECTING_SQUARE since iText 1.02b

                    writePageAnchor(pageIndex);

                    crtDocumentPageNumber++;

                    exportPage(page);

                    oldPageFormat = pageFormat;
                }
            } else {
                document.newPage();
                pdfContentByte = pdfWriter.getDirectContent();
                pdfContentByte.setLiteral("\n");
            }
        }

        closeDocuments = false;
        document.close();
        imageTesterDocument.close();
    } catch (DocumentException e) {
        throw new JRException(EXCEPTION_MESSAGE_KEY_DOCUMENT_ERROR,
                new Object[] { jasperPrint.getName() }, e);
    } catch (IOException e) {
        throw new JRException(EXCEPTION_MESSAGE_KEY_REPORT_GENERATION_ERROR,
                new Object[] { jasperPrint.getName() }, e);
    } finally {
        if (closeDocuments) { // only on exception
            try {
                document.close();
            } catch (Exception e) {
                // ignore, let the original exception propagate
            }
            try {
                imageTesterDocument.close();
            } catch (Exception e) {
                // ignore, let the original exception propagate
            }
        }
    }

    //return os.toByteArray();
}
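This exporter uses the same per-iteration checkpoint as the YaCy example above, but converts the cleared interrupt into a domain-specific ExportInterruptedException instead of rethrowing InterruptedException, so the export API does not have to declare the checked exception on every method.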
From source file:LinkedTransferQueue.java
/**
 * Transfers the element to a consumer, waiting if necessary to do so.
 *
 * <p>More precisely, transfers the specified element immediately
 * if there exists a consumer already waiting to receive it (in
 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
 * else inserts the specified element at the tail of this queue
 * and waits until the element is received by a consumer.
 *
 * @throws NullPointerException if the specified element is null
 */
public void transfer(E e) throws InterruptedException {
    if (xfer(e, true, SYNC, 0) != null) {
        Thread.interrupted(); // failure possible only due to interrupt
        throw new InterruptedException();
    }
}
From source file:LinkedTransferQueue.java
/**
 * Transfers the element to a consumer if it is possible to do so
 * before the timeout elapses.
 *
 * <p>More precisely, transfers the specified element immediately
 * if there exists a consumer already waiting to receive it (in
 * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
 * else inserts the specified element at the tail of this queue
 * and waits until the element is received by a consumer,
 * returning {@code false} if the specified wait time elapses
 * before the element can be transferred.
 *
 * @throws NullPointerException if the specified element is null
 */
public boolean tryTransfer(E e, long timeout, TimeUnit unit) throws InterruptedException {
    if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null)
        return true;
    if (!Thread.interrupted())
        return false;
    throw new InterruptedException();
}
From source file:LinkedTransferQueue.java
public E take() throws InterruptedException {
    E e = xfer(null, false, SYNC, 0);
    if (e != null)
        return e;
    Thread.interrupted(); // a null result here can only mean an interrupt; clear the status
    throw new InterruptedException();
}
From source file:LinkedTransferQueue.java
public E poll(long timeout, TimeUnit unit) throws InterruptedException {
    E e = xfer(null, false, TIMED, unit.toNanos(timeout));
    if (e != null || !Thread.interrupted())
        return e; // either got an element, or timed out without an interrupt
    throw new InterruptedException();
}
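All four LinkedTransferQueue methods follow the same JDK convention: InterruptedException is thrown only after Thread.interrupted() has cleared the status, and in the timed variants the interrupted() check also distinguishes "interrupted" from "timed out" when xfer reports failure. A minimal sketch of a hypothetical blocking helper that keeps the same convention:

    // hypothetical helper: drain a queue, throwing InterruptedException with the flag cleared
    static <E> E takeOrThrow(java.util.Queue<E> queue) throws InterruptedException {
        for (;;) {
            E e = queue.poll();
            if (e != null) {
                return e;
            }
            if (Thread.interrupted()) { // clears the status, matching the JDK convention
                throw new InterruptedException();
            }
            Thread.onSpinWait(); // busy-wait placeholder; a real implementation would park
        }
    }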
From source file:org.apache.lucene.index.IndexWriter.java
private void closeInternal(boolean waitForMerges, boolean doFlush) throws IOException {
    boolean interrupted = false;
    try {
        if (pendingCommit != null) {
            throw new IllegalStateException(
                    "cannot close: prepareCommit was already called with no corresponding call to commit");
        }

        if (infoStream.isEnabled("IW")) {
            infoStream.message("IW", "now flush at close waitForMerges=" + waitForMerges);
        }

        docWriter.close();

        try {
            // Only allow a new merge to be triggered if we are
            // going to wait for merges:
            if (doFlush) {
                flush(waitForMerges, true);
            } else {
                docWriter.abort(this); // already closed -- never sync on IW
            }
        } finally {
            try {
                // clean up merge scheduler in all cases, although flushing may have failed:
                interrupted = Thread.interrupted();

                if (waitForMerges) {
                    try {
                        // Give merge scheduler last chance to run, in case
                        // any pending merges are waiting:
                        mergeScheduler.merge(this);
                    } catch (ThreadInterruptedException tie) {
                        // ignore any interruption, does not matter
                        interrupted = true;
                        if (infoStream.isEnabled("IW")) {
                            infoStream.message("IW", "interrupted while waiting for final merges");
                        }
                    }
                }

                synchronized (this) {
                    for (;;) {
                        try {
                            finishMerges(waitForMerges && !interrupted);
                            break;
                        } catch (ThreadInterruptedException tie) {
                            // by setting the interrupted status, the
                            // next call to finishMerges will pass false,
                            // so it will not wait
                            interrupted = true;
                            if (infoStream.isEnabled("IW")) {
                                infoStream.message("IW", "interrupted while waiting for merges to finish");
                            }
                        }
                    }
                    stopMerges = true;
                }
            } finally {
                // shutdown policy, scheduler and all threads (this call is not interruptible):
                IOUtils.closeWhileHandlingException(mergePolicy, mergeScheduler);
            }
        }

        if (infoStream.isEnabled("IW")) {
            infoStream.message("IW", "now call final commit()");
        }

        if (doFlush) {
            commitInternal();
        }
        processEvents(false, true);
        synchronized (this) {
            // commitInternal calls ReaderPool.commit, which
            // writes any pending liveDocs from ReaderPool, so
            // it's safe to drop all readers now:
            readerPool.dropAll(true);
            deleter.close();
        }

        if (infoStream.isEnabled("IW")) {
            infoStream.message("IW", "at close: " + segString());
        }

        if (writeLock != null) {
            writeLock.close(); // release write lock
            writeLock = null;
        }
        synchronized (this) {
            closed = true;
        }
        assert docWriter.perThreadPool.numDeactivatedThreadStates() == docWriter.perThreadPool.getMaxThreadStates()
                : "" + docWriter.perThreadPool.numDeactivatedThreadStates() + " "
                        + docWriter.perThreadPool.getMaxThreadStates();
    } catch (OutOfMemoryError oom) {
        handleOOM(oom, "closeInternal");
    } finally {
        synchronized (this) {
            closing = false;
            notifyAll();
            if (!closed) {
                if (infoStream.isEnabled("IW")) {
                    infoStream.message("IW", "hit exception while closing");
                }
            }
        }
        // finally, restore interrupt status:
        if (interrupted) Thread.currentThread().interrupt();
    }
}
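Taken together, the examples on this page reduce to three recurring uses of the status-clearing side effect: swallowing the interrupt during shutdown (Container.close), clearing a stale flag before further blocking work (DWRRBlockReceiver), and clear-remember-restore around an uninterruptible wait (InternalLocator and IndexWriter above) — with LinkedTransferQueue showing the library-level convention of clearing the flag before throwing InterruptedException.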