List of usage examples for java.lang.Thread.interrupt()
public void interrupt()
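Before the project-specific examples, here is a minimal self-contained sketch of the basic contract: interrupt() sets the target thread's interrupt status, and blocking calls such as sleep(), wait(), and join() respond by throwing InterruptedException. This sketch is illustrative only and is not taken from any of the projects below.

public class InterruptDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread sleeper = new Thread(() -> {
            try {
                Thread.sleep(60_000L);           // blocks until interrupted
            } catch (InterruptedException e) {
                // sleep() throws and clears the flag; restore it by convention
                Thread.currentThread().interrupt();
                System.out.println("Interrupted while sleeping");
            }
        });
        sleeper.start();
        sleeper.interrupt();                     // request cancellation
        sleeper.join();                          // wait for the thread to exit
    }
}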
From source file: org.alfresco.repo.transaction.ConnectionPoolOverloadTest.java

@Test
public void testOverload() throws Exception {
    List<Thread> threads = new LinkedList<Thread>();
    int numThreads = dbPoolMax + 1;
    int i = 0;
    try {
        for (i = 0; i < numThreads; i++) {
            Thread thread = new TxnThread("Thread-" + i);
            thread.start();
            threads.add(thread);
        }
    } finally {
        try {
            for (Thread thread : threads) {
                if (thread != null) {
                    try {
                        thread.join(dbPoolWaitMax);
                    } catch (Exception e) {
                        fail("The " + thread.getName() + " failed to join.");
                    }
                }
            }
        } finally {
            for (Thread thread : threads) {
                if (thread != null) {
                    thread.interrupt();
                }
            }
        }
        assertTrue("The number of failed threads should not be 0.", failCount.intValue() > 0);
        assertTrue("The number of open transactions should not be more than the db pool maximum."
                + " (Maybe the configured DB connection limit is less than db.pool.max.)"
                + " db.pool.max is " + dbPoolMax + ", number of threads is " + numThreads
                + ", number of failed threads is " + failCount.intValue(),
                dbPoolMax >= numThreads - failCount.intValue());
    }
}
From source file: org.noroomattheinn.utils.ThreadManager.java

public synchronized void shutDown() {
    shuttingDown = true;
    timer.cancel();
    for (Stoppable s : stopList) {
        s.stop();
    }
    int nActive;
    do {
        nActive = 0;
        logger.finest("Iterating through terminate loop");
        for (Thread t : threads) {
            Thread.State state = t.getState();
            switch (state) {
            case NEW:
            case RUNNABLE:
                nActive++;
                logger.finest("Active thread: " + t.getName());
                break;
            case TERMINATED:
                logger.finest("Terminated thread: " + t.getName());
                break;
            case BLOCKED:
            case TIMED_WAITING:
            case WAITING:
                logger.finest("About to interrupt thread: " + t.getName());
                nActive++;
                t.interrupt();
                Utils.yieldFor(100);
                break;
            default:
                break;
            }
        }
    } while (nActive > 0);
}
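Note that interrupt() only unblocks threads parked in interruptible operations such as Thread.sleep(), Object.wait(), or Thread.join(). A thread in the BLOCKED state (stuck on monitor entry) merely has its interrupt status set and stays blocked until it acquires the lock. That is why this loop keeps re-polling thread states until everything reaches TERMINATED, rather than interrupting once and assuming success.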
From source file: net.sf.taverna.t2.servicedescriptions.impl.ServiceDescriptionRegistryImpl.java

private void updateServiceDescriptions(boolean refreshAll, boolean waitFor) {
    List<Thread> threads = new ArrayList<>();
    for (ServiceDescriptionProvider provider : getServiceDescriptionProviders()) {
        synchronized (providerDescriptions) {
            if (providerDescriptions.containsKey(provider) && !refreshAll)
                // We'll use the cached values
                continue;
            Thread oldThread = serviceDescriptionThreads.get(provider);
            if (oldThread != null && oldThread.isAlive()) {
                if (refreshAll)
                    // The new thread will override the old thread
                    oldThread.interrupt();
                else {
                    // observers.notify(new ProviderStatusNotification(provider, "Waiting for provider"));
                    continue;
                }
            }
            // Not run yet - we'll start a new thread
            Thread thread = new FindServiceDescriptionsThread(provider);
            threads.add(thread);
            serviceDescriptionThreads.put(provider, thread);
            thread.start();
        }
    }
    if (waitFor)
        joinThreads(threads, DESCRIPTION_THREAD_TIMEOUT_MS);
}
From source file: com.hellblazer.process.impl.AbstractManagedProcess.java

/**
 * The actual execution process. Control will not return until the command
 * list execution has finished.
 *
 * @param commands
 *            - the command list to execute
 *
 * @throws IOException
 *             - if anything goes wrong during the execution.
 */
protected void primitiveExecute(List<String> commands) throws IOException {
    ProcessBuilder builder = new ProcessBuilder();
    builder.directory(directory);
    if (environment != null) {
        builder.environment().putAll(environment);
    }
    builder.command(commands);
    builder.redirectErrorStream(true); // combine OUT and ERR into one stream
    Process p = builder.start();
    final BufferedReader shellReader = new BufferedReader(new InputStreamReader(p.getInputStream()));
    Runnable reader = new Runnable() {
        @Override
        public void run() {
            String line;
            try {
                line = shellReader.readLine();
            } catch (IOException e) {
                // guard against null messages before calling contains()
                String msg = e.getMessage();
                if (!"Stream closed".equals(msg) && (msg == null || !msg.contains("Bad file descriptor"))) {
                    log.log(Level.SEVERE, "Failed reading process output", e);
                }
                return;
            }
            while (line != null) {
                if (log.isLoggable(Level.FINE)) {
                    log.fine("[" + id + "] " + line);
                }
                try {
                    line = shellReader.readLine();
                } catch (IOException e) {
                    if (!"Stream closed".equals(e.getMessage())) {
                        log.log(Level.SEVERE, "Failed reading process output", e);
                    }
                    return;
                }
            }
        }
    };
    Thread readerThread = new Thread(reader, "Process reader for: " + getCommand());
    readerThread.setDaemon(true);
    readerThread.start();
    try {
        p.waitFor();
    } catch (InterruptedException e) {
        return;
    } finally {
        readerThread.interrupt();
        p.destroy();
    }
}
From source file: net.greghaines.jesque.worker.WorkerImpl.java

/**
 * Shutdown this Worker.<br>
 * <b>The worker cannot be started again; create a new worker in this
 * case.</b>
 *
 * @param now
 *            if true, an effort will be made to stop any job in progress
 */
@Override
public void end(final boolean now) {
    if (now) {
        this.state.set(SHUTDOWN_IMMEDIATE);
        final Thread workerThread = this.threadRef.get();
        if (workerThread != null) {
            workerThread.interrupt();
        }
    } else {
        this.state.set(SHUTDOWN);
    }
    togglePause(false); // Release any threads waiting in checkPaused()
}
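The immediate-shutdown branch above only helps if the worker's run loop treats interruption as a stop signal. A minimal, generic sketch of such a cooperative loop follows; it illustrates the pattern and is not jesque's actual run loop.

// Generic illustration of a run loop that honors interrupt() as a stop signal.
public class PollingWorker implements Runnable {
    @Override
    public void run() {
        try {
            while (!Thread.currentThread().isInterrupted()) {
                doOneUnitOfWork();      // poll a queue, process a job, etc.
                Thread.sleep(1000);     // blocking call; throws if interrupted
            }
        } catch (InterruptedException e) {
            // sleep() cleared the interrupt flag; restore it for any caller
            Thread.currentThread().interrupt();
        }
    }

    private void doOneUnitOfWork() {
        // ... job processing would go here ...
    }
}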
From source file: uk.bl.wa.solr.TikaExtractor.java

/**
 * @param solr
 * @param is
 * @param url
 * @return
 * @throws IOException
 */
@SuppressWarnings("deprecation")
public SolrRecord extract(SolrRecord solr, InputStream is, String url) throws IOException {
    // Set up the TikaInputStream:
    TikaInputStream tikainput = null;
    if (this.maxBytesToParser > 0) {
        tikainput = TikaInputStream
                .get(new BoundedInputStream(new CloseShieldInputStream(is), maxBytesToParser));
    } else {
        tikainput = TikaInputStream.get(new CloseShieldInputStream(is));
    }

    // Also pass URL as metadata to allow extension hints to work:
    Metadata metadata = new Metadata();
    if (url != null)
        metadata.set(Metadata.RESOURCE_NAME_KEY, url);

    final long detectStart = System.nanoTime();
    StringBuilder detected = new StringBuilder();
    try {
        DetectRunner detect = new DetectRunner(tika, tikainput, detected, metadata);
        Thread detectThread = new Thread(detect, Long.toString(System.currentTimeMillis()));
        detectThread.start();
        detectThread.join(10000L);
        detectThread.interrupt();
    } catch (NoSuchFieldError e) {
        // TODO Is this an Apache POI version issue?
        log.error("Tika.detect(): " + e.getMessage());
        addExceptionMetadata(metadata, new Exception("detect threw " + e.getClass().getCanonicalName()));
    } catch (Exception e) {
        log.error("Tika.detect(): " + e.getMessage());
        addExceptionMetadata(metadata, e);
    }
    Instrument.timeRel("WARCPayloadAnalyzers.analyze#tikasolrextract", "TikaExtractor.extract#detect",
            detectStart);

    // Only proceed if we have a suitable type:
    if (!this.checkMime(detected.toString())) {
        if ("".equals(detected.toString())) {
            solr.addField(SolrFields.SOLR_CONTENT_TYPE, MediaType.APPLICATION_OCTET_STREAM.toString());
        } else {
            solr.addField(SolrFields.SOLR_CONTENT_TYPE, detected.toString());
        }
        return solr;
    }

    // Context
    ParseContext context = new ParseContext();
    StringWriter content = new StringWriter();

    // Override the recursive parsing:
    if (embedded == null)
        embedded = new NonRecursiveEmbeddedDocumentExtractor(context);
    context.set(EmbeddedDocumentExtractor.class, embedded);

    try {
        final long parseStart = System.nanoTime();
        ParseRunner runner = new ParseRunner(tika.getParser(), tikainput, this.getHandler(content),
                metadata, context);
        Thread parseThread = new Thread(runner, Long.toString(System.currentTimeMillis()));
        try {
            parseThread.start();
            parseThread.join(this.parseTimeout);
            parseThread.interrupt();
            parseThread.join(this.parseTimeout);
        } catch (OutOfMemoryError o) {
            log.error("TikaExtractor.parse() - OutOfMemoryError: " + o.getMessage());
            addExceptionMetadata(metadata, new Exception("OutOfMemoryError"));
        } catch (RuntimeException r) {
            log.error("TikaExtractor.parse() - RuntimeException: " + r.getMessage());
            addExceptionMetadata(metadata, r);
        }
        Instrument.timeRel("WARCPayloadAnalyzers.analyze#tikasolrextract", "TikaExtractor.extract#parse",
                parseStart);

        // If there was a parse error, report it:
        solr.addField(SolrFields.PARSE_ERROR, metadata.get(TikaExtractor.TIKA_PARSE_EXCEPTION));

        final long extractStart = System.nanoTime();
        // Copy the body text, forcing a UTF-8 encoding:
        String output = new String(content.toString().getBytes("UTF-8"));
        if (runner.complete || !output.equals("")) {
            if (output.length() > this.max_text_length) {
                output = output.substring(0, this.max_text_length);
            }
            log.debug("Extracted text from: " + url);
            log.debug("Extracted text: " + StringUtils.left(output, 300));
            solr.setField(SolrFields.SOLR_EXTRACTED_TEXT, output);
            solr.setField(SolrFields.SOLR_EXTRACTED_TEXT_LENGTH, Integer.toString(output.length()));
        } else {
            // log.debug("Failed to extract any text from: " + url);
        }

        // Noisily report all metadata properties:
        /*
         * for (String m : metadata.names()) {
         *     log.info("For " + url.substring(url.length() - (int) Math.pow(url.length(), 0.85))
         *             + ": " + m + " -> " + metadata.get(m));
         * }
         */

        // Attempt to record all metadata discovered:
        if (this.extractAllMetadata) {
            for (String m : metadata.names()) {
                // Ignore these as they are not very interesting:
                if (Metadata.RESOURCE_NAME_KEY.equalsIgnoreCase(m) || "dc:title".equalsIgnoreCase(m)
                        || "title".equalsIgnoreCase(m) || "description".equalsIgnoreCase(m)
                        || "keywords".equalsIgnoreCase(m) || Metadata.CONTENT_ENCODING.equalsIgnoreCase(m)
                        || Metadata.CONTENT_LOCATION.equalsIgnoreCase(m)
                        || "ACTINICTITLE".equalsIgnoreCase(m) || Metadata.CONTENT_TYPE.equalsIgnoreCase(m)) {
                    continue;
                }
                // Record in the document, but trim big ones:
                String value = metadata.get(m);
                if (value != null && value.length() > 100) {
                    value = value.substring(0, 100);
                }
                solr.addField(SolrFields.SOLR_TIKA_METADATA, m + "=" + value);
            }
        }

        // Also pick out particular metadata:
        String contentType = metadata.get(Metadata.CONTENT_TYPE);
        solr.addField(SolrFields.SOLR_CONTENT_TYPE, contentType);
        solr.addField(SolrFields.SOLR_TITLE, metadata.get(DublinCore.TITLE));
        solr.addField(SolrFields.SOLR_DESCRIPTION, metadata.get(DublinCore.DESCRIPTION));
        solr.addField(SolrFields.SOLR_KEYWORDS, metadata.get("keywords"));
        solr.addField(SolrFields.SOLR_AUTHOR, metadata.get(DublinCore.CREATOR));
        solr.addField(SolrFields.CONTENT_ENCODING, metadata.get(Metadata.CONTENT_ENCODING));

        // Parse out any embedded date that can act as a created/modified date.
        String date = null;
        if (metadata.get(Metadata.CREATION_DATE) != null)
            date = metadata.get(Metadata.CREATION_DATE);
        if (metadata.get(Metadata.DATE) != null)
            date = metadata.get(Metadata.DATE);
        if (metadata.get(Metadata.MODIFIED) != null)
            date = metadata.get(Metadata.MODIFIED);
        if (date != null) {
            DateTimeFormatter df = ISODateTimeFormat.dateTimeParser();
            DateTime edate = null;
            try {
                edate = df.parseDateTime(date);
            } catch (IllegalArgumentException e) {
                log.error("Could not parse: " + date);
            }
            if (edate == null) {
                Date javadate = Times.extractDate(date);
                if (javadate != null)
                    edate = new org.joda.time.DateTime(javadate);
            }
            if (edate != null) {
                solr.addField(SolrFields.LAST_MODIFIED_YEAR, "" + edate.getYear());
                DateTimeFormatter iso_df = ISODateTimeFormat.dateTimeNoMillis().withZone(DateTimeZone.UTC);
                // solr.getSolrDocument().setField(SolrFields.LAST_MODIFIED, edate);
                solr.setField(SolrFields.LAST_MODIFIED, iso_df.print(edate));
            }
        }

        // Also look to record the software identifiers:

        // Look for generic xmp:CreatorTool
        solr.addField(SolrFields.GENERATOR, metadata.get("xmp:CreatorTool"));
        // For PDF, support other metadata tags:
        // solr.addField(SolrFields.GENERATOR, metadata.get("creator")); // This appears to be dc:creator, i.e. author.
        solr.addField(SolrFields.GENERATOR, metadata.get("producer"));
        solr.addField(SolrFields.GENERATOR, metadata.get(Metadata.SOFTWARE));
        solr.addField(SolrFields.GENERATOR, metadata.get("generator"));
        solr.addField(SolrFields.GENERATOR, metadata.get("Software"));

        // Application ID; MS Office only AFAICT, and the VERSION is only in .doc
        String software = null;
        if (metadata.get(Metadata.APPLICATION_NAME) != null)
            software = metadata.get(Metadata.APPLICATION_NAME);
        if (metadata.get(Metadata.APPLICATION_VERSION) != null)
            software += " " + metadata.get(Metadata.APPLICATION_VERSION);
        // Images, e.g. JPEG and TIFF, can have 'Software', 'tiff:Software';
        // PNGs have a 'tEXt tEXtEntry: keyword=Software, value=GPL Ghostscript 8.71'
        String png_textentry = metadata.get("tEXt tEXtEntry");
        if (png_textentry != null && png_textentry.contains("keyword=Software, value="))
            software = png_textentry.replace("keyword=Software, value=", "");
        /*
         * Some JPEGs have this:
         * Jpeg Comment: CREATOR: gd-jpeg v1.0 (using IJG JPEG v62), default quality
         * comment: CREATOR: gd-jpeg v1.0 (using IJG JPEG v62), default quality
         */
        if (software != null) {
            solr.addField(SolrFields.GENERATOR, software);
        }
        Instrument.timeRel("WARCPayloadAnalyzers.analyze#tikasolrextract", "TikaExtractor.extract#extract",
                extractStart);
    } catch (Exception e) {
        log.error("TikaExtractor.extract(): " + e.getMessage());
    }

    // TODO: This should probably be wrapped in a method-spanning try-finally to guarantee close
    if (tikainput != null) {
        try {
            tikainput.close();
        } catch (IOException e) {
            log.warn("Exception closing TikaInputStream. This leaves tmp-files: " + e.getMessage());
        }
    }

    return solr;
}
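Both the detect and parse steps above use the same watchdog idiom: run the work on a helper thread, join() with a timeout, then interrupt() anything still running. A stripped-down sketch of that idiom follows; the runWithDeadline helper is hypothetical and not part of TikaExtractor.

// Minimal sketch of the join(timeout) + interrupt() watchdog idiom.
static void runWithDeadline(Runnable task, long timeoutMs) throws InterruptedException {
    Thread worker = new Thread(task, "deadline-worker");
    worker.start();
    worker.join(timeoutMs);   // wait for normal completion, up to the deadline
    worker.interrupt();       // harmless if finished; a stop request otherwise
    worker.join(timeoutMs);   // give an interruptible task time to wind down
    if (worker.isAlive()) {
        // The task ignores interruption (e.g. a tight CPU loop); it will keep
        // running, so callers should mark the result as incomplete.
    }
}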
From source file: org.nuxeo.ecm.core.TestSQLRepositoryReadAcls.java

protected void doParallelPrepareUserReadAcls(int i) throws Throwable {
    // set ACP on root
    ACPImpl acp = new ACPImpl();
    ACLImpl acl = new ACLImpl();
    String username = "user" + i;
    acl.add(new ACE("Administrator", "Everything", true));
    acl.add(new ACE(username, "Everything", true));
    acp.addACL(acl);

    String name = "doc" + i;
    DocumentModel doc = session.createDocumentModel("/", name, "File");
    doc = session.createDocument(doc);
    doc.setACP(acp, true);
    session.save();
    closeSession();
    TransactionHelper.commitOrRollbackTransaction();

    CyclicBarrier barrier = new CyclicBarrier(2);
    CountDownLatch firstReady = new CountDownLatch(1);
    PrepareUserReadAclsJob r1 = new PrepareUserReadAclsJob(name, username, database.repositoryName,
            firstReady, barrier);
    PrepareUserReadAclsJob r2 = new PrepareUserReadAclsJob(name, username, database.repositoryName, null,
            barrier);
    Thread t1 = null;
    Thread t2 = null;
    try {
        t1 = new Thread(r1, "t1");
        t2 = new Thread(r2, "t2");
        t1.start();
        if (firstReady.await(60, TimeUnit.SECONDS)) {
            t2.start();
            t1.join();
            t1 = null;
            t2.join();
            t2 = null;
            if (r1.throwable != null) {
                throw r1.throwable;
            }
            if (r2.throwable != null) {
                throw r2.throwable;
            }
        } // else timed out
    } finally {
        // error condition recovery
        if (t1 != null) {
            t1.interrupt();
        }
        if (t2 != null) {
            t2.interrupt();
        }
    }

    // after both threads have run, check that we don't see duplicate documents
    TransactionHelper.startTransaction();
    session = openSessionAs(username);
    checkOneDoc(session, name); // failed for PostgreSQL
    closeSession();
    TransactionHelper.commitOrRollbackTransaction();
    TransactionHelper.startTransaction();
    openSession();
}
From source file: net.sf.jasperreports.engine.fill.BaseReportFiller.java

/**
 * Cancels the fill process.
 *
 * @throws JRException
 */
@Override
public void cancelFill() throws JRException {
    if (log.isDebugEnabled()) {
        log.debug("Fill " + fillerId + ": cancelling");
    }

    fillContext.markCanceled();
    if (fillContext.cancelRunningQuery()) {
        if (log.isDebugEnabled()) {
            log.debug("Fill " + fillerId + ": query cancelled");
        }
    } else {
        Thread t = fillingThread;
        if (t != null) {
            if (log.isDebugEnabled()) {
                log.debug("Fill " + fillerId + ": Interrupting thread " + t);
            }
            t.interrupt();
        }
    }
}
From source file: com.janoz.usenet.searchers.impl.NewzbinConnectorTest.java

private void interruptThisThreadIn(final long millis) {
    final Thread t = Thread.currentThread();
    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                Thread.sleep(millis);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            t.interrupt();
        }
    }).start();
}
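A hypothetical JUnit 4 usage of this helper, assuming a blocking call that should respond to interruption; the sleep below stands in for the real blocking call under test.

@Test(expected = InterruptedException.class)
public void blockingCallRespondsToInterrupt() throws InterruptedException {
    interruptThisThreadIn(500L);   // interrupt this test thread in 0.5s
    Thread.sleep(10_000L);         // stands in for the blocking call under test
}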
From source file: org.silverpeas.core.util.DBUtilIntegrationTest.java

@Test
public void nextUniqueIdUpdateForAnExistingTableShouldWorkAndConcurrency()
        throws SQLException, InterruptedException {
    int nextIdBeforeTesting = actualMaxIdInUniqueIdFor("User");
    assertThat(nextIdBeforeTesting, is(1));
    int nbThreads = 2 + (int) (Math.random() * 10);
    Logger.getAnonymousLogger()
            .info("Start at " + System.currentTimeMillis() + " with " + nbThreads + " threads");
    final Thread[] threads = new Thread[nbThreads];
    for (int i = 0; i < nbThreads; i++) {
        threads[i] = new Thread(() -> {
            try {
                int nextId = DBUtil.getNextId("User", "id");
                Logger.getAnonymousLogger()
                        .info("Next id is " + nextId + " at " + System.currentTimeMillis());
                Thread.sleep(10);
            } catch (InterruptedException | SQLException e) {
                throw new RuntimeException(e);
            }
        });
    }
    try {
        for (Thread thread : threads) {
            thread.start();
        }
        for (Thread thread : threads) {
            thread.join();
        }
        int expectedNextId = nextIdBeforeTesting + nbThreads;
        Logger.getAnonymousLogger()
                .info("Verifying nextId is " + expectedNextId + " at " + System.currentTimeMillis());
        assertThat(actualMaxIdInUniqueIdFor("User"), is(expectedNextId));
    } finally {
        for (Thread thread : threads) {
            if (thread.isAlive()) {
                thread.interrupt();
            }
        }
    }
}