List of usage examples for java.lang.Thread.interrupted()
public static boolean interrupted()
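Thread.interrupted() is a static method that reports whether the current thread has been interrupted and, as a side effect, clears the interrupt status (unlike the instance method isInterrupted(), which leaves the status untouched). The examples below mostly use it in one of two ways: as a loop condition in worker threads, or to detect a pending interrupt and translate it into an exception. A minimal sketch of the loop pattern, with illustrative class and method names:

public class Worker implements Runnable {
    @Override
    public void run() {
        // Runs until another thread calls interrupt() on this worker's thread.
        // interrupted() clears the flag, so check it exactly once per iteration.
        while (!Thread.interrupted()) {
            doUnitOfWork();
        }
    }

    private void doUnitOfWork() {
        // ... application-specific work ...
    }
}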
From source file:org.archive.modules.fetcher.CookieStoreTest.java
public void testConcurrentLoadNoDomainCookieLimitBreach() throws IOException, InterruptedException {
    bdbCookieStore().clear();
    basicCookieStore().clear();
    final Random rand = new Random();
    Runnable runnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (!Thread.interrupted()) {
                    BasicClientCookie cookie = new BasicClientCookie(UUID.randomUUID().toString(),
                            UUID.randomUUID().toString());
                    cookie.setDomain("d" + rand.nextInt() + ".example.com");
                    bdbCookieStore().addCookie(cookie);
                    basicCookieStore().addCookie(cookie);
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };

    Thread[] threads = new Thread[200];
    for (int i = 0; i < threads.length; i++) {
        threads[i] = new Thread(runnable);
        threads[i].setName("cookie-load-test-" + i);
        threads[i].start();
    }
    Thread.sleep(5000);
    for (int i = 0; i < threads.length; i++) {
        threads[i].interrupt();
    }
    for (int i = 0; i < threads.length; i++) {
        threads[i].join();
    }

    List<Cookie> bdbCookieList = bdbCookieStore().getCookies();
    assertTrue(bdbCookieList.size() > 3000);
    assertCookieListsEquivalent(bdbCookieList, basicCookieStore().getCookies());
}
From source file:net.sf.jasperreports.engine.export.JRRtfExporter.java
/**
 * Export report in .rtf format to a stream
 * @throws JRException
 * @throws IOException
 */
protected void exportReportToWriter(Writer writer) throws JRException, IOException {
    colorWriter = new FileBufferedWriter();
    fontWriter = new FileBufferedWriter();
    contentWriter = new FileBufferedWriter();

    List<ExporterInputItem> items = exporterInput.getItems();
    for (reportIndex = 0; reportIndex < items.size(); reportIndex++) {
        ExporterInputItem item = items.get(reportIndex);
        setCurrentExporterInputItem(item);
        List<JRPrintPage> pages = jasperPrint.getPages();
        if (pages != null && pages.size() > 0) {
            PageRange pageRange = getPageRange();
            int startPageIndex = (pageRange == null || pageRange.getStartPageIndex() == null) ? 0
                    : pageRange.getStartPageIndex();
            int endPageIndex = (pageRange == null || pageRange.getEndPageIndex() == null) ? (pages.size() - 1)
                    : pageRange.getEndPageIndex();

            contentWriter.write("{\\info{\\nofpages");
            contentWriter.write(String.valueOf(pages.size()));
            contentWriter.write("}}\n");
            contentWriter.write("\\viewkind1\\paperw");
            contentWriter.write(String.valueOf(LengthUtil.twip(jasperPrint.getPageWidth()))); //FIXMEPART rtf does not work in batch mode
            contentWriter.write("\\paperh");
            contentWriter.write(String.valueOf(LengthUtil.twip(jasperPrint.getPageHeight())));
            contentWriter.write("\\marglsxn");
            contentWriter.write(String.valueOf(
                    LengthUtil.twip(jasperPrint.getLeftMargin() == null ? 0 : jasperPrint.getLeftMargin())));
            contentWriter.write("\\margrsxn");
            contentWriter.write(String.valueOf(
                    LengthUtil.twip(jasperPrint.getRightMargin() == null ? 0 : jasperPrint.getRightMargin())));
            contentWriter.write("\\margtsxn");
            contentWriter.write(String.valueOf(
                    LengthUtil.twip(jasperPrint.getTopMargin() == null ? 0 : jasperPrint.getTopMargin())));
            contentWriter.write("\\margbsxn");
            contentWriter.write(String.valueOf(
                    LengthUtil.twip(jasperPrint.getBottomMargin() == null ? 0 : jasperPrint.getBottomMargin())));
            contentWriter.write("\\deftab");
            contentWriter.write(String.valueOf(
                    LengthUtil.twip(new JRBasePrintText(jasperPrint.getDefaultStyleProvider())
                            .getParagraph().getTabStopWidth())));

            if (jasperPrint.getOrientationValue() == OrientationEnum.LANDSCAPE) {
                contentWriter.write("\\lndscpsxn");
            }

            for (int pageIndex = startPageIndex; pageIndex <= endPageIndex; pageIndex++) {
                if (Thread.interrupted()) {
                    throw new ExportInterruptedException();
                }
                JRPrintPage page = pages.get(pageIndex);
                contentWriter.write("\n");
                writeAnchor(JR_PAGE_ANCHOR_PREFIX + reportIndex + "_" + (pageIndex + 1));
                boolean lastPageFlag = false;
                if (pageIndex == endPageIndex && reportIndex == (items.size() - 1)) {
                    lastPageFlag = true;
                }
                exportPage(page, lastPageFlag);
            }
        }
    }
    contentWriter.write("}\n");

    contentWriter.close();
    colorWriter.close();
    fontWriter.close();

    // create the header of the rtf file
    writer.write("{\\rtf1\\ansi\\deff0\n");
    // create font and color tables
    writer.write("{\\fonttbl ");
    fontWriter.writeData(writer);
    writer.write("}\n");
    writer.write("{\\colortbl ;");
    colorWriter.writeData(writer);
    writer.write("}\n");

    contentWriter.writeData(writer);
    writer.flush();

    contentWriter.dispose();
    colorWriter.dispose();
    fontWriter.dispose();
}
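JRRtfExporter polls Thread.interrupted() once per exported page so a long-running export can be cancelled cooperatively, translating the (now cleared) flag into the domain-specific ExportInterruptedException. A minimal sketch of the same polling pattern, using a hypothetical CancelledException and BatchProcessor of my own invention:

// A hypothetical domain-specific cancellation exception.
class CancelledException extends Exception {
}

class BatchProcessor {
    void processAll(java.util.List<Runnable> units) throws CancelledException {
        for (Runnable unit : units) {
            // Poll once per unit of work; interrupted() also clears the flag.
            if (Thread.interrupted()) {
                throw new CancelledException();
            }
            unit.run();
        }
    }
}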
From source file:rocks.inspectit.ui.rcp.storage.util.DataRetriever.java
/**
 * Retrieves the wanted data described in the {@link StorageDescriptor} from the desired
 * offline-available storage.
 * <p>
 * It is not guaranteed that the amount of returned objects in the list is the same as the
 * amount of provided descriptors. If some of the descriptors point to the wrong files or file
 * positions, this can influence the rest of the descriptors that point to the same file. Thus,
 * special care needs to be taken that the data in the descriptors is correct.
 *
 * @param <E>
 *            Type of the objects wanted.
 * @param localStorageData
 *            {@link LocalStorageData} that points to the wanted storage.
 * @param descriptors
 *            Descriptors.
 * @return List of objects of the supplied generic type. Note that if the data described in the
 *         descriptor is not of the supplied generic type, a casting exception will be thrown.
 * @throws SerializationException
 *             If {@link SerializationException} occurs.
 * @throws IOException
 *             If {@link IOException} occurs.
 */
@SuppressWarnings("unchecked")
public <E extends DefaultData> List<E> getDataLocally(LocalStorageData localStorageData,
        List<IStorageDescriptor> descriptors) throws IOException, SerializationException {
    Map<Integer, List<IStorageDescriptor>> separateFilesGroup = createFilesGroup(descriptors);
    List<IStorageDescriptor> optimizedDescriptors = new ArrayList<>();
    for (Map.Entry<Integer, List<IStorageDescriptor>> entry : separateFilesGroup.entrySet()) {
        StorageDescriptor storageDescriptor = null;
        for (IStorageDescriptor descriptor : entry.getValue()) {
            if (null == storageDescriptor) {
                storageDescriptor = new StorageDescriptor(entry.getKey());
                storageDescriptor.setPositionAndSize(descriptor.getPosition(), descriptor.getSize());
            } else {
                if (!storageDescriptor.join(descriptor)) {
                    optimizedDescriptors.add(storageDescriptor);
                    storageDescriptor = new StorageDescriptor(entry.getKey());
                    storageDescriptor.setPositionAndSize(descriptor.getPosition(), descriptor.getSize());
                }
            }
        }
        optimizedDescriptors.add(storageDescriptor);
    }

    List<E> receivedData = new ArrayList<>(descriptors.size());
    ISerializer serializer = null;
    try {
        serializer = serializerQueue.take();
    } catch (InterruptedException e) {
        Thread.interrupted();
    }

    InputStream inputStream = null;
    Input input = null;
    try {
        inputStream = streamProvider.getExtendedByteBufferInputStream(localStorageData, optimizedDescriptors);
        input = new Input(inputStream);
        while (KryoUtil.hasMoreBytes(input)) {
            Object object = serializer.deserialize(input);
            E element = (E) object;
            receivedData.add(element);
        }
    } finally {
        if (null != input) {
            input.close();
        }
        serializerQueue.add(serializer);
    }
    return receivedData;
}
From source file:info.novatec.inspectit.rcp.storage.util.DataRetriever.java
/**
 * Retrieves the wanted data described in the {@link StorageDescriptor} from the desired
 * offline-available storage.
 * <p>
 * It is not guaranteed that the amount of returned objects in the list is the same as the
 * amount of provided descriptors. If some of the descriptors point to the wrong files or file
 * positions, this can influence the rest of the descriptors that point to the same file. Thus,
 * special care needs to be taken that the data in the descriptors is correct.
 *
 * @param <E>
 *            Type of the objects wanted.
 * @param localStorageData
 *            {@link LocalStorageData} that points to the wanted storage.
 * @param descriptors
 *            Descriptors.
 * @return List of objects of the supplied generic type. Note that if the data described in the
 *         descriptor is not of the supplied generic type, a casting exception will be thrown.
 * @throws SerializationException
 *             If {@link SerializationException} occurs.
 * @throws IOException
 *             If {@link IOException} occurs.
 */
@SuppressWarnings("unchecked")
public <E extends DefaultData> List<E> getDataLocally(LocalStorageData localStorageData,
        List<IStorageDescriptor> descriptors) throws IOException, SerializationException {
    Map<Integer, List<IStorageDescriptor>> separateFilesGroup = createFilesGroup(descriptors);
    List<IStorageDescriptor> optimizedDescriptors = new ArrayList<IStorageDescriptor>();
    for (Map.Entry<Integer, List<IStorageDescriptor>> entry : separateFilesGroup.entrySet()) {
        StorageDescriptor storageDescriptor = null;
        for (IStorageDescriptor descriptor : entry.getValue()) {
            if (null == storageDescriptor) {
                storageDescriptor = new StorageDescriptor(entry.getKey());
                storageDescriptor.setPositionAndSize(descriptor.getPosition(), descriptor.getSize());
            } else {
                if (!storageDescriptor.join(descriptor)) {
                    optimizedDescriptors.add(storageDescriptor);
                    storageDescriptor = new StorageDescriptor(entry.getKey());
                    storageDescriptor.setPositionAndSize(descriptor.getPosition(), descriptor.getSize());
                }
            }
        }
        optimizedDescriptors.add(storageDescriptor);
    }

    List<E> receivedData = new ArrayList<E>(descriptors.size());
    ISerializer serializer = null;
    try {
        serializer = serializerQueue.take();
    } catch (InterruptedException e) {
        Thread.interrupted();
    }

    InputStream inputStream = null;
    Input input = null;
    try {
        inputStream = streamProvider.getExtendedByteBufferInputStream(localStorageData, optimizedDescriptors);
        input = new Input(inputStream);
        while (KryoUtil.hasMoreBytes(input)) {
            Object object = serializer.deserialize(input);
            E element = (E) object;
            receivedData.add(element);
        }
    } finally {
        if (null != input) {
            input.close();
        }
        serializerQueue.add(serializer);
    }
    return receivedData;
}
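Both DataRetriever variants above (the rocks.inspectit and info.novatec versions of the same method) respond to an InterruptedException from serializerQueue.take() by calling Thread.interrupted(), discarding the result. Since the interrupt status is already cleared when the exception is thrown, that call is effectively a no-op, and the interruption is silently swallowed. The more common convention is to restore the interrupt status so callers further up the stack can observe it. A minimal sketch of that alternative, assuming the same serializerQueue field:

ISerializer serializer = null;
try {
    serializer = serializerQueue.take();
} catch (InterruptedException e) {
    // Re-assert the interrupt status instead of swallowing it, so code
    // higher up the call stack can still detect the interruption.
    Thread.currentThread().interrupt();
}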
From source file:com.cloudbees.jenkins.plugins.bitbucket.client.BitbucketCloudApiClient.java
/**
 * {@inheritDoc}
 */
@NonNull
@Override
public List<BitbucketPullRequestValue> getPullRequests() throws InterruptedException, IOException {
    List<BitbucketPullRequestValue> pullRequests = new ArrayList<>();
    UriTemplate template = UriTemplate.fromTemplate(REPO_URL_TEMPLATE + "/pullrequests{?page,pagelen}")
            .set("owner", owner).set("repo", repositoryName).set("pagelen", 50);
    BitbucketPullRequests page;
    int pageNumber = 1;
    do {
        if (Thread.interrupted()) {
            throw new InterruptedException();
        }
        String url = template //
                .set("page", pageNumber++) //
                .expand();
        String response = getRequest(url);
        try {
            page = JsonParser.toJava(response, BitbucketPullRequests.class);
        } catch (IOException e) {
            throw new IOException("I/O error when parsing response from URL: " + url, e);
        }
        pullRequests.addAll(page.getValues());
    } while (page.getNext() != null);
    for (BitbucketPullRequestValue pullRequest : pullRequests) {
        setupClosureForPRBranch(pullRequest);
    }
    return pullRequests;
}
From source file:gr.ntua.h2rdf.inputFormat2.TableMapReduceUtil.java
public static void initCredentials(Job job) throws IOException {
    if (User.isHBaseSecurityEnabled(job.getConfiguration())) {
        try {
            // init credentials for remote cluster
            String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS);
            if (quorumAddress != null) {
                String[] parts = ZKUtil.transformClusterKey(quorumAddress);
                Configuration peerConf = HBaseConfiguration.create(job.getConfiguration());
                peerConf.set(HConstants.ZOOKEEPER_QUORUM, parts[0]);
                peerConf.set("hbase.zookeeper.client.port", parts[1]);
                peerConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parts[2]);
                User.getCurrent().obtainAuthTokenForJob(peerConf, job);
            }
            User.getCurrent().obtainAuthTokenForJob(job.getConfiguration(), job);
        } catch (InterruptedException ie) {
            LOG.info("Interrupted obtaining user authentication token");
            Thread.interrupted();
        }
    }
}
From source file:org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler.java
/**
 * Update ZK or META. This can take a while if for example the
 * hbase:meta is not available -- if server hosting hbase:meta crashed and we are
 * waiting on it to come back -- so run in a thread and keep updating znode
 * state meantime so master doesn't timeout our region-in-transition.
 * Caller must cleanup region if this fails.
 */
boolean updateMeta(final HRegion r) {
    if (this.server.isStopped() || this.rsServices.isStopping()) {
        return false;
    }
    // Object we do wait/notify on. Make it boolean. If set, we're done.
    // Else, wait.
    final AtomicBoolean signaller = new AtomicBoolean(false);
    PostOpenDeployTasksThread t = new PostOpenDeployTasksThread(r, this.server, this.rsServices, signaller);
    t.start();
    // Post open deploy task:
    // meta => update meta location in ZK
    // other region => update meta
    // It could fail if ZK/meta is not available and
    // the update runs out of retries.
    long now = System.currentTimeMillis();
    long lastUpdate = now;
    boolean tickleOpening = true;
    while (!signaller.get() && t.isAlive() && !this.server.isStopped() && !this.rsServices.isStopping()
            && isRegionStillOpening()) {
        long elapsed = now - lastUpdate;
        if (elapsed > 120000) { // 2 minutes, no need to tickleOpening too often
            // Only tickle OPENING if postOpenDeployTasks is taking some time.
            lastUpdate = now;
            tickleOpening = tickleOpening("post_open_deploy");
        }
        synchronized (signaller) {
            try {
                // Wait for 10 seconds, so that server shutdown
                // won't take too long if this thread happens to run.
                signaller.wait(10000);
            } catch (InterruptedException e) {
                // Go to the loop check.
            }
        }
        now = System.currentTimeMillis();
    }
    // Is thread still alive? We may have left above loop because server is
    // stopping or we timed out the edit. If so, interrupt it.
    if (t.isAlive()) {
        if (!signaller.get()) {
            // Thread still running; interrupt
            LOG.debug("Interrupting thread " + t);
            t.interrupt();
        }
        try {
            t.join();
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted joining " + r.getRegionInfo().getRegionNameAsString(), ie);
            Thread.currentThread().interrupt();
        }
    }
    // Was there an exception opening the region? This should trigger on
    // InterruptedException too. If so, we failed. Even if tickle opening fails
    // then it is a failure.
    return ((!Thread.interrupted() && t.getException() == null) && tickleOpening);
}
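The return expression above uses Thread.interrupted() both to test for an interrupt delivered during the wait loop and to clear it before the handler's thread is reused. Because the method clears the flag as it reads it, two back-to-back calls never both return true. A minimal standalone sketch demonstrating the check-and-clear behavior:

public class InterruptedDemo {
    public static void main(String[] args) {
        Thread.currentThread().interrupt();       // set the interrupt flag
        System.out.println(Thread.interrupted()); // true  -- reads and clears the flag
        System.out.println(Thread.interrupted()); // false -- flag was already cleared
    }
}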
From source file:org.eclipse.kura.deployment.agent.impl.DeploymentAgent.java
private void installer() {
    do {
        try {
            try {
                while (this.m_instPackageUrls.isEmpty()) {
                    synchronized (this.m_instPackageUrls) {
                        this.m_instPackageUrls.wait();
                    }
                }

                String url = this.m_instPackageUrls.peek();
                if (url != null) {
                    s_logger.info("About to install package at URL {}", url);
                    DeploymentPackage dp = null;
                    Exception ex = null;
                    try {
                        dp = installDeploymentPackageInternal(url);
                    } catch (Exception e) {
                        ex = e;
                        s_logger.error("Exception installing package at URL {}", url, e);
                    } finally {
                        boolean successful = dp != null;
                        s_logger.info("Posting INSTALLED event for package at URL {}: {}", url,
                                successful ? "successful" : "unsuccessful");
                        this.m_instPackageUrls.poll();
                        postInstalledEvent(dp, url, successful, ex);
                    }
                }
            } catch (InterruptedException e) {
                s_logger.info("Exiting...");
                Thread.interrupted();
                return;
            }
        } catch (Throwable t) {
            s_logger.error("Unexpected throwable", t);
        }
    } while (true);
}
From source file:com.yahoo.ads.pb.kafka.KafkaSimpleConsumer.java
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
    List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
    FetchResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        ensureConsumer(previousLeader);

        FetchRequest request = new FetchRequestBuilder().clientId(clientId)
                .addFetch(topic, partitionId, offset, 100000000).maxWait(timeoutMs).minBytes(1).build();

        //logger.debug("fetch offset {}", offset);

        try {
            response = consumer.fetch(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in fetch for {} - {} with offset {}",
                        e.getClass().getName(), topic, partitionId, offset);

                throw new InterruptedException();
            }
            logger.warn("caught exception in fetch {} - {}", topic, partitionId, e);
            response = null;
        }

        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId)
                    : ErrorMapping.UnknownCode();
            logger.debug("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset,
                    errorCode);

            boolean needNewLeader = false;
            if (errorCode == ErrorMapping.RequestTimedOutCode()) {
                //TODO: leave it here
            } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
                //TODO: fetch the earliest offset or latest offset ?
                // seems no obvious correct way to handle it
                long earliestOffset = getOffset(true);
                logger.debug("get earliest offset {} for {} - {}", earliestOffset, topic, partitionId);
                if (earliestOffset < 0) {
                    needNewLeader = true;
                } else {
                    newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
                    offset = earliestOffset;
                    continue;
                }
            } else {
                needNewLeader = true;
            }

            if (needNewLeader) {
                stopConsumer();
                previousLeader = leaderBroker;
                leaderBroker = null;
                continue;
            }
        } else {
            break;
        }
    }

    return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset)
            : (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}
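The fetch above relies on a documented NIO behavior: when a thread blocked on an interruptible channel operation is interrupted, the channel is closed, ClosedByInterruptException (an IOException subclass) is thrown, and the thread's interrupt status is set. Checking Thread.interrupted() in the catch block therefore distinguishes interrupt-induced failures from ordinary I/O errors. A minimal standalone sketch of the same detection idiom; the read method and its parameters are illustrative:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public class InterruptAwareRead {
    // Reads from an NIO channel and converts an interrupt-induced
    // ClosedByInterruptException back into the standard InterruptedException.
    static int read(ReadableByteChannel channel, ByteBuffer buffer)
            throws IOException, InterruptedException {
        try {
            return channel.read(buffer);
        } catch (IOException e) {
            // If the thread was interrupted mid-read, NIO closed the channel
            // and set the interrupt flag; interrupted() detects and clears it.
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            throw e; // a genuine I/O failure
        }
    }
}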