List of usage examples for java.lang.InterruptedException

public InterruptedException()
Constructs an InterruptedException with no detail message.
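The no-argument constructor is the form used throughout the examples below: code that detects a pending interrupt (or an internal shutdown flag) clears the interrupt status with Thread.interrupted() and then surfaces it by throwing a fresh InterruptedException. A minimal sketch of that pattern follows; the BatchProcessor class and its method names are illustrative only and do not come from any of the projects listed below.

    import java.util.List;

    public class BatchProcessor {

        /**
         * Processes items one by one, checking for interruption between items.
         * Thread.interrupted() clears the thread's interrupt flag, so the pending
         * interrupt is re-surfaced by throwing a new InterruptedException.
         */
        public void processAll(List<String> items) throws InterruptedException {
            for (String item : items) {
                if (Thread.interrupted()) {
                    throw new InterruptedException();
                }
                process(item);
            }
        }

        private void process(String item) {
            // placeholder for per-item work
            System.out.println("processed " + item);
        }
    }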
From source file:org.geowebcache.diskquota.CacheCleaner.java
/**
 * This method is thread safe and will throw InterruptedException if the thread has been
 * interrupted or the {@link #destroy() shutdown hook} has been called to signal the calling
 * code of premature termination.
 *
 * @param layerNames
 *            the layers to expire tile pages from
 * @param quotaResolver
 *            live limit and used quota to monitor until it reaches its limit
 * @throws InterruptedException
 * @see {@link org.geowebcache.diskquota.ExpirationPolicy#expireByLayerNames}
 */
public void expireByLayerNames(final Set<String> layerNames, final QuotaResolver quotaResolver,
        final QuotaStore pageStore) throws InterruptedException {

    Quota limit;
    Quota used;
    Quota excess;

    while (true) {
        if (shutDown || Thread.currentThread().isInterrupted()) {
            throw new InterruptedException();
        }
        // get it every time in case the admin changed it while we're processing
        limit = quotaResolver.getLimit();
        used = quotaResolver.getUsed();
        excess = used.difference(limit);
        if (excess.getBytes().compareTo(BigInteger.ZERO) <= 0) {
            log.info("Reached back Quota: " + limit.toNiceString() + " (" + used.toNiceString()
                    + ") for layers " + layerNames);
            return;
        }
        // same thing, check it every time
        ExpirationPolicy expirationPolicy = quotaResolver.getExpirationPolicy();
        if (null == expirationPolicy) {
            log.warn("Aborting disk quota enforcement task, no expiration policy defined for layers "
                    + layerNames);
            return;
        }
        TilePage tilePage = null;
        if (ExpirationPolicy.LFU.equals(expirationPolicy)) {
            tilePage = pageStore.getLeastFrequentlyUsedPage(layerNames);
        } else if (ExpirationPolicy.LRU.equals(expirationPolicy)) {
            tilePage = pageStore.getLeastRecentlyUsedPage(layerNames);
        } else {
            throw new IllegalStateException("Unrecognized expiration policy: " + expirationPolicy);
        }
        if (tilePage == null) {
            limit = quotaResolver.getLimit();
            Quota usedQuota = quotaResolver.getUsed();
            if (excess.getBytes().compareTo(BigInteger.ZERO) > 0) {
                log.warn("No more pages to expire, check if your disk quota"
                        + " database is out of date with your blob store. Quota: "
                        + limit.toNiceString() + " used: " + usedQuota.toNiceString());
            }
            return;
        }
        if (log.isDebugEnabled()) {
            log.debug("Expiring tile page " + tilePage + " based on the global " + expirationPolicy
                    + " expiration policy");
        }
        if (shutDown || Thread.currentThread().isInterrupted()) {
            throw new InterruptedException();
        }
        expirePage(pageStore, tilePage);
    }
}
From source file:hudson.scm.subversion.CheckoutUpdater.java
@Override
public UpdateTask createTask() {
    return new UpdateTask() {
        private static final long serialVersionUID = 8349986526712487762L;

        @Override
        public List<External> perform() throws IOException, InterruptedException {
            final SVNUpdateClient svnuc = clientManager.getUpdateClient();
            final List<External> externals = new ArrayList<External>(); // store discovered externals here

            listener.getLogger().println("Cleaning local Directory " + location.getLocalDir());
            Util.deleteContentsRecursive(new File(ws, location.getLocalDir()));

            // buffer the output by a separate thread so that the update operation
            // won't be blocked by the remoting of the data
            PipedOutputStream pos = new PipedOutputStream();
            StreamCopyThread sct = new StreamCopyThread("svn log copier", new PipedInputStream(pos),
                    listener.getLogger());
            sct.start();

            try {
                SVNRevision r = getRevision(location);
                String revisionName = r.getDate() != null ? fmt.format(r.getDate()) : r.toString();

                listener.getLogger()
                        .println("Checking out " + location.remote + " at revision " + revisionName);

                File local = new File(ws, location.getLocalDir());
                SubversionUpdateEventHandler eventHandler = new SubversionUpdateEventHandler(
                        new PrintStream(pos), externals, local, location.getLocalDir());
                svnuc.setEventHandler(eventHandler);
                svnuc.setExternalsHandler(eventHandler);
                svnuc.setIgnoreExternals(location.isIgnoreExternalsOption());

                SVNDepth svnDepth = getSvnDepth(location.getDepthOption());
                svnuc.doCheckout(location.getSVNURL(), local.getCanonicalFile(), SVNRevision.HEAD, r,
                        svnDepth, true);
            } catch (SVNCancelException e) {
                if (isAuthenticationFailedError(e)) {
                    e.printStackTrace(listener.error("Failed to check out " + location.remote));
                    return null;
                } else {
                    listener.error("Subversion checkout has been canceled");
                    throw (InterruptedException) new InterruptedException().initCause(e);
                }
            } catch (SVNException e) {
                e.printStackTrace(listener.error("Failed to check out " + location.remote));
                throw new IOException("Failed to check out " + location.remote, e);
            } finally {
                try {
                    pos.close();
                } finally {
                    try {
                        sct.join(); // wait for all data to be piped.
                    } catch (InterruptedException e) {
                        throw new IOException2("interrupted", e);
                    }
                }
            }

            return externals;
        }
    };
}
From source file:org.cloudfoundry.tools.timeout.HotSwappingTimeoutProtectionStrategyTest.java
@Test
public void shouldTimeoutAwaitingPollResponse() throws Exception {
    this.strategy.setThreshold(0);
    given(this.requestCoordinator.consumePollResponse()).willReturn(null);
    willThrow(new InterruptedException()).given(this.requestCoordinator).awaitPollResponse(FAIL_TIMEOUT);
    HttpServletResponseMonitorFactory monitorFactory = this.strategy.handleRequest(this.request);
    this.thrown.expect(IllegalStateException.class);
    this.thrown.expectMessage("Timeout waiting for poll");
    monitorFactory.getMonitor();
}
From source file:org.kalypso.simulation.core.ant.FeatureVisitorOperation.java
private IStatus executeFeaturePathes(final IProgressMonitor monitor, final GMLWorkspace workspace)
        throws InterruptedException {
    final String[] featurePathes = m_visitorTask.getFeaturePathes();

    monitor.beginTask(m_visitorTask.getVisitorTaskDescription(), featurePathes.length);
    monitor.subTask("wird bearbeitet..."); // "being processed..."

    final IStatusCollector stati = new StatusCollector(KalypsoSimulationCorePlugin.getID());

    // FIXME: ERROR-HANDLING: always add OK stati (but with good messages...)
    // FIXME: ERROR-HANDLING: one status per feature path (but remove this level if we have only one path)

    for (final String featurePath : featurePathes) {
        if (monitor.isCanceled())
            throw new InterruptedException();

        try {
            if (featurePathes.length > 1)
                monitor.subTask(String.format("Bearbeite %s", featurePath)); // "Processing %s"

            final IStatus result = visitPath(workspace, featurePath,
                    new SubProgressMonitor(monitor, 1, SubProgressMonitor.PREPEND_MAIN_LABEL_TO_SUBTASK));
            if (!result.isOK())
                stati.add(result);
        } catch (final IllegalArgumentException e) {
            final IStatus status = StatusUtilities.statusFromThrowable(e);
            if (m_visitorTask.doIgnoreIllegalFeaturePath()) {
                // "Feature is ignored (...)"
                getLogger().log(Level.WARNING, -1, "Feature wird ignoriert (" + status.getMessage() + ")");
            } else {
                getLogger().log(Level.WARNING, -1, status.getMessage());
                stati.add(status);
            }
        } catch (final Throwable t) {
            final IStatus status = StatusUtilities.statusFromThrowable(t);
            getLogger().log(Level.SEVERE, -1, status.getMessage());
            stati.add(status);
        }
    }

    // TODO: where to get the message from?
    // TODO: get message from the visitor-task
    return stati.asMultiStatusOrOK(""); //$NON-NLS-1$
}
From source file:org.roda_project.commons_ip.model.impl.eark.EARKUtils.java
protected static void addOtherMetadataToZipAndMETS(Map<String, ZipEntryInfo> zipEntries,
        MetsWrapper metsWrapper, List<IPMetadata> otherMetadata, String representationId)
        throws IPException, InterruptedException {
    if (otherMetadata != null && !otherMetadata.isEmpty()) {
        for (IPMetadata om : otherMetadata) {
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }
            IPFile file = om.getMetadata();
            String otherMetadataPath = IPConstants.OTHER_FOLDER
                    + ModelUtils.getFoldersFromList(file.getRelativeFolders()) + file.getFileName();
            MdRef mdRef = EARKMETSUtils.addOtherMetadataToMETS(metsWrapper, om, otherMetadataPath);
            if (representationId != null) {
                otherMetadataPath = IPConstants.REPRESENTATIONS_FOLDER + representationId
                        + IPConstants.ZIP_PATH_SEPARATOR + otherMetadataPath;
            }
            ZIPUtils.addMdRefFileToZip(zipEntries, file.getPath(), otherMetadataPath, mdRef);
        }
    }
}
From source file:com.splout.db.dnode.Fetcher.java
private File hdfsFetch(Path fromPath, Reporter reporter) throws IOException, InterruptedException {
    UUID uniqueId = UUID.randomUUID();
    File toFile = new File(tempDir, uniqueId.toString() + "/" + fromPath.getName());
    File toDir = new File(toFile.getParent());
    if (toDir.exists()) {
        FileUtils.deleteDirectory(toDir);
    }
    toDir.mkdirs();
    Path toPath = new Path(toFile.getCanonicalPath());

    FileSystem fS = fromPath.getFileSystem(hadoopConf);
    FileSystem tofS = FileSystem.getLocal(hadoopConf);

    Throttler throttler = new Throttler((double) bytesPerSecThrottle);
    try {
        for (FileStatus fStatus : fS.globStatus(fromPath)) {
            log.info("Copying " + fStatus.getPath() + " to " + toPath);
            long bytesSoFar = 0;

            FSDataInputStream iS = fS.open(fStatus.getPath());
            FSDataOutputStream oS = tofS.create(toPath);

            byte[] buffer = new byte[downloadBufferSize];
            int nRead;
            while ((nRead = iS.read(buffer, 0, buffer.length)) != -1) {
                // Needed to be able to be interrupted at any moment.
                if (Thread.interrupted()) {
                    iS.close();
                    oS.close();
                    cleanDirNoExceptions(toDir);
                    throw new InterruptedException();
                }
                bytesSoFar += nRead;
                oS.write(buffer, 0, nRead);

                throttler.incrementAndThrottle(nRead);
                if (bytesSoFar >= bytesToReportProgress) {
                    reporter.progress(bytesSoFar);
                    bytesSoFar = 0L;
                }
            }

            if (reporter != null) {
                reporter.progress(bytesSoFar);
            }

            oS.close();
            iS.close();
        }

        return toDir;
    } catch (ClosedByInterruptException e) {
        // This can be thrown by the method read.
        cleanDirNoExceptions(toDir);
        throw new InterruptedIOException();
    }
}
From source file:net.paoding.spdy.client.netty.ResponseFuture.java
public ResponseFuture<Request, Response> await() throws InterruptedException {
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }
    synchronized (this) {
        while (!done) {
            checkDeadLock();
            waiters++;
            try {
                this.wait();
            } finally {
                waiters--;
            }
        }
    }
    return this;
}
From source file:gobblin.cluster.StreamingJobConfigurationManager.java
private void fetchJobSpecs() throws ExecutionException, InterruptedException {
    List<Pair<SpecExecutorInstance.Verb, Spec>> changesSpecs =
            (List<Pair<SpecExecutorInstance.Verb, Spec>>) this.specExecutorInstanceConsumer
                    .changedSpecs().get();

    // propagate thread interruption so that caller will exit from loop
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }

    for (Pair<SpecExecutorInstance.Verb, Spec> entry : changesSpecs) {
        SpecExecutorInstance.Verb verb = entry.getKey();

        if (verb.equals(SpecExecutorInstance.Verb.ADD)) {
            // Handle addition
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postNewJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        } else if (verb.equals(SpecExecutorInstanceConsumer.Verb.UPDATE)) {
            // Handle update
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postUpdateJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        } else if (verb.equals(SpecExecutorInstanceConsumer.Verb.DELETE)) {
            // Handle delete
            Spec anonymousSpec = (Spec) entry.getValue();
            postDeleteJobConfigArrival(anonymousSpec.getUri().toString(), new Properties());
        }
    }
}
From source file:net.minecraftforge.fml.server.FMLServerHandler.java
@Override
public void queryUser(StartupQuery query) throws InterruptedException {
    if (query.getResult() == null) {
        FMLLog.warning("%s", query.getText());
        query.finish();
    } else {
        String text = query.getText()
                + "\n\nRun the command /fml confirm or /fml cancel to proceed."
                + "\nAlternatively start the server with -Dfml.queryResult=confirm or -Dfml.queryResult=cancel to preselect the answer.";

        FMLLog.warning("%s", text);

        if (!query.isSynchronous())
            return; // no-op until mc does commands in another thread (if ever)

        boolean done = false;

        while (!done && server.isServerRunning()) {
            if (Thread.interrupted())
                throw new InterruptedException();

            DedicatedServer dedServer = (DedicatedServer) server;

            // rudimentary command processing, check for fml confirm/cancel and stop commands
            synchronized (dedServer.pendingCommandList) {
                for (Iterator<PendingCommand> it = GenericIterableFactory
                        .newCastingIterable(dedServer.pendingCommandList, PendingCommand.class)
                        .iterator(); it.hasNext();) {
                    String cmd = it.next().command.trim().toLowerCase();

                    if (cmd.equals("/fml confirm")) {
                        FMLLog.info("confirmed");
                        query.setResult(true);
                        done = true;
                        it.remove();
                    } else if (cmd.equals("/fml cancel")) {
                        FMLLog.info("cancelled");
                        query.setResult(false);
                        done = true;
                        it.remove();
                    } else if (cmd.equals("/stop")) {
                        StartupQuery.abort();
                    }
                }
            }

            Thread.sleep(10L);
        }

        query.finish();
    }
}
From source file:com.titilink.camel.rest.util.OtherUtil.java
/**
 * Blocks the calling thread for the given amount of time, using Object.wait()
 * on the supplied lock object instead of Thread.sleep().
 *
 * @param lockObj  the object to wait on
 * @param sometime how long to block, in milliseconds
 * @throws InterruptedException if the waiting thread is interrupted
 */
public static void blockSomeTime(final Object lockObj, long sometime) throws InterruptedException {
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }
    synchronized (lockObj) {
        long waitTime = sometime;
        long start = OtherUtil.getCurrentTime();
        try {
            for (;;) {
                if (waitTime > 0) {
                    lockObj.wait(waitTime);
                } else {
                    break;
                }
                waitTime = sometime - (OtherUtil.getCurrentTime() - start);
            }
        } catch (InterruptedException ex) {
            lockObj.notifyAll();
            throw ex;
        }
    }
}