List of usage examples for java.lang.InterruptedException

public InterruptedException()

Constructs an InterruptedException with no detail message.
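All of the examples below use this no-argument constructor in the same basic way: a long-running loop periodically checks the thread's interrupt status and, if it is set, throws a fresh InterruptedException so the caller can abort cleanly. A minimal sketch of that pattern (the class and method names here are illustrative, not taken from any of the source files below):

    import java.util.List;

    public final class InterruptiblePattern {

        // Processes tasks one at a time, surfacing cancellation to the caller.
        // Thread.interrupted() clears the interrupt flag, so throwing the
        // checked exception re-signals the cancellation the check consumed.
        public void processAll(List<Runnable> tasks) throws InterruptedException {
            for (Runnable task : tasks) {
                if (Thread.interrupted()) {
                    throw new InterruptedException();
                }
                task.run();
            }
        }
    }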
From source file: edu.uchc.octane.ParticleAnalysis.java

/**
 * Analyze the image
 *
 * @param ip        The image to be analyzed
 * @param mask      A rectangle of region of interest
 * @param threshold Lowest intensity to be analyzed
 * @param noise     The noise threshold of the watershed algorithm
 * @throws InterruptedException
 */
public void process(ImageProcessor ip, Rectangle mask, int threshold, int noise) throws InterruptedException {
    int border = 1;

    if (g_ != null) {
        // g_.setImageData(ip, isZeroBg_);
        border = g_.getWindowSize();
    }

    width_ = ip.getWidth();
    height_ = ip.getHeight();

    int[] offsets = { -width_, -width_ + 1, +1, +width_ + 1, +width_, +width_ - 1, -1, -width_ - 1 };

    Rectangle bbox = new Rectangle(border, border, width_ - 2 * border, height_ - 2 * border);
    bbox = bbox.intersection(mask);

    ArrayList<Pixel> pixels = new ArrayList<Pixel>();
    for (int y = bbox.y; y < bbox.y + bbox.height; y++) {
        for (int x = bbox.x; x < bbox.x + bbox.width; x++) {
            int v = ip.get(x, y);
            if (v > threshold) {
                pixels.add(new Pixel(x, y, v));
            }
        }
    }
    Collections.sort(pixels);

    nParticles_ = 0;
    x_ = new double[pixels.size()];
    y_ = new double[pixels.size()];
    z_ = new double[pixels.size()];
    h_ = new double[pixels.size()];
    e_ = new double[pixels.size()];

    FloodState floodState = new FloodState(width_, height_);
    floodState.floodBorders(bbox);

    int idxList, lenList;
    int[] listOfIndexes = new int[width_ * height_];

    for (Pixel p : pixels) {
        if (Thread.interrupted()) {
            throw (new InterruptedException());
        }

        int index = p.x + width_ * p.y;
        if (floodState.isProcessed(index)) {
            continue;
        }

        int v = p.value;
        boolean isMax = true;

        idxList = 0;
        lenList = 1;
        listOfIndexes[0] = index;
        floodState.flood(index);

        do {
            index = listOfIndexes[idxList];
            for (int d = 0; d < 8; d++) { // analyze all neighbors (in 8 directions) at the same level
                int index2 = index + offsets[d];

                if (floodState.isProcessed(index2)) { // conflict
                    isMax = false;
                    break;
                }

                if (!floodState.isFlooded(index2)) {
                    int v2 = ip.get(index2);
                    if (v2 >= v - noise) {
                        listOfIndexes[lenList++] = index2;
                        floodState.flood(index2);
                    }
                }
            }
        } while (++idxList < lenList);

        for (idxList = 0; idxList < lenList; idxList++) {
            floodState.process(listOfIndexes[idxList]);
        }

        if (isMax) {
            if (g_ != null) {
                g_.setInitialCoordinates(p.x, p.y);
                try {
                    double[] result = g_.fit();
                    if (result == null) {
                        continue;
                    }

                    double h = g_.getH();
                    if (h < noise || h < getHeightMin() || h > getHeightMax()) {
                        continue;
                    }

                    double e = g_.getE();
                    if (e < getFittingQualityMin()) {
                        continue;
                    }

                    x_[nParticles_] = g_.getX();
                    y_[nParticles_] = g_.getY();
                    z_[nParticles_] = g_.getZ();
                    h_[nParticles_] = h;
                    e_[nParticles_] = e;
                    nParticles_++;
                } catch (MathIllegalStateException e) { // failed fitting
                    continue;
                }
            } else {
                x_[nParticles_] = (double) p.x;
                y_[nParticles_] = (double) p.y;
                h_[nParticles_] = (double) p.value;
                nParticles_++;
            }
        }
    }
}
From source file: de.bmarwell.j9kwsolver.action.CaptchaGetThread.java

@Override
public final Captcha call() throws Exception {
    Captcha captcha = null;
    boolean accepted = false;
    CaptchaReturn cr = null;

    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    /*
     * Step 1:
     * See if Captcha is available
     */
    cr = getRequest();

    if (cr == null) {
        return captcha;
    }

    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    /*
     * Step 2:
     * Send "Accepting Captcha".
     */
    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    LOG.debug("CaptchaID {} gotten! Now accepting.", cr.getCaptchaID());
    accepted = doAccept();

    if (!accepted) {
        LOG.warn("Server didn't leave us Captcha {}.", cr.getCaptchaID());
        return null;
    } else {
        LOG.debug("Server assigned Captcha {} to us.", cr.getCaptchaID());
    }

    /*
     * Step 3:
     * Get Captcha Data.
     */
    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    captcha = getCaptcha(cr);

    return captcha;
}
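Note that this example polls Thread.currentThread().isInterrupted() between steps, while the ParticleAnalysis example above uses the static Thread.interrupted(). The two differ in one important way: isInterrupted() merely observes the interrupt flag, whereas Thread.interrupted() clears it as a side effect. A small illustrative sketch of the difference (hypothetical class name, not from either source file):

    public final class InterruptFlagDemo {
        public static void main(String[] args) {
            // Set the current thread's interrupt flag.
            Thread.currentThread().interrupt();

            // isInterrupted() only observes the flag; it stays set afterwards.
            System.out.println(Thread.currentThread().isInterrupted()); // true
            System.out.println(Thread.currentThread().isInterrupted()); // still true

            // Thread.interrupted() observes and clears the flag.
            System.out.println(Thread.interrupted()); // true
            System.out.println(Thread.interrupted()); // false - flag was cleared
        }
    }

Either way, throwing a new InterruptedException immediately after the check converts the (possibly cleared) flag into a checked exception for the caller.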
From source file: at.alladin.rmbt.android.impl.TracerouteAndroidImpl.java

public List<HopDetail> call() throws Exception {
    isRunning.set(true);

    List<HopDetail> pingDetailList = new ArrayList<HopDetail>();
    final Runtime runtime = Runtime.getRuntime();

    for (int i = 1; i <= maxHops; i++) {
        if (Thread.interrupted() || !isRunning.get()) {
            throw new InterruptedException();
        }

        final long ts = System.nanoTime();
        final Process mIpAddrProcess = runtime.exec("/system/bin/ping -c 1 -t " + i + " -W2 " + host);
        final String proc = readFromProcess(mIpAddrProcess);
        final PingDetailImpl pingDetail = new PingDetailImpl(proc, System.nanoTime() - ts);
        pingDetailList.add(pingDetail);

        if (pingDetail.getReceived() > 0) {
            hasMaxHopsExceeded = false;
            break;
        }
    }

    return pingDetailList;
}
From source file: com.twitter.hbc.httpclient.BasicClientTest.java

@Test
public void testInterruptedExceptionDuringProcessing() throws Exception {
    ClientBase clientBase = new ClientBase("name", mockClient, new HttpHosts("http://hi"),
            new RawEndpoint("/endpoint", HttpConstants.HTTP_GET), mockAuth, mockProcessor,
            mockReconnectionManager, mockRateTracker);

    when(mockStatusLine.getStatusCode()).thenReturn(200);
    doThrow(new InterruptedException()).when(mockProcessor).process();
    when(mockClient.getConnectionManager()).thenReturn(mockConnectionManager);

    BasicClient client = new BasicClient(clientBase, executorService);
    assertFalse(clientBase.isDone());
    client.connect();

    assertTrue(client.waitForFinish(100));
    assertTrue(client.isDone());
    verify(mockProcessor).setup(any(InputStream.class));
    verify(mockConnectionManager, atLeastOnce()).shutdown();
    assertEquals(EventType.STOPPED_BY_ERROR, client.getExitEvent().getEventType());
    assertTrue(client.getExitEvent().getUnderlyingException() instanceof InterruptedException);
}
From source file: org.apache.gobblin.cluster.StreamingJobConfigurationManager.java

private void fetchJobSpecs() throws ExecutionException, InterruptedException {
    List<Pair<SpecExecutor.Verb, Spec>> changesSpecs =
            (List<Pair<SpecExecutor.Verb, Spec>>) this.specConsumer.changedSpecs().get();

    // propagate thread interruption so that caller will exit from loop
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }

    for (Pair<SpecExecutor.Verb, Spec> entry : changesSpecs) {
        SpecExecutor.Verb verb = entry.getKey();

        if (verb.equals(SpecExecutor.Verb.ADD)) {
            // Handle addition
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postNewJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        } else if (verb.equals(SpecExecutor.Verb.UPDATE)) {
            // Handle update
            JobSpec jobSpec = (JobSpec) entry.getValue();
            postUpdateJobConfigArrival(jobSpec.getUri().toString(), jobSpec.getConfigAsProperties());
        } else if (verb.equals(SpecExecutor.Verb.DELETE)) {
            // Handle delete
            Spec anonymousSpec = (Spec) entry.getValue();
            postDeleteJobConfigArrival(anonymousSpec.getUri().toString(), new Properties());
        }
    }
}
From source file: org.apache.tinkerpop.gremlin.server.op.AbstractOpProcessor.java

/**
 * Provides a generic way of iterating a result set back to the client. Implementers should respect the
 * {@link Settings#serializedResponseTimeout} configuration and break the serialization process if
 * it begins to take too long to do so, throwing a {@link java.util.concurrent.TimeoutException} in such
 * cases.
 *
 * @param context The Gremlin Server {@link Context} object containing settings, request message, etc.
 * @param itty    The result to iterate
 * @throws TimeoutException if the time taken to serialize the entire result set exceeds the allowable time.
 */
protected void handleIterator(final Context context, final Iterator itty)
        throws TimeoutException, InterruptedException {
    final ChannelHandlerContext ctx = context.getChannelHandlerContext();
    final RequestMessage msg = context.getRequestMessage();
    final Settings settings = context.getSettings();
    final MessageSerializer serializer = ctx.channel().attr(StateKey.SERIALIZER).get();
    final boolean useBinary = ctx.channel().attr(StateKey.USE_BINARY).get();
    boolean warnOnce = false;

    // sessionless requests are always transaction managed, but in-session requests are configurable.
    final boolean managedTransactionsForRequest = manageTransactions ? true
            : (Boolean) msg.getArgs().getOrDefault(Tokens.ARGS_MANAGE_TRANSACTION, false);

    // we have an empty iterator - happens on stuff like: g.V().iterate()
    if (!itty.hasNext()) {
        // as there is nothing left to iterate if we are transaction managed then we should execute a
        // commit here before we send back a NO_CONTENT which implies success
        if (managedTransactionsForRequest)
            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);
        ctx.writeAndFlush(ResponseMessage.build(msg).code(ResponseStatusCode.NO_CONTENT).create());
        return;
    }

    // timer for the total serialization time
    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    // the batch size can be overridden by the request
    final int resultIterationBatchSize = (Integer) msg.optionalArgs(Tokens.ARGS_BATCH_SIZE)
            .orElse(settings.resultIterationBatchSize);
    List<Object> aggregate = new ArrayList<>(resultIterationBatchSize);

    // use an external control to manage the loop as opposed to just checking hasNext() in the while. this
    // prevents situations where auto transactions create a new transaction after calls to commit() within
    // the loop on calls to hasNext().
    boolean hasMore = itty.hasNext();
    while (hasMore) {
        if (Thread.interrupted())
            throw new InterruptedException();

        // check if an implementation needs to force flush the aggregated results before the iteration batch
        // size is reached.
        final boolean forceFlush = isForceFlushed(ctx, msg, itty);

        // have to check the aggregate size because it is possible that the channel is not writeable (below)
        // so iterating next() if the message is not written and flushed would bump the aggregate size beyond
        // the expected resultIterationBatchSize. Total serialization time for the response remains in
        // effect so if the client is "slow" it may simply timeout.
        //
        // there is a need to check hasNext() on the iterator because if the channel is not writeable the
        // previous pass through the while loop will have next()'d the iterator and if it is "done" then a
        // NoSuchElementException will raise its head. also need a check to ensure that this iteration doesn't
        // require a forced flush which can be forced by sub-classes.
        //
        // this could be placed inside the isWriteable() portion of the if-then below but it seems better to
        // allow iteration to continue into a batch if that is possible rather than just doing nothing at all
        // while waiting for the client to catch up
        if (aggregate.size() < resultIterationBatchSize && itty.hasNext() && !forceFlush)
            aggregate.add(itty.next());

        // send back a page of results if batch size is met or if it's the end of the results being iterated.
        // also check writeability of the channel to prevent OOME for slow clients.
        if (ctx.channel().isWritable()) {
            if (forceFlush || aggregate.size() == resultIterationBatchSize || !itty.hasNext()) {
                final ResponseStatusCode code = itty.hasNext() ? ResponseStatusCode.PARTIAL_CONTENT
                        : ResponseStatusCode.SUCCESS;

                // serialize here because in sessionless requests the serialization must occur in the same
                // thread as the eval. as eval occurs in the GremlinExecutor there's no way to get back to the
                // thread that processed the eval of the script so, we have to push serialization down into that
                Frame frame = null;
                try {
                    frame = makeFrame(ctx, msg, serializer, useBinary, aggregate, code,
                            generateMetaData(ctx, msg, code, itty));
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();

                    // exception is handled in makeFrame() - serialization error gets written back to driver
                    // at that point
                    if (managedTransactionsForRequest)
                        attemptRollback(msg, context.getGraphManager(), settings.strictTransactionManagement);

                    break;
                }

                // track whether there is anything left in the iterator because it needs to be accessed after
                // the transaction could be closed - in that case a call to hasNext() could open a new
                // transaction unintentionally
                final boolean moreInIterator = itty.hasNext();

                try {
                    // only need to reset the aggregation list if there's more stuff to write
                    if (moreInIterator)
                        aggregate = new ArrayList<>(resultIterationBatchSize);
                    else {
                        // iteration and serialization are both complete which means this finished successfully.
                        // note that errors internal to script eval or timeout will rollback given
                        // GremlinServer's global configurations. local errors will get rolled back below
                        // because the exceptions aren't thrown in those cases to be caught by the
                        // GremlinExecutor for global rollback logic. this only needs to be committed if
                        // there are no more items to iterate and serialization is complete
                        if (managedTransactionsForRequest)
                            attemptCommit(msg, context.getGraphManager(), settings.strictTransactionManagement);

                        // exit the result iteration loop as there are no more results left. using this
                        // external control because of the above commit. some graphs may open a new
                        // transaction on the call to hasNext()
                        hasMore = false;
                    }
                } catch (Exception ex) {
                    // a frame may use a Bytebuf which is a countable release - if it does not get written
                    // downstream it needs to be released here
                    if (frame != null)
                        frame.tryRelease();
                    throw ex;
                }

                if (!moreInIterator)
                    iterateComplete(ctx, msg, itty);

                // the flush is called after the commit has potentially occurred. in this way, if a commit was
                // required then it will be 100% complete before the client receives it. the "frame" at this
                // point should have completely detached objects from the transaction (i.e. serialization has
                // occurred) so a new one should not be opened on the flush down the netty pipeline
                ctx.writeAndFlush(frame);
            }
        } else {
            // don't keep triggering this warning over and over again for the same request
            if (!warnOnce) {
                logger.warn(
                        "Pausing response writing as writeBufferHighWaterMark exceeded on {} - writing will continue once client has caught up",
                        msg);
                warnOnce = true;
            }

            // since the client is lagging we can hold here for a period of time for the client to catch up.
            // this isn't blocking the IO thread - just a worker.
            TimeUnit.MILLISECONDS.sleep(10);
        }

        stopWatch.split();
        if (settings.serializedResponseTimeout > 0
                && stopWatch.getSplitTime() > settings.serializedResponseTimeout) {
            final String timeoutMsg = String.format(
                    "Serialization of the entire response exceeded the 'serializeResponseTimeout' setting %s",
                    warnOnce ? "[Gremlin Server paused writes to client as messages were not being consumed quickly enough]"
                            : "");
            throw new TimeoutException(timeoutMsg.trim());
        }
        stopWatch.unsplit();
    }

    stopWatch.stop();
}
From source file: org.executequery.gui.resultset.ResultSetTableModel.java

public void createTable(ResultSet resultSet) {
    if (!isOpenAndValid(resultSet)) {
        clearData();
        return;
    }

    try {
        resetMetaData();
        ResultSetMetaData rsmd = resultSet.getMetaData();

        columnHeaders.clear();
        visibleColumnHeaders.clear();
        tableData.clear();

        int zeroBaseIndex = 0;
        int count = rsmd.getColumnCount();
        for (int i = 1; i <= count; i++) {
            zeroBaseIndex = i - 1;
            columnHeaders.add(new ResultSetColumnHeader(zeroBaseIndex, rsmd.getColumnLabel(i),
                    rsmd.getColumnName(i), rsmd.getColumnType(i), rsmd.getColumnTypeName(i)));
        }

        int recordCount = 0;
        interrupted = false;

        if (holdMetaData) {
            setMetaDataVectors(rsmd);
        }

        List<RecordDataItem> rowData;
        long time = System.currentTimeMillis();
        while (resultSet.next()) {
            if (interrupted || Thread.interrupted()) {
                throw new InterruptedException();
            }

            recordCount++;
            rowData = new ArrayList<RecordDataItem>(count);

            for (int i = 1; i <= count; i++) {
                zeroBaseIndex = i - 1;

                ResultSetColumnHeader header = columnHeaders.get(zeroBaseIndex);
                RecordDataItem value = recordDataItemFactory.create(header);

                try {
                    int dataType = header.getDataType();
                    switch (dataType) {

                    // some drivers (informix for example)
                    // were noticed to return the hashcode from
                    // getObject for -1 data types (eg. longvarchar).
                    // force string for these - others stick with
                    // getObject() for default value formatting

                    case Types.CHAR:
                    case Types.VARCHAR:
                        value.setValue(resultSet.getString(i));
                        break;
                    case Types.DATE:
                        value.setValue(resultSet.getDate(i));
                        break;
                    case Types.TIME:
                        value.setValue(resultSet.getTime(i));
                        break;
                    case Types.TIMESTAMP:
                        value.setValue(resultSet.getTimestamp(i));
                        break;
                    case Types.LONGVARCHAR:
                    case Types.CLOB:
                        value.setValue(resultSet.getClob(i));
                        break;
                    case Types.LONGVARBINARY:
                    case Types.VARBINARY:
                    case Types.BINARY:
                        value.setValue(resultSet.getBytes(i));
                        break;
                    case Types.BLOB:
                        value.setValue(resultSet.getBlob(i));
                        break;
                    case Types.BIT:
                    case Types.TINYINT:
                    case Types.SMALLINT:
                    case Types.INTEGER:
                    case Types.BIGINT:
                    case Types.FLOAT:
                    case Types.REAL:
                    case Types.DOUBLE:
                    case Types.NUMERIC:
                    case Types.DECIMAL:
                    case Types.NULL:
                    case Types.OTHER:
                    case Types.JAVA_OBJECT:
                    case Types.DISTINCT:
                    case Types.STRUCT:
                    case Types.ARRAY:
                    case Types.REF:
                    case Types.DATALINK:
                    case Types.BOOLEAN:
                    case Types.ROWID:
                    case Types.NCHAR:
                    case Types.NVARCHAR:
                    case Types.LONGNVARCHAR:
                    case Types.NCLOB:
                    case Types.SQLXML:
                        // use getObject for all other known types
                        value.setValue(resultSet.getObject(i));
                        break;
                    default:
                        // otherwise try as string
                        asStringOrObject(value, resultSet, i);
                        break;
                    }
                } catch (Exception e) {
                    try {
                        // ... and on dump, resort to string
                        value.setValue(resultSet.getString(i));
                    } catch (SQLException sqlException) {
                        // catch-all SQLException - yes, this is hideous
                        // noticed with invalid date formatted values in mysql
                        value.setValue("<Error - " + sqlException.getMessage() + ">");
                    }
                }

                if (resultSet.wasNull()) {
                    value.setNull();
                }

                rowData.add(value);
            }

            tableData.add(rowData);

            if (recordCount == maxRecords) {
                break;
            }
        }

        if (Log.isTraceEnabled()) {
            Log.trace("Finished populating table model - " + recordCount + " rows - [ "
                    + MiscUtils.formatDuration(System.currentTimeMillis() - time) + "]");
        }

        fireTableStructureChanged();

    } catch (SQLException e) {
        System.err.println("SQL error populating table model at: " + e.getMessage());
        Log.debug("Table model error - " + e.getMessage(), e);
    } catch (Exception e) {
        if (e instanceof InterruptedException) {
            Log.debug("ResultSet generation interrupted.", e);
        } else {
            String message = e.getMessage();
            if (StringUtils.isBlank(message)) {
                System.err.println("Exception populating table model.");
            } else {
                System.err.println("Exception populating table model at: " + message);
            }
            Log.debug("Table model error - ", e);
        }
    } finally {
        if (resultSet != null) {
            try {
                resultSet.close();
                Statement statement = resultSet.getStatement();
                if (statement != null) {
                    statement.close();
                }
            } catch (SQLException e) {
            }
        }
    }
}
From source file: org.eclipse.buckminster.jnlp.distroprovider.cloudsmith.DistroProvider.java

public Properties getDistroP2Properties(final boolean draft, final Long cspecId, final IProgressMonitor monitor)
        throws Exception {
    MethodWrapper<Properties> method = new MethodWrapper<Properties>() {
        @Override
        public Properties process() throws Exception {
            Properties properties = new Properties();
            try {
                monitor.beginTask(null, IProgressMonitor.UNKNOWN);
                monitor.subTask("Starting distro resolution");

                int lastWorked = 0;
                try {
                    m_remoteDistroService.fireDistroResolution(draft, cspecId);
                } catch (Exception e) {
                    throw BuckminsterException.wrap(e);
                }

                IProgressInfo progressInfo = null;
                while (progressInfo == null || !progressInfo.isDone()) {
                    try {
                        Thread.sleep(500);
                    } catch (InterruptedException i) {
                    }

                    progressInfo = m_remoteDistroService.getProgressInfo();
                    String message = progressInfo.getMessage();
                    int worked = progressInfo.getWorked() * 100;

                    monitor.subTask(message);
                    monitor.worked(worked - lastWorked);
                    lastWorked = worked;

                    if (monitor.isCanceled()) {
                        m_remoteDistroService.cancel();
                        throw new InterruptedException();
                    }
                }

                try {
                    Map<String, String> remoteProperties = m_remoteDistroService.getDistroP2Properties();
                    if (remoteProperties != null)
                        properties.putAll(remoteProperties);
                } catch (Exception e) {
                    throw BuckminsterException.wrap(e);
                }
            } finally {
                monitor.done();
            }
            return properties;
        }
    };

    return method.run();
}
From source file: com.amazonaws.codepipeline.jobworker.JobWorkerDaemonTest.java

@Test
public void shouldForceStoppingSchedulingJobPollerWhenInterruptedExceptionIsThrow() throws Exception {
    // given
    when(executorService.awaitTermination(1, TimeUnit.MINUTES)).thenThrow(new InterruptedException());

    // when
    jobWorkerDaemon.stop();

    // then
    verify(executorService).shutdown();
    verify(executorService).shutdownNow();
}
From source file: de.blizzy.backup.check.CheckRun.java

@Override
public void run(IProgressMonitor monitor) throws InvocationTargetException, InterruptedException {
    database = new Database(settings, false);

    final boolean[] ok = { true };
    List<StorageInterceptorDescriptor> descs = BackupPlugin.getDefault().getStorageInterceptors();
    for (final StorageInterceptorDescriptor desc : descs) {
        final IStorageInterceptor interceptor = desc.getStorageInterceptor();
        SafeRunner.run(new ISafeRunnable() {
            @Override
            public void run() {
                IDialogSettings settings = Utils.getChildSection(Utils.getSection("storageInterceptors"), //$NON-NLS-1$
                        desc.getId());
                if (!interceptor.initialize(parentShell, settings)) {
                    ok[0] = false;
                }
            }

            @Override
            public void handleException(Throwable t) {
                ok[0] = false;
                interceptor.showErrorMessage(t, parentShell);
                BackupPlugin.getDefault()
                        .logError("error while initializing storage interceptor '" + desc.getName() + "'", t); //$NON-NLS-1$ //$NON-NLS-2$
            }
        });
        storageInterceptors.add(interceptor);
    }

    if (!ok[0]) {
        monitor.done();
        throw new InterruptedException();
    }

    try {
        database.open(storageInterceptors);
        database.initialize();

        int numFiles = database.factory().select(Factory.count()).from(Tables.FILES).fetchOne(Factory.count())
                .intValue();
        monitor.beginTask(Messages.Title_CheckBackupIntegrity, numFiles);

        Cursor<Record> cursor = null;
        try {
            cursor = database.factory()
                    .select(Tables.FILES.ID, Tables.FILES.BACKUP_PATH, Tables.FILES.CHECKSUM,
                            Tables.FILES.LENGTH, Tables.FILES.COMPRESSION)
                    .from(Tables.FILES).fetchLazy();
            while (cursor.hasNext()) {
                if (monitor.isCanceled()) {
                    throw new InterruptedException();
                }

                Record record = cursor.fetchOne();
                String backupPath = record.getValue(Tables.FILES.BACKUP_PATH);
                String checksum = record.getValue(Tables.FILES.CHECKSUM);
                long length = record.getValue(Tables.FILES.LENGTH).longValue();
                Compression compression = Compression
                        .fromValue(record.getValue(Tables.FILES.COMPRESSION).intValue());
                FileCheckResult checkResult = checkFile(backupPath, checksum, length, compression);
                if (!checkResult.ok) {
                    backupOk = false;
                    break;
                }

                if (checksum.length() != SHA256_LENGTH) {
                    Integer id = record.getValue(Tables.FILES.ID);
                    database.factory().update(Tables.FILES)
                            .set(Tables.FILES.CHECKSUM, checkResult.checksumSHA256)
                            .where(Tables.FILES.ID.equal(id)).execute();
                }

                monitor.worked(1);
            }
        } finally {
            database.closeQuietly(cursor);
        }
    } catch (SQLException | IOException e) {
        boolean handled = false;
        for (IStorageInterceptor interceptor : storageInterceptors) {
            if (interceptor.showErrorMessage(e, parentShell)) {
                handled = true;
            }
        }
        if (handled) {
            throw new InterruptedException();
        }
        throw new InvocationTargetException(e);
    } finally {
        database.close();
        for (final IStorageInterceptor interceptor : storageInterceptors) {
            SafeRunner.run(new ISafeRunnable() {
                @Override
                public void run() {
                    interceptor.destroy();
                }

                @Override
                public void handleException(Throwable t) {
                    BackupPlugin.getDefault().logError("error while destroying storage interceptor", t); //$NON-NLS-1$
                }
            });
        }
        System.gc();
        monitor.done();
    }
}