List of usage examples for java.lang InterruptedException getLocalizedMessage
public String getLocalizedMessage()
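The examples below share one pattern: a blocking call throws InterruptedException, and the handler either logs the localized message or copies it into a wrapping exception. A minimal sketch of that pattern follows; the class, method, and queue names are hypothetical and chosen only for illustration, and getLocalizedMessage() may return null when the exception was created without a message.

import java.io.IOException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Hypothetical helper class, for illustration only.
class InterruptedMessageExample {

    // Takes one item from the queue, wrapping an interrupt into an IOException,
    // the same way several of the examples below do.
    static String takeOrFail(BlockingQueue<String> queue) throws IOException {
        try {
            return queue.poll(100, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe it.
            Thread.currentThread().interrupt();
            // getLocalizedMessage() may be null here, so also pass the exception as the cause.
            throw new IOException(e.getLocalizedMessage(), e);
        }
    }
}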
From source file:sk.datalan.solr.impl.ConcurrentUpdateSolrServer.java
@Override
public NamedList<Object> request(final SolrRequest request) throws SolrServerException, IOException {
    if (!(request instanceof UpdateRequest)) {
        return server.request(request);
    }
    UpdateRequest req = (UpdateRequest) request;

    // this happens for commit...
    if (streamDeletes) {
        if ((req.getDocuments() == null || req.getDocuments().isEmpty())
                && (req.getDeleteById() == null || req.getDeleteById().isEmpty())
                && (req.getDeleteByIdMap() == null || req.getDeleteByIdMap().isEmpty())) {
            if (req.getDeleteQuery() == null) {
                blockUntilFinished();
                return server.request(request);
            }
        }
    } else {
        if ((req.getDocuments() == null || req.getDocuments().isEmpty())) {
            blockUntilFinished();
            return server.request(request);
        }
    }

    SolrParams params = req.getParams();
    if (params != null) {
        // check if it is waiting for the searcher
        if (params.getBool(UpdateParams.WAIT_SEARCHER, false)) {
            log.info("blocking for commit/optimize");
            blockUntilFinished(); // empty the queue
            return server.request(request);
        }
    }

    try {
        CountDownLatch tmpLock = lock;
        if (tmpLock != null) {
            tmpLock.await();
        }

        boolean success = queue.offer(req);

        for (;;) {
            synchronized (runners) {
                if (runners.isEmpty()
                        || (queue.remainingCapacity() < queue.size() // queue is half full and we can add more runners
                                && runners.size() < threadCount)) {
                    // We need more runners, so start a new one.
                    Runner r = new Runner();
                    runners.add(r);
                    scheduler.execute(r);
                } else {
                    // break out of the retry loop if we added the element to the queue
                    // successfully, *and* while we are still holding the runners lock
                    // to prevent race conditions.
                    if (success)
                        break;
                }
            }

            // Retry to add to the queue w/o the runners lock held (else we risk temporary deadlock)
            // This retry could also fail because
            // 1) existing runners were not able to take off any new elements in the queue
            // 2) the queue was filled back up since our last try
            // If we succeed, the queue may have been completely emptied, and all runners stopped.
            // In all cases, we should loop back to the top to see if we need to start more runners.
            if (!success) {
                success = queue.offer(req, 100, TimeUnit.MILLISECONDS);
            }
        }
    } catch (InterruptedException e) {
        log.error("interrupted", e);
        throw new IOException(e.getLocalizedMessage());
    }

    // RETURN A DUMMY result
    NamedList<Object> dummy = new NamedList<>();
    dummy.add("NOTE", "the request is processed in a background stream");
    return dummy;
}
From source file:org.orekit.utils.GenericTimeStampedCacheTest.java
private int checkDatesMultiThread(final List<AbsoluteDate> centralDates,
        final GenericTimeStampedCache<AbsoluteDate> cache, final int threadPoolSize)
        throws TimeStampedCacheException {

    final int n = cache.getNeighborsSize();
    final double step = ((Generator) cache.getGenerator()).getStep();
    final AtomicReference<AbsoluteDate[]> failedDates = new AtomicReference<AbsoluteDate[]>();
    final AtomicReference<TimeStampedCacheException> caught = new AtomicReference<TimeStampedCacheException>();
    ExecutorService executorService = Executors.newFixedThreadPool(threadPoolSize);

    for (final AbsoluteDate central : centralDates) {
        executorService.execute(new Runnable() {
            public void run() {
                try {
                    final List<AbsoluteDate> neighbors = cache.getNeighbors(central);
                    Assert.assertEquals(n, neighbors.size());
                    for (final AbsoluteDate date : neighbors) {
                        if (date.durationFrom(central) < -(n + 1) * step
                                || date.durationFrom(central) > n * step) {
                            AbsoluteDate[] dates = new AbsoluteDate[n + 1];
                            dates[0] = central;
                            System.arraycopy(neighbors, 0, dates, 1, n);
                            failedDates.set(dates);
                        }
                    }
                } catch (TimeStampedCacheException tce) {
                    caught.set(tce);
                }
            }
        });
    }

    try {
        executorService.shutdown();
        Assert.assertTrue("Not enough time for all threads to complete, try increasing the timeout",
                executorService.awaitTermination(10, TimeUnit.MINUTES));
    } catch (InterruptedException ie) {
        Assert.fail(ie.getLocalizedMessage());
    }

    if (caught.get() != null) {
        throw caught.get();
    }

    if (failedDates.get() != null) {
        AbsoluteDate[] dates = failedDates.get();
        StringBuilder builder = new StringBuilder();
        String eol = System.getProperty("line.separator");
        builder.append("central = ").append(dates[0]).append(eol);
        builder.append("step = ").append(step).append(eol);
        builder.append("neighbors =").append(eol);
        for (int i = 1; i < dates.length; ++i) {
            builder.append("    ").append(dates[i]).append(eol);
        }
        Assert.fail(builder.toString());
    }

    return centralDates.size();
}
From source file:org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer.java
@Override
public NamedList<Object> request(final SolrRequest request) throws SolrServerException, IOException {
    if (!(request instanceof UpdateRequest)) {
        return server.request(request);
    }
    UpdateRequest req = (UpdateRequest) request;

    // this happens for commit...
    if (streamDeletes) {
        if ((req.getDocuments() == null || req.getDocuments().isEmpty())
                && (req.getDeleteById() == null || req.getDeleteById().isEmpty())
                && (req.getDeleteByIdMap() == null || req.getDeleteByIdMap().isEmpty())) {
            if (req.getDeleteQuery() == null) {
                blockUntilFinished();
                return server.request(request);
            }
        }
    } else {
        if ((req.getDocuments() == null || req.getDocuments().isEmpty())) {
            blockUntilFinished();
            return server.request(request);
        }
    }

    SolrParams params = req.getParams();
    if (params != null) {
        // check if it is waiting for the searcher
        if (params.getBool(UpdateParams.WAIT_SEARCHER, false)) {
            log.info("blocking for commit/optimize");
            blockUntilFinished(); // empty the queue
            return server.request(request);
        }
    }

    try {
        CountDownLatch tmpLock = lock;
        if (tmpLock != null) {
            tmpLock.await();
        }

        boolean success = queue.offer(req);

        for (;;) {
            synchronized (runners) {
                // see if queue is half full and we can add more runners
                // special case: if only using a threadCount of 1 and the queue
                // is filling up, allow 1 add'l runner to help process the queue
                if (runners.isEmpty()
                        || (queue.remainingCapacity() < queue.size() && runners.size() < threadCount)) {
                    // We need more runners, so start a new one.
                    Runner r = new Runner();
                    runners.add(r);
                    scheduler.execute(r);
                } else {
                    // break out of the retry loop if we added the element to the queue
                    // successfully, *and* while we are still holding the runners lock
                    // to prevent race conditions.
                    if (success)
                        break;
                }
            }

            // Retry to add to the queue w/o the runners lock held (else we risk temporary deadlock)
            // This retry could also fail because
            // 1) existing runners were not able to take off any new elements in the queue
            // 2) the queue was filled back up since our last try
            // If we succeed, the queue may have been completely emptied, and all runners stopped.
            // In all cases, we should loop back to the top to see if we need to start more runners.
            if (!success) {
                success = queue.offer(req, 100, TimeUnit.MILLISECONDS);
            }
        }
    } catch (InterruptedException e) {
        log.error("interrupted", e);
        throw new IOException(e.getLocalizedMessage());
    }

    // RETURN A DUMMY result
    NamedList<Object> dummy = new NamedList<>();
    dummy.add("NOTE", "the request is processed in a background stream");
    return dummy;
}
From source file:com.ebay.jetstream.event.processor.esper.EsperProcessor.java
@Override
public void shutDown() {
    pausePublisher("Application getting gracefulShutdown");

    while (getQueuedEventCount() != 0) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            logger.error(e.getLocalizedMessage(), e);
        }
    }

    // If the queue is empty, flush Esper context
    esperEngineHolder.get(0).getEsperService().getEPRuntime().getEventSender("EsperEndEvent")
            .sendEvent(new Object());

    try {
        Thread.sleep(100);
    } catch (InterruptedException e) {
        logger.error(e.getLocalizedMessage(), e);
    }

    stop(true);
    esperEngineHolder.get(0).clear();
    logger.warn(getBeanName() + " Shutdown has been completed");

    if (m_watchDog != null) {
        m_watchDog.shutdownNow();
    }
}
From source file:org.fdroid.fdroid.net.DownloaderService.java
/**
 * This method is invoked on the worker thread with a request to process.
 * Only one Intent is processed at a time, but the processing happens on a
 * worker thread that runs independently from other application logic.
 * So, if this code takes a long time, it will hold up other requests to
 * the same DownloaderService, but it will not hold up anything else.
 * When all requests have been handled, the DownloaderService stops itself,
 * so you should not ever call {@link #stopSelf}.
 * <p/>
 * Downloads are put into subdirectories based on hostname/port of each repo
 * to prevent files with the same names from conflicting. Each repo enforces
 * unique APK file names on the server side.
 *
 * @param intent The {@link Intent} passed via {@link
 *               android.content.Context#startService(Intent)}.
 */
protected void handleIntent(Intent intent) {
    final Uri uri = intent.getData();
    File downloadDir = new File(Utils.getApkCacheDir(this), uri.getHost() + "-" + uri.getPort());
    downloadDir.mkdirs();
    final SanitizedFile localFile = new SanitizedFile(downloadDir, uri.getLastPathSegment());
    final String packageName = getPackageNameFromIntent(intent);
    sendBroadcast(uri, Downloader.ACTION_STARTED, localFile);

    if (Preferences.get().isUpdateNotificationEnabled()) {
        Notification notification = createNotification(intent.getDataString(), getPackageNameFromIntent(intent))
                .build();
        startForeground(NOTIFY_DOWNLOADING, notification);
    }

    try {
        downloader = DownloaderFactory.create(this, uri, localFile);
        downloader.setListener(new Downloader.DownloaderProgressListener() {
            @Override
            public void sendProgress(URL sourceUrl, int bytesRead, int totalBytes) {
                if (isActive(uri.toString())) {
                    Intent intent = new Intent(Downloader.ACTION_PROGRESS);
                    intent.setData(uri);
                    intent.putExtra(Downloader.EXTRA_BYTES_READ, bytesRead);
                    intent.putExtra(Downloader.EXTRA_TOTAL_BYTES, totalBytes);
                    localBroadcastManager.sendBroadcast(intent);
                    if (Preferences.get().isUpdateNotificationEnabled()) {
                        NotificationManager nm = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
                        Notification notification = createNotification(uri.toString(), packageName)
                                .setProgress(totalBytes, bytesRead, false).build();
                        nm.notify(NOTIFY_DOWNLOADING, notification);
                    }
                }
            }
        });
        downloader.download();
        sendBroadcast(uri, Downloader.ACTION_COMPLETE, localFile);
        DownloadCompleteService.notify(this, packageName, intent.getDataString());
    } catch (InterruptedException e) {
        sendBroadcast(uri, Downloader.ACTION_INTERRUPTED, localFile);
    } catch (IOException e) {
        e.printStackTrace();
        sendBroadcast(uri, Downloader.ACTION_INTERRUPTED, localFile, e.getLocalizedMessage());
    } finally {
        if (downloader != null) {
            downloader.close();
        }
        // May have already been removed in response to a cancel intent, but that won't cause
        // problems if we ask to remove it again.
        QUEUE_WHATS.remove(uri.toString());
        stopForeground(true);
    }
    downloader = null;
}
From source file:net.mozq.picto.core.ProcessCore.java
public static void processFiles(ProcessCondition processCondition,
        Function<Integer, ProcessData> processDataGetter, IntConsumer processDataUpdater,
        Function<ProcessData, ProcessDataStatus> overwriteConfirm, BooleanSupplier processStopper)
        throws IOException {

    int index = 0;
    while (!processStopper.getAsBoolean()) {
        ProcessData processData = processDataGetter.apply(index);
        if (processData == null) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // NOP
            }
            continue;
        }

        processData.setStatus(ProcessDataStatus.Processing);
        processDataUpdater.accept(index);

        ProcessDataStatus status;
        try {
            if (processCondition.isDryRun()) {
                // NOP
                status = ProcessDataStatus.Success;
            } else {
                status = process(processCondition, processData, overwriteConfirm);
            }
            processData.setStatus(status);
        } catch (Exception e) {
            processData.setStatus(ProcessDataStatus.Error);
            processData.setMessage(e.getLocalizedMessage());
            App.handleWarn(e.getMessage(), e);
        }
        processDataUpdater.accept(index);

        if (processData.getStatus() == ProcessDataStatus.Error
                || processData.getStatus() == ProcessDataStatus.Terminated) {
            break;
        }

        index++;
    }
}
From source file:org.deegree.tools.rendering.dem.filtering.DEMRasterFilterer.java
/**
 * @throws IOException
 * @throws InterruptedException
 * @throws Exception
 */
private void applyFilter() throws IOException, InterruptedException {
    Runtime rt = Runtime.getRuntime();
    int processors = rt.availableProcessors();
    LOG.info("Number of processors: {}", processors);

    // calculate the rows.
    RasterGeoReference geoRef = raster.getRasterReference();
    Envelope renv = raster.getEnvelope();
    RasterRect rect = geoRef.convertEnvelopeToRasterCRS(raster.getEnvelope());
    int width = raster.getColumns();
    int height = raster.getRows();

    int numberOfTiles = Rasters.calcApproxTiles(width, height, TILE_SIZE);
    int tileWidth = Rasters.calcTileSize(width, numberOfTiles);
    int tileHeight = Rasters.calcTileSize(height, numberOfTiles);
    int columns = (int) Math.ceil(((double) width) / tileWidth);
    int rows = (int) Math.ceil((double) height / tileHeight);

    GridWriter gridWriter = new GridWriter(columns, rows, renv, geoRef, tmpGridFile, raster.getRasterDataInfo());
    FilteredResultWiter resultWriter = new FilteredResultWiter(gridWriter);

    Stack<RasterFilterer> filters = new Stack<RasterFilterer>();
    String lock = "lock";
    for (int i = 0; i < processors; ++i) {
        RasterFilterer rf = new RasterFilterer(this.raster, kernelSize, resultWriter, stdCorr, lock, filters);
        filters.push(rf);
    }
    Thread outputThread = new Thread(resultWriter, "result writer");
    outputThread.start();

    LOG.info("Tiling raster of {} x {} pixels (width x height) into {} rows and {} columns.",
            new Object[] { rect.width, rect.height, rows, columns });

    int kernelHalf = (this.kernelSize - 1) / 2;
    long totalTime = currentTimeMillis();
    for (int row = 30; row < rows; ++row) {
        long currentTime = currentTimeMillis();
        for (int col = 0; col < columns; ++col) {
            RasterFilterer filterer = null;
            while (filterer == null) {
                synchronized (lock) {
                    if (filters.isEmpty()) {
                        lock.wait();
                    } else {
                        filterer = filters.pop();
                    }
                }
            }
            RasterRect outputRect = new RasterRect(((col * tileWidth) - kernelHalf),
                    ((row * tileHeight) - kernelHalf), tileWidth + this.kernelSize,
                    tileHeight + this.kernelSize);
            filterer.setRasterInformation(outputRect);
            new Thread(filterer, "row_" + row + "_col_" + col).start();
        }
        double rPT = Math.round((Math.round((currentTimeMillis() - currentTime) / 10d) / 100d));
        if (row + 1 < rows) {
            double remain = rPT * (rows - (row + 1));
            LOG.info("Filtering row: {}, took approximately: {} seconds, estimated remaining time: {} seconds "
                            + ((remain > 60) ? "( {} minutes)." : "."),
                    new Object[] { (row + 1), rPT, remain, Math.round(remain / 60d) });
        }
        System.gc();
        RasterCache.dispose();
    }

    while (true) {
        synchronized (lock) {
            RasterCache.dispose();
            if (filters.size() < processors) {
                try {
                    // wait for all
                    lock.wait();
                } catch (InterruptedException e) {
                    LOG.error("Could not wait for all filter threads to end because: " + e.getLocalizedMessage(), e);
                }
            } else {
                break;
            }
        }
    }
    resultWriter.stop();
    // outputThread.interrupt();
    outputThread.join();
    gridWriter.writeMetadataFile(null);

    StringBuilder sb = new StringBuilder("Processing ");
    sb.append(rows).append(" rows and ");
    sb.append(columns).append(" columns of rasters with width: ");
    sb.append(tileWidth).append(" and height: ");
    sb.append(tileHeight).append(", took: ");
    sb.append((Math.round((currentTimeMillis() - totalTime) / 10d) / 100d)).append(" seconds");
    LOG.info(sb.toString());

    // now output the filtered tiles.
    outputTiles();
}
From source file:org.kitodo.production.forms.IndexingForm.java
private void runIndexing(IndexWorker worker, ObjectType type) {
    currentState = IndexStates.NO_STATE;
    int attempts = 0;
    while (attempts < 10) {
        try {
            if (Objects.equals(currentIndexState, ObjectType.NONE) || Objects.equals(currentIndexState, type)) {
                if (Objects.equals(currentIndexState, ObjectType.NONE)) {
                    indexingStartedTime = LocalDateTime.now();
                    currentIndexState = type;
                    objectIndexingStates.put(type, IndexingStates.INDEXING_STARTED);
                    pollingChannel.send(INDEXING_STARTED_MESSAGE + currentIndexState);
                }
                indexerThread = new Thread(worker);
                indexerThread.setDaemon(true);
                indexerThread.start();
                indexerThread.join();
                break;
            } else {
                logger.debug("Cannot start '{}' indexing while a different indexing process running: '{}'",
                        type, this.currentIndexState);
                Thread.sleep(pause);
                attempts++;
            }
        } catch (InterruptedException e) {
            Helper.setErrorMessage(e.getLocalizedMessage(), logger, e);
            Thread.currentThread().interrupt();
        }
    }
}
From source file:no.sintef.jarfter.Jarfter.java
/**
 * Encapsulates the use of ProcessBuilder.
 *
 * @param command
 * @param arguments
 * @throws IOException
 * @throws InterruptedException
 */
private void runCommand(String command, String... arguments) throws JarfterException {
    log("runCommand - Starting " + command + "...\n");

    List<String> commandList = new ArrayList<String>();
    commandList.add(command);
    for (String argument : arguments) {
        commandList.add(argument);
    }

    ProcessBuilder procBuilder = new ProcessBuilder(commandList);
    Process detachedProc = null;
    try {
        detachedProc = procBuilder.start();
    } catch (IOException ioe) {
        log("runCommand - Could not start the detachedProc...");
        error(ioe);
        throw new JarfterException();
    }

    String line;
    String stdout = "";
    String stderr = "";
    try {
        // Reading output
        BufferedReader outputReader = new BufferedReader(new InputStreamReader(detachedProc.getInputStream()));
        while ((line = outputReader.readLine()) != null) {
            stdout += line;
        }
        outputReader.close();

        // Reading error
        BufferedReader errorReader = new BufferedReader(new InputStreamReader(detachedProc.getErrorStream()));
        while ((line = errorReader.readLine()) != null) {
            stderr += line;
        }
        errorReader.close();
    } catch (IOException ioe) {
        log("runCommand - caught exception while reading stdout and stderr...");
        error(ioe);
        throw new JarfterException(JarfterException.Error.IO_PROCESS_OUTPUT);
    }

    log("runCommand - stdout:\n" + stdout);
    log("runCommand - stderr:\n" + stderr);

    try {
        detachedProc.waitFor();
    } catch (InterruptedException interruption) {
        log("runCommand - caught InterruptedException from detachedProc.waitFor()...");
        error(interruption);
        throw new JarfterException(interruption.getClass().getName(), interruption.getLocalizedMessage());
    }

    detachedProc.destroy();

    if (!stderr.equals("")) {
        runCommandAnalyzeStderr(command, stderr);
    }
}
From source file:org.sigimera.app.android.StatisticFragment.java
@Override
public final View onCreateView(final LayoutInflater inflater, final ViewGroup container,
        final Bundle savedInstanceState) {
    view = inflater.inflate(R.layout.statistic, container, false);

    progessDialog = ProgressDialog.show(getActivity(), "Preparing crises information!",
            "Please be patient until the information are ready...", false);
    progessDialog.setCancelable(true);

    Thread worker = new Thread() {
        @Override
        public void run() {
            try {
                Looper.prepare();
                authToken = ApplicationController.getInstance().getSessionHandler().getAuthenticationToken();
                userLocation = LocationController.getInstance().getLastKnownLocation();
                authToken = ApplicationController.getInstance().getSessionHandler().getAuthenticationToken();
                try {
                    if (!ApplicationController.getInstance().isUpdatedOne())
                        PersistanceController.getInstance().updateEverything(authToken);
                } catch (InterruptedException e) {
                    Log.e("[StatisticFragment]", "Failed to update everything.");
                }
                crisesStats = PersistanceController.getInstance().getCrisesStats(authToken);
                guiHandler.post(updateGUI);
            } catch (AuthenticationErrorException e) {
                // SHOULD NEVER OCCUR: Check before calling this window
                Log.e(Constants.LOG_TAG_SIGIMERA_APP, "Error on authentification" + e.getLocalizedMessage());
            }
        }
    };
    worker.start();

    return view;
}