List of usage examples for java.lang.InterruptedException.getMessage()
public String getMessage()
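Most of the examples below simply log e.getMessage() and move on. Two points worth keeping in mind when reading them: getMessage() may return null for InterruptedException (many JDK methods throw new InterruptedException() without a detail message), and catching the exception clears the thread's interrupt status, which is usually worth restoring. A minimal self-contained sketch of that pattern (not taken from any of the projects below):

public final class InterruptedExceptionDemo {
    public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(10_000); // blocking call that can be interrupted
            } catch (InterruptedException e) {
                // getMessage() may be null, so guard before logging.
                System.err.println("Interrupted: "
                        + (e.getMessage() != null ? e.getMessage() : "<no message>"));
                Thread.currentThread().interrupt(); // restore the interrupt status
            }
        });
        worker.start();
        worker.interrupt();
        worker.join();
    }
}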
From source file:dk.dbc.opensearch.datadock.DatadockMain.java
/**
 * The shutdown hook. This method is called when the program catches a
 * kill signal, or if internal code requests that the datadock is shut down.
 */
public void shutdown() {
    this.shutdownRequested = true;
    try {
        log.info("Shutting down.");
        datadockManager.shutdown();
    } catch (InterruptedException e) {
        log.error(String.format("Interrupted while waiting on main daemon thread to complete: %s",
                e.getMessage()));
        System.exit(-1);
    } catch (HarvesterIOException hioe) {
        log.fatal(String.format("Some error occured while shutting down the harvester: %s",
                hioe.getMessage()));
        System.exit(-1);
    } catch (NullPointerException npe) {
        log.fatal("DatadockManager does not seem to have been started or it crashed. "
                + "Shutting down with the risk of inconsistencies");
        System.exit(-1);
    }
    log.info("Exiting normally.");
}
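The javadoc above refers to a JVM shutdown hook, which is how a kill signal reaches this method. The registration code is not part of this snippet, so the wiring below is an assumed sketch using the standard Runtime#addShutdownHook API (datadockMain is a hypothetical reference to the running instance):

// Hypothetical registration; the actual DatadockMain bootstrap is not shown here.
Runtime.getRuntime().addShutdownHook(new Thread(() -> {
    datadockMain.shutdown(); // runs when the JVM receives SIGTERM or exits normally
}));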
From source file:dk.dma.ais.view.rest.AisStoreResource.java
/**
 * Produce KML output for POSTed AIS data in NMEA format.
 *
 * Use 'curl -X POST -T <ais-data-file> http://127.0.0.1:8090/store/history/kml' to test.
 * Or with expression filter:
 * 'curl -X POST -T <ais-data-file> http://127.0.0.1:8090/store/history/kml?filter="m.mmsi=247469000" > test.kmz'
 */
@POST
@Path("/history/kml")
@Produces(MEDIA_TYPE_KMZ)
public Response createKml(@QueryParam("filter") String filterExpression, InputStream inputStream) {
    LOG.debug("Filter expression: " + filterExpression);
    Predicate<AisPacket> filter = isBlank(filterExpression) ? p -> true
            : AisPacketFilters.parseExpressionFilter(filterExpression);
    ArrayList<AisPacket> packets = new ArrayList<>();
    AisReader reader = AisReaders.createReaderFromInputStream(inputStream);
    reader.registerPacketHandler(aisPacket -> {
        if (filter.test(aisPacket)) {
            packets.add(aisPacket);
        }
    });
    reader.start();
    try {
        reader.join();
    } catch (InterruptedException e) {
        LOG.error(e.getMessage(), e);
        return Response.serverError().build();
    }
    StreamingOutput output = StreamingUtil.createZippedStreamingOutput(packets,
            AisPacketOutputSinks.newKmlSink(), "history.kml");
    return Response.ok().entity(output).type(MEDIA_TYPE_KMZ).build();
}
From source file:io.fabric8.msg.jnatsd.TestProtocol.java
@Test
public void testReplyArg() {
    final String replyExpected = "bar";
    final String ts;
    final Channel<Boolean> ch = new Channel<Boolean>();
    try (Connection c = connectionFactory.createConnection()) {
        try (AsyncSubscription s = c.subscribeAsync("foo", new MessageHandler() {
            @Override
            public void onMessage(Message msg) {
                assertEquals(replyExpected, msg.getReplyTo());
                ch.add(true);
            }
        })) {
            try {
                sleep(200);
            } catch (InterruptedException e) {
            }
            c.publish("foo", "bar", (byte[]) null);
            assertTrue("Message not received.", ch.get(5, TimeUnit.SECONDS));
        }
    } catch (IOException | TimeoutException e) {
        fail(e.getMessage());
    }
}
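Note that the empty catch around sleep(200) silently swallows an interrupt. A more defensive variant (an alternative sketch, not this project's code) keeps the interrupt observable and fails the test explicitly:

try {
    Thread.sleep(200);
} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // keep the interrupt observable to callers
    fail("Interrupted while waiting for the subscription to settle: " + e.getMessage());
}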
From source file:com.hurence.logisland.service.elasticsearch.Elasticsearch_2_4_0_ClientService.java
@OnDisabled
public void shutdown() {
    if (bulkProcessor != null) {
        bulkProcessor.flush();
        try {
            if (!bulkProcessor.awaitClose(10, TimeUnit.SECONDS)) {
                getLogger().error("some request could not be send to es because of time out");
            } else {
                getLogger().info("all requests have been submitted to es");
            }
        } catch (InterruptedException e) {
            getLogger().error(e.getMessage());
        }
    }
    if (esClient != null) {
        getLogger().info("Closing ElasticSearch Client");
        esClient.close();
        esClient = null;
    }
}
From source file:org.openbaton.vnfm.MediaServerManager.java
@Override
public VirtualNetworkFunctionRecord terminate(VirtualNetworkFunctionRecord virtualNetworkFunctionRecord) {
    log.info("Terminating vnfr with id " + virtualNetworkFunctionRecord.getId());
    ManagedVNFR managedVnfr = null;
    Iterable<ManagedVNFR> managedVnfrs = managedVnfrRepository
            .findByVnfrId(virtualNetworkFunctionRecord.getId());
    if (managedVnfrs.iterator().hasNext()) {
        managedVnfr = managedVnfrs.iterator().next();
    } else {
        managedVnfr = new ManagedVNFR();
        managedVnfr.setNsrId(virtualNetworkFunctionRecord.getParent_ns_id());
        managedVnfr.setVnfrId(virtualNetworkFunctionRecord.getId());
    }
    managedVnfr.setTask(Action.RELEASE_RESOURCES);
    managedVnfrRepository.save(managedVnfr);
    try {
        elasticityManagement.deactivate(virtualNetworkFunctionRecord.getParent_ns_id(),
                virtualNetworkFunctionRecord.getId()).get(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        log.error(e.getMessage(), e);
    } catch (ExecutionException e) {
        log.error(e.getMessage(), e);
    } catch (TimeoutException e) {
        log.error(e.getMessage(), e);
    }
    try {
        virtualNetworkFunctionRecord = nfvoRequestor.getNetworkServiceRecordAgent()
                .getVirtualNetworkFunctionRecord(virtualNetworkFunctionRecord.getParent_ns_id(),
                        virtualNetworkFunctionRecord.getId());
    } catch (SDKException e) {
        log.error(e.getMessage(), e);
    }
    for (VirtualDeploymentUnit vdu : virtualNetworkFunctionRecord.getVdu()) {
        Set<VNFCInstance> vnfciToRem = new HashSet<>();
        VimInstance vimInstance = null;
        try {
            vimInstance = Utils.getVimInstance(vdu.getVimInstanceName(), nfvoRequestor);
        } catch (NotFoundException e) {
            log.error(e.getMessage(), e);
        }
        for (VNFCInstance vnfcInstance : vdu.getVnfc_instance()) {
            log.debug("Releasing resources for vdu with id " + vdu.getId());
            try {
                mediaServerResourceManagement.release(vnfcInstance, vimInstance);
                log.debug("Removed VNFCinstance: " + vnfcInstance);
            } catch (VimException e) {
                log.error(e.getMessage(), e);
                throw new RuntimeException(e.getMessage(), e);
            }
            vnfciToRem.add(vnfcInstance);
            log.debug("Released resources for vdu with id " + vdu.getId());
        }
        vdu.getVnfc_instance().removeAll(vnfciToRem);
    }
    log.info("Terminated vnfr with id " + virtualNetworkFunctionRecord.getId());
    try {
        applicationManagement.deleteByVnfrId(virtualNetworkFunctionRecord.getId());
        mediaServerManagement.deleteByVnfrId(virtualNetworkFunctionRecord.getId());
    } catch (NotFoundException e) {
        log.warn(e.getMessage());
    }
    try {
        managedVnfrRepository.deleteByVnfrId(virtualNetworkFunctionRecord.getId());
    } catch (NotFoundException e) {
        log.warn("ManagedVNFR were not existing and therefore not deletable");
    }
    return virtualNetworkFunctionRecord;
}
From source file:com.sumzerotrading.broker.ib.InteractiveBrokersBroker.java
@Override
public synchronized String getNextOrderId() {
    if (nextOrderId == -1) {
        try {
            ibConnection.reqIds(1);
            nextOrderId = nextIdQueue.take();
            return nextOrderId + "";
        } catch (InterruptedException ex) {
            logger.error(ex.getMessage(), ex);
            return -1 + "";
        }
    }
    return ++nextOrderId + "";
}
From source file:com.netflix.suro.input.thrift.ThriftServer.java
@Override
public void start() throws TTransportException {
    msgProcessor.start();
    logger.info("Starting ThriftServer with config " + config);
    CustomServerSocket transport = new CustomServerSocket(config);
    port = transport.getPort();
    SuroServer.Processor processor = new SuroServer.Processor<MessageSetProcessor>(msgProcessor);
    THsHaServer.Args serverArgs = new THsHaServer.Args(transport);
    serverArgs.workerThreads(config.getThriftWorkerThreadNum());
    serverArgs.processor(processor);
    serverArgs.maxReadBufferBytes = config.getThriftMaxReadBufferBytes();
    executor = Executors.newSingleThreadExecutor();
    server = new THsHaServer(serverArgs);
    Future<?> serverStarted = executor.submit(new Runnable() {
        @Override
        public void run() {
            server.serve();
        }
    });
    try {
        serverStarted.get(config.getStartupTimeout(), TimeUnit.MILLISECONDS);
        if (server.isServing()) {
            logger.info("Server started on port:" + config.getPort());
        } else {
            throw new RuntimeException("ThriftServer didn't start up within: " + config.getStartupTimeout());
        }
    } catch (InterruptedException e) {
        // ignore this type of exception
    } catch (TimeoutException e) {
        if (server.isServing()) {
            logger.info("Server started on port:" + config.getPort());
        } else {
            logger.error("ThriftServer didn't start up within: " + config.getStartupTimeout());
            Throwables.propagate(e);
        }
    } catch (ExecutionException e) {
        logger.error("Exception on starting ThriftServer: " + e.getMessage(), e);
        Throwables.propagate(e);
    }
}
From source file:ch.cyberduck.core.b2.B2LargeUploadService.java
@Override
public BaseB2Response upload(final Path file, final Local local, final BandwidthThrottle throttle,
                             final StreamListener listener, final TransferStatus status,
                             final ConnectionCallback callback) throws BackgroundException {
    final DefaultThreadPool pool = new DefaultThreadPool("largeupload", concurrency);
    try {
        final String fileid;
        // Get the results of the uploads in the order they were submitted.
        // This is important for building the manifest, and is not a problem in terms of performance
        // because we should only continue when all segments have uploaded successfully.
        final List<B2UploadPartResponse> completed = new ArrayList<B2UploadPartResponse>();
        final Map<String, String> fileinfo = new HashMap<>(status.getMetadata());
        final Checksum checksum = status.getChecksum();
        if (Checksum.NONE != checksum) {
            switch (checksum.algorithm) {
                case sha1:
                    fileinfo.put(X_BZ_INFO_LARGE_FILE_SHA1, status.getChecksum().hash);
                    break;
            }
        }
        if (null != status.getTimestamp()) {
            fileinfo.put(X_BZ_INFO_SRC_LAST_MODIFIED_MILLIS, String.valueOf(status.getTimestamp()));
        }
        if (status.isAppend() || status.isRetry()) {
            // Add already completed parts
            final B2LargeUploadPartService partService = new B2LargeUploadPartService(session);
            final List<B2FileInfoResponse> uploads = partService.find(file);
            if (uploads.isEmpty()) {
                fileid = session.getClient()
                        .startLargeFileUpload(new B2FileidProvider(session).getFileid(
                                containerService.getContainer(file), new DisabledListProgressListener()),
                                containerService.getKey(file), status.getMime(), fileinfo)
                        .getFileId();
            } else {
                fileid = uploads.iterator().next().getFileId();
                completed.addAll(partService.list(fileid));
            }
        } else {
            fileid = session.getClient()
                    .startLargeFileUpload(new B2FileidProvider(session).getFileid(
                            containerService.getContainer(file), new DisabledListProgressListener()),
                            containerService.getKey(file), status.getMime(), fileinfo)
                    .getFileId();
        }
        // Submit file segments for concurrent upload
        final List<Future<B2UploadPartResponse>> parts = new ArrayList<Future<B2UploadPartResponse>>();
        long remaining = status.getLength();
        long offset = 0;
        for (int partNumber = 1; remaining > 0; partNumber++) {
            boolean skip = false;
            if (status.isAppend() || status.isRetry()) {
                if (log.isInfoEnabled()) {
                    log.info(String.format("Determine if part number %d can be skipped", partNumber));
                }
                for (B2UploadPartResponse c : completed) {
                    if (c.getPartNumber().equals(partNumber)) {
                        if (log.isInfoEnabled()) {
                            log.info(String.format("Skip completed part number %d", partNumber));
                        }
                        skip = true;
                        offset += c.getContentLength();
                        break;
                    }
                }
            }
            if (!skip) {
                final Long length = Math.min(Math.max(
                        ((status.getLength() + status.getOffset()) / B2LargeUploadService.MAXIMUM_UPLOAD_PARTS),
                        partSize), remaining);
                // Submit to queue
                parts.add(this.submit(pool, file, local, throttle, listener, status, partNumber,
                        offset, length, callback));
                if (log.isDebugEnabled()) {
                    log.debug(String.format("Part %s submitted with size %d and offset %d",
                            partNumber, length, offset));
                }
                remaining -= length;
                offset += length;
            }
        }
        try {
            for (Future<B2UploadPartResponse> f : parts) {
                completed.add(f.get());
            }
        } catch (InterruptedException e) {
            log.error("Part upload failed with interrupt failure");
            status.setCanceled();
            throw new ConnectionCanceledException(e);
        } catch (ExecutionException e) {
            log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
            if (e.getCause() instanceof BackgroundException) {
                throw (BackgroundException) e.getCause();
            }
            throw new DefaultExceptionMappingService().map(e.getCause());
        } finally {
            pool.shutdown(false);
        }
        completed.sort(new Comparator<B2UploadPartResponse>() {
            @Override
            public int compare(final B2UploadPartResponse o1, final B2UploadPartResponse o2) {
                return o1.getPartNumber().compareTo(o2.getPartNumber());
            }
        });
        final List<String> checksums = new ArrayList<String>();
        for (B2UploadPartResponse part : completed) {
            checksums.add(part.getContentSha1());
        }
        final B2FinishLargeFileResponse response = session.getClient().finishLargeFileUpload(fileid,
                checksums.toArray(new String[checksums.size()]));
        if (log.isInfoEnabled()) {
            log.info(String.format("Finished large file upload %s with %d parts", file, completed.size()));
        }
        // Mark parent status as complete
        status.setComplete();
        return response;
    } catch (B2ApiException e) {
        throw new B2ExceptionMappingService().map("Upload {0} failed", e, file);
    } catch (IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
}
From source file:com.redhat.victims.plugin.jenkins.VictimsPostBuildScanner.java
/**
 * Creates and synchronises the database then checks supplied dependencies
 * against the vulnerability database.
 */
private void execute() throws AbortException {
    VictimsResultCache cache = ctx.getCache();
    int cores = Runtime.getRuntime().availableProcessors();
    ExecutorService executor = null;
    List<Future<FileStub>> jobs = null;
    PrintStream log = ctx.getLog();
    boolean buildFailure = false;
    ArrayList<VulnerableDependencyException> vulnerabilities = new ArrayList<VulnerableDependencyException>();
    try {
        // All files will be scanned for vulnerabilities and reported on at the end
        // rather than fail at the first one

        // Sync database
        updateDatabase(ctx);

        // Concurrency, yay!
        executor = Executors.newFixedThreadPool(cores);
        jobs = new ArrayList<Future<FileStub>>();

        // Find all files under supplied path
        Collection<File> sources = listFiles(this.outputDir);
        log.println("Scanning Files:");
        for (File f : sources) {
            if (printCheckedFiles) {
                log.println("\t- " + f.getAbsolutePath());
            }
            FileStub fs;
            try {
                fs = new FileStub(f);
            } catch (Exception e) {
                log.println("ERROR : unable to generate filestub for file: " + f.getAbsolutePath());
                continue;
            }
            String fsid = fs.getId();
            // Check the cache
            if (cache.exists(fsid)) {
                HashSet<String> cves = cache.get(fsid);
                if (printCheckedFiles) {
                    log.println("Cached: " + fsid);
                }
                /* Report vulnerabilities */
                if (!cves.isEmpty()) {
                    VulnerableDependencyException err = new VulnerableDependencyException(fs,
                            Settings.FINGERPRINT, cves);
                    vulnerabilities.add(err);
                    log.println(err.getLogMessage());
                    if (err.isFatal(ctx)) {
                        buildFailure = true;
                    }
                }
                continue;
            }
            // Process dependencies that haven't been cached
            Callable<FileStub> worker = new VictimsCommand(ctx, fs);
            jobs.add(executor.submit(worker));
        }
        executor.shutdown();
        // Check the results
        for (Future<FileStub> future : jobs) {
            try {
                FileStub checked = future.get();
                if (checked != null) {
                    cache.add(checked.getId(), null);
                }
            } catch (InterruptedException ie) {
                log.println(ie.getMessage());
            } catch (ExecutionException e) {
                Throwable cause = e.getCause();
                if (cause instanceof VulnerableDependencyException) {
                    VulnerableDependencyException vbe = (VulnerableDependencyException) cause;
                    cache.add(vbe.getId(), vbe.getVulnerabilites());
                    // Add exception to list for logging as group
                    vulnerabilities.add(vbe);
                    log.println(vbe.getLogMessage());
                    if (vbe.isFatal(ctx)) {
                        buildFailure = true;
                    }
                } else {
                    throw new VictimsBuildException(e.getCause().getMessage());
                }
            }
        }
    } catch (VictimsException ve) {
        log.println("vic exception found: " + ve.getMessage());
        throw new VictimsBuildException(ve.getMessage());
    } finally {
        if (executor != null) {
            executor.shutdown();
        }
    }
    if (!vulnerabilities.isEmpty()) {
        for (VulnerableDependencyException ex : vulnerabilities) {
            log.println(ex.getErrorMessage());
        }
    }
    if (buildFailure) {
        throw new AbortException("Vulnerable jar found");
    }
}
From source file:com.github.notizklotz.derbunddownloader.download.IssueDownloadService.java
@ServiceAction
public void downloadIssue(int day, int month, int year) {
    Log.i(LOG_TAG, "Handling download intent");
    try {
        boolean connected;
        final boolean wifiOnly = Settings.isWifiOnly(getApplicationContext());
        if (wifiOnly) {
            connected = waitForWifiConnection();
            if (!connected) {
                notifyUser(getText(R.string.download_wifi_connection_failed),
                        getText(R.string.download_wifi_connection_failed_text),
                        R.drawable.ic_stat_newspaper);
            }
        } else {
            NetworkInfo activeNetworkInfo = connectivityManager.getActiveNetworkInfo();
            connected = activeNetworkInfo != null && activeNetworkInfo.isConnected();
            if (!connected) {
                notifyUser(getText(R.string.download_connection_failed),
                        getText(R.string.download_connection_failed_text),
                        R.drawable.ic_stat_newspaper);
            }
        }
        if (connected) {
            if (!checkUserAccount()) {
                notifyUser(getText(R.string.download_login_failed),
                        getText(R.string.download_login_failed_text),
                        R.drawable.ic_stat_newspaper);
            } else {
                final LocalDate issueDate = new LocalDate(day, month, year);
                fetchThumbnail(issueDate);
                final CountDownLatch downloadDoneSignal = new CountDownLatch(1);
                receiver = new DownloadCompletedBroadcastReceiver(downloadDoneSignal);
                registerReceiver(receiver, new IntentFilter(DownloadManager.ACTION_DOWNLOAD_COMPLETE));
                try {
                    String title = startDownload(issueDate, wifiOnly);
                    downloadDoneSignal.await();
                    notifyUser(title, getString(R.string.download_completed), R.drawable.ic_stat_newspaper);
                } catch (InterruptedException e) {
                    Log.wtf(LOG_TAG, "Interrupted while waiting for the downloadDoneSignal");
                }
            }
        }
    } catch (Exception e) {
        notifyUser(getText(R.string.download_service_error),
                getText(R.string.download_service_error_text) + " " + e.getMessage(),
                R.drawable.ic_stat_newspaper);
    } finally {
        cleanup();
    }
}