List of usage examples for java.util.concurrent TimeUnit NANOSECONDS
TimeUnit NANOSECONDS
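Before the examples, a quick orientation: TimeUnit.NANOSECONDS converts durations between units and pairs naturally with System.nanoTime(). A minimal self-contained sketch (class and variable names are illustrative only, not from the examples below):

import java.util.concurrent.TimeUnit;

public class NanosDemo {
    public static void main(String[] args) throws InterruptedException {
        // convert(sourceDuration, sourceUnit): 250 ms expressed in nanoseconds
        long nanos = TimeUnit.NANOSECONDS.convert(250, TimeUnit.MILLISECONDS);
        System.out.println(nanos); // 250000000

        // Time a short operation with System.nanoTime() and report it in ms
        long start = System.nanoTime();
        Thread.sleep(50);
        System.out.println("elapsed ~" + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start) + " ms");
    }
}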
From source file:fr.xebia.management.statistics.ServiceStatistics.java
@ManagedAttribute
public void setSlowInvocationThresholdInMillis(long slowInvocationThresholdInMillis) {
    this.slowInvocationThresholdInNanos = TimeUnit.NANOSECONDS.convert(slowInvocationThresholdInMillis,
            TimeUnit.MILLISECONDS);
}
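The call above reads "convert into nanoseconds from milliseconds". An equivalent and arguably clearer form is toNanos on the source unit; a small sketch of that variant (field and method names are illustrative, not from ServiceStatistics):

import java.util.concurrent.TimeUnit;

public class ThresholdHolder {
    private long slowInvocationThresholdInNanos;

    // TimeUnit.MILLISECONDS.toNanos(x) is equivalent to
    // TimeUnit.NANOSECONDS.convert(x, TimeUnit.MILLISECONDS)
    public void setSlowInvocationThresholdInMillis(long millis) {
        this.slowInvocationThresholdInNanos = TimeUnit.MILLISECONDS.toNanos(millis);
    }

    public boolean isSlow(long durationNanos) {
        return durationNanos > slowInvocationThresholdInNanos;
    }
}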
From source file:com.phodev.http.tools.ConnectionHelper.java
public void tryRelaseConnetions() {
    httpClient.getConnectionManager().closeIdleConnections(CONNETIONS_MAX_IDLE_TIME, TimeUnit.NANOSECONDS);
}
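closeIdleConnections interprets its first argument in the unit given by the second, so CONNETIONS_MAX_IDLE_TIME must genuinely hold a nanosecond value here; a constant defined in milliseconds would silently shrink the idle window by a factor of a million. One defensive style (illustrative names, not from ConnectionHelper) encodes the unit in the constant's name and converts explicitly at the boundary:

import java.util.concurrent.TimeUnit;

public class IdleSweeper {
    // Unit is part of the name, so call sites cannot misread it
    static final long CONNECTIONS_MAX_IDLE_MILLIS = 30_000;

    public static void main(String[] args) {
        // Convert once, explicitly, at the boundary that wants nanoseconds
        long idleNanos = TimeUnit.MILLISECONDS.toNanos(CONNECTIONS_MAX_IDLE_MILLIS);
        System.out.println("idle cutoff: " + idleNanos + " ns");
    }
}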
From source file:com.netflix.genie.core.services.impl.JobCoordinatorServiceImpl.java
private void setRuntimeEnvironment(final String jobId, final Cluster cluster, final Command command,
        final List<Application> applications, final int memory) throws GenieException {
    final long jobEnvironmentStart = System.nanoTime();
    final Map<String, String> tags = MetricsUtils.newSuccessTagsMap();
    try {
        final String clusterId = cluster.getId()
                .orElseThrow(() -> new GenieServerException("Cluster has no id"));
        final String commandId = command.getId()
                .orElseThrow(() -> new GenieServerException("Command has no id"));
        this.jobPersistenceService.updateJobWithRuntimeEnvironment(
                jobId,
                clusterId,
                commandId,
                applications.stream().map(Application::getId)
                        .filter(Optional::isPresent).map(Optional::get).collect(Collectors.toList()),
                memory);
    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(setJobEnvironmentTimerId.withTags(tags))
                .record(System.nanoTime() - jobEnvironmentStart, TimeUnit.NANOSECONDS);
    }
}
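The Genie method above uses the standard measure-then-record idiom: capture System.nanoTime() before the work and record the delta as TimeUnit.NANOSECONDS in a finally block, so failed calls are timed too. A stripped-down sketch of the same idiom; TimerSink is a hypothetical stand-in for the metrics registry, not a Genie or Spectator type:

import java.util.concurrent.TimeUnit;

public class TimedCall {
    // Hypothetical stand-in for a metrics registry timer
    interface TimerSink {
        void record(long amount, TimeUnit unit);
    }

    static void runTimed(Runnable work, TimerSink timer) {
        final long start = System.nanoTime();
        try {
            work.run();
        } finally {
            // Recorded even when work throws, mirroring the finally block above
            timer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        }
    }

    public static void main(String[] args) {
        runTimed(() -> System.out.println("doing work"),
                (amount, unit) -> System.out.println("took " + unit.toMillis(amount) + " ms"));
    }
}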
From source file:com.webtide.jetty.load.generator.jenkins.LoadGeneratorBuilder.java
protected void runProcess(TaskListener taskListener, FilePath workspace, Run<?, ?> run, Launcher launcher,
        Resource resource) throws Exception {

    // -------------------------
    // listeners to get data files
    // -------------------------
    List<Resource.NodeListener> nodeListeners = new ArrayList<>();

    Path resultFilePath = Paths.get(launcher.getChannel() //
            .call(new LoadGeneratorProcessFactory.RemoteTmpFileCreate()));

    ValuesFileWriter valuesFileWriter = new ValuesFileWriter(resultFilePath);
    nodeListeners.add(valuesFileWriter);

    List<LoadGenerator.Listener> loadGeneratorListeners = new ArrayList<>();
    loadGeneratorListeners.add(valuesFileWriter);

    Path statsResultFilePath = Paths.get(launcher.getChannel() //
            .call(new LoadGeneratorProcessFactory.RemoteTmpFileCreate()));

    ArgumentListBuilder args = getArgsProcess(resource, launcher.getComputer(), taskListener, //
            run, statsResultFilePath.toString());

    String monitorUrl = getMonitorUrl(taskListener, run);

    String alpnBootVersion = getAlpnVersion();

    // well a quick marker to say we do not need alpn
    if (getTransport() == LoadGeneratorStarterArgs.Transport.HTTP //
            || getTransport() == LoadGeneratorStarterArgs.Transport.HTTPS) {
        alpnBootVersion = "N/A";
    }

    LOGGER.info("load generator args:" + args.toString());

    new LoadGeneratorProcessRunner().runProcess(taskListener, workspace, launcher, //
            this.jdkName, getCurrentNode(launcher.getComputer()), //
            nodeListeners, loadGeneratorListeners, //
            args.toList(), getJvmExtraArgs(), //
            alpnBootVersion, //
            AlpnBootVersions.getInstance().getJdkVersionAlpnBootVersion());

    String stats = workspace.child(statsResultFilePath.toString()).readToString();

    TimePerPathListener timePerPathListener = new TimePerPathListener(false);
    GlobalSummaryListener globalSummaryListener = new GlobalSummaryListener();

    // this one will use some memory for a long load test!!
    // FIXME find a way to flush that somewhere!!
    DetailledTimeReportListener detailledTimeReportListener = new DetailledTimeReportListener();

    // -----------------------------
    // handle response time reports
    // -----------------------------
    ResponsePerStatus responsePerStatus = new ResponsePerStatus();
    ResponseNumberPerPath responseNumberPerPath = new ResponseNumberPerPath();

    nodeListeners.clear();
    if (this.nodeListeners != null) {
        nodeListeners.addAll(this.nodeListeners);
    }
    nodeListeners.add(responseNumberPerPath);
    nodeListeners.add(timePerPathListener);
    nodeListeners.add(globalSummaryListener);
    nodeListeners.add(detailledTimeReportListener);
    nodeListeners.add(responsePerStatus);

    LOGGER.info("LoadGenerator parsing response result files");

    //-------------------------------------------------
    // time values
    //-------------------------------------------------
    parseTimeValues(workspace, resultFilePath, nodeListeners);

    //-------------------------------------------------
    // Monitor values
    //-------------------------------------------------
    String monitorJson = getMonitorValues(monitorUrl, taskListener);
    taskListener.getLogger().print("monitorJson: " + monitorJson);

    Map<String, Object> monitoringResultMap = null;
    try {
        monitoringResultMap = new ObjectMapper() //
                .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES) //
                .readValue(monitorJson, Map.class);
    } catch (Exception e) {
        LOGGER.warn("skip error parsing json monitoring result");
    }

    // manage results
    SummaryReport summaryReport = new SummaryReport(run.getId());

    timePerPathListener.getResponseTimePerPath().entrySet().stream().forEach(entry -> {
        String path = entry.getKey();
        Histogram histogram = entry.getValue();
        AtomicInteger number = responseNumberPerPath.getResponseNumberPerPath().get(path);
        LOGGER.debug("responseTimePerPath: {} - mean: {}ms - number: {}", //
                path, //
                TimeUnit.NANOSECONDS.toMillis(Math.round(histogram.getMean())), //
                number.get());
        summaryReport.addResponseTimeInformations(path, new CollectorInformations(histogram, //
                TimeUnit.NANOSECONDS, TimeUnit.MILLISECONDS));
    });

    timePerPathListener.getLatencyTimePerPath().entrySet().stream().forEach(entry -> {
        String path = entry.getKey();
        Histogram histogram = entry.getValue();
        AtomicInteger number = responseNumberPerPath.getResponseNumberPerPath().get(path);
        LOGGER.debug("responseTimePerPath: {} - mean: {}ms - number: {}", //
                path, //
                TimeUnit.NANOSECONDS.toMillis(Math.round(histogram.getMean())), //
                number.get());
        summaryReport.addLatencyTimeInformations(path, new CollectorInformations(histogram, //
                TimeUnit.NANOSECONDS, TimeUnit.MILLISECONDS));
    });

    // FIXME calculate score from previous build
    HealthReport healthReport = new HealthReport(30, "text");

    Map<String, List<ResponseTimeInfo>> allResponseInfoTimePerPath = new HashMap<>();

    detailledTimeReportListener.getDetailledLatencyTimeValuesReport().getEntries().stream().forEach(entry -> {
        List<ResponseTimeInfo> responseTimeInfos = allResponseInfoTimePerPath.get(entry.getPath());
        if (responseTimeInfos == null) {
            responseTimeInfos = new ArrayList<>();
            allResponseInfoTimePerPath.put(entry.getPath(), responseTimeInfos);
        }
        responseTimeInfos.add(new ResponseTimeInfo(entry.getTimeStamp(), //
                TimeUnit.NANOSECONDS.toMillis(entry.getTime()), //
                entry.getHttpStatus()));
    });

    run.addAction(new LoadGeneratorBuildAction(healthReport, //
            summaryReport, //
            new CollectorInformations(globalSummaryListener.getResponseTimeHistogram().getIntervalHistogram(), //
                    TimeUnit.NANOSECONDS, TimeUnit.MILLISECONDS), //
            new CollectorInformations(globalSummaryListener.getLatencyTimeHistogram().getIntervalHistogram(), //
                    TimeUnit.NANOSECONDS, TimeUnit.MILLISECONDS), //
            allResponseInfoTimePerPath, run, monitoringResultMap, stats));

    // cleanup
    getCurrentNode(launcher.getComputer()) //
            .getChannel() //
            .call(new LoadGeneratorProcessFactory.DeleteTmpFile(resultFilePath.toString()));

    LOGGER.info("LoadGenerator end");
}
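A pattern worth pulling out of the method above: latencies are recorded into histograms in raw nanoseconds and only converted to milliseconds at reporting time, which preserves precision until display. A minimal sketch of that flow, assuming the org.HdrHistogram library is on the classpath:

import java.util.concurrent.TimeUnit;
import org.HdrHistogram.Histogram;

public class HistogramReport {
    public static void main(String[] args) {
        Histogram histogram = new Histogram(3); // 3 significant digits, auto-resizing
        // Record a few response times in nanoseconds
        histogram.recordValue(TimeUnit.MILLISECONDS.toNanos(12));
        histogram.recordValue(TimeUnit.MILLISECONDS.toNanos(48));
        histogram.recordValue(TimeUnit.MILLISECONDS.toNanos(95));
        // Convert only when printing, as the report code above does
        System.out.println("mean: " + TimeUnit.NANOSECONDS.toMillis(Math.round(histogram.getMean())) + " ms");
        System.out.println("p99:  " + TimeUnit.NANOSECONDS.toMillis(histogram.getValueAtPercentile(99.0)) + " ms");
    }
}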
From source file:org.apache.hadoop.hbase.client.RawAsyncTableImpl.java
@Override
public CompletableFuture<List<Result>> smallScan(Scan scan, int limit) {
    if (!scan.isSmall()) {
        return failedFuture(new IllegalArgumentException("Only small scan is allowed"));
    }
    if (scan.getBatch() > 0 || scan.getAllowPartialResults()) {
        return failedFuture(
                new IllegalArgumentException("Batch and allowPartial is not allowed for small scan"));
    }
    return conn.callerFactory.smallScan().table(tableName).setScan(setDefaultScanConfig(scan)).limit(limit)
            .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).rpcTimeout(readRpcTimeoutNs, TimeUnit.NANOSECONDS)
            .call();
}
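Note the design: the timeouts live in long fields already normalized to nanoseconds (scanTimeoutNs, readRpcTimeoutNs), and the builder accepts a value plus its TimeUnit at the API edge. A sketch of that boundary pattern with a hypothetical builder (not the HBase one):

import java.util.concurrent.TimeUnit;

public class ScanTimeouts {
    private long scanTimeoutNs;

    // Accept any unit at the API edge, normalize to nanoseconds internally
    public ScanTimeouts scanTimeout(long timeout, TimeUnit unit) {
        this.scanTimeoutNs = unit.toNanos(timeout);
        return this;
    }

    public long remainingNs(long startNanos) {
        return scanTimeoutNs - (System.nanoTime() - startNanos);
    }

    public static void main(String[] args) {
        ScanTimeouts t = new ScanTimeouts().scanTimeout(30, TimeUnit.SECONDS);
        System.out.println(t.remainingNs(System.nanoTime()) + " ns remaining");
    }
}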
From source file:MSUmpire.LCMSPeakStructure.LCMSPeakDIAMS2.java
private void PrepareMGF_UnfragmentIon() throws IOException {
    String mgffile4 = FilenameUtils.getFullPath(ParentmzXMLName) + GetQ3Name() + ".mgf.temp";
    // FileWriter mgfWriter4 = new FileWriter(mgffile4, true);
    final BufferedWriter mgfWriter4 = DIAPack.get_file(DIAPack.OutputFile.Mgf_Q3, mgffile4);
    // FileWriter mapwriter3 = new FileWriter(FilenameUtils.getFullPath(ParentmzXMLName)
    //         + FilenameUtils.getBaseName(ParentmzXMLName) + ".ScanClusterMapping_Q3", true);
    final BufferedWriter mapwriter3 = DIAPack.get_file(DIAPack.OutputFile.ScanClusterMapping_Q3,
            FilenameUtils.getFullPath(ParentmzXMLName) + FilenameUtils.getBaseName(ParentmzXMLName)
                    + ".ScanClusterMapping_Q3");

    ArrayList<PseudoMSMSProcessing> ScanList = new ArrayList<>();
    ExecutorService executorPool = Executors.newFixedThreadPool(NoCPUs);
    for (PeakCluster ms2cluster : PeakClusters) {
        ArrayList<PrecursorFragmentPairEdge> frags = UnFragIonClu2Cur.get(ms2cluster.Index);
        if (frags != null && DIA_MZ_Range.getX() <= ms2cluster.TargetMz()
                && DIA_MZ_Range.getY() >= ms2cluster.TargetMz()) {
            // if (DIA_MZ_Range.getX() <= ms2cluster.TargetMz() && DIA_MZ_Range.getY() >= ms2cluster.TargetMz()
            //         && UnFragIonClu2Cur.containsKey(ms2cluster.Index)) {
            // ArrayList<PrecursorFragmentPairEdge> frags = UnFragIonClu2Cur.get(ms2cluster.Index);
            ms2cluster.GroupedFragmentPeaks.addAll(frags);
            PseudoMSMSProcessing mSMSProcessing = new PseudoMSMSProcessing(ms2cluster, parameter);
            executorPool.execute(mSMSProcessing);
            ScanList.add(mSMSProcessing);
        }
    }
    executorPool.shutdown();
    try {
        executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Logger.getRootLogger().info("interrupted..");
    }

    for (PseudoMSMSProcessing mSMSProcessing : ScanList) {
        if (MatchedFragmentMap.size() > 0) {
            mSMSProcessing.RemoveMatchedFrag(MatchedFragmentMap);
        }
        XYPointCollection Scan = mSMSProcessing.GetScan();
        if (Scan != null && Scan.PointCount() > parameter.MinFrag) {
            parentDIA.Q3Scan++;
            // StringBuilder mgfString = new StringBuilder();
            // mgfString.append("BEGIN IONS\n");
            // mgfString.append("PEPMASS=" + mSMSProcessing.Precursorcluster.TargetMz() + "\n");
            // mgfString.append("CHARGE=" + mSMSProcessing.Precursorcluster.Charge + "+\n");
            // mgfString.append("RTINSECONDS=" + mSMSProcessing.Precursorcluster.PeakHeightRT[0] * 60f + "\n");
            // mgfString.append("TITLE=").append(GetQ3Name()).append(".").append(parentDIA.Q3Scan).append(".").append(parentDIA.Q3Scan).append(".").append(mSMSProcessing.Precursorcluster.Charge).append("\n");
            // //mgfString.append("TITLE=" + WindowID + ";ClusterIndex:" + mSMSProcessing.ms2cluster.Index + "\n");
            // //mgfString.append("TITLE=" GetQ3Name() + WindowID + ";ClusterIndex:" + mSMSProcessing.ms2cluster.Index + "\n");
            //
            // for (int i = 0; i < Scan.PointCount(); i++) {
            //     mgfString.append(Scan.Data.get(i).getX()).append(" ").append(Scan.Data.get(i).getY()).append("\n");
            // }
            // mgfString.append("END IONS\n\n");
            // mgfWriter4.write(mgfString.toString());
            mgfWriter4.append("BEGIN IONS\n")
                    .append("PEPMASS=" + mSMSProcessing.Precursorcluster.TargetMz() + "\n")
                    .append("CHARGE=" + mSMSProcessing.Precursorcluster.Charge + "+\n")
                    .append("RTINSECONDS=" + mSMSProcessing.Precursorcluster.PeakHeightRT[0] * 60f + "\n")
                    .append("TITLE=").append(GetQ3Name()).append(".").append(Integer.toString(parentDIA.Q3Scan))
                    .append(".").append(Integer.toString(parentDIA.Q3Scan)).append(".")
                    .append(Integer.toString(mSMSProcessing.Precursorcluster.Charge)).append("\n");
            // mgfWriter4.append("TITLE=" + WindowID + ";ClusterIndex:" + mSMSProcessing.ms2cluster.Index + "\n");
            // mgfWriter4.append("TITLE=" GetQ3Name() + WindowID + ";ClusterIndex:" + mSMSProcessing.ms2cluster.Index + "\n");
            for (int i = 0; i < Scan.PointCount(); i++) {
                mgfWriter4.append(Float.toString(Scan.Data.get(i).getX())).append(" ")
                        .append(Float.toString(Scan.Data.get(i).getY())).append("\n");
            }
            mgfWriter4.append("END IONS\n\n");
            mapwriter3.write(parentDIA.Q3Scan + ";" + WindowID + ";" + mSMSProcessing.Precursorcluster.Index + "\n");
        }
        mSMSProcessing.Precursorcluster.GroupedFragmentPeaks.clear();
    }
    // mgfWriter4.close();
    // mapwriter3.close();
}
From source file:org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.java
private void onError(Throwable error) {
    error = translateException(error);
    if (tries > startLogErrorsCnt) {
        LOG.warn("Call to " + loc.getServerName() + " for scanner id = " + scannerId + " for "
                + loc.getRegionInfo().getEncodedName() + " of " + loc.getRegionInfo().getTable()
                + " failed, tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = "
                + TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs() + " ms",
                error);
    }
    boolean scannerClosed = error instanceof UnknownScannerException
            || error instanceof NotServingRegionException || error instanceof RegionServerStoppedException;
    RetriesExhaustedException.ThrowableWithExtraContext qt =
            new RetriesExhaustedException.ThrowableWithExtraContext(error,
                    EnvironmentEdgeManager.currentTime(), "");
    exceptions.add(qt);
    if (tries >= maxAttempts) {
        completeExceptionally(!scannerClosed);
        return;
    }
    long delayNs;
    if (scanTimeoutNs > 0) {
        long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS;
        if (maxDelayNs <= 0) {
            completeExceptionally(!scannerClosed);
            return;
        }
        delayNs = Math.min(maxDelayNs, getPauseTime(pauseNs, tries - 1));
    } else {
        delayNs = getPauseTime(pauseNs, tries - 1);
    }
    if (scannerClosed) {
        completeWhenError(false);
        return;
    }
    if (error instanceof OutOfOrderScannerNextException || error instanceof ScannerResetException) {
        completeWhenError(true);
        return;
    }
    if (error instanceof DoNotRetryIOException) {
        completeExceptionally(true);
        return;
    }
    tries++;
    retryTimer.newTimeout(t -> call(), delayNs, TimeUnit.NANOSECONDS);
}
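The last line hands the backoff delay, still in nanoseconds, straight to a timer; because the unit travels with the value, no conversion is needed at the call site. A hedged sketch of the same resubmit-with-backoff shape using a standard ScheduledExecutorService instead of HBase's Netty timer (all names and the backoff formula are illustrative):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class RetryDemo {
    private static final ScheduledExecutorService RETRY_TIMER =
            Executors.newSingleThreadScheduledExecutor();

    // Illustrative exponential backoff in nanoseconds: pauseNs * 2^tries
    static long getPauseTimeNs(long pauseNs, int tries) {
        return pauseNs << Math.min(tries, 30);
    }

    static void retryLater(Runnable call, long pauseNs, int tries) {
        // The delay stays in nanoseconds end to end; the TimeUnit argument
        // tells the scheduler how to interpret it
        RETRY_TIMER.schedule(call, getPauseTimeNs(pauseNs, tries), TimeUnit.NANOSECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        retryLater(() -> System.out.println("retrying call"), TimeUnit.MILLISECONDS.toNanos(100), 2);
        Thread.sleep(1000);
        RETRY_TIMER.shutdown();
    }
}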
From source file:edu.stanford.epad.epadws.queries.Dcm4CheeQueries.java
public static DICOMElementList getDICOMElementsFromWADO(String studyUID, String seriesUID, String imageUID,
        SegmentedProperty catTypeProp) {
    String catCode = "";
    String typeCode = "";
    DICOMElementList dicomElementList = new DICOMElementList();
    DICOMElementList dicomElementListNoSkip = new DICOMElementList();
    boolean skipThumbnail = false;
    try {
        File temporaryDICOMFile = File.createTempFile(imageUID, ".tmp");
        int wadoStatusCode = DCM4CHEEUtil.downloadDICOMFileFromWADO(studyUID, seriesUID, imageUID,
                temporaryDICOMFile);
        if (wadoStatusCode == HttpServletResponse.SC_OK) {
            File tempTag = File.createTempFile(imageUID, "_tag.tmp");
            ExecutorService taskExecutor = Executors.newFixedThreadPool(4);
            taskExecutor.execute(new DicomHeadersTask(seriesUID, temporaryDICOMFile, tempTag));
            taskExecutor.shutdown();
            try {
                taskExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
                BufferedReader tagReader = null;
                try {
                    String dicomElementString;
                    FileReader tagFileReader = new FileReader(tempTag.getAbsolutePath());
                    tagReader = new BufferedReader(tagFileReader);
                    skipThumbnail = false;
                    String currentSequence = "";
                    while ((dicomElementString = tagReader.readLine()) != null) {
                        if (dicomElementString.contains("(0009,1110)")) // hard code for now TODO:???
                            skipThumbnail = true;
                        if (dicomElementString.contains("(FFFE,E0DD)"))
                            skipThumbnail = false;
                        int sequence = dicomElementString.indexOf("SQ #-1");
                        if (sequence != -1)
                            currentSequence = dicomElementString.substring(sequence + 7);
                        if (dicomElementString.contains("Sequence Delimitation Item"))
                            currentSequence = "";
                        DICOMElement dicomElement = decodeDICOMElementString(dicomElementString);
                        DICOMElement dicomElementNoSkip = decodeDICOMElementString(dicomElementString);
                        if (dicomElement != null) {
                            if (!skipThumbnail) {
                                dicomElement.parentSequenceName = currentSequence;
                                dicomElementList.addDICOMElement(dicomElement);
                                if (dicomElementString.contains("(0008,0100)")) {
                                    if (dicomElement.parentSequenceName != null
                                            && dicomElement.parentSequenceName.equalsIgnoreCase(
                                                    "Segmented Property Category Code Sequence")) { // category code
                                        catCode = dicomElement.value.trim();
                                        log.info("cat code is " + catCode);
                                    } else if (dicomElement.parentSequenceName != null
                                            && dicomElement.parentSequenceName.equalsIgnoreCase(
                                                    "Segmented Property Type Code Sequence")) { // type code
                                        typeCode = dicomElement.value.trim();
                                        log.info("type code is " + typeCode);
                                    }
                                }
                            }
                            // make a list with all the skip items;
                            // at the end, if the skip is not closed, then use this list
                            else {
                                log.warning("Warning: skip sequence. skipping " + dicomElementString);
                                dicomElementNoSkip.parentSequenceName = currentSequence;
                                dicomElementListNoSkip.addDICOMElement(dicomElementNoSkip);
                            }
                        } else {
                            // too much log
                            // log.warning("Warning: could not decode DICOM element " + dicomElementString + "");
                        }
                    }
                } finally {
                    IOUtils.closeQuietly(tagReader);
                    try {
                        temporaryDICOMFile.delete();
                        tempTag.delete();
                    } catch (Exception x) {
                    }
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                log.warning("DICOM headers task for series " + seriesUID + " interrupted!");
            }
        } else {
            log.warning("Error invoking dcm4chee to get DICOM headers for series " + seriesUID
                    + "; status code=" + wadoStatusCode);
        }
    } catch (IOException e) {
        log.warning("IOException retrieving DICOM headers for image " + imageUID + " in series " + seriesUID, e);
    }
    try {
        if (catTypeProp != null && !catCode.equals("") && !typeCode.equals("")) {
            SegmentedPropertyHelper helper = new SegmentedPropertyHelper();
            SegmentedProperty prop = helper.getProperty(catCode, typeCode);
            if (prop != null) {
                catTypeProp.copyValuesFrom(prop);
            } else {
                log.info("Category-type pair not found");
            }
        }
    } catch (Exception ex) {
        log.warning("Exception in getting category type ", ex);
    }
    if (skipThumbnail) {
        log.warning("End of skip not found, returning noskip data.");
        return dicomElementListNoSkip;
    }
    return dicomElementList;
}
From source file:fr.xebia.management.statistics.ServiceStatistics.java
@ManagedAttribute
public void setVerySlowInvocationThresholdInMillis(long verySlowInvocationThresholdInMillis) {
    this.verySlowInvocationThresholdInNanos = TimeUnit.NANOSECONDS.convert(verySlowInvocationThresholdInMillis,
            TimeUnit.MILLISECONDS);
}
From source file:org.apache.solr.client.solrj.retry.RetryingSolrServer.java
private boolean isShuttingDown() {
    try {
        return isShuttingDown.await(0, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        return true;
    }
}
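await(0, TimeUnit.NANOSECONDS) on a CountDownLatch is a non-blocking poll: it returns immediately with whether the count has already reached zero. A self-contained sketch of the idiom, assuming isShuttingDown is a one-count CountDownLatch as the method above implies:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class ShutdownFlag {
    // Open until shutdown() is called
    private final CountDownLatch isShuttingDown = new CountDownLatch(1);

    public void shutdown() {
        isShuttingDown.countDown();
    }

    public boolean isShuttingDown() {
        try {
            // Zero timeout: never blocks, just reports the latch state
            return isShuttingDown.await(0, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return true;
        }
    }

    public static void main(String[] args) {
        ShutdownFlag flag = new ShutdownFlag();
        System.out.println(flag.isShuttingDown()); // false
        flag.shutdown();
        System.out.println(flag.isShuttingDown()); // true
    }
}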