List of usage examples for java.util.concurrent TimeUnit NANOSECONDS
TimeUnit NANOSECONDS
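All of the examples below revolve around converting nanosecond readings to and from coarser units. As a quick orientation, here is a minimal self-contained sketch (values are arbitrary) of the two conversion styles that recur throughout: the toXxx() shortcuts and the general convert(duration, sourceUnit) form. Note that conversions truncate rather than round.

import java.util.concurrent.TimeUnit;

public class NanosBasics {
    public static void main(String[] args) {
        long nanos = 1_234_567_890L;

        // Shortcut form: convert a nanosecond count down to coarser units.
        System.out.println(TimeUnit.NANOSECONDS.toMillis(nanos));  // 1234 (truncated, not rounded)
        System.out.println(TimeUnit.NANOSECONDS.toSeconds(nanos)); // 1

        // General form: the target unit converts from the given source unit.
        System.out.println(TimeUnit.SECONDS.convert(nanos, TimeUnit.NANOSECONDS)); // 1

        // Going the other way: normalize a coarse value into nanoseconds.
        System.out.println(TimeUnit.MILLISECONDS.toNanos(5)); // 5000000
    }
}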
From source file:ms1quant.MS1Quant.java
/**
 * @param args the command line arguments: MS1Quant parameter file
 */
public static void main(String[] args) throws Exception {
    BufferedReader reader = null;
    try {
        System.out.println(
                "=================================================================================================");
        System.out.println("Umpire MS1 quantification and feature detection analysis (version: "
                + UmpireInfo.GetInstance().Version + ")");
        if (args.length < 3 || !args[1].startsWith("-mode")) {
            System.out.println("command : java -jar -Xmx10G MS1Quant.jar ms1quant.params -mode[1 or 2] [Option]");
            System.out.println("\n-mode");
            System.out.println("\t1:Single file mode--> mzXML_file PepXML_file");
            System.out.println("\t\tEx: -mode1 file1.mzXML file1.pep.xml");
            System.out.println("\t2:Folder mode--> mzXML_Folder PepXML_Folder, all generated csv tables will be merged into a single csv file");
            System.out.println("\t\tEx: -mode2 /data/mzxml/ /data/pepxml/");
            System.out.println("\nOptions");
            System.out.println("\t-C\tNo of concurrent files to be processed (only for folder mode), Ex. -C5, default:1");
            System.out.println("\t-p\tMinimum probability, Ex. -p0.9, default:0.9");
            System.out.println("\t-ID\tDetect identified feature only");
            System.out.println("\t-O\toutput folder, Ex. -O/data/");
            return;
        }
        ConsoleLogger consoleLogger = new ConsoleLogger();
        consoleLogger.SetConsoleLogger(Level.DEBUG);
        consoleLogger.SetFileLogger(Level.DEBUG, FilenameUtils.getFullPath(args[0]) + "ms1quant_debug.log");
        Logger logger = Logger.getRootLogger();
        logger.debug("Command: " + Arrays.toString(args));
        logger.info("MS1Quant version: " + UmpireInfo.GetInstance().Version);
        String parameterfile = args[0];
        logger.info("Parameter file: " + parameterfile);
        File paramfile = new File(parameterfile);
        if (!paramfile.exists()) {
            logger.error("Parameter file " + paramfile.getAbsolutePath()
                    + " cannot be found. The program will exit.");
            return; // fix: the original logged "will exit" but fell through to the FileReader below
        }
        reader = new BufferedReader(new FileReader(paramfile.getAbsolutePath()));
        String line = "";
        InstrumentParameter param = new InstrumentParameter(InstrumentParameter.InstrumentType.TOF5600);
        int NoCPUs = 2;
        int NoFile = 1;
        param.DetermineBGByID = false;
        param.EstimateBG = true;

        //<editor-fold defaultstate="collapsed" desc="Read parameter file">
        while ((line = reader.readLine()) != null) {
            if (!"".equals(line) && !line.startsWith("#")) {
                logger.info(line);
                //System.out.println(line);
                if (line.split("=").length < 2) { // the original repeated this check twice
                    continue;
                }
                String type = line.split("=")[0].trim();
                if (type.startsWith("para.")) {
                    type = type.replace("para.", "SE.");
                }
                String value = line.split("=")[1].trim();
                switch (type) {
                case "Thread": NoCPUs = Integer.parseInt(value); break;
                //<editor-fold defaultstate="collapsed" desc="instrument parameters">
                case "SE.MS1PPM": param.MS1PPM = Float.parseFloat(value); break;
                case "SE.MS2PPM": param.MS2PPM = Float.parseFloat(value); break;
                case "SE.SN": param.SNThreshold = Float.parseFloat(value); break;
                case "SE.MS2SN": param.MS2SNThreshold = Float.parseFloat(value); break;
                case "SE.MinMSIntensity": param.MinMSIntensity = Float.parseFloat(value); break;
                case "SE.MinMSMSIntensity": param.MinMSMSIntensity = Float.parseFloat(value); break;
                case "SE.MinRTRange": param.MinRTRange = Float.parseFloat(value); break;
                case "SE.MaxNoPeakCluster":
                    param.MaxNoPeakCluster = Integer.parseInt(value);
                    param.MaxMS2NoPeakCluster = Integer.parseInt(value);
                    break;
                case "SE.MinNoPeakCluster":
                    param.MinNoPeakCluster = Integer.parseInt(value);
                    param.MinMS2NoPeakCluster = Integer.parseInt(value);
                    break;
                case "SE.MinMS2NoPeakCluster": param.MinMS2NoPeakCluster = Integer.parseInt(value); break;
                case "SE.MaxCurveRTRange": param.MaxCurveRTRange = Float.parseFloat(value); break;
                case "SE.Resolution": param.Resolution = Integer.parseInt(value); break;
                case "SE.RTtol": param.RTtol = Float.parseFloat(value); break;
                case "SE.NoPeakPerMin": param.NoPeakPerMin = Integer.parseInt(value); break;
                case "SE.StartCharge": param.StartCharge = Integer.parseInt(value); break;
                case "SE.EndCharge": param.EndCharge = Integer.parseInt(value); break;
                case "SE.MS2StartCharge": param.MS2StartCharge = Integer.parseInt(value); break;
                case "SE.MS2EndCharge": param.MS2EndCharge = Integer.parseInt(value); break;
                case "SE.NoMissedScan": param.NoMissedScan = Integer.parseInt(value); break;
                case "SE.Denoise": param.Denoise = Boolean.valueOf(value); break;
                case "SE.EstimateBG": param.EstimateBG = Boolean.valueOf(value); break;
                case "SE.RemoveGroupedPeaks": param.RemoveGroupedPeaks = Boolean.valueOf(value); break;
                case "SE.MinFrag": param.MinFrag = Integer.parseInt(value); break;
                case "SE.IsoPattern": param.IsoPattern = Float.valueOf(value); break;
                case "SE.StartRT": param.startRT = Float.valueOf(value); break; // break added: the original fell through into SE.EndRT
                case "SE.EndRT": param.endRT = Float.valueOf(value); break; // break added for consistency
                //</editor-fold>
                }
            }
        }
        //</editor-fold>

        int mode = 1;
        if (args[1].equals("-mode2")) {
            mode = 2;
        } else if (args[1].equals("-mode1")) {
            mode = 1;
        } else {
            logger.error("-mode number not recognized. The program will exit.");
            return; // fix: the original logged "will exit" but kept running
        }

        String mzXML = "";
        String pepXML = "";
        String mzXMLPath = "";
        String pepXMLPath = "";
        File mzXMLfile = null;
        File pepXMLfile = null;
        File mzXMLfolder = null;
        File pepXMLfolder = null;
        int idx = 0;
        if (mode == 1) {
            mzXML = args[2];
            logger.info("Mode1 mzXML file: " + mzXML);
            mzXMLfile = new File(mzXML);
            if (!mzXMLfile.exists()) {
                logger.error("Mode1 mzXML file " + mzXMLfile.getAbsolutePath()
                        + " cannot be found. The program will exit.");
                return;
            }
            pepXML = args[3];
            logger.info("Mode1 pepXML file: " + pepXML);
            pepXMLfile = new File(pepXML);
            if (!pepXMLfile.exists()) {
                logger.error("Mode1 pepXML file " + pepXMLfile.getAbsolutePath()
                        + " cannot be found. The program will exit.");
                return;
            }
            idx = 4;
        } else if (mode == 2) {
            mzXMLPath = args[2];
            logger.info("Mode2 mzXML folder: " + mzXMLPath);
            mzXMLfolder = new File(mzXMLPath);
            if (!mzXMLfolder.exists()) {
                logger.error("Mode2 mzXML folder " + mzXMLfolder.getAbsolutePath()
                        + " does not exist. The program will exit.");
                return;
            }
            pepXMLPath = args[3];
            logger.info("Mode2 pepXML folder: " + pepXMLPath);
            pepXMLfolder = new File(pepXMLPath);
            if (!pepXMLfolder.exists()) {
                logger.error("Mode2 pepXML folder " + pepXMLfolder.getAbsolutePath()
                        + " does not exist. The program will exit.");
                return;
            }
            idx = 4;
        }

        String outputfolder = "";
        float MinProb = 0f;
        for (int i = idx; i < args.length; i++) {
            if (args[i].startsWith("-")) {
                if (args[i].equals("-ID")) {
                    param.TargetIDOnly = true;
                    logger.info("Detect ID feature only: true");
                }
                if (args[i].startsWith("-O")) {
                    outputfolder = args[i].substring(2);
                    logger.info("Output folder: " + outputfolder);
                    File outputfile = new File(outputfolder);
                    // fix: the original tested (!endsWith("\\") | endsWith("/")) with a non-short-circuit "|",
                    // which appended "/" even when the path already ended with a separator
                    if (!outputfolder.endsWith("\\") && !outputfolder.endsWith("/")) {
                        outputfolder += "/";
                    }
                    if (!outputfile.exists()) {
                        outputfile.mkdir();
                    }
                }
                if (args[i].startsWith("-C")) {
                    try {
                        NoFile = Integer.parseInt(args[i].substring(2));
                        logger.info("No of concurrent files: " + NoFile);
                    } catch (Exception ex) {
                        logger.error(args[i] + " is not a correct integer format, will process only one file at a time.");
                    }
                }
                if (args[i].startsWith("-p")) {
                    try {
                        MinProb = Float.parseFloat(args[i].substring(2));
                        logger.info("probability threshold: " + MinProb);
                    } catch (Exception ex) {
                        logger.error(args[i] + " is not a correct format, will use 0 as threshold instead.");
                    }
                }
            }
        }
        reader.close();

        TandemParam tandemparam = new TandemParam(DBSearchParam.SearchInstrumentType.TOF5600);
        PTMManager.GetInstance();

        if (param.TargetIDOnly) {
            param.EstimateBG = false;
            param.ApexDelta = 1.5f;
            param.NoMissedScan = 10;
            param.MiniOverlapP = 0.2f;
            param.RemoveGroupedPeaks = false;
            param.CheckMonoIsotopicApex = false;
            param.DetectByCWT = false;
            param.FillGapByBK = false;
            param.IsoCorrThreshold = -1f;
            param.SmoothFactor = 3;
        }

        if (mode == 1) {
            logger.info("Processing " + mzXMLfile.getAbsolutePath() + "....");
            long time = System.currentTimeMillis();
            LCMSPeakMS1 LCMS1 = new LCMSPeakMS1(mzXMLfile.getAbsolutePath(), NoCPUs);
            LCMS1.SetParameter(param);
            LCMS1.Resume = false;
            if (!param.TargetIDOnly) {
                LCMS1.CreatePeakFolder();
            }
            LCMS1.ExportPeakClusterTable = true;
            if (pepXMLfile.exists()) {
                tandemparam.InteractPepXMLPath = pepXMLfile.getAbsolutePath();
                LCMS1.ParsePepXML(tandemparam, MinProb);
                logger.info("No. of PSMs included: " + LCMS1.IDsummary.PSMList.size());
                logger.info("No. of Peptide ions included: " + LCMS1.IDsummary.GetPepIonList().size());
            }
            if (param.TargetIDOnly) {
                LCMS1.SaveSerializationFile = false;
            }
            if (param.TargetIDOnly || !LCMS1.ReadPeakCluster()) {
                LCMS1.PeakClusterDetection();
            }
            if (pepXMLfile.exists()) {
                LCMS1.AssignQuant(false);
                LCMS1.IDsummary.ExportPepID(outputfolder);
            }
            time = System.currentTimeMillis() - time;
            logger.info(LCMS1.ParentmzXMLName + " processed time:"
                    + String.format("%d hour, %d min, %d sec", TimeUnit.MILLISECONDS.toHours(time),
                            TimeUnit.MILLISECONDS.toMinutes(time)
                                    - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(time)),
                            TimeUnit.MILLISECONDS.toSeconds(time)
                                    - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(time))));
            LCMS1.BaseClearAllPeaks();
            LCMS1.SetSpectrumParser(null);
            LCMS1.IDsummary = null;
            LCMS1 = null;
            System.gc();
        } else if (mode == 2) {
            LCMSID IDsummary = new LCMSID("", "", "");
            logger.info("Parsing all pepXML files in " + pepXMLPath + "....");
            for (File file : pepXMLfolder.listFiles()) {
                if (file.getName().toLowerCase().endsWith("pep.xml")
                        || file.getName().toLowerCase().endsWith("pepxml")) {
                    // The parser registers its results in IDsummary as a constructor side effect.
                    PepXMLParser pepXMLParser = new PepXMLParser(IDsummary, file.getAbsolutePath(), MinProb);
                }
            }
            HashMap<String, LCMSID> LCMSIDMap = IDsummary.GetLCMSIDFileMap();
            ExecutorService executorPool = Executors.newFixedThreadPool(NoFile);
            logger.info("Processing all mzXML files in " + mzXMLPath + "....");
            for (File file : mzXMLfolder.listFiles()) {
                if (file.getName().toLowerCase().endsWith("mzxml")) {
                    LCMSID id = LCMSIDMap.get(FilenameUtils.getBaseName(file.getName()));
                    if (id == null || id.PSMList == null) {
                        logger.warn("No IDs found in :" + FilenameUtils.getBaseName(file.getName())
                                + ". Quantification for this file is skipped");
                        continue;
                    }
                    if (!id.PSMList.isEmpty()) {
                        MS1TargetQuantThread thread = new MS1TargetQuantThread(file, id, NoCPUs, outputfolder, param);
                        executorPool.execute(thread);
                    }
                }
            }
            LCMSIDMap.clear();
            LCMSIDMap = null;
            IDsummary = null;
            executorPool.shutdown();
            try {
                executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                logger.info("interrupted..");
            }
            if (outputfolder == null || outputfolder.equals("")) { // fix: "||" instead of the non-short-circuit "|"
                outputfolder = mzXMLPath;
            }
            logger.info("Merging PSM files..");
            File output = new File(outputfolder);
            FileWriter writer = new FileWriter(output.getAbsolutePath() + "/PSM_merge.csv");
            boolean header = false;
            for (File csvfile : output.listFiles()) {
                if (csvfile.getName().toLowerCase().endsWith("_psms.csv")) {
                    BufferedReader outreader = new BufferedReader(new FileReader(csvfile));
                    String outline = outreader.readLine();
                    if (!header) {
                        writer.write(outline + "\n");
                        header = true;
                    }
                    while ((outline = outreader.readLine()) != null) {
                        writer.write(outline + "\n");
                    }
                    outreader.close();
                    csvfile.delete();
                }
            }
            writer.close();
        }
        logger.info("MS1 quant module is complete.");
    } catch (Exception e) {
        Logger.getRootLogger().error(ExceptionUtils.getStackTrace(e));
        throw e;
    }
}
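The mode-2 branch above blocks on executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS), a common idiom for "wait indefinitely" since Long.MAX_VALUE nanoseconds is roughly 292 years. A stripped-down sketch of that shutdown pattern (pool size and task are placeholders):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitDemo {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pool.execute(() -> System.out.println("processing one file..."));
        pool.shutdown(); // stop accepting new tasks, let queued ones finish
        try {
            // Long.MAX_VALUE nanoseconds: effectively "wait forever".
            pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt status
        }
    }
}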
From source file:io.netty.handler.timeout.IdleStateHandler.java
/**
 * Return the writerIdleTime that was given when this instance was created, in milliseconds.
 */
public long getWriterIdleTimeInMillis() {
    return TimeUnit.NANOSECONDS.toMillis(writerIdleTimeNanos);
}
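The pattern here, storing a duration as a nanosecond long and converting on the way out, keeps all internal arithmetic in a single unit. A minimal sketch of the same idea outside Netty (class and field names are illustrative):

import java.util.concurrent.TimeUnit;

public class IdleConfig {
    private final long writerIdleTimeNanos;

    public IdleConfig(long writerIdleTime, TimeUnit unit) {
        // Normalize to nanoseconds once on the way in...
        this.writerIdleTimeNanos = unit.toNanos(writerIdleTime);
    }

    public long getWriterIdleTimeInMillis() {
        // ...and convert once on the way out.
        return TimeUnit.NANOSECONDS.toMillis(writerIdleTimeNanos);
    }

    public static void main(String[] args) {
        System.out.println(new IdleConfig(30, TimeUnit.SECONDS).getWriterIdleTimeInMillis()); // 30000
    }
}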
From source file:org.apache.solr.client.solrj.impl.HttpClusterStateProvider.java
@Override
public Set<String> getLiveNodes() {
    if (liveNodes == null) {
        throw new RuntimeException("We don't know of any live_nodes to fetch the"
                + " latest live_nodes information from. "
                + "If you think your Solr cluster is up and is accessible,"
                + " you could try re-creating a new CloudSolrClient using working"
                + " solrUrl(s) or zkHost(s).");
    }
    if (TimeUnit.SECONDS.convert(System.nanoTime() - liveNodesTimestamp, TimeUnit.NANOSECONDS) > getCacheTimeout()) {
        for (String nodeName : liveNodes) {
            try (HttpSolrClient client = new HttpSolrClient.Builder()
                    .withBaseSolrUrl(Utils.getBaseUrlForNodeName(nodeName, urlScheme))
                    .withHttpClient(httpClient).build()) {
                Set<String> liveNodes = fetchLiveNodes(client);
                this.liveNodes = liveNodes;
                liveNodesTimestamp = System.nanoTime();
                return liveNodes;
            } catch (Exception e) {
                log.warn("Attempt to fetch live_nodes from "
                        + Utils.getBaseUrlForNodeName(nodeName, urlScheme) + " failed.", e);
            }
        }
        throw new RuntimeException("Tried fetching live_nodes using all the node names we knew of, i.e. "
                + liveNodes + ". However, succeeded in obtaining the cluster state from none of them. "
                + "If you think your Solr cluster is up and is accessible,"
                + " you could try re-creating a new CloudSolrClient using working"
                + " solrUrl(s) or zkHost(s).");
    } else {
        return liveNodes; // cached copy is fresh enough
    }
}
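The staleness test above compares an elapsed System.nanoTime() delta, converted to seconds, against a timeout. The same check in isolation (the timeout constant stands in for getCacheTimeout()):

import java.util.concurrent.TimeUnit;

public class StalenessCheck {
    private static final long CACHE_TIMEOUT_SECONDS = 5; // assumed stand-in for getCacheTimeout()
    private static String cached;
    private static long cachedAtNanos;

    static boolean isStale() {
        long ageSeconds = TimeUnit.SECONDS.convert(System.nanoTime() - cachedAtNanos, TimeUnit.NANOSECONDS);
        return cached == null || ageSeconds > CACHE_TIMEOUT_SECONDS;
    }

    public static void main(String[] args) {
        System.out.println(isStale()); // true: nothing cached yet
        cached = "live_nodes";
        cachedAtNanos = System.nanoTime(); // record when the value was fetched
        System.out.println(isStale()); // false: cached copy is fresh enough
    }
}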
From source file:com.netflix.genie.web.services.impl.HttpFileTransferImplTest.java
/**
 * Make sure we can't get a file if the output location is a directory.
 *
 * @throws GenieException On Error
 * @throws IOException    On Error
 */
@Test(expected = ResourceAccessException.class)
public void cantGetWithDirectoryAsOutput() throws GenieException, IOException {
    this.server.expect(MockRestRequestMatchers.requestTo(TEST_URL))
            .andExpect(MockRestRequestMatchers.method(HttpMethod.GET))
            .andRespond(MockRestResponseCreators.withSuccess("junk".getBytes(Charset.forName("UTF-8")),
                    MediaType.APPLICATION_OCTET_STREAM));
    try {
        this.httpFileTransfer.getFile(TEST_URL, this.temporaryFolder.getRoot().getCanonicalPath());
    } finally {
        Mockito.verify(this.downloadTimerId, Mockito.times(1))
                .withTags(MetricsUtils.newFailureTagsMapForException(new ResourceAccessException("test")));
        Mockito.verify(this.downloadTimer, Mockito.times(1))
                .record(Mockito.anyLong(), Mockito.eq(TimeUnit.NANOSECONDS));
    }
}
From source file:com.keedio.nifi.processors.azure.blob.FetchAzureBlobObject.java
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final long startNanos = System.nanoTime();
    final String blobObject = context.getProperty(AZURE_STORAGE_BLOB_OBJECT)
            .evaluateAttributeExpressions(flowFile).getValue();
    final AzureBlobConnectionService connectionService = context.getProperty(AZURE_STORAGE_CONTROLLER_SERVICE)
            .asControllerService(AzureBlobConnectionService.class);
    CloudBlobContainer blobContainer;
    try {
        blobContainer = connectionService.getCloudBlobContainerReference();
    } catch (URISyntaxException | InvalidKeyException | StorageException e) {
        getLogger().error("Failed to retrieve Azure Object for {}; routing to failure",
                new Object[] { flowFile, e });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    CloudBlobWrapper blockBlob = null;
    try {
        blockBlob = new CloudBlobWrapper("overwrite", blobObject, blobContainer);
        //blobContainer.getBlockBlobReference(blobObject);
    } catch (Exception e) {
        getLogger().error("Caught exception while retrieving blob object reference", e);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    InputStream blockBlobStream = null;
    ByteArrayOutputStream outputStream = null;
    try {
        outputStream = new ByteArrayOutputStream();
        blockBlob.download(outputStream);
        outputStream.flush();
        blockBlobStream = new ByteArrayInputStream(outputStream.toByteArray());
        flowFile = session.importFrom(blockBlobStream, flowFile);
    } catch (IOException | StorageException e) {
        getLogger().error("Caught exception while retrieving blob object", e);
    } finally {
        IOUtils.closeQuietly(blockBlobStream);
        IOUtils.closeQuietly(outputStream);
    }
    final Map<String, String> attributes = new HashMap<>();
    copyAttributes(connectionService.getContainerName(), blobObject, blockBlob, attributes);
    if (!attributes.isEmpty()) {
        flowFile = session.putAllAttributes(flowFile, attributes);
    }
    session.transfer(flowFile, REL_SUCCESS);
    final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    getLogger().info("Successfully retrieved Blob Object for {} in {} millis; routing to success",
            new Object[] { flowFile, transferMillis });
    session.getProvenanceReporter().fetch(flowFile, blockBlob.toString(), transferMillis);
}
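The timing wrapper used for the provenance report is a common shape: capture System.nanoTime() before the work, then convert the delta with TimeUnit.NANOSECONDS.toMillis for logging. A minimal sketch (the sleep stands in for the blob download):

import java.util.concurrent.TimeUnit;

public class TransferTiming {
    public static void main(String[] args) throws InterruptedException {
        final long startNanos = System.nanoTime();
        Thread.sleep(100); // stand-in for the download being measured
        final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        // nanoTime() is not wall-clock time, but differences between two readings are valid durations.
        System.out.println("Successfully retrieved object in " + transferMillis + " millis");
    }
}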
From source file:com.bitplan.mjpegstreamer.MJpegRunnerBase.java
/**
 * Get the total elapsed time.
 *
 * @return the total elapsed time in milliseconds
 */
public long elapsedTimeMillisecs() {
    long elapsed = this.now - this.firstFrameNanoTime;
    long result = TimeUnit.MILLISECONDS.convert(elapsed, TimeUnit.NANOSECONDS);
    return result;
}
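TimeUnit.MILLISECONDS.convert(elapsed, TimeUnit.NANOSECONDS) is equivalent to TimeUnit.NANOSECONDS.toMillis(elapsed); the toMillis form is usually the more readable of the two. A self-contained check:

import java.util.concurrent.TimeUnit;

public class ConvertEquivalence {
    public static void main(String[] args) {
        long elapsed = 5_500_000_000L; // 5.5 seconds expressed in nanoseconds
        long a = TimeUnit.MILLISECONDS.convert(elapsed, TimeUnit.NANOSECONDS);
        long b = TimeUnit.NANOSECONDS.toMillis(elapsed);
        System.out.println(a + " == " + b); // 5500 == 5500
    }
}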
From source file:org.apache.hadoop.hbase.client.AsyncHBaseAdmin.java
private <T> AdminRequestCallerBuilder<T> newAdminCaller() {
    return this.connection.callerFactory.<T>adminRequest()
            .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
            .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
            .pause(pauseNs, TimeUnit.NANOSECONDS)
            .maxAttempts(maxAttempts)
            .startLogErrorsCnt(startLogErrorsCnt);
}
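Holding timeouts as long nanosecond fields (rpcTimeoutNs, pauseNs) and passing TimeUnit.NANOSECONDS explicitly at each call site avoids unit mix-ups. The same style with a standard-library API (the delay value is arbitrary):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class NanoTimeouts {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        long delayNanos = TimeUnit.MILLISECONDS.toNanos(200); // normalize once, near the configuration
        // The unit travels with the value, so the call site is unambiguous.
        scheduler.schedule(() -> System.out.println("fired"), delayNanos, TimeUnit.NANOSECONDS);
        scheduler.shutdown(); // already-scheduled delayed tasks still run by default
    }
}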
From source file:org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore.java
@Override
public String writeBlob(InputStream stream) throws IOException {
    boolean threw = true;
    try {
        long start = System.nanoTime();
        checkNotNull(stream);
        DataRecord dr = writeStream(stream);
        String id = getBlobId(dr);
        threw = false;
        stats.uploaded(System.nanoTime() - start, TimeUnit.NANOSECONDS, dr.getLength());
        stats.uploadCompleted(id);
        return id;
    } catch (DataStoreException e) {
        throw new IOException(e);
    } finally {
        // The DataStore does not close the stream internally,
        // so close the stream explicitly.
        Closeables.close(stream, threw);
    }
}
From source file:com.netflix.genie.web.jobs.workflow.impl.JobKickoffTask.java
/**
 * {@inheritDoc}
 */
@Override
public void executeTask(@NotNull final Map<String, Object> context) throws GenieException, IOException {
    final long start = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        final JobExecutionEnvironment jobExecEnv = (JobExecutionEnvironment) context
                .get(JobConstants.JOB_EXECUTION_ENV_KEY);
        final String jobWorkingDirectory = jobExecEnv.getJobWorkingDir().getCanonicalPath();
        final JobRequest jobRequest = jobExecEnv.getJobRequest();
        final String user = jobRequest.getUser();
        final Writer writer = (Writer) context.get(JobConstants.WRITER_KEY);
        final String jobId = jobRequest.getId()
                .orElseThrow(() -> new GeniePreconditionException("No job id found. Unable to continue."));
        log.info("Starting Job Kickoff Task for job {}", jobId);

        // At this point all contents are written to the run script, so we call an explicit flush
        // and close to write the contents to the file before we execute it.
        try {
            writer.flush();
            writer.close();
        } catch (IOException e) {
            throw new GenieServerException("Failed to execute job", e);
        }

        // Create user, if enabled
        if (isUserCreationEnabled) {
            createUser(user, jobRequest.getGroup().orElse(null));
        }

        final List<String> command = new ArrayList<>();

        // If the OS is Linux, use setsid to launch the process so that the entire process tree
        // is launched in a process group id which is the same as the pid of the parent process
        if (SystemUtils.IS_OS_LINUX) {
            command.add("setsid");
        }

        // Set the ownership to the user and run as the user, if enabled
        if (isRunAsUserEnabled) {
            changeOwnershipOfDirectory(jobWorkingDirectory, user);

            // This is needed because the genie.log file is still generated as the user running the Genie system.
            makeDirGroupWritable(jobWorkingDirectory + "/genie/logs");
            command.add("sudo");
            command.add("-u");
            command.add(user);
        }

        final String runScript = jobWorkingDirectory + JobConstants.FILE_PATH_DELIMITER
                + JobConstants.GENIE_JOB_LAUNCHER_SCRIPT;
        command.add(runScript);

        // Cannot convert to executor because it does not provide an api to get process id.
        final ProcessBuilder pb = new ProcessBuilder(command).directory(jobExecEnv.getJobWorkingDir())
                .redirectOutput(new File(jobExecEnv.getJobWorkingDir() + JobConstants.GENIE_LOG_PATH))
                .redirectError(new File(jobExecEnv.getJobWorkingDir() + JobConstants.GENIE_LOG_PATH));

        // Check if the file can be executed. This is to fix an issue where execution of the run
        // script fails because the file may be used by some other program.
        canExecute(runScript);
        try {
            final Process process = pb.start();
            final int processId = this.getProcessId(process);
            final Instant timeout = Instant.now().plus(
                    jobRequest.getTimeout().orElse(JobRequest.DEFAULT_TIMEOUT_DURATION), ChronoUnit.SECONDS);
            final JobExecution jobExecution = new JobExecution.Builder(this.hostname).withId(jobId)
                    .withProcessId(processId).withCheckDelay(jobExecEnv.getCommand().getCheckDelay())
                    .withTimeout(timeout).withMemory(jobExecEnv.getMemory()).build();
            context.put(JobConstants.JOB_EXECUTION_DTO_KEY, jobExecution);
        } catch (final IOException ie) {
            throw new GenieServerException("Unable to start command " + String.valueOf(command), ie);
        }
        log.info("Finished Job Kickoff Task for job {}", jobId);
        MetricsUtils.addSuccessTags(tags);
    } catch (final Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.getRegistry().timer(JOB_KICKOFF_TASK_TIMER_NAME, tags)
                .record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}
From source file:com.netflix.genie.web.services.impl.S3FileTransferImpl.java
/**
 * {@inheritDoc}
 */
@Override
public long getLastModifiedTime(final String path) throws GenieException {
    final long start = System.nanoTime();
    final long lastModTime;
    final Set<Tag> tags = Sets.newHashSet();
    try {
        final AmazonS3URI s3Uri = this.getS3Uri(path);
        try {
            final ObjectMetadata o = this.s3ClientFactory.getClient(s3Uri)
                    .getObjectMetadata(s3Uri.getBucket(), s3Uri.getKey());
            lastModTime = o.getLastModified().getTime();
        } catch (final Exception ase) {
            final String message = String.format("Failed getting the metadata of the s3 file %s", path);
            log.error(message);
            throw new GenieServerException(message, ase);
        }
        MetricsUtils.addSuccessTags(tags);
    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(GET_METADATA_TIMER_NAME, tags)
                .record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
    return lastModTime;
}
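The try/finally timing pattern in the last few examples records elapsed nanoseconds straight into a metrics timer and lets the registry handle unit conversion. A sketch of that shape using Micrometer, which matches the registry.timer(...).record(..., TimeUnit.NANOSECONDS) calls above (registry setup and timer name are illustrative):

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import java.util.concurrent.TimeUnit;

public class TimedTransfer {
    public static void main(String[] args) throws Exception {
        MeterRegistry registry = new SimpleMeterRegistry();
        final long start = System.nanoTime();
        try {
            Thread.sleep(100); // stand-in for the operation being measured
        } finally {
            // The timer accepts raw nanoseconds; the finally block ensures the
            // duration is recorded on both the success and failure paths.
            registry.timer("s3.get.metadata.timer")
                    .record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        }
        System.out.println(registry.get("s3.get.metadata.timer").timer().count()); // 1
    }
}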