List of usage examples for java.util.concurrent.TimeUnit.NANOSECONDS
TimeUnit NANOSECONDS
From source file:com.netflix.genie.web.services.impl.HttpFileTransferImplTest.java
/**
 * Make sure that there is no implementation of the putFile method.
 *
 * @throws GenieException on error
 */
@Test(expected = UnsupportedOperationException.class)
public void cantPutFile() throws GenieException {
    try {
        final String file = UUID.randomUUID().toString();
        this.httpFileTransfer.putFile(file, file);
    } finally {
        Mockito.verify(this.uploadTimerId, Mockito.times(1)).withTags(
                MetricsUtils.newFailureTagsMapForException(new UnsupportedOperationException("test")));
        Mockito.verify(this.uploadTimer, Mockito.times(1)).record(Mockito.anyLong(),
                Mockito.eq(TimeUnit.NANOSECONDS));
    }
}
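The assertion of interest above is only that the timer was invoked with TimeUnit.NANOSECONDS, not what the recorded duration was. A minimal, self-contained sketch of that verification pattern, assuming Mockito on the classpath and a hypothetical Timer interface standing in for the real metrics timer:

import java.util.concurrent.TimeUnit;
import org.mockito.Mockito;

public class TimerVerificationSketch {

    // Hypothetical stand-in for a metrics timer; not the Genie/Spectator class itself.
    interface Timer {
        void record(long amount, TimeUnit unit);
    }

    public static void main(String[] args) {
        Timer timer = Mockito.mock(Timer.class);

        // Code under test measures elapsed time and records it in nanoseconds.
        long start = System.nanoTime();
        timer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);

        // Verify the unit without pinning down the exact duration.
        Mockito.verify(timer, Mockito.times(1))
                .record(Mockito.anyLong(), Mockito.eq(TimeUnit.NANOSECONDS));
    }
}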
From source file:gobblin.service.modules.orchestration.Orchestrator.java
public void orchestrate(Spec spec) throws Exception {
    long startTime = System.nanoTime();
    if (spec instanceof FlowSpec) {
        Map<Spec, SpecExecutorInstanceProducer> specExecutorInstanceMap = specCompiler.compileFlow(spec);
        if (specExecutorInstanceMap.isEmpty()) {
            _log.warn("Cannot determine an executor to run on for Spec: " + spec);
            return;
        }
        // Schedule all compiled JobSpecs on their respective Executor
        for (Map.Entry<Spec, SpecExecutorInstanceProducer> specsToExecute : specExecutorInstanceMap.entrySet()) {
            // Run this spec on selected executor
            SpecExecutorInstanceProducer producer = null;
            try {
                producer = specsToExecute.getValue();
                Spec jobSpec = specsToExecute.getKey();
                _log.info(String.format("Going to orchestrate JobSpec: %s on Executor: %s", jobSpec, producer));
                producer.addSpec(jobSpec);
            } catch (Exception e) {
                _log.error("Cannot successfully setup spec: " + specsToExecute.getKey() + " on executor: "
                        + producer + " for flow: " + spec, e);
            }
        }
    } else {
        Instrumented.markMeter(this.flowOrchestrationFailedMeter);
        throw new RuntimeException("Spec not of type FlowSpec, cannot orchestrate: " + spec);
    }
    Instrumented.markMeter(this.flowOrchestrationSuccessFulMeter);
    Instrumented.updateTimer(this.flowOrchestrationTimer, System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
}
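The timing idiom here is to bracket the work between two System.nanoTime() calls and hand the raw difference, together with TimeUnit.NANOSECONDS, to a metrics timer. A minimal sketch of just that idiom (the printf stands in for Gobblin's Instrumented.updateTimer call; it illustrates intent, not that library's API):

import java.util.concurrent.TimeUnit;

public class ElapsedTimeSketch {
    public static void main(String[] args) throws InterruptedException {
        long startTime = System.nanoTime();
        Thread.sleep(25); // stand-in for the orchestration work
        long elapsedNanos = System.nanoTime() - startTime;

        // A timer would receive (elapsedNanos, TimeUnit.NANOSECONDS);
        // here we just convert for display.
        System.out.printf("took %d ms (%d ns)%n",
                TimeUnit.NANOSECONDS.toMillis(elapsedNanos), elapsedNanos);
    }
}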
From source file:com.mgmtp.jfunk.core.JFunk.java
private void shutDownExecutorService(final ExecutorService execService) {
    try {
        execService.shutdownNow();
        execService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException ex) {
        LOG.error("Script execution was interrupted.", ex);
    }
}
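ExecutorService.awaitTermination has no untimed overload, so passing Long.MAX_VALUE nanoseconds (roughly 292 years) is the conventional way to say "wait indefinitely" through the timed API. A runnable sketch of the same shutdown sequence:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitForeverSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("task ran"));
        pool.shutdown(); // or shutdownNow() to also cancel queued tasks, as above
        // Effectively an unbounded wait: Long.MAX_VALUE ns is about 292 years.
        pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    }
}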
From source file:com.linecorp.armeria.server.ServerTest.java
@Test(timeout = idleTimeoutMillis * 5)
public void testIdleTimeoutByContentSent() throws Exception {
    try (Socket socket = new Socket()) {
        socket.setSoTimeout((int) (idleTimeoutMillis * 4));
        socket.connect(server().activePort().get().localAddress());
        PrintWriter outWriter = new PrintWriter(socket.getOutputStream(), false);
        outWriter.print("POST / HTTP/1.1\r\n");
        outWriter.print("Connection: Keep-Alive\r\n");
        outWriter.print("\r\n");
        outWriter.flush();
        long lastWriteNanos = System.nanoTime();
        // read until EOF
        while (socket.getInputStream().read() != -1) {
            continue;
        }
        long elapsedTimeMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - lastWriteNanos,
                TimeUnit.NANOSECONDS);
        assertThat(elapsedTimeMillis, is(greaterThanOrEqualTo(idleTimeoutMillis)));
    }
}
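TimeUnit.MILLISECONDS.convert(duration, TimeUnit.NANOSECONDS) reads as "convert this many nanoseconds into milliseconds": the receiver is the target unit and the second argument names the source unit. A small worked example:

import java.util.concurrent.TimeUnit;

public class ConvertSketch {
    public static void main(String[] args) {
        long elapsedNanos = 1_234_567_890L; // about 1.23 seconds
        long millis = TimeUnit.MILLISECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS);
        System.out.println(millis); // prints 1234; convert() truncates toward zero
    }
}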
From source file:com.netflix.genie.web.services.loadbalancers.script.ScriptLoadBalancer.java
/**
 * {@inheritDoc}
 */
@Override
public Cluster selectCluster(@Nonnull @NonNull @NotEmpty final Set<Cluster> clusters,
        @Nonnull @NonNull final JobRequest jobRequest) throws GenieException {
    final long selectStart = System.nanoTime();
    log.debug("Called");
    final Set<Tag> tags = Sets.newHashSet();
    try {
        if (this.isConfigured.get() && this.script.get() != null) {
            log.debug("Evaluating script for job {}", jobRequest.getId().orElse("without id"));
            final Bindings bindings = new SimpleBindings();
            // TODO: For now, for backwards compatibility with balancer scripts, continue writing
            //       Clusters out in V3 format. Change to V4 once it stabilizes a bit more.
            bindings.put(CLUSTERS_BINDING, this.mapper.writeValueAsString(
                    clusters.stream().map(DtoConverters::toV3Cluster).collect(Collectors.toSet())));
            bindings.put(JOB_REQUEST_BINDING, this.mapper.writeValueAsString(jobRequest));

            // Run as callable and timeout after the configured timeout length
            final String clusterId = this.asyncTaskExecutor
                    .submit(() -> (String) this.script.get().eval(bindings))
                    .get(this.timeoutLength.get(), TimeUnit.MILLISECONDS);

            // Find the cluster if not null
            if (clusterId != null) {
                for (final Cluster cluster : clusters) {
                    if (clusterId.equals(cluster.getId())) {
                        tags.add(Tag.of(MetricsConstants.TagKeys.STATUS, STATUS_TAG_FOUND));
                        return cluster;
                    }
                }
                log.warn("Script returned a cluster not in the input list: {}", clusterId);
            } else {
                log.debug("Script returned null");
            }
        } else {
            tags.add(Tag.of(MetricsConstants.TagKeys.STATUS, STATUS_TAG_NOT_CONFIGURED));
            return null;
        }
        tags.add(Tag.of(MetricsConstants.TagKeys.STATUS, STATUS_TAG_NOT_FOUND));
        // Defer to any subsequent load balancer in the chain
        return null;
    } catch (final Exception e) {
        tags.add(Tag.of(MetricsConstants.TagKeys.STATUS, STATUS_TAG_FAILED));
        tags.add(Tag.of(MetricsConstants.TagKeys.EXCEPTION_CLASS, e.getClass().getCanonicalName()));
        log.error("Unable to execute script due to {}", e.getMessage(), e);
        return null;
    } finally {
        this.registry.timer(SELECT_TIMER_NAME, tags)
                .record(System.nanoTime() - selectStart, TimeUnit.NANOSECONDS);
    }
}
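Recording the timer in the finally block guarantees the selection metric captures the found, not-found, and failure paths alike, with the outcome carried in the tags. A minimal sketch of that pattern, assuming the Micrometer API that the Tag and registry calls above resemble (the metric name and tag values are illustrative):

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;

public class TimerInFinallySketch {
    public static void main(String[] args) {
        MeterRegistry registry = new SimpleMeterRegistry();
        Set<Tag> tags = new HashSet<>();
        final long start = System.nanoTime();
        try {
            // ... do the selection work, tagging the outcome ...
            tags.add(Tag.of("status", "found"));
        } finally {
            // One timer per distinct tag set; records on every exit path.
            registry.timer("cluster.select", tags)
                    .record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        }
    }
}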
From source file:com.mattring.nifi.nats.bundle.GetNats.java
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final long start = System.nanoTime();
    final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
    final String topic = context.getProperty(TOPIC).getValue();
    String flowFileContent = null;
    int numMsgs = 0;
    if (batchSize < 2) {
        flowFileContent = inbox.poll();
        if (flowFileContent != null) {
            numMsgs = 1;
        }
    } else {
        final String demarcator = context.getProperty(MESSAGE_DEMARCATOR).getValue()
                .replace("\\n", "\n").replace("\\r", "\r").replace("\\t", "\t");
        final List<String> batch = new LinkedList<>();
        numMsgs = inbox.drainTo(batch, batchSize);
        if (!batch.isEmpty()) {
            flowFileContent = StringUtils.join(batch, demarcator);
        }
    }
    if (flowFileContent != null) {
        FlowFile flowFile = session.create();
        final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());
        final byte[] flowFileContentBytes = flowFileContent.getBytes(charset);
        flowFile = session.append(flowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                out.write(flowFileContentBytes);
            }
        });
        final Map<String, String> attributes = new HashMap<>();
        attributes.put("nats.topic", topic);
        attributes.put("nats.numMsgs", Integer.toString(numMsgs));
        flowFile = session.putAllAttributes(flowFile, attributes);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        session.getProvenanceReporter().receive(flowFile, "nats://" + topic,
                "Received " + numMsgs + " NATS messages", millis);
        getLogger().info("Successfully received {} from NATS with {} messages in {} millis",
                new Object[] { flowFile, numMsgs, millis });
        session.transfer(flowFile, REL_SUCCESS);
    }
}
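TimeUnit.NANOSECONDS.toMillis(...) is the shorthand equivalent of the TimeUnit.MILLISECONDS.convert(..., TimeUnit.NANOSECONDS) call in the Armeria example, just read from the source unit's side:

import java.util.concurrent.TimeUnit;

public class ToMillisSketch {
    public static void main(String[] args) {
        final long start = System.nanoTime();
        // ... receive and assemble the batch ...
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        // Same result as TimeUnit.MILLISECONDS.convert(elapsed, TimeUnit.NANOSECONDS).
        System.out.println("elapsed ms: " + millis);
    }
}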
From source file:com.ning.metrics.serialization.writer.DiskSpoolEventWriter.java
@Override
public synchronized void write(final Event event) throws IOException {
    if (!acceptsEvents) {
        log.warn("Writer not ready, discarding event: {}", event);
        return;
    }
    if (currentOutputter == null) {
        currentOutputFile = new File(tmpSpoolDirectory, String.format("%d.bin", fileId.incrementAndGet()));
        final FileOutputStream outputStream = codec.getFileOutputStream(currentOutputFile);
        if (eventSerializer == null) {
            currentOutputter = ObjectOutputterFactory.createObjectOutputter(outputStream, syncType,
                    syncBatchSize);
        } else {
            currentOutputter = ObjectOutputterFactory.createObjectOutputter(outputStream, syncType,
                    syncBatchSize, eventSerializer);
        }
    }
    try {
        final long startTime = System.nanoTime();
        currentOutputter.writeObject(event);
        writeTimer.update(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
    } catch (RuntimeException e) {
        eventSerializationFailures.incrementAndGet();
        //noinspection AccessToStaticFieldLockedOnInstance
        throw new IOException("unable to serialize event", e);
    } catch (IOException e) {
        eventSerializationFailures.incrementAndGet();
        // If we got bad data, the stream may be in a bad state (i.e. jackson might be unable to
        // append more data). It's safer to close the stream and reopen a new one.
        try {
            forceCommit();
        } catch (IOException ignored) {
            // We want to throw the original one
        }
        //noinspection AccessToStaticFieldLockedOnInstance
        throw new IOException("unable to serialize event", e);
    }
}
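The writeTimer.update(duration, TimeUnit.NANOSECONDS) call matches the Dropwizard (Codahale) Metrics Timer API. A minimal sketch under that assumption:

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

import java.util.concurrent.TimeUnit;

public class DropwizardTimerSketch {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();
        Timer writeTimer = registry.timer("event-writes"); // illustrative metric name
        final long start = System.nanoTime();
        // ... serialize the event ...
        writeTimer.update(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        System.out.println("recorded writes: " + writeTimer.getCount()); // 1
    }
}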
From source file:com.sworddance.taskcontrol.FutureListenerProcessor.java
/**
 * @param futureListener
 */
private void notifyListener(FutureListener<RV> futureListener) {
    RV value = null;
    try {
        if (this.returnedValue != null) {
            value = this.returnedValue;
        }
        if (value == null && getReturnedFuture() != null) {
            value = getReturnedFuture().get(1, TimeUnit.NANOSECONDS);
        }
    } catch (Exception e) {
        // HACK need to handle exceptions.
    }
    try {
        futureListener.futureSet(getReturnedFuture(), value);
    } catch (Exception e) {
        // HACK need to handle exceptions. but don't want to interfere with other listeners
        getLog().warn("while doing futureSet", e);
    }
}
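Calling Future.get with a 1-nanosecond timeout makes it behave like a poll: it returns immediately if the result is already available and throws TimeoutException otherwise, so the notifier never blocks on an unfinished future for any meaningful length of time. A runnable sketch:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class NearNonBlockingGetSketch {
    public static void main(String[] args) throws Exception {
        Future<String> done = CompletableFuture.completedFuture("value");
        Future<String> pending = new CompletableFuture<>();

        System.out.println(done.get(1, TimeUnit.NANOSECONDS)); // prints "value"
        try {
            pending.get(1, TimeUnit.NANOSECONDS);
        } catch (TimeoutException expected) {
            System.out.println("not ready yet"); // the 1 ns timeout elapsed
        }
    }
}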
From source file:MSUmpire.LCMSPeakStructure.LCMSPeakDIAMS2.java
private void PrepareMGF_MS1Cluster(LCMSPeakMS1 ms1lcms) throws IOException {
    ArrayList<PseudoMSMSProcessing> ScanList = new ArrayList<>();
    ExecutorService executorPool = Executors.newFixedThreadPool(NoCPUs);
    for (PeakCluster ms1cluster : ms1lcms.PeakClusters) {
        final ArrayList<PrecursorFragmentPairEdge> frags = FragmentsClu2Cur.get(ms1cluster.Index);
        if (frags != null && DIA_MZ_Range.getX() <= ms1cluster.GetMaxMz()
                && DIA_MZ_Range.getY() >= ms1cluster.TargetMz()) {
            ms1cluster.GroupedFragmentPeaks.addAll(frags);
            if (Last_MZ_Range == null || Last_MZ_Range.getY() < ms1cluster.TargetMz()) {
                PseudoMSMSProcessing mSMSProcessing = new PseudoMSMSProcessing(ms1cluster, parameter);
                ScanList.add(mSMSProcessing);
            }
        }
    }
    for (PseudoMSMSProcessing proc : ScanList) {
        executorPool.execute(proc);
    }
    executorPool.shutdown();
    try {
        executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Logger.getRootLogger().info("interrupted..");
    }

    String mgffile = FilenameUtils.getFullPath(ParentmzXMLName) + GetQ1Name() + ".mgf.temp";
    String mgffile2 = FilenameUtils.getFullPath(ParentmzXMLName) + GetQ2Name() + ".mgf.temp";
    final BufferedWriter mapwriter = DIAPack.get_file(DIAPack.OutputFile.ScanClusterMapping_Q1,
            FilenameUtils.getFullPath(ParentmzXMLName) + FilenameUtils.getBaseName(ParentmzXMLName)
                    + ".ScanClusterMapping_Q1"),
            mapwriter2 = DIAPack.get_file(DIAPack.OutputFile.ScanClusterMapping_Q2,
                    FilenameUtils.getFullPath(ParentmzXMLName) + FilenameUtils.getBaseName(ParentmzXMLName)
                            + ".ScanClusterMapping_Q2"),
            mgfWriter = DIAPack.get_file(DIAPack.OutputFile.Mgf_Q1, mgffile),
            mgfWriter2 = DIAPack.get_file(DIAPack.OutputFile.Mgf_Q2, mgffile2);

    for (PseudoMSMSProcessing mSMSProcessing : ScanList) {
        if (MatchedFragmentMap.size() > 0) {
            mSMSProcessing.RemoveMatchedFrag(MatchedFragmentMap);
        }
        XYPointCollection Scan = mSMSProcessing.GetScan();
        if (Scan != null && Scan.PointCount() > parameter.MinFrag) {
            if (mSMSProcessing.Precursorcluster.IsotopeComplete(3)) {
                final BufferedWriter mgfString = mgfWriter;
                parentDIA.Q1Scan++;
                mgfString.append("BEGIN IONS\n");
                mgfString.append("PEPMASS=")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.TargetMz())).append("\n");
                mgfString.append("CHARGE=")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.Charge)).append("+\n");
                mgfString.append("RTINSECONDS=")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.PeakHeightRT[0] * 60f))
                        .append("\n");
                mgfString.append("TITLE=").append(GetQ1Name()).append(".")
                        .append(String.valueOf(parentDIA.Q1Scan)).append(".")
                        .append(String.valueOf(parentDIA.Q1Scan)).append(".")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.Charge)).append("\n");
                for (int i = 0; i < Scan.PointCount(); i++) {
                    mgfString.append(String.valueOf(Scan.Data.get(i).getX())).append(" ")
                            .append(String.valueOf(Scan.Data.get(i).getY())).append("\n");
                }
                mgfString.append("END IONS\n\n");
                mapwriter.write(parentDIA.Q1Scan + "_" + mSMSProcessing.Precursorcluster.Index + "\n");
            } else {
                final BufferedWriter mgfString = mgfWriter2;
                parentDIA.Q2Scan++;
                mgfString.append("BEGIN IONS\n");
                mgfString.append("PEPMASS=")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.TargetMz())).append("\n");
                mgfString.append("CHARGE=")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.Charge)).append("+\n");
                mgfString.append("RTINSECONDS=")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.PeakHeightRT[0] * 60f))
                        .append("\n");
                mgfString.append("TITLE=").append(GetQ2Name()).append(".")
                        .append(String.valueOf(parentDIA.Q2Scan)).append(".")
                        .append(String.valueOf(parentDIA.Q2Scan)).append(".")
                        .append(String.valueOf(mSMSProcessing.Precursorcluster.Charge)).append("\n");
                for (int i = 0; i < Scan.PointCount(); i++) {
                    mgfString.append(String.valueOf(Scan.Data.get(i).getX())).append(" ")
                            .append(String.valueOf(Scan.Data.get(i).getY())).append("\n");
                }
                mgfString.append("END IONS\n\n");
                mapwriter2.write(parentDIA.Q2Scan + "_" + mSMSProcessing.Precursorcluster.Index + "\n");
            }
        }
        mSMSProcessing.Precursorcluster.GroupedFragmentPeaks.clear();
    }
}