Example usage for java.util.concurrent TimeUnit NANOSECONDS

Introduction

This page collects example usages of java.util.concurrent.TimeUnit.NANOSECONDS from open source projects.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
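
As a quick, standalone illustration of those relationships, here is a minimal sketch (not taken from any of the projects below; the class name NanosecondsDemo is invented for this example):

import java.util.concurrent.TimeUnit;

public class NanosecondsDemo {
    public static void main(String[] args) {
        // One microsecond is 1,000 nanoseconds.
        System.out.println(TimeUnit.MICROSECONDS.toNanos(1)); // 1000
        // Conversions to a coarser unit truncate: 1,500 ns reports as 1 us.
        System.out.println(TimeUnit.NANOSECONDS.toMicros(1500)); // 1
        // convert() converts *into* the receiver's unit: 2 ms -> 2,000,000 ns.
        System.out.println(TimeUnit.NANOSECONDS.convert(2, TimeUnit.MILLISECONDS)); // 2000000
    }
}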

Usage

From source file: com.netflix.genie.core.services.impl.S3FileTransferImplUnitTests.java

/**
 * Test the getFile method for invalid s3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test(expected = GenieServerException.class)
public void testGetFileMethodInvalidS3Path() throws GenieException {
    final String invalidS3Path = "filepath";
    try {
        s3FileTransfer.getFile(invalidS3Path, LOCAL_PATH);
    } finally {
        Mockito.verify(this.downloadTimer, Mockito.times(1)).record(Mockito.anyLong(),
                Mockito.eq(TimeUnit.NANOSECONDS));
        Mockito.verify(this.downloadTimerId, Mockito.times(1)).withTags(tagsCaptor.capture());
        Assert.assertEquals(FAILURE_TAGS, tagsCaptor.getValue());
    }
}

From source file: org.apache.gobblin.service.modules.orchestration.Orchestrator.java

public void orchestrate(Spec spec) throws Exception {
    // Add below waiting because TopologyCatalog and FlowCatalog service can be launched at the same time
    this.topologyCatalog.get().getInitComplete().await();

    long startTime = System.nanoTime();
    if (spec instanceof FlowSpec) {
        Map<String, String> flowMetadata = getFlowMetadata((FlowSpec) spec);
        TimingEvent flowCompilationTimer = this.eventSubmitter.isPresent()
                ? this.eventSubmitter.get().getTimingEvent(TimingEvent.FlowTimings.FLOW_COMPILED)
                : null;
        Dag<JobExecutionPlan> jobExecutionPlanDag = specCompiler.compileFlow(spec);

        if (jobExecutionPlanDag == null || jobExecutionPlanDag.isEmpty()) {
            Instrumented.markMeter(this.flowOrchestrationFailedMeter);
            _log.warn("Cannot determine an executor to run on for Spec: " + spec);
            return;
        }

        flowMetadata.putIfAbsent(TimingEvent.FlowEventConstants.FLOW_EXECUTION_ID_FIELD,
                jobExecutionPlanDag.getNodes().get(0).getValue().getJobSpec().getConfigAsProperties()
                        .getProperty(ConfigurationKeys.FLOW_EXECUTION_ID_KEY));

        if (flowCompilationTimer != null) {
            flowCompilationTimer.stop(flowMetadata);
        }

        // Schedule all compiled JobSpecs on their respective Executor
        for (Dag.DagNode<JobExecutionPlan> dagNode : jobExecutionPlanDag.getNodes()) {
            JobExecutionPlan jobExecutionPlan = dagNode.getValue();

            // Run this spec on selected executor
            SpecProducer producer = null;
            try {
                producer = jobExecutionPlan.getSpecExecutor().getProducer().get();
                Spec jobSpec = jobExecutionPlan.getJobSpec();

                if (!((JobSpec) jobSpec).getConfig().hasPath(ConfigurationKeys.FLOW_EXECUTION_ID_KEY)) {
                    _log.warn("JobSpec does not contain flowExecutionId.");
                }

                Map<String, String> jobMetadata = getJobMetadata(flowMetadata, jobExecutionPlan);
                _log.info(String.format("Going to orchestrate JobSpec: %s on Executor: %s", jobSpec, producer));

                TimingEvent jobOrchestrationTimer = this.eventSubmitter.isPresent()
                        ? this.eventSubmitter.get().getTimingEvent(TimingEvent.LauncherTimings.JOB_ORCHESTRATED)
                        : null;

                producer.addSpec(jobSpec);

                if (jobOrchestrationTimer != null) {
                    jobOrchestrationTimer.stop(jobMetadata);
                }
            } catch (Exception e) {
                _log.error("Cannot successfully setup spec: " + jobExecutionPlan.getJobSpec() + " on executor: "
                        + producer + " for flow: " + spec, e);
            }
        }
    } else {
        Instrumented.markMeter(this.flowOrchestrationFailedMeter);
        throw new RuntimeException("Spec not of type FlowSpec, cannot orchestrate: " + spec);
    }
    Instrumented.markMeter(this.flowOrchestrationSuccessFulMeter);
    Instrumented.updateTimer(this.flowOrchestrationTimer, System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
}
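
The Orchestrator example above uses a pattern that recurs throughout this page: capture System.nanoTime() at the start, compute the delta at the end, and pass that delta to a timer together with TimeUnit.NANOSECONDS so the metrics library knows which unit it is receiving. Stripped of the Gobblin specifics, a minimal sketch of the pattern (the MetricsTimer interface here is a hypothetical stand-in for APIs such as Instrumented.updateTimer or a Micrometer/Dropwizard timer):

import java.util.concurrent.TimeUnit;

// Hypothetical stand-in for a real metrics timer API.
interface MetricsTimer {
    void record(long amount, TimeUnit unit);
}

class TimedWork {
    static void runTimed(Runnable work, MetricsTimer timer) {
        long start = System.nanoTime();
        try {
            work.run();
        } finally {
            // A System.nanoTime() delta is already in nanoseconds,
            // so NANOSECONDS is the unit to report alongside it.
            timer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        }
    }
}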

From source file: kieker.tools.bridge.cli.CLIServerMain.java

/**
 * Execute the bridge service.
 *
 * @param connector
 *
 * @throws ConnectorDataTransmissionException
 *             if an error occurred during connector operations
 */
private static void runService(final Configuration configuration, final IServiceConnector connector)
        throws ConnectorDataTransmissionException {
    // setup service container
    container = new ServiceContainer(configuration, connector, false);

    if (verbose) {
        final String updateIntervalParam = commandLine.getOptionValue(CMD_VERBOSE);
        container.setListenerUpdateInterval((updateIntervalParam != null) ? Long.parseLong(updateIntervalParam) // NOCS
                : ServiceContainer.DEFAULT_LISTENER_UPDATE_INTERVAL); // NOCS
        container.addListener(new IServiceListener() {
            @Override
            public void handleEvent(final long count, final String message) {
                CLIServerMain.getLog().info("Received " + count + " records");
            }
        });
    }

    if (stats) {
        startTime = System.nanoTime();
    }

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                CLIServerMain.shutdown();
            } catch (final ConnectorDataTransmissionException e) {
                CLIServerMain.getLog().error("Graceful shutdown failed.");
                CLIServerMain.getLog().error("Cause " + e.getMessage());
            }
        }
    });

    // run the service
    container.run();

    if (stats) {
        deltaTime = System.nanoTime() - startTime;
    }
    if (verbose) {
        CLIServerMain.getLog().info("Server stopped.");
    }
    if (stats) {
        CLIServerMain.getLog().info("Execution time: " + deltaTime + " ns  "
                + TimeUnit.SECONDS.convert(deltaTime, TimeUnit.NANOSECONDS) + " s");
        CLIServerMain.getLog().info("Time per records: " + (deltaTime / container.getRecordCount()) + " ns/r");
        CLIServerMain.getLog().info("Records per second: " + (container.getRecordCount()
                / (double) TimeUnit.SECONDS.convert(deltaTime, TimeUnit.NANOSECONDS)));
    }
}
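
One caveat about the statistics block above: TimeUnit.SECONDS.convert(deltaTime, TimeUnit.NANOSECONDS) truncates toward zero, so a run shorter than one second yields a zero denominator and the records-per-second line prints Infinity. A minimal sketch of a variant that keeps fractional seconds (standalone code, not part of Kieker; the class name is invented):

import java.util.concurrent.TimeUnit;

class SafeThroughput {
    static double recordsPerSecond(long recordCount, long deltaNanos) {
        // Convert to fractional seconds instead of truncating to whole seconds.
        double seconds = deltaNanos / (double) TimeUnit.SECONDS.toNanos(1);
        return seconds > 0 ? recordCount / seconds : 0.0;
    }

    public static void main(String[] args) {
        // 500 records in 250 ms -> 2000.0 records/second.
        System.out.println(recordsPerSecond(500, TimeUnit.MILLISECONDS.toNanos(250)));
    }
}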

From source file: org.wso2.carbon.metrics.reporter.JDBCReporterTest.java

@Test
public void reportsNanoseconds() {
    long timestamp = TimeUnit.NANOSECONDS.convert(clock.getTime(), TimeUnit.MILLISECONDS);
    assertEquals(timestamp, reportGauge(TimeUnit.NANOSECONDS));
}
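
The test above highlights the argument order of convert(), which is easy to get backwards: the receiver is the target unit, and the second argument names the unit of the value being passed in. A standalone sketch (the class name is invented for this example):

import java.util.concurrent.TimeUnit;

public class ConvertDirection {
    public static void main(String[] args) {
        long millis = 42L;
        // Receiver = target unit, argument = source unit: 42 ms -> 42,000,000 ns.
        long nanos = TimeUnit.NANOSECONDS.convert(millis, TimeUnit.MILLISECONDS);
        // Equivalent shorthand that reads in the other direction.
        long nanos2 = TimeUnit.MILLISECONDS.toNanos(millis);
        System.out.println(nanos == nanos2); // true
    }
}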

From source file: org.apache.solr.client.solrj.retry.RetryingSolrServer.java

private void handleException(SolrRequest request, String exceptionTopLevelMsg, String exceptionRootCauseMsg,
        String requestKey, long requestDuration) {
    String timePrefix = "time.";
    metrics.updateTimer(ROOT_PREFIX + "time", requestDuration, TimeUnit.NANOSECONDS);
    String failedRequestsPrefix = ROOT_PREFIX + "fail.";
    metrics.updateTimer(failedRequestsPrefix + "time", requestDuration, TimeUnit.NANOSECONDS);
    metrics.updateTimer(failedRequestsPrefix + timePrefix + "req." + requestKey, requestDuration,
            TimeUnit.NANOSECONDS);
    metrics.updateTimer(
            failedRequestsPrefix + timePrefix + "xreq.top." + requestKey + "." + exceptionTopLevelMsg,
            requestDuration, TimeUnit.NANOSECONDS);
    metrics.updateTimer(
            failedRequestsPrefix + timePrefix + "xreq.root." + requestKey + "." + exceptionRootCauseMsg,
            requestDuration, TimeUnit.NANOSECONDS);
    metrics.updateTimer(failedRequestsPrefix + timePrefix + "top." + exceptionTopLevelMsg, requestDuration,
            TimeUnit.NANOSECONDS);
    metrics.updateTimer(failedRequestsPrefix + timePrefix + "root." + exceptionRootCauseMsg, requestDuration,
            TimeUnit.NANOSECONDS);
}

From source file: com.ebay.pulsar.sessionizer.cluster.SessionizerLoopbackRingListener.java

@Override
public void update(Map<JetstreamTopic, ConsistentHashing<EventConsumerInfo>> map,
        Map<Long, EventConsumerInfo> allConsumers) {
    lock.writeLock().lock();
    try {
        Date changedDate = new Date();
        List<Long> existedConsumers = new ArrayList<Long>(activeConsumers);
        currentState = map;
        boolean hasNewConsumer = false;
        List<Long> newConsumers = new ArrayList<Long>();

        for (EventConsumerInfo info : allConsumers.values()) {
            if (info.getAdvertisement().getInterestedTopics() != null
                    && info.getAdvertisement().getInterestedTopics().contains(loopbackTopic)) {
                if (!activeConsumers.contains(info.getAdvertisement().getConsumerId())) {
                    hasNewConsumer = true;
                }
                newConsumers.add(info.getAdvertisement().getConsumerId());
            }
        }

        if (activeConsumers.size() == newConsumers.size() && !hasNewConsumer) {
            // no change, just return
            return;
        }
        activeConsumers.clear();
        activeConsumers.addAll(newConsumers);
        LOGGER.info("Consistent hashing changed: {}", activeConsumers);

        Collections.sort(activeConsumers);
        ConsistentHashing<EventConsumerInfo> hashing = map.get(loopbackTopic);
        ConsistentHashing<EventConsumerInfo> chState = null;
        if (hashing != null) {
            chState = new ConsistentHashing<EventConsumerInfo>(hashing.getHashFunction(),
                    hashing.getNumHashesPerEntry(), hashing.getSpreadFactor());
            for (int i = 0, t = activeConsumers.size(); i < t; i++) {
                EventConsumerInfo point = allConsumers.get(activeConsumers.get(i));
                chState.add(point);
            }
        }

        lastModifiedTime = changedDate;
        ConsistentHashingState newHead = new ConsistentHashingState(chState, existedConsumers,
                lastModifiedTime);
        newHead.previous = head;
        ConsistentHashingState oldHead = head;
        int count = 0;
        leavingCluster = !activeConsumers.contains(getHostId());
        if (oldHead != null) {
            ConsistentHashingState c = oldHead;
            long currentNanoTime = System.nanoTime();
            while (c.previous != null) {
                if (count > 200) {
                    LOGGER.warn(
                            "Too much consistent hashing history (exceed 100 in 30 minutes), ignore oldest one");
                    c.previous = null;
                    break;
                }
                if ((currentNanoTime - c.effectiveTime) > TimeUnit.NANOSECONDS
                        .convert(getMaxIdleTime() + GRACE_PERIOD, TimeUnit.MILLISECONDS)) {
                    // Leave 10 minutes buffer;
                    c.previous = null;
                    break;
                } else {
                    c = c.previous;
                    // c.previous may be set to null by another thread.
                    count++;
                }
            }
        }
        head = newHead;
    } finally {
        lock.writeLock().unlock();
    }
}

From source file: org.apache.hadoop.hbase.mttr.IntegrationTestMTTR.java

public void run(Callable<Boolean> monkeyCallable, String testName) throws Exception {
    int maxIters = util.getHBaseClusterInterface().isDistributedCluster() ? 10 : 3;

    // Array to keep track of times.
    ArrayList<TimingResult> resultPuts = new ArrayList<TimingResult>(maxIters);
    ArrayList<TimingResult> resultScan = new ArrayList<TimingResult>(maxIters);
    ArrayList<TimingResult> resultAdmin = new ArrayList<TimingResult>(maxIters);
    long start = System.nanoTime();

    // We're going to try this multiple times
    for (int fullIterations = 0; fullIterations < maxIters; fullIterations++) {
        // Create and start executing a callable that will kill the servers
        Future<Boolean> monkeyFuture = executorService.submit(monkeyCallable);

        // Pass that future to the timing Callables.
        Future<TimingResult> putFuture = executorService.submit(new PutCallable(monkeyFuture));
        Future<TimingResult> scanFuture = executorService.submit(new ScanCallable(monkeyFuture));
        Future<TimingResult> adminFuture = executorService.submit(new AdminCallable(monkeyFuture));

        Future<Boolean> loadFuture = executorService.submit(new LoadCallable(monkeyFuture));

        monkeyFuture.get();
        loadFuture.get();

        // Get the values from the futures.
        TimingResult putTime = putFuture.get();
        TimingResult scanTime = scanFuture.get();
        TimingResult adminTime = adminFuture.get();

        // Store the times to display later.
        resultPuts.add(putTime);
        resultScan.add(scanTime);
        resultAdmin.add(adminTime);

        // Wait some time for everything to settle down.
        Thread.sleep(5000L);
    }

    long runtimeMs = TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS);

    Objects.ToStringHelper helper = Objects.toStringHelper("MTTRResults").add("putResults", resultPuts)
            .add("scanResults", resultScan).add("adminResults", resultAdmin).add("totalRuntimeMs", runtimeMs)
            .add("name", testName);

    // Log the info
    LOG.info(helper.toString());
}

From source file: org.wso2.carbon.metrics.reporter.JDBCReporterTest.java

@SuppressWarnings("rawtypes")
private long reportGauge(TimeUnit timestampUnit) {
    final Gauge gauge = mock(Gauge.class);
    when(gauge.getValue()).thenReturn(1);

    JDBCReporter reporter = JDBCReporter.forRegistry(registry).convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.NANOSECONDS).convertTimestampTo(timestampUnit).withClock(clock)
            .filter(MetricFilter.ALL).build(SOURCE, dataSource);

    reporter.report(map("gauge", gauge), this.<Counter>map(), this.<Histogram>map(), this.<Meter>map(),
            this.<Timer>map());
    List<Map<String, Object>> result = template.queryForList("SELECT * FROM METRIC_GAUGE");
    assertEquals(1, result.size());
    return (Long) result.get(0).get("TIMESTAMP");
}

From source file: com.netflix.genie.web.services.impl.JobSpecificationServiceImpl.java

private Cluster selectCluster(final String id, final JobRequest jobRequest, final Set<Cluster> clusters)
        throws GenieException {
    final long start = System.nanoTime();
    final Set<Tag> timerTags = Sets.newHashSet();
    final Set<Tag> counterTags = Sets.newHashSet();
    try {
        final Cluster cluster;
        if (clusters.isEmpty()) {
            this.noClusterFoundCounter.increment();
            throw new GeniePreconditionException(
                    "No cluster/command combination found for the given criteria. Unable to continue");
        } else if (clusters.size() == 1) {
            cluster = clusters.stream().findFirst()
                    .orElseThrow(() -> new GenieServerException("Couldn't get cluster when size was one"));
        } else {
            cluster = this.selectClusterWithLoadBalancer(counterTags, clusters, id, jobRequest);
        }

        log.info("Selected cluster {} for job {}", cluster.getId(), id);
        MetricsUtils.addSuccessTags(timerTags);
        return cluster;
    } catch (final Throwable t) {
        MetricsUtils.addFailureTagsWithException(timerTags, t);
        throw t;
    } finally {
        this.registry.timer(SELECT_CLUSTER_TIMER_NAME, timerTags).record(System.nanoTime() - start,
                TimeUnit.NANOSECONDS);
    }
}

From source file: org.apache.jena.hadoop.rdf.stats.RdfStats.java

private boolean runJob(Job job) throws Throwable {
    System.out.println("Submitting Job " + job.getJobName());
    long start = System.nanoTime();
    try {
        job.submit();
        if (job.monitorAndPrintJob()) {
            System.out.println("Job " + job.getJobName() + " succeeded");
            return true;
        } else {
            System.out.println("Job " + job.getJobName() + " failed");
            return false;
        }
    } catch (Throwable e) {
        System.out.println("Unexpected failure in Job " + job.getJobName());
        throw e;
    } finally {
        long end = System.nanoTime();
        System.out.println("Job " + job.getJobName() + " finished after "
                + String.format("%,d milliseconds", TimeUnit.NANOSECONDS.toMillis(end - start)));
        System.out.println();
    }
}