Example usage for java.util.concurrent TimeUnit NANOSECONDS

List of usage examples for java.util.concurrent TimeUnit NANOSECONDS

Introduction

This page collects usage examples for java.util.concurrent TimeUnit NANOSECONDS from open-source projects.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
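
For orientation, here is a minimal self-contained sketch (class name ours) of converting to and from NANOSECONDS:

import java.util.concurrent.TimeUnit;

public class NanosecondsDemo {
    public static void main(String[] args) {
        // one millisecond expressed in nanoseconds
        long nanos = TimeUnit.MILLISECONDS.toNanos(1); // 1000000
        // conversions truncate toward zero rather than round
        long millis = TimeUnit.NANOSECONDS.toMillis(1999999); // 1
        // convert(duration, sourceUnit) is the general form of the toXxx shortcuts
        long micros = TimeUnit.MICROSECONDS.convert(2500, TimeUnit.NANOSECONDS); // 2
        System.out.println(nanos + " " + millis + " " + micros);
    }
}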

Usage

From source file:com.netflix.genie.web.services.impl.HttpFileTransferImplTest.java

/**
 * Make sure can get the last update time of a file.
 *
 * @throws GenieException On error
 */
@Test
public void canGetLastModifiedTime() throws GenieException {
    final long lastModified = 28424323000L;
    final HttpHeaders headers = new HttpHeaders();
    headers.setLastModified(lastModified);
    this.server.expect(MockRestRequestMatchers.requestTo(TEST_URL))
            .andExpect(MockRestRequestMatchers.method(HttpMethod.HEAD))
            .andRespond(MockRestResponseCreators.withSuccess().headers(headers));

    Assert.assertThat(this.httpFileTransfer.getLastModifiedTime(TEST_URL), Matchers.is(lastModified));
    this.server.verify();
    Mockito.verify(this.metadataTimerId, Mockito.times(1)).withTags(MetricsUtils.newSuccessTagsMap());
    Mockito.verify(this.metadataTimer, Mockito.times(1)).record(Mockito.anyLong(),
            Mockito.eq(TimeUnit.NANOSECONDS));
}
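
The assertions above pin down a common pattern: measure with System.nanoTime() and hand the raw delta to a timer in NANOSECONDS. A minimal sketch of the production-side pattern being verified, assuming a Micrometer-style registry (registry, the tag values, and doTransfer are our stand-ins):

long start = System.nanoTime();
try {
    doTransfer(); // stand-in for the HEAD request being timed
} finally {
    // record the raw elapsed nanos; the timer normalizes units internally
    registry.timer("http.file.transfer", "status", "success")
            .record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
}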

From source file:org.apache.cassandra.db.compaction.CompactionTask.java

/**
 * For internal use and testing only.  The rest of the system should go through the submit* methods,
 * which are properly serialized.
 * Caller is in charge of marking/unmarking the sstables as compacting.
 */
protected void runMayThrow() throws Exception {
    // The collection of sstables passed may be empty (but not null); even if
    // it is not empty, it may compact down to nothing if all rows are deleted.
    assert transaction != null;

    if (transaction.originals().isEmpty())
        return;

    // Note that the current compaction strategy is not necessarily the one this task was created under.
    // This should be harmless; see comments to CFS.maybeReloadCompactionStrategy.
    AbstractCompactionStrategy strategy = cfs.getCompactionStrategy();

    if (DatabaseDescriptor.isSnapshotBeforeCompaction())
        cfs.snapshotWithoutFlush(System.currentTimeMillis() + "-compact-" + cfs.name);

    // note that we need to do a rough estimate early if we can fit the compaction on disk - this is pessimistic, but
    // since we might remove sstables from the compaction in checkAvailableDiskSpace it needs to be done here
    long expectedWriteSize = cfs.getExpectedCompactedFileSize(transaction.originals(), compactionType);
    long earlySSTableEstimate = Math.max(1, expectedWriteSize / strategy.getMaxSSTableBytes());
    checkAvailableDiskSpace(earlySSTableEstimate, expectedWriteSize);

    // sanity check: all sstables must belong to the same cfs
    assert !Iterables.any(transaction.originals(), new Predicate<SSTableReader>() {
        @Override
        public boolean apply(SSTableReader sstable) {
            return !sstable.descriptor.cfname.equals(cfs.name);
        }
    });

    UUID taskId = SystemKeyspace.startCompaction(cfs, transaction.originals());

    // new sstables from flush can be added during a compaction, but only the compaction can remove them,
    // so in our single-threaded compaction world this is a valid way of determining if we're compacting
    // all the sstables (that existed when we started)
    StringBuilder ssTableLoggerMsg = new StringBuilder("[");
    for (SSTableReader sstr : transaction.originals()) {
        ssTableLoggerMsg.append(String.format("%s:level=%d, ", sstr.getFilename(), sstr.getSSTableLevel()));
    }
    ssTableLoggerMsg.append("]");
    String taskIdLoggerMsg = taskId == null ? UUIDGen.getTimeUUID().toString() : taskId.toString();
    logger.debug("Compacting ({}) {}", taskIdLoggerMsg, ssTableLoggerMsg);

    long start = System.nanoTime();

    long totalKeysWritten = 0;

    long estimatedKeys = 0;
    try (CompactionController controller = getCompactionController(transaction.originals())) {
        Set<SSTableReader> actuallyCompact = Sets.difference(transaction.originals(),
                controller.getFullyExpiredSSTables());

        SSTableFormat.Type sstableFormat = getFormatType(transaction.originals());

        List<SSTableReader> newSStables;
        AbstractCompactionIterable ci;

        // SSTableScanners need to be closed before markCompactedSSTablesReplaced call as scanners contain references
        // to both ifile and dfile and SSTR will throw deletion errors on Windows if it tries to delete before scanner is closed.
        // See CASSANDRA-8019 and CASSANDRA-8399
        try (Refs<SSTableReader> refs = Refs.ref(actuallyCompact);
                AbstractCompactionStrategy.ScannerList scanners = strategy.getScanners(actuallyCompact)) {
            ci = new CompactionIterable(compactionType, scanners.scanners, controller, sstableFormat, taskId);
            try (CloseableIterator<AbstractCompactedRow> iter = ci.iterator()) {
                if (collector != null)
                    collector.beginCompaction(ci);
                long lastCheckObsoletion = start;

                if (!controller.cfs.getCompactionStrategy().isActive)
                    throw new CompactionInterruptedException(ci.getCompactionInfo());

                try (CompactionAwareWriter writer = getCompactionAwareWriter(cfs, transaction,
                        actuallyCompact)) {
                    estimatedKeys = writer.estimatedKeys();
                    while (iter.hasNext()) {
                        if (ci.isStopRequested())
                            throw new CompactionInterruptedException(ci.getCompactionInfo());

                        try (AbstractCompactedRow row = iter.next()) {
                            if (writer.append(row))
                                totalKeysWritten++;

                            if (System.nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L)) {
                                controller.maybeRefreshOverlaps();
                                lastCheckObsoletion = System.nanoTime();
                            }
                        }
                    }

                    // don't replace old sstables yet, as we need to mark the compaction finished in the system table
                    newSStables = writer.finish();
                } finally {
                    // point of no return -- the new sstables are live on disk; next we'll start deleting the old ones
                    // (in replaceCompactedSSTables)
                    if (taskId != null)
                        SystemKeyspace.finishCompaction(taskId);

                    if (collector != null)
                        collector.finishCompaction(ci);
                }
            }
        }

        // log a bunch of statistics about the result and save to system table compaction_history
        long dTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        long startsize = SSTableReader.getTotalBytes(transaction.originals());
        long endsize = SSTableReader.getTotalBytes(newSStables);
        double ratio = (double) endsize / (double) startsize;

        StringBuilder newSSTableNames = new StringBuilder();
        for (SSTableReader reader : newSStables)
            newSSTableNames.append(reader.descriptor.baseFilename()).append(",");

        double mbps = dTime > 0 ? (double) endsize / (1024 * 1024) / ((double) dTime / 1000) : 0;
        long totalSourceRows = 0;
        String mergeSummary = updateCompactionHistory(cfs.keyspace.getName(), cfs.getColumnFamilyName(), ci,
                startsize, endsize);
        logger.debug(String.format(
                "Compacted (%s) %d sstables to [%s] to level=%d.  %,d bytes to %,d (~%d%% of original) in %,dms = %fMB/s.  %,d total partitions merged to %,d.  Partition merge counts were {%s}",
                taskIdLoggerMsg, transaction.originals().size(), newSSTableNames.toString(), getLevel(),
                startsize, endsize, (int) (ratio * 100), dTime, mbps, totalSourceRows, totalKeysWritten,
                mergeSummary));
        logger.trace(String.format("CF Total Bytes Compacted: %,d",
                CompactionTask.addToTotalBytesCompacted(endsize)));
        logger.trace("Actual #keys: {}, Estimated #keys:{}, Err%: {}", totalKeysWritten, estimatedKeys,
                ((double) (totalKeysWritten - estimatedKeys) / totalKeysWritten));

        if (offline)
            Refs.release(Refs.selfRefs(newSStables));
    }
}
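
The timing bracket in this method is a standard idiom worth isolating: System.nanoTime() is monotonic (unlike System.currentTimeMillis()), so the delta is safe to convert and log. A stripped-down sketch, with runCompaction standing in for the work above:

long start = System.nanoTime();
runCompaction(); // stand-in for the compaction work
long dTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
logger.debug("Compacted in {}ms", dTime);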

From source file:com.ottogroup.bi.spqr.pipeline.component.queue.chronicle.DefaultStreamingMessageQueueTest.java

/**
 * Inserts a configurable number of messages into a {@link Chronicle} and measures the
 * duration it takes to read the content from it using the {@link DefaultStreamingMessageQueue} implementation
 */
//   @Test
public void testNext_performanceTest() throws Exception {

    Properties props = new Properties();
    props.put(DefaultStreamingMessageQueue.CFG_CHRONICLE_QUEUE_DELETE_ON_EXIT, "true");
    props.put(DefaultStreamingMessageQueue.CFG_CHRONICLE_QUEUE_PATH, System.getProperty("java.io.tmpdir"));
    final DefaultStreamingMessageQueue inbox = new DefaultStreamingMessageQueue();
    inbox.setId("testNext_performanceTest");
    inbox.initialize(props);

    final StreamingMessageQueueProducer producer = inbox.getProducer();
    final StreamingMessageQueueConsumer consumer = inbox.getConsumer();

    final CountDownLatch latch = new CountDownLatch(numberOfMessagesPerfTest);

    ExecutorService svc = Executors.newCachedThreadPool();

    Future<Integer> producerDurationFuture = svc.submit(new Callable<Integer>() {

        public Integer call() {
            StreamingDataMessage object = new StreamingDataMessage(new byte[] { 1, 2, 3, 4, 5, 6, 7, 9 },
                    System.currentTimeMillis());
            long s1 = System.nanoTime();
            for (int i = 0; i < numberOfMessagesPerfTest; i++) {
                producer.insert(object);
            }
            long s2 = System.nanoTime();
            return (int) (s2 - s1);
        }
    });

    Future<Integer> durationFuture = svc.submit(new Callable<Integer>() {
        public Integer call() {
            StreamingDataMessage msg = null;
            long start = System.nanoTime();
            while (true) {
                msg = consumer.next();
                if (msg != null) {
                    latch.countDown();
                    if (latch.getCount() == 0)
                        break;
                } else {
                    LockSupport.parkNanos(1);
                }

            }
            long end = System.nanoTime();
            return (int) (end - start);
        }
    });

    try {
        Assert.assertTrue("Failed to receive expected number of messages", latch.await(10, TimeUnit.SECONDS));
    } catch (InterruptedException e) {
        Assert.fail("Failed to receive expected number of messages");
    }

    int producerDuration = producerDurationFuture.get();
    int duration = durationFuture.get();

    double messagesPerNano = ((double) numberOfMessagesPerfTest / (double) duration);
    double messagesPerNanoRounded = (double) Math.round(messagesPerNano * 10000) / 10000;

    double messagesPerMilli = messagesPerNano * 1000000;
    messagesPerMilli = (double) Math.round(messagesPerMilli * 100) / 100;

    long messagesPerSecondTmp = Math.round(messagesPerNano * 1000000 * 1000);
    double messagesPerSecond = (double) messagesPerSecondTmp;

    double nanosPerMessage = ((double) duration / (double) numberOfMessagesPerfTest);
    nanosPerMessage = (double) Math.round(nanosPerMessage * 100) / 100;

    logger.info("message count: " + numberOfMessagesPerfTest);
    logger.info(
            "message producing: " + producerDuration + "ns, " + TimeUnit.NANOSECONDS.toMillis(producerDuration)
                    + "ms, " + TimeUnit.NANOSECONDS.toSeconds(producerDuration) + "s");
    logger.info("message consumption: " + duration + "ns, " + TimeUnit.NANOSECONDS.toMillis(duration) + "ms, "
            + TimeUnit.NANOSECONDS.toSeconds(duration) + "s");
    logger.info("message throughput: " + messagesPerNanoRounded + " msgs/ns, " + messagesPerMilli + " msgs/ms, "
            + messagesPerSecond + " msgs/s");

    svc.shutdownNow();
}
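
The step-by-step rate arithmetic above can be collapsed into a single helper. A sketch (method name ours) that divides the message count by the duration expressed in seconds:

static double messagesPerSecond(long messageCount, long durationNanos) {
    // nanos -> seconds via the 1e9 factor; guard against a zero duration
    return durationNanos > 0 ? messageCount / (durationNanos / 1_000_000_000.0) : 0.0;
}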

From source file:com.bitplan.mjpegstreamer.MJpegRunnerBase.java

/**
 * read
 */
public boolean read() {
    try {
        BufferedImage bufImg = MJpegHelper.getImage(curFrame);
        if (bufImg == null) {
            throw new IOException("image is null");
        }
        if (framesReadCount == 0) {
            this.firstFrameNanoTime = System.nanoTime();
            this.fpsFrameNanoTime = firstFrameNanoTime;
            this.fpssecond = fpsFrameNanoTime;
        }
        framesReadCount++;
        fpscountIn++;
        frameAvailable = false;

        // uncomment next line for debug image
        // image= viewer.getBufferedImage("/images/start.png");
        // viewer.repaint();
        // Frame per second calculation
        now = System.nanoTime();
        // how many nanosecs since last frame?
        long elapsedFrameTime = now - fpsFrameNanoTime;
        // how many nanosecs since last second timestamp
        long elapsedSecondTime = now - fpssecond;
        long framemillisecs = TimeUnit.MILLISECONDS.convert(elapsedFrameTime, TimeUnit.NANOSECONDS);
        long secmillisecs = TimeUnit.MILLISECONDS.convert(elapsedSecondTime, TimeUnit.NANOSECONDS);
        // is a second over?
        if (secmillisecs > 1000) {
            fpsIn = fpscountIn;
            fpsOut = fpscountOut;
            fpscountOut = 0;
            fpscountIn = 0;
            fpssecond = now;
        }
        // do not render images that are "too quick/too early"
        if (framemillisecs >= this.fpsLimitMillis) {
            for (ImageListener listener : this.imageListeners) {
                if (!listener.isPostListener())
                    listener.onRead(this, bufImg);
            }
            BufferedImage rotatedImage = this.getRotatedImage(bufImg, viewer.getViewerSetting().rotation);
            viewer.renderNextImage(rotatedImage);
            for (ImageListener listener : this.imageListeners) {
                if (listener.isPostListener())
                    listener.onRead(this, rotatedImage);
            }
            // how many frames we actually displayed
            framesRenderedCount++;
            fpsFrameNanoTime = now;
            fpscountOut++;
        }

        switch (viewer.getViewerSetting().debugMode) {
        case Verbose:
            LOGGER.log(Level.INFO, this.getTimeMsg(" after " + framemillisecs + " msecs"));
            break;
        case FPS:
            if (fpssecond == now)
                LOGGER.log(Level.INFO, this.getTimeMsg(" after " + framemillisecs + " msecs " + fpsIn + "/"
                        + fpsOut + " Frames per second in/out "));
            break;
        case None:
            break;
        }
    } catch (Throwable th) {
        handle("Error acquiring the frame: ", th);
    }
    return framesRenderedCount < viewer.getViewerSetting().pictureCount;
}
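
Note that TimeUnit.MILLISECONDS.convert(elapsed, TimeUnit.NANOSECONDS) used above is equivalent to TimeUnit.NANOSECONDS.toMillis(elapsed). The frame-throttling core of the method reduces to this sketch (field names ours):

long now = System.nanoTime();
long sinceLastFrameMillis = TimeUnit.NANOSECONDS.toMillis(now - lastFrameNanos);
if (sinceLastFrameMillis >= fpsLimitMillis) {
    renderFrame();        // stand-in: render only when the frame budget allows it
    lastFrameNanos = now; // restart the per-frame window
}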

From source file:fr.xebia.management.statistics.ProfileAspect.java

@Around(value = "execution(* *(..)) && @annotation(profiled)", argNames = "pjp,profiled")
public Object profileInvocation(ProceedingJoinPoint pjp, Profiled profiled) throws Throwable {

    logger.trace("> profileInvocation({},{})", pjp, profiled);

    MethodSignature jointPointSignature = (MethodSignature) pjp.getStaticPart().getSignature();

    // COMPUTE SERVICE STATISTICS NAME
    Expression nameAsExpression = profiledMethodNameAsExpressionByMethod.get(jointPointSignature.getMethod());
    if (nameAsExpression == null) {
        if (StringUtils.hasLength(profiled.name())) {
            String nameAsStringExpression = profiled.name();
            nameAsExpression = expressionParser.parseExpression(nameAsStringExpression, parserContext);
        } else {
            String fullyQualifiedMethodName = getFullyQualifiedMethodName(//
                    jointPointSignature.getDeclaringTypeName(), //
                    jointPointSignature.getName(), //
                    this.classNameStyle);
            nameAsExpression = new LiteralExpression(fullyQualifiedMethodName);
        }
    }

    String serviceStatisticsName;
    if (nameAsExpression instanceof LiteralExpression) {
        // Optimization : prevent useless objects instantiations
        serviceStatisticsName = nameAsExpression.getExpressionString();
    } else {
        serviceStatisticsName = nameAsExpression.getValue(new RootObject(pjp), String.class);
    }

    // LOOKUP SERVICE STATISTICS
    ServiceStatistics serviceStatistics = serviceStatisticsByName.get(serviceStatisticsName);

    if (serviceStatistics == null) {
        // INSTANTIATE NEW SERVICE STATISTICS
        ServiceStatistics newServiceStatistics = new ServiceStatistics(//
                new ObjectName(this.jmxDomain + ":type=ServiceStatistics,name=" + serviceStatisticsName), //
                profiled.businessExceptionsTypes(), profiled.communicationExceptionsTypes());

        newServiceStatistics.setSlowInvocationThresholdInMillis(profiled.slowInvocationThresholdInMillis());
        newServiceStatistics
                .setVerySlowInvocationThresholdInMillis(profiled.verySlowInvocationThresholdInMillis());
        int maxActive;
        if (StringUtils.hasLength(profiled.maxActiveExpression())) {
            maxActive = expressionParser.parseExpression(profiled.maxActiveExpression(), parserContext)
                    .getValue(new RootObject(pjp), Integer.class);
        } else {
            maxActive = profiled.maxActive();
        }
        newServiceStatistics.setMaxActive(maxActive);
        newServiceStatistics.setMaxActiveSemaphoreAcquisitionMaxTimeInNanos(TimeUnit.NANOSECONDS
                .convert(profiled.maxActiveSemaphoreAcquisitionMaxTimeInMillis(), TimeUnit.MILLISECONDS));

        ServiceStatistics previousServiceStatistics = serviceStatisticsByName.putIfAbsent(serviceStatisticsName,
                newServiceStatistics);
        if (previousServiceStatistics == null) {
            serviceStatistics = newServiceStatistics;
            mbeanExporter.registerManagedResource(serviceStatistics);
        } else {
            serviceStatistics = previousServiceStatistics;
        }
    }

    // INVOKE AND PROFILE INVOCATION
    long nanosBefore = System.nanoTime();

    Semaphore semaphore = serviceStatistics.getMaxActiveSemaphore();
    if (semaphore != null) {
        boolean acquired = semaphore.tryAcquire(
                serviceStatistics.getMaxActiveSemaphoreAcquisitionMaxTimeInNanos(), TimeUnit.NANOSECONDS);
        if (!acquired) {
            serviceStatistics.incrementServiceUnavailableExceptionCount();
            throw new ServiceUnavailableException("Service '" + serviceStatisticsName + "' is unavailable: "
                    + serviceStatistics.getCurrentActive() + " invocations are currently running");
        }
    }
    serviceStatistics.incrementCurrentActiveCount();
    try {
        return pjp.proceed();
    } catch (Throwable t) {
        serviceStatistics.incrementExceptionCount(t);
        throw t;
    } finally {
        if (semaphore != null) {
            semaphore.release();
        }
        serviceStatistics.decrementCurrentActiveCount();
        long deltaInNanos = System.nanoTime() - nanosBefore;
        serviceStatistics.incrementInvocationCounterAndTotalDurationWithNanos(deltaInNanos);
        if (logger.isDebugEnabled()) {
            logger.debug("< profileInvocation({}): {}ns", serviceStatisticsName, deltaInNanos);
        }
    }
}
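
The admission control above (tryAcquire with a nanosecond deadline, release in a finally block) generalizes to any bounded-concurrency guard. A sketch under those assumptions; the names and the stand-in exception are ours:

void admitAndRun(Semaphore permits, Runnable work) throws InterruptedException {
    long maxWaitNanos = TimeUnit.MILLISECONDS.toNanos(500);
    if (!permits.tryAcquire(maxWaitNanos, TimeUnit.NANOSECONDS)) {
        // stand-in for the ServiceUnavailableException above
        throw new IllegalStateException("service saturated");
    }
    try {
        work.run();
    } finally {
        permits.release(); // always return the permit
    }
}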

From source file:fr.xebia.management.statistics.ServiceStatistics.java

@ManagedAttribute
public long getSlowInvocationThresholdInMillis() {
    return TimeUnit.MILLISECONDS.convert(slowInvocationThresholdInNanos, TimeUnit.NANOSECONDS);
}
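
As the getter shows, convert on the target unit and the toXxx shortcut on the source unit are interchangeable; the two forms below always agree:

long viaConvert = TimeUnit.MILLISECONDS.convert(slowInvocationThresholdInNanos, TimeUnit.NANOSECONDS);
long viaShortcut = TimeUnit.NANOSECONDS.toMillis(slowInvocationThresholdInNanos);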

From source file:biospectra.index.Indexer.java

@Override
public synchronized void close() throws IOException {
    try {
        this.executor.shutdown();
        this.executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);

        this.freeQueue.clear();

        this.analyzer.close();
        this.indexWriter.close();
    } catch (InterruptedException ex) {
        LOG.error("Interrupted", ex);
    }
}
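
awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS) is a conventional way to wait effectively forever: Long.MAX_VALUE nanoseconds is roughly 292 years. The shutdown idiom as a reusable helper, a sketch with a name of our choosing:

static void shutdownAndWait(ExecutorService executor) throws InterruptedException {
    executor.shutdown(); // reject new tasks, let queued work drain
    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
}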

From source file:com.netflix.genie.web.services.impl.JobSpecificationServiceImpl.java

/**
 * {@inheritDoc}
 */
@Override
public JobSpecification resolveJobSpecification(final String id, @Valid final JobRequest jobRequest) {
    final long start = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        log.info("Received request to resolve a job specification for job id {} and parameters {}", id,
                jobRequest);
        final Map<Cluster, String> clustersAndCommandsForJob = this.queryForClustersAndCommands(
                jobRequest.getCriteria().getClusterCriteria(), jobRequest.getCriteria().getCommandCriterion());
        // Resolve the cluster for the job request based on the tags specified
        final Cluster cluster = this.selectCluster(id, jobRequest, clustersAndCommandsForJob.keySet());
        // Resolve the command for the job request based on command tags and cluster chosen
        final Command command = this.getCommand(clustersAndCommandsForJob.get(cluster), id);
        // Resolve the applications to use based on the command that was selected
        final List<JobSpecification.ExecutionResource> applicationResources = Lists.newArrayList();
        for (final Application application : this.getApplications(id, jobRequest, command)) {
            applicationResources.add(
                    new JobSpecification.ExecutionResource(application.getId(), application.getResources()));
        }

        final List<String> commandArgs = Lists.newArrayList(command.getExecutable());
        commandArgs.addAll(jobRequest.getCommandArgs());

        //TODO: Set the default job location as a server property?
        final JobSpecification jobSpecification = new JobSpecification(commandArgs,
                new JobSpecification.ExecutionResource(id, jobRequest.getResources()),
                new JobSpecification.ExecutionResource(cluster.getId(), cluster.getResources()),
                new JobSpecification.ExecutionResource(command.getId(), command.getResources()),
                applicationResources, this.generateEnvironmentVariables(id, jobRequest, cluster, command),
                jobRequest.getRequestedAgentConfig().isInteractive(),
                jobRequest.getRequestedAgentConfig().getRequestedJobDirectoryLocation()
                        .orElse(DEFAULT_JOB_DIRECTORY),
                toArchiveLocation(jobRequest.getRequestedJobArchivalData().getRequestedArchiveLocationPrefix()
                        .orElse(null), id));

        MetricsUtils.addSuccessTags(tags);
        return jobSpecification;
    } catch (final Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw new RuntimeException(t);
    } finally {
        this.registry.timer(RESOLVE_JOB_SPECIFICATION_TIMER, tags).record(System.nanoTime() - start,
                TimeUnit.NANOSECONDS);
    }
}
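
If the registry is Micrometer's (which the Tag and timer usage suggests, though that is our assumption), the manual nanoTime() bookkeeping can also be written with Timer.Sample; a sketch, with resolve() as a stand-in for the body above:

Timer.Sample sample = Timer.start(registry); // captures the start timestamp
try {
    resolve(); // stand-in for the resolution logic
} finally {
    sample.stop(registry.timer(RESOLVE_JOB_SPECIFICATION_TIMER, tags)); // records the elapsed nanos
}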

From source file:com.comphenix.protocol.error.DetailedErrorReporter.java

/**
 * Determine if we should print the given report.
 * <p>
 * The default implementation will check for rate limits.
 * @param report - the report to check.
 * @return TRUE if we should print it, FALSE otherwise.
 */
protected boolean canReport(Report report) {
    long rateLimit = report.getRateLimit();

    // Check for rate limit
    if (rateLimit > 0) {
        synchronized (rateLock) {
            if (rateLimited.containsKey(report)) {
                return false;
            }
            rateLimited.put(report, true, rateLimit, TimeUnit.NANOSECONDS);
        }
    }
    return true;
}
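
The rateLimited map above is evidently an expiring map keyed by report with a nanosecond time-to-live. The same idea with a plain HashMap and explicit timestamps, as a minimal sketch (all names ours):

private final Map<Report, Long> lastReported = new HashMap<>();

synchronized boolean canPrint(Report report, long rateLimitNanos) {
    long now = System.nanoTime();
    Long last = lastReported.get(report);
    if (last != null && now - last < rateLimitNanos) {
        return false; // still inside the rate-limit window, drop the report
    }
    lastReported.put(report, now);
    return true;
}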