Example usage for java.util.concurrent TimeUnit NANOSECONDS

List of usage examples for java.util.concurrent TimeUnit NANOSECONDS

Introduction

On this page you can find example usage for java.util.concurrent TimeUnit NANOSECONDS.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
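
The snippet below is a minimal, self-contained sketch (not taken from any of the projects listed on this page) showing the two conversion idioms that recur in the examples that follow: turning an elapsed System.nanoTime() interval into milliseconds, and turning a budget expressed in another unit into nanoseconds. The sleep duration and budget values are arbitrary.

import java.util.concurrent.TimeUnit;

public class NanosecondsDemo {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        Thread.sleep(25); // stand-in for real work
        long elapsedNanos = System.nanoTime() - start;

        // Convert an elapsed nanosecond interval to milliseconds.
        long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(elapsedNanos);

        // Equivalent conversion expressed from the target unit's side.
        long sameMillis = TimeUnit.MILLISECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS);

        // Convert a 500 ms budget into nanoseconds, e.g. for a deadline check.
        long budgetNanos = TimeUnit.NANOSECONDS.convert(500, TimeUnit.MILLISECONDS);

        System.out.printf("elapsed: %d ms / %d ms, budget: %d ns%n",
                elapsedMillis, sameMillis, budgetNanos);
    }
}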

Usage

From source file:com.oneops.antenna.ws.AntennaWsController.java

/**
 * Get the current sink cache status. Returns the cumulative status of
 * <ul>
 * <li>hitCount
 * <li>missCount;
 * <li>loadSuccessCount;
 * <li>loadExceptionCount;
 * <li>totalLoadTime;
 * <li>evictionCount;
 * </ul>
 *
 * @return cache status map.
 */
@RequestMapping(value = "/cache/stats", method = RequestMethod.GET)
@ResponseBody
public Map<String, Object> getCacheStats() {
    Map<String, Object> stat = new LinkedHashMap<String, Object>(5);
    stat.put("status", "ok");
    stat.put("maxSize", cache.getMaxSize());
    stat.put("currentSize", cache.instance().size());
    stat.put("timeout", cache.getTimeout());
    CacheStats cs = cache.instance().stats();
    stat.put("hitCount", cs.hitCount());
    stat.put("missCount", cs.missCount());
    stat.put("loadSuccessCount", cs.loadSuccessCount());
    stat.put("totalLoadTime", TimeUnit.SECONDS.convert(cs.totalLoadTime(), TimeUnit.NANOSECONDS));
    stat.put("loadExceptionCount", cs.loadExceptionCount());
    stat.put("evictionCount", cs.evictionCount());
    return stat;
}
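
Guava's CacheStats reports totalLoadTime in nanoseconds, which is why the controller above converts it before exposing it. Below is a tiny standalone sketch of the same conversion using a made-up load time; TimeUnit.NANOSECONDS.toSeconds(...) gives the same truncated result as the convert call used above.

import java.util.concurrent.TimeUnit;

public class LoadTimeConversionSketch {
    public static void main(String[] args) {
        // Hypothetical value: total cache load time in nanoseconds, as CacheStats.totalLoadTime() reports it.
        long totalLoadTimeNanos = 3_450_000_000L;

        // Both conversions truncate to 3 seconds.
        long viaConvert   = TimeUnit.SECONDS.convert(totalLoadTimeNanos, TimeUnit.NANOSECONDS);
        long viaToSeconds = TimeUnit.NANOSECONDS.toSeconds(totalLoadTimeNanos);

        System.out.println(viaConvert + " s == " + viaToSeconds + " s");
    }
}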

From source file:org.apache.solr.handler.component.HttpShardHandler.java

@Override
public void submit(final ShardRequest sreq, final String shard, final ModifiableSolrParams params,
        String preferredHostAddress) {
    // do this outside of the callable for thread safety reasons
    final List<String> urls = getURLs(shard, preferredHostAddress);

    Callable<ShardResponse> task = () -> {

        ShardResponse srsp = new ShardResponse();
        if (sreq.nodeName != null) {
            srsp.setNodeName(sreq.nodeName);
        }
        srsp.setShardRequest(sreq);
        srsp.setShard(shard);
        SimpleSolrResponse ssr = new SimpleSolrResponse();
        srsp.setSolrResponse(ssr);
        long startTime = System.nanoTime();

        try {
            params.remove(CommonParams.WT); // use default (currently javabin)
            params.remove(CommonParams.VERSION);

            QueryRequest req = makeQueryRequest(sreq, params, shard);
            req.setMethod(SolrRequest.METHOD.POST);

            // no need to set the response parser as binary is the default
            // req.setResponseParser(new BinaryResponseParser());

            // if there are no shards available for a slice, urls.size()==0
            if (urls.size() == 0) {
                // TODO: what's the right error code here? We should use the same thing when
                // all of the servers for a shard are down.
                throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,
                        "no servers hosting shard: " + shard);
            }

            if (urls.size() <= 1) {
                String url = urls.get(0);
                srsp.setShardAddress(url);
                try (SolrClient client = new Builder(url).withHttpClient(httpClient).build()) {
                    ssr.nl = client.request(req);
                }
            } else {
                LBHttpSolrClient.Rsp rsp = httpShardHandlerFactory.makeLoadBalancedRequest(req, urls);
                ssr.nl = rsp.getResponse();
                srsp.setShardAddress(rsp.getServer());
            }
        } catch (ConnectException cex) {
            srsp.setException(cex); //????
        } catch (Exception th) {
            srsp.setException(th);
            if (th instanceof SolrException) {
                srsp.setResponseCode(((SolrException) th).code());
            } else {
                srsp.setResponseCode(-1);
            }
        }

        ssr.elapsedTime = TimeUnit.MILLISECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);

        return transfomResponse(sreq, srsp, shard);
    };

    try {
        if (shard != null) {
            MDC.put("ShardRequest.shards", shard);
        }
        if (urls != null && !urls.isEmpty()) {
            MDC.put("ShardRequest.urlList", urls.toString());
        }
        pending.add(completionService.submit(task));
    } finally {
        MDC.remove("ShardRequest.shards");
        MDC.remove("ShardRequest.urlList");
    }
}
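
The elapsed-time pattern used above (capture System.nanoTime() before the request, then convert the difference to milliseconds through NANOSECONDS) can be isolated. The following is a minimal sketch, not part of the Solr code, with a Runnable standing in for the timed shard request:

import java.util.concurrent.TimeUnit;

/** Minimal sketch of the timing pattern above; the Runnable stands in for the real work. */
public class ElapsedTimeSketch {

    static long timeMillis(Runnable work) {
        long startTime = System.nanoTime();
        work.run();
        // Same result as TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime).
        return TimeUnit.MILLISECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
    }

    public static void main(String[] args) {
        long ms = timeMillis(() -> {
            try {
                Thread.sleep(50); // stand-in for a shard request
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        System.out.println("request took " + ms + " ms");
    }
}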

From source file:org.attribyte.api.pubsub.impl.server.BroadcastServlet.java

@Override
public void doPost(HttpServletRequest request, HttpServletResponse response)
        throws IOException, ServletException {

    long startNanos = System.nanoTime();

    byte[] broadcastContent = ByteStreams.toByteArray(request.getInputStream());

    long endNanos = System.nanoTime();

    String topicURL = request.getPathInfo();

    if (maxBodyBytes > 0 && broadcastContent.length > maxBodyBytes) {
        logNotification(request, topicURL, NOTIFICATION_TOO_LARGE.statusCode, null);
        Bridge.sendServletResponse(NOTIFICATION_TOO_LARGE, response);
        return;
    }

    Response endpointResponse;
    if (topicURL != null) {
        if (filters.size() > 0) {
            String checkHeader = request.getHeader(BasicAuthScheme.AUTH_HEADER);
            for (BasicAuthFilter filter : filters) {
                if (filter.reject(topicURL, checkHeader)) {
                    logNotification(request, topicURL, Response.Code.UNAUTHORIZED, broadcastContent);
                    response.sendError(Response.Code.UNAUTHORIZED, "Unauthorized");
                    return;
                }
            }
        }

        try {

            Topic topic = topicCache != null ? topicCache.getIfPresent(topicURL) : null;
            if (topic == null) {
                topic = datastore.getTopic(topicURL, autocreateTopics);
                if (topicCache != null && topic != null) {
                    topicCache.put(topicURL, topic);
                }
            }

            if (topic != null) {
                NotificationMetrics globalMetrics = endpoint.getGlobalNotificationMetrics();
                NotificationMetrics metrics = endpoint.getNotificationMetrics(topic.getId());
                metrics.notificationSize.update(broadcastContent.length);
                globalMetrics.notificationSize.update(broadcastContent.length);
                long acceptTimeNanos = endNanos - startNanos;
                metrics.notifications.update(acceptTimeNanos, TimeUnit.NANOSECONDS);
                globalMetrics.notifications.update(acceptTimeNanos, TimeUnit.NANOSECONDS);
                Notification notification = new Notification(topic, null, broadcastContent); //No custom headers...

                final boolean queued = endpoint.enqueueNotification(notification);
                if (queued) {
                    if (replicationTopic != null) {
                        final boolean replicationQueued = endpoint
                                .enqueueNotification(new Notification(replicationTopic,
                                        Collections.singleton(
                                                new Header(REPLICATION_TOPIC_HEADER, topic.getURL())),
                                        broadcastContent));
                        if (!replicationQueued) { //What to do?
                            logger.error("Replication failure due to notification capacity limits!");
                        }
                    }
                    if (!jsonEnabled) {
                        endpointResponse = ACCEPTED_RESPONSE;
                    } else {
                        ResponseBuilder builder = new ResponseBuilder();
                        builder.setStatusCode(ACCEPTED_RESPONSE.statusCode);
                        builder.addHeader("Content-Type", ServerUtil.JSON_CONTENT_TYPE);
                        ObjectNode responseNode = JsonNodeFactory.instance.objectNode();
                        ArrayNode idsNode = responseNode.putArray("messageIds");
                        idsNode.add(Long.toString(notification.getCreateTimestampMicros()));
                        builder.setBody(responseNode.toString().getBytes(Charsets.UTF_8));
                        endpointResponse = builder.create();
                    }
                } else {
                    endpointResponse = CAPACITY_ERROR_RESPONSE;
                }
            } else {
                endpointResponse = UNKNOWN_TOPIC_RESPONSE;
            }
        } catch (DatastoreException de) {
            logger.error("Problem selecting topic", de);
            endpointResponse = INTERNAL_ERROR_RESPONSE;
        }
    } else {
        endpointResponse = NO_TOPIC_RESPONSE;
    }

    logNotification(request, topicURL, endpointResponse.statusCode, broadcastContent);
    Bridge.sendServletResponse(endpointResponse, response);
}

From source file:fr.xebia.management.statistics.ServiceStatistics.java

@ManagedAttribute(description = "Max acquisition duration for the max active semaphore")
public long getSemaphoreAcquisitionMaxTimeInMillis() {
    return TimeUnit.MILLISECONDS.convert(maxActiveSemaphoreAcquisitionMaxTimeInNanos, TimeUnit.NANOSECONDS);
}

From source file:com.alertlogic.aws.analytics.poc.DynamoDBPersister.java

/**
 * Drain the queue of pending counts into the provided buffer and write those counts to DynamoDB. This blocks until
 * data is available in the queue.
 *
 * @param buffer A reusable buffer with sufficient space to drain the entire queue if necessary. This is provided as
 *        an optimization to avoid allocating a new buffer every interval.
 * @throws InterruptedException Thread interrupted while waiting for new data to arrive in the queue.
 */
protected void sendQueueToDynamoDB(List<RecordCount> buffer) throws InterruptedException {
    // Block while waiting for data
    buffer.add(counts.take());
    // Drain as much of the queue as we can.
    // DynamoDBMapper will handle splitting the batch sizes for us.
    counts.drainTo(buffer);
    try {
        long start = System.nanoTime();
        // Write the contents of the buffer as items to our table
        List<FailedBatch> failures = mapper.batchWrite(buffer, Collections.emptyList());
        long end = System.nanoTime();
        LOG.info(String.format("%d new counts sent to DynamoDB in %dms", buffer.size(),
                TimeUnit.NANOSECONDS.toMillis(end - start)));

        for (FailedBatch failure : failures) {
            LOG.warn("Error sending count batch to DynamoDB. This will not be retried!",
                    failure.getException());
        }
    } catch (Exception ex) {
        LOG.error("Error sending new counts to DynamoDB. The some counts may not be persisted.", ex);
    }
}

From source file:com.netflix.genie.web.tasks.job.JobCompletionService.java

/**
 * Event listener for when a job is completed. Updates the status of the job.
 *
 * @param event The job finished event to process
 * @throws GenieException If there is any problem
 */
void handleJobCompletion(final JobFinishedEvent event) throws GenieException {
    final long start = System.nanoTime();
    final String jobId = event.getId();
    final Map<String, String> tags = Maps.newHashMap();

    try {
        final Job job = retryTemplate.execute(context -> getJob(jobId));

        final JobStatus status = job.getStatus();

        // Make sure the job isn't already done before doing something
        if (status.isActive()) {
            try {
                this.retryTemplate.execute(context -> updateJob(job, event, tags));
            } catch (Exception e) {
                log.error("Failed updating for job: {}", jobId, e);
                tags.put(ERROR_TAG, "JOB_UPDATE_FAILURE");
                this.finalStatusUpdateFailureRate.increment();
            }
            // Things that should be done either way
            try {
                this.retryTemplate.execute(context -> processJobDir(job));
            } catch (Exception e) {
                log.error("Failed archiving directory for job: {}", jobId, e);
                tags.put(ERROR_TAG, "JOB_DIRECTORY_FAILURE");
                this.archivalFailureRate.increment();
            }
            try {
                this.retryTemplate.execute(context -> sendEmail(jobId));
            } catch (Exception e) {
                log.error("Failed sending email for job: {}", jobId, e);
                tags.put(ERROR_TAG, "SEND_EMAIL_FAILURE");
                this.emailFailureRate.increment();
            }
        }
    } catch (Exception e) {
        log.error("Failed getting job with id: {}", jobId, e);
        tags.put(ERROR_TAG, "GET_JOB_FAILURE");
    } finally {
        final Id timerId = this.jobCompletionId.withTags(tags);
        this.registry.timer(timerId).record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}

From source file:org.agatom.springatom.webmvc.controllers.wizard.SVWizardController.java

@Override
public WizardSubmission onStepSubmit(final String wizard, final String step, final ModelMap formData,
        final Locale locale) throws Exception {
    LOGGER.debug(String.format("onStepSubmit(wizard=%s,step=%s,formData=%s)", wizard, step, formData));

    final long startTime = System.nanoTime();

    WizardSubmission submission = null;
    WizardResult result;

    try {

        final WizardProcessor wizardProcessor = this.processorMap.get(wizard);
        result = wizardProcessor.onStepSubmit(step, formData, locale);

    } catch (Exception exp) {
        LOGGER.debug(String.format("onStepSubmit(wizard=%s,step=%s) failed", wizard, step), exp);
        throw exp;
    }
    final long endTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);

    if (result != null) {
        submission = (WizardSubmission) new WizardSubmission(result, Submission.SUBMIT_STEP).setSize(1)
                .setSuccess(true).setTime(endTime);
    }

    LOGGER.trace(String.format("onStepSubmit(wizard=%s,step=%s) completed in %d ms", wizard, step, endTime));

    return submission;
}

From source file:com.opentable.db.postgres.embedded.EmbeddedPostgres.java

private void waitForServerStartup(StopWatch watch) throws UnknownHostException, IOException {
    Throwable lastCause = null;
    final long start = System.nanoTime();
    final long maxWaitNs = TimeUnit.NANOSECONDS.convert(PG_STARTUP_WAIT_MS, TimeUnit.MILLISECONDS);
    while (System.nanoTime() - start < maxWaitNs) {
        try {
            verifyReady();
            LOG.info("{} postmaster startup finished in {}", instanceId, watch);
            return;
        } catch (final SQLException e) {
            lastCause = e;
            LOG.trace("While waiting for server startup", e);
        }

        try {
            Thread.sleep(100);
        } catch (final InterruptedException e) {
            Thread.currentThread().interrupt();
            return;
        }
    }
    throw new IOException("Gave up waiting for server to start after " + PG_STARTUP_WAIT_MS + "ms", lastCause);
}
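
The startup-wait loop above converts a millisecond budget into nanoseconds once, then polls until System.nanoTime() passes the deadline. Here is a minimal sketch of the same pattern under assumptions of my own: a hypothetical isReady() probe and a 10-second budget.

import java.io.IOException;
import java.util.concurrent.TimeUnit;

/** Minimal sketch of a nanoTime-based deadline loop; isReady() is a hypothetical probe. */
public class StartupWaitSketch {

    private static final long WAIT_MS = 10_000;

    static void waitUntilReady() throws IOException {
        final long start = System.nanoTime();
        final long maxWaitNs = TimeUnit.NANOSECONDS.convert(WAIT_MS, TimeUnit.MILLISECONDS);
        while (System.nanoTime() - start < maxWaitNs) {
            if (isReady()) {
                return;
            }
            try {
                Thread.sleep(100); // back off between probes
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        }
        throw new IOException("Gave up waiting for readiness after " + WAIT_MS + "ms");
    }

    private static boolean isReady() {
        return false; // hypothetical readiness check; always false in this sketch
    }
}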

From source file:com.alertlogic.aws.kinesis.test1.kcl.persistence.ddb.DynamoDBCountPersister.java

/**
 * Drain the queue of pending counts into the provided buffer and write those counts to DynamoDB. This blocks until
 * data is available in the queue.
 *
 * @param buffer A reusable buffer with sufficient space to drain the entire queue if necessary. This is provided as
 *        an optimization to avoid allocating a new buffer every interval.
 * @throws InterruptedException Thread interrupted while waiting for new data to arrive in the queue.
 */
protected void sendQueueToDynamoDB(List<HttpReferrerPairsCount> buffer) throws InterruptedException {
    // Block while waiting for data
    buffer.add(counts.take());
    // Drain as much of the queue as we can.
    // DynamoDBMapper will handle splitting the batch sizes for us.
    counts.drainTo(buffer);
    try {
        long start = System.nanoTime();
        // Write the contents of the buffer as items to our table
        List<FailedBatch> failures = mapper.batchWrite(buffer, Collections.emptyList());
        long end = System.nanoTime();
        LOG.info(String.format("%d new counts sent to DynamoDB in %dms", buffer.size(),
                TimeUnit.NANOSECONDS.toMillis(end - start)));

        for (FailedBatch failure : failures) {
            LOG.warn("Error sending count batch to DynamoDB. This will not be retried!",
                    failure.getException());
        }
    } catch (Exception ex) {
        LOG.error("Error sending new counts to DynamoDB. The some counts may not be persisted.", ex);
    }
}