Example usage for java.util.concurrent TimeUnit NANOSECONDS

Introduction

On this page you can find example usages for java.util.concurrent TimeUnit.NANOSECONDS.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
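
Before the real-world examples, here is a minimal, self-contained sketch of the two idioms that recur below: converting between units, and converting a System.nanoTime() delta for display. All class and method names are from java.util.concurrent; the durations are arbitrary.

import java.util.concurrent.TimeUnit;

public class NanosecondsDemo {
    public static void main(String[] args) throws InterruptedException {
        // Convert between units: 5 ms expressed in nanoseconds, and back again.
        long fiveMsInNanos = TimeUnit.NANOSECONDS.convert(5, TimeUnit.MILLISECONDS);
        long backToMillis = TimeUnit.NANOSECONDS.toMillis(fiveMsInNanos);
        System.out.println(fiveMsInNanos + " ns = " + backToMillis + " ms");

        // Measure elapsed time: System.nanoTime() deltas are already in nanoseconds,
        // so TimeUnit.NANOSECONDS is the natural unit for converting them for display.
        long start = System.nanoTime();
        TimeUnit.NANOSECONDS.sleep(1_000_000); // sleep for roughly one millisecond
        long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        System.out.println("took ~" + tookMs + " ms");
    }
}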

Usage

From source file:com.ebay.pulsar.sessionizer.cluster.SessionizerLoopbackRingListener.java

@Override
public <T> boolean hasOwnership(T affinityKey) {
    if (shutdownFlag || leavingCluster) {
        return true;
    }
    long currentNanoTime = System.nanoTime();
    if (head == null || (currentNanoTime - head.effectiveTime) > TimeUnit.NANOSECONDS
            .convert(getMaxIdleTime() + GRACE_PERIOD, TimeUnit.MILLISECONDS)) {
        return true;
    }
    EventConsumerInfo currentInfo = null;
    try {
        if (currentState.containsKey(loopbackTopic)) {
            ConsistentHashing<EventConsumerInfo> che = currentState.get(loopbackTopic);
            Object obj = affinityKey;
            if ((che != null) && (obj != null)) {
                currentInfo = che.get(obj);
            }
        }
    } catch (Throwable t) {
        // Swallow any lookup failure; currentInfo stays null and ownership is assumed below.
    }

    return currentInfo == null || currentInfo.getAdvertisement().getConsumerId() == getHostId();
}
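
Two details in this example are worth calling out. System.nanoTime() values are meaningful only as differences, never as wall-clock timestamps, which is why the code compares the delta currentNanoTime - head.effectiveTime rather than the raw values. And TimeUnit.NANOSECONDS.convert(x, TimeUnit.MILLISECONDS) turns a millisecond count into nanoseconds; the equivalent TimeUnit.MILLISECONDS.toNanos(x) arguably reads more directly.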

From source file:com.loopeer.codereader.api.HttpJsonLoggingInterceptor.java

@Override
public Response intercept(Chain chain) throws IOException {
    Level level = this.level;

    Request request = chain.request();
    if (level == Level.NONE) {
        return chain.proceed(request);
    }

    boolean logBody = level == Level.BODY;
    boolean logHeaders = logBody || level == Level.HEADERS;

    RequestBody requestBody = request.body();
    boolean hasRequestBody = requestBody != null;

    Connection connection = chain.connection();
    Protocol protocol = connection != null ? connection.protocol() : Protocol.HTTP_1_1;
    String requestStartMessage = "--> " + request.method() + ' ' + request.url() + ' ' + protocol;
    if (!logHeaders && hasRequestBody) {
        requestStartMessage += " (" + requestBody.contentLength() + "-byte body)";
    }
    logger.log(requestStartMessage);

    if (logHeaders) {
        if (hasRequestBody) {
            // Request body headers are only present when installed as a network interceptor. Force
            // them to be included (when available) so their values are known.
            if (requestBody.contentType() != null) {
                logger.log("Content-Type: " + requestBody.contentType());
            }
            if (requestBody.contentLength() != -1) {
                logger.log("Content-Length: " + requestBody.contentLength());
            }
        }

        Headers headers = request.headers();
        for (int i = 0, count = headers.size(); i < count; i++) {
            String name = headers.name(i);
            // Skip headers from the request body as they are explicitly logged above.
            if (!"Content-Type".equalsIgnoreCase(name) && !"Content-Length".equalsIgnoreCase(name)) {
                logger.log(name + ": " + headers.value(i));
            }
        }

        if (!logBody || !hasRequestBody) {
            logger.log("--> END " + request.method());
        } else if (bodyEncoded(request.headers())) {
            logger.log("--> END " + request.method() + " (encoded body omitted)");
        } else {
            Buffer buffer = new Buffer();
            requestBody.writeTo(buffer);

            Charset charset = UTF8;
            MediaType contentType = requestBody.contentType();
            if (contentType != null) {
                charset = contentType.charset(UTF8);
            }

            logger.log("");
            if (isPlaintext(buffer)) {
                logger.log(buffer.readString(charset));
                logger.log("--> END " + request.method() + " (" + requestBody.contentLength() + "-byte body)");
            } else {
                logger.log("--> END " + request.method() + " (binary " + requestBody.contentLength()
                        + "-byte body omitted)");
            }
        }
    }

    long startNs = System.nanoTime();
    Response response;
    try {
        response = chain.proceed(request);
    } catch (Exception e) {
        logger.log("<-- HTTP FAILED: " + e);
        throw e;
    }
    long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);

    ResponseBody responseBody = response.body();
    long contentLength = responseBody.contentLength();
    String bodySize = contentLength != -1 ? contentLength + "-byte" : "unknown-length";
    logger.log("<-- " + response.code() + ' ' + response.message() + ' ' + response.request().url() + " ("
            + tookMs + "ms" + (!logHeaders ? ", " + bodySize + " body" : "") + ')');

    if (logHeaders) {
        Headers headers = response.headers();
        for (int i = 0, count = headers.size(); i < count; i++) {
            logger.log(headers.name(i) + ": " + headers.value(i));
        }

        if (!logBody || !HttpHeaders.hasBody(response)) {
            logger.log("<-- END HTTP");
        } else if (bodyEncoded(response.headers())) {
            logger.log("<-- END HTTP (encoded body omitted)");
        } else {
            BufferedSource source = responseBody.source();
            source.request(Long.MAX_VALUE); // Buffer the entire body.
            Buffer buffer = source.buffer();

            Charset charset = UTF8;
            MediaType contentType = responseBody.contentType();
            if (contentType != null) {
                try {
                    charset = contentType.charset(UTF8);
                } catch (UnsupportedCharsetException e) {
                    logger.log("");
                    logger.log("Couldn't decode the response body; charset is likely malformed.");
                    logger.log("<-- END HTTP");

                    return response;
                }
            }

            if (!isPlaintext(buffer)) {
                logger.log("");
                logger.log("<-- END HTTP (binary " + buffer.size() + "-byte body omitted)");
                return response;
            }

            if (contentLength != 0) {
                logger.log("");
                String logText = buffer.clone().readString(charset);
                try {
                    JSONObject logJson = new JSONObject(logText);
                    logger.log(logJson.toString(4));
                } catch (JSONException e) {
                    e.printStackTrace();
                    logger.log(logText);
                }
            }

            logger.log("<-- END HTTP (" + buffer.size() + "-byte body)");
        }
    }

    return response;
}

From source file:de.sub.goobi.helper.tasks.EmptyTask.java

/**
 * The function getDurationDead() returns how long the task has been dead.
 * If no time of death has been recorded yet, null is returned.
 *
 * @return the duration since the task died, or null if the task is still alive
 */
Duration getDurationDead() {
    if (passedAway == null) {
        return null;
    }
    long elapsed = System.nanoTime() - passedAway;
    return new Duration(TimeUnit.MILLISECONDS.convert(elapsed, TimeUnit.NANOSECONDS));
}
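
TimeUnit.MILLISECONDS.convert(elapsed, TimeUnit.NANOSECONDS) is exactly equivalent to TimeUnit.NANOSECONDS.toMillis(elapsed); the toMillis spelling, used in the next example, reads in the same direction as the conversion.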

From source file:io.netty.handler.timeout.IdleStateHandler.java

/**
 * Return the readerIdleTime that was given when this instance was created, in milliseconds.
 */
public long getReaderIdleTimeInMillis() {
    return TimeUnit.NANOSECONDS.toMillis(readerIdleTimeNanos);
}

From source file:com.netflix.genie.core.services.impl.LocalJobRunner.java

/**
 * {@inheritDoc}
 */
@SuppressFBWarnings(value = "REC_CATCH_EXCEPTION", justification = "We catch exception to make sure we always mark job failed.")
@Override
public void submitJob(
        @NotNull(message = "No job provided. Unable to submit job for execution.") @Valid final JobRequest jobRequest,
        @NotNull(message = "No cluster provided. Unable to submit job for execution") @Valid final Cluster cluster,
        @NotNull(message = "No command provided. Unable to submit job for execution") @Valid final Command command,
        @NotNull(message = "No applications provided. Unable to execute") final List<Application> applications,
        @Min(value = 1, message = "Memory can't be less than 1 MB") final int memory) throws GenieException {
    final long start = System.nanoTime();

    try {
        log.info("Beginning local job submission for {}", jobRequest);
        final String id = jobRequest.getId().orElseThrow(() -> new GenieServerException("No job id found."));

        try {
            final File jobWorkingDir = this.createJobWorkingDirectory(id);
            final File runScript = this.createRunScript(jobWorkingDir);

            // The map object stores the context for all the workflow tasks
            final Map<String, Object> context = this.createJobContext(jobRequest, cluster, command,
                    applications, memory, jobWorkingDir);

            // Execute the job
            final JobExecution jobExecution = this.executeJob(context, runScript);

            // Job Execution will be null in local mode.
            if (jobExecution != null) {
                // Persist the jobExecution information. This also updates jobStatus to Running
                final long createJobExecutionStart = System.nanoTime();
                try {
                    log.info("Saving job execution for job {}", jobRequest.getId());
                    this.jobPersistenceService.setJobRunningInformation(id,
                            jobExecution.getProcessId()
                                    .orElseThrow(() -> new GenieServerException(
                                            "No process id returned. Unable to persist")),
                            jobExecution.getCheckDelay().orElse(Command.DEFAULT_CHECK_DELAY),
                            jobExecution.getTimeout().orElseThrow(() -> new GenieServerException(
                                    "No timeout date returned. Unable to persist")));
                } finally {
                    this.saveJobExecutionTimer.record(System.nanoTime() - createJobExecutionStart,
                            TimeUnit.NANOSECONDS);
                }

                // Publish a job start Event
                final long publishEventStart = System.nanoTime();
                try {
                    log.info("Publishing job started event for job {}", id);
                    this.genieEventBus.publishSynchronousEvent(new JobStartedEvent(jobExecution, this));
                } finally {
                    this.publishJobStartedEventTimer.record(System.nanoTime() - publishEventStart,
                            TimeUnit.NANOSECONDS);
                }
            }
        } catch (final GeniePreconditionException gpe) {
            log.error(gpe.getMessage(), gpe);
            this.createInitFailureDetailsFile(id, gpe);
            this.genieEventBus.publishAsynchronousEvent(new JobFinishedEvent(id, JobFinishedReason.INVALID,
                    JobStatusMessages.SUBMIT_PRECONDITION_FAILURE, this));
            throw gpe;
        } catch (final Exception e) {
            log.error(e.getMessage(), e);
            this.createInitFailureDetailsFile(id, e);
            this.genieEventBus.publishAsynchronousEvent(new JobFinishedEvent(id,
                    JobFinishedReason.FAILED_TO_INIT, JobStatusMessages.SUBMIT_INIT_FAILURE, this));
            throw e;
        }
    } finally {
        this.overallSubmitTimer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}
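
Note the timing pattern used throughout this method: capture System.nanoTime() at the start, then in a finally block hand the raw nanosecond delta to a metrics timer together with TimeUnit.NANOSECONDS (both Spectator's and Micrometer's Timer expose a record(long amount, TimeUnit unit) overload of this shape). The finally block guarantees the timer is updated even when submission fails.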

From source file:com.netflix.genie.core.jobs.workflow.impl.JobKickoffTask.java

/**
 * {@inheritDoc}
 */
@Override
public void executeTask(@NotNull final Map<String, Object> context) throws GenieException, IOException {
    final long start = System.nanoTime();
    try {
        final JobExecutionEnvironment jobExecEnv = (JobExecutionEnvironment) context
                .get(JobConstants.JOB_EXECUTION_ENV_KEY);
        final String jobWorkingDirectory = jobExecEnv.getJobWorkingDir().getCanonicalPath();
        final JobRequest jobRequest = jobExecEnv.getJobRequest();
        final String user = jobRequest.getUser();
        final Writer writer = (Writer) context.get(JobConstants.WRITER_KEY);
        final String jobId = jobRequest.getId()
                .orElseThrow(() -> new GeniePreconditionException("No job id found. Unable to continue."));
        log.info("Starting Job Kickoff Task for job {}", jobId);

        // At this point all contents are written to the run script and we call an explicit flush and close to write
        // the contents to the file before we execute it.
        try {
            writer.flush();
            writer.close();
        } catch (IOException e) {
            throw new GenieServerException("Failed to execute job with exception." + e);
        }
        // Create user, if enabled
        if (isUserCreationEnabled) {
            createUser(user, jobRequest.getGroup().orElse(null));
        }
        // Set the ownership to the user and run as the user, if enabled
        final List<String> command = new ArrayList<>();
        if (isRunAsUserEnabled) {
            changeOwnershipOfDirectory(jobWorkingDirectory, user);

            // This is needed because the genie.log file is still generated as the user running Genie system.
            makeDirGroupWritable(jobWorkingDirectory + "/genie/logs");
            command.add("sudo");
            command.add("-u");
            command.add(user);
        }

        // If the OS is Linux, use setsid to launch the process so that the entire process
        // tree runs in a process group whose id equals the pid of the parent process.
        if (SystemUtils.IS_OS_LINUX) {
            command.add("setsid");
        }

        final String runScript = jobWorkingDirectory + JobConstants.FILE_PATH_DELIMITER
                + JobConstants.GENIE_JOB_LAUNCHER_SCRIPT;
        command.add(runScript);

        // Cannot convert to executor because it does not provide an api to get process id.
        final ProcessBuilder pb = new ProcessBuilder(command).directory(jobExecEnv.getJobWorkingDir())
                .redirectOutput(new File(jobExecEnv.getJobWorkingDir() + JobConstants.GENIE_LOG_PATH))
                .redirectError(new File(jobExecEnv.getJobWorkingDir() + JobConstants.GENIE_LOG_PATH));

        //
        // Check if file can be executed. This is to fix issue where execution of the run script fails because
        // the file may be used by some other program
        //
        canExecute(runScript);
        try {
            final Process process = pb.start();
            final int processId = this.getProcessId(process);
            final Calendar calendar = Calendar.getInstance(UTC);
            calendar.add(Calendar.SECOND, jobRequest.getTimeout().orElse(JobRequest.DEFAULT_TIMEOUT_DURATION));
            final JobExecution jobExecution = new JobExecution.Builder(this.hostname).withId(jobId)
                    .withProcessId(processId).withCheckDelay(jobExecEnv.getCommand().getCheckDelay())
                    .withTimeout(calendar.getTime()).withMemory(jobExecEnv.getMemory()).build();
            context.put(JobConstants.JOB_EXECUTION_DTO_KEY, jobExecution);
        } catch (final IOException ie) {
            throw new GenieServerException("Unable to start command " + String.valueOf(command), ie);
        }
        log.info("Finished Job Kickoff Task for job {}", jobId);
    } finally {
        final long finish = System.nanoTime();
        this.timer.record(finish - start, TimeUnit.NANOSECONDS);
    }
}

From source file:com.linkedin.pinot.core.query.executor.ServerQueryExecutorV1Impl.java

@Override
public DataTable processQuery(final QueryRequest queryRequest) {
    DataTable instanceResponse;
    long start = System.currentTimeMillis();
    List<SegmentDataManager> queryableSegmentDataManagerList = null;
    InstanceRequest instanceRequest = queryRequest.getInstanceRequest();
    final long requestId = instanceRequest.getRequestId();
    final long nSegmentsInQuery = instanceRequest.getSearchSegmentsSize();
    long nPrunedSegments = -1;
    try {
        TraceContext.register(instanceRequest);
        final BrokerRequest brokerRequest = instanceRequest.getQuery();
        LOGGER.debug("Incoming query is : {}", brokerRequest);
        long startPruningTime = System.nanoTime();
        queryableSegmentDataManagerList = getPrunedQueryableSegments(instanceRequest);
        long pruningTime = System.nanoTime() - startPruningTime;
        _serverMetrics.addPhaseTiming(brokerRequest, ServerQueryPhase.SEGMENT_PRUNING, pruningTime);
        nPrunedSegments = queryableSegmentDataManagerList.size();
        LOGGER.debug("Matched {} segments! ", nPrunedSegments);
        if (queryableSegmentDataManagerList.isEmpty()) {
            return null;
        }
        final long startPlanTime = System.nanoTime();
        final Plan globalQueryPlan = _planMaker.makeInterSegmentPlan(queryableSegmentDataManagerList,
                brokerRequest,
                _instanceDataManager.getTableDataManager(brokerRequest.getQuerySource().getTableName())
                        .getExecutorService(),
                getResourceTimeOut(instanceRequest.getQuery()));
        final long planTime = System.nanoTime() - startPlanTime;
        _serverMetrics.addPhaseTiming(brokerRequest, ServerQueryPhase.BUILD_QUERY_PLAN, planTime);

        if (_printQueryPlan) {
            LOGGER.debug(
                    "***************************** Query Plan for Request {} ***********************************",
                    instanceRequest.getRequestId());
            globalQueryPlan.print();
            LOGGER.debug(
                    "*********************************** End Query Plan ***********************************");
        }

        final long executeStartTime = System.nanoTime();
        globalQueryPlan.execute();
        final long executeTime = System.nanoTime() - executeStartTime;
        _serverMetrics.addPhaseTiming(brokerRequest, ServerQueryPhase.QUERY_PLAN_EXECUTION, executeTime);
        instanceResponse = globalQueryPlan.getInstanceResponse();
        final long end = System.currentTimeMillis();
        LOGGER.debug("Searching Instance for Request Id - {}, browse took: {}", instanceRequest.getRequestId(),
                (end - start));
        LOGGER.debug("InstanceResponse for Request Id - {} : {}", instanceRequest.getRequestId(),
                instanceResponse.toString());
        instanceResponse.getMetadata().put("timeUsedMs", Long.toString((end - start)));
        instanceResponse.getMetadata().put("requestId", Long.toString(instanceRequest.getRequestId()));
        instanceResponse.getMetadata().put("traceInfo",
                TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
        LOGGER.info(
                "Processed requestId {},reqSegments={},prunedSegments={},planTime={},timeUsed={},executeTime={},broker={}",
                requestId, nSegmentsInQuery, nPrunedSegments,
                TimeUnit.MILLISECONDS.convert(planTime, TimeUnit.NANOSECONDS), (end - start),
                TimeUnit.MILLISECONDS.convert(executeTime, TimeUnit.NANOSECONDS), queryRequest.getClientId());
        return instanceResponse;
    } catch (Exception e) {
        _serverMetrics.addMeteredQueryValue(instanceRequest.getQuery(), ServerMeter.QUERY_EXECUTION_EXCEPTIONS,
                1);
        LOGGER.error("Exception processing requestId {}", requestId, e);
        instanceResponse = new DataTable();
        instanceResponse.addException(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
        TraceContext.logException("ServerQueryExecutorV1Impl", "Exception occurs in processQuery");
        long end = System.currentTimeMillis();
        LOGGER.info("Searching Instance for Request Id - {}, browse took: {}", requestId, requestId,
                (end - start));
        LOGGER.info("InstanceResponse for Request Id - {} : {}", requestId, instanceResponse.toString());
        instanceResponse.getMetadata().put("timeUsedMs", Long.toString((end - start)));
        instanceResponse.getMetadata().put("requestId", Long.toString(instanceRequest.getRequestId()));
        instanceResponse.getMetadata().put("traceInfo",
                TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
        return instanceResponse;
    } finally {
        if (_instanceDataManager
                .getTableDataManager(instanceRequest.getQuery().getQuerySource().getTableName()) != null) {
            if (queryableSegmentDataManagerList != null) {
                for (SegmentDataManager segmentDataManager : queryableSegmentDataManagerList) {
                    _instanceDataManager
                            .getTableDataManager(instanceRequest.getQuery().getQuerySource().getTableName())
                            .releaseSegment(segmentDataManager);
                }
            }
        }
        TraceContext.unregister(instanceRequest);
    }
}

From source file:org.apache.hadoop.hbase.client.RawAsyncTableImpl.java

private <T> SingleRequestCallerBuilder<T> newCaller(byte[] row, long rpcTimeoutNs) {
    return conn.callerFactory.<T>single().table(tableName).row(row)
            .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
            .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS);
}
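
Note the Ns suffix on rpcTimeoutNs and operationTimeoutNs: the client keeps its timeout fields in nanoseconds and tags every builder call with an explicit TimeUnit.NANOSECONDS, which keeps the unit visible at the call site and avoids accidental millisecond/nanosecond mix-ups.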

From source file:com.linecorp.armeria.server.ServerTest.java

@Test(timeout = idleTimeoutMillis * 5)
public void testIdleTimeoutByNoContentSent() throws Exception {
    try (Socket socket = new Socket()) {
        socket.setSoTimeout((int) (idleTimeoutMillis * 4));
        socket.connect(server().activePort().get().localAddress());
        long connectedNanos = System.nanoTime();
        //read until EOF
        while (socket.getInputStream().read() != -1) {
            continue;
        }
        long elapsedTimeMillis = TimeUnit.MILLISECONDS.convert(System.nanoTime() - connectedNanos,
                TimeUnit.NANOSECONDS);
        assertThat(elapsedTimeMillis, is(greaterThanOrEqualTo(idleTimeoutMillis)));
    }
}

From source file:com.alibaba.dragoon.common.daemon.filter.DragoonConnectorFilter.java

public void sendNotification(DragoonNotificationMessage notification) {
    try {
        notifyQueue.put(notification);

        // If the queue has grown past the threshold, drop the oldest notification to bound memory use.
        if (notifyQueue.size() > notifyQueueSizeThreshold) {

            DragoonNotificationMessage abandonNotification = notifyQueue.poll(1, TimeUnit.NANOSECONDS);
            if (abandonNotification != null) {
                abandonNotificationCount.incrementAndGet();
            }
        }
    } catch (InterruptedException e) {
        // skip
    }
}
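
The poll(1, TimeUnit.NANOSECONDS) call is effectively a non-blocking drain of one element: BlockingQueue.poll(timeout, unit) returns the head immediately if one is available, and with a one-nanosecond timeout it gives up almost at once when the queue is empty, so the producer thread never stalls while trimming the queue back under the threshold.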