Example usage for java.util.concurrent TimeUnit NANOSECONDS

List of usage examples for java.util.concurrent TimeUnit NANOSECONDS

Introduction

In this page you can find the example usage for java.util.concurrent TimeUnit NANOSECONDS.

Prototype

TimeUnit NANOSECONDS

To view the source code for java.util.concurrent TimeUnit NANOSECONDS, click the Source Link below.

Click Source Link

Document

Time unit representing one thousandth of a microsecond.

Usage

From source file:com.netflix.genie.web.security.saml.SAMLUserDetailsServiceImplUnitTests.java

/**
 * Verify that a user who is not in the admin group is granted only the plain user role.
 *
 * <p>The SAML credential is mocked to supply the expected user id and two random
 * group names, neither of which is the admin group.
 */
@Test
public void canLoadUserWithoutAdminGroup() {
    final SAMLCredential credential = Mockito.mock(SAMLCredential.class);
    Mockito.when(credential.getAttributeAsString(Mockito.eq(USER_ATTRIBUTE_NAME))).thenReturn(USER_ID);
    final String[] nonAdminGroups = { UUID.randomUUID().toString(), UUID.randomUUID().toString() };
    Mockito.when(credential.getAttributeAsStringArray(Mockito.eq(GROUP_ATTRIBUTE_NAME)))
            .thenReturn(nonAdminGroups);

    final Object principal = this.service.loadUserBySAML(credential);

    Assert.assertThat(principal, Matchers.notNullValue());
    Assert.assertTrue(principal instanceof User);
    final User loadedUser = (User) principal;
    Assert.assertThat(loadedUser.getUsername(), Matchers.is(USER_ID));
    // Exactly one authority, and it must be the user role.
    Assert.assertThat(loadedUser.getAuthorities(), Matchers.contains(new SimpleGrantedAuthority("ROLE_USER")));
    Assert.assertThat(loadedUser.getAuthorities().size(), Matchers.is(1));
    // The authentication load must have been timed once, in nanoseconds.
    Mockito.verify(this.loadAuthenticationTimer, Mockito.times(1)).record(Mockito.anyLong(),
            Mockito.eq(TimeUnit.NANOSECONDS));
}

From source file:com.vmware.identity.saml.idm.IdmPrincipalAttributesExtractor.java

/**
 * Finds an active user in the tenant's system domain by the given attribute.
 *
 * <p>The elapsed time of the IDM call is traced in milliseconds. IDM-level
 * invalid-principal failures are rethrown as this layer's
 * {@link InvalidPrincipalException}; anything else becomes a {@link SystemException}.
 */
@Override
public PrincipalId findActiveUser(String attributeName, String attributeValue)
        throws InvalidPrincipalException, SystemException {
    final long startNanos = System.nanoTime();
    try {
        final PrincipalId principal = idmClient.findActiveUserInSystemDomain(tenantName, attributeName,
                attributeValue);
        final long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        perfLog.trace("'idmClient.findActiveUser' took {} ms.", elapsedMillis);
        return principal;
    } catch (com.vmware.identity.idm.InvalidPrincipalException e) {
        throw new InvalidPrincipalException(e);
    } catch (Exception e) {
        throw new SystemException(e);
    }
}

From source file:org.wso2.carbon.apimgt.gateway.handlers.security.APIAuthenticationHandler.java

/**
 * Authenticates the incoming gateway request.
 *
 * @param messageContext the Synapse message context for the current call
 * @return {@code true} if authentication succeeded, {@code false} otherwise
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EXS_EXCEPTION_SOFTENING_RETURN_FALSE", justification = "Error is sent through payload")
public boolean handleRequest(MessageContext messageContext) {
    Timer timer = MetricManager.timer(org.wso2.carbon.metrics.manager.Level.INFO,
            MetricManager.name(APIConstants.METRICS_PREFIX, this.getClass().getSimpleName()));
    Timer.Context context = timer.start();
    long startTime = System.nanoTime();

    try {
        if (APIUtil.isAnalyticsEnabled()) {
            long currentTime = System.currentTimeMillis();
            messageContext.setProperty("api.ut.requestTime", Long.toString(currentTime));
        }
        if (authenticator == null) {
            initializeAuthenticator();
        }
        if (authenticator.authenticate(messageContext)) {
            if (log.isDebugEnabled()) {
                // We do the calculations only if the debug logs are enabled. Otherwise this would be an overhead
                // to all the gateway calls that is happening.
                // BUG FIX: the old code divided the already-millisecond value by 1,000,000
                // again, so the logged elapsed time was always ~0.
                long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
                String messageDetails = logMessageDetails(messageContext);

                log.debug("Authenticated API, authentication response relieved: " + messageDetails
                        + ", elapsedTimeInMilliseconds=" + elapsedMillis);
            }
            setAPIParametersToMessageContext(messageContext);
            return true;
        }
    } catch (APISecurityException e) {

        if (log.isDebugEnabled()) {
            // We do the calculations only if the debug logs are enabled. Otherwise this would be an overhead
            // to all the gateway calls that is happening.
            long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
            String messageDetails = logMessageDetails(messageContext);
            log.debug("Call to API gateway : " + messageDetails + ", elapsedTimeInMilliseconds="
                    + elapsedMillis);
        }
        // We do not need to log authentication failures as errors since these are not product errors.
        log.warn("API authentication failure due to "
                + APISecurityConstants.getAuthenticationFailureMessage(e.getErrorCode()));

        // NOTE(review): this error log duplicates the warn above and contradicts the
        // comment that auth failures are not product errors — consider removing it.
        log.error("API authentication failed with error " + e.getErrorCode(), e);

        handleAuthFailure(messageContext, e);
    } finally {
        // Always publish the security latency and stop the metrics timer.
        messageContext.setProperty(APIMgtGatewayConstants.SECURITY_LATENCY,
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime));
        context.stop();
    }

    return false;
}

From source file:org.apache.hadoop.mapred.gridmix.GridmixJob.java

/**
 * Orders jobs for the delay queue by submission time, breaking ties by job id.
 *
 * <p>For non-{@code GridmixJob} peers, falls back to comparing remaining delays.
 *
 * @param other the delayed element to compare against
 * @return negative, zero, or positive per the {@link Comparable} contract
 */
@Override
public int compareTo(Delayed other) {
    if (this == other) {
        return 0;
    }
    if (other instanceof GridmixJob) {
        final long otherNanos = ((GridmixJob) other).submissionTimeNanos;
        if (otherNanos < submissionTimeNanos) {
            return 1;
        }
        if (otherNanos > submissionTimeNanos) {
            return -1;
        }
        // BUG FIX: id() - other.id() can overflow int and flip the sign;
        // Integer.compare is overflow-safe.
        return Integer.compare(id(), ((GridmixJob) other).id());
    }
    final long diff = getDelay(TimeUnit.NANOSECONDS) - other.getDelay(TimeUnit.NANOSECONDS);
    return 0 == diff ? 0 : (diff > 0 ? 1 : -1);
}

From source file:de.tud.inf.db.sparqlytics.olap.Compute.java

/**
 * Creates and executes the SPARQL query for this compute step against the
 * session's endpoint, recording timing and size metrics, and optionally
 * writing the result to the session's output in the configured format.
 *
 * <p>Metrics recorded: query creation time, plain and indented query length,
 * query execution time, and result size.
 */
@Override
public void run(final Session session) {
    ResultsFormat resultsFormat = session.getResultsFormat();
    if (resultsFormat == null) {
        // Default to RDF/XML when the session does not specify a format.
        resultsFormat = ResultsFormat.FMT_RDF_XML;
    }

    //Create SPARQL query and measure elapsed time
    Timer createQuery = Main.METRICS.timer(MetricRegistry.name(Compute.class, "createQuery"));
    long creationTime;
    Query query;
    Timer.Context time = createQuery.time();
    try {
        // CSV/TSV result formats get a differently shaped query (second arg false).
        query = createQuery(session,
                resultsFormat != ResultsFormat.FMT_RS_CSV && resultsFormat != ResultsFormat.FMT_RS_TSV);
    } finally {
        // Timer.Context.stop() returns the elapsed time in nanoseconds.
        creationTime = time.stop();
    }
    createQuery.update(creationTime, TimeUnit.NANOSECONDS);

    //Measure query length
    String queryString = query.toString();
    Histogram plainQueryLength = Main.METRICS.histogram(MetricRegistry.name(Compute.class, "plainQueryLength"));
    plainQueryLength.update(queryString.length());
    IndentedLineBuffer buffer = new IndentedLineBuffer();
    query.serialize(buffer);
    String indentedQueryString = buffer.toString();
    Histogram indentedQueryLength = Main.METRICS
            .histogram(MetricRegistry.name(Compute.class, "indentedQueryLength"));
    // Indented "length" is measured in lines (getRow() is 1-based), not characters.
    indentedQueryLength.update(buffer.getRow() - 1);

    //Execute SPARQL query and measure elapsed time and result size
    Timer executeQuery = Main.METRICS.timer(MetricRegistry.name(Compute.class, "executeQuery"));
    long executionTime;
    Histogram resultSize = Main.METRICS.histogram(MetricRegistry.name(Compute.class, "resultSize"));
    QueryEngineHTTP exec = (QueryEngineHTTP) QueryExecutionFactory.sparqlService(session.getSPARQLEndpointURL(),
            queryString);
    exec.setModelContentType(WebContent.contentTypeRDFXML);
    if (query.isConstructType()) {
        // CONSTRUCT queries produce a Model.
        Model model;
        time = executeQuery.time();
        try {
            model = exec.execConstruct();
        } catch (RuntimeException ex) {
            // Attach the query text to the exception to ease debugging.
            throw extendRuntimeException(ex, indentedQueryString);
        } finally {
            executionTime = time.stop();
            exec.close();
        }

        try {
            resultSize.update(model.size());

            //Possibly output result
            if (resultsFormat != ResultsFormat.FMT_NONE) {
                Lang lang = ResultsFormat.convert(resultsFormat);
                if (lang == null) {
                    // Fall back to resolving the language via its content type symbol.
                    lang = RDFLanguages.contentTypeToLang(resultsFormat.getSymbol());
                }
                try (OutputStream output = session.getOutput()) {
                    model.write(output, lang == null ? null : lang.getLabel(), null);
                } catch (IOException ex) {
                    throw new RuntimeException(ex);
                }
            }
        } finally {
            model.close();
        }
    } else {
        // SELECT (and similar) queries produce a ResultSet.
        ResultSet result;
        time = executeQuery.time();
        try {
            result = exec.execSelect();
        } catch (RuntimeException ex) {
            throw extendRuntimeException(ex, indentedQueryString);
        } finally {
            executionTime = time.stop();
        }

        //Possibly output result
        try {
            if (resultsFormat != ResultsFormat.FMT_NONE) {
                try (OutputStream output = session.getOutput()) {
                    ResultSetFormatter.output(output, result, resultsFormat);
                } catch (IOException ex) {
                    throw new RuntimeException(ex);
                }
                // After formatting, getRowNumber() reflects the rows consumed.
                resultSize.update(result.getRowNumber());
            } else {
                // No output requested: just drain the result set to count rows.
                resultSize.update(ResultSetFormatter.consume(result));
            }
        } finally {
            // Keep exec open until the ResultSet has been consumed, then close it.
            exec.close();
        }
    }
    executeQuery.update(executionTime, TimeUnit.NANOSECONDS);
    LOG.debug("{}\n\nCreation {} us, Execution {} us", indentedQueryString,
            TimeUnit.NANOSECONDS.toMicros(creationTime), TimeUnit.NANOSECONDS.toMicros(executionTime));

    if (Main.getInstance().isDebug()) {
        System.err.print(indentedQueryString);
    }
}

From source file:net.shipilev.fjptrace.tasks.PrintSummaryTask.java

/**
 * Prints aggregate latencies for task events: total EXEC -> EXECUTED time and
 * total COMPLETING -> COMPLETED time, both reported in milliseconds.
 *
 * <p>Start timestamps are tracked per event tag; an end event with no matching
 * start is silently ignored.
 */
private void summarizeEvents(PrintWriter pw, Events events) {
    final SummaryStatistics completeTimes = new SummaryStatistics();
    final SummaryStatistics execTimes = new SummaryStatistics();
    // Start timestamps keyed by event tag, shared by both start/end pairs.
    final Map<Integer, Long> startTimes = new HashMap<>();

    for (Event event : events) {
        switch (event.eventType) {
        case COMPLETING:
            startTimes.put(event.tag, event.time);
            break;
        case COMPLETED: {
            final Long begin = startTimes.get(event.tag);
            if (begin != null) {
                completeTimes.addValue(event.time - begin);
            }
            break;
        }
        case EXEC:
            startTimes.put(event.tag, event.time);
            break;
        case EXECUTED: {
            final Long begin = startTimes.get(event.tag);
            if (begin != null) {
                execTimes.addValue(event.time - begin);
            }
            break;
        }
        }
    }

    pw.println();
    pw.println("EXEC -> EXECUTED: " + TimeUnit.NANOSECONDS.toMillis((long) execTimes.getSum()) + "ms");
    pw.println(
            "COMPLETING -> COMPLETED: " + TimeUnit.NANOSECONDS.toMillis((long) completeTimes.getSum()) + "ms");

}

From source file:UnitTest4.java

/**
 * Sends an empty JSON POST to http://localhost:5000/ using the async HTTP client,
 * then logs the response status line, headers, and body.
 *
 * @throws IOException          if the request or client shutdown fails
 * @throws InterruptedException if interrupted while waiting for the response
 * @throws ExecutionException   if the request fails asynchronously
 */
public static void execute()
        throws ClientProtocolException, IOException, InterruptedException, ExecutionException {
    CloseableHttpAsyncClient httpClient = HttpAsyncClients.createDefault();
    try {
        httpClient.start();

        HttpHost target = new HttpHost("localhost", 5000, "http");
        HttpPost postRequest = new HttpPost("/");
        postRequest.addHeader("content-type", "application/json");
        postRequest.setEntity(new StringEntity(""));

        log.debug("execute() executing request to " + target);

        Future<HttpResponse> future = httpClient.execute(target, postRequest, null);
        // BUG FIX: the old code busy-waited on future.isDone() (burning CPU) and
        // then raced a 100 ns timeout whose swallowed TimeoutException left the
        // response null, causing an NPE below. Just block for the result.
        HttpResponse httpResponse = future.get();

        log.debug("execute()----------------------------------------");
        log.debug("execute() {}", httpResponse.getStatusLine());
        for (Header header : httpResponse.getAllHeaders()) {
            log.debug("execute() {}", header);
        }
        log.debug("execute()----------------------------------------");

        HttpEntity entity = httpResponse.getEntity();
        if (entity != null) {
            String jsonString = EntityUtils.toString(entity);
            log.debug("execute() {}", jsonString);
        }
    } finally {
        // BUG FIX: the client was never closed, leaking its I/O reactor threads.
        httpClient.close();
    }
}

From source file:com.netflix.genie.web.tasks.leader.DatabaseCleanupTaskUnitTests.java

/**
 * Verify that run() propagates a RuntimeException from the persistence layer,
 * and that the failure is still recorded on the deletion timer.
 */
@Test(expected = RuntimeException.class)
public void cantRun() {
    final int retentionDays = 5;
    final int batchSize = 10;
    final int deletionCap = 10_000;

    // First retention lookup is positive, the second negated.
    Mockito.when(this.cleanupProperties.getRetention())
            .thenReturn(retentionDays)
            .thenReturn(-1 * retentionDays);
    Mockito.when(this.cleanupProperties.getPageSize()).thenReturn(batchSize);
    Mockito.when(this.cleanupProperties.getMaxDeletedPerTransaction()).thenReturn(deletionCap);

    // Any batch deletion attempt blows up.
    Mockito.when(this.jobPersistenceService.deleteBatchOfJobsCreatedBeforeDate(Mockito.any(Date.class),
            Mockito.anyInt(), Mockito.anyInt())).thenThrow(new RuntimeException("test"));

    try {
        this.task.run();
    } finally {
        // Even on failure, the metric is tagged as a failure and timed once in nanoseconds.
        Mockito.verify(this.deletionTimerId, Mockito.times(1))
                .withTags(MetricsUtils.newFailureTagsMapForException(new RuntimeException()));
        Mockito.verify(this.deletionTimer, Mockito.times(1)).record(Mockito.anyLong(),
                Mockito.eq(TimeUnit.NANOSECONDS));
    }
}

From source file:co.paralleluniverse.galaxy.core.BackupImpl.java

/**
 * Starts the periodic flush task: flushNow() runs every maxDelayNanos
 * nanoseconds, after an initial delay of the same length.
 */
private void startFlushThread() {
    final Runnable flushTask = new Runnable() {
        @Override
        public void run() {
            flushNow();
        }
    };
    scheduler.scheduleAtFixedRate(flushTask, maxDelayNanos, maxDelayNanos, TimeUnit.NANOSECONDS);
}

From source file:com.amazonaws.util.TimingInfo.java

/**
 * Returns the duration between the given start and end times (both in
 * nanoseconds) as fractional milliseconds.
 *
 * <p>The value is truncated to whole microseconds first, then converted to
 * milliseconds as a double so sub-millisecond precision is kept.
 *
 * @param startTimeNano start time in nanoseconds
 * @param endTimeNano   end time in nanoseconds
 * @return elapsed time in milliseconds with microsecond precision
 */
public static double durationMilliOf(long startTimeNano, long endTimeNano) {
    final long elapsedMicros = TimeUnit.NANOSECONDS.toMicros(endTimeNano - startTimeNano);
    return elapsedMicros / 1000.0;
}