Example usage for java.util.concurrent TimeUnit NANOSECONDS

Introduction

This page collects example usages of java.util.concurrent TimeUnit.NANOSECONDS from open-source projects.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
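
Before the project examples, here is a minimal, self-contained sketch of the two idioms that recur throughout this page: converting a System.nanoTime() delta to coarser units with toMillis, and converting a millisecond value into nanoseconds with convert. The class name NanosDemo and the Thread.sleep stand-in are illustrative only, not taken from any project below.

import java.util.concurrent.TimeUnit;

public class NanosDemo {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        Thread.sleep(25); // stand-in for the work being timed
        long elapsed = System.nanoTime() - start;

        // Nanoseconds to coarser units; lossy conversions truncate toward zero
        System.out.println("elapsed: " + TimeUnit.NANOSECONDS.toMillis(elapsed) + " ms");
        System.out.println("elapsed: " + TimeUnit.NANOSECONDS.toMicros(elapsed) + " us");

        // Milliseconds to nanoseconds, the direction GridmixJob uses below
        long nanos = TimeUnit.NANOSECONDS.convert(150, TimeUnit.MILLISECONDS);
        System.out.println("150 ms = " + nanos + " ns");
    }
}

Note that NANOSECONDS.toMillis(d) is shorthand for MILLISECONDS.convert(d, NANOSECONDS); both truncate rather than round when converting to a coarser unit.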

Usage

From source file:com.vmware.identity.saml.idm.IdmPrincipalAttributesExtractor.java
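This example wraps the idmClient.getAttributeValues call with System.nanoTime() and uses TimeUnit.NANOSECONDS.toMillis to report the elapsed time in milliseconds on a trace-level performance log.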

@Override
public Set<PrincipalAttribute> getAttributes(PrincipalId principalId,
        Collection<PrincipalAttributeDefinition> attributeDefinitions)
        throws InvalidPrincipalException, SystemException {
    Validate.notNull(principalId);
    Validate.notNull(attributeDefinitions);

    Set<Attribute> attributeDefsIDM = new HashSet<Attribute>();
    for (PrincipalAttributeDefinition principalAttributeDefinition : attributeDefinitions) {
        attributeDefsIDM.add(convertToIDMAttributeDefinition(principalAttributeDefinition));
    }

    Collection<AttributeValuePair> attributes = Collections.emptySet();
    try {
        final long start = System.nanoTime();
        attributes = idmClient.getAttributeValues(tenantName, principalId, attributeDefsIDM);
        perfLog.trace("'idmClient.getAttributes' took {} ms.",
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
    } catch (com.vmware.identity.idm.InvalidPrincipalException e) {
        throw new InvalidPrincipalException(e);
    } catch (Exception e) {
        throw new SystemException(e);
    }

    Set<PrincipalAttribute> result = Collections.emptySet();
    if (attributes != null) {
        log.trace("{} attributes retrieved for {}", attributes.size(), principalId);
        result = new HashSet<PrincipalAttribute>(attributes.size());
        for (AttributeValuePair attr : attributes) {
            final Attribute attrDefinition = attr.getAttrDefinition();
            if (attrDefinition == null || attrDefinition.getName() == null
                    || attrDefinition.getNameFormat() == null) {
                throw new IllegalStateException("Missing or invalid attribute definition!");
            }
            List<String> values = attr.getValues();
            final PrincipalAttribute newAttr = new PrincipalAttribute(attrDefinition.getName(),
                    attrDefinition.getNameFormat(), attrDefinition.getFriendlyName(),
                    values == null || values.size() == 0 ? null : values.toArray(new String[values.size()]));
            result.add(newAttr);
            log.trace("An attribute {} retrieved for {}", newAttr, principalId);
        }
    }
    return result;
}

From source file:org.apache.hadoop.mapreduce.SimpleEntityWriterV1.java
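Here each timeline write is timed individually; the System.nanoTime() deltas are converted to milliseconds with TimeUnit.NANOSECONDS.toMillis and accumulated into totalTime, which is then reported through MapReduce counters.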

public void map(IntWritable key, IntWritable val, Context context) throws IOException {
    TimelineClient tlc = new TimelineClientImpl();
    Configuration conf = context.getConfiguration();

    final int kbs = conf.getInt(KBS_SENT, KBS_SENT_DEFAULT);

    long totalTime = 0;
    final int testtimes = conf.getInt(TEST_TIMES, TEST_TIMES_DEFAULT);
    final Random rand = new Random();
    final TaskAttemptID taskAttemptId = context.getTaskAttemptID();
    final char[] payLoad = new char[kbs * 1024];

    for (int i = 0; i < testtimes; i++) {
        // Generate a fixed length random payload
        for (int xx = 0; xx < kbs * 1024; xx++) {
            int alphaNumIdx = rand.nextInt(ALPHA_NUMS.length);
            payLoad[xx] = ALPHA_NUMS[alphaNumIdx];
        }
        String entId = taskAttemptId + "_" + Integer.toString(i);
        final TimelineEntity entity = new TimelineEntity();
        entity.setEntityId(entId);
        entity.setEntityType("FOO_ATTEMPT");
        entity.addOtherInfo("PERF_TEST", payLoad);
        // add an event
        TimelineEvent event = new TimelineEvent();
        event.setTimestamp(System.currentTimeMillis());
        event.setEventType("foo_event");
        entity.addEvent(event);

        // use the current user for this purpose
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        long startWrite = System.nanoTime();
        try {
            tlc.putEntities(entity);
        } catch (Exception e) {
            context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_FAILURES).increment(1);
            LOG.error("writing to the timeline service failed", e);
        }
        long endWrite = System.nanoTime();
        totalTime += TimeUnit.NANOSECONDS.toMillis(endWrite - startWrite);
    }
    LOG.info("wrote " + testtimes + " entities (" + kbs * testtimes + " kB) in " + totalTime + " ms");
    context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_TIME).increment(totalTime);
    context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_COUNTER).increment(testtimes);
    context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_KBS).increment(kbs * testtimes);
}

From source file:com.vmware.identity.interop.ldap.LdapConnection.java
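The same toMillis idiom, here guarded by perfLog.isTraceEnabled() so the string formatting and conversion only run when trace logging is active.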

@Override
public void bindConnection(String dn, String cred, LdapBindMethod method) {
    this.validate();

    ILdapClientLibrary ldapClientLibrary = getLdapLibrary();

    long startedAt = System.nanoTime();
    try {
        ldapClientLibrary.ldap_bind_s(this._connectionContext, dn, cred, method.getCode());
    } finally {
        if (perfLog.isTraceEnabled()) {
            perfLog.trace(String.format("bindConnection took [%d]ms",
                    TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startedAt)));
        }
    }
}

From source file:org.apache.hadoop.mapred.gridmix.GridmixJob.java
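This constructor converts in the opposite direction: TimeUnit.NANOSECONDS.convert(submissionMillis, TimeUnit.MILLISECONDS) turns a submission time given in milliseconds into nanoseconds.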

public GridmixJob(final Configuration conf, long submissionMillis, final JobStory jobdesc, Path outRoot,
        UserGroupInformation ugi, final int seq) throws IOException {
    this.ugi = ugi;
    this.jobdesc = jobdesc;
    this.seq = seq;

    ((StringBuilder) nameFormat.get().out()).setLength(JOBNAME.length());
    try {
        job = this.ugi.doAs(new PrivilegedExceptionAction<Job>() {
            public Job run() throws IOException {
                Job ret = new Job(conf, nameFormat.get().format("%05d", seq).toString());
                ret.getConfiguration().setInt(GRIDMIX_JOB_SEQ, seq);
                ret.getConfiguration().set(ORIGNAME,
                        null == jobdesc.getJobID() ? "<unknown>" : jobdesc.getJobID().toString());
                if (conf.getBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false)) {
                    setJobQueue(ret, jobdesc.getQueueName());
                } else {
                    setJobQueue(ret, conf.get(GRIDMIX_DEFAULT_QUEUE));
                }

                return ret;
            }
        });
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    submissionTimeNanos = TimeUnit.NANOSECONDS.convert(submissionMillis, TimeUnit.MILLISECONDS);
    outdir = new Path(outRoot, "" + seq);
}

From source file:org.agatom.springatom.webmvc.controllers.wizard.SVWizardController.java
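Measures wizard initialization in nanoseconds and converts the delta to milliseconds, storing it on the WizardSubmission and echoing it in the completion log line.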

/**
 * <b>onWizardInit</b> is called as the first method when new wizard is launched. Selects {@link WizardProcessor}
 * out of {@link #processorMap} and calls {@link WizardProcessor#onWizardInit(java.util.Locale)} in order to
 * retrieve {@link org.agatom.springatom.cmp.wizards.data.WizardDescriptor} for the {@code key} wizard.
 *
 * <b>URI: /cmp/wiz/initialize/{wizard}</b>
 *
 * @param wizard unique id of the {@link WizardProcessor}
 * @param locale current locale (vital to return descriptor with valid labels etc.)
 *
 * @return {@link WizardSubmission} the submission
 */
public WizardSubmission onWizardInit(final String wizard, final Locale locale) throws Exception {
    LOGGER.debug(String.format("onWizardInit(key=%s,locale=%s)", wizard, locale));
    final long startTime = System.nanoTime();

    WizardSubmission submission = null;
    WizardResult result;

    try {

        final WizardProcessor wizardProcessor = this.processorMap.get(wizard);
        result = wizardProcessor.onWizardInit(locale);

    } catch (Exception exp) {
        LOGGER.debug(String.format("onWizardInit(wizard=%s) failed", wizard), exp);
        throw exp;
    }
    final long endTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);

    if (result != null) {
        submission = (WizardSubmission) new WizardSubmission(result, Submission.INIT).setSize(1)
                .setSuccess(true).setTime(endTime);
    }

    LOGGER.trace(String.format("onWizardInit(wizard=%s) completed in %d ms", wizard, endTime));

    return submission;
}

From source file:org.apache.solr.handler.TestConfigReload.java
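A polling loop bounded by wall-clock time: TimeUnit.SECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) converts the running nanosecond delta into seconds for comparison against maxTimeoutSeconds.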

private void checkConfReload(SolrZkClient client, String resPath, String name, String uri) throws Exception {
    Stat stat = new Stat();
    byte[] data = null;
    try {
        data = client.getData(resPath, null, stat, true);
    } catch (KeeperException.NoNodeException e) {
        data = "{}".getBytes(StandardCharsets.UTF_8);
        log.info("creating_node {}", resPath);
        client.create(resPath, data, CreateMode.PERSISTENT, true);
    }
    long startTime = System.nanoTime();
    Stat newStat = client.setData(resPath, data, true);
    client.setData("/configs/conf1", new byte[] { 1 }, true);
    assertTrue(newStat.getVersion() > stat.getVersion());
    log.info("new_version " + newStat.getVersion());
    Integer newVersion = newStat.getVersion();
    long maxTimeoutSeconds = 20;
    DocCollection coll = cloudClient.getZkStateReader().getClusterState().getCollection("collection1");
    List<String> urls = new ArrayList<>();
    for (Slice slice : coll.getSlices()) {
        for (Replica replica : slice.getReplicas())
            urls.add("" + replica.get(ZkStateReader.BASE_URL_PROP) + "/"
                    + replica.get(ZkStateReader.CORE_NAME_PROP));
    }
    HashSet<String> succeeded = new HashSet<>();

    while (TimeUnit.SECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) < maxTimeoutSeconds) {
        Thread.sleep(50);
        for (String url : urls) {
            Map respMap = getAsMap(url + uri + "?wt=json");
            if (String.valueOf(newVersion)
                    .equals(String.valueOf(getObjectByPath(respMap, true, asList(name, "znodeVersion"))))) {
                succeeded.add(url);
            }
        }
        if (succeeded.size() == urls.size())
            break;
        succeeded.clear();
    }
    assertEquals(StrUtils.formatString("tried these servers {0} succeeded only in {1} ", urls, succeeded),
            urls.size(), succeeded.size());
}

From source file:com.netflix.genie.web.security.saml.SAMLUserDetailsServiceImplUnitTests.java
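In this unit test TimeUnit.NANOSECONDS appears as a Mockito argument matcher, verifying that the authentication timer metric was recorded in nanoseconds.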

/**
 * Make sure if no groups are found but a user id is that the user logs in but only gets role user.
 */
@Test
public void canLoadUserWithoutGroups() {
    final SAMLCredential credential = Mockito.mock(SAMLCredential.class);
    Mockito.when(credential.getAttributeAsString(Mockito.eq(USER_ATTRIBUTE_NAME))).thenReturn(USER_ID);
    Mockito.when(credential.getAttributeAsStringArray(Mockito.eq(GROUP_ATTRIBUTE_NAME))).thenReturn(null);
    final Object result = this.service.loadUserBySAML(credential);

    Assert.assertThat(result, Matchers.notNullValue());
    Assert.assertTrue(result instanceof User);
    final User user = (User) result;
    Assert.assertThat(user.getUsername(), Matchers.is(USER_ID));
    Assert.assertThat(user.getAuthorities(), Matchers.contains(new SimpleGrantedAuthority("ROLE_USER")));
    Mockito.verify(this.loadAuthenticationTimer, Mockito.times(1)).record(Mockito.anyLong(),
            Mockito.eq(TimeUnit.NANOSECONDS));
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.LocationsIndexRebuildOp.java
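After the rebuild finishes, the total duration is converted from nanoseconds to milliseconds and formatted into a human-readable H:MM:SS string with DurationFormatUtils.formatDurationHMS.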

public void initiate() throws IOException {
    LOG.info("Starting index rebuilding");

    // Move locations index to a backup directory
    String basePath = Bookie.getCurrentDirectory(conf.getLedgerDirs()[0]).toString();
    Path currentPath = FileSystems.getDefault().getPath(basePath, "locations");
    String timestamp = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ").format(new Date());
    Path backupPath = FileSystems.getDefault().getPath(basePath, "locations.BACKUP-" + timestamp);
    Files.move(currentPath, backupPath);

    LOG.info("Created locations index backup at {}", backupPath);

    long startTime = System.nanoTime();

    EntryLogger entryLogger = new EntryLogger(conf, new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold())));
    Set<Long> entryLogs = entryLogger.getEntryLogsSet();

    String locationsDbPath = FileSystems.getDefault().getPath(basePath, "locations").toFile().toString();

    Set<Long> activeLedgers = getActiveLedgers(conf, KeyValueStorageRocksDB.factory, basePath);
    LOG.info("Found {} active ledgers in ledger manager", activeLedgers.size());

    KeyValueStorage newIndex = KeyValueStorageRocksDB.factory.newKeyValueStorage(locationsDbPath,
            DbConfigType.Huge, conf);

    int totalEntryLogs = entryLogs.size();
    int completedEntryLogs = 0;
    LOG.info("Scanning {} entry logs", totalEntryLogs);

    for (long entryLogId : entryLogs) {
        entryLogger.scanEntryLog(entryLogId, new EntryLogScanner() {
            @Override
            public void process(long ledgerId, long offset, ByteBuf entry) throws IOException {
                long entryId = entry.getLong(8);

                // Actual location indexed is pointing past the entry size
                long location = (entryLogId << 32L) | (offset + 4);

                if (LOG.isDebugEnabled()) {
                    LOG.debug("Rebuilding {}:{} at location {} / {}", ledgerId, entryId, location >> 32,
                            location & (Integer.MAX_VALUE - 1));
                }

                // Update the ledger index page
                LongPairWrapper key = LongPairWrapper.get(ledgerId, entryId);
                LongWrapper value = LongWrapper.get(location);
                newIndex.put(key.array, value.array);
            }

            @Override
            public boolean accept(long ledgerId) {
                return activeLedgers.contains(ledgerId);
            }
        });

        ++completedEntryLogs;
        LOG.info("Completed scanning of log {}.log -- {} / {}", Long.toHexString(entryLogId),
                completedEntryLogs, totalEntryLogs);
    }

    newIndex.sync();
    newIndex.close();

    LOG.info("Rebuilding index is done. Total time: {}", DurationFormatUtils
            .formatDurationHMS(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime)));
}

From source file:com.hurence.logisland.connect.opc.CommonOpcSourceTask.java
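Here TimeUnit.NANOSECONDS is passed to ScheduledExecutorService.scheduleAtFixedRate so the polling period, obtained from Duration.toNanos(), keeps its full precision.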

@Override
public void start(Map<String, String> props) {
    setConfigurationProperties(props);

    transferQueue = new LinkedTransferQueue<>();
    opcOperations = new SmartOpcOperations<>(createOpcOperations());
    ConnectionProfile connectionProfile = createConnectionProfile();
    host = connectionProfile.getConnectionUri().getHost();
    tagInfoMap = CommonUtils.parseTagsFromProperties(props).stream()
            .collect(Collectors.toMap(TagInfo::getTagId, Function.identity()));
    minWaitTime = Math.min(10, tagInfoMap.values().stream().map(TagInfo::getSamplingInterval)
            .mapToLong(Duration::toMillis).min().getAsLong());
    opcOperations.connect(connectionProfile);
    if (!opcOperations.awaitConnected()) {
        throw new ConnectException("Unable to connect");
    }

    //set up polling source emission
    pollingScheduler = Executors.newSingleThreadScheduledExecutor();
    streamingThread = Executors.newSingleThreadExecutor();
    Map<Duration, List<TagInfo>> pollingMap = tagInfoMap.values().stream()
            .filter(tagInfo -> StreamingMode.POLL.equals(tagInfo.getStreamingMode()))
            .collect(Collectors.groupingBy(TagInfo::getSamplingInterval));
    final Map<String, OpcData> lastValues = Collections.synchronizedMap(new HashMap<>());
    pollingMap.forEach((k, v) -> pollingScheduler.scheduleAtFixedRate(() -> {
        final Instant now = Instant.now();
        v.stream().map(TagInfo::getTagId).map(lastValues::get).filter(Functions.not(Objects::isNull))
                .map(data -> Pair.of(now, data)).forEach(transferQueue::add);

    }, 0, k.toNanos(), TimeUnit.NANOSECONDS));
    //then subscribe for all
    final SubscriptionConfiguration subscriptionConfiguration = new SubscriptionConfiguration()
            .withDefaultSamplingInterval(Duration.ofMillis(10_000));
    tagInfoMap.values().forEach(tagInfo -> subscriptionConfiguration
            .withTagSamplingIntervalForTag(tagInfo.getTagId(), tagInfo.getSamplingInterval()));
    running.set(true);
    streamingThread.submit(() -> {
        while (running.get()) {
            try {
                createSessionIfNeeded();
                if (session == null) {
                    return;
                }

                session.stream(subscriptionConfiguration,
                        tagInfoMap.keySet().toArray(new String[tagInfoMap.size()])).forEach(opcData -> {
                            if (tagInfoMap.get(opcData.getTag()).getStreamingMode()
                                    .equals(StreamingMode.SUBSCRIBE)) {
                                transferQueue.add(Pair.of(
                                        hasServerSideSampling() ? opcData.getTimestamp() : Instant.now(),
                                        opcData));
                            } else {
                                lastValues.put(opcData.getTag(), opcData);
                            }
                        });
            } catch (Exception e) {
                if (running.get()) {
                    logger.warn("Stream interrupted while reading from " + host, e);
                    safeCloseSession();
                    lastValues.clear();

                }
            }
        }
    });

}