Example usage for java.util.concurrent TimeUnit NANOSECONDS

Introduction

On this page you can find example usages of java.util.concurrent TimeUnit NANOSECONDS.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
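
Before the project examples, a minimal self-contained sketch (not taken from any of the source files below) of the two most common NANOSECONDS idioms: converting between units and measuring elapsed time with System.nanoTime().

import java.util.concurrent.TimeUnit;

public class NanosDemo {
    public static void main(String[] args) throws InterruptedException {
        // Converting toward a coarser unit truncates: 1,500,000 ns -> 1 ms.
        long millis = TimeUnit.NANOSECONDS.toMillis(1_500_000L);

        // Convert 2 seconds into nanoseconds.
        long nanos = TimeUnit.NANOSECONDS.convert(2, TimeUnit.SECONDS); // 2_000_000_000

        // System.nanoTime() is the appropriate clock for measuring intervals.
        long start = System.nanoTime();
        TimeUnit.MILLISECONDS.sleep(10);
        long elapsedNanos = System.nanoTime() - start;

        System.out.printf("millis=%d nanos=%d elapsedMs=%d%n", millis, nanos,
                TimeUnit.NANOSECONDS.toMillis(elapsedNanos));
    }
}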

Usage

From source file:edu.cmu.tetrad.data.DataUtils.java

public static ICovarianceMatrix covarianceNonparanormalDrton(DataSet dataSet) {
    final CovarianceMatrix covMatrix = new CovarianceMatrix(dataSet);
    final TetradMatrix data = dataSet.getDoubleData();
    final int NTHREADS = Runtime.getRuntime().availableProcessors() * 10;
    final int EPOCH_COUNT = 100000;

    ExecutorService executor = Executors.newFixedThreadPool(NTHREADS);
    int runnableCount = 0;

    for (int _i = 0; _i < dataSet.getNumColumns(); _i++) {
        for (int _j = _i; _j < dataSet.getNumColumns(); _j++) {
            final int i = _i;
            final int j = _j;

            // Alternative: double tau = StatUtils.rankCorrelation(data.viewColumn(i).toArray(), data.viewColumn(j).toArray());
            Runnable worker = new Runnable() {
                @Override
                public void run() {
                    double tau = StatUtils.kendallsTau(data.getColumn(i).toArray(),
                            data.getColumn(j).toArray());
                    covMatrix.setValue(i, j, tau);
                    covMatrix.setValue(j, i, tau);
                }
            };

            executor.execute(worker);

            if (runnableCount < EPOCH_COUNT) {
                runnableCount++;
                // System.out.println(runnableCount);
            } else {
                executor.shutdown();
                try {
                    // Wait until all threads have finished
                    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
                    System.out.println("Finished all threads");
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }

                executor = Executors.newFixedThreadPool(NTHREADS);
                runnableCount = 0;
            }
        }
    }

    executor.shutdown();

    try {
        // Wait until all threads have finished
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        System.out.println("Finished all threads");
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    return covMatrix;
}
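
The call executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS) above is a common idiom for an effectively unbounded wait: Long.MAX_VALUE nanoseconds is roughly 292 years. A stripped-down sketch of the same shutdown pattern, with hypothetical tasks rather than the correlation workers above:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownDemo {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 100; i++) {
            final int id = i;
            pool.execute(() -> System.out.println("task " + id));
        }
        pool.shutdown(); // stop accepting new tasks; queued tasks still run
        try {
            // Effectively "wait forever" for all submitted tasks to finish.
            pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
        }
    }
}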

From source file:org.apache.bookkeeper.bookie.Bookie.java

/**
 * Add entry to a ledger, even if the ledger has previously been fenced. This should only
 * happen in bookie recovery or ledger recovery cases, where entries are being replicated
 * so that they exist on a quorum of bookies. The corresponding client side call for this
 * is not exposed to users.
 */
public void recoveryAddEntry(ByteBuffer entry, WriteCallback cb, Object ctx, byte[] masterKey)
        throws IOException, BookieException {
    long requestNanos = MathUtils.nowInNano();
    boolean success = false;
    int entrySize = 0;
    try {
        LedgerDescriptor handle = getLedgerForEntry(entry, masterKey);
        synchronized (handle) {
            entrySize = entry.remaining();
            addEntryInternal(handle, entry, cb, ctx);
        }
        success = true;
    } catch (NoWritableLedgerDirException e) {
        transitionToReadOnlyMode();
        throw new IOException(e);
    } finally {
        long elapsedNanos = MathUtils.elapsedNanos(requestNanos);
        if (success) {
            recoveryAddEntryStats.registerSuccessfulEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            addBytesStats.registerSuccessfulValue(entrySize);
        } else {
            recoveryAddEntryStats.registerFailedEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            addBytesStats.registerFailedValue(entrySize);
        }
    }
}

From source file:org.apache.bookkeeper.bookie.Bookie.java

/**
 * Add entry to a ledger.
 * @throws BookieException.LedgerFencedException if the ledger is fenced
 */
public void addEntry(ByteBuffer entry, WriteCallback cb, Object ctx, byte[] masterKey)
        throws IOException, BookieException {
    long requestNanos = MathUtils.nowInNano();
    boolean success = false;
    int entrySize = 0;
    try {
        LedgerDescriptor handle = getLedgerForEntry(entry, masterKey);
        synchronized (handle) {
            if (handle.isFenced()) {
                throw BookieException.create(BookieException.Code.LedgerFencedException);
            }
            entrySize = entry.remaining();
            addEntryInternal(handle, entry, cb, ctx);
        }
        success = true;
    } catch (NoWritableLedgerDirException e) {
        transitionToReadOnlyMode();
        throw new IOException(e);
    } finally {
        long elapsedNanos = MathUtils.elapsedNanos(requestNanos);
        if (success) {
            addEntryStats.registerSuccessfulEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            addBytesStats.registerSuccessfulValue(entrySize);
        } else {
            addEntryStats.registerFailedEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            addBytesStats.registerFailedValue(entrySize);
        }
    }
}
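
Both Bookie methods above follow the same timing pattern: capture a start time in nanoseconds, do the work, and register a success or failure event in the finally block so latency is recorded on every exit path. A generic sketch of that pattern; the LatencyRecorder interface is a hypothetical stand-in for BookKeeper's OpStatsLogger:

import java.util.concurrent.TimeUnit;

public class TimedOperation {
    // Hypothetical stand-in for BookKeeper's stats logger.
    interface LatencyRecorder {
        void record(boolean success, long elapsed, TimeUnit unit);
    }

    static void runTimed(Runnable work, LatencyRecorder recorder) {
        long startNanos = System.nanoTime();
        boolean success = false;
        try {
            work.run();
            success = true;
        } finally {
            // Recorded on success and failure alike; the unit argument tells
            // the sink how to interpret the raw elapsed value.
            recorder.record(success, System.nanoTime() - startNanos, TimeUnit.NANOSECONDS);
        }
    }
}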

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

@Override
public synchronized void createComplete(int rc, final LedgerHandle lh, Object ctx) {
    if (log.isDebugEnabled()) {
        log.debug("[{}] createComplete rc={} ledger={}", name, rc, lh != null ? lh.getId() : -1);
    }

    if (checkAndCompleteLedgerOpTask(rc, lh, ctx)) {
        return;
    }

    mbean.endDataLedgerCreateOp();
    if (rc != BKException.Code.OK) {
        log.error("[{}] Error creating ledger rc={} {}", name, rc, BKException.getMessage(rc));
        ManagedLedgerException status = createManagedLedgerException(rc);

        // Empty the list of pending requests and make all of them fail
        clearPendingAddEntries(status);
        lastLedgerCreationFailureTimestamp = clock.millis();
        STATE_UPDATER.set(this, State.ClosedLedger);
    } else {
        log.info("[{}] Created new ledger {}", name, lh.getId());
        ledgers.put(lh.getId(), LedgerInfo.newBuilder().setLedgerId(lh.getId()).setTimestamp(0).build());
        currentLedger = lh;
        currentLedgerEntries = 0;
        currentLedgerSize = 0;

        final MetaStoreCallback<Void> cb = new MetaStoreCallback<Void>() {
            @Override
            public void operationComplete(Void v, Stat stat) {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Updating of ledgers list after create complete. version={}", name, stat);
                }
                ledgersStat = stat;
                ledgersListMutex.unlock();
                updateLedgersIdsComplete(stat);
                synchronized (ManagedLedgerImpl.this) {
                    mbean.addLedgerSwitchLatencySample(
                            System.nanoTime() - lastLedgerCreationInitiationTimestamp, TimeUnit.NANOSECONDS);
                }
            }

            @Override
            public void operationFailed(MetaStoreException e) {
                if (e instanceof BadVersionException) {
                    synchronized (ManagedLedgerImpl.this) {
                        log.error(
                                "[{}] Failed to udpate ledger list. z-node version mismatch. Closing managed ledger",
                                name);
                        STATE_UPDATER.set(ManagedLedgerImpl.this, State.Fenced);
                        clearPendingAddEntries(e);
                        return;
                    }
                }

                log.warn("[{}] Error updating meta data with the new list of ledgers: {}", name,
                        e.getMessage());

                // Remove the ledger, since we failed to update the list
                ledgers.remove(lh.getId());
                mbean.startDataLedgerDeleteOp();
                bookKeeper.asyncDeleteLedger(lh.getId(), (rc1, ctx1) -> {
                    mbean.endDataLedgerDeleteOp();
                    if (rc1 != BKException.Code.OK) {
                        log.warn("[{}] Failed to delete ledger {}: {}", name, lh.getId(),
                                BKException.getMessage(rc1));
                    }
                }, null);

                ledgersListMutex.unlock();

                synchronized (ManagedLedgerImpl.this) {
                    lastLedgerCreationFailureTimestamp = clock.millis();
                    STATE_UPDATER.set(ManagedLedgerImpl.this, State.ClosedLedger);
                    clearPendingAddEntries(e);
                }
            }
        };

        updateLedgersListAfterRollover(cb);
    }
}

From source file:org.apache.bookkeeper.bookie.Bookie.java

public ByteBuffer readEntry(long ledgerId, long entryId) throws IOException, NoLedgerException {
    long requestNanos = MathUtils.nowInNano();
    boolean success = false;
    int entrySize = 0;
    try {
        LedgerDescriptor handle = handles.getReadOnlyHandle(ledgerId);
        LOG.trace("Reading {}@{}", entryId, ledgerId);
        ByteBuffer entry = handle.readEntry(entryId);
        entrySize = entry.remaining();
        readBytes.add(entrySize);
        success = true;
        return entry;
    } finally {
        long elapsedNanos = MathUtils.elapsedNanos(requestNanos);
        if (success) {
            readEntryStats.registerSuccessfulEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            readBytesStats.registerSuccessfulValue(entrySize);
        } else {
            readEntryStats.registerFailedEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            readBytesStats.registerFailedValue(entrySize);
        }
    }
}

From source file:org.apache.cassandra.concurrent.ContinuationsExecutor.java

/**
 * Returns the thread keep-alive time, which is the amount of time that
 * threads in excess of the core pool size may remain idle before being
 * terminated.
 * 
 * @param unit
 *            the desired time unit of the result
 * @return the time limit
 * @see #setKeepAliveTime
 */
public long getKeepAliveTime(TimeUnit unit) {
    return unit.convert(keepAliveTime, TimeUnit.NANOSECONDS);
}
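
Here keepAliveTime is stored internally in nanoseconds (the convention java.util.concurrent.ThreadPoolExecutor also uses), so the getter converts outward with unit.convert and the matching setter would normalize inward with unit.toNanos. A minimal sketch of that pairing, with assumed field and class names:

import java.util.concurrent.TimeUnit;

public class PoolConfig {
    // Stored in the finest-grained unit so no precision is lost.
    private volatile long keepAliveTime; // nanoseconds

    public void setKeepAliveTime(long time, TimeUnit unit) {
        if (time < 0) {
            throw new IllegalArgumentException("time < 0");
        }
        this.keepAliveTime = unit.toNanos(time); // normalize to nanos on write
    }

    public long getKeepAliveTime(TimeUnit unit) {
        return unit.convert(keepAliveTime, TimeUnit.NANOSECONDS); // convert on read
    }
}

For example, setKeepAliveTime(30, TimeUnit.SECONDS) followed by getKeepAliveTime(TimeUnit.MILLISECONDS) returns 30000.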

From source file:com.vmware.identity.idm.server.provider.ldap.LdapWithAdMappingsProvider.java

Set<Group> populateNestedGroups(ILdapConnectionEx connection, String dn, boolean groupNameOnly,
        Set<Group> groups, IIdmAuthStatRecorder authStatRecorder)
        throws NoSuchGroupException, InvalidPrincipalException {
    Validate.notNull(groups, "groups");

    final String ATTR_NAME_GROUP_CN = _ldapSchemaMapping
            .getGroupAttribute(IdentityStoreAttributeMapping.AttributeIds.GroupAttributeAccountName);
    final String ATTR_DESCRIPTION = _ldapSchemaMapping
            .getGroupAttribute(IdentityStoreAttributeMapping.AttributeIds.GroupAttributeDescription);
    final String ATTR_GROUP_SID = _ldapSchemaMapping
            .getGroupAttribute(IdentityStoreAttributeMapping.AttributeIds.GroupAttributeObjectId);

    ArrayList<String> attributeNames = new ArrayList<String>();
    attributeNames.add(ATTR_NAME_GROUP_CN);
    attributeNames.add(ATTR_GROUP_SID);
    if (!groupNameOnly) {
        attributeNames.add(ATTR_DESCRIPTION);
    }

    final boolean bRecurse = (!this.useDirectGroupsOnly()) && (!this.useMatchingRuleInChain());
    final String groupSearchBaseDn = (this.useGroupBaseDnForNestedGroups()
            ? this.getStoreDataEx().getGroupBaseDn()
            : ServerUtils.getDomainDN(this.getDomain()));
    final String searchQueryTemplate = this.useMatchingRuleInChain()
            ? _ldapSchemaMapping.getNestedParentGroupsQuery()
            : _ldapSchemaMapping.getDirectParentGroupsQuery();

    if (logger.isDebugEnabled()) {
        logger.debug(String.format(
                "LdapWithAdMappingsProvider.populateNestedGroups -- GroupSearchBaseDn: %s; query template: %s",
                groupSearchBaseDn, searchQueryTemplate));
    }
    int numberOfLdapSearches = 0;

    HashSet<String> groupsProcessed = new HashSet<String>();
    Stack<String> groupsToProcess = new Stack<String>();

    if (ServerUtils.isNullOrEmpty(dn) == false) {
        groupsToProcess.push(dn);
    }

    long startTimeForAllGroups = System.nanoTime();

    while (groupsToProcess.isEmpty() == false) {
        String currentDn = groupsToProcess.pop();
        groupsProcessed.add(currentDn);

        String filter = String.format(searchQueryTemplate,
                (this.useMatchingRuleInChain() ? LdapFilterString.encodeMatchingRuleInChainDnFilter(currentDn)
                        : LdapFilterString.encode(currentDn)));

        String groupName = null;
        String groupDescription = null;
        String groupEntryObjectSid = null;

        ILdapPagedSearchResult prev_pagedResult = null;
        ILdapPagedSearchResult pagedResult = null;
        boolean isSearchFinished = false;

        try {
            int numOfQueriesPerGroup = 0;
            long startTimePerGroup = System.nanoTime();

            while (!isSearchFinished) {
                if (logger.isTraceEnabled()) {
                    logger.trace(String.format(
                            "LdapWithAdMappingsProvider.populateNestedGroups -- running connection.search_one_page( %s )",
                            filter));
                }
                pagedResult = connection.search_one_page(groupSearchBaseDn, LdapScope.SCOPE_SUBTREE, filter,
                        attributeNames, DEFAULT_PAGE_SIZE, prev_pagedResult);

                numOfQueriesPerGroup += 1;
                numberOfLdapSearches += 1;

                if (pagedResult != null) {
                    ILdapEntry[] entries = pagedResult.getEntries();
                    if ((entries != null) && (entries.length > 0)) {
                        for (ILdapEntry entry : entries) {
                            groupName = ServerUtils
                                    .getStringValue(entry.getAttributeValues(ATTR_NAME_GROUP_CN));

                            if (groupNameOnly == false) {
                                groupDescription = ServerUtils
                                        .getStringValue(entry.getAttributeValues(ATTR_DESCRIPTION));
                            }

                            byte[] resultObjectSID = ServerUtils
                                    .getBinaryValue(entry.getAttributeValues(ATTR_GROUP_SID));

                            SecurityIdentifier sid = SecurityIdentifier.build(resultObjectSID);
                            groupEntryObjectSid = sid.toString();

                            String groupDomainName = ServerUtils.getDomainFromDN(entry.getDN());

                            PrincipalId groupId = new PrincipalId(groupName, groupDomainName);
                            PrincipalId groupAlias = null;
                            GroupDetail groupDetail = null;

                            if (groupNameOnly == false) {
                                // If group lives in the registered Ad over Ldap IDP, we know the alias
                                // Otherwise, we do not know the alias for the domain where group lives.
                                if (groupDomainName.equalsIgnoreCase(this.getStoreData().getName())) {
                                    groupAlias = ServerUtils.getPrincipalAliasId(groupName, this.getAlias());
                                }

                                groupDetail = new GroupDetail(
                                        (groupDescription == null) ? "" : groupDescription);
                            }

                            Group g = new Group(groupId, groupAlias, groupEntryObjectSid, groupDetail);
                            groups.add(g);

                            if ((bRecurse == true) && (groupsProcessed.contains(entry.getDN()) == false)) {
                                groupsToProcess.add(entry.getDN());
                            }
                        }
                    }
                }
                isSearchFinished = (pagedResult == null) || (pagedResult.isSearchFinished());
                if (prev_pagedResult != null) {
                    prev_pagedResult.close();
                    prev_pagedResult = null;
                }
                prev_pagedResult = pagedResult;
                pagedResult = null;
            } // while !isSearchFinished

            // If summarizeLdapQueries is set false, log each ldap query
            if (authStatRecorder != null && !authStatRecorder.summarizeLdapQueries()) {
                authStatRecorder
                        .add(new LdapQueryStat(filter, groupSearchBaseDn, getConnectionString(connection),
                                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimePerGroup),
                                numOfQueriesPerGroup));
            }
        } finally {
            if (prev_pagedResult != null) {
                prev_pagedResult.close();
                prev_pagedResult = null;
            }
            if (pagedResult != null) {
                pagedResult.close();
                pagedResult = null;
            }
        }

    } // groupsToProcess not empty

    if (logger.isDebugEnabled()) {
        logger.debug(String.format("LdapWithAdMappingsProvider.populateNestedGroups -- ran [%d] ldap searches.",
                numberOfLdapSearches));
    }

    // If summarizeLdapQueries is set true, log once only with summary
    if (authStatRecorder != null && authStatRecorder.summarizeLdapQueries()) {
        authStatRecorder.add(new LdapQueryStat(searchQueryTemplate, groupSearchBaseDn,
                getConnectionString(connection),
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeForAllGroups), numberOfLdapSearches));
    }

    return groups;
}

From source file:org.apache.geode.internal.InternalDataSerializer.java

/**
 * Reads a {@code TimeUnit} from a {@code DataInput}.
 *
 * @throws IOException A problem occurs while reading from {@code in}
 */
private static TimeUnit readTimeUnit(DataInput in) throws IOException {
    InternalDataSerializer.checkIn(in);

    byte type = in.readByte();

    TimeUnit unit;
    switch (type) {
    case TIME_UNIT_NANOSECONDS:
        unit = TimeUnit.NANOSECONDS;
        break;
    case TIME_UNIT_MICROSECONDS:
        unit = TimeUnit.MICROSECONDS;
        break;
    case TIME_UNIT_MILLISECONDS:
        unit = TimeUnit.MILLISECONDS;
        break;
    case TIME_UNIT_SECONDS:
        unit = TimeUnit.SECONDS;
        break;
    default:
        throw new IOException(LocalizedStrings.DataSerializer_UNKNOWN_TIMEUNIT_TYPE_0.toLocalizedString(type));
    }

    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
        logger.trace(LogMarker.SERIALIZER, "Read TimeUnit: {}", unit);
    }

    return unit;
}
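
readTimeUnit maps a tag byte back to one of four TimeUnit constants. The matching writer is not shown in this snippet; the sketch below is an assumed counterpart (not Geode's actual code) that would live in the same class and reuse its TIME_UNIT_* constants:

private static void writeTimeUnit(TimeUnit unit, DataOutput out) throws IOException {
    byte type;
    switch (unit) {
    case NANOSECONDS:
        type = TIME_UNIT_NANOSECONDS;
        break;
    case MICROSECONDS:
        type = TIME_UNIT_MICROSECONDS;
        break;
    case MILLISECONDS:
        type = TIME_UNIT_MILLISECONDS;
        break;
    case SECONDS:
        type = TIME_UNIT_SECONDS;
        break;
    default:
        throw new IOException("Unsupported TimeUnit: " + unit);
    }
    out.writeByte(type); // the tag byte read back by readTimeUnit
}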

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> snapshot(SnapshotDescription snapshotDesc) {
    SnapshotProtos.SnapshotDescription snapshot = ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
    try {
        ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
    } catch (IllegalArgumentException e) {
        return failedFuture(e);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot).build();
    addListener(this.<Long>newMasterCaller()
            .action((controller, stub) -> this.<SnapshotRequest, SnapshotResponse, Long>call(controller, stub,
                    request, (s, c, req, done) -> s.snapshot(c, req, done), resp -> resp.getExpectedTimeout()))
            .call(), (expectedTimeout, err) -> {
                if (err != null) {
                    future.completeExceptionally(err);
                    return;
                }
                TimerTask pollingTask = new TimerTask() {
                    int tries = 0;
                    long startTime = EnvironmentEdgeManager.currentTime();
                    long endTime = startTime + expectedTimeout;
                    long maxPauseTime = expectedTimeout / maxAttempts;

                    @Override
                    public void run(Timeout timeout) throws Exception {
                        if (EnvironmentEdgeManager.currentTime() < endTime) {
                            addListener(isSnapshotFinished(snapshotDesc), (done, err2) -> {
                                if (err2 != null) {
                                    future.completeExceptionally(err2);
                                } else if (done) {
                                    future.complete(null);
                                } else {
                                    // retry again after pauseTime.
                                    long pauseTime = ConnectionUtils
                                            .getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries);
                                    pauseTime = Math.min(pauseTime, maxPauseTime);
                                    AsyncConnectionImpl.RETRY_TIMER.newTimeout(this, pauseTime,
                                            TimeUnit.MILLISECONDS);
                                }
                            });
                        } else {
                            future.completeExceptionally(new SnapshotCreationException(
                                    "Snapshot '" + snapshot.getName() + "' wasn't completed in expectedTime:"
                                            + expectedTimeout + " ms",
                                    snapshotDesc));
                        }
                    }
                };
                AsyncConnectionImpl.RETRY_TIMER.newTimeout(pollingTask, 1, TimeUnit.MILLISECONDS);
            });
    return future;
}