Example usage for java.util.concurrent TimeUnit MICROSECONDS

List of usage examples for java.util.concurrent TimeUnit MICROSECONDS

Introduction

This page collects usage examples for java.util.concurrent TimeUnit MICROSECONDS.

Prototype

TimeUnit MICROSECONDS

Document

Time unit representing one thousandth of a millisecond.
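
As a quick orientation before the examples below, here is a minimal, self-contained sketch (not taken from any of the projects listed) that converts an elapsed time measured with System.nanoTime() into microseconds:

import java.util.concurrent.TimeUnit;

public class MicrosecondsExample {
    public static void main(String[] args) throws InterruptedException {
        long startNanos = System.nanoTime();
        TimeUnit.MILLISECONDS.sleep(5); // simulate some work
        long elapsedNanos = System.nanoTime() - startNanos;

        // Convert the nanosecond measurement into microseconds.
        long elapsedMicros = TimeUnit.MICROSECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS);
        System.out.println("Elapsed: " + elapsedMicros + " us");

        // One millisecond equals 1,000 microseconds.
        System.out.println(TimeUnit.MILLISECONDS.toMicros(1)); // prints 1000
    }
}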

Usage

From source file:com.jkoolcloud.tnt4j.streams.fields.ActivityInfo.java

/**
 * Computes the unspecified operation times and/or elapsed time based on the specified ones.
 */
private void determineTimes() {
    if (elapsedTime < 0L) {
        long elapsedTimeNano = StringUtils.isEmpty(resourceName) ? TimeTracker.hitAndGet()
                : ACTIVITY_TIME_TRACKER.hitAndGet(resourceName);
        elapsedTime = TimestampFormatter.convert(elapsedTimeNano, TimeUnit.NANOSECONDS, TimeUnit.MICROSECONDS);
    }
    if (endTime == null) {
        if (startTime != null) {
            endTime = new UsecTimestamp(startTime);
            endTime.add(0L, elapsedTime);
        } else {
            endTime = new UsecTimestamp();
        }
    }
    if (startTime == null) {
        startTime = new UsecTimestamp(endTime);
        startTime.subtract(0L, elapsedTime);
    }
}

From source file:org.apache.distributedlog.BKLogSegmentWriter.java

@Override
public void addComplete(final int rc, LedgerHandle handle, final long entryId, final Object ctx) {
    int rcAfterFailPoint = rc;
    try {
        if (FailpointUtils.checkFailPoint(FailpointUtils.FailPointName.FP_TransmitComplete)) {
            rcAfterFailPoint = BKException.Code.UnexpectedConditionException;
        }
    } catch (Exception exc) {
        rcAfterFailPoint = BKException.Code.UnexpectedConditionException;
    }
    final int effectiveRC = rcAfterFailPoint;

    // Sanity check to make sure we're receiving these callbacks in order.
    if (entryId > -1 && lastEntryId >= entryId) {
        LOG.error("Log segment {} saw out of order entry {} lastEntryId {}",
                new Object[] { fullyQualifiedLogSegment, entryId, lastEntryId });
    }
    lastEntryId = entryId;

    assert (ctx instanceof BKTransmitPacket);
    final BKTransmitPacket transmitPacket = (BKTransmitPacket) ctx;

    // Time from transmit until receipt of addComplete callback
    addCompleteTime.registerSuccessfulEvent(TimeUnit.MICROSECONDS.convert(
            System.nanoTime() - transmitPacket.getTransmitTime(), TimeUnit.NANOSECONDS), TimeUnit.MICROSECONDS);

    if (BKException.Code.OK == rc) {
        EntryBuffer recordSet = transmitPacket.getRecordSet();
        if (recordSet.hasUserRecords()) {
            synchronized (this) {
                lastTxIdAcknowledged = Math.max(lastTxIdAcknowledged, recordSet.getMaxTxId());
            }
        }
    }

    if (null != scheduler) {
        final Stopwatch queuedTime = Stopwatch.createStarted();
        Futures.addCallback(scheduler.submitOrdered(streamName, new Callable<Void>() {
            @Override
            public Void call() {
                final Stopwatch deferredTime = Stopwatch.createStarted();
                addCompleteQueuedTime.registerSuccessfulEvent(queuedTime.elapsed(TimeUnit.MICROSECONDS),
                        TimeUnit.MICROSECONDS);
                addCompleteDeferredProcessing(transmitPacket, entryId, effectiveRC);
                addCompleteDeferredTime.registerSuccessfulEvent(deferredTime.elapsed(TimeUnit.MICROSECONDS),
                        TimeUnit.MICROSECONDS);
                return null;
            }

            @Override
            public String toString() {
                return String.format("AddComplete(Stream=%s, entryId=%d, rc=%d)", fullyQualifiedLogSegment,
                        entryId, rc);
            }
        }), new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void done) {
            }

            @Override
            public void onFailure(Throwable cause) {
                LOG.error("addComplete processing failed for {} entry {} lastTxId {} rc {} with error",
                        new Object[] { fullyQualifiedLogSegment, entryId,
                                transmitPacket.getRecordSet().getMaxTxId(), rc, cause });
            }
        });
        // Race condition if we notify before the addComplete is enqueued.
        transmitPacket.notifyTransmitComplete(effectiveRC);
        outstandingTransmitsUpdater.getAndDecrement(this);
    } else {
        // Notify transmit complete must be called before deferred processing in the
        // sync case since otherwise callbacks in deferred processing may deadlock.
        transmitPacket.notifyTransmitComplete(effectiveRC);
        outstandingTransmitsUpdater.getAndDecrement(this);
        addCompleteDeferredProcessing(transmitPacket, entryId, effectiveRC);
    }
}
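
The example above times the queued and deferred phases with Guava's Stopwatch and reads the result directly in microseconds. Stripped of the DistributedLog-specific stats calls, the idiom looks like this minimal sketch (doSomeWork is a placeholder, not part of DistributedLog):

import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;

public class StopwatchMicrosExample {
    public static void main(String[] args) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        doSomeWork();
        // elapsed(TimeUnit.MICROSECONDS) converts the internal nanosecond reading for you.
        long micros = stopwatch.elapsed(TimeUnit.MICROSECONDS);
        System.out.println("Work took " + micros + " us");
    }

    private static void doSomeWork() {
        // Placeholder for the operation being timed.
        for (int i = 0; i < 1_000_000; i++) {
            Math.sqrt(i);
        }
    }
}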

From source file:org.apache.drill.exec.physical.impl.aggregate.HashAggTemplate.java

/**
 * Iterate through the batches of the given partition, writing them to a file
 *
 * @param part The partition (number) to spill
 */
private void spillAPartition(int part) {

    ArrayList<BatchHolder> currPartition = batchHolders[part];
    rowsInPartition = 0;
    if (EXTRA_DEBUG_SPILL) {
        logger.debug("HashAggregate: Spilling partition {} current cycle {} part size {}", part,
                spilledState.getCycle(), currPartition.size());
    }

    if (currPartition.size() == 0) {
        return;
    } // in case empty - nothing to spill

    // If this is the first spill for this partition, create an output stream
    if (!isSpilled(part)) {

        spillFiles[part] = spillSet.getNextSpillFile(
                spilledState.getCycle() > 0 ? Integer.toString(spilledState.getCycle()) : null);

        try {
            writers[part] = spillSet.writer(spillFiles[part]);
        } catch (IOException ioe) {
            throw UserException.resourceError(ioe)
                    .message("Hash Aggregation failed to open spill file: " + spillFiles[part]).build(logger);
        }
    }

    for (int currOutBatchIndex = 0; currOutBatchIndex < currPartition.size(); currOutBatchIndex++) {

        // get the number of records in the batch holder that are pending output
        int numOutputRecords = currPartition.get(currOutBatchIndex).getNumPendingOutput();

        rowsInPartition += numOutputRecords; // for logging
        rowsSpilled += numOutputRecords;

        allocateOutgoing(numOutputRecords);
        currPartition.get(currOutBatchIndex).outputValues();
        this.htables[part].outputKeys(currOutBatchIndex, this.outContainer, numOutputRecords);

        // set the value count for outgoing batch value vectors
        for (VectorWrapper<?> v : outgoing) {
            v.getValueVector().getMutator().setValueCount(numOutputRecords);
        }

        outContainer.setRecordCount(numOutputRecords);
        WritableBatch batch = WritableBatch.getBatchNoHVWrap(numOutputRecords, outContainer, false);
        try {
            writers[part].write(batch, null);
        } catch (IOException ioe) {
            throw UserException.dataWriteError(ioe)
                    .message("Hash Aggregation failed to write to output file: " + spillFiles[part])
                    .build(logger);
        } finally {
            batch.clear();
        }
        outContainer.zeroVectors();
        logger.trace("HASH AGG: Took {} us to spill {} records", writers[part].time(TimeUnit.MICROSECONDS),
                numOutputRecords);
    }

    spilledBatchesCount[part] += currPartition.size(); // update count of spilled batches

    logger.trace("HASH AGG: Spilled {} rows from {} batches of partition {}", rowsInPartition,
            currPartition.size(), part);
}

From source file:com.twitter.distributedlog.BKLogHandler.java

private void asyncGetLedgerListInternal(final Comparator<LogSegmentMetadata> comparator,
        final LogSegmentFilter segmentFilter, final Watcher watcher,
        final GenericCallback<List<LogSegmentMetadata>> finalCallback, final AtomicInteger numAttemptsLeft,
        final AtomicLong backoffMillis) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Async getting ledger list for {}.", getFullyQualifiedName());
        }
        final GenericCallback<List<LogSegmentMetadata>> callback = new GenericCallback<List<LogSegmentMetadata>>() {
            @Override
            public void operationComplete(int rc, List<LogSegmentMetadata> result) {
                long elapsedMicros = stopwatch.stop().elapsed(TimeUnit.MICROSECONDS);
                if (KeeperException.Code.OK.intValue() != rc) {
                    getListStat.registerFailedEvent(elapsedMicros);
                } else {
                    if (LogSegmentFilter.DEFAULT_FILTER == segmentFilter) {
                        isFullListFetched.set(true);
                    }
                    getListStat.registerSuccessfulEvent(elapsedMicros);
                }
                finalCallback.operationComplete(rc, result);
            }
        };
        zooKeeperClient.get().getChildren(logMetadata.getLogSegmentsPath(), watcher,
                new AsyncCallback.Children2Callback() {
                    @Override
                    public void processResult(final int rc, final String path, final Object ctx,
                            final List<String> children, final Stat stat) {
                        if (KeeperException.Code.OK.intValue() != rc) {

                            if ((KeeperException.Code.CONNECTIONLOSS.intValue() == rc
                                    || KeeperException.Code.SESSIONEXPIRED.intValue() == rc
                                    || KeeperException.Code.SESSIONMOVED.intValue() == rc)
                                    && numAttemptsLeft.decrementAndGet() > 0) {
                                long backoffMs = backoffMillis.get();
                                backoffMillis.set(Math.min(conf.getZKRetryBackoffMaxMillis(), 2 * backoffMs));
                                scheduler.schedule(new Runnable() {
                                    @Override
                                    public void run() {
                                        asyncGetLedgerListInternal(comparator, segmentFilter, watcher,
                                                finalCallback, numAttemptsLeft, backoffMillis);
                                    }
                                }, backoffMs, TimeUnit.MILLISECONDS);
                                return;
                            }
                            callback.operationComplete(rc, null);
                            return;
                        }

                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Got ledger list from {} : {}", logMetadata.getLogSegmentsPath(),
                                    children);
                        }

                        ledgerListWatchSet.set(true);
                        Set<String> segmentsReceived = new HashSet<String>();
                        segmentsReceived.addAll(segmentFilter.filter(children));
                        Set<String> segmentsAdded;
                        final Set<String> removedSegments = Collections.synchronizedSet(new HashSet<String>());
                        final Map<String, LogSegmentMetadata> addedSegments = Collections
                                .synchronizedMap(new HashMap<String, LogSegmentMetadata>());
                        Pair<Set<String>, Set<String>> segmentChanges = logSegmentCache.diff(segmentsReceived);
                        segmentsAdded = segmentChanges.getLeft();
                        removedSegments.addAll(segmentChanges.getRight());

                        if (segmentsAdded.isEmpty()) {
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("No segments added for {}.", getFullyQualifiedName());
                            }

                            // update the cache before fetch
                            logSegmentCache.update(removedSegments, addedSegments);

                            List<LogSegmentMetadata> segmentList;
                            try {
                                segmentList = getCachedLogSegments(comparator);
                            } catch (UnexpectedException e) {
                                callback.operationComplete(KeeperException.Code.DATAINCONSISTENCY.intValue(),
                                        null);
                                return;
                            }
                            callback.operationComplete(KeeperException.Code.OK.intValue(), segmentList);
                            notifyUpdatedLogSegments(segmentList);
                            if (!removedSegments.isEmpty()) {
                                notifyOnOperationComplete();
                            }
                            return;
                        }

                        final AtomicInteger numChildren = new AtomicInteger(segmentsAdded.size());
                        final AtomicInteger numFailures = new AtomicInteger(0);
                        for (final String segment : segmentsAdded) {
                            metadataStore.getLogSegment(logMetadata.getLogSegmentPath(segment))
                                    .addEventListener(new FutureEventListener<LogSegmentMetadata>() {

                                        @Override
                                        public void onSuccess(LogSegmentMetadata result) {
                                            addedSegments.put(segment, result);
                                            complete();
                                        }

                                        @Override
                                        public void onFailure(Throwable cause) {
                                            // NONODE exception is possible in two cases
                                            // 1. A log segment was deleted by truncation between the call to getChildren and read
                                            // attempt on the znode corresponding to the segment
                                            // 2. In progress segment has been completed => inprogress ZNode does not exist
                                            if (cause instanceof KeeperException
                                                    && KeeperException.Code.NONODE == ((KeeperException) cause)
                                                            .code()) {
                                                removedSegments.add(segment);
                                                complete();
                                            } else {
                                                // fail fast
                                                if (1 == numFailures.incrementAndGet()) {
                                                    int rcToReturn = KeeperException.Code.SYSTEMERROR
                                                            .intValue();
                                                    if (cause instanceof KeeperException) {
                                                        rcToReturn = ((KeeperException) cause).code()
                                                                .intValue();
                                                    } else if (cause instanceof ZKException) {
                                                        rcToReturn = ((ZKException) cause)
                                                                .getKeeperExceptionCode().intValue();
                                                    }
                                                    // Ideally this would use a dlog-specific response code.
                                                    callback.operationComplete(rcToReturn, null);
                                                    return;
                                                }
                                            }
                                        }

                                        private void complete() {
                                            if (0 == numChildren.decrementAndGet() && numFailures.get() == 0) {
                                                // update the cache only when fetch completed
                                                logSegmentCache.update(removedSegments, addedSegments);
                                                List<LogSegmentMetadata> segmentList;
                                                try {
                                                    segmentList = getCachedLogSegments(comparator);
                                                } catch (UnexpectedException e) {
                                                    callback.operationComplete(
                                                            KeeperException.Code.DATAINCONSISTENCY.intValue(),
                                                            null);
                                                    return;
                                                }
                                                callback.operationComplete(KeeperException.Code.OK.intValue(),
                                                        segmentList);
                                                notifyUpdatedLogSegments(segmentList);
                                                notifyOnOperationComplete();
                                            }
                                        }
                                    });
                        }
                    }
                }, null);
    } catch (ZooKeeperClient.ZooKeeperConnectionException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    } catch (InterruptedException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    }
}

From source file:com.netflix.dyno.jedis.DynoJedisPipeline.java

@Override
public Response<String> set(final String key, final String value) {
    if (CompressionStrategy.NONE == connPool.getConfiguration().getCompressionStrategy()) {
        return new PipelineOperation<String>() {
            @Override
            Response<String> execute(Pipeline jedisPipeline) throws DynoException {
                long startTime = System.nanoTime() / 1000;
                try {
                    return jedisPipeline.set(key, value);
                } finally {
                    long duration = System.nanoTime() / 1000 - startTime;
                    opMonitor.recordSendLatency(OpName.SET.name(), duration, TimeUnit.MICROSECONDS);
                }
            }

        }.execute(key, OpName.SET);
    } else {
        return new PipelineCompressionOperation<String>() {
            @Override
            Response<String> execute(final Pipeline jedisPipeline) throws DynoException {
                long startTime = System.nanoTime() / 1000;
                try {
                    return new PipelineResponse(null).apply(new Func0<Response<String>>() {
                        @Override
                        public Response<String> call() {
                            return jedisPipeline.set(key, compressValue(value));
                        }
                    });
                } finally {
                    long duration = System.nanoTime() / 1000 - startTime;
                    opMonitor.recordSendLatency(OpName.SET.name(), duration, TimeUnit.MICROSECONDS);
                }
            }
        }.execute(key, OpName.SET);
    }
}
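
The Dyno pipeline examples measure latency by dividing System.nanoTime() by 1000, which yields microseconds because a microsecond is 1,000 nanoseconds. The same conversion can be expressed with TimeUnit, as in this small sketch (recordLatencyMicros is illustrative only, not part of the Dyno API):

import java.util.concurrent.TimeUnit;

public class LatencyMicrosExample {
    public static void main(String[] args) throws InterruptedException {
        long startNanos = System.nanoTime();
        TimeUnit.MILLISECONDS.sleep(2); // stand-in for the pipelined command
        long elapsedNanos = System.nanoTime() - startNanos;

        // Equivalent to (System.nanoTime() / 1000 - startTime) in the example above.
        long durationMicros = TimeUnit.NANOSECONDS.toMicros(elapsedNanos);
        recordLatencyMicros(durationMicros);
    }

    private static void recordLatencyMicros(long durationMicros) {
        // Placeholder for a monitor call such as recordSendLatency(..., TimeUnit.MICROSECONDS).
        System.out.println("SET latency: " + durationMicros + " us");
    }
}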

From source file:org.apache.distributedlog.BKLogSegmentWriter.java

private void addCompleteDeferredProcessing(final BKTransmitPacket transmitPacket, final long entryId,
        final int rc) {
    boolean cancelPendingPromises = false;
    EntryBuffer recordSet = transmitPacket.getRecordSet();
    synchronized (this) {
        if (transmitResultUpdater.compareAndSet(this, BKException.Code.OK, rc)) {
            // If this is the first time we are setting an error code in the transmitResult then
            // we must cancel pending promises; once this error has been set, more records will not
            // be enqueued; they will be failed with WriteException
            cancelPendingPromises = (BKException.Code.OK != rc);
        } else {
            LOG.warn("Log segment {} entryId {}: Tried to set transmit result to ({}) but is already ({})",
                    new Object[] { fullyQualifiedLogSegment, entryId, rc, transmitResultUpdater.get(this) });
        }

        if (transmitResultUpdater.get(this) != BKException.Code.OK) {
            if (recordSet.hasUserRecords()) {
                transmitDataPacketSize.registerFailedEvent(recordSet.getNumBytes(), TimeUnit.MICROSECONDS);
            }
        } else {
            // If we had data that we flushed then we need it to make sure that
            // background flush in the next pass will make the previous writes
            // visible by advancing the lastAck
            if (recordSet.hasUserRecords()) {
                transmitDataPacketSize.registerSuccessfulEvent(recordSet.getNumBytes(), TimeUnit.MICROSECONDS);
                controlFlushNeeded = true;
                if (immediateFlushEnabled) {
                    if (0 == minDelayBetweenImmediateFlushMs) {
                        backgroundFlush(true);
                    } else {
                        scheduleFlushWithDelayIfNeeded(new Callable<Void>() {
                            @Override
                            public Void call() throws Exception {
                                backgroundFlush(true);
                                return null;
                            }
                        }, immFlushSchedFutureRefUpdater);
                    }
                }
            }
        }

        // update last DLSN before satisfying the future
        if (BKException.Code.OK == transmitResultUpdater.get(this)) {
            DLSN lastDLSNInPacket = recordSet.finalizeTransmit(logSegmentSequenceNumber, entryId);
            if (recordSet.hasUserRecords()) {
                if (null != lastDLSNInPacket && lastDLSN.compareTo(lastDLSNInPacket) < 0) {
                    lastDLSN = lastDLSNInPacket;
                }
            }
        }
    }

    if (BKException.Code.OK == transmitResultUpdater.get(this)) {
        recordSet.completeTransmit(logSegmentSequenceNumber, entryId);
    } else {
        recordSet.abortTransmit(Utils.transmitException(transmitResultUpdater.get(this)));
    }

    if (cancelPendingPromises) {
        // Since the writer is in a bad state, no more packets will be transmitted, and it's safe to
        // assign a new empty packet. This avoids a race with closeInternal, which may also
        // try to cancel the current packet.
        final BKTransmitPacket packetCurrentSaved;
        synchronized (this) {
            packetCurrentSaved = new BKTransmitPacket(recordSetWriter);
            recordSetWriter = newRecordSetWriter();
        }
        packetCurrentSaved.getRecordSet().abortTransmit(new WriteCancelledException(streamName,
                Utils.transmitException(transmitResultUpdater.get(this))));
    }
}

From source file:org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.java

private boolean waitUntilDone(long cutoff) throws InterruptedException {
    boolean hasWait = cutoff != Long.MAX_VALUE;
    long lastLog = EnvironmentEdgeManager.currentTime();
    long currentInProgress;
    while (0 != (currentInProgress = actionsInProgress.get())) {
        long now = EnvironmentEdgeManager.currentTime();
        if (hasWait && (now * 1000L) > cutoff) {
            return false;
        }
        if (!hasWait) { // Only log if wait is infinite.
            if (now > lastLog + 10000) {
                lastLog = now;
                LOG.info("#" + asyncProcess.id + ", waiting for " + currentInProgress
                        + "  actions to finish on table: " + tableName);
                if (currentInProgress <= asyncProcess.thresholdToLogUndoneTaskDetails) {
                    asyncProcess.logDetailsOfUndoneTasks(currentInProgress);
                }
            }
        }
        synchronized (actionsInProgress) {
            if (actionsInProgress.get() == 0)
                break;
            if (!hasWait) {
                actionsInProgress.wait(10);
            } else {
                long waitMicroSecond = Math.min(100000L, (cutoff - now * 1000L));
                TimeUnit.MICROSECONDS.timedWait(actionsInProgress, waitMicroSecond);
            }
        }
    }
    return true;
}

From source file:com.netflix.dyno.jedis.DynoJedisPipeline.java

/**** Binary Operations ****/
@Override
public Response<String> set(final byte[] key, final byte[] value) {
    return new PipelineOperation<String>() {
        @Override
        Response<String> execute(Pipeline jedisPipeline) throws DynoException {
            long startTime = System.nanoTime() / 1000;
            try {
                return jedisPipeline.set(key, value);
            } finally {
                long duration = System.nanoTime() / 1000 - startTime;
                opMonitor.recordSendLatency(OpName.SET.name(), duration, TimeUnit.MICROSECONDS);
            }
        }

    }.execute(key, OpName.SET);
}

From source file:com.netflix.dyno.jedis.DynoJedisPipeline.java

public void sync() {
    long startTime = System.nanoTime() / 1000;
    try {
        jedisPipeline.sync();
        opMonitor.recordPipelineSync();
    } catch (JedisConnectionException jce) {
        String msg = "Failed sync() to host: " + getHostInfo();
        pipelineEx.set(new FatalConnectionException(msg, jce)
                .setHost(connection == null ? Host.NO_HOST : connection.getHost()));
        cpMonitor.incOperationFailure(connection == null ? null : connection.getHost(), jce);
        throw jce;
    } finally {
        long duration = System.nanoTime() / 1000 - startTime;
        opMonitor.recordLatency(duration, TimeUnit.MICROSECONDS);
        discardPipeline(false);
        releaseConnection();
    }
}

From source file:com.netflix.dyno.jedis.DynoJedisPipeline.java

public List<Object> syncAndReturnAll() {
    long startTime = System.nanoTime() / 1000;
    try {
        List<Object> result = jedisPipeline.syncAndReturnAll();
        opMonitor.recordPipelineSync();
        return result;
    } catch (JedisConnectionException jce) {
        String msg = "Failed syncAndReturnAll() to host: " + getHostInfo();
        pipelineEx.set(new FatalConnectionException(msg, jce)
                .setHost(connection == null ? Host.NO_HOST : connection.getHost()));
        cpMonitor.incOperationFailure(connection == null ? null : connection.getHost(), jce);
        throw jce;
    } finally {
        long duration = System.nanoTime() / 1000 - startTime;
        opMonitor.recordLatency(duration, TimeUnit.MICROSECONDS);
        discardPipeline(false);
        releaseConnection();
    }
}