Example usage for java.util.concurrent TimeUnit NANOSECONDS

List of usage examples for java.util.concurrent TimeUnit NANOSECONDS

Introduction

On this page you can find example usage for java.util.concurrent TimeUnit NANOSECONDS.

Prototype

TimeUnit NANOSECONDS

To view the source code for java.util.concurrent TimeUnit NANOSECONDS, click the Source Link.

Document

Time unit representing one thousandth of a microsecond.
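
Before the project examples below, here is a minimal self-contained sketch (not taken from any of the projects listed on this page) of the two most common NANOSECONDS idioms: timing a block of work with System.nanoTime() and converting the elapsed nanoseconds to coarser units.

import java.util.concurrent.TimeUnit;

public class NanosecondsDemo {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        TimeUnit.MILLISECONDS.sleep(25); // stand-in for real work
        long elapsedNanos = System.nanoTime() - start;

        // Convert elapsed nanoseconds to coarser units.
        long elapsedMicros = TimeUnit.NANOSECONDS.toMicros(elapsedNanos);
        long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(elapsedNanos);

        // Equivalent conversion via convert(): target unit first, source unit last.
        long elapsedMillis2 = TimeUnit.MILLISECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS);

        System.out.printf("%d ns = %d us = %d ms (convert: %d ms)%n",
                elapsedNanos, elapsedMicros, elapsedMillis, elapsedMillis2);
    }
}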

Usage

From source file:ddf.test.itests.catalog.TestCatalog.java

@Test
public void testContentDirectoryMonitor() throws Exception {
    final String TMP_PREFIX = "tcdm_";
    Path tmpDir = Files.createTempDirectory(TMP_PREFIX);
    tmpDir.toFile().deleteOnExit();
    Path tmpFile = Files.createTempFile(tmpDir, TMP_PREFIX, "_tmp.xml");
    tmpFile.toFile().deleteOnExit();
    Files.copy(this.getClass().getClassLoader().getResourceAsStream("metacard5.xml"), tmpFile,
            StandardCopyOption.REPLACE_EXISTING);

    Map<String, Object> cdmProperties = new HashMap<>();
    cdmProperties.putAll(getMetatypeDefaults("content-core-directorymonitor",
            "org.codice.ddf.catalog.content.monitor.ContentDirectoryMonitor"));
    cdmProperties.put("monitoredDirectoryPath", tmpDir.toString() + "/");
    createManagedService("org.codice.ddf.catalog.content.monitor.ContentDirectoryMonitor", cdmProperties);

    long startTime = System.nanoTime();
    ValidatableResponse response = null;
    do {
        response = executeOpenSearch("xml", "q=*SysAdmin*");
        if (response.extract().xmlPath().getList("metacards.metacard").size() == 1) {
            break;
        }
        try {
            TimeUnit.MILLISECONDS.sleep(50);
        } catch (InterruptedException e) {
            // Ignore and keep polling until the one-minute deadline expires.
        }
    } while (TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime) < TimeUnit.MINUTES.toMillis(1));
    response.body("metacards.metacard.size()", equalTo(1));
}
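
The polling loop above illustrates a general pattern: record System.nanoTime() once, retry the check, and stop once TimeUnit.NANOSECONDS.toMillis() of the elapsed time exceeds a deadline. A stripped-down sketch of just that pattern, where the BooleanSupplier condition is a hypothetical stand-in for the OpenSearch check in the test:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class PollUntil {
    /** Polls the condition every 50 ms until it is true or one minute has elapsed. */
    static boolean pollForOneMinute(BooleanSupplier condition) throws InterruptedException {
        long startTime = System.nanoTime();
        do {
            if (condition.getAsBoolean()) {
                return true;
            }
            TimeUnit.MILLISECONDS.sleep(50);
        } while (TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime) < TimeUnit.MINUTES.toMillis(1));
        return false;
    }
}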

From source file:com.sonicle.webtop.core.app.ServiceManager.java

private void initializeJobService(BaseJobService instance) {
    long start = 0, end = 0;
    logger.trace("JobService: calling initialize() [{}]", instance.SERVICE_ID);
    try {
        LoggerUtils.setContextDC(instance.SERVICE_ID);
        start = System.nanoTime();
        instance.initialize();
        end = System.nanoTime();
    } catch (Throwable t) {
        logger.error("JobService: initialize() throws errors [{}]", t, instance.getClass().getCanonicalName());
    } finally {
        LoggerUtils.clearContextServiceDC();
    }
    if (logger.isTraceEnabled() && (end != 0))
        logger.trace("JobService: initialize() took {} ms [{}]",
                TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS), instance.SERVICE_ID);
}

From source file:com.sonicle.webtop.core.app.ServiceManager.java

private void cleanupJobService(BaseJobService instance) {
    long start = 0, end = 0;
    logger.trace("JobService: calling cleanup() [{}]", instance.SERVICE_ID);
    try {
        LoggerUtils.setContextDC(instance.getManifest().getId());
        start = System.nanoTime();
        instance.cleanup();
        end = System.nanoTime();
    } catch (Throwable t) {
        logger.error("JobService: cleanup() throws errors [{}]", t, instance.getClass().getCanonicalName());
    } finally {
        LoggerUtils.clearContextServiceDC();
    }
    if (logger.isTraceEnabled() && (end != 0))
        logger.trace("JobService: cleanup() took {} ms [{}]",
                TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS), instance.SERVICE_ID);
}
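
Both ServiceManager methods above time a lifecycle call the same way: capture System.nanoTime() before and after, then convert the difference with TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS) for the trace log. A minimal sketch of that idiom in isolation, with doWork as a hypothetical stand-in for initialize() or cleanup():

import java.util.concurrent.TimeUnit;

final class TimedCall {
    static void runTimed(Runnable doWork) {
        long start = System.nanoTime();
        try {
            doWork.run();
        } finally {
            long elapsedMs = TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS);
            System.out.println("call took " + elapsedMs + " ms");
        }
    }

    public static void main(String[] args) {
        runTimed(() -> System.out.println("working"));
    }
}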

From source file:org.apache.cassandra.concurrent.ContinuationsExecutor.java

/**
 * Performs blocking or timed wait for a task, depending on current
 * configuration settings, or returns null if this worker must exit because
 * of any of: 1. There are more than maximumPoolSize workers (due to a call
 * to setMaximumPoolSize). 2. The pool is stopped. 3. The pool is shutdown
 * and the queue is empty. 4. This worker timed out waiting for a task, and
 * timed-out workers are subject to termination (that is,
 * {@code allowCoreThreadTimeOut || workerCount > corePoolSize}) both
 * before and after the timed wait.
 * 
 * @return task, or null if the worker must exit, in which case workerCount
 *         is decremented
 */
private Runnable getTask() {
    boolean timedOut = false; // Did the last poll() time out?

    retry: for (;;) {
        int c = ctl.get();
        int rs = runStateOf(c);

        // Check if queue empty only if necessary.
        if (rs >= SHUTDOWN && (rs >= STOP || workQueue.isEmpty())) {
            decrementWorkerCount();
            return null;
        }

        boolean timed; // Are workers subject to culling?

        for (;;) {
            int wc = workerCountOf(c);
            timed = allowCoreThreadTimeOut || wc > corePoolSize;

            if (wc <= maximumPoolSize && !(timedOut && timed))
                break;
            if (compareAndDecrementWorkerCount(c))
                return null;
            c = ctl.get(); // Re-read ctl
            if (runStateOf(c) != rs)
                continue retry;
            // else CAS failed due to workerCount change; retry inner loop
        }

        try {
            Runnable r = timed ? workQueue.poll(keepAliveTime, TimeUnit.NANOSECONDS) : workQueue.take();
            if (r != null)
                return r;
            timedOut = true;
        } catch (InterruptedException retry) {
            timedOut = false;
        }
    }
}
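
The keepAliveTime polled above is held in nanoseconds, which is the usual convention when the value is passed straight to BlockingQueue.poll(long, TimeUnit). A reduced sketch of that idiom, assuming a keep-alive configured in seconds (the class and field names here are illustrative, not Cassandra's):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

final class TimedTaskQueue {
    private final BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>();
    // Keep-alive configured as 60 seconds, stored internally in nanoseconds.
    private final long keepAliveNanos = TimeUnit.SECONDS.toNanos(60);

    /** Returns the next task, or null if nothing arrives within the keep-alive window. */
    Runnable pollTask() throws InterruptedException {
        return workQueue.poll(keepAliveNanos, TimeUnit.NANOSECONDS);
    }
}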

From source file:jtabwb.launcher.Launcher.java

private long getCurrentTimeMilliseconds() {
    return TimeUnit.MILLISECONDS.convert(bean.getCurrentThreadCpuTime(), TimeUnit.NANOSECONDS);
}
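
The convert() call above is equivalent to the shorter TimeUnit.NANOSECONDS.toMillis(...); both truncate toward zero. A self-contained sketch of the same measurement, assuming bean is a ThreadMXBean (ThreadMXBean.getCurrentThreadCpuTime() reports nanoseconds):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.concurrent.TimeUnit;

final class CpuTimeMillis {
    public static void main(String[] args) {
        ThreadMXBean bean = ManagementFactory.getThreadMXBean();
        // getCurrentThreadCpuTime() returns nanoseconds (or -1 if unsupported);
        // toMillis() is the shorter equivalent of MILLISECONDS.convert(nanos, NANOSECONDS).
        long cpuMs = TimeUnit.NANOSECONDS.toMillis(bean.getCurrentThreadCpuTime());
        System.out.println("current thread CPU time: " + cpuMs + " ms");
    }
}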

From source file:jp.aegif.nemaki.cmis.service.impl.ObjectServiceImpl.java

@Override
public FailedToDeleteData deleteTree(CallContext callContext, String repositoryId, String folderId,
        Boolean allVersions, UnfileObject unfileObjects, Boolean continueOnFailure, ExtensionsData extension) {
    // //////////////////
    // Inner classes
    // //////////////////
    class DeleteTask implements Callable<Boolean> {
        private CallContext callContext;
        private String repositoryId;
        private Content content;
        private Boolean allVersions;

        public DeleteTask() {
        }

        public DeleteTask(CallContext callContext, String repositoryId, Content content, Boolean allVersions) {
            this.callContext = callContext;
            this.repositoryId = repositoryId;
            this.content = content;
            this.allVersions = allVersions;
        }

        @Override
        public Boolean call() throws Exception {
            try {
                objectServiceInternal.deleteObjectInternal(callContext, repositoryId, content, allVersions,
                        true);
                return false;
            } catch (Exception e) {
                return true;
            }
        }
    }

    class WrappedExecutorService {
        private ExecutorService service;
        private Folder folder;

        private WrappedExecutorService() {
        };

        public WrappedExecutorService(ExecutorService service, Folder folder) {
            this.service = service;
            this.folder = folder;
        }

        public ExecutorService getService() {
            return service;
        }

        public Folder getFolder() {
            return folder;
        }
    }

    class DeleteService {
        private Map<String, Future<Boolean>> failureIds;
        private WrappedExecutorService parentService;
        private CallContext callContext;
        private String repositoryId;
        private Content content;
        private Boolean allVersions;

        public DeleteService() {
        }

        public DeleteService(Map<String, Future<Boolean>> failureIds, WrappedExecutorService parentService,
                CallContext callContext, String repositoryId, Content content, Boolean allVersions) {
            super();
            this.failureIds = failureIds;
            this.parentService = parentService;
            this.callContext = callContext;
            this.repositoryId = repositoryId;
            this.content = content;
            this.allVersions = allVersions;
        }

        public void execute() {
            if (content.isDocument()) {
                Future<Boolean> result = parentService.getService()
                        .submit(new DeleteTask(callContext, repositoryId, content, allVersions));
                failureIds.put(content.getId(), result);
            } else if (content.isFolder()) {
                WrappedExecutorService childrenService = new WrappedExecutorService(
                        Executors.newFixedThreadPool(threadMax), (Folder) content);

                List<Content> children = contentService.getChildren(repositoryId, content.getId());
                if (CollectionUtils.isNotEmpty(children)) {
                    for (Content child : children) {
                        DeleteService deleteService = new DeleteService(this.failureIds, childrenService,
                                callContext, repositoryId, child, allVersions);
                        deleteService.execute();
                    }
                }

                // wait until childrenService finishes all submitted deletions
                childrenService.getService().shutdown();
                try {
                    childrenService.getService().awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
                } catch (InterruptedException e) {
                    log.error(e, e);
                }

                //Lastly, delete self
                Future<Boolean> result = parentService.getService()
                        .submit(new DeleteTask(callContext, repositoryId, content, allVersions));
                failureIds.put(content.getId(), result);
            }

        }
    }

    // //////////////////
    // General Exception
    // //////////////////
    exceptionService.invalidArgumentRequiredString("objectId", folderId);
    Folder folder = contentService.getFolder(repositoryId, folderId);
    exceptionService.permissionDenied(callContext, repositoryId, PermissionMapping.CAN_DELETE_TREE_FOLDER,
            folder);
    exceptionService.constraintDeleteRootFolder(repositoryId, folderId);

    // //////////////////
    // Specific Exception
    // //////////////////
    if (folder == null)
        exceptionService.constraint(folderId, "deleteTree cannot be invoked on a non-folder object");

    // //////////////////
    // Body of the method
    // //////////////////
    // Delete descendants
    Map<String, Future<Boolean>> failureIds = new HashMap<String, Future<Boolean>>();

    DeleteService deleteService = new DeleteService(failureIds,
            new WrappedExecutorService(Executors.newFixedThreadPool(threadMax), folder), callContext,
            repositoryId, folder, allVersions);
    deleteService.execute();

    solrUtil.callSolrIndexing(repositoryId);

    // Check FailedToDeleteData
    // FIXME Consider orphans that failed to be deleted
    FailedToDeleteDataImpl fdd = new FailedToDeleteDataImpl();
    List<String> ids = new ArrayList<String>();
    for (Entry<String, Future<Boolean>> entry : failureIds.entrySet()) {
        Boolean failed;
        try {
            failed = entry.getValue().get();
            if (failed) {
                ids.add(entry.getKey());
            }
        } catch (InterruptedException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        } catch (ExecutionException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
    fdd.setIds(ids);
    return fdd;
}
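
The deleteTree implementation above waits on each per-folder executor with awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS), which in practice means "wait without a deadline" (Long.MAX_VALUE nanoseconds is roughly 292 years). A minimal sketch of that shutdown idiom on its own:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

final class ShutdownAndWait {
    static void shutdownAndWait(ExecutorService service) {
        service.shutdown(); // stop accepting new tasks
        try {
            // Effectively unbounded wait: Long.MAX_VALUE ns is ~292 years.
            service.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt status
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("task done"));
        shutdownAndWait(pool);
    }
}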

From source file:org.apache.geode.internal.cache.CacheServerLauncher.java

protected void waitForRunning() throws Exception {
    Status status = spinReadStatus();
    String lastReadMessage = null;
    String lastReportedMessage = null;
    long lastReadTime = System.nanoTime();
    if (status == null) {
        throw new Exception(LocalizedStrings.CacheServerLauncher_NO_AVAILABLE_STATUS.toLocalizedString());
    } else {
        switch (status.state) {
        case STARTING:
            // re-read status for a while...
            while (status.state == STARTING) {
                Thread.sleep(500); // fix for bug 36998
                status = spinReadStatus();

                // check to see if the status message has changed
                if (status.dsMsg != null && !status.dsMsg.equals(lastReadMessage)) {
                    lastReadMessage = status.dsMsg;
                    lastReadTime = System.nanoTime();
                }

                // if the status message has not changed for 15 seconds, print
                // out the message.
                long elapsed = System.nanoTime() - lastReadTime;
                if (TimeUnit.NANOSECONDS.toMillis(elapsed) > STATUS_WAIT_TIME && lastReadMessage != null
                        && !lastReadMessage.equals(lastReportedMessage)) {
                    long elapsedSec = TimeUnit.NANOSECONDS.toSeconds(elapsed);
                    System.out.println(LocalizedStrings.CacheServerLauncher_LAUNCH_IN_PROGRESS_0
                            .toLocalizedString(elapsedSec, status.dsMsg));
                    lastReportedMessage = lastReadMessage;
                }
            }
            if (status.state == SHUTDOWN) {
                System.out.println(status);
                System.exit(1);
            }
            break;
        default:
            break;
        }
        System.out.println(status);
    }
}

From source file:com.jkoolcloud.tnt4j.streams.fields.ActivityInfo.java

/**
 * Computes the unspecified operation times and/or elapsed time based on the specified ones.
 */
private void determineTimes() {
    if (elapsedTime < 0L) {
        long elapsedTimeNano = StringUtils.isEmpty(resourceName) ? TimeTracker.hitAndGet()
                : ACTIVITY_TIME_TRACKER.hitAndGet(resourceName);
        elapsedTime = TimestampFormatter.convert(elapsedTimeNano, TimeUnit.NANOSECONDS, TimeUnit.MICROSECONDS);
    }
    if (endTime == null) {
        if (startTime != null) {
            endTime = new UsecTimestamp(startTime);
            endTime.add(0L, elapsedTime);
        } else {
            endTime = new UsecTimestamp();
        }
    }
    if (startTime == null) {
        startTime = new UsecTimestamp(endTime);
        startTime.subtract(0L, elapsedTime);
    }
}

From source file:org.apache.distributedlog.BKLogSegmentWriter.java

@Override
public void addComplete(final int rc, LedgerHandle handle, final long entryId, final Object ctx) {
    int rcAfterFailPoint = rc;
    try {
        if (FailpointUtils.checkFailPoint(FailpointUtils.FailPointName.FP_TransmitComplete)) {
            rcAfterFailPoint = BKException.Code.UnexpectedConditionException;
        }
    } catch (Exception exc) {
        rcAfterFailPoint = BKException.Code.UnexpectedConditionException;
    }
    final int effectiveRC = rcAfterFailPoint;

    // Sanity check to make sure we're receiving these callbacks in order.
    if (entryId > -1 && lastEntryId >= entryId) {
        LOG.error("Log segment {} saw out of order entry {} lastEntryId {}",
                new Object[] { fullyQualifiedLogSegment, entryId, lastEntryId });
    }
    lastEntryId = entryId;

    assert (ctx instanceof BKTransmitPacket);
    final BKTransmitPacket transmitPacket = (BKTransmitPacket) ctx;

    // Time from transmit until receipt of addComplete callback
    addCompleteTime.registerSuccessfulEvent(TimeUnit.MICROSECONDS.convert(
            System.nanoTime() - transmitPacket.getTransmitTime(), TimeUnit.NANOSECONDS), TimeUnit.MICROSECONDS);

    if (BKException.Code.OK == rc) {
        EntryBuffer recordSet = transmitPacket.getRecordSet();
        if (recordSet.hasUserRecords()) {
            synchronized (this) {
                lastTxIdAcknowledged = Math.max(lastTxIdAcknowledged, recordSet.getMaxTxId());
            }
        }
    }

    if (null != scheduler) {
        final Stopwatch queuedTime = Stopwatch.createStarted();
        Futures.addCallback(scheduler.submitOrdered(streamName, new Callable<Void>() {
            @Override
            public Void call() {
                final Stopwatch deferredTime = Stopwatch.createStarted();
                addCompleteQueuedTime.registerSuccessfulEvent(queuedTime.elapsed(TimeUnit.MICROSECONDS),
                        TimeUnit.MICROSECONDS);
                addCompleteDeferredProcessing(transmitPacket, entryId, effectiveRC);
                addCompleteDeferredTime.registerSuccessfulEvent(deferredTime.elapsed(TimeUnit.MICROSECONDS),
                        TimeUnit.MICROSECONDS);
                return null;
            }

            @Override
            public String toString() {
                return String.format("AddComplete(Stream=%s, entryId=%d, rc=%d)", fullyQualifiedLogSegment,
                        entryId, rc);
            }
        }), new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void done) {
            }

            @Override
            public void onFailure(Throwable cause) {
                LOG.error("addComplete processing failed for {} entry {} lastTxId {} rc {} with error",
                        new Object[] { fullyQualifiedLogSegment, entryId,
                                transmitPacket.getRecordSet().getMaxTxId(), rc, cause });
            }
        });
        // Race condition if we notify before the addComplete is enqueued.
        transmitPacket.notifyTransmitComplete(effectiveRC);
        outstandingTransmitsUpdater.getAndDecrement(this);
    } else {
        // Notify transmit complete must be called before deferred processing in the
        // sync case since otherwise callbacks in deferred processing may deadlock.
        transmitPacket.notifyTransmitComplete(effectiveRC);
        outstandingTransmitsUpdater.getAndDecrement(this);
        addCompleteDeferredProcessing(transmitPacket, entryId, effectiveRC);
    }
}

From source file:org.apache.bookkeeper.client.LedgerHandle.java

protected boolean waitForWritable(DistributionSchedule.WriteSet writeSet, long key, int allowedNonWritableCount,
        long durationMs) {
    if (durationMs < 0) {
        return true;
    }

    final long startTime = MathUtils.nowInNano();
    boolean success = isWritesetWritable(writeSet, key, allowedNonWritableCount);

    if (!success && durationMs > 0) {
        int backoff = 1;
        final int maxBackoff = 4;
        final long deadline = startTime + TimeUnit.MILLISECONDS.toNanos(durationMs);

        while (!isWritesetWritable(writeSet, key, allowedNonWritableCount)) {
            if (MathUtils.nowInNano() < deadline) {
                long maxSleep = MathUtils.elapsedMSec(startTime);
                if (maxSleep < 0) {
                    maxSleep = 1;
                }
                long sleepMs = Math.min(backoff, maxSleep);

                try {
                    TimeUnit.MILLISECONDS.sleep(sleepMs);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    success = isWritesetWritable(writeSet, key, allowedNonWritableCount);
                    break;
                }
                if (backoff <= maxBackoff) {
                    backoff++;
                }
            } else {
                success = false;
                break;
            }
        }
        if (backoff > 1) {
            LOG.info("Spent {} ms waiting for {} writable channels", MathUtils.elapsedMSec(startTime),
                    writeSet.size() - allowedNonWritableCount);
        }
    }

    if (success) {
        clientChannelWriteWaitStats.registerSuccessfulEvent(MathUtils.elapsedNanos(startTime),
                TimeUnit.NANOSECONDS);
    } else {
        clientChannelWriteWaitStats.registerFailedEvent(MathUtils.elapsedNanos(startTime),
                TimeUnit.NANOSECONDS);
    }
    return success;
}
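
The deadline above is built once with startTime + TimeUnit.MILLISECONDS.toNanos(durationMs) and then compared against a nanosecond clock on every retry. A compact sketch of just that bounded-retry shape, using System.nanoTime() instead of BookKeeper's MathUtils and a hypothetical BooleanSupplier in place of the writability check:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class DeadlineRetry {
    /** Retries the check with a short sleep until it passes or durationMs elapses. */
    static boolean waitFor(BooleanSupplier isReady, long durationMs) {
        final long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(durationMs);
        while (!isReady.getAsBoolean()) {
            if (System.nanoTime() >= deadline) {
                return false; // time budget exhausted
            }
            try {
                TimeUnit.MILLISECONDS.sleep(1); // brief backoff between checks
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return isReady.getAsBoolean(); // one last check, as the source does
            }
        }
        return true;
    }
}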