Example usage for java.util.concurrent TimeUnit NANOSECONDS

Introduction

This page collects example usages of java.util.concurrent.TimeUnit.NANOSECONDS from open-source projects.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
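
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the quoted projects) of the two most common NANOSECONDS idioms: unit conversion and elapsed-time measurement with System.nanoTime().

import java.util.concurrent.TimeUnit;

public class NanosBasics {
    public static void main(String[] args) throws InterruptedException {
        // Conversions truncate toward zero.
        long ms = TimeUnit.NANOSECONDS.toMillis(1_500_000L);          // 1
        long ns = TimeUnit.NANOSECONDS.convert(2, TimeUnit.SECONDS);  // 2_000_000_000

        // System.nanoTime() is monotonic; its values are only meaningful as differences.
        long start = System.nanoTime();
        TimeUnit.MILLISECONDS.sleep(10);
        long elapsedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);

        System.out.printf("ms=%d, ns=%d, elapsed=%dms%n", ms, ns, elapsedMs);
    }
}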

Usage

From source file:org.apache.hadoop.hive.llap.tezplugins.LlapTaskCommunicator.java

public void registerPingingNode(LlapNodeId nodeId) {
    long currentTs = TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS);
    PingingNodeInfo ni = new PingingNodeInfo(currentTs);
    PingingNodeInfo old = pingedNodeMap.put(nodeId, ni);
    if (old == null) {
        if (LOG.isInfoEnabled()) {
            LOG.info("Added new pinging node: [{}]", nodeId);
        }
    } else {
        old.pingCount.incrementAndGet();
    }
    // The node should always be known by this point. Log occasionally if it is not known.
    if (!knownNodeMap.containsKey(nodeId)) {
        if (old == null) {
            // First time this is seen. Log it.
            LOG.warn("Received ping from unknownNode: [{}], count={}", nodeId, ni.pingCount.get());
        } else {
            // Pinged before. Log only occasionally.
            if (currentTs > old.logTimestamp.get() + 5000L) { // 5 seconds elapsed. Log again.
                LOG.warn("Received ping from unknownNode: [{}], count={}", nodeId, old.pingCount.get());
                old.logTimestamp.set(currentTs);
            }
        }

    }
}
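
The snippet above turns the monotonic System.nanoTime() clock into a millisecond timestamp with TimeUnit.MILLISECONDS.convert(..., TimeUnit.NANOSECONDS); such timestamps are comparable only with each other, not with System.currentTimeMillis(). A hedged sketch of the same rate-limited-logging idea, with hypothetical names (LogThrottle is not part of LlapTaskCommunicator):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical helper: allow an action at most once per interval, on the monotonic clock.
final class LogThrottle {
    private final AtomicLong lastMs = new AtomicLong(Long.MIN_VALUE);
    private final long intervalMs;

    LogThrottle(long intervalMs) {
        this.intervalMs = intervalMs;
    }

    boolean shouldLog() {
        long nowMs = TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS);
        long last = lastMs.get();
        return nowMs > last + intervalMs && lastMs.compareAndSet(last, nowMs);
    }
}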

From source file:com.funambol.pushlistener.service.taskexecutor.ScheduledTaskExecutor.java

/**
 * Schedules a new task
 *
 * @param task the <code>ScheduledTaskWrapper</code> to schedule
 * @param delay the scheduling delay (it must be greater than 1 second,
 *        otherwise the delay is automatically set to 1 second)
 */
protected void scheduleTask(ScheduledTaskWrapper task, long delay) {
    if (task == null) {
        if (log.isTraceEnabled()) {
            log.trace("Trying to schedule a null task. Request rejected");
        }
        return;
    }
    //
    // We don't acquire any task lock here; the caller is expected to
    // (see updateTask, removeTask or scheduleTask(ScheduledTaskWrapper))
    //

    //
    // This is used below...see there for a description
    //
    Lock taskExecutionLock = null;
    try {

        if (!ScheduledTaskWrapper.State.CONFIGURED.equals(task.getState())
                && !ScheduledTaskWrapper.State.SCHEDULED.equals(task.getState())) {
            //
            // We log the error with an empty exception in order to capture
            // the stack trace
            //
            log.error("Trying to schedule a task that is neither configured nor scheduled. Request rejected",
                    new Exception());

            return;
        }
        long period = task.getPeriod();
        TimeUnit timeUnit = TimeUnit.MILLISECONDS;
        if (period == 0) {
            period = 1;
            timeUnit = TimeUnit.NANOSECONDS;
        }

        if (log.isTraceEnabled()) {
            log.trace("Scheduling task: " + task);
        }
        //
        // We use the execution lock to avoid the task execution before putting
        // it in the scheduledFutures map.
        // See ScheduledTaskWrapper.execute
        //
        taskExecutionLock = new ReentrantLock();
        taskExecutionLock.lock();
        task.setExecutionLock(taskExecutionLock);
        ScheduledFuture scheduledFuture = scheduleWithFixedDelay(task, delay, period, timeUnit);

        task.setState(ScheduledTaskWrapper.State.SCHEDULED);

        //
        // Since DualHashBidiMap is not synchronized we need to sync the write
        // access
        //
        synchronized (scheduledFutures) {
            scheduledFutures.put(task, scheduledFuture);
        }
    } finally {
        // the lock is null if we returned before creating it
        if (taskExecutionLock != null) {
            taskExecutionLock.unlock();
        }
    }
}
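
Note the trick above: a period of 0 is replaced by 1 NANOSECOND, the smallest positive period scheduleWithFixedDelay accepts, so the task is rescheduled essentially back-to-back. A standalone sketch of the idea (the executor setup here is an assumption, not Funambol code):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class TightRescheduleDemo {
    public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // A 1 ns fixed delay approximates "run again as soon as possible"
        // while staying within the fixed-delay scheduling contract.
        pool.scheduleWithFixedDelay(() -> System.out.println("tick"), 0, 1, TimeUnit.NANOSECONDS);
        pool.schedule(pool::shutdownNow, 5, TimeUnit.MILLISECONDS); // stop the demo
    }
}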

From source file:ddf.catalog.test.TestCatalog.java

@Test
public void testContentDirectoryMonitor() throws Exception {
    startFeature(true, "content-core-directorymonitor");
    final String TMP_PREFIX = "tcdm_";
    Path tmpDir = Files.createTempDirectory(TMP_PREFIX);
    tmpDir.toFile().deleteOnExit();
    Path tmpFile = Files.createTempFile(tmpDir, TMP_PREFIX, "_tmp.xml");
    tmpFile.toFile().deleteOnExit();
    Files.copy(this.getClass().getClassLoader().getResourceAsStream("metacard5.xml"), tmpFile,
            StandardCopyOption.REPLACE_EXISTING);

    Map<String, Object> cdmProperties = new HashMap<>();
    cdmProperties.putAll(getMetatypeDefaults("content-core-directorymonitor",
            "ddf.content.core.directorymonitor.ContentDirectoryMonitor"));
    cdmProperties.put("monitoredDirectoryPath", tmpDir.toString() + "/"); // Must end with /
    cdmProperties.put("directive", "STORE_AND_PROCESS");
    createManagedService("ddf.content.core.directorymonitor.ContentDirectoryMonitor", cdmProperties);

    long startTime = System.nanoTime();
    ValidatableResponse response = null;
    do {
        response = executeOpenSearch("xml", "q=*SysAdmin*");
        if (response.extract().xmlPath().getList("metacards.metacard").size() == 1) {
            break;
        }
        try {
            TimeUnit.MILLISECONDS.sleep(50);
        } catch (InterruptedException e) {
            // ignore and re-check the deadline below
        }
    } while (TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime) < TimeUnit.MINUTES.toMillis(1));
    response.body("metacards.metacard.size()", equalTo(1));
}
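
The loop above is the standard poll-until-deadline pattern: sleep briefly, re-query, and stop once TimeUnit.NANOSECONDS.toMillis of the elapsed nanoTime difference exceeds the timeout. A generic version of the helper, with illustrative names (not DDF API):

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Hypothetical helper: poll until the condition holds or the timeout elapses.
final class Poller {
    static boolean await(BooleanSupplier condition, long timeout, TimeUnit unit)
            throws InterruptedException {
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        while (System.nanoTime() - deadline < 0) { // overflow-safe comparison
            if (condition.getAsBoolean()) {
                return true;
            }
            TimeUnit.MILLISECONDS.sleep(50);
        }
        return condition.getAsBoolean();
    }
}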

From source file:org.apache.hadoop.ha.ZKFailoverController.java

/**
 * Schedule a call to {@link #recheckElectability()} in the future.
 */
private void scheduleRecheck(long whenNanos) {
    delayExecutor.schedule(new Runnable() {
        @Override
        public void run() {
            try {
                recheckElectability();
            } catch (Throwable t) {
                fatalError("Failed to recheck electability: " + StringUtils.stringifyException(t));
            }
        }
    }, whenNanos, TimeUnit.NANOSECONDS);
}
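
Scheduling with TimeUnit.NANOSECONDS, as above, lets callers compute the delay directly from System.nanoTime() arithmetic with no lossy conversion. A minimal sketch (the single-thread scheduler here is an assumption, not the ZKFailoverController field):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class NanoDelayDemo {
    public static void main(String[] args) {
        ScheduledExecutorService delayExecutor = Executors.newSingleThreadScheduledExecutor();
        long whenNanos = TimeUnit.MILLISECONDS.toNanos(250); // fire in 250 ms
        delayExecutor.schedule(() -> System.out.println("recheck"), whenNanos, TimeUnit.NANOSECONDS);
        delayExecutor.shutdown(); // pending delayed tasks still run by default
    }
}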

From source file:ml.shifu.shifu.core.dtrain.dt.DTWorker.java

@Override
public DTWorkerParams doCompute(WorkerContext<DTMasterParams, DTWorkerParams> context) {
    if (context.isFirstIteration()) {
        return new DTWorkerParams();
    }

    DTMasterParams lastMasterResult = context.getLastMasterResult();
    final List<TreeNode> trees = lastMasterResult.getTrees();
    final Map<Integer, TreeNode> todoNodes = lastMasterResult.getTodoNodes();
    if (todoNodes == null) {
        return new DTWorkerParams();
    }

    LOG.info("Start to work: todoNodes size is {}", todoNodes.size());

    Map<Integer, NodeStats> statistics = initTodoNodeStats(todoNodes);

    double trainError = 0d, validationError = 0d;
    double weightedTrainCount = 0d, weightedValidationCount = 0d;
    // renew random seed
    if (this.isGBDT && !this.gbdtSampleWithReplacement && lastMasterResult.isSwitchToNextTree()) {
        this.baggingRandomMap = new HashMap<Integer, Random>();
    }

    long start = System.nanoTime();
    for (Data data : this.trainingData) {
        if (this.isRF) {
            for (TreeNode treeNode : trees) {
                if (treeNode.getNode().getId() == Node.INVALID_INDEX) {
                    continue;
                }

                Node predictNode = predictNodeIndex(treeNode.getNode(), data, true);
                if (predictNode.getPredict() != null) {
                    // only update past the root node; the root has no predict statistics yet
                    float weight = data.subsampleWeights[treeNode.getTreeId()];
                    if (Float.compare(weight, 0f) == 0) {
                        // oob data, no need to do weighting
                        validationError += data.significance * loss
                                .computeError((float) (predictNode.getPredict().getPredict()), data.label);
                        weightedValidationCount += data.significance;
                    } else {
                        trainError += weight * data.significance * loss
                                .computeError((float) (predictNode.getPredict().getPredict()), data.label);
                        weightedTrainCount += weight * data.significance;
                    }
                }
            }
        }

        if (this.isGBDT) {
            if (this.isContinuousEnabled && lastMasterResult.isContinuousRunningStart()) {
                recoverGBTData(context, data.output, data.predict, data, false);
                trainError += data.significance * loss.computeError(data.predict, data.label);
                weightedTrainCount += data.significance;
            } else {
                if (isNeedRecoverGBDTPredict) {
                    if (this.recoverTrees == null) {
                        this.recoverTrees = recoverCurrentTrees();
                    }
                    // recover gbdt data for fail over
                    recoverGBTData(context, data.output, data.predict, data, true);
                }
                int currTreeIndex = trees.size() - 1;

                if (lastMasterResult.isSwitchToNextTree()) {
                    if (currTreeIndex >= 1) {
                        Node node = trees.get(currTreeIndex - 1).getNode();
                        Node predictNode = predictNodeIndex(node, data, false);
                        if (predictNode.getPredict() != null) {
                            double predict = predictNode.getPredict().getPredict();
                            // first-tree logic: the master keeps isFirstTree set even while the
                            // second tree's ROOT node is being sent
                            if (context.getLastMasterResult().isFirstTree()) {
                                data.predict = (float) predict;
                            } else {
                                // random drop
                                boolean drop = (this.dropOutRate > 0.0
                                        && dropOutRandom.nextDouble() < this.dropOutRate);
                                if (!drop) {
                                    data.predict += (float) (this.learningRate * predict);
                                }
                            }
                            data.output = -1f * loss.computeGradient(data.predict, data.label);
                        }
                        // if not sampling with replacement in gbdt, renew bagging sample rate in next tree
                        if (!this.gbdtSampleWithReplacement) {
                            Random random = null;
                            int classValue = (int) (data.label + 0.01f);
                            if (this.isStratifiedSampling) {
                                random = baggingRandomMap.get(classValue);
                                if (random == null) {
                                    random = DTrainUtils.generateRandomBySampleSeed(
                                            modelConfig.getTrain().getBaggingSampleSeed(),
                                            CommonConstants.NOT_CONFIGURED_BAGGING_SEED);
                                    baggingRandomMap.put(classValue, random);
                                }
                            } else {
                                random = baggingRandomMap.get(0);
                                if (random == null) {
                                    random = DTrainUtils.generateRandomBySampleSeed(
                                            modelConfig.getTrain().getBaggingSampleSeed(),
                                            CommonConstants.NOT_CONFIGURED_BAGGING_SEED);
                                    baggingRandomMap.put(0, random);
                                }
                            }
                            if (random.nextDouble() <= modelConfig.getTrain().getBaggingSampleRate()) {
                                data.subsampleWeights[currTreeIndex % data.subsampleWeights.length] = 1f;
                            } else {
                                data.subsampleWeights[currTreeIndex % data.subsampleWeights.length] = 0f;
                            }
                        }
                    }
                }

                if (context.getLastMasterResult().isFirstTree() && !lastMasterResult.isSwitchToNextTree()) {
                    Node currTree = trees.get(currTreeIndex).getNode();
                    Node predictNode = predictNodeIndex(currTree, data, true);
                    if (predictNode.getPredict() != null) {
                        trainError += data.significance * loss
                                .computeError((float) (predictNode.getPredict().getPredict()), data.label);
                        weightedTrainCount += data.significance;
                    }
                } else {
                    trainError += data.significance * loss.computeError(data.predict, data.label);
                    weightedTrainCount += data.significance;
                }
            }
        }
    }
    LOG.debug("Compute train error time is {}ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));

    if (validationData != null) {
        start = System.nanoTime();
        for (Data data : this.validationData) {
            if (this.isRF) {
                for (TreeNode treeNode : trees) {
                    if (treeNode.getNode().getId() == Node.INVALID_INDEX) {
                        continue;
                    }
                    Node predictNode = predictNodeIndex(treeNode.getNode(), data, true);
                    if (predictNode.getPredict() != null) {
                        // only update past the root node; the root has no predict statistics yet
                        validationError += data.significance * loss
                                .computeError((float) (predictNode.getPredict().getPredict()), data.label);
                        weightedValidationCount += data.significance;
                    }
                }
            }

            if (this.isGBDT) {
                if (this.isContinuousEnabled && lastMasterResult.isContinuousRunningStart()) {
                    recoverGBTData(context, data.output, data.predict, data, false);
                    validationError += data.significance * loss.computeError(data.predict, data.label);
                    weightedValidationCount += data.significance;
                } else {
                    if (isNeedRecoverGBDTPredict) {
                        if (this.recoverTrees == null) {
                            this.recoverTrees = recoverCurrentTrees();
                        }
                        // recover gbdt data for fail over
                        recoverGBTData(context, data.output, data.predict, data, true);
                    }
                    int currTreeIndex = trees.size() - 1;
                    if (lastMasterResult.isSwitchToNextTree()) {
                        if (currTreeIndex >= 1) {
                            Node node = trees.get(currTreeIndex - 1).getNode();
                            Node predictNode = predictNodeIndex(node, data, false);
                            if (predictNode.getPredict() != null) {
                                double predict = predictNode.getPredict().getPredict();
                                if (context.getLastMasterResult().isFirstTree()) {
                                    data.predict = (float) predict;
                                } else {
                                    data.predict += (float) (this.learningRate * predict);
                                }
                                data.output = -1f * loss.computeGradient(data.predict, data.label);
                            }
                        }
                    }
                    if (context.getLastMasterResult().isFirstTree() && !lastMasterResult.isSwitchToNextTree()) {
                        Node predictNode = predictNodeIndex(trees.get(currTreeIndex).getNode(), data, true);
                        if (predictNode.getPredict() != null) {
                            validationError += data.significance * loss
                                    .computeError((float) (predictNode.getPredict().getPredict()), data.label);
                            weightedValidationCount += data.significance;
                        }
                    } else {
                        validationError += data.significance * loss.computeError(data.predict, data.label);
                        weightedValidationCount += data.significance;
                    }
                }
            }
        }
        LOG.debug("Compute val error time is {}ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
    }

    if (this.isGBDT) {
        // reset trees to null to save memory
        this.recoverTrees = null;
        if (this.isNeedRecoverGBDTPredict) {
            // no need recover again
            this.isNeedRecoverGBDTPredict = false;
        }
    }

    start = System.nanoTime();
    CompletionService<Map<Integer, NodeStats>> completionService = new ExecutorCompletionService<Map<Integer, NodeStats>>(
            this.threadPool);

    int realThreadCount = 0;
    LOG.debug("while todo size {}", todoNodes.size());

    int realRecords = this.trainingData.size();
    int realThreads = this.workerThreadCount > realRecords ? realRecords : this.workerThreadCount;

    int[] trainLows = new int[realThreads];
    int[] trainHighs = new int[realThreads];

    int stepCount = realRecords / realThreads;
    if (realRecords % realThreads != 0) {
        // grow stepCount to absorb the remainder, so the last thread does not
        // end up with nearly 2*stepCount-1 records
        stepCount += (realRecords % realThreads) / stepCount;
    }
    for (int i = 0; i < realThreads; i++) {
        trainLows[i] = i * stepCount;
        if (i != realThreads - 1) {
            trainHighs[i] = trainLows[i] + stepCount - 1;
        } else {
            trainHighs[i] = realRecords - 1;
        }
    }

    for (int i = 0; i < realThreads; i++) {
        final Map<Integer, TreeNode> localTodoNodes = new HashMap<Integer, TreeNode>(todoNodes);
        final Map<Integer, NodeStats> localStatistics = initTodoNodeStats(todoNodes);

        final int startIndex = trainLows[i];
        final int endIndex = trainHighs[i];
        LOG.info("Thread {} todo size {} stats size {} start index {} end index {}", i, localTodoNodes.size(),
                localStatistics.size(), startIndex, endIndex);

        if (localTodoNodes.size() == 0) {
            continue;
        }
        realThreadCount += 1;
        completionService.submit(new Callable<Map<Integer, NodeStats>>() {
            @Override
            public Map<Integer, NodeStats> call() throws Exception {
                long start = System.nanoTime();
                List<Integer> nodeIndexes = new ArrayList<Integer>(trees.size());
                for (int j = startIndex; j <= endIndex; j++) {
                    Data data = DTWorker.this.trainingData.get(j);
                    nodeIndexes.clear();
                    if (DTWorker.this.isRF) {
                        for (TreeNode treeNode : trees) {
                            if (treeNode.getNode().getId() == Node.INVALID_INDEX) {
                                nodeIndexes.add(Node.INVALID_INDEX);
                            } else {
                                Node predictNode = predictNodeIndex(treeNode.getNode(), data, false);
                                nodeIndexes.add(predictNode.getId());
                            }
                        }
                    }

                    if (DTWorker.this.isGBDT) {
                        int currTreeIndex = trees.size() - 1;
                        Node predictNode = predictNodeIndex(trees.get(currTreeIndex).getNode(), data, false);
                        // update node index
                        nodeIndexes.add(predictNode.getId());
                    }
                    for (Map.Entry<Integer, TreeNode> entry : localTodoNodes.entrySet()) {
                        // only do statistics on effective data
                        Node todoNode = entry.getValue().getNode();
                        int treeId = entry.getValue().getTreeId();
                        int currPredictIndex = 0;
                        if (DTWorker.this.isRF) {
                            currPredictIndex = nodeIndexes.get(entry.getValue().getTreeId());
                        }
                        if (DTWorker.this.isGBDT) {
                            currPredictIndex = nodeIndexes.get(0);
                        }

                        if (todoNode.getId() == currPredictIndex) {
                            List<Integer> features = entry.getValue().getFeatures();
                            if (features.isEmpty()) {
                                features = getAllValidFeatures();
                            }
                            for (Integer columnNum : features) {
                                double[] featureStatistic = localStatistics.get(entry.getKey())
                                        .getFeatureStatistics().get(columnNum);
                                float weight = data.subsampleWeights[treeId % data.subsampleWeights.length];
                                if (Float.compare(weight, 0f) != 0) {
                                    // only compute when the weight is not 0
                                    short binIndex = data.inputs[DTWorker.this.inputIndexMap.get(columnNum)];
                                    DTWorker.this.impurity.featureUpdate(featureStatistic, binIndex,
                                            data.output, data.significance, weight);
                                }
                            }
                        }
                    }
                }
                LOG.debug("Thread computing stats time is {}ms in thread {}",
                        TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start),
                        Thread.currentThread().getName());
                return localStatistics;
            }
        });
    }

    int rCnt = 0;
    while (rCnt < realThreadCount) {
        try {
            Map<Integer, NodeStats> currNodeStatsmap = completionService.take().get();
            if (rCnt == 0) {
                statistics = currNodeStatsmap;
            } else {
                for (Entry<Integer, NodeStats> entry : statistics.entrySet()) {
                    NodeStats resultNodeStats = entry.getValue();
                    mergeNodeStats(resultNodeStats, currNodeStatsmap.get(entry.getKey()));
                }
            }
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        rCnt += 1;
    }
    LOG.debug("Compute stats time is {}ms", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));

    LOG.info(
            "worker count is {}, error is {}, and stats size is {}. weightedTrainCount {}, weightedValidationCount {}, trainError {}, validationError {}",
            count, trainError, statistics.size(), weightedTrainCount, weightedValidationCount, trainError,
            validationError);
    return new DTWorkerParams(weightedTrainCount, weightedValidationCount, trainError, validationError,
            statistics);
}
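
DTWorker times each phase with the same three steps: capture System.nanoTime(), do the work, then log TimeUnit.NANOSECONDS.toMillis of the difference. A small hedged helper that factors the pattern out (Timed is illustrative, not part of Shifu):

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

// Hypothetical utility wrapping the nanoTime/toMillis timing pattern used above.
final class Timed {
    static <T> T log(String label, Supplier<T> work) {
        long start = System.nanoTime();
        try {
            return work.get();
        } finally {
            System.out.printf("%s took %dms%n", label,
                    TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
        }
    }
}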

From source file:org.apache.accumulo.shell.Shell.java

public void printVerboseInfo() throws IOException {
    StringBuilder sb = new StringBuilder("-\n");
    sb.append("- Current user: ").append(connector.whoami()).append("\n");
    if (execFile != null)
        sb.append("- Executing commands from: ").append(execFile).append("\n");
    if (disableAuthTimeout)
        sb.append("- Authorization timeout: disabled\n");
    else
        sb.append("- Authorization timeout: ")
                .append(String.format("%ds%n", TimeUnit.NANOSECONDS.toSeconds(authTimeout)));
    sb.append("- Debug: ").append(isDebuggingEnabled() ? "on" : "off").append("\n");
    if (!scanIteratorOptions.isEmpty()) {
        for (Entry<String, List<IteratorSetting>> entry : scanIteratorOptions.entrySet()) {
            sb.append("- Session scan iterators for table ").append(entry.getKey()).append(":\n");
            for (IteratorSetting setting : entry.getValue()) {
                sb.append("-    Iterator ").append(setting.getName()).append(" options:\n");
                sb.append("-        ").append("iteratorPriority").append(" = ").append(setting.getPriority())
                        .append("\n");
                sb.append("-        ").append("iteratorClassName").append(" = ")
                        .append(setting.getIteratorClass()).append("\n");
                for (Entry<String, String> optEntry : setting.getOptions().entrySet()) {
                    sb.append("-        ").append(optEntry.getKey()).append(" = ").append(optEntry.getValue())
                            .append("\n");
                }
            }
        }
    }
    sb.append("-\n");
    reader.print(sb.toString());
}
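
Here the shell stores authTimeout internally in nanoseconds and only converts for display with NANOSECONDS.toSeconds. A tiny sketch of that round trip (the 60-second value is an assumption for illustration):

import java.util.concurrent.TimeUnit;

public class TimeoutDisplayDemo {
    public static void main(String[] args) {
        long authTimeoutNanos = TimeUnit.SECONDS.toNanos(60);  // stored internally in nanos
        System.out.printf("%ds%n", TimeUnit.NANOSECONDS.toSeconds(authTimeoutNanos)); // prints 60s
    }
}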

From source file:org.apache.hadoop.hive.llap.tezplugins.LlapTaskCommunicator.java

void nodePinged(String hostname, String uniqueId, int port, TezAttemptArray tasks) {
    // TODO: do we ever need the port? we could just do away with nodeId altogether.
    LlapNodeId nodeId = LlapNodeId.getInstance(hostname, port);
    registerPingingNode(nodeId);
    BiMap<ContainerId, TezTaskAttemptID> biMap = entityTracker.getContainerAttemptMapForNode(nodeId);
    if (biMap != null) {
        HashSet<TezTaskAttemptID> attempts = new HashSet<>();
        for (Writable w : tasks.get()) {
            attempts.add((TezTaskAttemptID) w);
        }
        String error = "";
        synchronized (biMap) {
            for (Map.Entry<ContainerId, TezTaskAttemptID> entry : biMap.entrySet()) {
                // TODO: this is a stopgap fix. We really need to change all mappings by unique node ID,
                //       or at least (in this case) track the latest unique ID for LlapNode and retry all
                //       older-node tasks proactively. For now let the heartbeats fail them.
                TezTaskAttemptID attemptId = entry.getValue();
                String taskNodeId = entityTracker.getUniqueNodeId(attemptId);
                // Unique ID is registered based on Submit response. Theoretically, we could get a ping
                // when the task is valid but we haven't stored the unique ID yet, so taskNodeId is null.
                // However, the next heartbeat(s) should get the value eventually and mark task as alive.
                // Also, we prefer a missed heartbeat over a stuck query in case of discrepancy in ET.
                if (taskNodeId != null && taskNodeId.equals(uniqueId)) {
                    if (attempts.contains(attemptId)) {
                        getContext().taskAlive(entry.getValue());
                    } else {
                        error += (attemptId + ", ");
                    }
                    getContext().containerAlive(entry.getKey());
                }
            }
        }
        if (!error.isEmpty()) {
            LOG.info("The tasks we expected to be on the node are not there: " + error);
        }
    } else {
        long currentTs = TimeUnit.MILLISECONDS.convert(System.nanoTime(), TimeUnit.NANOSECONDS);
        if (currentTs > nodeNotFoundLogTime.get() + 5000L) {
            LOG.warn("Received ping from node without any registered tasks or containers: " + hostname + ":"
                    + port + ". Could be caused by pre-emption by the AM,"
                    + " or a mismatched hostname. Enable debug logging for mismatched host names");
            nodeNotFoundLogTime.set(currentTs);
        }
    }
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.SingleDirectoryDbLedgerStorage.java

private void recordSuccessfulEvent(OpStatsLogger logger, long startTimeNanos) {
    logger.registerSuccessfulEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.SingleDirectoryDbLedgerStorage.java

private void recordFailedEvent(OpStatsLogger logger, long startTimeNanos) {
    logger.registerFailedEvent(MathUtils.elapsedNanos(startTimeNanos), TimeUnit.NANOSECONDS);
}
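
Both helpers record operation latencies in NANOSECONDS, the usual convention for BookKeeper's OpStatsLogger; MathUtils.elapsedNanos(start) is BookKeeper's helper for computing System.nanoTime() - start. A plain-JDK sketch of the same measure-then-record flow, with OpStatsLogger stubbed by a hypothetical interface:

import java.util.concurrent.TimeUnit;

public class LatencyRecordDemo {
    interface StatsSink { void record(long value, TimeUnit unit); } // stand-in, not the real OpStatsLogger

    public static void main(String[] args) throws InterruptedException {
        StatsSink sink = (value, unit) -> System.out.printf("recorded %dus%n", unit.toMicros(value));
        long start = System.nanoTime();
        TimeUnit.MILLISECONDS.sleep(5); // the operation being measured
        sink.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}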