Example usage for java.util.concurrent TimeUnit NANOSECONDS

Introduction

This page collects example usages of java.util.concurrent.TimeUnit.NANOSECONDS, drawn from open-source projects.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
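
As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the conversion methods most of them rely on:

import java.util.concurrent.TimeUnit;

public class NanosDemo {
    public static void main(String[] args) {
        // toNanos/toMillis/etc. convert a duration given in the receiver's unit
        long oneSecondNs = TimeUnit.SECONDS.toNanos(1);             // 1_000_000_000
        long asMillis = TimeUnit.NANOSECONDS.toMillis(oneSecondNs); // 1000

        // convert(sourceDuration, sourceUnit) converts INTO the receiver unit:
        // here, 500 microseconds expressed in nanoseconds
        long ns = TimeUnit.NANOSECONDS.convert(500, TimeUnit.MICROSECONDS); // 500_000

        System.out.println(asMillis + " ms, " + ns + " ns");
    }
}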

Usage

From source file:org.apache.hadoop.hbase.client.RawAsyncTableImpl.java

@Override
public long getScanTimeout(TimeUnit unit) {
    // express the stored nanosecond timeout in the unit requested by the caller
    return unit.convert(scanTimeoutNs, TimeUnit.NANOSECONDS);
}
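
The direction of convert is the most common stumbling block with this API: the receiver of unit.convert(duration, sourceUnit) is the target unit, and the second argument is the unit the duration is currently in. A minimal sketch of both directions (assuming import java.util.concurrent.TimeUnit; the values are illustrative):

long timeoutNs = TimeUnit.SECONDS.toNanos(3);                                   // 3_000_000_000
long asMillis = TimeUnit.MILLISECONDS.convert(timeoutNs, TimeUnit.NANOSECONDS); // 3000
long backToNs = TimeUnit.NANOSECONDS.convert(asMillis, TimeUnit.MILLISECONDS);  // 3_000_000_000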

From source file:org.apache.drill.exec.store.mongo.MongoGroupScan.java

@Override
public void applyAssignments(List<DrillbitEndpoint> endpoints) throws PhysicalOperatorSetupException {
    logger.debug("Incoming endpoints :" + endpoints);
    watch.reset();
    watch.start();

    final int numSlots = endpoints.size();
    int totalAssignmentsTobeDone = chunksMapping.size();

    Preconditions.checkArgument(numSlots <= totalAssignmentsTobeDone, String.format(
            "Incoming endpoints %d is greater than number of chunks %d", numSlots, totalAssignmentsTobeDone));

    final int minPerEndpointSlot = (int) Math.floor((double) totalAssignmentsTobeDone / numSlots);
    final int maxPerEndpointSlot = (int) Math.ceil((double) totalAssignmentsTobeDone / numSlots);

    endpointFragmentMapping = Maps.newHashMapWithExpectedSize(numSlots);
    Map<String, Queue<Integer>> endpointHostIndexListMap = Maps.newHashMap();

    for (int i = 0; i < numSlots; ++i) {
        endpointFragmentMapping.put(i, new ArrayList<MongoSubScanSpec>(maxPerEndpointSlot));
        String hostname = endpoints.get(i).getAddress();
        Queue<Integer> hostIndexQueue = endpointHostIndexListMap.get(hostname);
        if (hostIndexQueue == null) {
            hostIndexQueue = Lists.newLinkedList();
            endpointHostIndexListMap.put(hostname, hostIndexQueue);
        }
        hostIndexQueue.add(i);
    }

    Set<Entry<String, List<ChunkInfo>>> chunksToAssignSet = Sets.newHashSet(chunksInverseMapping.entrySet());

    for (Iterator<Entry<String, List<ChunkInfo>>> chunksIterator = chunksToAssignSet.iterator(); chunksIterator
            .hasNext();) {
        Entry<String, List<ChunkInfo>> chunkEntry = chunksIterator.next();
        Queue<Integer> slots = endpointHostIndexListMap.get(chunkEntry.getKey());
        if (slots != null) {
            for (ChunkInfo chunkInfo : chunkEntry.getValue()) {
                Integer slotIndex = slots.poll();
                List<MongoSubScanSpec> subScanSpecList = endpointFragmentMapping.get(slotIndex);
                subScanSpecList.add(buildSubScanSpecAndGet(chunkInfo));
                slots.offer(slotIndex);
            }
            chunksIterator.remove();
        }
    }

    PriorityQueue<List<MongoSubScanSpec>> minHeap = new PriorityQueue<List<MongoSubScanSpec>>(numSlots,
            LIST_SIZE_COMPARATOR);
    PriorityQueue<List<MongoSubScanSpec>> maxHeap = new PriorityQueue<List<MongoSubScanSpec>>(numSlots,
            LIST_SIZE_COMPARATOR_REV);
    for (List<MongoSubScanSpec> listOfScan : endpointFragmentMapping.values()) {
        if (listOfScan.size() < minPerEndpointSlot) {
            minHeap.offer(listOfScan);
        } else if (listOfScan.size() > minPerEndpointSlot) {
            maxHeap.offer(listOfScan);
        }
    }

    if (chunksToAssignSet.size() > 0) {
        for (Entry<String, List<ChunkInfo>> chunkEntry : chunksToAssignSet) {
            for (ChunkInfo chunkInfo : chunkEntry.getValue()) {
                List<MongoSubScanSpec> smallestList = minHeap.poll();
                smallestList.add(buildSubScanSpecAndGet(chunkInfo));
                minHeap.offer(smallestList);
            }
        }
    }

    while (minHeap.peek() != null && minHeap.peek().size() < minPerEndpointSlot) {
        List<MongoSubScanSpec> smallestList = minHeap.poll();
        List<MongoSubScanSpec> largestList = maxHeap.poll();
        smallestList.add(largestList.remove(largestList.size() - 1));
        if (largestList.size() > minPerEndpointSlot) {
            maxHeap.offer(largestList);
        }
        if (smallestList.size() < minPerEndpointSlot) {
            minHeap.offer(smallestList);
        }
    }

    logger.debug("Built assignment map in {} s.\nEndpoints: {}.\nAssignment Map: {}",
            watch.elapsed(TimeUnit.NANOSECONDS) / 1000, endpoints, endpointFragmentMapping.toString());
}

From source file:at.ac.tuwien.infosys.jcloudscale.server.JCloudScaleServer.java

protected void touchServerCloudObject(ServerCloudObject sco) {

    if (log.isLoggable(Level.FINE))
        log.fine("Received isAlive message for object " + sco.getId() + ". Last isAlive "
                + TimeUnit.MILLISECONDS.convert(System.nanoTime() - sco.getLastTouched(), TimeUnit.NANOSECONDS)
                + "ms ago.");

    sco.setLastTouched(System.nanoTime());
}
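
The pattern above recurs in several of these examples: System.nanoTime() is a monotonic clock whose readings are only meaningful as differences, and TimeUnit.NANOSECONDS converts the delta into a readable unit. A minimal, self-contained sketch of the idiom (the sleep is just a stand-in for real work):

import java.util.concurrent.TimeUnit;

public class ElapsedDemo {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        Thread.sleep(50); // stand-in for the operation being timed
        long elapsedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        System.out.println("took " + elapsedMs + " ms");
    }
}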

From source file:org.apache.hadoop.hbase.client.RawAsyncTableImpl.java

@Override
public <T> List<CompletableFuture<T>> batch(List<? extends Row> actions) {
    return conn.callerFactory.batch().table(tableName).actions(actions)
            .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
            .readRpcTimeout(readRpcTimeoutNs, TimeUnit.NANOSECONDS)
            .writeRpcTimeout(writeRpcTimeoutNs, TimeUnit.NANOSECONDS).call();
}
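
Note the design choice here: rather than converting up front, the raw nanosecond values are handed to the builder together with TimeUnit.NANOSECONDS, leaving any conversion to the callee. Passing a (value, unit) pair like this sidesteps the inverted-convert mistake discussed above.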

From source file:com.yahoo.pulsar.broker.service.BrokerService.java

private CompletableFuture<Topic> createPersistentTopic(final String topic) throws RuntimeException {
    checkTopicNsOwnership(topic);

    final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    DestinationName destinationName = DestinationName.get(topic);
    if (!pulsar.getNamespaceService().isServiceUnitActive(destinationName)) {
        // namespace is being unloaded
        String msg = String.format("Namespace is being unloaded, cannot add topic %s", topic);
        log.warn(msg);
        throw new RuntimeException(new ServiceUnitNotReadyException(msg));
    }

    final CompletableFuture<Topic> topicFuture = new CompletableFuture<>();

    getManagedLedgerConfig(destinationName).thenAccept(config -> {
        // Once we have the configuration, we can proceed with the async open operation

        managedLedgerFactory.asyncOpen(destinationName.getPersistenceNamingEncoding(), config,
                new OpenLedgerCallback() {
                    @Override
                    public void openLedgerComplete(ManagedLedger ledger, Object ctx) {
                        PersistentTopic persistentTopic = new PersistentTopic(topic, ledger,
                                BrokerService.this);

                        CompletableFuture<Void> replicationFuture = persistentTopic.checkReplication();
                        replicationFuture.thenRun(() -> {
                            log.info("Created topic {}", topic);
                            long topicLoadLatencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime())
                                    - topicCreateTimeMs;
                            pulsarStats.recordTopicLoadTimeValue(topic, topicLoadLatencyMs);
                            addTopicToStatsMaps(destinationName, persistentTopic);
                            topicFuture.complete(persistentTopic);
                        });
                        replicationFuture.exceptionally((ex) -> {
                            log.warn("Replication check failed. Removing topic from topics list {}, {}", topic,
                                    ex);
                            persistentTopic.stopReplProducers().whenComplete((v, exception) -> {
                                topics.remove(topic, topicFuture);
                                topicFuture.completeExceptionally(ex);
                            });

                            return null;
                        });
                    }

                    @Override
                    public void openLedgerFailed(ManagedLedgerException exception, Object ctx) {
                        log.warn("Failed to create topic {}", topic, exception);
                        topics.remove(topic, topicFuture);
                        topicFuture.completeExceptionally(new PersistenceException(exception));
                    }
                }, null);

    }).exceptionally((exception) -> {
        log.warn("[{}] Failed to get topic configuration: {}", topic, exception.getMessage(), exception);
        topics.remove(topic, topicFuture);
        topicFuture.completeExceptionally(exception);
        return null;
    });

    return topicFuture;
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

private void scheduleTimeoutTask() {
    long timeoutSec = config.getAddEntryTimeoutSeconds();
    // disable timeout task checker if timeout <= 0
    if (timeoutSec > 0) {
        this.timeoutTask = this.scheduledExecutor.scheduleAtFixedRate(() -> {
            OpAddEntry opAddEntry = pendingAddEntries.peek();
            if (opAddEntry != null) {
                boolean isTimedOut = opAddEntry.lastInitTime != -1
                        && TimeUnit.NANOSECONDS
                                .toSeconds(System.nanoTime() - opAddEntry.lastInitTime) >= timeoutSec
                        && opAddEntry.completed == FALSE;
                if (isTimedOut) {
                    log.error("Failed to add entry for ledger {} in time-out {} sec",
                            (opAddEntry.ledger != null ? opAddEntry.ledger.getId() : -1), timeoutSec);
                    opAddEntry.handleAddFailure(opAddEntry.ledger);
                }
            }
        }, config.getAddEntryTimeoutSeconds(), config.getAddEntryTimeoutSeconds(), TimeUnit.SECONDS);
    }
}
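
Only the head of pendingAddEntries is examined on each tick; since entries appear to be queued in arrival order, if the oldest pending add has not exceeded the timeout, no later one can have either. The age test uses a System.nanoTime() delta converted via TimeUnit.NANOSECONDS.toSeconds, so it is unaffected by wall-clock adjustments.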

From source file:com.vmware.identity.interop.ldap.LdapConnection.java

@Override
public void bindSaslSrpConnection(String upn, String userPassword) {
    this.validate();

    ILdapClientLibrary ldapClientLibrary = getLdapLibrary();

    long startedAt = System.nanoTime();
    try {
        ldapClientLibrary.ldap_sasl_srp_bind_s(this._connection, upn, userPassword);
    } finally {
        if (perfLog.isTraceEnabled()) {
            perfLog.trace(String.format("bindSaslSrpConnection took [%d]ms",
                    TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startedAt)));
        }
    }
}

From source file:com.netflix.genie.core.services.impl.JobCoordinatorServiceImpl.java

private Cluster getCluster(final JobRequest jobRequest) throws GenieException {
    final long start = System.nanoTime();
    final Map<String, String> timerTags = MetricsUtils.newSuccessTagsMap();
    final Map<String, String> counterTags = Maps.newHashMap();
    try {
        log.info("Selecting cluster for job {}", jobRequest.getId().orElse(NO_ID_FOUND));
        final List<Cluster> clusters = ImmutableList
                .copyOf(this.clusterService.chooseClusterForJobRequest(jobRequest));
        Cluster cluster = null;
        if (clusters.isEmpty()) {
            this.noClusterFoundCounter.increment();
            throw new GeniePreconditionException(
                    "No cluster/command combination found for the given criteria. Unable to continue");
        } else if (clusters.size() == 1) {
            cluster = clusters.get(0);
        } else {
            for (final ClusterLoadBalancer loadBalancer : this.clusterLoadBalancers) {
                final String loadBalancerClass = (loadBalancer instanceof TargetClassAware
                        ? ((TargetClassAware) loadBalancer).getTargetClass()
                        : loadBalancer.getClass()).getCanonicalName();
                counterTags.put(MetricsConstants.TagKeys.CLASS_NAME, loadBalancerClass);
                try {
                    final Cluster selectedCluster = loadBalancer.selectCluster(clusters, jobRequest);
                    if (selectedCluster != null) {
                        // Make sure the cluster existed in the original list of clusters
                        if (clusters.contains(selectedCluster)) {
                            log.debug("Successfully selected cluster {} using load balancer {}",
                                    selectedCluster.getId().orElse(NO_ID_FOUND), loadBalancerClass);
                            counterTags.put(MetricsConstants.TagKeys.STATUS, LOAD_BALANCER_STATUS_SUCCESS);
                            this.registry.counter(this.loadBalancerCounterId.withTags(counterTags)).increment();
                            cluster = selectedCluster;
                            break;
                        } else {
                            log.error(
                                    "Successfully selected cluster {} using load balancer {} but "
                                            + "it wasn't in original cluster list {}",
                                    selectedCluster.getId().orElse(NO_ID_FOUND), loadBalancerClass, clusters);
                            counterTags.put(MetricsConstants.TagKeys.STATUS, LOAD_BALANCER_STATUS_INVALID);

                            this.registry.counter(this.loadBalancerCounterId.withTags(counterTags)).increment();
                        }
                    } else {
                        counterTags.put(MetricsConstants.TagKeys.STATUS, LOAD_BALANCER_STATUS_NO_PREFERENCE);
                        this.registry.counter(this.loadBalancerCounterId.withTags(counterTags)).increment();
                    }
                } catch (final Exception e) {
                    log.error("Cluster load balancer {} threw exception:", loadBalancer, e);
                    counterTags.put(MetricsConstants.TagKeys.STATUS, LOAD_BALANCER_STATUS_EXCEPTION);
                    this.registry.counter(this.loadBalancerCounterId.withTags(counterTags)).increment();
                }
            }

            // Make sure we selected a cluster
            if (cluster == null) {
                this.noClusterSelectedCounter.increment();
                throw new GeniePreconditionException(
                        "Unable to select a cluster from using any of the available load balancers.");
            }
        }

        log.info("Selected cluster {} for job {}", cluster.getId().orElse(NO_ID_FOUND),
                jobRequest.getId().orElse(NO_ID_FOUND));
        return cluster;
    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(timerTags, t);
        throw t;
    } finally {
        this.registry.timer(selectClusterTimerId.withTags(timerTags)).record(System.nanoTime() - start,
                TimeUnit.NANOSECONDS);
    }
}

From source file:edu.cmu.tetrad.search.TestIndTestConditionalCorrelation.java

public void test8() {
    int NTHREADS = 100;
    long start = System.currentTimeMillis();

    ExecutorService executor = Executors.newFixedThreadPool(NTHREADS);
    for (int i = 0; i < 5000; i++) {
        Runnable worker = new MyRunnable(10000000L + i);
        executor.execute(worker);
    }
    // This will make the executor accept no new threads
    // and finish all existing threads in the queue
    executor.shutdown();
    try {
        // Wait until all threads have finished
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        System.out.println("Finished all threads");
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    long stop = System.currentTimeMillis();

    System.out.println((stop - start) + " ms");
}