Example usage for java.util.concurrent TimeUnit NANOSECONDS

Introduction

This page collects example usages of java.util.concurrent TimeUnit.NANOSECONDS from real-world source files.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
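
Before the project examples, here is a minimal, self-contained sketch (the class name NanosecondsDemo is illustrative, not taken from any project below) of the three TimeUnit.NANOSECONDS patterns that recur in the snippets: converting an elapsed System.nanoTime() measurement to coarser units, converting a value from another unit into nanoseconds, and waiting indefinitely on an ExecutorService.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class NanosecondsDemo {
    public static void main(String[] args) throws InterruptedException {
        // Pattern 1: measure elapsed time with System.nanoTime() and
        // convert the nanosecond result into coarser units.
        long start = System.nanoTime();
        Thread.sleep(25); // stand-in for real work
        long elapsedNanos = System.nanoTime() - start;
        System.out.println("elapsed ms: " + TimeUnit.NANOSECONDS.toMillis(elapsedNanos));
        System.out.println("elapsed s:  " + TimeUnit.NANOSECONDS.toSeconds(elapsedNanos));

        // Pattern 2: convert a value expressed in another unit into nanoseconds,
        // as the Solr getTimeAllowedInNanos() examples below do.
        long timeAllowedNanos = TimeUnit.NANOSECONDS.convert(500, TimeUnit.MILLISECONDS);
        System.out.println("500 ms = " + timeAllowedNanos + " ns");

        // Pattern 3: the "wait indefinitely" idiom used by several examples below.
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("task done"));
        pool.shutdown();
        pool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    }
}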

Usage

From source file: org.apache.bookkeeper.bookie.InterleavedLedgerStorage.java

@Override
public List<DetectedInconsistency> localConsistencyCheck(Optional<RateLimiter> rateLimiter) throws IOException {
    long checkStart = MathUtils.nowInNano();
    LOG.info("Starting localConsistencyCheck");
    long checkedLedgers = 0;
    long checkedPages = 0;
    final MutableLong checkedEntries = new MutableLong(0);
    final MutableLong pageRetries = new MutableLong(0);
    NavigableMap<Long, Boolean> bkActiveLedgersSnapshot = activeLedgers.snapshot();
    final List<DetectedInconsistency> errors = new ArrayList<>();
    for (Long ledger : bkActiveLedgersSnapshot.keySet()) {
        try (LedgerCache.PageEntriesIterable pages = ledgerCache.listEntries(ledger)) {
            for (LedgerCache.PageEntries page : pages) {
                @Cleanup
                LedgerEntryPage lep = page.getLEP();
                MutableBoolean retry = new MutableBoolean(false);
                do {
                    retry.setValue(false);
                    int version = lep.getVersion();

                    MutableBoolean success = new MutableBoolean(true);
                    long start = MathUtils.nowInNano();
                    lep.getEntries((entry, offset) -> {
                        rateLimiter.ifPresent(RateLimiter::acquire);

                        try {
                            entryLogger.checkEntry(ledger, entry, offset);
                            checkedEntries.increment();
                        } catch (EntryLogger.EntryLookupException e) {
                            if (version != lep.getVersion()) {
                                pageRetries.increment();
                                if (lep.isDeleted()) {
                                    LOG.debug("localConsistencyCheck: ledger {} deleted", ledger);
                                } else {
                                    LOG.debug("localConsistencyCheck: concurrent modification, retrying");
                                    retry.setValue(true);
                                    retryCounter.inc();
                                }
                                return false;
                            } else {
                                errors.add(new DetectedInconsistency(ledger, entry, e));
                                LOG.error("Got error: ", e);
                            }
                            success.setValue(false);
                        }
                        return true;
                    });

                    if (success.booleanValue()) {
                        pageScanStats.registerSuccessfulEvent(MathUtils.elapsedNanos(start),
                                TimeUnit.NANOSECONDS);
                    } else {
                        pageScanStats.registerFailedEvent(MathUtils.elapsedNanos(start), TimeUnit.NANOSECONDS);
                    }
                } while (retry.booleanValue());
                checkedPages++;
            }
        } catch (NoLedgerException | FileInfo.FileInfoDeletedException e) {
            if (activeLedgers.containsKey(ledger)) {
                LOG.error("Cannot find ledger {}, should exist, exception is ", ledger, e);
                errors.add(new DetectedInconsistency(ledger, -1, e));
            } else {
                LOG.debug("ledger {} deleted since snapshot taken", ledger);
            }
        } catch (Exception e) {
            throw new IOException("Got other exception in localConsistencyCheck", e);
        }
        checkedLedgers++;
    }
    LOG.info(
            "Finished localConsistencyCheck, took {}s to scan {} ledgers, {} pages, "
                    + "{} entries with {} retries, {} errors",
            TimeUnit.NANOSECONDS.toSeconds(MathUtils.elapsedNanos(checkStart)), checkedLedgers, checkedPages,
            checkedEntries.longValue(), pageRetries.longValue(), errors.size());

    return errors;
}

From source file: MSUmpire.PeptidePeakClusterDetection.PDHandlerBase.java

protected void PeakCurveCorrClustering(XYData mzRange) throws IOException {
    Logger.getRootLogger().info("Grouping isotopic peak curves........");

    LCMSPeakBase.PeakClusters = new ArrayList<>();

    //Thread pool
    final ForkJoinPool fjp = new ForkJoinPool(NoCPUs);
    //        ArrayList<PeakCurveClusteringCorrKDtree> ResultList = new ArrayList<>();
    final ArrayList<ForkJoinTask<ArrayList<PeakCluster>>> ftemp = new ArrayList<>();
    final int end_idx = LCMSPeakBase.UnSortedPeakCurves.size();
    final ArrayList<PeakCluster> resultClusters = new ArrayList<>();
    //For each peak curve
    //        for (PeakCurve Peakcurve : LCMSPeakBase.UnSortedPeakCurves) {
    for (int i = 0; i < end_idx; ++i) {
        final PeakCurve Peakcurve = LCMSPeakBase.UnSortedPeakCurves.get(i);
        if (Peakcurve.TargetMz >= mzRange.getX() && Peakcurve.TargetMz <= mzRange.getY()) {
            //Create a thread unit for doing isotope clustering given a peak curve as the monoisotope peak
            PeakCurveClusteringCorrKDtree unit = new PeakCurveClusteringCorrKDtree(Peakcurve,
                    LCMSPeakBase.GetPeakCurveSearchTree(), parameter, IsotopePatternMap,
                    LCMSPeakBase.StartCharge, LCMSPeakBase.EndCharge, LCMSPeakBase.MaxNoPeakCluster,
                    LCMSPeakBase.MinNoPeakCluster);
            //                ResultList.add(unit);
            ftemp.add(fjp.submit(unit));
        }
        if (step_pccc == -1)
            step_pccc = fjp.getParallelism() * 32;
        final boolean last_iter = i + 1 == end_idx;
        if (ftemp.size() == step_pccc || last_iter) {
            final List<ForkJoinTask<ArrayList<PeakCluster>>> ftemp_sublist_view = last_iter ? ftemp
                    : ftemp.subList(0, step_pccc / 2);
            for (final ForkJoinTask<ArrayList<PeakCluster>> fut : ftemp_sublist_view)
                try {
                    resultClusters.addAll(fut.get());
                } catch (InterruptedException | ExecutionException ex) {
                    throw new RuntimeException(ex);
                }
            ftemp_sublist_view.clear();
            if (!last_iter && fjp.getActiveThreadCount() < fjp.getParallelism()) {
                //                    System.out.println("PeakCurveSmoothingUnit: fjp.getActiveThreadCount()\t"+fjp.getActiveThreadCount()+"\t"+step_pccc);
                step_pccc *= 2;
            }
        }
    }

    assert ftemp.isEmpty() : "temp storage for futures should be empty by end of loop";
    fjp.shutdown();

    try {
        fjp.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Logger.getRootLogger().info("interrupted..");
    }

    for (final PeakCluster peakCluster : resultClusters) {
        //Check whether the monoisotope peak of the cluster has already been grouped into another isotope cluster; if so, skip this peak cluster
        if (!parameter.RemoveGroupedPeaks ||
        //                    !peakCluster.MonoIsotopePeak.ChargeGrouped.contains(peakCluster.Charge)
                !IonChargeHashSet.contains(peakCluster.MonoIsotopePeak.ChargeGrouped, peakCluster.Charge)) {
            peakCluster.Index = LCMSPeakBase.PeakClusters.size() + 1;
            peakCluster.GetConflictCorr();
            LCMSPeakBase.PeakClusters.add(peakCluster);
        }
    }

    System.gc();
    Logger.getRootLogger()
            .info("No of ion clusters:" + LCMSPeakBase.PeakClusters.size() + " (Memory usage:"
                    + Math.round(
                            (Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1048576)
                    + "MB)");
}

From source file: MSUmpire.DIA.DIAPack.java

public void AssignQuant(boolean export) throws IOException, SQLException {
    Logger.getRootLogger().info("Assign peak cluster to identified peptides");
    GenerateClusterScanNomapping();

    ExecutorService executorPool = null;
    for (PeakCluster cluster : MS1FeatureMap.PeakClusters) {
        cluster.Identified = false;
    }

    for (PepIonID pepIonID : IDsummary.GetPepIonList().values()) {
        pepIonID.MS1PeakClusters = new ArrayList<>();
        pepIonID.MS2UnfragPeakClusters = new ArrayList<>();
    }

    //Assign precursor features and grouped fragments for all identified peptide ions for each isolation window
    for (LCMSPeakDIAMS2 DIAWindow : DIAWindows) {
        DIA_window_Quant dia_w = new DIA_window_Quant(GetQ1Name(), GetQ2Name(), GetQ3Name(), ScanClusterMap_Q1,
                ScanClusterMap_Q2, ScanClusterMap_Q3, MS1FeatureMap, DIAWindow, IDsummary, NoCPUs);
        dia_w.run();
    }

    executorPool = Executors.newFixedThreadPool(NoCPUs);

    //Match fragments and calculate quantification for each peptide ion
    for (PepIonID pepIonID : IDsummary.GetPepIonList().values()) {
        DIAAssignQuantUnit quantunit = new DIAAssignQuantUnit(pepIonID, MS1FeatureMap, parameter);
        executorPool.execute(quantunit);
    }
    executorPool.shutdown();

    try {
        executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Logger.getRootLogger().info("interrupted..");
    }

    if (export) {
        ExportID();
    }
}

From source file: org.apache.solr.client.solrj.impl.BackupRequestLBHttpSolrClient.java

/**
 * The following was copied from base class (5.3) to work around private access modifiers
 */
protected long getTimeAllowedInNanos(final SolrRequest req) {
    SolrParams reqParams = req.getParams();
    return reqParams == null ? -1
            : TimeUnit.NANOSECONDS.convert(reqParams.getInt(CommonParams.TIME_ALLOWED, -1),
                    TimeUnit.MILLISECONDS);
}

From source file: com.netflix.genie.core.services.impl.JobCoordinatorServiceImpl.java

private List<Application> getApplications(final JobRequest jobRequest, final Command command)
        throws GenieException {
    final long start = System.nanoTime();
    final Map<String, String> tags = MetricsUtils.newSuccessTagsMap();
    try {
        final String jobId = jobRequest.getId().orElseThrow(() -> new GenieServerException("No job Id"));
        final String commandId = command.getId().orElseThrow(() -> new GenieServerException("No command Id"));
        log.info("Selecting applications for job {} and command {}", jobId, commandId);
        // TODO: What do we do about application status? Should probably check here
        final List<Application> applications = new ArrayList<>();
        if (jobRequest.getApplications().isEmpty()) {
            applications.addAll(this.commandService.getApplicationsForCommand(commandId));
        } else {
            for (final String applicationId : jobRequest.getApplications()) {
                applications.add(this.applicationService.getApplication(applicationId));
            }
        }
        log.info("Selected applications {} for job {}",
                applications.stream().map(Application::getId).filter(Optional::isPresent).map(Optional::get)
                        .reduce((one, two) -> one + "," + two).orElse(NO_ID_FOUND),
                jobRequest.getId().orElse(NO_ID_FOUND));
        return applications;

    } catch (Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.registry.timer(selectApplicationsTimerId.withTags(tags)).record(System.nanoTime() - start,
                TimeUnit.NANOSECONDS);
    }
}

From source file: io.nats.client.ITClusterTest.java

/**
 * Ensures that if a ping is not ponged within the pingInterval, a disconnect/reconnect
 * takes place.
 * <p>
 * We test this by setting maxPingsOut < 0 and setting the pingInterval very small. After
 * the first disconnect, we measure the reconnect-to-disconnect time to ensure it isn't
 * greater than 2 * pingInterval.
 *
 * @throws Exception if anything goes wrong
 */
@Test
public void testPingReconnect() throws Exception {
    final int reconnects = 4;
    final AtomicInteger timesReconnected = new AtomicInteger();
    //        setLogLevel(Level.DEBUG);
    try (NatsServer s1 = runServerOnPort(1222)) {
        Options opts = new Options.Builder(defaultOptions()).dontRandomize().reconnectWait(200).pingInterval(50)
                .maxPingsOut(-1).timeout(1000).build();
        opts.servers = Nats.processUrlArray(testServers);

        final CountDownLatch wg = new CountDownLatch(reconnects);
        final BlockingQueue<Long> rch = new LinkedBlockingQueue<Long>(reconnects);
        final BlockingQueue<Long> dch = new LinkedBlockingQueue<Long>(reconnects);

        opts.disconnectedCb = new DisconnectedCallback() {
            public void onDisconnect(ConnectionEvent event) {
                dch.add(System.nanoTime());
            }
        };

        opts.reconnectedCb = new ReconnectedCallback() {
            @Override
            public void onReconnect(ConnectionEvent event) {
                rch.add(System.nanoTime());
                wg.countDown();
            }
        };

        try (ConnectionImpl c = (ConnectionImpl) opts.connect()) {
            wg.await();
            s1.shutdown();

            // Throw away the first one
            dch.take();
            for (int i = 0; i < reconnects - 1; i++) {
                Long disconnectedAt = dch.take();
                Long reconnectedAt = rch.take();
                Long pingCycle = TimeUnit.NANOSECONDS.toMillis(disconnectedAt - reconnectedAt);
                assertFalse(String.format("Reconnect due to ping took %d msec", pingCycle),
                        pingCycle > 2 * c.getOptions().getPingInterval());
            }
        }
    }
}

From source file: com.tinspx.util.concurrent.TimedSemaphoreTest.java

/**
 * This tests many different variations of thread counts and permit-acquire
 * methods. It is not normally run, as it takes a long time (about 30 minutes
 * when testing only the 20 ms period on my computer).
 */
//    @Test
public void testAll() throws InterruptedException {
    //        System.out.println("Total Tests: " + (10 * 3 * 6 * 6 * ACQUIRES.size() * PERMITS_LIST.size()));

    final Executor executor = Executors.newFixedThreadPool(8, ThreadUtils.daemonThreadFactory());
    final int A = 10;
    //        for(int delay : Arrays.asList(20000000, 50000000, 100000000)) {
    for (int period : Arrays.asList(20000000)) {
        //        for(int threads = 3; threads <= 8; threads++) {
        for (int threads : Arrays.asList(1, 2, 3, 4, 6, 8)) {
            for (int limit = 1; limit <= 6 && limit <= threads + 3; limit++) {
                for (Acquire acquire : ACQUIRES) {
                    for (Permits permits : acquire.permits()) {
                        runTest(executor, new TimedSemaphore(limit, period, TimeUnit.NANOSECONDS),
                                Ticker.systemTicker(), threads, A, acquire, permits);

                    } //permits
                } //acquires
            } //limit
        } //threads
    } //period

    System.out.println("stolen permits: " + stolen.get());
}

From source file: org.apache.solr.client.solrj.impl.LBHttpSolrClient.java

/**
 * @return the time allowed in nanoseconds, or -1 if no TIME_ALLOWED parameter is specified.
 */
private long getTimeAllowedInNanos(final SolrRequest req) {
    SolrParams reqParams = req.getParams();
    return reqParams == null ? -1
            : TimeUnit.NANOSECONDS.convert(reqParams.getInt(CommonParams.TIME_ALLOWED, -1),
                    TimeUnit.MILLISECONDS);
}

From source file: org.apache.accumulo.gc.GarbageCollectWriteAheadLogsTest.java

@Test
public void testTimeToDeleteFalse() {
    HostAndPort address = HostAndPort.fromString("tserver1:9998");
    long wait = AccumuloConfiguration.getTimeInMillis("1h");
    long t1, t2;
    boolean ttd;
    do {
        t1 = System.nanoTime();
        gcwal.clearFirstSeenDead();
        assertFalse("First call should be false and should store the first seen time",
                gcwal.timeToDelete(address, wait));
        ttd = gcwal.timeToDelete(address, wait);
        t2 = System.nanoTime();
    } while (TimeUnit.NANOSECONDS.toMillis(t2 - t1) > (wait / 2)); // redo if the body took more than half of the configured wait, so the timing assertions stay valid

    assertFalse(ttd);
}

From source file: MSUmpire.SpectrumParser.mzXMLParser.java

private List<MzXMLthreadUnit> ParseScans(final BitSet IncludedScans) {
    List<MzXMLthreadUnit> ScanList = new ArrayList<>();
    ArrayList<ForkJoinTask<?>> futures = new ArrayList<>();
    final ForkJoinPool fjp = new ForkJoinPool(NoCPUs);
    Iterator<Entry<Integer, Long>> iter = ScanIndex.entrySet().iterator();
    Entry<Integer, Long> ent = iter.next();
    long currentIdx = ent.getValue();
    int nextScanNo = ent.getKey();
    final RandomAccessFile fileHandler;
    try {
        fileHandler = new RandomAccessFile(filename, "r");
    } catch (FileNotFoundException e) {
        throw new RuntimeException(e);
    }
    byte[] buffer = new byte[1 << 10];
    if (step == -1)
        step = fjp.getParallelism() * 32;
    while (iter.hasNext()) {
        ent = iter.next();
        long startposition = currentIdx;
        long nextposition = ent.getValue();
        int currentScanNo = nextScanNo;
        nextScanNo = ent.getKey();
        currentIdx = nextposition;

        if (IncludedScans.get(currentScanNo)) {
            try {
                final int bufsize = (int) (nextposition - startposition);
                if (buffer.length < bufsize)
                    buffer = new byte[Math.max(bufsize, buffer.length << 1)];
                //                    byte[] buffer = new byte[bufsize];
                //                    RandomAccessFile fileHandler = new RandomAccessFile(filename, "r");
                fileHandler.seek(startposition);
                fileHandler.readFully(buffer, 0, bufsize); // readFully guarantees the whole scan is read; plain read() may return fewer bytes
                //                    fileHandler.close();
                //                    String xmltext = new String(buffer);
                String xmltext = new String(buffer, 0, bufsize, StandardCharsets.ISO_8859_1);
                if (ent.getKey() == Integer.MAX_VALUE) {
                    xmltext = xmltext.replaceAll("</msRun>", "");
                }
                boolean ReadPeak = true;
                final MzXMLthreadUnit unit = new MzXMLthreadUnit(xmltext, parameter, datatype, ReadPeak);
                futures.add(fjp.submit(unit));
                ScanList.add(unit);

                if ((ScanList.size() % step) == 0) {
                    futures.get(futures.size() - step).get();
                    if (iter.hasNext() && fjp.getActiveThreadCount() < fjp.getParallelism()) {
                        step *= 2;
                        //                            System.out.println("MzXMLthreadUnit: fjp.getActiveThreadCount()\t" + fjp.getActiveThreadCount()+"\t"+step);
                    }
                }
            } catch (Exception ex) {
                Logger.getRootLogger().error(ExceptionUtils.getStackTrace(ex));
            }
        }
    }
    try {
        fileHandler.close();
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
    fjp.shutdown();
    try {
        fjp.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException ex) {
        throw new RuntimeException(ex);
    }
    //        for (MzXMLthreadUnit unit : ScanList) {
    //            executorPool.execute(unit);
    //        }
    //        executorPool.shutdown();
    //
    //        try {
    //            executorPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    //        } catch (InterruptedException e) {
    //            Logger.getRootLogger().info("interrupted..");
    //        }
    return ScanList;
}