Example usage for java.util.concurrent TimeUnit MICROSECONDS

Introduction

This page collects example usages of java.util.concurrent TimeUnit MICROSECONDS.

Prototype

TimeUnit MICROSECONDS

Document

Time unit representing one thousandth of a millisecond.
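
A minimal sketch of the conversion methods every TimeUnit constant provides (the values are illustrative):

import java.util.concurrent.TimeUnit;

public class MicrosDemo {
    public static void main(String[] args) {
        // 1,500 microseconds truncate to 1 millisecond.
        System.out.println(TimeUnit.MICROSECONDS.toMillis(1_500));               // 1
        // One microsecond is 1,000 nanoseconds.
        System.out.println(TimeUnit.MICROSECONDS.toNanos(1));                    // 1000
        // Convert 2 seconds into microseconds.
        System.out.println(TimeUnit.MICROSECONDS.convert(2, TimeUnit.SECONDS));  // 2000000
    }
}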

Usage

From source file:eu.edisonproject.training.wsd.Wikidata.java

private Map<CharSequence, List<CharSequence>> getCategories(Set<Term> terms)
        throws MalformedURLException, InterruptedException, ExecutionException {
    Map<CharSequence, List<CharSequence>> cats = new HashMap<>();

    if (!terms.isEmpty()) {
        int maxT = 2;
        BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(maxT);
        // Idle threads above the core size are reaped after a 500-microsecond keep-alive.
        ExecutorService pool = new ThreadPoolExecutor(maxT, maxT, 500L, TimeUnit.MICROSECONDS, workQueue);

        // Alternative: a bounded, fair queue with a caller-runs rejection policy:
        // ExecutorService pool = new ThreadPoolExecutor(maxT, maxT, 5000L, TimeUnit.MILLISECONDS,
        //         new ArrayBlockingQueue<>(maxT, true), new ThreadPoolExecutor.CallerRunsPolicy());
        Set<Future<Map<CharSequence, List<CharSequence>>>> set1 = new HashSet<>();
        String prop = "P910";
        for (Term t : terms) {
            URL url = new URL(
                    PAGE + "?action=wbgetclaims&format=json&props=&property=" + prop + "&entity=" + t.getUid());
            Logger.getLogger(Wikidata.class.getName()).log(Level.FINE, url.toString());
            WikiRequestor req = new WikiRequestor(url, t.getUid().toString(), 1);
            Future<Map<CharSequence, List<CharSequence>>> future = pool.submit(req);
            set1.add(future);
        }
        pool.shutdown();

        Map<CharSequence, List<CharSequence>> map = new HashMap<>();
        for (Future<Map<CharSequence, List<CharSequence>>> future : set1) {
            while (!future.isDone()) {
                Thread.sleep(10);
            }
            Map<CharSequence, List<CharSequence>> c = future.get();
            if (c != null) {
                map.putAll(c);
            }
        }
        workQueue = new ArrayBlockingQueue<>(maxT);
        pool = new ThreadPoolExecutor(maxT, maxT, 500L, TimeUnit.MICROSECONDS, workQueue);

        // Alternative: a bounded, fair queue with a caller-runs rejection policy:
        // pool = new ThreadPoolExecutor(maxT, maxT, 5000L, TimeUnit.MILLISECONDS,
        //         new ArrayBlockingQueue<>(maxT, true), new ThreadPoolExecutor.CallerRunsPolicy());
        Set<Future<Map<CharSequence, List<CharSequence>>>> set2 = new HashSet<>();
        for (Term t : terms) {
            List<CharSequence> catIDs = map.get(t.getUid());
            for (CharSequence catID : catIDs) {
                URL url = new URL(
                        PAGE + "?action=wbgetentities&format=json&props=labels&languages=en&ids=" + catID);
                Logger.getLogger(Wikidata.class.getName()).log(Level.FINE, url.toString());
                WikiRequestor req = new WikiRequestor(url, t.getUid().toString(), 2);
                Future<Map<CharSequence, List<CharSequence>>> future = pool.submit(req);
                set2.add(future);
            }
        }
        pool.shutdown();

        for (Future<Map<CharSequence, List<CharSequence>>> future : set2) {
            while (!future.isDone()) {
                Thread.sleep(10);
            }
            Map<CharSequence, List<CharSequence>> c = future.get();
            if (c != null) {
                cats.putAll(c);
            }
        }
    }

    return cats;
}
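
Busy-waiting on isDone() as above works, but Future.get() already blocks until a task completes, and the executor itself can be drained with awaitTermination, which accepts MICROSECONDS like any other unit. A minimal sketch of that alternative shutdown, reusing the example's pool variable (the timeout value is illustrative):

pool.shutdown();
// Block up to 500,000 microseconds (0.5 s) for queued tasks to finish.
// awaitTermination throws InterruptedException, which the enclosing method already declares.
if (!pool.awaitTermination(500_000L, TimeUnit.MICROSECONDS)) {
    pool.shutdownNow(); // interrupt whatever is still running
}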

From source file:org.jas.helper.ScrobblerHelper.java

private ActionResult scrobbling(Metadata metadata) throws IOException, InterruptedException {
    User currentUser = controlEngine.get(Model.CURRENT_USER);
    if (StringUtils.isEmpty(currentUser.getUsername())) {
        return ActionResult.NotLogged;
    }

    if (currentUser.getSession() != null) {
        try {
            // According to the Caching Rule (http://www.u-mass.de/lastfm/doc)
            ScheduledFuture<ActionResult> future = scheduler.schedule(
                    new ScrobbleTask(metadata, currentUser.getSession()), REQUEST_PERIOD,
                    TimeUnit.MICROSECONDS);
            return future.get();
        } catch (ExecutionException eex) {
            log.error(eex, eex);
            return ActionResult.Error;
        }
    } else {
        log.error("There isn't a valid session");
        return ActionResult.Sessionless;
    }
}
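
The scheduler in this example is presumably a ScheduledExecutorService; a self-contained sketch of scheduling a Callable with a microsecond delay (the task, delay, and method name are illustrative):

import java.util.concurrent.*;

static String scheduleWithMicrosDelay() throws InterruptedException, ExecutionException {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    // Run the Callable after a 250-microsecond delay; get() blocks for its result.
    ScheduledFuture<String> future = scheduler.schedule(() -> "done", 250L, TimeUnit.MICROSECONDS);
    try {
        return future.get();
    } finally {
        scheduler.shutdown();
    }
}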

From source file:org.apache.druid.benchmark.query.TimeseriesBenchmark.java

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void querySingleIncrementalIndex(Blackhole blackhole) {
    QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(factory, "incIndex",
            new IncrementalIndexSegment(incIndexes.get(0), "incIndex"));

    List<Result<TimeseriesResultValue>> results = TimeseriesBenchmark.runQuery(factory, runner, query);
    for (Result<TimeseriesResultValue> result : results) {
        blackhole.consume(result);
    }
}
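
The Druid benchmarks in this section all rely on JMH's @OutputTimeUnit to report average times in microseconds. A self-contained sketch of the same annotations on a trivial benchmark (the class name and payload are illustrative):

import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;

public class MicrosBenchmark {
    @Benchmark
    @BenchmarkMode(Mode.AverageTime)
    @OutputTimeUnit(TimeUnit.MICROSECONDS) // report scores as us/op rather than s/op
    public long sumSquares() {
        long acc = 0;
        for (long i = 0; i < 1_000; i++) {
            acc += i * i;
        }
        return acc; // returning the result keeps JMH from dead-code eliminating the loop
    }
}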

From source file:org.apache.druid.benchmark.FilterPartitionBenchmark.java

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void readOrFilterCNF(Blackhole blackhole) {
    Filter filter = new NoBitmapSelectorFilter("dimSequential", "199");
    Filter filter2 = new AndFilter(Arrays.asList(new SelectorFilter("dimMultivalEnumerated2", "Corundum"),
            new NoBitmapSelectorFilter("dimMultivalEnumerated", "Bar")));
    Filter orFilter = new OrFilter(Arrays.asList(filter, filter2));

    StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
    Sequence<Cursor> cursors = makeCursors(sa, Filters.convertToCNF(orFilter));
    readCursors(cursors, blackhole);
}

From source file:org.apache.druid.benchmark.query.TopNBenchmark.java

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndex(Blackhole blackhole) {
    List<QueryRunner<Result<TopNResultValue>>> singleSegmentRunners = Lists.newArrayList();
    QueryToolChest toolChest = factory.getToolchest();
    for (int i = 0; i < numSegments; i++) {
        String segmentName = "qIndex" + i;
        QueryRunner<Result<TopNResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(factory, segmentName,
                new QueryableIndexSegment(segmentName, qIndexes.get(i)));
        singleSegmentRunners.add(toolChest.preMergeQueryDecoration(runner));
    }

    QueryRunner theRunner = toolChest.postMergeQueryDecoration(new FinalizeResultsQueryRunner<>(
            toolChest.mergeResults(factory.mergeRunners(executorService, singleSegmentRunners)), toolChest));

    Sequence<Result<TopNResultValue>> queryResult = theRunner.run(QueryPlus.wrap(query), Maps.newHashMap());
    List<Result<TopNResultValue>> results = queryResult.toList();

    for (Result<TopNResultValue> result : results) {
        blackhole.consume(result);
    }
}

From source file:io.druid.benchmark.FilterPartitionBenchmark.java

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void readWithExFnPostFilter(Blackhole blackhole) throws Exception {
    Filter filter = new NoBitmapSelectorDimFilter("dimSequential", "super-199", JS_EXTRACTION_FN).toFilter();

    StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
    Sequence<Cursor> cursors = makeCursors(sa, filter);

    Sequence<List<String>> stringListSeq = readCursors(cursors, blackhole);
    List<String> strings = Sequences
            .toList(Sequences.limit(stringListSeq, 1), Lists.<List<String>>newArrayList()).get(0);
    for (String st : strings) {
        blackhole.consume(st);
    }
}

From source file:io.druid.benchmark.query.TimeseriesBenchmark.java

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void querySingleQueryableIndex(Blackhole blackhole) throws Exception {
    final QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(factory,
            "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)));

    List<Result<TimeseriesResultValue>> results = TimeseriesBenchmark.runQuery(factory, runner, query);
    for (Result<TimeseriesResultValue> result : results) {
        blackhole.consume(result);
    }
}

From source file:com.ebuddy.cassandra.cql.dao.CqlStructuredDataSupport.java

private static Optional<Long> tryGetCurrentMicros() {
    long nowMicros = TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS);
    long lastMicros = lastTime.get();
    boolean success = true;
    if (nowMicros > lastMicros) {
        success = lastTime.compareAndSet(lastMicros, nowMicros);
    } else {
        // Add a pseudo-microsecond to whatever the current lastTime is.
        // In the unlikely event that the counter has been incremented past the
        // actual system clock, the incremented lastTime is used instead of the clock.
        // The implication is that if this method is called more than a thousand
        // times within the same millisecond, the timestamp can drift out of sync
        // with other client VMs. This is deemed highly unlikely.
        nowMicros = lastTime.incrementAndGet();
    }

    return success ? Optional.of(nowMicros) : Optional.<Long>absent();
}
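
The convert call on the first line has a more direct equivalent: each TimeUnit also exposes toMicros, so the millisecond-to-microsecond conversion can be written without naming the source unit twice:

long nowMicros = TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis());
// equivalent to TimeUnit.MICROSECONDS.convert(System.currentTimeMillis(), TimeUnit.MILLISECONDS)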

From source file:org.apache.druid.benchmark.query.TimeseriesBenchmark.java

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void querySingleQueryableIndex(Blackhole blackhole) {
    final QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(factory,
            "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)));

    List<Result<TimeseriesResultValue>> results = TimeseriesBenchmark.runQuery(factory, runner, query);
    for (Result<TimeseriesResultValue> result : results) {
        blackhole.consume(result);
    }
}

From source file:org.apache.nifi.processors.standard.EnforceOrder.java

@Override
protected Collection<ValidationResult> customValidate(ValidationContext validationContext) {
    final List<ValidationResult> results = new ArrayList<>(super.customValidate(validationContext));

    final Long waitTimeoutMicros = validationContext.getProperty(WAIT_TIMEOUT)
            .asTimePeriod(TimeUnit.MICROSECONDS);
    final Long inactiveTimeoutMicros = validationContext.getProperty(INACTIVE_TIMEOUT)
            .asTimePeriod(TimeUnit.MICROSECONDS);

    if (waitTimeoutMicros >= inactiveTimeoutMicros) {
        results.add(
                new ValidationResult.Builder().input(validationContext.getProperty(INACTIVE_TIMEOUT).getValue())
                        .subject(INACTIVE_TIMEOUT.getDisplayName())
                        .explanation(String.format("%s should be longer than %s",
                                INACTIVE_TIMEOUT.getDisplayName(), WAIT_TIMEOUT.getDisplayName()))
                        .valid(false).build());
    }

    return results;
}
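
A note on the unit: assuming the standard NiFi PropertyValue.asTimePeriod behavior, both configured periods are converted into microseconds before the comparison, so the check is unit-consistent; because both sides use the same TimeUnit, the validation outcome would be identical with MILLISECONDS or any other unit.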