TimeUnit MICROSECONDS
Usage examples for java.util.concurrent.TimeUnit.MICROSECONDS, collected from open source projects. Each snippet is listed under the source file it comes from.
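Before the project examples, a minimal sketch of what the constant itself provides: TimeUnit.MICROSECONDS converts between time units and parameterizes timed operations such as sleeps and pool keep-alives. The class name below is illustrative, not from any of the projects listed here.

import java.util.concurrent.TimeUnit;

public class MicrosecondsDemo {
    public static void main(String[] args) throws InterruptedException {
        // 1,500,000 microseconds is 1,500 milliseconds.
        System.out.println(TimeUnit.MICROSECONDS.toMillis(1_500_000)); // 1500

        // Convert 2 seconds into microseconds.
        System.out.println(TimeUnit.MICROSECONDS.convert(2, TimeUnit.SECONDS)); // 2000000

        // Sleep for 250 microseconds (actual granularity depends on the OS timer).
        TimeUnit.MICROSECONDS.sleep(250);
    }
}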
From source file:com.linkedin.pinot.perf.BenchmarkOfflineIndexReader.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public double intDictionary() {
    int length = _intDictionary.length();
    int ret = 0;
    for (int i = 0; i < NUM_ROUNDS; i++) {
        int value = _intDictionary.getIntValue(RANDOM.nextInt(length));
        ret += _intDictionary.indexOf(value);
    }
    return ret;
}
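Most examples on this page follow the same JMH pattern: @OutputTimeUnit(TimeUnit.MICROSECONDS) tells the harness to report scores in microseconds per operation, and the method returns (or consumes into a Blackhole) whatever it computes so the JIT cannot eliminate the work. A minimal self-contained benchmark in that style; the class and method names are illustrative, not from the Pinot source:

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;

@State(Scope.Benchmark)
public class MicrosBenchmark {

    @Benchmark
    @BenchmarkMode(Mode.AverageTime)
    @OutputTimeUnit(TimeUnit.MICROSECONDS) // report average time per call in microseconds
    public double sumRandom() {
        double sum = 0;
        for (int i = 0; i < 1_000; i++) {
            sum += ThreadLocalRandom.current().nextDouble();
        }
        return sum; // returning the result keeps the loop from being dead-code eliminated
    }
}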
From source file:io.druid.benchmark.FilteredAggregatorBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void querySingleIncrementalIndex(Blackhole blackhole) throws Exception {
    QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(
            factory, "incIndex", new IncrementalIndexSegment(incIndex, "incIndex"));

    List<Result<TimeseriesResultValue>> results = FilteredAggregatorBenchmark.runQuery(factory, runner, query);
    for (Result<TimeseriesResultValue> result : results) {
        blackhole.consume(result);
    }
}
From source file:io.druid.benchmark.FilterPartitionBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void timeFilterHalf(Blackhole blackhole) throws Exception {
    StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
    Sequence<Cursor> cursors = makeCursors(sa, timeFilterHalf);

    Sequence<List<Long>> longListSeq = readCursorsLong(cursors, blackhole);
    List<Long> strings = Sequences
            .toList(Sequences.limit(longListSeq, 1), Lists.<List<Long>>newArrayList())
            .get(0);
    for (Long st : strings) {
        blackhole.consume(st);
    }
}
From source file:org.apache.druid.benchmark.FilterPartitionBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void timeFilterAll(Blackhole blackhole) {
    StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
    Sequence<Cursor> cursors = makeCursors(sa, timeFilterAll);
    readCursorsLong(cursors, blackhole);
}
From source file:nl.uva.sne.disambiguators.Wikidata.java
private Map<String, List<String>> getbroaderIDS(Set<Term> terms)
        throws MalformedURLException, InterruptedException, ExecutionException {
    Map<String, List<String>> map = new HashMap<>();
    if (!terms.isEmpty()) {
        int maxT = 2;
        BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(maxT);
        // Note: with corePoolSize == maximumPoolSize, the 500-microsecond keep-alive
        // only takes effect if allowCoreThreadTimeOut(true) is set.
        ExecutorService pool = new ThreadPoolExecutor(maxT, maxT, 500L, TimeUnit.MICROSECONDS, workQueue);
        // Alternative left commented out in the source: a 5-second keep-alive with a caller-runs policy:
        // new ThreadPoolExecutor(maxT, maxT, 5000L, TimeUnit.MILLISECONDS,
        //         new ArrayBlockingQueue<>(maxT, true), new ThreadPoolExecutor.CallerRunsPolicy());
        Set<Future<Map<String, List<String>>>> set1 = new HashSet<>();
        String prop = "P31";
        for (Term t : terms) {
            URL url = new URL(page + "?action=wbgetclaims&format=json&props=&property=" + prop
                    + "&entity=" + t.getUID());
            System.err.println(url);
            WikiRequestor req = new WikiRequestor(url, t.getUID(), 1);
            Future<Map<String, List<String>>> future = pool.submit(req);
            set1.add(future);
        }
        pool.shutdown();
        for (Future<Map<String, List<String>>> future : set1) {
            while (!future.isDone()) {
                Thread.sleep(10);
            }
            Map<String, List<String>> c = future.get();
            if (c != null) {
                map.putAll(c);
            }
        }
    }
    return map;
}
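Polling isDone() with a sleep burns cycles for no benefit, since Future.get() already blocks until the result is ready. A minimal sketch of the same collection loop without the polling, assuming the set1 futures from above:

// Blocks until each task completes; no polling loop needed.
for (Future<Map<String, List<String>>> future : set1) {
    Map<String, List<String>> c = future.get(); // throws ExecutionException if the task failed
    if (c != null) {
        map.putAll(c);
    }
}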
From source file:org.apache.synapse.transport.amqp.pollingtask.AMQPTransportPollingTaskFactory.java
private static TimeUnit getTimeUnit(String timeUnit) {
    if ("days".equals(timeUnit)) {
        return TimeUnit.DAYS;
    } else if ("hours".equals(timeUnit)) {
        return TimeUnit.HOURS;
    } else if ("minutes".equals(timeUnit)) {
        return TimeUnit.MINUTES;
    } else if ("seconds".equals(timeUnit)) {
        return TimeUnit.SECONDS;
    } else if ("milliseconds".equals(timeUnit)) {
        return TimeUnit.MILLISECONDS;
    } else {
        // Fall back to microseconds for any unrecognized unit name.
        return TimeUnit.MICROSECONDS;
    }
}
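Since TimeUnit is an enum whose constant names match these strings uppercased, the same mapping can be written without the if/else chain by delegating to TimeUnit.valueOf. A minimal sketch, not part of the original transport code, keeping the same microseconds fallback:

import java.util.Locale;
import java.util.concurrent.TimeUnit;

static TimeUnit parseTimeUnit(String name) {
    if (name == null) {
        return TimeUnit.MICROSECONDS;
    }
    try {
        // "milliseconds" -> "MILLISECONDS" -> TimeUnit.MILLISECONDS, and so on.
        return TimeUnit.valueOf(name.trim().toUpperCase(Locale.ROOT));
    } catch (IllegalArgumentException e) {
        return TimeUnit.MICROSECONDS; // same fallback as the original
    }
}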
From source file:io.druid.benchmark.query.SelectBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryIncrementalIndex(Blackhole blackhole) throws Exception {
    SelectQuery queryCopy = query.withPagingSpec(PagingSpec.newSpec(pagingThreshold));

    String segmentId = "incIndex";
    QueryRunner<Row> runner = QueryBenchmarkUtil.makeQueryRunner(factory, segmentId,
            new IncrementalIndexSegment(incIndexes.get(0), segmentId));

    // Page through results until an empty page signals the end.
    boolean done = false;
    while (!done) {
        List<Result<SelectResultValue>> results = SelectBenchmark.runQuery(factory, runner, queryCopy);
        SelectResultValue result = results.get(0).getValue();
        if (result.getEvents().size() == 0) {
            done = true;
        } else {
            for (EventHolder eh : result.getEvents()) {
                blackhole.consume(eh);
            }
            queryCopy = incrementQueryPagination(queryCopy, result);
        }
    }
}
From source file:org.apache.druid.benchmark.FilterPartitionBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void readWithPreFilter(Blackhole blackhole) {
    Filter filter = new SelectorFilter("dimSequential", "199");
    StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
    Sequence<Cursor> cursors = makeCursors(sa, filter);
    readCursors(cursors, blackhole);
}
From source file:com.linkedin.pinot.perf.BenchmarkOfflineIndexReader.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public int longDictionary() {
    int length = _longDictionary.length();
    int ret = 0;
    for (int i = 0; i < NUM_ROUNDS; i++) {
        long value = _longDictionary.getLongValue(RANDOM.nextInt(length));
        ret += _longDictionary.indexOf(value);
    }
    return ret;
}
From source file:org.apache.druid.benchmark.FilteredAggregatorBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void querySingleQueryableIndex(Blackhole blackhole) {
    final QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(
            factory, "qIndex", new QueryableIndexSegment("qIndex", qIndex));

    List<Result<TimeseriesResultValue>> results = FilteredAggregatorBenchmark.runQuery(factory, runner, query);
    for (Result<TimeseriesResultValue> result : results) {
        blackhole.consume(result);
    }
}