List of usage examples for java.util.concurrent.TimeUnit.MICROSECONDS

TimeUnit.MICROSECONDS is the TimeUnit constant for microseconds (one thousandth of a millisecond). The examples below, drawn from open-source projects, use it to convert between time units, to bound timed waits, and to set the output unit of JMH benchmarks.
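Before the project examples, a minimal self-contained sketch of what TimeUnit.MICROSECONDS itself provides: unit conversion and timed blocking. This sketch is illustrative and not taken from any of the listed projects.

import java.util.concurrent.TimeUnit;

public class MicrosecondsDemo {
    public static void main(String[] args) throws InterruptedException {
        // Convert between units: 1,500 microseconds is 1 millisecond (truncated)
        long micros = 1_500L;
        System.out.println(TimeUnit.MICROSECONDS.toMillis(micros)); // 1
        System.out.println(TimeUnit.MICROSECONDS.toNanos(micros));  // 1500000

        // Convert the other way: how many microseconds in 2 seconds?
        System.out.println(TimeUnit.MICROSECONDS.convert(2, TimeUnit.SECONDS)); // 2000000

        // Timed blocking: sleep for 500 microseconds (actual granularity depends on the OS)
        TimeUnit.MICROSECONDS.sleep(500);
    }
}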
From source file:io.druid.benchmark.query.TimeseriesBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryFilteredSingleQueryableIndex(Blackhole blackhole) throws Exception {
    final QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(factory,
            "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)));

    DimFilter filter = new SelectorDimFilter("dimSequential", "399", null);
    Query filteredQuery = query.withDimFilter(filter);

    List<Result<TimeseriesResultValue>> results = TimeseriesBenchmark.runQuery(factory, runner, filteredQuery);
    for (Result<TimeseriesResultValue> result : results) {
        blackhole.consume(result);
    }
}
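The @OutputTimeUnit(TimeUnit.MICROSECONDS) annotation above is the common thread in the Druid benchmarks on this page: it changes only how JMH reports scores (us/op instead of the default), not how it measures. As a reference point, a minimal self-contained JMH benchmark using the same annotations might look like the sketch below; the class name and workload are hypothetical.

import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;

@State(Scope.Benchmark)
public class StringConcatBenchmark {
    private String prefix = "qIndex";

    @Benchmark
    @BenchmarkMode(Mode.AverageTime)          // measure mean time per invocation
    @OutputTimeUnit(TimeUnit.MICROSECONDS)    // report scores in microseconds per op
    public void concat(Blackhole blackhole) {
        for (int i = 0; i < 100; i++) {
            blackhole.consume(prefix + i);    // Blackhole prevents dead-code elimination
        }
    }
}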
From source file:org.apache.druid.benchmark.FilterPartitionBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void readComplexOrFilter(Blackhole blackhole) {
    DimFilter dimFilter1 = new OrDimFilter(Arrays.asList(
            new SelectorDimFilter("dimSequential", "199", null),
            new AndDimFilter(Arrays.asList(
                    new NoBitmapSelectorDimFilter("dimMultivalEnumerated2", "Corundum", null),
                    new SelectorDimFilter("dimMultivalEnumerated", "Bar", null)))));
    DimFilter dimFilter2 = new OrDimFilter(Arrays.asList(
            new SelectorDimFilter("dimSequential", "299", null),
            new SelectorDimFilter("dimSequential", "399", null),
            new AndDimFilter(Arrays.asList(
                    new NoBitmapSelectorDimFilter("dimMultivalEnumerated2", "Xylophone", null),
                    new SelectorDimFilter("dimMultivalEnumerated", "Foo", null)))));
    DimFilter dimFilter3 = new OrDimFilter(Arrays.asList(
            dimFilter1,
            dimFilter2,
            new AndDimFilter(Arrays.asList(
                    new NoBitmapSelectorDimFilter("dimMultivalEnumerated2", "Orange", null),
                    new SelectorDimFilter("dimMultivalEnumerated", "World", null)))));

    StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
    Sequence<Cursor> cursors = makeCursors(sa, dimFilter3.toFilter());
    readCursors(cursors, blackhole);
}
From source file:io.druid.benchmark.FilterPartitionBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void readOrFilter(Blackhole blackhole) throws Exception {
    Filter filter = new NoBitmapSelectorFilter("dimSequential", "199");
    Filter filter2 = new AndFilter(Arrays.<Filter>asList(
            new SelectorFilter("dimMultivalEnumerated2", "Corundum"),
            new NoBitmapSelectorFilter("dimMultivalEnumerated", "Bar")));
    Filter orFilter = new OrFilter(Arrays.<Filter>asList(filter, filter2));

    StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
    Sequence<Cursor> cursors = makeCursors(sa, orFilter);

    Sequence<List<String>> stringListSeq = readCursors(cursors, blackhole);
    List<String> strings = Sequences
            .toList(Sequences.limit(stringListSeq, 1), Lists.<List<String>>newArrayList())
            .get(0);
    for (String st : strings) {
        blackhole.consume(st);
    }
}
From source file:com.tinspx.util.concurrent.DelayedSemaphoreTest.java
private String acquire(DelayedSemaphore ls, int count) throws InterruptedException {
    final long MAX_WAIT = TimeUnit.MICROSECONDS.toNanos(50000),
            MAX_CHECK = TimeUnit.MICROSECONDS.toNanos(75000);
    switch (count % 3) {
    case 0:
        ls.acquire();
        return null;
    case 1:
        while (!ls.tryAcquire()) {
        }
        return null;
    case 2:
        while (true) {
            long time = ls.ticker().read();
            boolean acquired = ls.tryAcquire(MAX_WAIT, TimeUnit.NANOSECONDS);
            time = ls.ticker().read() - time;
            if (time > MAX_CHECK) {
                return String.format("%d > %d (max %d), acquired: %b", time, MAX_CHECK, MAX_WAIT, acquired);
            }
            // System.out.println("passed");
            if (acquired) {
                return null;
            } else {
                attempts++;
            }
        }
    default:
        throw new AssertionError();
    }
}
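DelayedSemaphore is a class specific to that project, but the pattern of expressing a microsecond budget in nanoseconds via TimeUnit.MICROSECONDS.toNanos works the same with the JDK's own java.util.concurrent.Semaphore, whose tryAcquire accepts a timeout in any TimeUnit. A rough standalone sketch:

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class TimedAcquireDemo {
    public static void main(String[] args) throws InterruptedException {
        Semaphore semaphore = new Semaphore(1);
        semaphore.acquire(); // hold the only permit so the timed attempt below must wait

        // Express a 50,000 us budget in nanoseconds, as the test above does
        long maxWaitNanos = TimeUnit.MICROSECONDS.toNanos(50_000);

        long start = System.nanoTime();
        boolean acquired = semaphore.tryAcquire(maxWaitNanos, TimeUnit.NANOSECONDS);
        long elapsedMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - start);

        System.out.println("acquired=" + acquired + " after ~" + elapsedMicros + " us");
    }
}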
From source file:org.apache.druid.benchmark.query.TimeseriesBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryFilteredSingleQueryableIndex(Blackhole blackhole) {
    final QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(factory,
            "qIndex", new QueryableIndexSegment("qIndex", qIndexes.get(0)));

    DimFilter filter = new SelectorDimFilter("dimSequential", "399", null);
    Query filteredQuery = query.withDimFilter(filter);

    List<Result<TimeseriesResultValue>> results = TimeseriesBenchmark.runQuery(factory, runner, filteredQuery);
    for (Result<TimeseriesResultValue> result : results) {
        blackhole.consume(result);
    }
}
From source file:io.druid.benchmark.query.TimeseriesBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndex(Blackhole blackhole) throws Exception {
    List<QueryRunner<Result<TimeseriesResultValue>>> singleSegmentRunners = Lists.newArrayList();
    QueryToolChest toolChest = factory.getToolchest();
    for (int i = 0; i < numSegments; i++) {
        String segmentName = "qIndex" + i;
        QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(factory,
                segmentName, new QueryableIndexSegment(segmentName, qIndexes.get(i)));
        singleSegmentRunners.add(toolChest.preMergeQueryDecoration(runner));
    }

    QueryRunner theRunner = toolChest.postMergeQueryDecoration(new FinalizeResultsQueryRunner<>(
            toolChest.mergeResults(factory.mergeRunners(executorService, singleSegmentRunners)), toolChest));

    Sequence<Result<TimeseriesResultValue>> queryResult = theRunner.run(QueryPlus.wrap(query),
            Maps.<String, Object>newHashMap());
    List<Result<TimeseriesResultValue>> results = Sequences.toList(queryResult,
            Lists.<Result<TimeseriesResultValue>>newArrayList());

    for (Result<TimeseriesResultValue> result : results) {
        blackhole.consume(result);
    }
}
From source file:io.druid.benchmark.query.GroupByBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void querySingleIncrementalIndex(Blackhole blackhole) throws Exception {
    QueryRunner<Row> runner = QueryBenchmarkUtil.makeQueryRunner(factory, "incIndex",
            new IncrementalIndexSegment(anIncrementalIndex, "incIndex"));

    List<Row> results = GroupByBenchmark.runQuery(factory, runner, query);
    for (Row result : results) {
        blackhole.consume(result);
    }
}
From source file:io.druid.benchmark.FilterPartitionBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void readOrFilterCNF(Blackhole blackhole) throws Exception {
    Filter filter = new NoBitmapSelectorFilter("dimSequential", "199");
    Filter filter2 = new AndFilter(Arrays.<Filter>asList(
            new SelectorFilter("dimMultivalEnumerated2", "Corundum"),
            new NoBitmapSelectorFilter("dimMultivalEnumerated", "Bar")));
    Filter orFilter = new OrFilter(Arrays.<Filter>asList(filter, filter2));

    StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
    Sequence<Cursor> cursors = makeCursors(sa, Filters.convertToCNF(orFilter));

    Sequence<List<String>> stringListSeq = readCursors(cursors, blackhole);
    List<String> strings = Sequences
            .toList(Sequences.limit(stringListSeq, 1), Lists.<List<String>>newArrayList())
            .get(0);
    for (String st : strings) {
        blackhole.consume(st);
    }
}
From source file:org.apache.druid.benchmark.query.TimeseriesBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndex(Blackhole blackhole) {
    List<QueryRunner<Result<TimeseriesResultValue>>> singleSegmentRunners = Lists.newArrayList();
    QueryToolChest toolChest = factory.getToolchest();
    for (int i = 0; i < numSegments; i++) {
        String segmentName = "qIndex" + i;
        QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(factory,
                segmentName, new QueryableIndexSegment(segmentName, qIndexes.get(i)));
        singleSegmentRunners.add(toolChest.preMergeQueryDecoration(runner));
    }

    QueryRunner theRunner = toolChest.postMergeQueryDecoration(new FinalizeResultsQueryRunner<>(
            toolChest.mergeResults(factory.mergeRunners(executorService, singleSegmentRunners)), toolChest));

    Sequence<Result<TimeseriesResultValue>> queryResult = theRunner.run(QueryPlus.wrap(query),
            Maps.newHashMap());
    List<Result<TimeseriesResultValue>> results = queryResult.toList();

    for (Result<TimeseriesResultValue> result : results) {
        blackhole.consume(result);
    }
}
From source file:org.apache.hadoop.hdfs.qjournal.server.Journal.java
/**
 * Write a batch of edits to the journal.
 * {@see QJournalProtocol#journal(RequestInfo, long, long, int, byte[])}
 */
synchronized void journal(RequestInfo reqInfo, long segmentTxId, long firstTxnId, int numTxns, byte[] records)
        throws IOException {
    checkFormatted();
    checkWriteRequest(reqInfo);

    checkSync(curSegment != null, "Can't write, no segment open");

    if (curSegmentTxId != segmentTxId) {
        // Sanity check: it is possible that the writer will fail IPCs
        // on both the finalize() and then the start() of the next segment.
        // This could cause us to continue writing to an old segment
        // instead of rolling to a new one, which breaks one of the
        // invariants in the design. If it happens, abort the segment
        // and throw an exception.
        JournalOutOfSyncException e = new JournalOutOfSyncException(
                "Writer out of sync: it thinks it is writing segment " + segmentTxId
                        + " but current segment is " + curSegmentTxId);
        abortCurSegment();
        throw e;
    }

    checkSync(nextTxId == firstTxnId, "Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId);

    long lastTxnId = firstTxnId + numTxns - 1;
    if (LOG.isTraceEnabled()) {
        LOG.trace("Writing txid " + firstTxnId + "-" + lastTxnId);
    }

    // If the edit has already been marked as committed, we know
    // it has been fsynced on a quorum of other nodes, and we are
    // "catching up" with the rest. Hence we do not need to fsync.
    boolean isLagging = lastTxnId <= committedTxnId.get();
    boolean shouldFsync = !isLagging;

    curSegment.writeRaw(records, 0, records.length);
    curSegment.setReadyToFlush();
    StopWatch sw = new StopWatch();
    sw.start();
    curSegment.flush(shouldFsync);
    sw.stop();

    long nanoSeconds = sw.now();
    metrics.addSync(TimeUnit.MICROSECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS));
    long milliSeconds = TimeUnit.MILLISECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS);

    if (milliSeconds > WARN_SYNC_MILLIS_THRESHOLD) {
        LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId + " took " + milliSeconds + "ms");
    }

    if (isLagging) {
        // This batch of edits has already been committed on a quorum of other
        // nodes. So, we are in "catch up" mode. This gets its own metric.
        metrics.batchesWrittenWhileLagging.incr(1);
    }
    metrics.batchesWritten.incr(1);
    metrics.bytesWritten.incr(records.length);
    metrics.txnsWritten.incr(numTxns);

    highestWrittenTxId = lastTxnId;
    nextTxId = lastTxnId + 1;
}
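The TimeUnit.MICROSECONDS usage in this example is the latency metric: the StopWatch measures in nanoseconds, and TimeUnit.MICROSECONDS.convert(nanos, TimeUnit.NANOSECONDS) rescales the value before recording it. The same pattern works with plain System.nanoTime(); the sketch below assumes a hypothetical doWork() in place of curSegment.flush().

import java.util.concurrent.TimeUnit;

public class SyncTimingDemo {
    public static void main(String[] args) {
        long start = System.nanoTime();
        double ignored = doWork();
        long nanos = System.nanoTime() - start;

        // Rescale the nanosecond measurement to microseconds for the metric;
        // equivalent to TimeUnit.NANOSECONDS.toMicros(nanos).
        long micros = TimeUnit.MICROSECONDS.convert(nanos, TimeUnit.NANOSECONDS);
        System.out.println("sync took " + micros + " us");
    }

    private static double doWork() {
        // stand-in for the flushed write being timed above
        double acc = 0;
        for (int i = 0; i < 1_000_000; i++) {
            acc += Math.sqrt(i);
        }
        return acc;
    }
}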