List of usage examples for java.util.concurrent.TimeUnit.MICROSECONDS, collected from open-source projects.

TimeUnit.MICROSECONDS
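Before the project examples, here is a minimal self-contained sketch of what the MICROSECONDS unit offers; every call below is standard JDK java.util.concurrent API:

import java.util.concurrent.TimeUnit;

public class MicrosecondsDemo {
    public static void main(String[] args) throws InterruptedException {
        // Convert 1,500,000 microseconds to coarser units (conversions truncate, they do not round).
        System.out.println(TimeUnit.MICROSECONDS.toMillis(1_500_000L));  // 1500
        System.out.println(TimeUnit.MICROSECONDS.toSeconds(1_500_000L)); // 1

        // Convert the other way: 2 seconds expressed in microseconds.
        System.out.println(TimeUnit.MICROSECONDS.convert(2L, TimeUnit.SECONDS)); // 2000000

        // Sleep for 500 microseconds (actual granularity depends on the platform timer).
        TimeUnit.MICROSECONDS.sleep(500L);
    }
}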
From source file:com.vmware.photon.controller.housekeeper.dcp.ImageDatastoreSweeperService.java
/**
 * Initialize state with defaults.
 *
 * @param current
 */
private void initializeState(State current) {
    InitializationUtils.initialize(current);

    if (current.taskState.stage == TaskState.TaskStage.CREATED) {
        current.taskState.stage = TaskState.TaskStage.STARTED;
        current.taskState.subStage = TaskState.SubStage.GET_HOST_INFO;
    }

    if (current.taskState.stage == TaskState.TaskStage.STARTED && current.taskState.subStage == null) {
        current.taskState.subStage = TaskState.SubStage.GET_HOST_INFO;
    }

    if (current.documentExpirationTimeMicros <= 0) {
        current.documentExpirationTimeMicros = ServiceUtils
                .computeExpirationTime(ServiceUtils.DEFAULT_DOC_EXPIRATION_TIME_MICROS);
    }

    if (null == current.scanTimeout) {
        // convert the expiration interval from microseconds to seconds
        current.scanTimeout = TimeUnit.MICROSECONDS.toSeconds(current.documentExpirationTimeMicros);
    }

    if (null == current.sweepTimeout) {
        current.sweepTimeout = TimeUnit.MICROSECONDS.toSeconds(current.documentExpirationTimeMicros);
    }
}
From source file:org.wso2.andes.server.virtualhost.plugins.SlowConsumerDetectionConfigurationTest.java
/**
 * Failure testing:
 *
 * Test that negative delays are invalid.
 *
 * Delay must be a positive value; a negative delay does not make sense.
 *
 * A ConfigurationException with a useful message should be thrown here.
 */
public void testConfigLoadingInValidDelayNegative() {
    SlowConsumerDetectionConfiguration config = new SlowConsumerDetectionConfiguration();

    XMLConfiguration xmlconfig = new XMLConfiguration();
    xmlconfig.addProperty("delay", "-10");
    xmlconfig.addProperty("timeunit", TimeUnit.MICROSECONDS.toString());

    // Create a CompositeConfiguration as this is what the broker uses
    CompositeConfiguration composite = new CompositeConfiguration();
    composite.addConfiguration(xmlconfig);

    try {
        config.setConfiguration("", composite);
        fail("Configuration should fail to validate");
    } catch (ConfigurationException e) {
        Throwable cause = e.getCause();

        assertNotNull("Configuration Exception must not be null.", cause);
        assertEquals("Cause not correct", ConfigurationException.class, cause.getClass());
        assertEquals("Incorrect message.",
                "SlowConsumerDetectionConfiguration: 'delay' must be a Positive Long value.",
                cause.getMessage());
    }
}
From source file:tools.descartes.wcf.management.timeSeries.TimeSeries.java
private TimeUnit getBiggerTimeUnit(TimeUnit timeUnit) {
    TimeUnit higherUnit = null;
    switch (timeUnit) {
    case NANOSECONDS:
        higherUnit = TimeUnit.MICROSECONDS;
        break;
    case MICROSECONDS:
        higherUnit = TimeUnit.MILLISECONDS;
        break;
    case MILLISECONDS:
        higherUnit = TimeUnit.SECONDS;
        break;
    case SECONDS:
        higherUnit = TimeUnit.MINUTES;
        break;
    case MINUTES:
        higherUnit = TimeUnit.HOURS;
        break;
    case HOURS:
        higherUnit = TimeUnit.DAYS;
        break;
    default:
        break;
    }
    return higherUnit;
}
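TimeUnit itself has no "next coarser unit" operation, hence the hand-written switch above; note that it returns null for DAYS, the coarsest unit. A hypothetical caller might look like this (illustrative only, not from the original project):

TimeUnit next = getBiggerTimeUnit(TimeUnit.MICROSECONDS); // MILLISECONDS
TimeUnit top  = getBiggerTimeUnit(TimeUnit.DAYS);         // null: nothing coarser than DAYS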
From source file:com.twitter.distributedlog.LedgerHandleCache.java
/**
 * Open the log segment.
 *
 * @param metadata
 *          the log segment metadata
 * @param fence
 *          whether to fence the log segment during open
 * @return a future presenting the open result.
 */
public Future<LedgerDescriptor> asyncOpenLedger(LogSegmentMetadata metadata, boolean fence) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    final OpStatsLogger openStatsLogger = fence ? openStats : openNoRecoveryStats;
    final Promise<LedgerDescriptor> promise = new Promise<LedgerDescriptor>();
    final LedgerDescriptor ledgerDesc = new LedgerDescriptor(metadata.getLedgerId(),
            metadata.getLogSegmentSequenceNumber(), fence);
    RefCountedLedgerHandle refhandle = handlesMap.get(ledgerDesc);
    if (null == refhandle) {
        asyncOpenLedger(ledgerDesc, new AsyncCallback.OpenCallback() {
            @Override
            public void openComplete(int rc, LedgerHandle lh, Object ctx) {
                if (BKException.Code.OK != rc) {
                    promise.setException(BKException.create(rc));
                    return;
                }
                RefCountedLedgerHandle newRefHandle = new RefCountedLedgerHandle(lh);
                RefCountedLedgerHandle oldRefHandle = handlesMap.putIfAbsent(ledgerDesc, newRefHandle);
                if (null != oldRefHandle) {
                    oldRefHandle.addRef();
                    if (newRefHandle.removeRef()) {
                        newRefHandle.handle.asyncClose(new AsyncCallback.CloseCallback() {
                            @Override
                            public void closeComplete(int i, LedgerHandle ledgerHandle, Object o) {
                                // No action necessary
                            }
                        }, null);
                    }
                }
                promise.setValue(ledgerDesc);
            }
        }, null);
    } else {
        refhandle.addRef();
        promise.setValue(ledgerDesc);
    }
    return promise.addEventListener(new FutureEventListener<LedgerDescriptor>() {
        @Override
        public void onSuccess(LedgerDescriptor value) {
            // record the open latency in microseconds
            openStatsLogger.registerSuccessfulEvent(stopwatch.elapsed(TimeUnit.MICROSECONDS));
        }

        @Override
        public void onFailure(Throwable cause) {
            openStatsLogger.registerFailedEvent(stopwatch.elapsed(TimeUnit.MICROSECONDS));
        }
    });
}
From source file:nl.uva.sne.disambiguators.WikipediaOnline.java
private Map<String, List<String>> getCategories(Set<Term> terms)
        throws MalformedURLException, InterruptedException, ExecutionException {
    int maxT = 3;
    BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(maxT);
    // NOTE: the keep-alive time here is 500 microseconds
    ExecutorService pool = new ThreadPoolExecutor(maxT, maxT, 500L, TimeUnit.MICROSECONDS, workQueue);
    //        ExecutorService pool = new ThreadPoolExecutor(maxT, maxT,
    //                5000L, TimeUnit.MILLISECONDS,
    //                new ArrayBlockingQueue<>(maxT, true), new ThreadPoolExecutor.CallerRunsPolicy());
    Map<String, List<String>> cats = new HashMap<>();
    Set<Future<Map<String, List<String>>>> set = new HashSet<>();
    for (Term t : terms) {
        URL url = new URL(page + "?action=query&format=json&prop=categories&pageids=" + t.getUID());
        System.err.println(url);
        WikiRequestor req = new WikiRequestor(url, t.getUID(), 0);
        Future<Map<String, List<String>>> future = pool.submit(req);
        set.add(future);
    }
    pool.shutdown();

    for (Future<Map<String, List<String>>> future : set) {
        while (!future.isDone()) {
            //            Logger.getLogger(WikipediaOnline.class.getName()).log(Level.INFO, "Task is not completed yet....");
            Thread.sleep(10); // Thread.sleep is static; calling it via currentThread() was misleading
        }
        Map<String, List<String>> c = future.get();
        if (c != null) {
            cats.putAll(c);
        }
    }

    return cats;
}
From source file:org.apache.hadoop.hbase.client.ScannerCallableWithReplicas.java
@Override
public Result[] call(int timeout) throws IOException {
    // If the active replica callable was closed somewhere, invoke the RPC to
    // really close it. In the case of regular scanners, this applies. We make a couple
    // of RPCs to a RegionServer, and when that region is exhausted, we set
    // the closed flag. Then an RPC is required to actually close the scanner.
    if (currentScannerCallable != null && currentScannerCallable.closed) {
        // For closing we target that exact scanner (and do not do replica fallback like in
        // the case of normal reads)
        if (LOG.isTraceEnabled()) {
            LOG.trace("Closing scanner id=" + currentScannerCallable.scannerId);
        }
        Result[] r = currentScannerCallable.call(timeout);
        currentScannerCallable = null;
        return r;
    }
    // We need to do the following:
    // 1. When a scan goes out to a certain replica (default or not), we need to
    //    continue to hit that until there is a failure. So store the last successfully invoked
    //    replica.
    // 2. We should close the "losing" scanners (scanners other than the ones we hear back
    //    from first).
    RegionLocations rl = RpcRetryingCallerWithReadReplicas.getRegionLocations(true,
            RegionReplicaUtil.DEFAULT_REPLICA_ID, cConnection, tableName, currentScannerCallable.getRow());

    // Allocate a bounded-completion pool of some multiple of the number of replicas.
    // We want to accommodate some RPCs for redundant replica scans (but are still in progress).
    ResultBoundedCompletionService<Pair<Result[], ScannerCallable>> cs =
            new ResultBoundedCompletionService<Pair<Result[], ScannerCallable>>(
                    RpcRetryingCallerFactory.instantiate(ScannerCallableWithReplicas.this.conf), pool,
                    rl.size() * 5);

    AtomicBoolean done = new AtomicBoolean(false);
    replicaSwitched.set(false);
    // Submit the call for the primary replica.
    addCallsForCurrentReplica(cs, rl);
    try {
        // Wait for the timeout to see whether the primary responds back.
        Future<Pair<Result[], ScannerCallable>> f =
                cs.poll(timeBeforeReplicas, TimeUnit.MICROSECONDS); // Yes, microseconds
        if (f != null) {
            Pair<Result[], ScannerCallable> r = f.get();
            if (r != null && r.getSecond() != null) {
                updateCurrentlyServingReplica(r.getSecond(), r.getFirst(), done, pool);
            }
            return r == null ? null : r.getFirst(); // great, we got a response
        }
    } catch (ExecutionException e) {
        RpcRetryingCallerWithReadReplicas.throwEnrichedException(e, retries);
    } catch (CancellationException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    }

    // Submit the calls for all of the secondaries at once.
    // TODO: this may be overkill for large region replication
    addCallsForOtherReplicas(cs, rl, 0, rl.size() - 1);
    try {
        Future<Pair<Result[], ScannerCallable>> f = cs.take();
        Pair<Result[], ScannerCallable> r = f.get();
        if (r != null && r.getSecond() != null) {
            updateCurrentlyServingReplica(r.getSecond(), r.getFirst(), done, pool);
        }
        return r == null ? null : r.getFirst(); // great, we got an answer
    } catch (ExecutionException e) {
        RpcRetryingCallerWithReadReplicas.throwEnrichedException(e, retries);
    } catch (CancellationException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } finally {
        // We get here because we were interrupted or because one or more of the
        // calls succeeded or failed. In all cases, we stop all our tasks.
        cs.cancelAll();
    }
    return null; // unreachable
}
From source file:eu.edisonproject.training.wsd.WikipediaOnline.java
private Map<CharSequence, List<CharSequence>> getCategories(Set<Term> terms)
        throws MalformedURLException, InterruptedException, ExecutionException {
    int maxT = 2;
    BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(maxT);
    // NOTE: the keep-alive time here is 500 microseconds
    ExecutorService pool = new ThreadPoolExecutor(maxT, maxT, 500L, TimeUnit.MICROSECONDS, workQueue);
    //        ExecutorService pool = new ThreadPoolExecutor(maxT, maxT,
    //                5000L, TimeUnit.MILLISECONDS,
    //                new ArrayBlockingQueue<>(maxT, true), new ThreadPoolExecutor.CallerRunsPolicy());
    Map<CharSequence, List<CharSequence>> cats = new HashMap<>();
    Set<Future<Map<CharSequence, List<CharSequence>>>> set = new HashSet<>();
    for (Term t : terms) {
        URL url = new URL(PAGE + "?action=query&format=json&prop=categories&pageids=" + t.getUid());
        LOGGER.log(Level.FINE, url.toString());
        WikiRequestor req = new WikiRequestor(url, t.getUid().toString(), 0);
        Future<Map<CharSequence, List<CharSequence>>> future = pool.submit(req);
        set.add(future);
    }
    pool.shutdown();

    for (Future<Map<CharSequence, List<CharSequence>>> future : set) {
        while (!future.isDone()) {
            //            LOGGER.log(Level.INFO, "Task is not completed yet....");
            Thread.sleep(10); // Thread.sleep is static; calling it via currentThread() was misleading
        }
        Map<CharSequence, List<CharSequence>> c = future.get();
        if (c != null) {
            cats.putAll(c);
        }
    }
    return cats;
}
From source file:com.googlecode.ehcache.annotations.integration.CacheableTest.java
@Test
public void testEnumArgCaching() {
    Assert.assertEquals(0, cacheableTestInterface.enumParameterCount());

    Assert.assertEquals("enumParameter(SECONDS)", cacheableTestInterface.enumParameter(TimeUnit.SECONDS));
    Assert.assertEquals(1, cacheableTestInterface.enumParameterCount());
    Assert.assertEquals("enumParameter(SECONDS)", cacheableTestInterface.enumParameter(TimeUnit.SECONDS));
    Assert.assertEquals(1, cacheableTestInterface.enumParameterCount());

    Assert.assertEquals("enumParameter(null)", cacheableTestInterface.enumParameter(null));
    Assert.assertEquals(2, cacheableTestInterface.enumParameterCount());
    Assert.assertEquals("enumParameter(null)", cacheableTestInterface.enumParameter(null));
    Assert.assertEquals(2, cacheableTestInterface.enumParameterCount());

    Assert.assertEquals("enumParameter(MICROSECONDS)",
            cacheableTestInterface.enumParameter(TimeUnit.MICROSECONDS));
    Assert.assertEquals(3, cacheableTestInterface.enumParameterCount());
    Assert.assertEquals("enumParameter(MICROSECONDS)",
            cacheableTestInterface.enumParameter(TimeUnit.MICROSECONDS));
    Assert.assertEquals(3, cacheableTestInterface.enumParameterCount());
}
From source file:com.linkedin.pinot.perf.BenchmarkOfflineIndexReader.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public int fixedBitSingleValueReader() {
    ReaderContext context = _fixedBitSingleValueReader.createContext();
    int ret = 0;
    for (int i = 0; i < _numDocs; i++) {
        ret += _fixedBitSingleValueReader.getInt(i, context);
    }
    return ret;
}
From source file:io.druid.benchmark.FilterPartitionBenchmark.java
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void stringRead(Blackhole blackhole) throws Exception {
    StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
    Sequence<Cursor> cursors = makeCursors(sa, null);
    Sequence<List<String>> stringListSeq = readCursors(cursors, blackhole);
    List<String> strings = Sequences
            .toList(Sequences.limit(stringListSeq, 1), Lists.<List<String>>newArrayList())
            .get(0);
    for (String st : strings) {
        blackhole.consume(st);
    }
}
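Both benchmark examples above use JMH's @OutputTimeUnit(TimeUnit.MICROSECONDS) so that average-time scores are reported in microseconds per operation. A minimal self-contained sketch of that pattern, assuming JMH is on the classpath (the measured method here is illustrative, not from either project):

import java.util.concurrent.TimeUnit;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;

@State(Scope.Benchmark)
public class MicrosBenchmark {

    private final long[] data = new long[1024];

    @Benchmark
    @BenchmarkMode(Mode.AverageTime)
    @OutputTimeUnit(TimeUnit.MICROSECONDS) // report scores as us/op
    public long sumArray() {
        long sum = 0;
        for (long v : data) {
            sum += v;
        }
        return sum; // returning the result keeps the JIT from eliminating the loop
    }
}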