Usage examples for java.util.concurrent.TimeUnit.MICROSECONDS
The examples below are collected from open-source projects and show the common patterns: scheduling with microsecond periods, converting between time units, sleeping for microsecond-denominated durations, and reading elapsed time in microseconds.
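Before the project examples, here is a minimal, self-contained sketch (not from any of the projects below) of the TimeUnit.MICROSECONDS calls they rely on; every method shown is standard JDK API:

import java.util.concurrent.TimeUnit;

public class MicrosecondsDemo {
    public static void main(String[] args) throws InterruptedException {
        // Widening conversions are exact; narrowing ones truncate toward zero
        long nanos = TimeUnit.MICROSECONDS.toNanos(5);      // 5000
        long millis = TimeUnit.MICROSECONDS.toMillis(2500); // 2 (truncated)
        long micros = TimeUnit.MILLISECONDS.toMicros(3);    // 3000

        // General conversion: 1500 us expressed in the receiver's unit (milliseconds)
        long converted = TimeUnit.MILLISECONDS.convert(1500, TimeUnit.MICROSECONDS); // 1

        // Sleep for a microsecond-denominated duration (actual resolution is OS-dependent)
        TimeUnit.MICROSECONDS.sleep(100);

        System.out.println(nanos + " " + millis + " " + micros + " " + converted);
    }
}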
From source file:org.omnaest.utils.time.DurationCaptureTest.java
@Test
public void testTimeMeasurement() {
    DurationCapture durationCapture = DurationCapture.newInstance();

    durationCapture.startTimeMeasurement();
    List<String> tempStringList = new ArrayList<String>();
    for (int ii = 0; ii < 10000000 && durationCapture.getInterimTimeInMilliseconds() < 100; ii++) {
        durationCapture.startTimeMeasurement("Interval1");
        tempStringList.add("value1" + ii);
        durationCapture.stopTimeMeasurement("Interval1");

        durationCapture.startTimeMeasurement("Interval2");
        tempStringList.add("value2" + ii);
        durationCapture.stopTimeMeasurement("Interval2");

        durationCapture.startTimeMeasurement("Interval999");
        tempStringList.add("value3" + ii);
        durationCapture.stopTimeMeasurement("Interval999");
    }
    durationCapture.stopTimeMeasurement();

    long duration = durationCapture.getDurationInMilliseconds();
    assertTrue(duration > 0);

    long duration2 = durationCapture.getDuration(TimeUnit.MICROSECONDS);
    assertEquals(duration, duration2 / 1000);

    Map<Object, Long> intervalKeyToDurationMap = durationCapture
            .getIntervalKeyToDurationMap(TimeUnit.MICROSECONDS);
    assertEquals(4, intervalKeyToDurationMap.keySet().size());

    //System.out.println( durationCapture.calculateIntervalStatisticLogMessage() );
}
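The assertEquals(duration, duration2 / 1000) check works because integer division truncates microseconds down to whole milliseconds. For comparison, a minimal JDK-only sketch of capturing a duration in microseconds (the StringBuilder loop is a hypothetical stand-in workload, not DurationCapture code):

import java.util.concurrent.TimeUnit;

public class ElapsedMicros {
    public static void main(String[] args) {
        long t0 = System.nanoTime();
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < 100_000; i++) {
            sb.append(i); // stand-in workload
        }
        long elapsedMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - t0);
        long elapsedMillis = elapsedMicros / 1000; // the same truncation the test asserts
        System.out.println(elapsedMicros + " us = " + elapsedMillis + " ms, length=" + sb.length());
    }
}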
From source file:org.apache.pulsar.client.impl.AcknowledgmentsGroupingTracker.java
public AcknowledgmentsGroupingTracker(ConsumerImpl<?> consumer, ConsumerConfigurationData<?> conf,
        EventLoopGroup eventLoopGroup) {
    this.consumer = consumer;
    this.pendingIndividualAcks = new ConcurrentSkipListSet<>();
    this.acknowledgementGroupTimeMicros = conf.getAcknowledgementsGroupTimeMicros();

    if (acknowledgementGroupTimeMicros > 0) {
        scheduledTask = eventLoopGroup.next().scheduleWithFixedDelay(this::flush,
                acknowledgementGroupTimeMicros, acknowledgementGroupTimeMicros, TimeUnit.MICROSECONDS);
    } else {
        scheduledTask = null;
    }
}
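The tracker piggybacks on the consumer's Netty EventLoopGroup for scheduling. Where no event loop is available, the same microsecond-period flush can be sketched with a plain JDK ScheduledExecutorService; the printed "flush" below is a hypothetical stand-in for this::flush, and groupTimeMicros is an assumed value:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class MicroFlushScheduler {
    public static void main(String[] args) throws InterruptedException {
        final long groupTimeMicros = 1000; // assumed grouping window: 1 ms expressed in us
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleWithFixedDelay(
                () -> System.out.println("flush"), // stand-in for the grouped-ack flush
                groupTimeMicros, groupTimeMicros, TimeUnit.MICROSECONDS);
        TimeUnit.MILLISECONDS.sleep(10);
        scheduler.shutdown();
    }
}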
From source file:com.clustercontrol.nodemap.util.SearchConnectionExecutor.java
/**
 * Creates the ExecutorService used for the connection search.<BR>
 *
 * @param scopeId scope ID
 * @param isL3 true to search at L3, false to search at L2
 * @throws HinemosUnknown
 */
public SearchConnectionExecutor(String scopeId, boolean isL3) throws HinemosUnknown {
    start = HinemosTime.currentTimeMillis();
    this.isL3 = isL3;

    // Number of threads used for the SNMP requests
    int threadSize = HinemosPropertyUtil
            .getHinemosPropertyNum("nodemap.search.connection.thread", Long.valueOf(4)).intValue();
    m_log.info("static() : Thread Size = " + threadSize);
    m_log.info("SearchConnectionExecutor() : scopeId=" + scopeId + ",L3=" + isL3);

    // OID to query: the ARP table for L3, the FDB for L2
    String oid = isL3 ? SearchConnectionProperties.DEFAULT_OID_ARP : SearchConnectionProperties.DEFAULT_OID_FDB;
    oidSet = new HashSet<String>();
    oidSet.add(oid);

    facilityIdList = bean.getFacilityIdList(scopeId, RepositoryControllerBean.ONE_LEVEL);
    _executor = new MonitoredThreadPoolExecutor(threadSize, threadSize, 0L, TimeUnit.MICROSECONDS,
            new LinkedBlockingQueue<Runnable>(), new ThreadFactory() {
                private volatile int _count = 0;

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "SearchConnectionExecutor-" + _count++);
                }
            }, new ThreadPoolExecutor.AbortPolicy());
    now = HinemosTime.currentTimeMillis();
    m_log.debug("Constructor : " + (now - start) + "ms");
}
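Note that because the pool's core and maximum sizes are both threadSize and the keep-alive time is 0L, the TimeUnit.MICROSECONDS argument is effectively inert here: it only denominates a zero keep-alive that never applies.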
From source file:org.apache.pulsar.client.impl.PersistentAcknowledgmentsGroupingTracker.java
public PersistentAcknowledgmentsGroupingTracker(ConsumerImpl<?> consumer, ConsumerConfigurationData<?> conf,
        EventLoopGroup eventLoopGroup) {
    this.consumer = consumer;
    this.pendingIndividualAcks = new ConcurrentSkipListSet<>();
    this.acknowledgementGroupTimeMicros = conf.getAcknowledgementsGroupTimeMicros();

    if (acknowledgementGroupTimeMicros > 0) {
        scheduledTask = eventLoopGroup.next().scheduleWithFixedDelay(this::flush,
                acknowledgementGroupTimeMicros, acknowledgementGroupTimeMicros, TimeUnit.MICROSECONDS);
    } else {
        scheduledTask = null;
    }
}
From source file:org.apache.hadoop.fs.nfs.rpc.RpcClient.java
public RpcClient(String hostname, int port) throws IOException {
    tasks = new ConcurrentHashMap<>();
    pending = new ConcurrentLinkedQueue<>();
    xid = new AtomicInteger(new Random(System.currentTimeMillis()).nextInt(1024) * 1000000);
    errored = new AtomicBoolean(false);
    shutdown = new AtomicBoolean(false);

    ChannelFactory factory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool(), 1, 8);

    client = this;
    ChannelPipelineFactory pipelineFactory = new ChannelPipelineFactory() {
        @Override
        public ChannelPipeline getPipeline() {
            return Channels.pipeline(new RpcFrameDecoder(),
                    new IdleStateHandler(timer, 0, 1, 0, TimeUnit.MICROSECONDS),
                    new RpcClientHandler(client, bootstrap, timer));
        }
    };

    bootstrap = new ClientBootstrap(factory);
    bootstrap.setPipelineFactory(pipelineFactory);
    bootstrap.setOption("remoteAddress", new InetSocketAddress(hostname, port));
    bootstrap.setOption("tcpNoDelay", true);
    bootstrap.setOption("keepAlive", false);
    bootstrap.setOption("soLinger", 0);
    bootstrap.setOption("receiveBufferSize", 32 * 1024 * 1024);
    bootstrap.setOption("sendBufferSize", 32 * 1024 * 1024);

    future = bootstrap.connect();
    future.awaitUninterruptibly();
    if (future.isDone() && (future.isCancelled() || !future.isSuccess())) {
        throw new IOException("Could not connect to " + hostname + " on port " + port);
    }
}
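With arguments (timer, 0, 1, 0, TimeUnit.MICROSECONDS), the IdleStateHandler disables the reader-idle and all-idle checks and sets a 1-microsecond writer-idle timeout. In practice, Netty 3 Timer implementations (typically a HashedWheelTimer with a default tick on the order of 100 ms) cannot resolve a single microsecond, so the idle event effectively fires at the timer's tick granularity rather than 1 us after the last write.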
From source file:bes.injector.InjectorBurnTest.java
private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException, TimeoutException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);

    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    final Injector injector = new Injector("");
    for (int i = 0; i < executors.length; i++) {
        executors[i] = injector.newExecutor(threadCount, maxQueued);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }

    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<Batch>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events",
                    runs * 0.001f, events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of completion
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
            }
        }

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<Result>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        // System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}
From source file:org.apache.cassandra.concurrent.LongSharedExecutorPoolTest.java
private void testPromptnessOfExecution(long intervalNanos, float loadIncrement)
        throws InterruptedException, ExecutionException {
    final int executorCount = 4;
    int threadCount = 8;
    int maxQueued = 1024;
    final WeibullDistribution workTime = new WeibullDistribution(3, 200000);
    final long minWorkTime = TimeUnit.MICROSECONDS.toNanos(1);
    final long maxWorkTime = TimeUnit.MILLISECONDS.toNanos(1);

    final int[] threadCounts = new int[executorCount];
    final WeibullDistribution[] workCount = new WeibullDistribution[executorCount];
    final ExecutorService[] executors = new ExecutorService[executorCount];
    for (int i = 0; i < executors.length; i++) {
        executors[i] = SharedExecutorPool.SHARED.newExecutor(threadCount, maxQueued, "test" + i, "test" + i);
        threadCounts[i] = threadCount;
        workCount[i] = new WeibullDistribution(2, maxQueued);
        threadCount *= 2;
        maxQueued *= 2;
    }

    long runs = 0;
    long events = 0;
    final TreeSet<Batch> pending = new TreeSet<>();
    final BitSet executorsWithWork = new BitSet(executorCount);
    long until = 0;
    // basic idea is to go through different levels of load on the executor service; initially is all small batches
    // (mostly within max queue size) of very short operations, moving to progressively larger batches
    // (beyond max queued size), and longer operations
    for (float multiplier = 0f; multiplier < 2.01f;) {
        if (System.nanoTime() > until) {
            System.out.println(String.format("Completed %.0fK batches with %.1fM events",
                    runs * 0.001f, events * 0.000001f));
            events = 0;
            until = System.nanoTime() + intervalNanos;
            multiplier += loadIncrement;
            System.out.println(String.format("Running for %ds with load multiplier %.1f",
                    TimeUnit.NANOSECONDS.toSeconds(intervalNanos), multiplier));
        }

        // wait a random amount of time so we submit new tasks in various stages of completion
        long timeout;
        if (pending.isEmpty())
            timeout = 0;
        else if (Math.random() > 0.98)
            timeout = Long.MAX_VALUE;
        else if (pending.size() == executorCount)
            timeout = pending.first().timeout;
        else
            timeout = (long) (Math.random() * pending.last().timeout);

        while (!pending.isEmpty() && timeout > System.nanoTime()) {
            Batch first = pending.first();
            boolean complete = false;
            try {
                for (Result result : first.results.descendingSet())
                    result.future.get(timeout - System.nanoTime(), TimeUnit.NANOSECONDS);
                complete = true;
            } catch (TimeoutException e) {
            }
            if (!complete && System.nanoTime() > first.timeout) {
                for (Result result : first.results)
                    if (!result.future.isDone())
                        throw new AssertionError();
                complete = true;
            }
            if (complete) {
                pending.pollFirst();
                executorsWithWork.clear(first.executorIndex);
            }
        }

        // if we've emptied the executors, give all our threads an opportunity to spin down
        if (timeout == Long.MAX_VALUE)
            Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);

        // submit a random batch to the first free executor service
        int executorIndex = executorsWithWork.nextClearBit(0);
        if (executorIndex >= executorCount)
            continue;
        executorsWithWork.set(executorIndex);
        ExecutorService executor = executors[executorIndex];
        TreeSet<Result> results = new TreeSet<>();
        int count = (int) (workCount[executorIndex].sample() * multiplier);
        long targetTotalElapsed = 0;
        long start = System.nanoTime();
        long baseTime;
        if (Math.random() > 0.5)
            baseTime = 2 * (long) (workTime.sample() * multiplier);
        else
            baseTime = 0;
        for (int j = 0; j < count; j++) {
            long time;
            if (baseTime == 0)
                time = (long) (workTime.sample() * multiplier);
            else
                time = (long) (baseTime * Math.random());
            if (time < minWorkTime)
                time = minWorkTime;
            if (time > maxWorkTime)
                time = maxWorkTime;
            targetTotalElapsed += time;
            Future<?> future = executor.submit(new WaitTask(time));
            results.add(new Result(future, System.nanoTime() + time));
        }
        long end = start + (long) Math.ceil(targetTotalElapsed / (double) threadCounts[executorIndex])
                + TimeUnit.MILLISECONDS.toNanos(100L);
        long now = System.nanoTime();
        if (runs++ > executorCount && now > end)
            throw new AssertionError();
        events += results.size();
        pending.add(new Batch(results, end, executorIndex));
        // System.out.println(String.format("Submitted batch to executor %d with %d items and %d permitted millis", executorIndex, count, TimeUnit.NANOSECONDS.toMillis(end - start)));
    }
}
From source file:io.mycat.backend.heartbeat.MySQLConsistencyHelper.java
@Override
public void onResult(SQLQueryResult<Map<String, String>> result) {
    // {"dataNode":"db2","result":{"max_timestamp":"1450423751170"},"success":true}
    // {"dataNode":"db3","result":{"count(*)":"1"},"success":true}
    LOGGER.debug("resultresultresultresult:" + JSON.toJSONString(result));
    Map<String, String> rowMap = null;
    String count = null;
    String innerCol = null;
    String maxTimestamp = null;
    if (result != null)
        rowMap = result.getResult();

    if (rowMap != null) {
        maxTimestamp = rowMap.get(GlobalTableUtil.MAX_COLUMN);
        count = rowMap.get(GlobalTableUtil.COUNT_COLUMN);
        innerCol = rowMap.get(GlobalTableUtil.INNER_COLUMN);
        if ((rowMap.containsKey(GlobalTableUtil.MAX_COLUMN) && StringUtils.isNotBlank(maxTimestamp))
                || (rowMap.containsKey(GlobalTableUtil.COUNT_COLUMN) && StringUtils.isNotBlank(count))
                || (rowMap.containsKey(GlobalTableUtil.INNER_COLUMN) && StringUtils.isNotBlank(innerCol))) {
            heartbeat.setResult(result);
            return;
        } else {
            if (this.retryTime.get() > 0) {
                try {
                    TimeUnit.MICROSECONDS.sleep(10);
                } catch (InterruptedException e) {
                }
                this.retryTime.decrementAndGet();
                this.sqlJob.run();
                return;
            }
            heartbeat.setResult(result);
            return;
        }
    } else {
        if (this.retryTime.get() > 0) {
            try {
                TimeUnit.MICROSECONDS.sleep(3);
            } catch (InterruptedException e) {
            }
            this.retryTime.decrementAndGet();
            this.sqlJob.run();
            return;
        }
        heartbeat.setResult(result);
        return;
    }
}
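TimeUnit.MICROSECONDS.sleep(n) bottoms out in Thread.sleep(millis, nanos), whose effective resolution on most platforms is a millisecond or coarser, so the 3 us and 10 us retry pauses above are best-effort hints rather than precise delays. For shorter pauses one option is LockSupport.parkNanos, still subject to OS timer resolution; a minimal sketch:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;

public class ShortPause {
    public static void main(String[] args) {
        long t0 = System.nanoTime();
        // Park the current thread for roughly 10 microseconds
        LockSupport.parkNanos(TimeUnit.MICROSECONDS.toNanos(10));
        System.out.println("parked for ~" + TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - t0) + " us");
    }
}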
From source file:com.endgame.binarypig.loaders.AbstractFileDroppingLoader.java
public static void dumpStat(String name, Stopwatch stat, long totalUsec, long records) {
    long uSec = stat.elapsedTime(TimeUnit.MICROSECONDS);
    double percentTotal = (100.0 * uSec) / totalUsec;
    double usecPerRecord = (double) uSec / (double) records;
    System.err.printf(String.format("STAT:%s: %.4f uSec/rec, %s, %.2f percent total overhead\n",
            name, usecPerRecord, stat.toString(4), percentTotal));
}
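Stopwatch.elapsedTime(TimeUnit) is the older Guava spelling; current Guava releases use elapsed(TimeUnit) instead. A minimal sketch against the newer API (assumes Guava 15+ on the classpath):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchMicros {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch sw = Stopwatch.createStarted();
        TimeUnit.MILLISECONDS.sleep(5);
        long uSec = sw.elapsed(TimeUnit.MICROSECONDS); // replaces elapsedTime(...) in newer Guava
        System.out.println(uSec + " us");
    }
}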
From source file:org.apache.tinkerpop.gremlin.process.traversal.util.StandardTraversalMetrics.java
@Override
public String toString() {
    computeTotals();

    // Build a pretty table of metrics data.

    // Append headers
    final StringBuilder sb = new StringBuilder("Traversal Metrics\n")
            .append(String.format("%-50s %21s %11s %15s %8s", HEADERS));
    sb.append("\n=============================================================================================================");

    appendMetrics(computedMetrics.values(), sb, 0);

    // Append total duration
    sb.append(String.format("%n%50s %21s %11s %15.3f %8s", ">TOTAL", "-", "-",
            getDuration(TimeUnit.MICROSECONDS) / 1000.0, "-"));

    return sb.toString();
}
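Dividing the microsecond total by 1000.0 keeps sub-millisecond precision in the printed total, which TimeUnit.MICROSECONDS.toMillis would truncate away. A minimal illustration:

import java.util.concurrent.TimeUnit;

public class MicrosToMillis {
    public static void main(String[] args) {
        long durationMicros = 1500;
        System.out.println(TimeUnit.MICROSECONDS.toMillis(durationMicros)); // 1 (truncated)
        System.out.println(durationMicros / 1000.0);                        // 1.5 (what the table prints)
    }
}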