List of usage examples for java.util.concurrent TimeUnit NANOSECONDS
TimeUnit NANOSECONDS
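All of the examples below are taken from real projects. As a baseline before diving into them, here is a minimal, self-contained sketch (the class name NanosDemo is ours, not from any of the sources) of the two operations the constant is most often used for: the toXxx() shortcut methods and the general-purpose targetUnit.convert(duration, sourceUnit) form. Note that all TimeUnit conversions truncate rather than round.

import java.util.concurrent.TimeUnit;

public class NanosDemo {
    public static void main(String[] args) {
        // Convert 1,500 milliseconds into nanoseconds.
        long nanos = TimeUnit.MILLISECONDS.toNanos(1_500);  // 1_500_000_000

        // Convert back the other way; conversions truncate, they do not round.
        long millis = TimeUnit.NANOSECONDS.toMillis(nanos); // 1_500

        // General form: targetUnit.convert(sourceDuration, sourceUnit).
        long seconds = TimeUnit.SECONDS.convert(nanos, TimeUnit.NANOSECONDS); // 1

        System.out.println(nanos + " ns = " + millis + " ms = " + seconds + " s (truncated)");
    }
}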
From source file: kieker.tools.traceAnalysis.TraceAnalysisTool.java

private static void addDecorators(final String[] decoratorNames, final AbstractDependencyGraphFilter<?> plugin) {
    if (decoratorNames == null) {
        return;
    }
    final List<String> decoratorList = Arrays.asList(decoratorNames);
    final Iterator<String> decoratorIterator = decoratorList.iterator();
    while (decoratorIterator.hasNext()) {
        final String currentDecoratorStr = decoratorIterator.next();
        if (Constants.RESPONSE_TIME_DECORATOR_FLAG_NS.equals(currentDecoratorStr)) {
            plugin.addDecorator(new ResponseTimeNodeDecorator(TimeUnit.NANOSECONDS));
        } else if (Constants.RESPONSE_TIME_DECORATOR_FLAG_US.equals(currentDecoratorStr)) {
            plugin.addDecorator(new ResponseTimeNodeDecorator(TimeUnit.MICROSECONDS));
        } else if (Constants.RESPONSE_TIME_DECORATOR_FLAG_MS.equals(currentDecoratorStr)) {
            plugin.addDecorator(new ResponseTimeNodeDecorator(TimeUnit.MILLISECONDS));
        } else if (Constants.RESPONSE_TIME_DECORATOR_FLAG_S.equals(currentDecoratorStr)) {
            plugin.addDecorator(new ResponseTimeNodeDecorator(TimeUnit.SECONDS));
        } else if (Constants.RESPONSE_TIME_COLORING_DECORATOR_FLAG.equals(currentDecoratorStr)) {
            // if decorator is responseColoring, next value should be the threshold
            final String thresholdStringStr = decoratorIterator.next();
            try {
                final int threshold = Integer.parseInt(thresholdStringStr);
                plugin.addDecorator(new ResponseTimeColorNodeDecorator(threshold));
            } catch (final NumberFormatException exc) {
                System.err.println("\nFailed to parse int value of property "
                        + "threshold(ms) : " + thresholdStringStr); // NOPMD (System.out)
            }
        } else {
            LOG.warn("Unknown decoration name '" + currentDecoratorStr + "'.");
            return;
        }
    }
}
From source file: com.netflix.genie.core.services.impl.S3FileTransferImplUnitTests.java

/**
 * Test the putFile method for valid s3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test(expected = GenieServerException.class)
public void testPutFileMethodFailureToFetch() throws GenieException {
    Mockito.when(this.s3Client.putObject(Mockito.any(), Mockito.any(), Mockito.any(File.class)))
            .thenThrow(new AmazonS3Exception("something"));
    final ArgumentCaptor<String> bucketArgument = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<String> keyArgument = ArgumentCaptor.forClass(String.class);
    try {
        s3FileTransfer.putFile(LOCAL_PATH, S3_PATH);
    } finally {
        Mockito.verify(this.s3Client).putObject(bucketArgument.capture(), keyArgument.capture(),
                Mockito.any(File.class));
        Assert.assertEquals(S3_BUCKET, bucketArgument.getValue());
        Assert.assertEquals(S3_KEY, keyArgument.getValue());
        Mockito.verify(this.uploadTimer, Mockito.times(1)).record(Mockito.anyLong(),
                Mockito.eq(TimeUnit.NANOSECONDS));
        Mockito.verify(this.uploadTimerId, Mockito.times(1)).withTags(tagsCaptor.capture());
        Assert.assertEquals(FAILURE_TAGS, tagsCaptor.getValue());
    }
}
From source file: com.netflix.genie.web.services.impl.S3FileTransferImplTest.java

/**
 * Test the putFile method for valid s3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test(expected = GenieServerException.class)
public void testPutFileMethodFailureToFetch() throws GenieException {
    Mockito.when(this.s3Client.putObject(Mockito.any(), Mockito.any(), Mockito.any(File.class)))
            .thenThrow(new AmazonS3Exception("something"));
    final ArgumentCaptor<String> bucketArgument = ArgumentCaptor.forClass(String.class);
    final ArgumentCaptor<String> keyArgument = ArgumentCaptor.forClass(String.class);
    try {
        s3FileTransfer.putFile(LOCAL_PATH, S3_PATH);
    } finally {
        Mockito.verify(this.s3Client).putObject(bucketArgument.capture(), keyArgument.capture(),
                Mockito.any(File.class));
        Assert.assertEquals(S3_BUCKET, bucketArgument.getValue());
        Assert.assertEquals(S3_KEY, keyArgument.getValue());
        Mockito.verify(this.uploadTimer, Mockito.times(1)).record(Mockito.anyLong(),
                Mockito.eq(TimeUnit.NANOSECONDS));
        Mockito.verify(this.registry, Mockito.times(1)).timer(Mockito.eq(S3FileTransferImpl.UPLOAD_TIMER_NAME),
                this.tagsCaptor.capture());
        Assert.assertEquals(MetricsUtils.newFailureTagsSetForException(new GenieServerException("blah")),
                this.tagsCaptor.getValue());
    }
}
From source file: org.apache.hadoop.hbase.client.RawAsyncTableImpl.java

@Override
public long getReadRpcTimeout(TimeUnit unit) {
    return unit.convert(readRpcTimeoutNs, TimeUnit.NANOSECONDS);
}
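The same pattern appears again in the getWriteRpcTimeout and getOperationTimeout examples below: the timeout is stored internally in nanoseconds and converted to the caller's unit on the way out. A minimal sketch of that idea (TimeoutHolder is an illustrative name, not an HBase class):

import java.util.concurrent.TimeUnit;

class TimeoutHolder {
    private final long timeoutNs;

    TimeoutHolder(long timeout, TimeUnit unit) {
        this.timeoutNs = unit.toNanos(timeout); // normalize once, at the boundary
    }

    long getTimeout(TimeUnit unit) {
        return unit.convert(timeoutNs, TimeUnit.NANOSECONDS); // convert on the way out
    }
}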
From source file: com.microsoft.azure.management.datalake.store.uploader.DataLakeStoreUploader.java

/**
 * Concatenates all the segments defined in the metadata into a single stream.
 *
 * @param metadata The {@link UploadMetadata} to determine the segments to concatenate
 * @throws Exception
 */
private void concatenateSegments(final UploadMetadata metadata) throws Exception {
    final String[] inputPaths = new String[metadata.getSegmentCount()];

    // verify if target stream exists
    if (frontEnd.streamExists(metadata.getTargetStreamPath())) {
        if (this.getParameters().isOverwrite()) {
            frontEnd.deleteStream(metadata.getTargetStreamPath(), false);
        } else {
            throw new OperationsException("Target Stream already exists");
        }
    }

    // ensure all input streams exist and are of the expected length
    // ensure all segments in the metadata are marked as 'complete'
    final List<Exception> exceptions = new ArrayList<>();
    ExecutorService exec = Executors.newFixedThreadPool(this.getParameters().getThreadCount());
    for (int i = 0; i < metadata.getSegmentCount(); i++) {
        final int finalI = i;
        exec.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    if (metadata.getSegments()[finalI].getStatus() != SegmentUploadStatus.Complete) {
                        throw new UploadFailedException(
                                "Cannot perform 'concatenate' operation because not all streams are fully uploaded.");
                    }

                    String remoteStreamPath = metadata.getSegments()[finalI].getPath();
                    int retryCount = 0;
                    long remoteLength = -1;

                    while (retryCount < SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
                        retryCount++;
                        try {
                            remoteLength = frontEnd.getStreamLength(remoteStreamPath);
                            break;
                        } catch (Exception e) {
                            if (retryCount >= SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
                                throw new UploadFailedException(MessageFormat.format(
                                        "Cannot perform 'concatenate' operation due to the following exception retrieving file information: {0}",
                                        e));
                            }
                            SingleSegmentUploader.waitForRetry(retryCount,
                                    parameters.isUseSegmentBlockBackOffRetryStrategy());
                        }
                    }

                    if (remoteLength != metadata.getSegments()[finalI].getLength()) {
                        throw new UploadFailedException(MessageFormat.format(
                                "Cannot perform 'concatenate' operation because segment {0} has an incorrect length (expected {1}, actual {2}).",
                                finalI, metadata.getSegments()[finalI].getLength(), remoteLength));
                    }

                    inputPaths[finalI] = remoteStreamPath;
                } catch (Exception ex) {
                    // collect any exceptions, whether we just generated them above or whether they come from the Front End
                    synchronized (exceptions) {
                        exceptions.add(ex);
                    }
                }
            }
        });
    }

    exec.shutdown();
    try {
        exec.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); // waits ~292 years for completion or interruption
    } catch (InterruptedException e) {
        // add the exception since it will indicate that it was cancelled.
        exceptions.add(e);
    }

    if (exceptions.size() > 0) {
        throw new AggregateUploadException("At least one concatenate test failed", exceptions.remove(0),
                exceptions);
    }

    // issue the command
    frontEnd.concatenate(metadata.getTargetStreamPath(), inputPaths);
}
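The awaitTermination call above relies on a common idiom: ExecutorService has no untimed awaitTermination, so callers pass the largest representable timeout. Long.MAX_VALUE nanoseconds is roughly 292 years, which is where the comment in the source comes from. A sketch of the idiom as a standalone helper (ExecutorUtil and awaitQuietly are hypothetical names, not from the Azure SDK):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

final class ExecutorUtil {
    // Block until the executor terminates or the calling thread is interrupted.
    static void awaitQuietly(ExecutorService exec) {
        exec.shutdown();
        try {
            // Long.MAX_VALUE nanoseconds is ~292 years: effectively forever.
            exec.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
        }
    }
}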
From source file: org.apache.hadoop.hbase.client.RawAsyncTableImpl.java

@Override
public long getWriteRpcTimeout(TimeUnit unit) {
    return unit.convert(writeRpcTimeoutNs, TimeUnit.NANOSECONDS);
}
From source file: org.apache.hadoop.hbase.client.AsyncBatchRpcRetryingCaller.java

private void tryResubmit(Stream<Action> actions, int tries) {
    long delayNs;
    if (operationTimeoutNs > 0) {
        long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS;
        if (maxDelayNs <= 0) {
            failAll(actions, tries);
            return;
        }
        delayNs = Math.min(maxDelayNs, getPauseTime(pauseNs, tries - 1));
    } else {
        delayNs = getPauseTime(pauseNs, tries - 1);
    }
    retryTimer.newTimeout(t -> groupAndSend(actions, tries + 1), delayNs, TimeUnit.NANOSECONDS);
}
From source file: org.apache.hadoop.ha.ZKFailoverController.java

/**
 * Wait until one of the following events:
 * <ul>
 * <li>Another thread publishes the results of an attempt to become active
 * using {@link #recordActiveAttempt(ActiveAttemptRecord)}</li>
 * <li>The node enters bad health status</li>
 * <li>The specified timeout elapses</li>
 * </ul>
 *
 * @param timeoutMillis number of millis to wait
 * @return the published record, or null if the timeout elapses or the
 *         service becomes unhealthy
 * @throws InterruptedException if the thread is interrupted.
 */
private ActiveAttemptRecord waitForActiveAttempt(int timeoutMillis) throws InterruptedException {
    long st = System.nanoTime();
    long waitUntil = st + TimeUnit.NANOSECONDS.convert(timeoutMillis, TimeUnit.MILLISECONDS);

    do {
        // periodically check health state, because entering an
        // unhealthy state could prevent us from ever attempting to
        // become active. We can detect this and respond to the user
        // immediately.
        synchronized (this) {
            if (lastHealthState != State.SERVICE_HEALTHY) {
                // early out if service became unhealthy
                return null;
            }
        }
        synchronized (activeAttemptRecordLock) {
            if ((lastActiveAttemptRecord != null && lastActiveAttemptRecord.nanoTime >= st)) {
                return lastActiveAttemptRecord;
            }
            // Only wait 1sec so that we periodically recheck the health state
            // above.
            activeAttemptRecordLock.wait(1000);
        }
    } while (System.nanoTime() < waitUntil);

    // Timeout elapsed.
    LOG.warn(timeoutMillis + "ms timeout elapsed waiting for an attempt " + "to become active");
    return null;
}
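The first two lines of the method show the standard deadline idiom for System.nanoTime(): convert the relative timeout to nanoseconds, add it to the current reading, and loop until the clock passes the deadline. One caveat worth knowing: nanoTime() values may be negative and are only meaningful as differences, so the overflow-safe comparison is now - deadline >= 0 rather than a direct less-than. A small illustrative helper (Deadline is our name, not Hadoop's):

import java.util.concurrent.TimeUnit;

final class Deadline {
    // Compute an absolute deadline from a relative millisecond timeout.
    static long deadlineNanos(long timeoutMillis) {
        return System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutMillis, TimeUnit.MILLISECONDS);
    }

    // Overflow-safe check: compare the difference against zero, not the raw values.
    static boolean expired(long deadlineNanos) {
        return System.nanoTime() - deadlineNanos >= 0;
    }
}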
From source file: org.apache.hadoop.hbase.client.RawAsyncTableImpl.java

@Override
public long getOperationTimeout(TimeUnit unit) {
    return unit.convert(operationTimeoutNs, TimeUnit.NANOSECONDS);
}
From source file: com.addthis.hydra.task.source.AbstractStreamFileDataSource.java

private Bundle pollAndCloseOnInterrupt(long pollFor, TimeUnit unit) {
    boolean interrupted = false;
    try {
        long remainingNanos = unit.toNanos(pollFor);
        long end = System.nanoTime() + remainingNanos;
        while (true) {
            try {
                return queue.poll(remainingNanos, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                interrupted = true;
                log.info("interrupted while polling for bundles; closing source then resuming poll");
                close();
                remainingNanos = end - System.nanoTime();
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
}
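This is a distilled form of the "uninterruptible wait" pattern (Guava's Uninterruptibles has the same shape): convert the timeout to nanoseconds once, record the absolute end time, and after every InterruptedException recompute the remaining budget and resume the poll, restoring the interrupt flag on exit. A sketch that keeps only the timing logic and drops the source-closing behavior of the hydra code (PollUtil is an illustrative name):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

final class PollUtil {
    // Poll the queue for up to the given timeout, absorbing interrupts and
    // re-asserting the interrupt flag before returning.
    static <E> E pollUninterruptibly(BlockingQueue<E> queue, long timeout, TimeUnit unit) {
        boolean interrupted = false;
        try {
            long remainingNanos = unit.toNanos(timeout);
            long end = System.nanoTime() + remainingNanos;
            while (true) {
                try {
                    // A non-positive remainder makes poll return null immediately,
                    // so the loop always terminates.
                    return queue.poll(remainingNanos, TimeUnit.NANOSECONDS);
                } catch (InterruptedException e) {
                    interrupted = true;
                    remainingNanos = end - System.nanoTime();
                }
            }
        } finally {
            if (interrupted) {
                Thread.currentThread().interrupt();
            }
        }
    }
}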