Example usage for java.util.concurrent TimeUnit NANOSECONDS

Introduction

On this page you can find example usage of java.util.concurrent TimeUnit NANOSECONDS, collected from open source projects.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
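
Before the project examples below, here is a minimal, self-contained sketch (the class name NanosDemo is illustrative and not taken from any of the listed projects) showing the typical operations on TimeUnit.NANOSECONDS: converting to and from other units, measuring an interval with System.nanoTime(), and sleeping for a nanosecond-denominated duration.

import java.util.concurrent.TimeUnit;

public class NanosDemo {
    public static void main(String[] args) throws InterruptedException {
        // 20 milliseconds expressed in nanoseconds: 20_000_000
        long waitNanos = TimeUnit.NANOSECONDS.convert(20, TimeUnit.MILLISECONDS);

        // Measure an elapsed interval with System.nanoTime() and report it in milliseconds.
        long start = System.nanoTime();
        TimeUnit.NANOSECONDS.sleep(waitNanos); // actual resolution depends on the platform
        long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);

        System.out.println("Slept for roughly " + elapsedMillis + " ms");
    }
}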

Usage

From source file:com.netflix.genie.web.services.impl.S3FileTransferImplTest.java

/**
 * Test the getFile method for valid s3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test
public void testGetFileMethodValidS3Path() throws GenieException {
    final ObjectMetadata objectMetadata = Mockito.mock(ObjectMetadata.class);
    Mockito.when(this.s3Client.getObject(Mockito.any(GetObjectRequest.class), Mockito.any(File.class)))
            .thenReturn(objectMetadata);
    final ArgumentCaptor<GetObjectRequest> argument = ArgumentCaptor.forClass(GetObjectRequest.class);

    s3FileTransfer.getFile(S3_PATH, LOCAL_PATH);
    Mockito.verify(this.s3Client).getObject(argument.capture(), Mockito.any());
    Assert.assertEquals(S3_BUCKET, argument.getValue().getBucketName());
    Assert.assertEquals(S3_KEY, argument.getValue().getKey());
    Mockito.verify(this.downloadTimer, Mockito.times(1)).record(Mockito.anyLong(),
            Mockito.eq(TimeUnit.NANOSECONDS));
    Mockito.verify(this.registry, Mockito.times(1)).timer(Mockito.eq(S3FileTransferImpl.DOWNLOAD_TIMER_NAME),
            this.tagsCaptor.capture());
    Assert.assertEquals(SUCCESS_TAGS, this.tagsCaptor.getValue());

}

From source file:com.netflix.genie.core.services.impl.LocalJobRunner.java

private File createRunScript(final File jobWorkingDir) throws GenieException {
    final long start = System.nanoTime();
    try {
        final File runScript = new File(jobWorkingDir, JobConstants.GENIE_JOB_LAUNCHER_SCRIPT);
        if (!runScript.exists()) {
            try {
                if (!runScript.createNewFile()) {
                    throw new GenieServerException("Unable to create run script file due to unknown reason.");
                }
            } catch (final IOException ioe) {
                throw new GenieServerException("Unable to create run script file due to IOException.", ioe);
            }
        }
        if (!runScript.setExecutable(true)) {
            throw new GenieServerException("Unable to make run script executable");
        }
        log.info("Created run script {}", runScript);
        return runScript;
    } finally {
        this.createRunScriptTimer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}

From source file:org.wso2.carbon.metrics.jdbc.reporter.JdbcReporterTest.java

@Test
public void reportsNanoseconds() {
    long timestamp = TimeUnit.NANOSECONDS.convert(clock.getTime(), TimeUnit.MILLISECONDS);
    Assert.assertEquals(reportGauge(TimeUnit.NANOSECONDS), timestamp);
}

From source file:org.apache.flink.test.recovery.AbstractTaskManagerProcessFailureRecoveryTest.java

protected void waitUntilNumTaskManagersAreRegistered(ActorRef jobManager, int numExpected, long maxDelayMillis)
        throws Exception {
    final long pollInterval = 10_000_000; // 10 ms = 10,000,000 nanos
    final long deadline = System.nanoTime() + maxDelayMillis * 1_000_000;

    long time;

    while ((time = System.nanoTime()) < deadline) {
        FiniteDuration timeout = new FiniteDuration(pollInterval, TimeUnit.NANOSECONDS);

        try {
            Future<?> result = Patterns.ask(jobManager,
                    JobManagerMessages.getRequestNumberRegisteredTaskManager(), new Timeout(timeout));

            int numTMs = (Integer) Await.result(result, timeout);

            if (numTMs == numExpected) {
                return;
            }
        } catch (TimeoutException e) {
            // ignore and retry
        } catch (ClassCastException e) {
            fail("Wrong response: " + e.getMessage());
        }

        long timePassed = System.nanoTime() - time;
        long remainingMillis = (pollInterval - timePassed) / 1_000_000;
        if (remainingMillis > 0) {
            Thread.sleep(remainingMillis);
        }
    }

    fail("The TaskManagers did not register within the expected time (" + maxDelayMillis + "msecs)");
}

From source file:com.netflix.genie.core.services.impl.S3FileTransferImplUnitTests.java

/**
 * Test the getFile method for valid s3 path.
 *
 * @throws GenieException If there is any problem
 */
@Test
public void testGetFileMethodValidS3Path() throws GenieException {

    final ObjectMetadata objectMetadata = Mockito.mock(ObjectMetadata.class);
    Mockito.when(this.s3Client.getObject(Mockito.any(GetObjectRequest.class), Mockito.any(File.class)))
            .thenReturn(objectMetadata);
    final ArgumentCaptor<GetObjectRequest> argument = ArgumentCaptor.forClass(GetObjectRequest.class);

    s3FileTransfer.getFile(S3_PATH, LOCAL_PATH);
    Mockito.verify(this.s3Client).getObject(argument.capture(), Mockito.any());
    Assert.assertEquals(S3_BUCKET, argument.getValue().getBucketName());
    Assert.assertEquals(S3_KEY, argument.getValue().getKey());
    Mockito.verify(this.downloadTimer, Mockito.times(1)).record(Mockito.anyLong(),
            Mockito.eq(TimeUnit.NANOSECONDS));
    Mockito.verify(this.downloadTimerId, Mockito.times(1)).withTags(tagsCaptor.capture());
    Assert.assertEquals(SUCCESS_TAGS, tagsCaptor.getValue());

}

From source file:metlos.executors.batch.BatchExecutorTest.java

private long rapidFireSimpleExecutorTime(final int taskDurationMillis, int nofJobs, int nofThreads)
        throws Exception {

    ThreadPoolExecutor ex = new ThreadPoolExecutor(nofThreads, nofThreads, 0, TimeUnit.NANOSECONDS,
            new LinkedBlockingQueue<Runnable>());
    List<Callable<Void>> payload = getCallables(taskDurationMillis, nofJobs);

    return measureExecutionTime(System.currentTimeMillis(), ex.invokeAll(payload));
}

From source file:org.apache.hadoop.hdfs.client.ClientMmapManager.java

/**
 * Create a new mmap object.
 * 
 * NOTE: you must call this function with the lock held.
 *
 * @param key              The key which describes this mmap.
 * @param in               The input stream to use to create the mmap.
 * @return                 The new mmap object, or null if there were
 *                         insufficient resources.
 * @throws IOException     If there was an I/O error creating the mmap.
 */
private ClientMmap create(Key key, FileInputStream in) throws IOException {
    if (mmaps.size() + 1 > cacheSize) {
        if (!evictOne()) {
            LOG.warn("mmap cache is full (with " + cacheSize + " elements) and "
                    + "nothing is evictable.  Ignoring request for mmap with " + "datanodeID=" + key.datanode
                    + ", " + "block=" + key.block);
            return null;
        }
    }
    // Create the condition variable that other threads may wait on.
    Waitable<ClientMmap> waitable = new Waitable<ClientMmap>(lock.newCondition());
    mmaps.put(key, waitable);
    // Load the entry
    boolean success = false;
    ClientMmap mmap = null;
    try {
        try {
            lock.unlock();
            mmap = ClientMmap.load(this, in, key.block, key.datanode);
        } finally {
            lock.lock();
        }
        if (cacheCleaner == null) {
            cacheCleaner = new CacheCleaner(this);
            ScheduledFuture<?> future = executor.scheduleAtFixedRate(cacheCleaner, timeoutNs,
                    timeoutNs / runsPerTimeout, TimeUnit.NANOSECONDS);
            cacheCleaner.setFuture(future);
        }
        success = true;
    } finally {
        if (!success) {
            LOG.warn("failed to create mmap for datanodeID=" + key.datanode + ", " + "block=" + key.block);
            mmaps.remove(key);
        }
        waitable.provide(mmap);
    }
    if (LOG.isDebugEnabled()) {
        LOG.info("created a new ClientMmap for block " + key.block + " on datanode " + key.datanode);
    }
    return mmap;
}

From source file:com.hurence.logisland.processor.hbase.AbstractPutHBase.java

@Override
public Collection<Record> process(final ProcessContext context, final Collection<Record> records)
        throws ProcessException {

    final int batchSize = context.getPropertyValue(BATCH_SIZE).asInteger();

    if (records == null || records.size() == 0) {
        return Collections.emptyList();
    }

    final Map<String, List<PutRecord>> tablePuts = new HashMap<>();

    // Group Records by HBase Table
    for (final Record record : records) {
        final PutRecord putRecord = createPut(context, record, serializer);

        if (putRecord == null) {
            // sub-classes should log appropriate error messages before returning null
            record.addError(ProcessError.RECORD_CONVERSION_ERROR.toString(), getLogger(),
                    "Failed to produce a put for Record from {}" + record.toString());
        } else if (!putRecord.isValid()) {
            if (StringUtils.isBlank(putRecord.getTableName())) {
                record.addError(ProcessError.BAD_RECORD.toString(), getLogger(),
                        "Missing table name for Record " + record.toString());
            } else if (null == putRecord.getRow()) {
                record.addError(ProcessError.BAD_RECORD.toString(), getLogger(),
                        "Missing row id for Record " + record.toString());
            } else if (putRecord.getColumns() == null || putRecord.getColumns().isEmpty()) {
                record.addError(ProcessError.BAD_RECORD.toString(), getLogger(),
                        "No columns provided for Record " + record.toString());
            } else {
                // really shouldn't get here, but just in case
                record.addError(ProcessError.RECORD_CONVERSION_ERROR.toString(), getLogger(),
                        "Failed to produce a put for Record from " + record.toString());
            }
        } else {
            List<PutRecord> putRecords = tablePuts.get(putRecord.getTableName());
            if (putRecords == null) {
                putRecords = new ArrayList<>();
                tablePuts.put(putRecord.getTableName(), putRecords);
            }
            putRecords.add(putRecord);
        }
    }

    getLogger().debug("Sending {} Records to HBase in {} put operations",
            new Object[] { records.size(), tablePuts.size() });

    final long start = System.nanoTime();
    final List<PutRecord> successes = new ArrayList<>();

    for (Map.Entry<String, List<PutRecord>> entry : tablePuts.entrySet()) {
        try {
            clientService.put(entry.getKey(), entry.getValue());
            successes.addAll(entry.getValue());
        } catch (Exception e) {
            getLogger().error(e.getMessage(), e);

            for (PutRecord putRecord : entry.getValue()) {
                String msg = String.format("Failed to send %s to HBase due to %s; routing to failure",
                        putRecord.getRecord(), e);
                putRecord.getRecord().addError("HBASE_PUT_RECORD_FAILURE", getLogger(), msg);
            }
        }
    }

    final long sendMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    getLogger().debug("Sent {} Records to HBase successfully in {} milliseconds",
            new Object[] { successes.size(), sendMillis });

    for (PutRecord putRecord : successes) {
        final String details = "Put " + putRecord.getColumns().size() + " cells to HBase";
        //session.getProvenanceReporter().send(putRecord.getRecord(), getTransitUri(putRecord), details, sendMillis);
    }
    return records;

}

From source file:com.facebook.imagepipeline.animated.impl.AnimatedDrawableCachingBackendImpl.java

private CloseableReference<Bitmap> obtainBitmapInternal() {
    Bitmap bitmap;
    synchronized (this) {
        long nowNanos = System.nanoTime();
        long waitUntilNanos = nowNanos + TimeUnit.NANOSECONDS.convert(20, TimeUnit.MILLISECONDS);
        while (mFreeBitmaps.isEmpty() && nowNanos < waitUntilNanos) {
            try {
                TimeUnit.NANOSECONDS.timedWait(this, waitUntilNanos - nowNanos);
                nowNanos = System.nanoTime();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }

        if (mFreeBitmaps.isEmpty()) {
            bitmap = createNewBitmap();
        } else {
            bitmap = mFreeBitmaps.remove(mFreeBitmaps.size() - 1);
        }
    }
    return CloseableReference.of(bitmap, mResourceReleaserForBitmaps);
}

From source file:com.sworddance.taskcontrol.TestTaskControl.java

/**
 * make sure an empty taskgroup immediately reports that it is done (especially with result)
 * @throws Exception
 *
 */
@SuppressWarnings("unchecked")
public void testEmptyTaskGroup() throws Exception {
    TaskControl taskControl = new TaskControl(new TestPriorityComparator(), 1,
            LogFactory.getLog(this.getClass()));
    TaskGroup taskGroup = taskControl.newTaskGroup("empty");
    startTaskControl(taskControl, taskGroup);
    FutureResult result = taskGroup.getResult();

    // should immediately return result.
    result.get(1L, TimeUnit.NANOSECONDS);

}