Example usage for java.util.concurrent TimeUnit NANOSECONDS

List of usage examples for java.util.concurrent TimeUnit NANOSECONDS

Introduction

On this page you can find usage examples for java.util.concurrent TimeUnit NANOSECONDS.

Prototype

TimeUnit NANOSECONDS

Document

Time unit representing one thousandth of a microsecond.
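
Before the examples below, here is a minimal, self-contained sketch (not taken from any of the source files on this page) of the conversions that recur throughout them: turning an elapsed System.nanoTime() interval into milliseconds, and turning a caller-supplied timeout into nanoseconds.

import java.util.concurrent.TimeUnit;

public class NanosSketch {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        Thread.sleep(50); // stand-in for real work

        // Convert an elapsed nanosecond interval to milliseconds for logging.
        long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        System.out.println("elapsed: " + elapsedMillis + " ms");

        // Convert a (timeout, unit) pair into nanoseconds, e.g. to compute a deadline.
        long timeout = 5;
        TimeUnit unit = TimeUnit.SECONDS;
        long deadlineNanos = System.nanoTime() + unit.toNanos(timeout);

        // The general form: convert 5 seconds into this unit (nanoseconds).
        long fiveSecondsInNanos = TimeUnit.NANOSECONDS.convert(5, TimeUnit.SECONDS);
        System.out.println(fiveSecondsInNanos + " ns, deadline at " + deadlineNanos);
    }
}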

Usage

From source file:com.streamsets.datacollector.util.SystemProcessImpl.java

/**
 * Java 1.7 does not have Process.waitFor(timeout)
 */
private static boolean waitFor(Process process, long timeout, TimeUnit unit) {
    long startTime = System.nanoTime();
    long rem = unit.toNanos(timeout);
    do {
        try {
            process.exitValue();
            return true;
        } catch (IllegalThreadStateException ex) {
            if (rem > 0)
                ThreadUtil.sleep(Math.min(TimeUnit.NANOSECONDS.toMillis(rem) + 1, 100));
        }
        rem = unit.toNanos(timeout) - (System.nanoTime() - startTime);
    } while (rem > 0);
    return false;
}
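
A hypothetical call site for the helper above might look like the following; the command and the 30-second budget are illustrative and not part of the SystemProcessImpl source.

Process process = new ProcessBuilder("sleep", "5").redirectErrorStream(true).start();
// Poll for up to 30 seconds; waitFor converts the timeout to nanoseconds internally.
boolean finished = waitFor(process, 30, TimeUnit.SECONDS);
if (!finished) {
    process.destroy(); // budget exhausted, kill the process
}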

From source file:org.apache.drill.exec.store.hive.HiveMetadataProvider.java

/**
 * Return {@link LogicalInputSplit}s for the given {@link HiveReadEntry}. Splits are first looked up in the
 * cache; if not found, {@link InputFormat#getSplits(JobConf, int)} is used to compute them.
 *
 * @param hiveReadEntry Subset of the {@link HiveReadEntry} used when creating this object.
 * @return list of logically grouped input splits
 */
public List<LogicalInputSplit> getInputSplits(final HiveReadEntry hiveReadEntry) {
    final Stopwatch timeGetSplits = Stopwatch.createStarted();
    try {
        if (!isPartitionedTable) {
            return getTableInputSplits();
        }

        final List<LogicalInputSplit> splits = Lists.newArrayList();
        for (HivePartition p : hiveReadEntry.getPartitions()) {
            splits.addAll(getPartitionInputSplits(p));
        }
        return splits;
    } catch (final Exception e) {
        logger.error("Failed to get InputSplits", e);
        throw new DrillRuntimeException("Failed to get InputSplits", e);
    } finally {
        logger.debug("Took {} s to get InputSplits from {}.{}",
                timeGetSplits.elapsed(TimeUnit.NANOSECONDS) / 1000, hiveReadEntry.getTable().getDbName(),
                hiveReadEntry.getTable().getTableName());
    }
}

From source file:io.nats.client.ITClusterTest.java

@Test
public void testBasicClusterReconnect() throws Exception {
    try (NatsServer s1 = runServerOnPort(1222)) {
        try (NatsServer s2 = runServerOnPort(1224)) {

            Options opts = new Options.Builder(Nats.defaultOptions()).dontRandomize().build();

            final AtomicBoolean dcbCalled = new AtomicBoolean(false);
            final CountDownLatch dcLatch = new CountDownLatch(1);
            opts.disconnectedCb = new DisconnectedCallback() {
                public void onDisconnect(ConnectionEvent event) {
                    // Suppress any additional calls
                    if (dcbCalled.get()) {
                        return;
                    }
                    dcbCalled.set(true);
                    dcLatch.countDown();
                }
            };

            final CountDownLatch rcLatch = new CountDownLatch(1);
            opts.reconnectedCb = new ReconnectedCallback() {
                public void onReconnect(ConnectionEvent event) {
                    logger.info("rcb called");
                    rcLatch.countDown();
                }
            };

            try (Connection c = Nats.connect(servers, opts)) {
                assertNotNull(c.getConnectedUrl());

                s1.shutdown();

                // wait for disconnect
                assertTrue("Did not receive a disconnect callback message",
                        await(dcLatch, 2, TimeUnit.SECONDS));

                long reconnectTimeStart = System.nanoTime();

                assertTrue("Did not receive a reconnect callback message: ",
                        await(rcLatch, 2, TimeUnit.SECONDS));

                assertTrue(c.getConnectedUrl().equals(testServers[2]));

                // Make sure we did not wait on reconnect for default time.
                // Reconnect should be fast since it will be a switch to the
                // second server and not be dependent on server restart time.
                // assertTrue(reconElapsed.get() <= cf.getReconnectWait());

                long maxDuration = 100;
                long reconnectTime = System.nanoTime() - reconnectTimeStart;
                assertFalse(
                        String.format("Took longer than expected to reconnect: %dms\n",
                                TimeUnit.NANOSECONDS.toMillis(reconnectTime)),
                        TimeUnit.NANOSECONDS.toMillis(reconnectTime) > maxDuration);
            }
        }
    }
}

From source file:org.agatom.springatom.cmp.wizards.validation.ValidationServiceImpl.java

@Override
public void validate(final Validator localValidator, final Errors errors, final WizardResult result) {

    final long startTime = System.nanoTime();
    localValidator.validate(localValidator, errors);
    final long endTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);

    result.addDebugData(WizardDebugDataKeys.VALIDATOR, ClassUtils.getShortName(localValidator.getClass()));
    result.addDebugData(WizardDebugDataKeys.VALIDATION_TIME, endTime);

}

From source file:org.apache.solr.handler.component.AlfrescoHttpShardHandler.java

@Override
public void submit(final ShardRequest sreq, final String shard, final ModifiableSolrParams params) {
    // do this outside of the callable for thread safety reasons
    final List<String> urls = getURLs(shard);

    Callable<ShardResponse> task = new Callable<ShardResponse>() {
        @Override
        public ShardResponse call() throws Exception {

            ShardResponse srsp = new ShardResponse();
            if (sreq.nodeName != null) {
                srsp.setNodeName(sreq.nodeName);
            }
            srsp.setShardRequest(sreq);
            srsp.setShard(shard);
            SimpleSolrResponse ssr = new SimpleSolrResponse();
            srsp.setSolrResponse(ssr);
            long startTime = System.nanoTime();

            try {
                String json = params.get(AbstractQParser.ALFRESCO_JSON);
                params.remove(AbstractQParser.ALFRESCO_JSON);

                params.remove(CommonParams.WT); // use default (currently javabin)
                params.remove(CommonParams.VERSION);

                // SolrRequest req = new QueryRequest(SolrRequest.METHOD.POST, "/select");
                // use generic request to avoid extra processing of queries
                AlfrescoQueryRequest req = new AlfrescoQueryRequest(params);
                req.setMethod(SolrRequest.METHOD.POST);
                if (json != null) {
                    req.setContentStream(new ContentStreamBase.StringStream(json));
                }

                // no need to set the response parser as binary is the default
                // req.setResponseParser(new BinaryResponseParser());

                // if there are no shards available for a slice, urls.size()==0
                if (urls.size() == 0) {
                    // TODO: what's the right error code here? We should use the same thing when
                    // all of the servers for a shard are down.
                    throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,
                            "no servers hosting shard: " + shard);
                }

                if (urls.size() <= 1) {
                    String url = urls.get(0);
                    srsp.setShardAddress(url);
                    SolrServer server = new HttpSolrServer(url, httpClient);
                    try {
                        ssr.nl = server.request(req);
                    } finally {
                        server.shutdown();
                    }
                } else {
                    LBHttpSolrServer.Rsp rsp = httpShardHandlerFactory.makeLoadBalancedRequest(req, urls);
                    ssr.nl = rsp.getResponse();
                    srsp.setShardAddress(rsp.getServer());
                }
            } catch (ConnectException cex) {
                srsp.setException(cex); //????
            } catch (Exception th) {
                srsp.setException(th);
                if (th instanceof SolrException) {
                    srsp.setResponseCode(((SolrException) th).code());
                } else {
                    srsp.setResponseCode(-1);
                }
            }

            ssr.elapsedTime = TimeUnit.MILLISECONDS.convert(System.nanoTime() - startTime,
                    TimeUnit.NANOSECONDS);

            return srsp;
        }
    };

    pending.add(completionService.submit(task));
}

From source file:co.pugo.convert.ConvertServlet.java

/**
 * Download image data and encode it as Base64.
 * @param imageLinks set of image links extracted with extractImageLinks()
 * @return map, key = imageLink, value = base64 encoded image
 */
private HashMap<String, String> downloadImageData(Set<String> imageLinks) {
    HashMap<String, String> imageData = new HashMap<>();
    ExecutorService service = Executors.newCachedThreadPool();
    for (final String imageLink : imageLinks) {
        RunnableFuture<byte[]> future = new FutureTask<>(new Callable<byte[]>() {
            @Override
            public byte[] call() {
                try {
                    URL srcUrl = new URL(imageLink);
                    URLConnection urlConnection = srcUrl.openConnection();
                    return IOUtils.toByteArray(urlConnection.getInputStream());
                } catch (IOException e) {
                    LOG.severe(e.getMessage());
                    return null;
                }
            }
        });
        service.execute(future);
        try {
            imageData.put(imageLink, Base64.encodeBase64String(future.get()));
        } catch (InterruptedException | ExecutionException e) {
            LOG.severe(e.getMessage());
        }
    }
    service.shutdown();
    try {
        service.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        LOG.severe(e.getMessage());
    }
    return imageData;
}

From source file:com.datastax.driver.core.LastResultSetFuture.java

/**
 * {@inheritDoc}
 * <p>
 * <i>Note:</i> This method will keep executing the remaining statements until one
 * generates an error or there is no more time left, and will return the result set
 * from the last statement executed.
 *
 * @author paouelle
 *
 * @see com.google.common.util.concurrent.AbstractFuture#get(long, java.util.concurrent.TimeUnit)
 */
@Override
public ResultSet get(long timeout, TimeUnit unit)
        throws InterruptedException, TimeoutException, ExecutionException {
    final long end = System.nanoTime() + unit.toNanos(timeout);
    ResultSetFuture future;

    while (true) {
        synchronized (statements) {
            future = this.future;
            // note that our listener above will actually be executing the next
            // statements automatically
            if (!statements.hasNext()) {
                break;
            }
        }
        // --- Future treats negative timeouts just like zero.
        future.get(end - System.nanoTime(), TimeUnit.NANOSECONDS);
    }
    return future.get(end - System.nanoTime(), TimeUnit.NANOSECONDS);
}

From source file:com.netflix.genie.web.jobs.workflow.impl.JobTask.java

/**
 * {@inheritDoc}
 */
@Override
public void executeTask(@NotNull final Map<String, Object> context) throws GenieException, IOException {
    final long start = System.nanoTime();
    final Set<Tag> tags = Sets.newHashSet();
    try {
        final JobExecutionEnvironment jobExecEnv = (JobExecutionEnvironment) context
                .get(JobConstants.JOB_EXECUTION_ENV_KEY);
        final String jobWorkingDirectory = jobExecEnv.getJobWorkingDir().getCanonicalPath();
        final Writer writer = (Writer) context.get(JobConstants.WRITER_KEY);
        final String jobId = jobExecEnv.getJobRequest().getId()
                .orElseThrow(() -> new GeniePreconditionException("No job id found. Unable to continue"));
        log.info("Starting Job Task for job {}", jobId);

        final Optional<String> setupFile = jobExecEnv.getJobRequest().getSetupFile();
        if (setupFile.isPresent()) {
            final String jobSetupFile = setupFile.get();
            if (StringUtils.isNotBlank(jobSetupFile)) {
                final String localPath = jobWorkingDirectory + JobConstants.FILE_PATH_DELIMITER + jobSetupFile
                        .substring(jobSetupFile.lastIndexOf(JobConstants.FILE_PATH_DELIMITER) + 1);

                fts.getFile(jobSetupFile, localPath);

                writer.write("# Sourcing setup file specified in job request" + System.lineSeparator());
                writer.write(
                        JobConstants.SOURCE
                                + localPath.replace(jobWorkingDirectory,
                                        "${" + JobConstants.GENIE_JOB_DIR_ENV_VAR + "}")
                                + System.lineSeparator());

                // Append new line
                writer.write(System.lineSeparator());
            }
        }

        // Iterate over and get all configs and dependencies
        final Collection<String> configsAndDependencies = Sets.newHashSet();
        configsAndDependencies.addAll(jobExecEnv.getJobRequest().getDependencies());
        configsAndDependencies.addAll(jobExecEnv.getJobRequest().getConfigs());
        for (final String dependentFile : configsAndDependencies) {
            if (StringUtils.isNotBlank(dependentFile)) {
                final String localPath = jobWorkingDirectory + JobConstants.FILE_PATH_DELIMITER + dependentFile
                        .substring(dependentFile.lastIndexOf(JobConstants.FILE_PATH_DELIMITER) + 1);

                fts.getFile(dependentFile, localPath);
            }
        }

        // Copy down the attachments if any to the current working directory
        this.attachmentService.copy(jobId, jobExecEnv.getJobWorkingDir());
        // Delete the files from the attachment service to save space on disk
        this.attachmentService.delete(jobId);

        // Print out the current environment to an env file before running the command.
        writer.write("# Dump the environment to a env.log file" + System.lineSeparator());
        writer.write("env | sort > " + "${" + JobConstants.GENIE_JOB_DIR_ENV_VAR + "}"
                + JobConstants.GENIE_ENV_PATH + System.lineSeparator());

        // Append new line
        writer.write(System.lineSeparator());

        writer.write("# Kick off the command in background mode and wait for it using its pid"
                + System.lineSeparator());

        writer.write(StringUtils.join(jobExecEnv.getCommand().getExecutable(), StringUtils.SPACE)
                + JobConstants.WHITE_SPACE + jobExecEnv.getJobRequest().getCommandArgs().orElse(EMPTY_STRING)
                + JobConstants.STDOUT_REDIRECT + "${" + JobConstants.GENIE_JOB_DIR_ENV_VAR + "}/"
                + JobConstants.STDOUT_LOG_FILE_NAME + JobConstants.STDERR_REDIRECT + "${"
                + JobConstants.GENIE_JOB_DIR_ENV_VAR + "}/" + JobConstants.STDERR_LOG_FILE_NAME + " &"
                + System.lineSeparator());

        // Save the PID of the child process; used in trap handlers to kill it and verify termination
        writer.write(JobConstants.EXPORT + JobConstants.CHILDREN_PID_ENV_VAR + "=$!" + System.lineSeparator());
        // Wait for the process started above in background mode. Using wait lets us be interrupted by kill signals.
        writer.write("wait ${" + JobConstants.CHILDREN_PID_ENV_VAR + "}" + System.lineSeparator());

        // Append new line
        writer.write(System.lineSeparator());

        // capture exit code and write to temporary genie.done
        writer.write("# Write the return code from the command in the done file." + System.lineSeparator());
        writer.write(JobConstants.GENIE_DONE_FILE_CONTENT_PREFIX + "${" + JobConstants.GENIE_JOB_DIR_ENV_VAR
                + "}" + "/" + JobConstants.GENIE_TEMPORARY_DONE_FILE_NAME + System.lineSeparator());

        // Atomically swap the temporary and actual genie.done files if one doesn't already exist
        writer.write(
                "# Swapping in the done file, unless one already exists (created by a trap handler)." + System.lineSeparator());
        writer.write("mv -n " + "${" + JobConstants.GENIE_JOB_DIR_ENV_VAR + "}" + "/"
                + JobConstants.GENIE_TEMPORARY_DONE_FILE_NAME + " " + "${" + JobConstants.GENIE_JOB_DIR_ENV_VAR
                + "}" + "/" + JobConstants.GENIE_DONE_FILE_NAME + System.lineSeparator());

        // Print the timestamp once it's done running.
        writer.write("echo End: `date '+%Y-%m-%d %H:%M:%S'`\n");

        log.info("Finished Job Task for job {}", jobId);
        MetricsUtils.addSuccessTags(tags);
    } catch (final Throwable t) {
        MetricsUtils.addFailureTagsWithException(tags, t);
        throw t;
    } finally {
        this.getRegistry().timer(JOB_TASK_TIMER_NAME, tags).record(System.nanoTime() - start,
                TimeUnit.NANOSECONDS);
    }
}

From source file:org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.java

private void locateThenCall() {
    conn.getLocator().getRegionLocation(tableName, row, tries > 1).whenComplete((loc, error) -> {
        if (error != null) {
            onError(error,
                    () -> "Locate '" + Bytes.toStringBinary(row) + "' in " + tableName + " failed, tries = "
                            + tries + ", maxAttempts = " + maxAttempts + ", timeout = "
                            + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + " ms, time elapsed = "
                            + elapsedMs() + " ms",
                    err -> {
                    });
            return;
        }
        call(loc);
    });
}

From source file:at.ac.univie.isc.asio.engine.DatasetResource.java

/**
 * Invoke request processing and observe the results.
 *
 * @param async   response continuation
 * @param command parsed request
 */
private void process(final AsyncResponse async, final Command command) {
    try {
        final Subscription subscription = connector.accept(command).subscribe(SendResults.to(async));
        AsyncListener.cleanUp(subscription).listenTo(async);
        async.setTimeout(timeout.getAs(TimeUnit.NANOSECONDS, 0L), TimeUnit.NANOSECONDS);
    } catch (final Throwable error) {
        resumeWithError(async, error);
        throw error; // try to trigger uncaught exception handlers
    }
}