Example usage for java.util.concurrent TimeUnit NANOSECONDS

List of usage examples for java.util.concurrent TimeUnit NANOSECONDS

Introduction

On this page you can find usage examples for java.util.concurrent TimeUnit NANOSECONDS.

Prototype

TimeUnit NANOSECONDS

To view the source code for java.util.concurrent TimeUnit NANOSECONDS, click the Source Link.

Document

Time unit representing one thousandth of a microsecond.
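
The examples below share a common pattern: capture System.nanoTime() before and after an operation, then convert the elapsed nanoseconds to a coarser unit via TimeUnit.NANOSECONDS. A minimal, self-contained sketch of that pattern (class and variable names here are illustrative only, not taken from the examples):

import java.util.concurrent.TimeUnit;

public class ElapsedTimeExample {
    public static void main(String[] args) throws InterruptedException {
        long start = System.nanoTime();
        Thread.sleep(250); // stand-in for the operation being timed

        long elapsedNanos = System.nanoTime() - start;

        // Two equivalent ways to express the elapsed time in milliseconds
        long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(elapsedNanos);
        long elapsedMillisAlt = TimeUnit.MILLISECONDS.convert(elapsedNanos, TimeUnit.NANOSECONDS);

        System.out.println("Elapsed: " + elapsedMillis + " ms (" + elapsedMillisAlt + " ms)");
    }
}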

Usage

From source file:org.jasig.springframework.web.portlet.context.PortletContextLoader.java

/**
 * Initialize Spring's portlet application context for the given portlet context,
 * using the application context provided at construction time, or creating a new one
 * according to the "{@link #CONTEXT_CLASS_PARAM contextClass}" and
 * "{@link #CONFIG_LOCATION_PARAM contextConfigLocation}" context-params.
 * @param portletContext current portlet context
 * @return the new PortletApplicationContext
 * @see #CONTEXT_CLASS_PARAM
 * @see #CONFIG_LOCATION_PARAM
 */
public PortletApplicationContext initWebApplicationContext(PortletContext portletContext) {
    if (portletContext
            .getAttribute(PortletApplicationContext.ROOT_PORTLET_APPLICATION_CONTEXT_ATTRIBUTE) != null) {
        throw new IllegalStateException(
                "Cannot initialize context because there is already a root portlet application context present - "
                        + "check whether you have multiple PortletContextLoader* definitions in your portlet.xml!");
    }

    Log logger = LogFactory.getLog(PortletContextLoader.class);
    portletContext.log("Initializing Spring root PortletApplicationContext");
    if (logger.isInfoEnabled()) {
        logger.info("Root portlet PortletApplicationContext: initialization started");
    }
    long startTime = System.nanoTime();

    try {
        // Store context in local instance variable, to guarantee that
        // it is available on PortletContext shutdown.
        if (this.context == null) {
            this.context = createPortletApplicationContext(portletContext);
        }
        if (this.context instanceof ConfigurablePortletApplicationContext) {
            configureAndRefreshPortletApplicationContext((ConfigurablePortletApplicationContext) this.context,
                    portletContext);
        }
        portletContext.setAttribute(PortletApplicationContext.ROOT_PORTLET_APPLICATION_CONTEXT_ATTRIBUTE,
                this.context);

        ClassLoader ccl = Thread.currentThread().getContextClassLoader();
        if (ccl == PortletContextLoader.class.getClassLoader()) {
            currentContext = this.context;
        } else if (ccl != null) {
            currentContextPerThread.put(ccl, this.context);
        }

        if (logger.isDebugEnabled()) {
            logger.debug("Published root PortletApplicationContext as PortletContext attribute with name ["
                    + PortletApplicationContext.ROOT_PORTLET_APPLICATION_CONTEXT_ATTRIBUTE + "]");
        }
        if (logger.isInfoEnabled()) {
            long elapsedTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
            logger.info("Root PortletApplicationContext: initialization completed in " + elapsedTime + " ms");
        }

        return this.context;
    } catch (RuntimeException ex) {
        logger.error("Portlet context initialization failed", ex);
        portletContext.setAttribute(PortletApplicationContext.ROOT_PORTLET_APPLICATION_CONTEXT_ATTRIBUTE, ex);
        throw ex;
    } catch (Error err) {
        logger.error("Portlet context initialization failed", err);
        portletContext.setAttribute(PortletApplicationContext.ROOT_PORTLET_APPLICATION_CONTEXT_ATTRIBUTE, err);
        throw err;
    }
}

From source file:org.apache.solr.client.solrj.impl.HttpClusterStateProvider.java

private Map<String, List<String>> getAliases(boolean forceFetch) {
    if (this.liveNodes == null) {
        throw new RuntimeException(
                "We don't know of any live_nodes to fetch the" + " latest aliases information from. "
                        + "If you think your Solr cluster is up and is accessible,"
                        + " you could try re-creating a new CloudSolrClient using working"
                        + " solrUrl(s) or zkHost(s).");
    }

    if (forceFetch || this.aliases == null || TimeUnit.SECONDS.convert((System.nanoTime() - aliasesTimestamp),
            TimeUnit.NANOSECONDS) > getCacheTimeout()) {
        for (String nodeName : liveNodes) {
            try (HttpSolrClient client = new HttpSolrClient.Builder()
                    .withBaseSolrUrl(Utils.getBaseUrlForNodeName(nodeName, urlScheme))
                    .withHttpClient(httpClient).build()) {

                Map<String, List<String>> aliases = new CollectionAdminRequest.ListAliases().process(client)
                        .getAliasesAsLists();
                this.aliases = aliases;
                this.aliasesTimestamp = System.nanoTime();
                return Collections.unmodifiableMap(this.aliases);
            } catch (SolrServerException | RemoteSolrException | IOException e) {
                // Situation where we're hitting an older Solr which doesn't have LISTALIASES
                if (e instanceof RemoteSolrException && ((RemoteSolrException) e).code() == 400) {
                    log.warn("LISTALIASES not found, possibly using older Solr server. Aliases won't work"
                            + " unless you re-create the CloudSolrClient using zkHost(s) or upgrade Solr server",
                            e);
                    this.aliases = Collections.emptyMap();
                    this.aliasesTimestamp = System.nanoTime();
                    return aliases;
                }
                log.warn("Attempt to fetch cluster state from "
                        + Utils.getBaseUrlForNodeName(nodeName, urlScheme) + " failed.", e);
            }
        }

        throw new RuntimeException("Tried fetching aliases using all the node names we knew of, i.e. "
                + liveNodes + ". However, " + "succeeded in obtaining the cluster state from none of them."
                + "If you think your Solr cluster is up and is accessible,"
                + " you could try re-creating a new CloudSolrClient using a working" + " solrUrl or zkHost.");
    } else {
        return Collections.unmodifiableMap(this.aliases); // cached copy is fresh enough
    }
}

From source file:com.netflix.genie.core.services.impl.LocalJobRunner.java

private void createInitFailureDetailsFile(final String id, final Exception e) {
    final long start = System.nanoTime();
    try {
        final File jobDir = new File(this.baseWorkingDirPath.getFile(), id);
        if (jobDir.exists()) {
            final File detailsFile = new File(jobDir, JobConstants.GENIE_INIT_FAILURE_MESSAGE_FILE_NAME);
            final boolean detailsFileExists = !detailsFile.createNewFile();
            if (detailsFileExists) {
                log.warn("Init failure details file exists");
            }
            try (final PrintWriter p = new PrintWriter(
                    new OutputStreamWriter(new FileOutputStream(detailsFile), StandardCharsets.UTF_8))) {
                p.format(" *** Initialization failure for job: %s ***%n" + "%n" + "Exception: %s - %s%n"
                        + "Trace:%n", id, e.getClass().getCanonicalName(), e.getMessage());
                e.printStackTrace(p);
            }
            log.info("Created init failure details file {}", detailsFile);
        } else {
            log.error("Could not create init failure details file, job directory does not exist");
        }
    } catch (Throwable t) {
        log.error("Failed to create init failure details file", t);
    } finally {
        this.createInitFailureDetailsFileTimer.record(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    }
}

From source file:gobblin.service.modules.flow.IdentityFlowToJobSpecCompiler.java

@Override
public Map<Spec, SpecExecutorInstanceProducer> compileFlow(Spec spec) {
    Preconditions.checkNotNull(spec);
    Preconditions.checkArgument(spec instanceof FlowSpec,
            "IdentityFlowToJobSpecCompiler only converts FlowSpec to JobSpec");

    long startTime = System.nanoTime();
    Map<Spec, SpecExecutorInstanceProducer> specExecutorInstanceMap = Maps.newLinkedHashMap();

    FlowSpec flowSpec = (FlowSpec) spec;
    String source = flowSpec.getConfig().getString(ServiceConfigKeys.FLOW_SOURCE_IDENTIFIER_KEY);
    String destination = flowSpec.getConfig().getString(ServiceConfigKeys.FLOW_DESTINATION_IDENTIFIER_KEY);
    log.info(String.format("Compiling flow for source: %s and destination: %s", source, destination));

    JobSpec jobSpec;
    JobSpec.Builder jobSpecBuilder = JobSpec.builder(flowSpec.getUri()).withConfig(flowSpec.getConfig())
            .withDescription(flowSpec.getDescription()).withVersion(flowSpec.getVersion());

    if (flowSpec.getTemplateURIs().isPresent() && templateCatalog.isPresent()) {
        // Only first template uri will be honored for Identity
        jobSpecBuilder = jobSpecBuilder.withTemplate(flowSpec.getTemplateURIs().get().iterator().next());
        try {
            jobSpec = new ResolvedJobSpec(jobSpecBuilder.build(), templateCatalog.get());
            log.info("Resolved JobSpec properties are: " + jobSpec.getConfigAsProperties());
        } catch (SpecNotFoundException | JobTemplate.TemplateException e) {
            throw new RuntimeException("Could not resolve template in JobSpec from TemplateCatalog", e);
        }
    } else {
        jobSpec = jobSpecBuilder.build();
        log.info("Unresolved JobSpec properties are: " + jobSpec.getConfigAsProperties());
    }

    // Remove schedule
    jobSpec.setConfig(jobSpec.getConfig().withoutPath(ConfigurationKeys.JOB_SCHEDULE_KEY));

    // Add job.name and job.group
    if (flowSpec.getConfig().hasPath(ConfigurationKeys.FLOW_NAME_KEY)) {
        jobSpec.setConfig(jobSpec.getConfig().withValue(ConfigurationKeys.JOB_NAME_KEY,
                flowSpec.getConfig().getValue(ConfigurationKeys.FLOW_NAME_KEY)));
    }
    if (flowSpec.getConfig().hasPath(ConfigurationKeys.FLOW_GROUP_KEY)) {
        jobSpec.setConfig(jobSpec.getConfig().withValue(ConfigurationKeys.JOB_GROUP_KEY,
                flowSpec.getConfig().getValue(ConfigurationKeys.FLOW_GROUP_KEY)));
    }

    // Add flow execution id for this compilation
    long flowExecutionId = System.currentTimeMillis();
    jobSpec.setConfig(jobSpec.getConfig().withValue(ConfigurationKeys.FLOW_EXECUTION_ID_KEY,
            ConfigValueFactory.fromAnyRef(flowExecutionId)));

    // Reset properties in Spec from Config
    jobSpec.setConfigAsProperties(ConfigUtils.configToProperties(jobSpec.getConfig()));

    for (TopologySpec topologySpec : topologySpecMap.values()) {
        try {
            Map<String, String> capabilities = (Map<String, String>) topologySpec
                    .getSpecExecutorInstanceProducer().getCapabilities().get();
            for (Map.Entry<String, String> capability : capabilities.entrySet()) {
                log.info(String.format(
                        "Evaluating current JobSpec: %s against TopologySpec: %s with "
                                + "capability of source: %s and destination: %s ",
                        jobSpec.getUri(), topologySpec.getUri(), capability.getKey(), capability.getValue()));
                if (source.equals(capability.getKey()) && destination.equals(capability.getValue())) {
                    specExecutorInstanceMap.put(jobSpec, topologySpec.getSpecExecutorInstanceProducer());
                    log.info(String.format(
                            "Current JobSpec: %s is executable on TopologySpec: %s. Added TopologySpec as candidate.",
                            jobSpec.getUri(), topologySpec.getUri()));

                    log.info("Since we found a candidate executor, we will not try to compute more. "
                            + "(Intended limitation for IdentityFlowToJobSpecCompiler)");
                    return specExecutorInstanceMap;
                }
            }
        } catch (InterruptedException | ExecutionException e) {
            Instrumented.markMeter(this.flowCompilationFailedMeter);
            throw new RuntimeException("Cannot determine topology capabilities", e);
        }
    }
    Instrumented.markMeter(this.flowCompilationSuccessFulMeter);
    Instrumented.updateTimer(this.flowCompilationTimer, System.nanoTime() - startTime, TimeUnit.NANOSECONDS);

    return specExecutorInstanceMap;
}

From source file:co.marcin.novaguilds.impl.storage.AbstractDatabaseStorage.java

/**
 * Prepares the statements
 */
protected void prepareStatements() {
    try {
        long nanoTime = System.nanoTime();
        LoggerUtils.info("Preparing statements...");
        preparedStatementMap.clear();
        connect();

        int returnKeys = isStatementReturnGeneratedKeysSupported() ? Statement.RETURN_GENERATED_KEYS
                : Statement.NO_GENERATED_KEYS;

        //Guilds insert (id, tag, name, leader, spawn, allies, alliesinv, war, nowarinv, money, points, lives, timerest, lostlive, activity, created, bankloc, slots, openinv)
        String guildsInsertSQL = "INSERT INTO `" + Config.MYSQL_PREFIX.getString()
                + "guilds` VALUES(null,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);";
        PreparedStatement guildsInsert = getConnection().prepareStatement(guildsInsertSQL, returnKeys);
        preparedStatementMap.put(PreparedStatements.GUILDS_INSERT, guildsInsert);

        //Guilds select
        String guildsSelectSQL = "SELECT * FROM `" + Config.MYSQL_PREFIX.getString() + "guilds`";
        PreparedStatement guildsSelect = getConnection().prepareStatement(guildsSelectSQL);
        preparedStatementMap.put(PreparedStatements.GUILDS_SELECT, guildsSelect);

        //Guilds delete
        String guildsDeleteSQL = "DELETE FROM `" + Config.MYSQL_PREFIX.getString() + "guilds` WHERE `id`=?";
        PreparedStatement guildsDelete = getConnection().prepareStatement(guildsDeleteSQL);
        preparedStatementMap.put(PreparedStatements.GUILDS_DELETE, guildsDelete);

        //Guilds update
        String guildsUpdateSQL = "UPDATE `" + Config.MYSQL_PREFIX.getString()
                + "guilds` SET `tag`=?, `name`=?, `leader`=?, `spawn`=?, `allies`=?, `alliesinv`=?, `war`=?, `nowarinv`=?, `money`=?, `points`=?, `lives`=?, `timerest`=?, `lostlive`=?, `activity`=?, `bankloc`=?, `slots`=?, `openinv`=?, `banner`=? WHERE `id`=?";
        PreparedStatement guildsUpdate = getConnection().prepareStatement(guildsUpdateSQL);
        preparedStatementMap.put(PreparedStatements.GUILDS_UPDATE, guildsUpdate);

        //Players insert (id, uuid, name, guild, invitedto, points, kills, deaths)
        String playersInsertSQL = "INSERT INTO `" + Config.MYSQL_PREFIX.getString()
                + "players` VALUES(null,?,?,?,?,?,?,?)";
        PreparedStatement playersInsert = getConnection().prepareStatement(playersInsertSQL, returnKeys);
        preparedStatementMap.put(PreparedStatements.PLAYERS_INSERT, playersInsert);

        //Players select
        String playerSelectSQL = "SELECT * FROM `" + Config.MYSQL_PREFIX.getString() + "players`";
        PreparedStatement playersSelect = getConnection().prepareStatement(playerSelectSQL);
        preparedStatementMap.put(PreparedStatements.PLAYERS_SELECT, playersSelect);

        //Players update
        String playersUpdateSQL = "UPDATE `" + Config.MYSQL_PREFIX.getString()
                + "players` SET `invitedto`=?, `guild`=?, `points`=?, `kills`=?, `deaths`=? WHERE `uuid`=?";
        PreparedStatement playersUpdate = getConnection().prepareStatement(playersUpdateSQL);
        preparedStatementMap.put(PreparedStatements.PLAYERS_UPDATE, playersUpdate);

        //Players delete
        String playersDeleteSQL = "DELETE FROM `" + Config.MYSQL_PREFIX.getString() + "players` WHERE `id`=?";
        PreparedStatement playersDelete = getConnection().prepareStatement(playersDeleteSQL);
        preparedStatementMap.put(PreparedStatements.PLAYERS_DELETE, playersDelete);

        //Regions insert (id, loc_1, loc_2, guild, world)
        String regionsInsertSQL = "INSERT INTO `" + Config.MYSQL_PREFIX.getString()
                + "regions` VALUES(null,?,?,?,?);";
        PreparedStatement regionsInsert = getConnection().prepareStatement(regionsInsertSQL, returnKeys);
        preparedStatementMap.put(PreparedStatements.REGIONS_INSERT, regionsInsert);

        //Regions select
        String regionsSelectSQL = "SELECT * FROM `" + Config.MYSQL_PREFIX.getString() + "regions`";
        PreparedStatement regionsSelect = getConnection().prepareStatement(regionsSelectSQL);
        preparedStatementMap.put(PreparedStatements.REGIONS_SELECT, regionsSelect);

        //Regions delete
        String regionsDeleteSQL = "DELETE FROM `" + Config.MYSQL_PREFIX.getString() + "regions` WHERE `id`=?";
        PreparedStatement regionsDelete = getConnection().prepareStatement(regionsDeleteSQL);
        preparedStatementMap.put(PreparedStatements.REGIONS_DELETE, regionsDelete);

        //Regions update
        String regionsUpdateSQL = "UPDATE `" + Config.MYSQL_PREFIX.getString()
                + "regions` SET `loc_1`=?, `loc_2`=?, `guild`=?, `world`=? WHERE `id`=?";
        PreparedStatement regionsUpdate = getConnection().prepareStatement(regionsUpdateSQL);
        preparedStatementMap.put(PreparedStatements.REGIONS_UPDATE, regionsUpdate);

        //Ranks insert (id, name, guild, permissions, players, default, clone)
        String ranksInsertSQL = "INSERT INTO `" + Config.MYSQL_PREFIX.getString()
                + "ranks` VALUES(null,?,?,?,?,?,?);";
        PreparedStatement ranksInsert = getConnection().prepareStatement(ranksInsertSQL, returnKeys);
        preparedStatementMap.put(PreparedStatements.RANKS_INSERT, ranksInsert);

        //Ranks select
        String ranksSelectSQL = "SELECT * FROM `" + Config.MYSQL_PREFIX.getString() + "ranks`";
        PreparedStatement ranksSelect = getConnection().prepareStatement(ranksSelectSQL);
        preparedStatementMap.put(PreparedStatements.RANKS_SELECT, ranksSelect);

        //Ranks delete
        String ranksDeleteSQL = "DELETE FROM `" + Config.MYSQL_PREFIX.getString() + "ranks` WHERE `id`=?";
        PreparedStatement ranksDelete = getConnection().prepareStatement(ranksDeleteSQL);
        preparedStatementMap.put(PreparedStatements.RANKS_DELETE, ranksDelete);

        //Ranks delete (guild)
        String ranksDeleteGuildSQL = "DELETE FROM `" + Config.MYSQL_PREFIX.getString()
                + "ranks` WHERE `guild`=?";
        PreparedStatement ranksDeleteGuild = getConnection().prepareStatement(ranksDeleteGuildSQL);
        preparedStatementMap.put(PreparedStatements.RANKS_DELETE_GUILD, ranksDeleteGuild);

        //Ranks update
        String ranksUpdateSQL = "UPDATE `" + Config.MYSQL_PREFIX.getString()
                + "ranks` SET `name`=?, `guild`=?, `permissions`=?, `members`=?, `def`=?, `clone`=? WHERE `id`=?";
        PreparedStatement ranksUpdate = getConnection().prepareStatement(ranksUpdateSQL);
        preparedStatementMap.put(PreparedStatements.RANKS_UPDATE, ranksUpdate);

        //Log
        LoggerUtils.info("Statements prepared in "
                + TimeUnit.MILLISECONDS.convert((System.nanoTime() - nanoTime), TimeUnit.NANOSECONDS) / 1000.0
                + "s");
    } catch (SQLException e) {
        LoggerUtils.exception(e);
    }
}

From source file:org.apache.solr.client.solrj.retry.RetryingSolrServer.java

private void handleSuccess(SolrRequest request, NamedList response, int retryCount, long startTime,
        long requestDuration) {
    String successPrefix = ROOT_PREFIX + "success.";
    if (retryCount > 0) {
        String retriedRequestsPrefix = successPrefix + "retry.";
        long totalRequestDuration = System.nanoTime() - startTime;
        metrics.updateHistogram(retriedRequestsPrefix + "count", retryCount);
        metrics.updateTimer(retriedRequestsPrefix + "time", totalRequestDuration, TimeUnit.NANOSECONDS);
        //    metrics.updateHistogram(retriedRequestsPrefix + "count.req." + requestKey, retryCount);
        //    metrics.updateHistogram(retriedRequestsPrefix + "count.top." + firstExceptionTopLevelMsg, retryCount);
        //    metrics.updateHistogram(retriedRequestsPrefix + "count.root." + firstExceptionRootCauseMsg, retryCount);
        //    metrics.updateTimer(retriedRequestsPrefix + "time.req." + requestKey, totalRequestDuration, TimeUnit.NANOSECONDS);
        //    metrics.updateTimer(retriedRequestsPrefix + "time.top." + firstExceptionTopLevelMsg, totalRequestDuration, TimeUnit.NANOSECONDS);
        //    metrics.updateTimer(retriedRequestsPrefix + "time.root." + firstExceptionRootCauseMsg, totalRequestDuration, TimeUnit.NANOSECONDS);
    }
    metrics.updateHistogram(successPrefix + "anycount", retryCount);
    metrics.updateTimer(ROOT_PREFIX + "time", requestDuration, TimeUnit.NANOSECONDS);
}

From source file:org.apache.bookkeeper.tools.perf.dlog.PerfWriter.java

void write(List<DistributedLogManager> logs, double writeRate, int maxOutstandingBytesForThisThread,
        long numRecordsForThisThread, long numBytesForThisThread) throws Exception {
    log.info(
            "Write thread started with : logs = {}, rate = {},"
                    + " num records = {}, num bytes = {}, max outstanding bytes = {}",
            logs.stream().map(l -> l.getStreamName()).collect(Collectors.toList()), writeRate,
            numRecordsForThisThread, numBytesForThisThread, maxOutstandingBytesForThisThread);

    List<CompletableFuture<AsyncLogWriter>> writerFutures = logs.stream()
            .map(manager -> manager.openAsyncLogWriter()).collect(Collectors.toList());
    List<AsyncLogWriter> writers = result(FutureUtils.collect(writerFutures));

    long txid = writers.stream().mapToLong(writer -> writer.getLastTxId()).max().orElse(0L);
    txid = Math.max(0L, txid);

    RateLimiter limiter;
    if (writeRate > 0) {
        limiter = RateLimiter.create(writeRate);
    } else {
        limiter = null;
    }
    final Semaphore semaphore;
    if (maxOutstandingBytesForThisThread > 0) {
        semaphore = new Semaphore(maxOutstandingBytesForThisThread);
    } else {
        semaphore = null;
    }

    // Acquire 1 second worth of records to have a slower ramp-up
    if (limiter != null) {
        limiter.acquire((int) writeRate);
    }

    long totalWritten = 0L;
    long totalBytesWritten = 0L;
    final int numLogs = logs.size();
    while (true) {
        for (int i = 0; i < numLogs; i++) {
            if (numRecordsForThisThread > 0 && totalWritten >= numRecordsForThisThread) {
                markPerfDone();
            }
            if (numBytesForThisThread > 0 && totalBytesWritten >= numBytesForThisThread) {
                markPerfDone();
            }
            if (null != semaphore) {
                semaphore.acquire(payload.length);
            }

            totalWritten++;
            totalBytesWritten += payload.length;
            if (null != limiter) {
                limiter.acquire(payload.length);
            }
            final long sendTime = System.nanoTime();
            writers.get(i).write(new LogRecord(++txid, Unpooled.wrappedBuffer(payload))).thenAccept(dlsn -> {
                if (null != semaphore) {
                    semaphore.release(payload.length);
                }

                recordsWritten.increment();
                bytesWritten.add(payload.length);

                long latencyMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                recorder.recordValue(latencyMicros);
                cumulativeRecorder.recordValue(latencyMicros);
            }).exceptionally(cause -> {
                log.warn("Error at writing records", cause);
                System.exit(-1);
                return null;
            });
        }
    }
}

From source file:com.cloudera.livy.rsc.driver.RSCDriver.java

/**
 * Initializes the SparkContext used by this driver. This implementation creates a
 * context with the provided configuration. Subclasses can override this behavior,
 * and returning a null context is allowed. In that case, the context exposed by
 * JobContext will be null.
 */
protected JavaSparkContext initializeContext() throws Exception {
    long t1 = System.nanoTime();
    LOG.info("Starting Spark context...");
    JavaSparkContext sc = new JavaSparkContext(conf);
    LOG.info("Spark context finished initialization in {}ms",
            TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t1));
    return sc;
}

From source file:fr.xebia.management.statistics.ProfileAspectTest.java

@Test(timeout = 5000)
public void testMaxActiveSemaphoreAcquisition() throws Exception {
    // initialize
    testService.testMaxActiveSemaphoreAcquisition();

    String name = "test-max-active-semaphore-acquisition";
    ServiceStatistics serviceStatistics = profileAspect.serviceStatisticsByName.get(name);
    assertNotNull(serviceStatistics);
    assertEquals(TimeUnit.NANOSECONDS.convert(1000, TimeUnit.MILLISECONDS),
            serviceStatistics.getMaxActiveSemaphoreAcquisitionMaxTimeInNanos());

    serviceStatistics.getMaxActiveSemaphore().acquire();
    assertEquals(0, serviceStatistics.getMaxActiveSemaphore().availablePermits());

    long start = System.currentTimeMillis();
    try {
        testService.testMaxActiveSemaphoreAcquisition();
        fail("ServiceUnavailableException expected");
    } catch (ServiceUnavailableException ex) {
        // Expected
    }
    long waited = System.currentTimeMillis() - start;

    // reset
    serviceStatistics.setMaxActive(1);
    testService.testMaxActiveSemaphoreAcquisition();

    assertTrue(waited >= 900L);
}

From source file:com.amazonaws.services.sqs.buffered.ReceiveQueueBuffer.java

/**
 * maybe create more receive tasks. extra receive tasks won't be created if
 * we are already at the maximum number of receive tasks, or if we are at
 * the maximum number of prefetched buffers
 */
private void spawnMoreReceiveTasks() {

    if (shutDown)
        return;

    int desiredBatches = config.getMaxDoneReceiveBatches();
    desiredBatches = desiredBatches < 1 ? 1 : desiredBatches;

    synchronized (finishedTasks) {
        if (finishedTasks.size() >= desiredBatches)
            return;

        //if we have some finished batches already, and
        //existing inflight batches will bring us to the limit,
        //don't spawn more. if our finished tasks cache is empty, we will
        //always spawn a thread.
        if (finishedTasks.size() > 0
                && (finishedTasks.size() + inflightReceiveMessageBatches) >= desiredBatches)
            return;
    }

    synchronized (taskSpawnSyncPoint) {
        if (visibilityTimeoutNanos == -1) {
            GetQueueAttributesRequest request = new GetQueueAttributesRequest().withQueueUrl(qUrl)
                    .withAttributeNames("VisibilityTimeout");
            ResultConverter.appendUserAgent(request, AmazonSQSBufferedAsyncClient.USER_AGENT);
            long visibilityTimeoutSeconds = Long
                    .parseLong(sqsClient.getQueueAttributes(request).getAttributes().get("VisibilityTimeout"));
            visibilityTimeoutNanos = TimeUnit.NANOSECONDS.convert(visibilityTimeoutSeconds, TimeUnit.SECONDS);
        }

        int max = config.getMaxInflightReceiveBatches();
        //must allow at least one inflight receive task, or receive won't
        //work at all.
        max = max > 0 ? max : 1;
        int toSpawn = max - inflightReceiveMessageBatches;
        if (toSpawn > 0) {
            ReceiveMessageBatchTask task = new ReceiveMessageBatchTask(this);
            ++inflightReceiveMessageBatches;
            ++bufferCounter;
            if (log.isTraceEnabled()) {
                log.trace("Spawned receive batch #" + bufferCounter + " (" + inflightReceiveMessageBatches
                        + " of " + max + " inflight) for queue " + qUrl);
            }
            executor.execute(task);
        }
    }
}