Example usage for java.util.concurrent.atomic AtomicLong get

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicLong.get().

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
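
A minimal, self-contained sketch of the call (class name and counts are illustrative, not taken from the sources below): several threads increment a shared counter, and get() performs a volatile read of the current value once they finish.

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong counter = new AtomicLong();

        // four workers bump the counter concurrently
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1_000; j++) {
                    counter.incrementAndGet();
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        // get() returns the current value with volatile read semantics; prints 4000
        System.out.println("count = " + counter.get());
    }
}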

Usage

From source file:org.apache.activemq.usecases.NetworkBridgeProducerFlowControlTest.java

public void testSendFailIfNoSpaceReverseDoesNotBlockQueueNetwork() throws Exception {
    final int NUM_MESSAGES = 100;
    final long TEST_MESSAGE_SIZE = 1024;
    final long SLOW_CONSUMER_DELAY_MILLIS = 100;

    final ActiveMQQueue slowDestination = new ActiveMQQueue(
            NetworkBridgeProducerFlowControlTest.class.getSimpleName()
                    + ".slow.shared?consumer.prefetchSize=1");

    final ActiveMQQueue fastDestination = new ActiveMQQueue(
            NetworkBridgeProducerFlowControlTest.class.getSimpleName()
                    + ".fast.shared?consumer.prefetchSize=1");

    // Start a local and a remote broker.
    BrokerService localBroker = createBroker(
            new URI("broker:(tcp://localhost:0" + ")?brokerName=broker0&persistent=false&useJmx=true"));
    createBroker(new URI("broker:(tcp://localhost:0" + ")?brokerName=broker1&persistent=false&useJmx=true"));
    localBroker.getSystemUsage().setSendFailIfNoSpace(true);

    // Set a policy on the local broker that limits the maximum size of the
    // slow shared queue.
    PolicyEntry policyEntry = new PolicyEntry();
    policyEntry.setMemoryLimit(5 * TEST_MESSAGE_SIZE);
    PolicyMap policyMap = new PolicyMap();
    policyMap.put(slowDestination, policyEntry);
    localBroker.setDestinationPolicy(policyMap);

    // Create an outbound bridge from the local broker to the remote broker.
    // The bridge is configured with the remoteDispatchType enhancement.
    NetworkConnector nc = bridgeBrokers("broker0", "broker1");
    nc.setAlwaysSyncSend(true);
    nc.setPrefetchSize(1);
    nc.setDuplex(true);

    startAllBrokers();
    waitForBridgeFormation();

    // Start two asynchronous consumers on the local broker, one for each
    // of the two shared queues, and keep track of how long it takes for
    // each of the consumers to receive all the messages.
    final CountDownLatch fastConsumerLatch = new CountDownLatch(NUM_MESSAGES);
    final CountDownLatch slowConsumerLatch = new CountDownLatch(NUM_MESSAGES);

    final long startTimeMillis = System.currentTimeMillis();
    final AtomicLong fastConsumerTime = new AtomicLong();
    final AtomicLong slowConsumerTime = new AtomicLong();
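    // each wait thread records its elapsed time in one of these; the main thread reads them back with get() for the assertions below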

    Thread fastWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                fastConsumerLatch.await();
                fastConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    Thread slowWaitThread = new Thread() {
        @Override
        public void run() {
            try {
                slowConsumerLatch.await();
                slowConsumerTime.set(System.currentTimeMillis() - startTimeMillis);
            } catch (InterruptedException ex) {
                exceptions.add(ex);
                Assert.fail(ex.getMessage());
            }
        }
    };

    fastWaitThread.start();
    slowWaitThread.start();

    createConsumer("broker0", fastDestination, fastConsumerLatch);
    MessageConsumer slowConsumer = createConsumer("broker0", slowDestination, slowConsumerLatch);
    MessageIdList messageIdList = brokers.get("broker0").consumers.get(slowConsumer);
    messageIdList.setProcessingDelay(SLOW_CONSUMER_DELAY_MILLIS);

    // Send the test messages to the local broker's shared queues. The
    // messages are either persistent or non-persistent to demonstrate the
    // difference between synchronous and asynchronous dispatch.
    persistentDelivery = false;
    sendMessages("broker1", fastDestination, NUM_MESSAGES);
    sendMessages("broker1", slowDestination, NUM_MESSAGES);

    fastWaitThread.join(TimeUnit.SECONDS.toMillis(60));
    slowWaitThread.join(TimeUnit.SECONDS.toMillis(60));

    assertTrue("no exceptions on the wait threads:" + exceptions, exceptions.isEmpty());

    LOG.info("Fast consumer duration (ms): " + fastConsumerTime.get());
    LOG.info("Slow consumer duration (ms): " + slowConsumerTime.get());

    assertTrue("fast time set", fastConsumerTime.get() > 0);
    assertTrue("slow time set", slowConsumerTime.get() > 0);

    // Verify the behaviour as described in the description of this class.
    Assert.assertTrue(fastConsumerTime.get() < slowConsumerTime.get() / 10);
}

From source file:InMemoryLookupTable.java

/**
 * Iterate on the given 2 vocab words
 *
 * @param w1 the first word to iterate on
 * @param w2 the second word to iterate on
 * @param nextRandom next random for sampling
 */
@Override
@Deprecated
public void iterateSample(T w1, T w2, AtomicLong nextRandom, double alpha) {
    if (w2 == null || w2.getIndex() < 0 || w1.getIndex() == w2.getIndex() || w1.getLabel().equals("STOP")
            || w2.getLabel().equals("STOP") || w1.getLabel().equals("UNK") || w2.getLabel().equals("UNK"))
        return;
    //current word vector
    INDArray l1 = this.syn0.slice(w2.getIndex());

    //error for current word and context
    INDArray neu1e = Nd4j.create(vectorLength);

    for (int i = 0; i < w1.getCodeLength(); i++) {
        int code = w1.getCodes().get(i);
        int point = w1.getPoints().get(i);
        if (point >= syn0.rows() || point < 0)
            throw new IllegalStateException("Illegal point " + point);
        //other word vector

        INDArray syn1 = this.syn1.slice(point);

        double dot = Nd4j.getBlasWrapper().dot(l1, syn1);

        if (dot < -MAX_EXP || dot >= MAX_EXP)
            continue;

        int idx = (int) ((dot + MAX_EXP) * ((double) expTable.length / MAX_EXP / 2.0));
        if (idx >= expTable.length)
            continue;

        //score
        double f = expTable[idx];
        //gradient
        double g = useAdaGrad ? w1.getGradient(i, (1 - code - f)) : (1 - code - f) * alpha;

        if (neu1e.data().dataType() == DataBuffer.Type.FLOAT) {
            Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, syn1, neu1e);
            Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, l1, syn1);

        }

        else {
            Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, syn1, neu1e);
            Nd4j.getBlasWrapper().level1().axpy(syn1.length(), g, l1, syn1);

        }

    }

    int target = w1.getIndex();
    int label;
    //negative sampling
    if (negative > 0)
        for (int d = 0; d < negative + 1; d++) {
            if (d == 0)
                label = 1;
            else {
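                // advance the shared random seed (linear congruential step, as in word2vec) and use the high bits to pick a negative sample from the table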
                nextRandom.set(nextRandom.get() * 25214903917L + 11);
                int idx = Math.abs((int) (nextRandom.get() >> 16) % table.length());

                target = table.getInt(idx);
                if (target <= 0)
                    target = (int) nextRandom.get() % (vocab.numWords() - 1) + 1;

                if (target == w1.getIndex())
                    continue;
                label = 0;
            }

            if (target >= syn1Neg.rows() || target < 0)
                continue;

            double f = Nd4j.getBlasWrapper().dot(l1, syn1Neg.slice(target));
            double g;
            if (f > MAX_EXP)
                g = useAdaGrad ? w1.getGradient(target, (label - 1)) : (label - 1) * alpha;
            else if (f < -MAX_EXP)
                g = label * (useAdaGrad ? w1.getGradient(target, alpha) : alpha);
            else
                g = useAdaGrad
                        ? w1.getGradient(target,
                                label - expTable[(int) ((f + MAX_EXP) * (expTable.length / MAX_EXP / 2))])
                        : (label - expTable[(int) ((f + MAX_EXP) * (expTable.length / MAX_EXP / 2))]) * alpha;
            if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
                Nd4j.getBlasWrapper().axpy(g, syn1Neg.slice(target), neu1e);
            else
                Nd4j.getBlasWrapper().axpy((float) g, syn1Neg.slice(target), neu1e);

            if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
                Nd4j.getBlasWrapper().axpy(g, l1, syn1Neg.slice(target));
            else
                Nd4j.getBlasWrapper().axpy((float) g, l1, syn1Neg.slice(target));
        }

    if (syn0.data().dataType() == DataBuffer.Type.DOUBLE)
        Nd4j.getBlasWrapper().axpy(1.0, neu1e, l1);

    else
        Nd4j.getBlasWrapper().axpy(1.0f, neu1e, l1);

}

From source file:org.apache.nifi.processors.hive.SelectHiveQL.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile fileToProcess = (context.hasIncomingConnection() ? session.get() : null);
    FlowFile flowfile = null;

    // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
    // However, if we have no FlowFile and we have connections coming from other Processors, then
    // we know that we should run only if we have a FlowFile.
    if (context.hasIncomingConnection()) {
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final HiveDBCPService dbcpService = context.getProperty(HIVE_DBCP_SERVICE)
            .asControllerService(HiveDBCPService.class);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());

    final boolean flowbased = !(context.getProperty(HIVEQL_SELECT_QUERY).isSet());

    // Source the SQL
    final String selectQuery;

    if (context.getProperty(HIVEQL_SELECT_QUERY).isSet()) {
        selectQuery = context.getProperty(HIVEQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, new InputStreamCallback() {
            @Override
            public void process(InputStream in) throws IOException {
                queryContents.append(IOUtils.toString(in));
            }
        });
        selectQuery = queryContents.toString();
    }

    final String outputFormat = context.getProperty(HIVEQL_OUTPUT_FORMAT).getValue();
    final StopWatch stopWatch = new StopWatch(true);
    final boolean header = context.getProperty(HIVEQL_CSV_HEADER).asBoolean();
    final String altHeader = context.getProperty(HIVEQL_CSV_ALT_HEADER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final String delimiter = context.getProperty(HIVEQL_CSV_DELIMITER)
            .evaluateAttributeExpressions(fileToProcess).getValue();
    final boolean quote = context.getProperty(HIVEQL_CSV_QUOTE).asBoolean();
    // the escape flag presumably comes from its own property (property name assumed; reading HIVEQL_CSV_HEADER here looks like a copy-paste slip)
    final boolean escape = context.getProperty(HIVEQL_CSV_ESCAPE).asBoolean();

    try (final Connection con = dbcpService.getConnection();
            final Statement st = (flowbased ? con.prepareStatement(selectQuery) : con.createStatement())) {

        final AtomicLong nrOfRows = new AtomicLong(0L);
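        // nrOfRows is set inside the stream callback below and read back with get() once the write completes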
        if (fileToProcess == null) {
            flowfile = session.create();
        } else {
            flowfile = fileToProcess;
        }

        flowfile = session.write(flowfile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                try {
                    logger.debug("Executing query {}", new Object[] { selectQuery });
                    if (flowbased) {
                        // Hive JDBC Doesn't Support this yet:
                        // ParameterMetaData pmd = ((PreparedStatement)st).getParameterMetaData();
                        // int paramCount = pmd.getParameterCount();

                        // Alternate way to determine number of params in SQL.
                        int paramCount = StringUtils.countMatches(selectQuery, "?");

                        if (paramCount > 0) {
                            setParameters(1, (PreparedStatement) st, paramCount, fileToProcess.getAttributes());
                        }
                    }

                    final ResultSet resultSet = (flowbased ? ((PreparedStatement) st).executeQuery()
                            : st.executeQuery(selectQuery));

                    if (AVRO.equals(outputFormat)) {
                        nrOfRows.set(HiveJdbcCommon.convertToAvroStream(resultSet, out));
                    } else if (CSV.equals(outputFormat)) {
                        CsvOutputOptions options = new CsvOutputOptions(header, altHeader, delimiter, quote,
                                escape);
                        nrOfRows.set(HiveJdbcCommon.convertToCsvStream(resultSet, out, options));
                    } else {
                        nrOfRows.set(0L);
                        throw new ProcessException("Unsupported output format: " + outputFormat);
                    }
                } catch (final SQLException e) {
                    throw new ProcessException(e);
                }
            }
        });

        // Set attribute for how many rows were selected
        flowfile = session.putAttribute(flowfile, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

        // Set MIME type on output document and add extension to filename
        if (AVRO.equals(outputFormat)) {
            flowfile = session.putAttribute(flowfile, CoreAttributes.MIME_TYPE.key(), AVRO_MIME_TYPE);
            flowfile = session.putAttribute(flowfile, CoreAttributes.FILENAME.key(),
                    flowfile.getAttribute(CoreAttributes.FILENAME.key()) + ".avro");
        } else if (CSV.equals(outputFormat)) {
            flowfile = session.putAttribute(flowfile, CoreAttributes.MIME_TYPE.key(), CSV_MIME_TYPE);
            flowfile = session.putAttribute(flowfile, CoreAttributes.FILENAME.key(),
                    flowfile.getAttribute(CoreAttributes.FILENAME.key()) + ".csv");
        }

        logger.info("{} contains {} Avro records; transferring to 'success'",
                new Object[] { flowfile, nrOfRows.get() });

        if (context.hasIncomingConnection()) {
            // If the flow file came from an incoming connection, issue a Modify Content provenance event

            session.getProvenanceReporter().modifyContent(flowfile, "Retrieved " + nrOfRows.get() + " rows",
                    stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        } else {
            // If we created a flow file from rows received from Hive, issue a Receive provenance event
            session.getProvenanceReporter().receive(flowfile, dbcpService.getConnectionURL(),
                    stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        }
        session.transfer(flowfile, REL_SUCCESS);
    } catch (final ProcessException | SQLException e) {
        logger.error("Issue processing SQL {} due to {}.", new Object[] { selectQuery, e });
        if (flowfile == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute HiveQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { selectQuery, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute HiveQL select query {} for {} due to {}; routing to failure",
                        new Object[] { selectQuery, flowfile, e });
                flowfile = session.penalize(flowfile);
            } else {
                logger.error("Unable to execute HiveQL select query {} due to {}; routing to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            }
            session.transfer(flowfile, REL_FAILURE);
        }
    }
}

From source file:com.jivesoftware.os.routing.bird.deployable.TenantRoutingBirdProviderBuilder.java

public ConnectionDescriptorsProvider build(OAuthSigner signer) {
    HttpClientConfig httpClientConfig = HttpClientConfig.newBuilder().build();
    final HttpClient httpClient = new HttpClientFactoryProvider()
            .createHttpClientFactory(Collections.singletonList(httpClientConfig), false)
            .createClient(signer, routesHost, routesPort);

    AtomicLong activeCount = new AtomicLong();
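    // counts connection requests currently in flight; the value read with get() is included in the release-group warning below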
    final ObjectMapper mapper = new ObjectMapper();
    mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    ConnectionDescriptorsProvider connectionsProvider = (connectionsRequest, expectedReleaseGroup) -> {
        activeCount.incrementAndGet();
        try {
            LOG.debug("Requesting connections:{}", connectionsRequest);

            String postEntity;
            try {
                postEntity = mapper.writeValueAsString(connectionsRequest);
            } catch (JsonProcessingException e) {
                LOG.error("Error serializing request parameters object to a string.  Object " + "was "
                        + connectionsRequest + " " + e.getMessage());
                return null;
            }

            HttpResponse response;
            try {
                response = httpClient.postJson(routesPath, postEntity, null);
            } catch (HttpClientException e) {
                LOG.error(
                        "Error posting query request to server.  The entity posted was {} and the endpoint posted to was {}",
                        new Object[] { postEntity, routesPath }, e);
                return null;
            }

            int statusCode = response.getStatusCode();
            if (statusCode >= 200 && statusCode < 300) {
                byte[] responseBody = response.getResponseBody();
                try {
                    ConnectionDescriptorsResponse connectionDescriptorsResponse = mapper.readValue(responseBody,
                            ConnectionDescriptorsResponse.class);
                    if (!connectionsRequest.getRequestUuid()
                            .equals(connectionDescriptorsResponse.getRequestUuid())) {
                        LOG.warn("Request UUIDs are misaligned, request:{} response:{}", connectionsRequest,
                                connectionDescriptorsResponse);
                    }
                    if (connectionDescriptorsResponse.getReturnCode() >= 0 && expectedReleaseGroup != null
                            && !expectedReleaseGroup.equals(connectionDescriptorsResponse.getReleaseGroup())) {
                        String responseEntity = new String(responseBody, StandardCharsets.UTF_8);
                        LOG.warn(
                                "Release group changed, active:{} request:{} requestEntity:{} responseEntity:{} response:{}",
                                activeCount.get(), connectionsRequest, postEntity, responseEntity,
                                connectionDescriptorsResponse);
                    }
                    LOG.debug("Request:{} ConnectionDescriptors:{}", connectionsRequest,
                            connectionDescriptorsResponse);
                    return connectionDescriptorsResponse;
                } catch (IOException x) {
                    LOG.error("Failed to deserialize response:" + new String(responseBody) + " "
                            + x.getMessage());
                    return null;
                }
            }
            return null;
        } finally {
            activeCount.decrementAndGet();
        }
    };
    return connectionsProvider;
}

From source file:core.Reconciler.java

public void run() {

    System.out.println("Reconciler: Started the reconciler thread");
    sfdcService = new SalesforceService(Configuration.getSalesForceConsumerSecret(),
            Configuration.getSalesForceConsumerKey(), Configuration.getSalesForceUsername(),
            Configuration.getSalesForcePassword(), Configuration.isSalesforceSandbox());

    SendGridClient.initV2(Configuration.getSendGridUsername(), Configuration.getSendGridPassword(),
            Configuration.getAlertEmailRecipient(), Configuration.getAlertEmailSender());

    NiprClient lClient = NiprClientConfiguration.getNiprClient(Configuration.getGetNiprAlertEndpoint(),
            Configuration.getNiprUsername(), Configuration.getNiprPassword());

    AtomicLong lRetryInterval = null;
    UUID lResyncTriggerId = LicenseDB.getResyncTriggerId();
    while (true) {

        if (Configuration.isPauseSync()) {
            System.out.println("System has been paused");
            try {
                Thread.sleep(36000000);
            } catch (Exception ex) {

            }
            continue;
        }
        try {
            lRetryInterval = new AtomicLong(Configuration.getReconcilerRetry());
            lResyncTriggerId = LicenseDB.getResyncTriggerId();
            System.out.println("Reconciler: Current triggered Resync ID " + lResyncTriggerId);
            // Get the latest copy. This is a Deep Copy
            Map<String, LicenseInternal> lUnprocessedLicenses = LicenseDB.getUnprocessedLicenses();
            Map<String, GregorianCalendar> lDaysToSync = LicenseDB.getPendingNiprSyncDates();

            Map<String, LicenseInternal> lLicenses = new HashMap<String, LicenseInternal>();
            Map<String, GregorianCalendar> lSuccessDates = new HashMap<String, GregorianCalendar>();

            DoNiprSync(lClient, lDaysToSync, lUnprocessedLicenses, lLicenses, lSuccessDates);

            System.out.println(
                    "Reconciler: " + lLicenses.size() + " new licenses to be processed in Sales Force ");
            if (lLicenses.size() > 0) {

                // Process information in sales force, save the remaining
                // for next run
                lUnprocessedLicenses = ProcessInfoInSalesForce(lLicenses, lRetryInterval);
            }

            System.out.println(
                    "Reconciler: Total Failed licenses in in the system " + lUnprocessedLicenses.size());

            // This transfers reference, do not use the map after this call
            // but get a fresh copy.
            // Update in the cache, which also serves the UI
            LicenseDB.setUnprocessedLicenses(lUnprocessedLicenses);

            LicenseDB.updateNiprSyncDates(lSuccessDates);

            UUID lLatestTriggerId = LicenseDB.getResyncTriggerId();
            if (lLatestTriggerId.compareTo(lResyncTriggerId) != 0) {
                System.out.println(
                        "Reconciler: Reconciler retrying with minimum sleep as resync triggered by user");
                Thread.sleep(MIN_SLEEP_INTERVAL);
                continue;
            }
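            // read the retry interval, which ProcessInfoInSalesForce may have adjusted through the shared AtomicLong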
            long lInterval = lRetryInterval.get();

            if (lUnprocessedLicenses.isEmpty()) {
                // Get the current time and set the interval till next day noon.
                Calendar cal = Calendar.getInstance();
                int lCurrentHour = cal.get(Calendar.HOUR_OF_DAY);
                // If the current hour is before 9am, run at noon today, since the NIPR alerts are not generated yet.
                // Otherwise run at noon the next day, i.e. (24 - lCurrentHour + 12) hours from now.
                if (lCurrentHour < 9) {
                    lInterval = (12 - lCurrentHour) * 60 * 60 * 1000;
                } else {
                    lInterval = (24 - lCurrentHour + 12) * 60 * 60 * 1000;
                }
            }
            System.out.println("Reconciler: Sleeping for " + lInterval + "ms");
            try {
                Thread.sleep(lInterval);
            } catch (InterruptedException lIntrEx) {
                System.out.println("Reconciler: interrupted");
            }
        } catch (Exception ex) {
            System.out.println("Reconciler mainloop threw an exception " + ex.getMessage());
        }
    }
}

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run query at an increasing target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>We start with the start QPS, and keep adding delta QPS to the start QPS during the test.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS.
 * @param deltaQPS delta QPS.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @param numIntervalsToIncreaseQPS number of intervals to increase QPS.
 * @throws Exception
 */

public static void increasingQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, double deltaQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics, int numIntervalsToIncreaseQPS) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
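    // shared accumulators: the worker threads add per-query broker and client latencies, and the main thread reads them with get() when reporting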
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    double currentQPS = startQPS;
    int queryIntervalMs = (int) (MILLIS_PER_SECOND / currentQPS);
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                reportStartTime = currentTime;
                numReportIntervals++;

                if (numReportIntervals == numIntervalsToIncreaseQPS) {
                    // Try to find the next interval.
                    double newQPS = currentQPS + deltaQPS;
                    int newQueryIntervalMs;
                    // Skip the target QPS with the same interval as the previous one.
                    while ((newQueryIntervalMs = (int) (MILLIS_PER_SECOND / newQPS)) == queryIntervalMs) {
                        newQPS += deltaQPS;
                    }
                    if (newQueryIntervalMs == 0) {
                        LOGGER.warn("Due to sleep granularity of millisecond, cannot further increase QPS.");
                    } else {
                        // Find the next interval.
                        LOGGER.info(
                                "--------------------------------------------------------------------------------");
                        LOGGER.info("REPORT FOR TARGET QPS: {}", currentQPS);
                        int numQueriesExecutedInt = numQueriesExecuted.get();
                        LOGGER.info(
                                "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                        + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                                currentQPS, timePassed, numQueriesExecutedInt,
                                numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                                totalBrokerTime.get() / (double) numQueriesExecutedInt,
                                totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);

                        currentQPS = newQPS;
                        queryIntervalMs = newQueryIntervalMs;
                        LOGGER.info(
                                "Increase target QPS to: {}, the following statistics are for the new target QPS.",
                                currentQPS);
                    }
                } else {
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info(
                            "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                    + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                            currentQPS, timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals % numIntervalsToReportAndClearStatistics == 0)) {
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries getting executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            currentQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}

From source file:org.apache.pulsar.client.impl.ConsumerImpl.java

private void internalGetLastMessageIdAsync(final Backoff backoff, final AtomicLong remainingTime,
        CompletableFuture<MessageId> future) {
    ClientCnx cnx = cnx();
    if (isConnected() && cnx != null) {
        if (!Commands.peerSupportsGetLastMessageId(cnx.getRemoteEndpointProtocolVersion())) {
            future.completeExceptionally(new PulsarClientException.NotSupportedException(
                    "GetLastMessageId Not supported for ProtocolVersion: "
                            + cnx.getRemoteEndpointProtocolVersion()));
            // without returning here, the command would still be sent to a broker that cannot handle it
            return;
        }

        long requestId = client.newRequestId();
        ByteBuf getLastIdCmd = Commands.newGetLastMessageId(consumerId, requestId);
        log.info("[{}][{}] Get topic last message Id", topic, subscription);

        cnx.sendGetLastMessageId(getLastIdCmd, requestId).thenAccept((result) -> {
            log.info("[{}][{}] Successfully getLastMessageId {}:{}", topic, subscription, result.getLedgerId(),
                    result.getEntryId());
            future.complete(
                    new MessageIdImpl(result.getLedgerId(), result.getEntryId(), result.getPartition()));
        }).exceptionally(e -> {
            log.error("[{}][{}] Failed getLastMessageId command", topic, subscription);
            future.completeExceptionally(e.getCause());
            return null;
        });
    } else {
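        // not connected yet: schedule a retry, but never wait longer than the time remaining in the shared budget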
        long nextDelay = Math.min(backoff.next(), remainingTime.get());
        if (nextDelay <= 0) {
            future.completeExceptionally(new PulsarClientException.TimeoutException(
                    "Could not getLastMessageId within configured timeout."));
            return;
        }

        ((ScheduledExecutorService) listenerExecutor).schedule(() -> {
            log.warn("[{}] [{}] Could not get connection while getLastMessageId -- Will try again in {} ms",
                    topic, getHandlerName(), nextDelay);
            remainingTime.addAndGet(-nextDelay);
            internalGetLastMessageIdAsync(backoff, remainingTime, future);
        }, nextDelay, TimeUnit.MILLISECONDS);
    }
}

From source file:org.jasig.ssp.service.impl.PersonServiceImpl.java

@Override
public PagingWrapper<Person> syncCoaches() {
    long methodStart = new Date().getTime();
    final Collection<Person> coaches = Lists.newArrayList();

    if (Thread.currentThread().isInterrupted()) {
        LOGGER.info("Abandoning syncCoaches because of thread interruption");
        return new PagingWrapper<Person>(coaches);
    }

    final Collection<String> coachUsernames = getAllCoachUsernamesFromDirectory();

    long mergeLoopStart = new Date().getTime();
    final AtomicLong timeInExternalReads = new AtomicLong();
    final AtomicLong timeInExternalWrites = new AtomicLong();
    for (final String coachUsername : coachUsernames) {

        if (Thread.currentThread().isInterrupted()) {
            LOGGER.info("Abandoning syncCoaches on username {} because of thread interruption", coachUsername);
            break;
        }

        long singlePersonStart = new Date().getTime();

        final AtomicReference<Person> coach = new AtomicReference<Person>();

        try {
            withCoachSyncTransaction(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    long localPersonLookupStart = new Date().getTime();
                    try {
                        coach.set(personFromUsername(coachUsername));
                    } catch (final ObjectNotFoundException e) {
                        LOGGER.debug("Coach {} not found", coachUsername);
                    }
                    long localPersonLookupEnd = new Date().getTime();
                    TIMING_LOGGER.info("Read local coach by username {} in {} ms", coachUsername,
                            localPersonLookupEnd - localPersonLookupStart);

                    // Does coach exist in local SSP.person table?

                    if (coach.get() == null) {

                        // Attempt to find coach in external data
                        try {
                            long externalPersonLookupStart = new Date().getTime();

                            final ExternalPerson externalPerson = externalPersonService
                                    .getByUsername(coachUsername);

                            long externalPersonLookupEnd = new Date().getTime();
                            long externalPersonLookupElapsed = externalPersonLookupEnd
                                    - externalPersonLookupStart;
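                            // accumulate time spent reading the external person record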
                            timeInExternalReads.set(timeInExternalReads.get() + externalPersonLookupElapsed);
                            TIMING_LOGGER.info("Read external coach by username {} in {} ms", coachUsername,
                                    externalPersonLookupElapsed);

                            long externalPersonSyncStart = new Date().getTime();

                            coach.set(new Person()); // NOPMD
                            externalPersonService.updatePersonFromExternalPerson(coach.get(), externalPerson,
                                    true);

                            long externalPersonSyncEnd = new Date().getTime();
                            long externalPersonSyncElapsed = externalPersonSyncEnd - externalPersonSyncStart;
                            timeInExternalWrites.set(timeInExternalWrites.get() + externalPersonSyncElapsed);
                            TIMING_LOGGER.info("Synced external coach by username {} in {} ms", coachUsername,
                                    externalPersonSyncElapsed);

                        } catch (final ObjectNotFoundException e) {
                            LOGGER.debug("Coach {} not found in external data", coachUsername);
                        }
                    }
                    return coach.get();
                }
            });
        } catch (ConstraintViolationException e) {
            if ("uq_person_school_id".equals(e.getConstraintName())) {
                LOGGER.warn("Skipping coach with non-unique schoolId '{}' (username '{}')",
                        new Object[] { coach.get().getSchoolId(), coachUsername, e });
                coach.set(null);
            } else if ("unique_person_username".equals(e.getConstraintName())) {
                LOGGER.warn("Skipping coach with non-unique username '{}' (schoolId '{}')",
                        new Object[] { coachUsername, coach.get().getSchoolId(), e });
                coach.set(null);
            } else {
                throw e;
            }
        }

        if (coach.get() != null) {
            coaches.add(coach.get());
        }
        long singlePersonEnd = new Date().getTime();
        TIMING_LOGGER.info("SSP coach merge for username {} completed in {} ms", coachUsername,
                singlePersonEnd - singlePersonStart);
    }
    Long mergeLoopEnd = new Date().getTime();
    TIMING_LOGGER.info("All SSP merges for {} coaches completed in {} ms. Reading: {} ms. Writing: {} ms",
            new Object[] { coachUsernames.size(), mergeLoopEnd - mergeLoopStart, timeInExternalReads.get(),
                    timeInExternalWrites.get() });

    PagingWrapper pw = new PagingWrapper<Person>(coaches);
    long methodEnd = new Date().getTime();
    TIMING_LOGGER.info("Read and merged PersonAttributesService {} coaches in {} ms", coaches.size(),
            methodEnd - methodStart);
    return pw;
}

From source file:com.btoddb.fastpersitentqueue.InMemorySegmentMgrTest.java

@Test
public void testThreading() throws IOException, ExecutionException {
    final int entrySize = 1000;
    final int numEntries = 3000;
    final int numPushers = 3;
    int numPoppers = 3;

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    mgr.setMaxSegmentSizeInBytes(10000);
    mgr.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = idGen.incrementAndGet();
                        pushSum.addAndGet(x);
                        FpqEntry entry = new FpqEntry(x, new byte[entrySize]);
                        mgr.push(entry);
                        if (x % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !mgr.isEmpty()) {
                    try {
                        FpqEntry entry;
                        while (null != (entry = mgr.pop())) {
                            if (entry.getId() % 500 == 0) {
                                System.out.println("popped ID = " + entry.getId());
                            }

                            popSum.addAndGet(entry.getId());
                            numPops.incrementAndGet();
                            Thread.sleep(popRand.nextInt(5));
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

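    // every pushed id must be popped exactly once, so the counters and AtomicLong sums must line up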
    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(popSum.get(), is(pushSum.get()));
    assertThat(mgr.getNumberOfEntries(), is(0L));
    assertThat(mgr.getNumberOfActiveSegments(), is(1));
    assertThat(mgr.getSegments(), hasSize(1));
    assertThat(FileUtils.listFiles(theDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE), is(empty()));

    // make sure we tested paging in/out
    assertThat(mgr.getNumberOfSwapOut(), is(greaterThan(0L)));
    assertThat(mgr.getNumberOfSwapIn(), is(mgr.getNumberOfSwapOut()));
}