Example usage for java.util.concurrent ConcurrentLinkedQueue add

Introduction

This page collects usage examples for java.util.concurrent ConcurrentLinkedQueue.add, drawn from open-source projects.

Prototype

public boolean add(E e) 

Document

Inserts the specified element at the tail of this queue.
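
Because the queue is unbounded, add never throws IllegalStateException: it behaves exactly like offer and always returns true. A minimal, self-contained sketch (class and variable names are illustrative only, not taken from the examples below):

import java.util.concurrent.ConcurrentLinkedQueue;

public class AddExample {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        // add appends at the tail and always returns true for this unbounded queue.
        queue.add("first");
        queue.add("second");
        // Elements are removed from the head, so they come out in FIFO order.
        System.out.println(queue.poll()); // prints "first"
        System.out.println(queue.poll()); // prints "second"
    }
}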

Usage

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run query at a target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>The main thread is responsible for periodically collecting and logging statistics.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops after all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS (target QPS).
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    int queryIntervalMs = (int) (MILLIS_PER_SECOND / startQPS);
    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                int numQueriesExecutedInt = numQueriesExecuted.get();
                LOGGER.info(
                        "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                        startQPS, timePassed, numQueriesExecutedInt,
                        numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                        totalBrokerTime.get() / (double) numQueriesExecutedInt,
                        totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                reportStartTime = currentTime;
                numReportIntervals++;

                if ((numIntervalsToReportAndClearStatistics != 0)
                        && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                    numReportIntervals = 0;
                    startTime = currentTime;
                    reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                            statisticsList);
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queued queries to finish executing.
    while (!queryQueue.isEmpty()) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            startQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}

From source file:org.ramidore.logic.system.PointBattleLogic.java

@Override
public boolean execute(PacketData data) {

    Matcher matcher = pattern.matcher(data.getStrData());

    if (!isEnd && currentStageNo < 6 && matcher.matches()) {

        if (currentStageNo == 0) {

            SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm");

            this.id = sdf.format(new Date());

            LOG.info(id);

            currentStat.setId(id);

            addStageNo();
        }

        Matcher unitMatcher = unitPattern.matcher(data.getStrData());

        while (unitMatcher.find()) {

            ConcurrentLinkedQueue<PbLogBean> dataQ = chartDataQList.get(currentStageNo - 1);
            ConcurrentLinkedQueue<PbLogBean> allDataQ = chartDataQList.get(5);

            int point = RamidoreUtil.intValueFromDescHexString(unitMatcher.group(1));

            // Skip values the duplicate checker rejects.
            if (!dupChecker.check(point)) {
                continue;
            }

            pointMap.put(currentStageNo, point);

            currentData = new PbLogBean(id, sequentialNo, currentStageNo, stageSequentialNo, point,
                    pointMap.get(currentStageNo - 1));
            dataQ.add(currentData);
            allDataQ.add(currentData);

            // Update the real-time statistics.
            statRealTimeData();

            LOG.info(sequentialNo + "\t" + currentStageNo + "\t" + stageSequentialNo + "\t" + point);

            sequentialNo++;
            stageSequentialNo++;
        }

        return true;
    } else if (currentStageNo == 6) {

        isEnd = true;
    }

    return false;
}

From source file:org.apache.hadoop.hdfs.notifier.server.ServerCore.java

/**
 * Used to handle a generated notification:
 * - sending the notifications to the clients which subscribed to the
 *   associated event.
 * - saving the notification in the history.
 * @param n the namespace notification to handle
 */
@Override
public void handleNotification(NamespaceNotification n) {
    int queuedCount = 0;
    if (LOG.isDebugEnabled()) {
        LOG.debug("Handling " + NotifierUtils.asString(n) + " ...");
    }

    // Add the notification to the queues
    Set<Long> clientsForNotification = getClientsForNotification(n);
    if (clientsForNotification != null && clientsForNotification.size() > 0) {
        synchronized (clientsForNotification) {
            for (Long clientId : clientsForNotification) {
                ConcurrentLinkedQueue<NamespaceNotification> clientQueue = clientsData.get(clientId).queue;
                // Just test that the client wasn't removed meanwhile
                if (clientQueue == null) {
                    continue;
                }
                clientQueue.add(n);
                queuedCount++;
            }
        }

        ServerDispatcher.queuedNotificationsCount.addAndGet(queuedCount);
    }

    // Save it in history
    serverHistory.storeNotification(n);

    if (LOG.isDebugEnabled()) {
        LOG.debug("Done handling " + NotifierUtils.asString(n));
    }
}

From source file:de.da_sense.moses.client.service.MosesService.java

/**
 * Registers a new hook.
 * @param hookType the type of hook
 * @param messageType the type of message
 * @param executable the executable to run for the hook
 */
public void registerHook(HookTypesEnum hookType, MessageTypesEnum messageType, Executable executable) {
    ConcurrentLinkedQueue<ExecutableWithType> hook = getHook(hookType);
    if (messageType != MessageTypesEnum.SPAMMABLE) {
        for (ExecutableWithType et : hook) {
            if (messageType.equals(et.t)) {
                Log.d("MoSeS.SERVICE", "Removed a duplicated message of type " + messageType.toString()
                        + " from hook " + hookType.toString());
                unregisterHook(hookType, et.e);
            }
        }
    }
    hook.add(new ExecutableWithType(messageType, executable));
}

From source file:edu.cornell.mannlib.vitro.webapp.rdfservice.impl.sparql.RDFServiceSparql.java

private List<Statement> sort(List<Statement> stmts) {
    List<Statement> output = new ArrayList<Statement>();
    int originalSize = stmts.size();
    if (originalSize == 1)
        return stmts;
    List<Statement> remaining = stmts;
    ConcurrentLinkedQueue<com.hp.hpl.jena.rdf.model.Resource> subjQueue = new ConcurrentLinkedQueue<com.hp.hpl.jena.rdf.model.Resource>();
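    // Seed the worklist with the first statement whose subject is a named (URI) resource.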
    for (Statement stmt : remaining) {
        if (stmt.getSubject().isURIResource()) {
            subjQueue.add(stmt.getSubject());
            break;
        }
    }
    if (subjQueue.isEmpty()) {
        throw new RuntimeException("No named subject in statement patterns");
    }
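    // Drain the worklist: emit each subject's statements together, and enqueue any
    // resource-valued objects so statements about connected resources stay adjacent.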
    while (remaining.size() > 0) {
        if (subjQueue.isEmpty()) {
            subjQueue.add(remaining.get(0).getSubject());
        }
        while (!subjQueue.isEmpty()) {
            com.hp.hpl.jena.rdf.model.Resource subj = subjQueue.poll();
            List<Statement> temp = new ArrayList<Statement>();
            for (Statement stmt : remaining) {
                if (stmt.getSubject().equals(subj)) {
                    output.add(stmt);
                    if (stmt.getObject().isResource()) {
                        subjQueue.add((com.hp.hpl.jena.rdf.model.Resource) stmt.getObject());
                    }
                } else {
                    temp.add(stmt);
                }
            }
            remaining = temp;
        }
    }
    if (output.size() != originalSize) {
        throw new RuntimeException(
                "original list size was " + originalSize + " but sorted size is " + output.size());
    }
    return output;
}

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run query at an increasing target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>We start with the start QPS, and keep adding delta QPS to the start QPS during the test.
 * <p>The main thread is responsible for periodically collecting and logging statistics.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops after all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS.
 * @param deltaQPS delta QPS.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @param numIntervalsToIncreaseQPS number of intervals to increase QPS.
 * @throws Exception
 */
public static void increasingQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, double deltaQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics, int numIntervalsToIncreaseQPS) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    double currentQPS = startQPS;
    int queryIntervalMs = (int) (MILLIS_PER_SECOND / currentQPS);
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                reportStartTime = currentTime;
                numReportIntervals++;

                if (numReportIntervals == numIntervalsToIncreaseQPS) {
                    // Try to find the next interval.
                    double newQPS = currentQPS + deltaQPS;
                    int newQueryIntervalMs;
                    // Skip the target QPS with the same interval as the previous one.
                    while ((newQueryIntervalMs = (int) (MILLIS_PER_SECOND / newQPS)) == queryIntervalMs) {
                        newQPS += deltaQPS;
                    }
                    if (newQueryIntervalMs == 0) {
                        LOGGER.warn("Due to sleep granularity of millisecond, cannot further increase QPS.");
                    } else {
                        // Find the next interval.
                        LOGGER.info(
                                "--------------------------------------------------------------------------------");
                        LOGGER.info("REPORT FOR TARGET QPS: {}", currentQPS);
                        int numQueriesExecutedInt = numQueriesExecuted.get();
                        LOGGER.info(
                                "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                        + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                                currentQPS, timePassed, numQueriesExecutedInt,
                                numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                                totalBrokerTime.get() / (double) numQueriesExecutedInt,
                                totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);

                        currentQPS = newQPS;
                        queryIntervalMs = newQueryIntervalMs;
                        LOGGER.info(
                                "Increase target QPS to: {}, the following statistics are for the new target QPS.",
                                currentQPS);
                    }
                } else {
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info(
                            "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                    + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                            currentQPS, timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals % numIntervalsToReportAndClearStatistics == 0)) {
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queued queries to finish executing.
    while (!queryQueue.isEmpty()) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            currentQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}

From source file:com.chinamobile.bcbsp.comm.CommunicatorNew.java

@Override
public ConcurrentLinkedQueue<IMessage> getMessageQueue(String vertexID) throws IOException {
    ArrayList<IMessage> tmpList = messageManager.removeIncomedQueue(vertexID);
    // No incoming messages for this vertex; return the shared singleton queue.
    if (tmpList == null) {
        return singletonMsg;
    }
    ConcurrentLinkedQueue<IMessage> tmpQueue = new ConcurrentLinkedQueue<IMessage>();
    while (!tmpList.isEmpty()) {
        tmpQueue.add(tmpList.remove(0));
    }
    tmpList = null;
    return tmpQueue;
}

From source file:io.pravega.test.integration.MultiReadersEndToEndTest.java

private Collection<Integer> readAllEvents(final int numParallelReaders, ClientFactory clientFactory,
        final String readerGroupName, final int numSegments) {
    ConcurrentLinkedQueue<Integer> read = new ConcurrentLinkedQueue<>();
    final ExecutorService executorService = Executors.newFixedThreadPool(numParallelReaders,
            new ThreadFactoryBuilder().setNameFormat("testreader-pool-%d").build());
    List<Future<?>> futures = new ArrayList<>();
    for (int i = 0; i < numParallelReaders; i++) {
        futures.add(executorService.submit(() -> {
            final String readerId = UUID.randomUUID().toString();
            @Cleanup
            final EventStreamReader<Integer> reader = clientFactory.createReader(readerId, readerGroupName,
                    new IntegerSerializer(), ReaderConfig.builder().build());
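            // Keep reading until consecutive empty reads exceed the segment count,
            // which indicates all events have been consumed.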
            int emptyCount = 0;
            while (emptyCount <= numSegments) {
                try {
                    final Integer integerEventRead = reader.readNextEvent(100).getEvent();
                    if (integerEventRead != null) {
                        read.add(integerEventRead);
                        emptyCount = 0;
                    } else {
                        emptyCount++;
                    }
                } catch (ReinitializationRequiredException e) {
                    throw new RuntimeException(e);
                }
            }
        }));
    }

    // Wait until all readers are done.
    futures.forEach(f -> FutureHelpers.getAndHandleExceptions(f, RuntimeException::new));
    executorService.shutdownNow();
    return read;
}

From source file:tw.edu.sju.ee.eea.module.signal.oscillogram.FourierTransformerRenderer.java

public void renderer(ConcurrentLinkedQueue<XYChart.Data> queue, double time, ValueInput vi, int samplerate) {
    try {
        if (vi.available() < 10) {
            return;
        }
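        // Consume values until the POSITIVE_INFINITY frame marker is reached.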
        while (vi.readValue() != Double.POSITIVE_INFINITY) {
            if (vi.available() < 10) {
                return;
            }
        }
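        // Read a 1024-sample frame for the FFT.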
        double[] array = new double[1024];
        for (int i = 0; i < array.length; i++) {
            array[i] = vi.readValue();
        }
        Complex[] transform = fft.transform(array, TransformType.FORWARD);
        //            double[] absolute = ComplexArray.getAbsolute(Arrays.copyOf(transform, transform.length / 2 + 1));
        double[] abs = ComplexArray.getAbsolute(transform);

        // Fold the mirrored halves of the two-sided magnitude spectrum into a one-sided spectrum.
        double[] absolute = new double[abs.length / 2 + 1];
        absolute[0] = abs[0];
        for (int i = 1; i < absolute.length; i++) {
            absolute[i] = abs[i] + abs[abs.length - i];
        }

        double x = samplerate / (double) transform.length;
        for (int i = 0; i < absolute.length; i++) {
            queue.add(new XYChart.Data(i * x, absolute[i] / transform.length));
        }
        vi.skip(vi.available() - array.length);
        System.out.println(vi.available());
    } catch (IOException ex) {
        Exceptions.printStackTrace(ex);
    }
}

From source file:org.apache.hadoop.hbase.client.AsyncBatchRpcRetryingCaller.java

private void groupAndSend(Stream<Action> actions, int tries) {
    long locateTimeoutNs;
    if (operationTimeoutNs > 0) {
        locateTimeoutNs = remainingTimeNs();
        if (locateTimeoutNs <= 0) {
            failAll(actions, tries);
            return;
        }
    } else {
        locateTimeoutNs = -1L;
    }
    ConcurrentMap<ServerName, ServerRequest> actionsByServer = new ConcurrentHashMap<>();
    ConcurrentLinkedQueue<Action> locateFailed = new ConcurrentLinkedQueue<>();
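    // Locate each action's region asynchronously: successful locations are grouped
    // by server, and failures are collected for resubmission once all complete.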
    CompletableFuture.allOf(
            actions.map(action -> conn.getLocator().getRegionLocation(tableName, action.getAction().getRow(),
                    RegionLocateType.CURRENT, locateTimeoutNs).whenComplete((loc, error) -> {
                        if (error != null) {
                            error = translateException(error);
                            if (error instanceof DoNotRetryIOException) {
                                failOne(action, tries, error, EnvironmentEdgeManager.currentTime(), "");
                                return;
                            }
                            addError(action, error, null);
                            locateFailed.add(action);
                        } else {
                            computeIfAbsent(actionsByServer, loc.getServerName(), ServerRequest::new)
                                    .addAction(loc, action);
                        }
                    })).toArray(CompletableFuture[]::new))
            .whenComplete((v, r) -> {
                if (!actionsByServer.isEmpty()) {
                    send(actionsByServer, tries);
                }
                if (!locateFailed.isEmpty()) {
                    tryResubmit(locateFailed.stream(), tries);
                }
            });
}