Example usage for java.util.concurrent ConcurrentLinkedQueue size

List of usage examples for java.util.concurrent ConcurrentLinkedQueue size

Introduction

This page collects example usages of java.util.concurrent.ConcurrentLinkedQueue.size().

Prototype

public int size() 

Document

Returns the number of elements in this queue.
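
Before the full examples, here is a minimal standalone sketch (not taken from any of the source files below). Note that, unlike in most collections, size() is not a constant-time operation: it traverses the queue's linked nodes, and the result may already be stale if other threads modify the queue concurrently.

import java.util.concurrent.ConcurrentLinkedQueue;

public class QueueSizeDemo {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        queue.add("a");
        queue.add("b");
        queue.add("c");

        // size() walks the whole queue: O(n), and only weakly consistent
        // under concurrent modification.
        System.out.println(queue.size()); // 3

        queue.poll();
        System.out.println(queue.size()); // 2

        // For a pure emptiness check, isEmpty() is cheaper than size() == 0.
        System.out.println(queue.isEmpty()); // false
    }
}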

Usage

From source file:com.chinamobile.bcbsp.comm.MessageQueuesForDisk.java

@Override
public ConcurrentLinkedQueue<IMessage> removeOutgoingQueue(String index) {
    ConcurrentLinkedQueue<IMessage> outgoingQueue = null;
    synchronized (this.outgoingQueues) {
        outgoingQueue = this.outgoingQueues.remove(index);
    }
    if (outgoingQueue == null) {
        return null;
    }
    int removedCount = outgoingQueue.size();
    long removeLength = this.sizeOfMessage * removedCount;
    this.sizeOfMessagesDataInMem = this.sizeOfMessagesDataInMem - removeLength;
    this.countOfMessagesDataInMem = this.countOfMessagesDataInMem - removedCount;
    this.sizeOfHashMapsInMem = this.sizeOfHashMapsInMem
            - (sizeOfRef * 2 + (index.length() * sizeOfChar) + sizeOfEmptyMessageQueue);
    return outgoingQueue;
}

From source file:com.google.cloud.dns.testing.LocalDnsHelper.java

/**
 * Creates a new change, stores it, and if delayChange > 0, invokes processing in a new thread.
 */
Response createChange(String projectId, String zoneName, Change change, String... fields) {
    ZoneContainer zoneContainer = findZone(projectId, zoneName);
    if (zoneContainer == null) {
        return Error.NOT_FOUND.response(
                String.format("The 'parameters.managedZone' resource named %s does not exist.", zoneName));
    }
    Response response = checkChange(change, zoneContainer);
    if (response != null) {
        return response;
    }
    Change completeChange = new Change();
    if (change.getAdditions() != null) {
        completeChange.setAdditions(ImmutableList.copyOf(change.getAdditions()));
    }
    if (change.getDeletions() != null) {
        completeChange.setDeletions(ImmutableList.copyOf(change.getDeletions()));
    }
    /* We need to set the ID for the change. We are working in a concurrent environment. We know
    that the element fell on an index between 1 and maxId (index 0 is the default change which
    creates SOA and NS), so we will reset all IDs between 0 and maxId (all of them are valid for
    the respective objects). */
    ConcurrentLinkedQueue<Change> changeSequence = zoneContainer.changes();
    changeSequence.add(completeChange);
    int maxId = changeSequence.size();
    int index = 0;
    for (Change c : changeSequence) {
        if (index == maxId) {
            break;
        }
        c.setId(String.valueOf(index++));
    }
    completeChange.setStatus("pending");
    completeChange.setStartTime(ISODateTimeFormat.dateTime().withZoneUTC().print(System.currentTimeMillis()));
    invokeChange(projectId, zoneName, completeChange.getId());
    Change result = OptionParsers.extractFields(completeChange, fields);
    try {
        return new Response(HTTP_OK, jsonFactory.toString(result));
    } catch (IOException e) {
        return Error.INTERNAL_ERROR
                .response(String.format("Error when serializing change %s in managed zone %s in project %s.",
                        result.getId(), zoneName, projectId));
    }
}
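
The renumbering loop above snapshots size() into maxId before iterating. Below is a minimal standalone sketch of that pattern, using a hypothetical Change holder rather than the Google Cloud DNS type: the queue's iterator is weakly consistent, so bounding the loop by the snapshot keeps elements appended mid-iteration from being renumbered.

import java.util.concurrent.ConcurrentLinkedQueue;

public class SnapshotRenumber {
    static final class Change {
        volatile String id;
    }

    public static void main(String[] args) {
        ConcurrentLinkedQueue<Change> changes = new ConcurrentLinkedQueue<>();
        for (int i = 0; i < 3; i++) {
            changes.add(new Change());
        }

        // Snapshot size() first: the iterator is weakly consistent, so
        // bounding the loop by the snapshot avoids renumbering elements
        // another thread appends while we iterate.
        int maxId = changes.size();
        int index = 0;
        for (Change c : changes) {
            if (index == maxId) {
                break;
            }
            c.id = String.valueOf(index++);
        }

        changes.forEach(c -> System.out.println(c.id)); // 0, 1, 2
    }
}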

From source file:org.restcomm.app.qoslib.Services.Events.EventUploader.java

public void report(boolean local, Location location) {
    try {
        //now create a list of eventData variables
        List<EventData> eventDataList = new ArrayList<EventData>();
        String strPhone = "";
        long duration = 0;

        EventDataEnvelope eventDataEnvelope = null;
        if (event != null) {
            // Don't send an unconfirmed travel event
            if (event.getEventType() == EventType.TRAVEL_CHECK
                    && !owner.getTravelDetector().isConfirmed()) {
                LoggerUtil.logToFile(LoggerUtil.Level.DEBUG, TAG, "report", "skip unconfirmed travel event");
                return;
            }

            EventData eventData = generateEventDataFromEvent(event, local);
            if (eventData == null)
                return;
            //MMCLogger.logToFile(MMCLogger.Level.DEBUG, TAG, "run", "reporting event type=" + event.getEventType() + ",lat:" + eventData.getFltEventLat() + ",local=" + local);
            if (eventData.getFltEventLat() == 0 && location != null) {
                eventData.setFltEventLat((float) location.getLatitude());
                eventData.setFltEventLng((float) location.getLongitude());
                eventData.setiUncertainty((int) location.getAccuracy());
            }
            eventData.setCallID(event.getLocalID());
            EventData eventData2 = null;
            eventDataList.add(eventData);

            if (event.getEventType() == EventType.APP_MONITORING && event.getDownloadSpeed() > 0
                    && event.getUploadSpeed() > 0) {
                //App throughput was getting stored twice
                boolean stored = PreferenceManager.getDefaultSharedPreferences(owner)
                        .getBoolean(PreferenceKeys.Miscellaneous.THROUGHPUT_STORED, false);
                if (!stored) {
                    owner.getReportManager().storeEvent(eventData);
                    PreferenceManager.getDefaultSharedPreferences(owner).edit()
                            .putBoolean(PreferenceKeys.Miscellaneous.THROUGHPUT_STORED, true).commit();
                } else
                    PreferenceManager.getDefaultSharedPreferences(owner).edit()
                            .putBoolean(PreferenceKeys.Miscellaneous.THROUGHPUT_STORED, false).commit();
            }
            if (event.getEventType() == EventType.MAN_PLOTTING) {
                eventData.setLookupid1(event.getBuildingID());
                eventData.setRunningApps(event.getAppData()); // contains user's polyline
            }

            // Event is 'reported' locally before GPS sampling is complete
            // to make it show up on the map as soon as it gets a first fix
            //if (local == true && ((event.getEventType() != EventType.MAN_SPEEDTEST && event.getEventType() != EventType.LATENCY_TEST && event.getEventType() != EventType.APP_MONITORING) || event.latency != 0))
            //if (local == true && (event.getEventType().waitsForSpeed() == false || event.getLatency() != 0))
            //   (local == false && event.getEventType() == EventType.MAN_SPEEDTEST))  // but for speedtest, wait until complete
            if (!event.getEventType().waitsForSpeed() || event.getLatency() != 0) {
                if (event.getLocalID() > 0 && eventData.getFltEventLng() != 0) {
                    owner.getReportManager().updateEventField(event.getLocalID(), Events.KEY_LATITUDE,
                            String.valueOf(eventData.getFltEventLat()));
                    owner.getReportManager().updateEventField(event.getLocalID(), Events.KEY_LONGITUDE,
                            String.valueOf(eventData.getFltEventLng()));
                } else if (event.getLocalID() == 0) {
                    int evtID = owner.getReportManager().storeEvent(eventData); // .reportEvent (eventData, event, local, location);
                    event.setLocalID(evtID);
                    eventData.setCallID(evtID);
                }
                if (complimentaryEvent != null) {
                    if (complimentaryEvent.getLocalID() > 0 && location != null) {
                        owner.getReportManager().updateEventField(complimentaryEvent.getLocalID(),
                                Events.KEY_LATITUDE, String.valueOf(location.getLatitude()));
                        owner.getReportManager().updateEventField(complimentaryEvent.getLocalID(),
                                Events.KEY_LONGITUDE, String.valueOf(location.getLongitude()));
                    } else if (complimentaryEvent.getLocalID() == 0) {
                        int evtID = owner.getReportManager().storeEvent(eventData); //(eventData2, complimentaryEvent, local, location);
                        complimentaryEvent.setLocalID(evtID);
                    }
                }
            }
            if (local)
                return;

            //if the complimentary event is not null, then this event must be
            //the "starting end" of an event couple. If so, then this event should
            //be uploaded alongside the main event
            if (complimentaryEvent != null && !local) {
                eventData2 = generateEventDataFromEvent(complimentaryEvent, local);
                if (eventData2 != null) {
                    //complimentaryEvent.setFlag (EventObj.SERVER_SENDING, true);
                    eventData2.setCallID(complimentaryEvent.getLocalID());
                    eventDataList.add(eventData2);
                }
            }
            //now create the eventDataEnvelope to wrap the list of eventData variables
            //along with other required variables
            String phoneNumFirst4 = "";
            if (strPhone != null && strPhone.length() > 4)
                phoneNumFirst4 = strPhone.substring(0, 4);

            eventDataEnvelope = generateEventDataEnvFromEventList(eventDataList, phoneNumFirst4);
            // when event is filled in, travel and fillin would like to see it before sending
            if (owner.isServiceRunning() && owner.getTravelDetector() != null)
                owner.getTravelDetector().eventCompleted(event);
        } else // null event: create a dummy envelope to trigger sending the event queue (without adding a new event)
            eventDataEnvelope = new EventDataEnvelope();

        boolean bSent = false, bFromQueue = false, bAddedQueue = false;
        loadEventsQueue(); // only loads if queue hasn't loaded yet (ensure loaded)
        ConcurrentLinkedQueue<EventDataEnvelope> eventQueue = owner.getEventManager().getEventQueue();

        // Send this event and any other events that were in the queue, as long as it didn't fail to send
        while (eventDataEnvelope != null) {
            bSent = uploadEventEnvelope(eventDataEnvelope, bFromQueue); // counts as sent as long as it reached the server (even if the server returned an error)
            if (!bSent) {
                //if (!bFromQueue)
                {
                    bAddedQueue = true;
                    eventQueue.add(eventDataEnvelope);
                    while (eventQueue.size() > 200)
                        eventQueue.poll();
                }
                break;
            } else {
                eventDataEnvelope = eventQueue.poll();
                bFromQueue = true;
            }
        }
        // persist the queue on check-ins (roughly every 3 hrs) or when an envelope was queued, in case something happens
        if (event != null && (event.isCheckin || bAddedQueue))
            saveEvents(eventQueue);
    } finally {
        if (!local) {
            //LoggerUtil.logToFile(LoggerUtil.Level.DEBUG, TAG, "report(false)", "uploadingEvent(false)");
            owner.uploadingEvent(false);

        }
    }
}
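
The retry path above caps the queue by polling off the head whenever size() climbs past 200. A minimal sketch of that capping pattern in isolation (class and method names are illustrative, not from the source):

import java.util.concurrent.ConcurrentLinkedQueue;

public class BoundedRetryQueue {
    private static final int MAX_QUEUED = 200;

    private final ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();

    // Enqueue a failed upload for retry, dropping the oldest entries once
    // the queue grows past MAX_QUEUED. Note that size() is O(n) here, so
    // for large bounds a counter-based cap is cheaper.
    void addForRetry(String envelope) {
        queue.add(envelope);
        while (queue.size() > MAX_QUEUED) {
            queue.poll();
        }
    }

    public static void main(String[] args) {
        BoundedRetryQueue retries = new BoundedRetryQueue();
        for (int i = 0; i < 250; i++) {
            retries.addForRetry("envelope-" + i);
        }
        System.out.println(retries.queue.size()); // 200
    }
}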

From source file:com.chinamobile.bcbsp.comm.MessageQueuesForDisk.java

@Override
public int getIncomingQueueSize(String dstVertexID) {
    ConcurrentLinkedQueue<IMessage> incomingQueue = null;
    // Get the hash bucket index.
    int hashCode = dstVertexID.hashCode();
    int hashIndex = hashCode % this.hashBucketNumber; // bucket index
    hashIndex = (hashIndex < 0 ? hashIndex + this.hashBucketNumber : hashIndex);
    BucketMeta meta = this.incomingQueues.get(hashIndex);
    incomingQueue = meta.queueMap.get(dstVertexID);
    if (incomingQueue != null) {
        return incomingQueue.size();
    } else {
        return 0;
    }
}

From source file:com.chinamobile.bcbsp.comm.MessageQueuesForDisk.java

/**
 * Caches the bucket of messages indexed by bucketIndex into a disk file.
 * @param queuesBuckets the per-bucket metadata list for the queue
 * @param bucketIndex index of the bucket to cache
 * @param queuePath sub-directory name for the queue's disk files
 * @throws IOException if the bucket directory or file cannot be created or written
 */
private void saveBucket(ArrayList<BucketMeta> queuesBuckets, int bucketIndex, String queuePath)
        throws IOException {
    if (queuesBuckets.get(bucketIndex).countInMemory < this.countThresholdForBucket) {
        return;
    }
    LOG.info("[MessageQueuesForDisk] is saving the [" + queuePath + " Bucket-" + bucketIndex + "] >>> size = "
            + queuesBuckets.get(bucketIndex).countInMemory + ".");
    long start = System.currentTimeMillis();
    /* Clock */
    File messagesDataFileBucket;
    FileWriter fwMessagesData;
    BufferedWriter bwMessagesData;
    File messagesDataFileQueue = new File(this.messagesDataFile + "/" + queuePath);
    if (!messagesDataFileQueue.exists()) {
        if (!messagesDataFileQueue.mkdir()) {
            throw new IOException("Make dir " + messagesDataFileQueue + " fail!");
        }
    }
    messagesDataFileBucket = new File(messagesDataFileQueue + "/" + "bucket-" + bucketIndex);
    boolean isNewFile = false;
    // The bucket file does not exist; create it.
    if (!messagesDataFileBucket.exists()) {
        if (!messagesDataFileBucket.createNewFile()) {
            throw new IOException("Create bucket file" + messagesDataFileBucket + " failed!");
        }
        isNewFile = true;
    }
    // Append to the bucket file by line.
    fwMessagesData = new FileWriter(messagesDataFileBucket, true);
    bwMessagesData = new BufferedWriter(fwMessagesData, 65536);
    if (isNewFile) {
        // Write the file header.
        bwMessagesData.write(Constants.MSG_BUCKET_FILE_HEADER + "-" + queuePath + "-" + bucketIndex);
    }
    ConcurrentHashMap<String, ConcurrentLinkedQueue<IMessage>> queueMap = queuesBuckets
            .get(bucketIndex).queueMap;
    ConcurrentLinkedQueue<IMessage> tempQueue = null;
    Entry<String, ConcurrentLinkedQueue<IMessage>> entry = null;
    Iterator<Entry<String, ConcurrentLinkedQueue<IMessage>>> it = queueMap.entrySet().iterator();
    // Traverse the map of queues and cache them to disk file.
    while (it.hasNext()) {
        entry = it.next();
        String key = entry.getKey();
        tempQueue = entry.getValue();
        if (tempQueue.size() <= 0) {
            continue;
        }
        bwMessagesData.newLine();
        bwMessagesData.write(key + Constants.KV_SPLIT_FLAG + queueToString(tempQueue));
        this.sizeOfHashMapsInMem = this.sizeOfHashMapsInMem
                - (sizeOfRef + sizeOfInteger + sizeOfEmptyMessageQueue);
    } // while
    bwMessagesData.close();
    fwMessagesData.close();
    // Update the meta data of the bucket.
    BucketMeta meta = queuesBuckets.get(bucketIndex);
    // Update the size of messages data in memory.
    this.sizeOfMessagesDataInMem = this.sizeOfMessagesDataInMem - meta.lengthInMemory;
    this.countOfMessagesDataInMem = this.countOfMessagesDataInMem - meta.countInMemory;
    meta.onDiskFlag = true;
    meta.lengthInMemory = 0;
    meta.countInMemory = 0;
    meta.queueMap.clear();
    this.writeDiskTime = this.writeDiskTime + (System.currentTimeMillis() - start);
}
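
The loop above skips empty queues with tempQueue.size() <= 0. For ConcurrentLinkedQueue that check traverses every node, while isEmpty() inspects only the head; when the actual count is not needed, the cheaper idiom looks like this minimal sketch:

import java.util.concurrent.ConcurrentLinkedQueue;

public class EmptinessCheck {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();

        // Equivalent checks, different cost: size() traverses every node,
        // isEmpty() looks only at the head.
        System.out.println(queue.size() <= 0); // true, O(n)
        System.out.println(queue.isEmpty());   // true, O(1)

        queue.add("message");
        System.out.println(queue.isEmpty());   // false
    }
}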

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run query at a target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>Query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS (target QPS).
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    int queryIntervalMs = (int) (MILLIS_PER_SECOND / startQPS);
    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                int numQueriesExecutedInt = numQueriesExecuted.get();
                LOGGER.info(
                        "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                        startQPS, timePassed, numQueriesExecutedInt,
                        numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                        totalBrokerTime.get() / (double) numQueriesExecutedInt,
                        totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                reportStartTime = currentTime;
                numReportIntervals++;

                if ((numIntervalsToReportAndClearStatistics != 0)
                        && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                    numReportIntervals = 0;
                    startTime = currentTime;
                    reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                            statisticsList);
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to finish executing.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            startQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
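
The drain loop above spins on queryQueue.size() != 0, which re-counts the whole backlog on every pass. A sketch of the same drain-wait using isEmpty() instead (worker logic reduced to a stub; names are illustrative):

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class DrainWaitDemo {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
        for (int i = 0; i < 1000; i++) {
            queryQueue.add("query-" + i);
        }

        ExecutorService workers = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 4; i++) {
            workers.submit(() -> {
                String query;
                while ((query = queryQueue.poll()) != null) {
                    // a real worker would send the query to the broker here
                }
            });
        }
        workers.shutdown();

        // Spin until the queue drains; isEmpty() only inspects the head,
        // while size() != 0 would re-count the whole backlog every pass.
        while (!queryQueue.isEmpty()) {
            Thread.sleep(1);
        }
        workers.awaitTermination(1, TimeUnit.MINUTES);
        System.out.println("All queries drained.");
    }
}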

From source file:com.chinamobile.bcbsp.comm.MessageQueuesForDisk.java

@Override
public ConcurrentLinkedQueue<IMessage> removeIncomingQueue(String dstVerID) {
    ConcurrentLinkedQueue<IMessage> incomingQueue = null;
    // Get the hash bucket index.
    int hashCode = dstVerID.hashCode();
    int hashIndex = hashCode % this.hashBucketNumber; // bucket index
    hashIndex = (hashIndex < 0 ? hashIndex + this.hashBucketNumber : hashIndex);
    BucketMeta meta = this.incomingQueues.get(hashIndex);
    // The bucket is on disk.
    if (meta.onDiskFlag) {
        this.incomingFileLocks[hashIndex].lock();
        /** Lock */
        try {
            loadBucket(this.incomingQueues, hashIndex, "incoming");
        } catch (IOException e) {
            throw new RuntimeException("[MessageQueuesForDisk:removeIncomingQueue]", e);
        } finally {
            this.incomingFileLocks[hashIndex].unlock();
            /** Unlock */
        }
    }
    meta = this.incomingQueues.get(hashIndex);
    incomingQueue = meta.queueMap.remove(dstVerID);
    if (incomingQueue == null) {
        incomingQueue = new ConcurrentLinkedQueue<IMessage>();
    }
    int removedCount = incomingQueue.size();
    long removedLength = removedCount * this.sizeOfMessage;
    // Update the meta data.
    meta.count = meta.count - removedCount;
    meta.countInMemory = meta.countInMemory - removedCount;
    meta.length = meta.length - removedLength;
    meta.lengthInMemory = meta.lengthInMemory - removedLength;
    this.sizeOfMessagesDataInMem = this.sizeOfMessagesDataInMem - removedLength;
    this.countOfMessagesDataInMem = this.countOfMessagesDataInMem - removedCount;
    this.sizeOfHashMapsInMem = this.sizeOfHashMapsInMem
            - (sizeOfRef * 2 + (dstVerID.length() * sizeOfChar) + sizeOfEmptyMessageQueue);
    return incomingQueue;
}

From source file:org.restcomm.app.qoslib.Services.Events.EventUploader.java

/**
 * Loads event requests from storage and adds them to the queue.
 */
protected void loadEventsQueue() {

    ConcurrentLinkedQueue<EventDataEnvelope> eventQueue = owner.getEventManager().getEventQueue();
    if (eventQueue == null) {
        eventQueue = new ConcurrentLinkedQueue<EventDataEnvelope>();
        owner.getEventManager().setEventQueue(eventQueue);
    } else
        return;

    Gson gson = new Gson();
    SharedPreferences secureSettings = MainService.getSecurePreferences(owner);
    if (secureSettings.contains(PreferenceKeys.Miscellaneous.EVENTS_QUEUE)) {
        try {
            String strQueue = secureSettings.getString(PreferenceKeys.Miscellaneous.EVENTS_QUEUE, "");
            //LoggerUtil.logToFile(LoggerUtil.Level.DEBUG, TAG, "loadQueue", strQueue);
            if (strQueue.length() < 100)
                return;
            JSONArray jsonqueue = new JSONArray(strQueue);
            for (int i = 0; i < jsonqueue.length(); i++) {
                JSONObject jsonRequest = jsonqueue.getJSONObject(i);
                //if(jsonRequest.getString("type").equals(EventDataEnvelope.TAG)) 
                {
                    EventDataEnvelope request = gson.fromJson(jsonRequest.toString(), EventDataEnvelope.class);
                    //EventDataEnvelope request = new EventDataEnvelope(jsonRequest);
                    eventQueue.add(request);
                }
            }
            // remove the oldest events until the queue is at or below 300
            while (eventQueue.size() > 300)
                eventQueue.poll();
        } catch (JSONException e) {
            LoggerUtil.logToFile(LoggerUtil.Level.ERROR, TAG, "loadEventsQueue",
                    "JSONException loading events from storage", e);
        } catch (Exception e) {
            LoggerUtil.logToFile(LoggerUtil.Level.ERROR, TAG, "loadEventsQueue",
                    "Exception loading events from storage", e);
        }
    }

}
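
A minimal sketch of the save/load round-trip this method participates in, assuming only Gson on the classpath; the 300-entry bound mirrors the code above, and all names are illustrative:

import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import java.lang.reflect.Type;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;

public class QueuePersistenceSketch {
    private static final int MAX_QUEUED = 300;
    private static final Gson GSON = new Gson();

    static String save(ConcurrentLinkedQueue<String> queue) {
        // Gson serializes the queue as a JSON array of its elements.
        return GSON.toJson(queue);
    }

    static ConcurrentLinkedQueue<String> load(String json) {
        Type listType = new TypeToken<List<String>>() {}.getType();
        List<String> elements = GSON.fromJson(json, listType);
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>(elements);
        // Drop the oldest entries until the restored queue is within bounds.
        while (queue.size() > MAX_QUEUED) {
            queue.poll();
        }
        return queue;
    }

    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        queue.add("event-1");
        queue.add("event-2");
        String json = save(queue);
        System.out.println(json);              // ["event-1","event-2"]
        System.out.println(load(json).size()); // 2
    }
}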

From source file:com.chinamobile.bcbsp.comm.MessageQueuesForDisk.java

@Override
@SuppressWarnings("unchecked")
public ConcurrentLinkedQueue<IMessage> removeIncomedQueue(String dstVertID) {
    ConcurrentLinkedQueue<IMessage> incomedQueue = null;
    // Get the hash bucket index.
    int hashCode = dstVertID.hashCode();
    int hashIndex = hashCode % this.hashBucketNumber; // bucket index
    hashIndex = (hashIndex < 0 ? hashIndex + this.hashBucketNumber : hashIndex);
    BucketMeta meta = this.incomedQueues.get(hashIndex);
    // The bucket is on disk.
    if (meta.onDiskFlag) {
        this.incomedFileLocks[hashIndex].lock();
        /** Lock */
        try {
            loadBucket(this.incomedQueues, hashIndex, "incomed");
        } catch (IOException e) {
            LOG.info("==> bucket-" + hashIndex + ", VertexID = " + dstVertID);
            LOG.info("size = " + meta.queueMap.get(dstVertID).size());
            throw new RuntimeException("==> bucket-" + hashIndex + ", VertexID = " + dstVertID, e);
        } finally {
            this.incomedFileLocks[hashIndex].unlock();
            /** Unlock */
        }
    }
    meta = this.incomedQueues.get(hashIndex);
    this.currentBucket = hashIndex;
    incomedQueue = meta.queueMap.remove(dstVertID);
    if (incomedQueue == null) {
        incomedQueue = new ConcurrentLinkedQueue<IMessage>();
    }
    int removedCount = incomedQueue.size();
    long removedLength = removedCount * this.sizeOfMessage;
    // Update the meta data.
    meta.count = meta.count - removedCount;
    meta.countInMemory = meta.countInMemory - removedCount;
    meta.length = meta.length - removedLength;
    meta.lengthInMemory = meta.lengthInMemory - removedLength;
    this.sizeOfMessagesDataInMem = this.sizeOfMessagesDataInMem - removedLength;
    this.countOfMessagesDataInMem = this.countOfMessagesDataInMem - removedCount;
    this.sizeOfHashMapsInMem = this.sizeOfHashMapsInMem
            - (sizeOfRef * 2 + (dstVertID.length() * sizeOfChar) + sizeOfEmptyMessageQueue);
    return incomedQueue;
}

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run query at an increasing target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>We start with the start QPS, and keep adding delta QPS to the start QPS during the test.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>Query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS.
 * @param deltaQPS delta QPS.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @param numIntervalsToIncreaseQPS number of intervals to increase QPS.
 * @throws Exception
 */
public static void increasingQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, double deltaQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics, int numIntervalsToIncreaseQPS) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    double currentQPS = startQPS;
    int queryIntervalMs = (int) (MILLIS_PER_SECOND / currentQPS);
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                reportStartTime = currentTime;
                numReportIntervals++;

                if (numReportIntervals == numIntervalsToIncreaseQPS) {
                    // Try to find the next interval.
                    double newQPS = currentQPS + deltaQPS;
                    int newQueryIntervalMs;
                    // Skip the target QPS with the same interval as the previous one.
                    while ((newQueryIntervalMs = (int) (MILLIS_PER_SECOND / newQPS)) == queryIntervalMs) {
                        newQPS += deltaQPS;
                    }
                    if (newQueryIntervalMs == 0) {
                        LOGGER.warn("Due to sleep granularity of millisecond, cannot further increase QPS.");
                    } else {
                        // Find the next interval.
                        LOGGER.info(
                                "--------------------------------------------------------------------------------");
                        LOGGER.info("REPORT FOR TARGET QPS: {}", currentQPS);
                        int numQueriesExecutedInt = numQueriesExecuted.get();
                        LOGGER.info(
                                "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                        + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                                currentQPS, timePassed, numQueriesExecutedInt,
                                numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                                totalBrokerTime.get() / (double) numQueriesExecutedInt,
                                totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);

                        currentQPS = newQPS;
                        queryIntervalMs = newQueryIntervalMs;
                        LOGGER.info(
                                "Increase target QPS to: {}, the following statistics are for the new target QPS.",
                                currentQPS);
                    }
                } else {
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info(
                            "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                    + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                            currentQPS, timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals % numIntervalsToReportAndClearStatistics == 0)) {
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to finish executing.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            currentQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
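
The Worker class submitted by both runners is not included in this excerpt. The sketch below is a hypothetical minimal consumer showing the queue side of the pattern: each worker polls the shared ConcurrentLinkedQueue and backs off briefly when it finds the queue empty.

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

public class WorkerSketch implements Runnable {
    private final ConcurrentLinkedQueue<String> queryQueue;
    private final AtomicInteger numQueriesExecuted;

    WorkerSketch(ConcurrentLinkedQueue<String> queryQueue, AtomicInteger numQueriesExecuted) {
        this.queryQueue = queryQueue;
        this.numQueriesExecuted = numQueriesExecuted;
    }

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            String query = queryQueue.poll();
            if (query == null) {
                // Queue momentarily empty: back off briefly instead of spinning hot.
                try {
                    Thread.sleep(1);
                } catch (InterruptedException e) {
                    return; // shutdownNow() interrupts us here
                }
                continue;
            }
            // driver.runQuery(query) would go here in the real runner.
            numQueriesExecuted.incrementAndGet();
        }
    }
}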