Example usage for the java.util.concurrent.LinkedBlockingQueue(Collection<? extends E>) constructor

Introduction

On this page you can find example usages of the java.util.concurrent.LinkedBlockingQueue(Collection<? extends E>) constructor.

Prototype

public LinkedBlockingQueue(Collection<? extends E> c) 

Document

Creates a LinkedBlockingQueue with a capacity of Integer#MAX_VALUE, initially containing the elements of the given collection, added in traversal order of the collection's iterator.
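
A minimal, self-contained sketch of this constructor (class and variable names here are illustrative, not taken from the examples below):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;

public class SeededQueueExample {
    public static void main(String[] args) throws InterruptedException {
        // Seed the queue from an existing collection; capacity defaults to Integer.MAX_VALUE.
        List<String> seed = Arrays.asList("first", "second", "third");
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>(seed);

        // Elements come out in the traversal order of the collection's iterator.
        System.out.println(queue.take()); // prints "first"
        System.out.println(queue.poll()); // prints "second"
    }
}

Note that most of the examples below actually use the single-argument LinkedBlockingQueue(int capacity) constructor, which creates an empty queue with a fixed capacity.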

Usage

From source file:com.SecUpwN.AIMSICD.service.AimsicdService.java

/**
 * Updates Neighbouring Cell details
 */
public List<Cell> updateNeighbouringCells() {
    List<Cell> neighboringCells = new ArrayList<>();

    List<NeighboringCellInfo> neighboringCellInfo;
    neighboringCellInfo = tm.getNeighboringCellInfo();
    if (neighboringCellInfo.size() == 0) {
        // try to poll the neighboring cells for a few seconds
        final LinkedBlockingQueue<NeighboringCellInfo> neighboringCellBlockingQueue = new LinkedBlockingQueue<>(
                100);
        final PhoneStateListener listener = new PhoneStateListener() {
            private void handle() {
                List<NeighboringCellInfo> neighboringCellInfo;
                neighboringCellInfo = tm.getNeighboringCellInfo();
                if (neighboringCellInfo.size() == 0) {
                    return;
                }
                Log.i(TAG, "neighbouringCellInfo empty - event based polling succeeded!");
                tm.listen(this, PhoneStateListener.LISTEN_NONE);
                neighboringCellBlockingQueue.addAll(neighboringCellInfo);
            }

            @Override
            public void onServiceStateChanged(ServiceState serviceState) {
                handle();
            }

            @Override
            public void onDataConnectionStateChanged(int state) {
                handle();
            }

            @Override
            public void onDataConnectionStateChanged(int state, int networkType) {
                handle();
            }

            @Override
            public void onSignalStrengthsChanged(SignalStrength signalStrength) {
                handle();
            }

            @Override
            public void onCellInfoChanged(List<CellInfo> cellInfo) {
                handle();
            }
        };
        Log.i(TAG, "neighbouringCellInfo empty - start polling");

        //LISTEN_CELL_INFO added in API 17
        if (Build.VERSION.SDK_INT > 16) {
            tm.listen(listener, PhoneStateListener.LISTEN_CELL_INFO | PhoneStateListener.LISTEN_CELL_LOCATION
                    | PhoneStateListener.LISTEN_DATA_CONNECTION_STATE | PhoneStateListener.LISTEN_SERVICE_STATE
                    | PhoneStateListener.LISTEN_SIGNAL_STRENGTHS);
        } else {
            tm.listen(listener,
                    PhoneStateListener.LISTEN_CELL_LOCATION | PhoneStateListener.LISTEN_DATA_CONNECTION_STATE
                            | PhoneStateListener.LISTEN_SERVICE_STATE
                            | PhoneStateListener.LISTEN_SIGNAL_STRENGTHS);
        }

        for (int i = 0; i < 10 && neighboringCellInfo.size() == 0; i++) {
            try {
                Log.i(TAG, "neighbouringCellInfo empty - try " + i);
                NeighboringCellInfo info = neighboringCellBlockingQueue.poll(1, TimeUnit.SECONDS);
                if (info == null) {
                    neighboringCellInfo = tm.getNeighboringCellInfo();
                    if (neighboringCellInfo.size() > 0) {
                        Log.i(TAG, "neighbouringCellInfo empty - try " + i + " succeeded time based");
                        break;
                    } else {
                        continue;
                    }
                }
                ArrayList<NeighboringCellInfo> cellInfoList = new ArrayList<NeighboringCellInfo>(
                        neighboringCellBlockingQueue.size() + 1);
                while (info != null) {
                    cellInfoList.add(info);
                    info = neighboringCellBlockingQueue.poll(1, TimeUnit.SECONDS);
                }
                neighboringCellInfo = cellInfoList;
            } catch (InterruptedException e) {
                // normal
            }
        }
    }

    Log.i(TAG, "neighbouringCellInfo Size - " + neighboringCellInfo.size());
    for (NeighboringCellInfo neighbourCell : neighboringCellInfo) {
        Log.i(TAG, "neighbouringCellInfo - CID:" + neighbourCell.getCid() + " LAC:" + neighbourCell.getLac()
                + " RSSI:" + neighbourCell.getRssi() + " PSC:" + neighbourCell.getPsc());

        final Cell cell = new Cell(neighbourCell.getCid(), neighbourCell.getLac(), neighbourCell.getRssi(),
                neighbourCell.getPsc(), neighbourCell.getNetworkType(), false);
        neighboringCells.add(cell);
    }

    return neighboringCells;
}

From source file:com.secupwn.aimsicd.service.CellTracker.java

/**
 * Description: Updates Neighboring Cell details
 *
 * TODO: add more details...
 */
public List<Cell> updateNeighboringCells() {
    List<Cell> neighboringCells = new ArrayList<>();
    List<NeighboringCellInfo> neighboringCellInfo = tm.getNeighboringCellInfo();
    if (neighboringCellInfo == null) {
        neighboringCellInfo = new ArrayList<>();
    }

    Boolean nclp = tinydb.getBoolean("nc_list_present"); // NC list present? (default is false)

    //if nclp = true then check for neighboringCellInfo
    if (neighboringCellInfo != null && neighboringCellInfo.size() == 0 && nclp) {

        log.info("NeighboringCellInfo is empty: start polling...");

        // Try to poll the neighboring cells for a few seconds
        neighboringCellBlockingQueue = new LinkedBlockingQueue<>(100); // TODO What is this ??

        //LISTEN_CELL_INFO added in API 17
        // TODO: See issue #555 (DeviceApi17.java is using API 18 CellInfoWcdma calls.
        if (Build.VERSION.SDK_INT > 17) {
            DeviceApi18.startListening(tm, phoneStatelistener);
        } else {
            tm.listen(phoneStatelistener,
                    PhoneStateListener.LISTEN_CELL_LOCATION | PhoneStateListener.LISTEN_CELL_INFO | // API 17
                            PhoneStateListener.LISTEN_DATA_CONNECTION_STATE
                            | PhoneStateListener.LISTEN_SERVICE_STATE
                            | PhoneStateListener.LISTEN_SIGNAL_STRENGTHS);
        }

        // TODO: Consider removing ??
        for (int i = 0; i < 10 && neighboringCellInfo.size() == 0; i++) {
            try {
                log.debug("NeighboringCellInfo empty: trying " + i);
                NeighboringCellInfo info = neighboringCellBlockingQueue.poll(1, TimeUnit.SECONDS);
                if (info == null) {
                    neighboringCellInfo = tm.getNeighboringCellInfo();
                    if (neighboringCellInfo != null) {
                        if (neighboringCellInfo.size() > 0) {
                            // Can we think of a better log message here?
                            log.debug("NeighboringCellInfo found on " + i + " try. (time based)");
                            break;
                        } else {
                            continue;
                        }
                    }
                }
                List<NeighboringCellInfo> cellInfoList = new ArrayList<>(
                        neighboringCellBlockingQueue.size() + 1);
                while (info != null) {
                    cellInfoList.add(info);
                    info = neighboringCellBlockingQueue.poll(1, TimeUnit.SECONDS);
                }
                neighboringCellInfo = cellInfoList;
            } catch (InterruptedException e) {
                // TODO: Add a more valuable message here!
                log.error("", e);
            }
        }
    }

    //log.debug(mTAG + ": neighboringCellInfo size: " + neighboringCellInfo.size());

    // Add NC list to DBi_measure:nc_list
    for (NeighboringCellInfo neighborCell : neighboringCellInfo) {
        log.info("NeighboringCellInfo -" + " LAC:" + neighborCell.getLac() + " CID:" + neighborCell.getCid()
                + " PSC:" + neighborCell.getPsc() + " RSSI:" + neighborCell.getRssi());

        final Cell cell = new Cell(neighborCell.getCid(), neighborCell.getLac(), neighborCell.getRssi(),
                neighborCell.getPsc(), neighborCell.getNetworkType(), false);
        neighboringCells.add(cell);
    }
    return neighboringCells;
}

From source file:org.apache.bookkeeper.bookie.LedgerCacheTest.java

/**
 * Race where a flush would fail because a garbage collection occurred at
 * the wrong time.
 * {@link https://issues.apache.org/jira/browse/BOOKKEEPER-604}
 */
@Test(timeout = 60000)
public void testFlushDeleteRace() throws Exception {
    newLedgerCache();
    final AtomicInteger rc = new AtomicInteger(0);
    final LinkedBlockingQueue<Long> ledgerQ = new LinkedBlockingQueue<Long>(1);
    final byte[] masterKey = "masterKey".getBytes();
    Thread newLedgerThread = new Thread() {
        public void run() {
            try {
                for (int i = 0; i < 1000 && rc.get() == 0; i++) {
                    ledgerCache.setMasterKey(i, masterKey);
                    ledgerQ.put((long) i);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in new ledger thread", e);
            }
        }
    };
    newLedgerThread.start();

    Thread flushThread = new Thread() {
        public void run() {
            try {
                while (true) {
                    Long id = ledgerQ.peek();
                    if (id == null) {
                        continue;
                    }
                    LOG.info("Put entry for {}", id);
                    try {
                        ledgerCache.putEntryOffset((long) id, 1, 0);
                    } catch (Bookie.NoLedgerException nle) {
                        //ignore
                    }
                    ledgerCache.flushLedger(true);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in flush thread", e);
            }
        }
    };
    flushThread.start();

    Thread deleteThread = new Thread() {
        public void run() {
            try {
                while (true) {
                    long id = ledgerQ.take();
                    LOG.info("Deleting {}", id);
                    ledgerCache.deleteLedger(id);
                }
            } catch (Exception e) {
                rc.set(-1);
                LOG.error("Exception in delete thread", e);
            }
        }
    };
    deleteThread.start();

    newLedgerThread.join();
    assertEquals("Should have been no errors", rc.get(), 0);

    deleteThread.interrupt();
    flushThread.interrupt();
}

From source file:com.addthis.hydra.task.source.AbstractStreamFileDataSource.java

@Override
public void init() {
    if (legacyMode != null) {
        magicMarksNumber = 0;
        useSimpleMarks = true;
        if (legacyMode.startsWith("stream")) {
            log.info("Using legacy mode for 'stream2' marks");
            useLegacyStreamPath = true;
        } else {
            log.info("Using legacy mode for 'mesh' marks");
        }
    }
    try {
        if (ignoreMarkDir) {
            File md = new File(markDir);

            if (md.exists()) {
                FileUtils.deleteDirectory(md);
                log.info("Deleted marks directory : {}", md);
            }
        }

        markDirFile = LessFiles.initDirectory(markDir);
        if (useSimpleMarks) {
            PageFactory<DBKey, SimpleMark> factory = ConcurrentPage.ConcurrentPageFactory.singleton;
            markDB = new PageDB<>(markDirFile, SimpleMark.class, MARK_PAGE_SIZE, MARK_PAGES, factory);
        } else {
            PageFactory<DBKey, SimpleMark> factory = ConcurrentPage.ConcurrentPageFactory.singleton;
            markDB = new PageDB<>(markDirFile, Mark.class, MARK_PAGE_SIZE, MARK_PAGES, factory);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    if (shardTotal == null || shardTotal == 0) {
        shardTotal = config.nodeCount;
    }
    if (shards == null) {
        shards = config.calcShardList(shardTotal);
    }
    PersistentStreamFileSource persistentStreamFileSource = getSource();
    source = persistentStreamFileSource;
    if (!processAllData && !hash
            && !((persistentStreamFileSource != null) && persistentStreamFileSource.hasMod())) {
        log.error(
                "possible source misconfiguration.  lacks both 'hash' and '{{mod}}'.  fix or set processAllData:true");
        throw new RuntimeException("Possible Source Misconfiguration");
    }
    try {
        if (persistentStreamFileSource != null) {
            if (!persistentStreamFileSource.init(getMarkDirFile(), shards)) {
                throw new IllegalStateException("Failure to initialize input source");
            }
        }
        if (filter != null) {
            setSource(new StreamSourceFiltered(source, filter));
        }
        if (hash) {
            setSource(new StreamSourceHashed(source, shards, shardTotal, useLegacyStreamPath));
        }
        log.info("buffering[capacity={};workers={};preopen={};marks={};maxSkip={};shards={}]", buffer, workers,
                preOpen, markDir, skipSourceExit, LessStrings.join(shards, ","));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    queue = new LinkedBlockingQueue<>(buffer);

    List<CompletableFuture<Void>> workerFutures = new ArrayList<>();
    for (int i = 0; i < workers; i++) {
        Runnable sourceWorker = new SourceWorker(i);
        workerFutures.add(runAsync(sourceWorker, workerThreadPool).whenComplete((ignored, error) -> {
            if (error != null) {
                shuttingDown.set(true);
                closeFuture.completeExceptionally(error);
            }
        }));
    }
    aggregateWorkerFuture = allOf(workerFutures.toArray(new CompletableFuture[workerFutures.size()]));
    aggregateWorkerFuture.thenRunAsync(this::close);
}

From source file:com.jkoolcloud.tnt4j.streams.inputs.TNTInputStream.java

/**
 * Creates a thread pool executor service for a given number of threads with a bounded task queue - the queue size is
 * 2x{@code threadsQty}. When the queue is full, new tasks are offered to it using the defined offer timeout. If a
 * task can't be put into the queue within this time, it is skipped and a warning is logged. Memory use therefore does
 * not grow drastically when consumers can't keep pace with producers filling the queue, effectively synchronizing
 * producers with consumers.
 *
 * @param threadsQty
 *            the number of threads in the pool
 * @param offerTimeout
 *            how long to wait before giving up on offering task to queue
 *
 * @return the newly created thread pool executor
 *
 * @see ThreadPoolExecutor#ThreadPoolExecutor(int, int, long, TimeUnit, BlockingQueue, ThreadFactory)
 */
private ExecutorService getBoundedExecutorService(int threadsQty, final int offerTimeout) {
    StreamsThreadFactory stf = new StreamsThreadFactory("StreamBoundedExecutorThread-"); // NON-NLS
    stf.addThreadFactoryListener(new StreamsThreadFactoryListener());

    ThreadPoolExecutor tpe = new ThreadPoolExecutor(threadsQty, threadsQty, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(threadsQty * 2), stf);

    tpe.setRejectedExecutionHandler(new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                boolean added = executor.getQueue().offer(r, offerTimeout, TimeUnit.SECONDS);
                if (!added) {
                    logger().log(OpLevel.WARNING,
                            StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                                    "TNTInputStream.tasks.buffer.limit"),
                            offerTimeout);
                    notifyStreamTaskRejected(r);
                }
            } catch (InterruptedException exc) {
                halt(true);
            }
        }
    });

    return tpe;
}

From source file:dk.dbc.opensearch.datadock.DatadockMain.java

private void initializeServices() throws ObjectRepositoryException, InstantiationException,
        IllegalAccessException, PluginException, HarvesterIOException, IllegalStateException,
        ParserConfigurationException, IOException, IllegalArgumentException, SQLException,
        InvocationTargetException, SAXException, ConfigurationException, ClassNotFoundException {
    log.trace("Initializing plugin resolver");
    //        String host = FedoraConfig.getHost();
    //        String port = FedoraConfig.getPort();
    //        String user = FedoraConfig.getUser();
    //        String pass = FedoraConfig.getPassPhrase();
    FcrepoReader reader = new FcrepoReader(host, port);
    FcrepoModifier modifier = new FcrepoModifier(host, port, user, pass);
    PluginResolver pluginResolver = new PluginResolver();

    //String javascriptPath = FileSystemConfig.getScriptPath();

    flowMapCreator = new FlowMapCreator(this.pluginFlowXmlPath, this.pluginFlowXsdPath);
    Map<String, List<PluginTask>> flowMap = flowMapCreator.createMap(pluginResolver, reader, modifier,
            javascriptPath);

    log.trace("Initializing harvester");
    IHarvest harvester = this.initializeHarvester();

    log.trace("Initializing the DatadockPool");
    LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>(this.queueSize);
    ThreadPoolExecutor threadpool = new ThreadPoolExecutor(this.corePoolSize, this.maxPoolSize,
            this.keepAliveTime, TimeUnit.SECONDS, queue);
    DatadockPool datadockPool = new DatadockPool(threadpool, harvester, flowMap);

    log.trace("Initializing the DatadockManager");
    datadockManager = new DatadockManager(datadockPool, harvester, flowMap);
}

From source file:io.nats.client.ITClusterTest.java

/**
 * Ensures that if a ping is not ponged within the pingInterval, that a disconnect/reconnect
 * takes place.
 * <p>
 * We test this by setting maxPingsOut < 0 and setting the pingInterval very small. After the
 * first disconnect, we measure the reconnect-to-disconnect time to ensure it isn't greater
 * than 2 * pingInterval.
 *
 * @throws Exception if anything goes wrong
 */
@Test
public void testPingReconnect() throws Exception {
    final int reconnects = 4;
    final AtomicInteger timesReconnected = new AtomicInteger();
    //        setLogLevel(Level.DEBUG);
    try (NatsServer s1 = runServerOnPort(1222)) {
        Options opts = new Options.Builder(defaultOptions()).dontRandomize().reconnectWait(200).pingInterval(50)
                .maxPingsOut(-1).timeout(1000).build();
        opts.servers = Nats.processUrlArray(testServers);

        final CountDownLatch wg = new CountDownLatch(reconnects);
        final BlockingQueue<Long> rch = new LinkedBlockingQueue<Long>(reconnects);
        final BlockingQueue<Long> dch = new LinkedBlockingQueue<Long>(reconnects);

        opts.disconnectedCb = new DisconnectedCallback() {
            public void onDisconnect(ConnectionEvent event) {
                dch.add(System.nanoTime());
            }
        };

        opts.reconnectedCb = new ReconnectedCallback() {
            @Override
            public void onReconnect(ConnectionEvent event) {
                rch.add(System.nanoTime());
                wg.countDown();
            }
        };

        try (ConnectionImpl c = (ConnectionImpl) opts.connect()) {
            wg.await();
            s1.shutdown();

            // Throw away the first one
            dch.take();
            for (int i = 0; i < reconnects - 1; i++) {
                Long disconnectedAt = dch.take();
                Long reconnectedAt = rch.take();
                Long pingCycle = TimeUnit.NANOSECONDS.toMillis(disconnectedAt - reconnectedAt);
                assertFalse(String.format("Reconnect due to ping took %d msec", pingCycle),
                        pingCycle > 2 * c.getOptions().getPingInterval());
            }
        }
    }
}

From source file:eu.stratosphere.nephele.ipc.Server.java

/**
 * Constructs a server listening on the named port and address. Parameters passed must
 * be of the named class. The <code>handlerCount</code> determines
 * the number of handler threads that will be used to process calls.
 */
protected Server(String bindAddress, int port, Class<? extends IOReadableWritable> invocationClass,
        int handlerCount, String serverName) throws IOException {
    this.bindAddress = bindAddress;
    this.port = port;
    this.invocationClass = invocationClass;
    this.handlerCount = handlerCount;
    this.socketSendBufferSize = 0;
    this.maxQueueSize = handlerCount * MAX_QUEUE_SIZE_PER_HANDLER;
    this.callQueue = new LinkedBlockingQueue<Call>(maxQueueSize);
    this.maxIdleTime = 2 * 1000;
    this.maxConnectionsToNuke = 10;
    this.thresholdIdleConnections = 4000;

    // Start the listener here and let it bind to the port
    listener = new Listener();
    this.port = listener.getAddress().getPort();
    this.tcpNoDelay = false;

    // Create the responder here
    responder = new Responder();
}

From source file:com.taobao.adfs.distributed.rpc.Server.java

/**
 * Constructs a server listening on the named port and address. Parameters passed must be of the named class. The
 * <code>handlerCount</code> determines
 * the number of handler threads that will be used to process calls.
 */
protected Server(String bindAddress, int port, Class<? extends Writable> paramClass, int handlerCount,
        Configuration conf, String serverName) throws IOException {
    this.bindAddress = bindAddress;
    this.conf = conf;
    this.port = port;
    this.paramClass = paramClass;
    this.handlerCount = handlerCount;
    this.socketSendBufferSize = 0;
    this.maxQueueSize = handlerCount * MAX_QUEUE_SIZE_PER_HANDLER;
    this.callQueue = new LinkedBlockingQueue<Call>(maxQueueSize);
    this.readThreads = conf.getInt("ipc.server.read.threadpool.size", 40);
    this.maxIdleTime = 2 * conf.getInt("ipc.client.connection.maxidletime", 1000);
    this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10);
    this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000);

    // Start the listener here and let it bind to the port
    listener = new Listener();
    this.port = listener.getAddress().getPort();
    this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false);

    // Create the responder here
    responder = new Responder();
}

From source file:com.datatorrent.stram.StreamingContainerManager.java

private void aggregateMetrics(long windowId, Map<Integer, EndWindowStats> endWindowStatsMap) {
    Collection<OperatorMeta> logicalOperators = getLogicalPlan().getAllOperators();
    //for backward compatibility
    for (OperatorMeta operatorMeta : logicalOperators) {
        @SuppressWarnings("deprecation")
        Context.CountersAggregator aggregator = operatorMeta.getValue(OperatorContext.COUNTERS_AGGREGATOR);
        if (aggregator == null) {
            continue;
        }
        Collection<PTOperator> physicalOperators = plan.getAllOperators(operatorMeta);
        List<Object> counters = Lists.newArrayList();
        for (PTOperator operator : physicalOperators) {
            EndWindowStats stats = endWindowStatsMap.get(operator.getId());
            if (stats != null && stats.counters != null) {
                counters.add(stats.counters);
            }
        }
        if (counters.size() > 0) {
            @SuppressWarnings("deprecation")
            Object aggregate = aggregator.aggregate(counters);
            latestLogicalCounters.put(operatorMeta.getName(), aggregate);
        }
    }

    for (OperatorMeta operatorMeta : logicalOperators) {
        AutoMetric.Aggregator aggregator = operatorMeta.getMetricAggregatorMeta() != null
                ? operatorMeta.getMetricAggregatorMeta().getAggregator()
                : null;
        if (aggregator == null) {
            continue;
        }
        Collection<PTOperator> physicalOperators = plan.getAllOperators(operatorMeta);
        List<AutoMetric.PhysicalMetricsContext> metricPool = Lists.newArrayList();

        for (PTOperator operator : physicalOperators) {
            EndWindowStats stats = endWindowStatsMap.get(operator.getId());
            if (stats != null && stats.metrics != null) {
                PhysicalMetricsContextImpl physicalMetrics = new PhysicalMetricsContextImpl(operator.getId(),
                        stats.metrics);
                metricPool.add(physicalMetrics);
            }
        }
        if (metricPool.isEmpty()) {
            //nothing to aggregate
            continue;
        }
        Map<String, Object> lm = aggregator.aggregate(windowId, metricPool);

        if (lm != null && lm.size() > 0) {
            Queue<Pair<Long, Map<String, Object>>> windowMetrics = logicalMetrics.get(operatorMeta.getName());
            if (windowMetrics == null) {
                windowMetrics = new LinkedBlockingQueue<Pair<Long, Map<String, Object>>>(METRIC_QUEUE_SIZE) {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public boolean add(Pair<Long, Map<String, Object>> longMapPair) {
                        if (remainingCapacity() <= 1) {
                            remove();
                        }
                        return super.add(longMapPair);
                    }
                };
                logicalMetrics.put(operatorMeta.getName(), windowMetrics);
            }
            LOG.debug("Adding to logical metrics for {}", operatorMeta.getName());
            windowMetrics.add(new Pair<Long, Map<String, Object>>(windowId, lm));
            Map<String, Object> oldValue = latestLogicalMetrics.put(operatorMeta.getName(), lm);
            if (oldValue == null) {
                try {
                    saveMetaInfo();
                } catch (IOException ex) {
                    LOG.error(
                            "Cannot save application meta info to DFS. App data sources will not be available.",
                            ex);
                }
            }
        }
    }
}