Example usage for java.util.concurrent BlockingQueue isEmpty

List of usage examples for java.util.concurrent BlockingQueue isEmpty

Introduction

This page shows example usages of java.util.concurrent BlockingQueue isEmpty.

Prototype

boolean isEmpty();

Document

Returns true if this collection contains no elements.
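
Before the real-world usages below, here is a minimal, self-contained sketch of isEmpty() on a BlockingQueue; the class name and queue contents are made up for illustration:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class BlockingQueueIsEmptyExample {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new LinkedBlockingQueue<String>();
        System.out.println(queue.isEmpty()); // true: nothing has been offered yet

        queue.put("task-1");
        System.out.println(queue.isEmpty()); // false: one element is queued

        // On a concurrent queue isEmpty() is only a point-in-time snapshot;
        // consumers typically rely on poll(timeout) or take() rather than isEmpty() followed by take().
        queue.poll();
        System.out.println(queue.isEmpty()); // true again after draining
    }
}

As the examples below show, isEmpty() is usually a cheap guard or shutdown check used alongside poll().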

Usage

From source file:com.opengamma.bbg.BloombergHistoricalTimeSeriesSource.java

@Override
public Map<ExternalIdBundle, HistoricalTimeSeries> getHistoricalTimeSeries(Set<ExternalIdBundle> identifierSet,
        String dataSource, String dataProvider, String dataField, LocalDate start, boolean includeStart,
        LocalDate end, boolean includeEnd) {
    ArgumentChecker.notNull(identifierSet, "identifierSet");
    ArgumentChecker.notNull(dataField, "dataField");
    ArgumentChecker.notNull(start, "start");
    ArgumentChecker.notNull(end, "end");
    Validate.isTrue(ObjectUtils.equals(dataSource, BLOOMBERG_DATA_SOURCE_NAME),
            getClass().getName() + " cannot support " + dataSource);

    if (end.isBefore(start)) {
        throw new IllegalArgumentException("end must be after start");
    }

    ensureStarted();
    s_logger.debug("Getting historical data for {}", identifierSet);

    if (identifierSet.isEmpty()) {
        s_logger.info("Historical data request for empty identifier set");
        return Collections.emptyMap();
    }
    Map<String, ExternalIdBundle> bbgSecDomainMap = Maps.newHashMap();
    Request request = getRefDataService().createRequest(BLOOMBERG_HISTORICAL_DATA_REQUEST);
    Element securitiesElem = request.getElement(BLOOMBERG_SECURITIES_REQUEST);
    for (ExternalIdBundle identifiers : identifierSet) {
        ExternalId preferredIdentifier = BloombergDomainIdentifierResolver
                .resolvePreferredIdentifier(identifiers);
        s_logger.debug("Resolved preferred identifier {} from identifier bundle {}", preferredIdentifier,
                identifiers);
        String bbgKey = BloombergDomainIdentifierResolver.toBloombergKeyWithDataProvider(preferredIdentifier,
                dataProvider);
        securitiesElem.appendValue(bbgKey);
        bbgSecDomainMap.put(bbgKey, identifiers);
    }

    Element fieldElem = request.getElement(BLOOMBERG_FIELDS_REQUEST);
    fieldElem.appendValue(dataField);

    // TODO: inclusive start / exclusive end
    request.set("periodicityAdjustment", "ACTUAL");
    request.set("periodicitySelection", "DAILY");
    request.set("startDate", printYYYYMMDD(start));
    request.set("endDate", printYYYYMMDD(end));
    request.set("adjustmentSplit", true);

    _statistics.recordStatistics(bbgSecDomainMap.keySet(), Collections.singleton(dataField));
    CorrelationID cid = submitBloombergRequest(request);
    BlockingQueue<Element> resultElements = getResultElement(cid);
    if (resultElements == null || resultElements.isEmpty()) {
        s_logger.warn("Unable to get historical data for {}", identifierSet);
        return null;
    }

    //REVIEW simon 2011/11/01: should this be deduped with the single case? 
    Map<ExternalIdBundle, HistoricalTimeSeries> result = Maps.newHashMap();
    for (Element resultElem : resultElements) {
        if (resultElem.hasElement(RESPONSE_ERROR)) {
            s_logger.warn("Response error");
            processError(resultElem.getElement(RESPONSE_ERROR));
            continue;
        }
        Element securityElem = resultElem.getElement(SECURITY_DATA);
        if (securityElem.hasElement(SECURITY_ERROR)) {
            processError(securityElem.getElement(SECURITY_ERROR));
        }
        if (securityElem.hasElement(FIELD_EXCEPTIONS)) {
            Element fieldExceptions = securityElem.getElement(FIELD_EXCEPTIONS);

            for (int i = 0; i < fieldExceptions.numValues(); i++) {
                Element fieldException = fieldExceptions.getValueAsElement(i);
                String fieldId = fieldException.getElementAsString(FIELD_ID);
                s_logger.warn("Field error on {}", fieldId);
                Element errorInfo = fieldException.getElement(ERROR_INFO);
                processError(errorInfo);
            }
        }
        if (securityElem.hasElement(FIELD_DATA)) {
            processFieldData(securityElem, dataField, bbgSecDomainMap, result);
        }
    }
    if (identifierSet.size() != result.size()) {
        s_logger.warn("Failed to get time series results for ({}/{}) {}",
                new Object[] { identifierSet.size() - result.size(), identifierSet.size(),
                        Sets.difference(identifierSet, result.keySet()) });
    }
    return result;
}

From source file:com.all.backend.web.services.LocalPushService.java

@SuppressWarnings("unchecked")
@Override
public AllMessage<?> pullMessage(ContactInfo contact) {
    long id = contact.getId();
    BlockingQueue<AllMessage<?>> queue;
    synchronized (messages) {
        queue = messages.get(id);
        if (queue == null) {
            queue = new LinkedBlockingQueue<AllMessage<?>>();
            messages.put(id, queue);
        }
    }
    AllMessage<?> poll = null;
    try {
        poll = queue.poll(1, TimeUnit.MINUTES);
    } catch (Exception e) {
        log.error(e, e);
    }
    if (poll == null) {
        synchronized (messages) {
            if (queue.isEmpty()) {
                messages.remove(id);
            }
        }
    }
    return poll;
}

From source file:com.splout.db.qnode.QNodeHandlerContext.java

/**
 * This method can be called to initialize a pool of connections to a dnode. This method may be called from multiple
 * threads so it should be safe to call it concurrently.
 */
public void initializeThriftClientCacheFor(String dnode) throws TTransportException, InterruptedException {
    // this lock is on the whole cache but we would actually be interested in a per-DNode lock...
    // there's only one lock for simplicity.
    thriftClientCacheLock.lock();
    try {
        // initialize queue for this DNode
        BlockingQueue<DNodeService.Client> dnodeQueue = thriftClientCache.get(dnode);
        if (dnodeQueue == null) {
            // this assures that the per-DNode queue is only created once and then reused.
            dnodeQueue = new LinkedBlockingDeque<DNodeService.Client>(thriftClientPoolSize);
        }
        if (dnodeQueue.isEmpty()) {
            try {
                for (int i = dnodeQueue.size(); i < thriftClientPoolSize; i++) {
                    dnodeQueue.put(DNodeClient.get(dnode));
                }
                // we only put the queue if all connections have been populated
                thriftClientCache.put(dnode, dnodeQueue);
            } catch (TTransportException e) {
                log.error("Error while trying to populate queue for " + dnode
                        + ", will discard created connections.", e);
                while (!dnodeQueue.isEmpty()) {
                    dnodeQueue.poll().getOutputProtocol().getTransport().close();
                }
                throw e;
            }
        } else {
            // it should be safe to call this method from different places concurrently
            // so we contemplate the case where another Thread already populated the queue
            // and only populate it if it's really empty.
            log.warn(Thread.currentThread().getName() + " : queue for [" + dnode
                    + "] is not empty - it was populated before.");
        }
    } finally {
        thriftClientCacheLock.unlock();
    }
}

From source file:com.fluidops.iwb.api.CommunicationServiceImpl.java

protected static void handlePendingRequestsInternal() {
    BlockingQueue<UpdateRequest> requests = CommunicationServiceImpl.requests.instance();

    Long start = System.currentTimeMillis();

    handleRequestWarning(requests.size());

    // NOTE: we implement a while on top of the boolean
    // variable (rather than the requests variable itself)
    // in order to be able to push the synchronized block inside;
    // this allows for interleaving of pushing and polling
    // and thus improved eCM core performance, as it imposes minimal
    // blocking time on the process filling the requests queue
    boolean requestsEmpty = false;

    Set<URI> updatedURIs = new HashSet<URI>();
    Set<URI> deletedURIs = new HashSet<URI>();
    Set<URI> contextsWithRemoveOp = new HashSet<URI>();
    Set<URI> contextsWithAddOp = new HashSet<URI>();

    int ctr = 0;
    ReadWriteDataManager dm = null;
    try {
        while (!requestsEmpty) {
            UpdateRequest request = null;
            if (requests.isEmpty())
                requestsEmpty = true; // abort
            else
                request = requests.poll();

            // we process the request outside of the synchronized
            // block, in order to release the lock as early as possible
            if (request != null) {
                try {
                    // we only open the data manager if required, and only once
                    if (dm == null)
                        dm = ReadWriteDataManagerImpl.openDataManager(Global.repository);

                    request.handleRequest(dm, updatedURIs, deletedURIs, contextsWithRemoveOp,
                            contextsWithAddOp);
                    ctr++;
                    if ((ctr % 1000) == 0)
                        logger.info(
                                "Synching requests into INT DB - count=" + ctr + " queue=" + requests.size());
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                }
            }
        }

        if (ctr > 0) // something has been changed
        {
            String cleanupMsg = dm.cleanupMetaGarbage(contextsWithRemoveOp);

            KeywordIndexAPI.updateUrisInIndex(updatedURIs);
            KeywordIndexAPI.updateUrisInIndex(deletedURIs);

            logger.debug("Synchronized " + ctr + " objects to INT database in "
                    + (System.currentTimeMillis() - start) + "ms (" + cleanupMsg + ")");
        } // otherwise: no action has been performed, nothing to do
    } finally {
        ReadWriteDataManagerImpl.closeQuietly(dm);
    }
}

From source file:at.salzburgresearch.vgi.vgianalyticsframework.activityanalysis.pipeline.impl.VgiPipelineImpl.java

/**
 * Starts the pipeline: creates the producers, lets them read the VGI operations, receives the data and sends the operations to a set of consumers.
 */
@Override
public void start() {
    timerStart = new Date();

    BlockingQueue<IVgiFeature> queue = new ArrayBlockingQueue<IVgiFeature>(queueSize);

    /** Create thread(s) which will read the PBF files */
    Thread[] producerThread = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {

        producerName = (settings.isReadQuadtree()) ? "vgiOperationPbfReaderQuadtree" : "vgiOperationPbfReader";

        producer = ctx.getBean(producerName, IVgiAnalysisPipelineProducer.class);
        producer.setQueue(queue);
        producerThread[i] = new Thread(producer);

        producer.setProducerCount(numThreads);
        producer.setProducerNumber(i);
        producer.setPbfDataFolder((pbfDataFolder != null) ? pbfDataFolder : settings.getPbfDataFolder());

        producer.setFilterNodeId(filterNodeId);
        producer.setFilterWayId(filterWayId);
        producer.setFilterRelationId(filterRelationId);
        producer.setFilterGeometryType(filterGeometryType);
        producer.setConstrainedFilter(constrainedFilter);
        producer.setCoordinateOnly(coordinateOnly);

        producer.setFilterFileId(filterFileId);

        producerThread[i].start();
    }

    List<IVgiFeature> currentBatch = new ArrayList<IVgiFeature>();

    try {
        doBeforeFirstBatch();

        /** Read queue as long as it is not empty */
        boolean resumePipeline = true;
        while (resumePipeline) {
            resumePipeline = false;
            for (int i = 0; i < numThreads; i++) {
                if (producerThread[i].isAlive())
                    resumePipeline = true;
            }
            if (!queue.isEmpty())
                resumePipeline = true;
            if (!resumePipeline)
                break;

            IVgiFeature currentFeature = queue.poll(60, TimeUnit.MILLISECONDS);

            if (currentFeature == null)
                continue;

            /** Detach batch if minimum batch size is reached */
            if (currentBatch.size() >= batchSize) {
                detachBatch(currentBatch);
                currentBatch.clear();
            }

            currentBatch.add(currentFeature);
        }

        if (currentBatch.size() > 0) {
            detachBatch(currentBatch);
        }

        doAfterLastBatch();

    } catch (InterruptedException e) {
        log.error("error joining producer thread", e);
    }

    if (settings.getActionAnalyzerList() != null) {
        writeMetaData(settings.getResultFolder());
    }
}

From source file:org.apache.camel.component.seda.SedaConsumer.java

public void run() {
    BlockingQueue<Exchange> queue = endpoint.getQueue();
    // loop while we are allowed to run or, if we are stopping, loop until the queue is empty
    while (queue != null && (isRunAllowed())) {
        Exchange exchange = null;
        try {
            exchange = queue.poll(1000, TimeUnit.MILLISECONDS);
            if (exchange != null) {
                try {
                    sendToConsumers(exchange);

                    // log exception if an exception occurred and was not handled
                    if (exchange.getException() != null) {
                        getExceptionHandler().handleException("Error processing exchange", exchange,
                                exchange.getException());
                    }
                } catch (Exception e) {
                    getExceptionHandler().handleException("Error processing exchange", exchange, e);
                }
            } else if (shutdownPending && queue.isEmpty()) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace(
                            "Shutdown is pending, so this consumer thread is breaking out because the task queue is empty.");
                }
                // we want to shut down, so break out if the queue is empty
                break;
            }
        } catch (InterruptedException e) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Sleep interrupted, are we stopping? " + (isStopping() || isStopped()));
            }
            continue;
        } catch (Throwable e) {
            if (exchange != null) {
                getExceptionHandler().handleException("Error processing exchange", exchange, e);
            } else {
                getExceptionHandler().handleException(e);
            }
        }
    }

    latch.countDown();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Ending this polling consumer thread, there are still " + latch.getCount()
                + " consumer threads left.");
    }
}

From source file:org.apache.camel.impl.DefaultServicePool.java

public synchronized Service acquire(Key key) {
    BlockingQueue<Service> services = pool.get(key);
    if (services == null || services.isEmpty()) {
        if (log.isTraceEnabled()) {
            log.trace("No free services in pool to acquire for key: " + key);
        }
        return null;
    }

    Service answer = services.poll();
    if (log.isTraceEnabled()) {
        log.trace("Acquire: " + key + " service: " + answer);
    }
    return answer;
}

From source file:org.apache.cassandra.concurrent.ContinuationsExecutor.java

/**
 * Drains the task queue into a new list, normally using drainTo. But if the
 * queue is a DelayQueue or any other kind of queue for which poll or
 * drainTo may fail to remove some elements, it deletes them one by one.
 */
private List<Runnable> drainQueue() {
    BlockingQueue<Runnable> q = workQueue;
    List<Runnable> taskList = new ArrayList<Runnable>();
    q.drainTo(taskList);
    if (!q.isEmpty()) {
        for (Runnable r : q.toArray(new Runnable[0])) {
            if (q.remove(r))
                taskList.add(r);
        }
    }
    return taskList;
}

From source file:org.apache.falcon.service.FeedSLAMonitoringService.java

@SuppressWarnings("unchecked")
private void deserialize(Path path) throws FalconException {
    try {
        Map<String, Object> state = deserializeInternal(path);
        pendingInstances = new ConcurrentHashMap<>();
        Map<Pair<String, String>, BlockingQueue<Date>> pendingInstancesCopy = (Map<Pair<String, String>, BlockingQueue<Date>>) state
                .get("pendingInstances");
        // queue size can change during restarts, hence copy
        for (Map.Entry<Pair<String, String>, BlockingQueue<Date>> entry : pendingInstancesCopy.entrySet()) {
            BlockingQueue<Date> value = new LinkedBlockingQueue<>(queueSize);
            BlockingQueue<Date> oldValue = entry.getValue();
            LOG.debug("Number of old instances:{}, new queue size:{}", oldValue.size(), queueSize);
            while (!oldValue.isEmpty()) {
                Date instance = oldValue.remove();
                if (value.size() == queueSize) { // if full
                    LOG.debug("Deserialization: Removing value={} for <feed,cluster>={}", value.peek(),
                            entry.getKey());
                    value.remove();
                }
                LOG.debug("Deserialization Adding: key={} to <feed,cluster>={}", entry.getKey(), instance);
                value.add(instance);
            }
            pendingInstances.put(entry.getKey(), value);
        }
        lastCheckedAt = new Date((Long) state.get("lastCheckedAt"));
        lastSerializedAt = new Date((Long) state.get("lastSerializedAt"));
        monitoredFeeds = new ConcurrentHashSet<>(); // will be populated on the onLoad of entities.
        LOG.debug("Restored the service from old state.");
    } catch (IOException | ClassNotFoundException e) {
        throw new FalconException("Couldn't deserialize the old state", e);
    }
}

From source file:org.apache.hadoop.ipc.CallQueueManager.java

/**
 * Checks if queue is empty by checking at CHECKPOINT_NUM points with
 * CHECKPOINT_INTERVAL_MS interval.
 * This doesn't guarantee that the queue won't fill up at some point later, but
 * it should decrease the probability that we lose a call this way.
 */
private boolean queueIsReallyEmpty(BlockingQueue<?> q) {
    for (int i = 0; i < CHECKPOINT_NUM; i++) {
        try {
            Thread.sleep(CHECKPOINT_INTERVAL_MS);
        } catch (InterruptedException ie) {
            return false;
        }
        if (!q.isEmpty()) {
            return false;
        }
    }
    return true;
}