Example usage for java.util.Queue.size()

List of usage examples for java.util.Queue.size()

Introduction

On this page you can find usage examples for java.util.Queue.size().

Prototype

int size();

Document

Returns the number of elements in this collection.
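
Before the project-level examples below, here is a minimal, self-contained sketch of size() in action. It uses ArrayDeque, one common Queue implementation; the reported count grows with each successful offer and shrinks with each poll:

import java.util.ArrayDeque;
import java.util.Queue;

public class QueueSizeExample {
    public static void main(String[] args) {
        Queue<String> queue = new ArrayDeque<>();
        System.out.println(queue.size()); // 0: a new queue is empty

        queue.offer("first");
        queue.offer("second");
        System.out.println(queue.size()); // 2: one element per successful offer

        queue.poll();
        System.out.println(queue.size()); // 1: poll removes the head and reduces the count
    }
}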

Usage

From source file: org.mule.util.queue.AbstractTransactionQueueManagerTestCase.java

@Test
public void testPutTake_RespectsOrderOnPersistence() throws Exception {
    if (isPersistent()) {
        TransactionalQueueManager mgr1 = createQueueManager();

        QueueSession s1 = mgr1.getQueueSession();
        Queue q1 = s1.getQueue("queue1");
        mgr1.start();
        assertEquals("Queue size", 0, q1.size());
        final int numberOfElements = 10;
        for (int i = 1; i <= numberOfElements; i++) {
            q1.put("String" + i);
            assertEquals("Queue size", i, q1.size());
        }

        mgr1.stop(AbstractResourceManager.SHUTDOWN_MODE_NORMAL);

        TransactionalQueueManager mgr2 = createQueueManager();

        QueueSession s2 = mgr2.getQueueSession();
        Queue q2 = s2.getQueue("queue1");
        mgr2.start();
        for (int i = 1; i <= numberOfElements; i++) {
            Object o = q2.take();
            assertNotNull(o);
            assertEquals("Queue content", "String" + i, o);
        }
        assertEquals("Queue size", 0, q2.size());

        purgeQueue(q2);

        mgr2.stop(AbstractResourceManager.SHUTDOWN_MODE_NORMAL);
    }
}

From source file: org.mule.util.queue.AbstractTransactionQueueManagerTestCase.java

@Test
public void testPutTakeUntakeRollbackUntake() throws Exception {
    final TransactionalQueueManager mgr = createQueueManager();
    mgr.start();

    final Latch latch = new Latch();

    final Serializable object1 = "string1";
    final Serializable object2 = "string2";

    Thread t = new Thread() {
        @Override
        public void run() {
            try {
                latch.countDown();
                Thread.sleep(200);
                QueueSession s = mgr.getQueueSession();
                Queue q = s.getQueue("queue1");
                assertEquals("Queue size", 0, q.size());

                s.begin();
                q.put(object1);
                q.put(object2);
                q.take();
                q.take();
                s.commit();

                s.begin();
                q.untake(object1);
                s.commit();

                s.begin();
                q.untake(object2);
                s.rollback();
            } catch (Exception e) {
                // ignore, let test fail
            }
        }
    };
    t.start();
    latch.await();
    long t0 = System.currentTimeMillis();
    QueueSession s = mgr.getQueueSession();
    Queue q = s.getQueue("queue1");
    assertEquals("Queue size", 0, q.size());
    Object o = q.take();
    long t1 = System.currentTimeMillis();
    t.join();
    assertNotNull(o);
    assertEquals("Queue content", object1, o);
    assertEquals("Queue size", 0, q.size());
    assertTrue(t1 - t0 > 100);

    purgeQueue(q);

    mgr.stop(AbstractResourceManager.SHUTDOWN_MODE_NORMAL);
}

From source file: org.apache.gobblin.http.ApacheHttpRequestBuilderTest.java

/**
 * Build a {@link HttpUriRequest} from a {@link GenericRecord}
 */
@Test
public void testBuildWriteRequest() throws IOException {
    String urlTemplate = "http://www.test.com/a/part1:${part1}/a/part2:${part2}";
    String verb = "post";
    ApacheHttpRequestBuilder builder = spy(new ApacheHttpRequestBuilder(urlTemplate, verb, "application/json"));
    ArgumentCaptor<RequestBuilder> requestBuilderArgument = ArgumentCaptor.forClass(RequestBuilder.class);

    Queue<BufferedRecord<GenericRecord>> queue = HttpTestUtils.createQueue(1, false);
    AsyncRequest<GenericRecord, HttpUriRequest> request = builder.buildRequest(queue);
    verify(builder).build(requestBuilderArgument.capture());

    RequestBuilder expected = RequestBuilder.post();
    expected.setUri("http://www.test.com/a/part1:01/a/part2:02?param1=01");
    String payloadStr = "{\"id\":\"id0\"}";
    expected.addHeader(HttpHeaders.CONTENT_TYPE, ContentType.APPLICATION_JSON.getMimeType())
            .setEntity(new StringEntity(payloadStr, ContentType.APPLICATION_JSON));

    // Compare HttpUriRequest
    HttpTestUtils.assertEqual(requestBuilderArgument.getValue(), expected);
    Assert.assertEquals(request.getRecordCount(), 1);
    Assert.assertEquals(queue.size(), 0);
}

From source file: it.geosolutions.geobatch.geotiff.retile.GeotiffRetilerAction.java

@Override
public Queue<FileSystemEvent> execute(Queue<FileSystemEvent> events) throws ActionException {
    try {

        if (configuration == null) {
            final String message = "GeotiffRetiler::execute(): flow configuration is null.";
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message);
            throw new ActionException(this, message);
        }
        if (events.size() == 0) {
            throw new ActionException(this,
                    "GeotiffRetiler::execute(): Unable to process an empty events queue.");
        }

        if (LOGGER.isInfoEnabled())
            LOGGER.info("GeotiffRetiler::execute(): Starting with processing...");

        listenerForwarder.started();

        // The return
        final Queue<FileSystemEvent> ret = new LinkedList<FileSystemEvent>();

        while (events.size() > 0) {

            FileSystemEvent event = events.remove();

            File eventFile = event.getSource();
            FileSystemEventType eventType = event.getEventType();

            if (eventFile.exists() && eventFile.canRead() && eventFile.canWrite()) {
                /*
                 * If here: we can start retiler actions on the incoming file event
                 */

                if (eventFile.isDirectory()) {

                    File[] fileList = eventFile.listFiles();
                    int size = fileList.length;
                    for (int progress = 0; progress < size; progress++) {

                        File inFile = fileList[progress];

                        final String absolutePath = inFile.getAbsolutePath();
                        final String inputFileName = FilenameUtils.getName(absolutePath);

                        if (LOGGER.isInfoEnabled())
                            LOGGER.info("is going to retile: " + inputFileName);

                        try {

                            listenerForwarder.setTask("GeotiffRetiler");
                            GeoTiffRetilerUtils.reTile(inFile, configuration, getTempDir());

                            // set the output
                            /*
                             * COMMENTED OUT 21 Feb 2011: simone: If the event represents a Dir
                             * we have to return a Dir. Do not matter failing files.
                             * 
                             * carlo: we may also want to check if a file is already tiled!
                             * 
                             * File outputFile=reTile(inFile); if (outputFile!=null){ //TODO:
                             * here we use the same event for each file in the ret.add(new
                             * FileSystemEvent(outputFile, eventType)); }
                             */

                        } catch (UnsupportedOperationException uoe) {
                            listenerForwarder.failed(uoe);
                            if (LOGGER.isWarnEnabled())
                                LOGGER.warn(uoe.getLocalizedMessage(), uoe);
                            continue;
                        } catch (IOException ioe) {
                            listenerForwarder.failed(ioe);
                            if (LOGGER.isWarnEnabled())
                                LOGGER.warn(ioe.getLocalizedMessage(), ioe);
                            continue;
                        } catch (IllegalArgumentException iae) {
                            listenerForwarder.failed(iae);
                            if (LOGGER.isWarnEnabled())
                                LOGGER.warn(iae.getLocalizedMessage(), iae);
                            continue;
                        } finally {
                            listenerForwarder.setProgress((progress * 100) / ((size != 0) ? size : 1));
                            listenerForwarder.progressing();
                        }
                    }

                    if (LOGGER.isInfoEnabled())
                        LOGGER.info("SUCCESSFULLY completed work on: " + event.getSource());

                    // add the directory to the return
                    ret.add(event);
                } else {
                    // file is not a directory
                    try {
                        listenerForwarder.setTask("GeotiffRetiler");
                        final File outputFile = GeoTiffRetilerUtils.reTile(eventFile, configuration,
                                getTempDir());

                        if (LOGGER.isInfoEnabled())
                            LOGGER.info("SUCCESSFULLY completed work on: " + event.getSource());
                        listenerForwarder.setProgress(100);
                        ret.add(new FileSystemEvent(outputFile, eventType));

                    } catch (UnsupportedOperationException uoe) {
                        listenerForwarder.failed(uoe);
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn(uoe.getLocalizedMessage(), uoe);
                        continue;
                    } catch (IOException ioe) {
                        listenerForwarder.failed(ioe);
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn(ioe.getLocalizedMessage(), ioe);
                        continue;
                    } catch (IllegalArgumentException iae) {
                        listenerForwarder.failed(iae);
                        if (LOGGER.isWarnEnabled())
                            LOGGER.warn(iae.getLocalizedMessage(), iae);
                        continue;
                    } finally {

                        listenerForwarder.setProgress((100) / ((events.size() != 0) ? events.size() : 1));
                        listenerForwarder.progressing();
                    }
                }
            } else {
                final String message = "The passed file event refers to a not existent "
                        + "or not readable/writeable file! File: " + eventFile.getAbsolutePath();
                if (LOGGER.isWarnEnabled())
                    LOGGER.warn(message);
                final IllegalArgumentException iae = new IllegalArgumentException(message);
                listenerForwarder.failed(iae);
            }
        } // end while
        listenerForwarder.completed();

        // return
        if (ret.size() > 0) {
            events.clear();
            return ret;
        } else {
            /*
             * If here: an error occurred and no files were set to be returned,
             * so the input queue is returned.
             */
            return events;
        }
    } catch (Exception t) {
        if (LOGGER.isErrorEnabled())
            LOGGER.error(t.getLocalizedMessage(), t);
        final ActionException exc = new ActionException(this, t.getLocalizedMessage(), t);
        listenerForwarder.failed(exc);
        throw exc;
    }
}

From source file: org.apache.streams.rss.provider.RssLinkProvider.java

@Override
public StreamsResultSet readCurrent() {
    Queue<StreamsDatum> result = Queues.newConcurrentLinkedQueue();
    synchronized (this.entries) {
        if (this.entries.isEmpty() && this.doneProviding.get()) {
            this.keepRunning.set(false);
        }

        while (!this.entries.isEmpty()) {
            ObjectNode node = this.entries.poll();

            try {
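                // offer on a ConcurrentLinkedQueue never rejects elements, but yield and retry defensively if it ever does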
                while (!result.offer(new StreamsDatum(node.get("uri").asText()))) {
                    Thread.yield();
                }
            } catch (Exception e) {
                LOGGER.error("Problem offering up new StreamsDatum: {}", node.asText());
            }
        }
    }
    LOGGER.debug("** ReadCurrent return {} streams datums", result.size());

    return new StreamsResultSet(result);
}

From source file: org.mule.util.queue.AbstractTransactionQueueManagerTestCase.java

@Test
public void testPutWithPersistence() throws Exception {
    if (isPersistent()) {
        TransactionalQueueManager mgr = createQueueManager();

        try {
            QueueSession s = mgr.getQueueSession();
            Queue q = s.getQueue("queue1");
            mgr.start();
            q.put("String1");
            assertEquals("Queue size", 1, q.size());

            q = s.getQueue("queue1");
            assertEquals("Queue size", 1, q.size());
        } finally {
            mgr.stop(AbstractResourceManager.SHUTDOWN_MODE_NORMAL);
        }

        mgr = createQueueManager();
        try {
            QueueSession s = mgr.getQueueSession();
            Queue q = s.getQueue("queue1");
            mgr.start();
            assertEquals("Queue size", 1, q.size());

            purgeQueue(q);
        } finally {
            mgr.stop(AbstractResourceManager.SHUTDOWN_MODE_NORMAL);
        }
    } else {
        logger.info("Ignoring test because queue manager is not persistent");
    }
}

From source file: org.mule.util.queue.AbstractTransactionQueueManagerTestCase.java

@Test
public void testTransactedPutRollbackWithPersistence() throws Exception {
    if (isPersistent()) {
        TransactionalQueueManager mgr = createQueueManager();

        try {
            mgr.start();

            QueueSession s = mgr.getQueueSession();
            Queue q = s.getQueue("queue1");
            s.begin();
            q.put("String1");
            assertEquals("Queue size", 1, q.size());
            s.rollback();
            assertEquals("Queue size", 0, q.size());

            s = mgr.getQueueSession();
            q = s.getQueue("queue1");
            assertEquals("Queue size", 0, q.size());

            mgr.stop(AbstractResourceManager.SHUTDOWN_MODE_NORMAL);

            mgr = createQueueManager();
            mgr.start();
            s = mgr.getQueueSession();
            q = s.getQueue("queue1");
            assertEquals("Queue size", 0, q.size());

            purgeQueue(q);
        } finally {

            mgr.stop(AbstractResourceManager.SHUTDOWN_MODE_NORMAL);

        }
    } else {
        logger.info("Ignoring test because queue manager is not persistent");
    }
}

From source file: org.apache.hadoop.hdfs.notifier.server.TestServerHistory.java

@Test
public void testBasicQueueNotification() throws Exception {
    // Starting without a ramp-up phase
    DummyServerCore core = new DummyServerCore();
    ServerHistory history = new ServerHistory(core, false);
    long historyLength = 100;
    history.setHistoryLength(historyLength);
    Queue<NamespaceNotification> historyNotifications;

    new Thread(history).start();

    // Step 1 - test with FILE_ADDED
    history.storeNotification(new NamespaceNotification("/a/b", EventType.FILE_ADDED.getByteValue(), 10));
    history.storeNotification(new NamespaceNotification("/a/c", EventType.FILE_ADDED.getByteValue(), 11));
    historyNotifications = new LinkedList<NamespaceNotification>();
    history.addNotificationsToQueue(new NamespaceEvent("/a", EventType.FILE_ADDED.getByteValue()), 10,
            historyNotifications);
    Assert.assertEquals(1, historyNotifications.size());
    Assert.assertEquals(11, historyNotifications.peek().txId);
    Assert.assertEquals("/a/c", historyNotifications.peek().path);

    // Step 2 - test with FILE_CLOSED
    history.storeNotification(new NamespaceNotification("/a/d", EventType.FILE_CLOSED.getByteValue(), 12));
    history.storeNotification(new NamespaceNotification("/a/e", EventType.FILE_CLOSED.getByteValue(), 13));
    historyNotifications = new LinkedList<NamespaceNotification>();
    history.addNotificationsToQueue(new NamespaceEvent("/a", EventType.FILE_CLOSED.getByteValue()), 12,
            historyNotifications);
    Assert.assertEquals(1, historyNotifications.size());
    Assert.assertEquals(13, historyNotifications.peek().txId);
    Assert.assertEquals("/a/e", historyNotifications.peek().path);

    // test the sub directories
    historyNotifications = new LinkedList<NamespaceNotification>();
    history.addNotificationsToQueue(new NamespaceEvent("/", EventType.FILE_ADDED.getByteValue()), 10,
            historyNotifications);
    Assert.assertEquals(1, historyNotifications.size());
    history.addNotificationsToQueue(new NamespaceEvent("/", EventType.FILE_CLOSED.getByteValue()), 10,
            historyNotifications);
    Assert.assertEquals(3, historyNotifications.size());

    core.shutdown();
}

From source file: password.pwm.svc.report.ReportService.java

private void updateCacheFromLdap() throws ChaiUnavailableException, ChaiOperationException,
        PwmOperationalException, PwmUnrecoverableException {
    LOGGER.debug(PwmConstants.REPORTING_SESSION_LABEL,
            "beginning process to updating user cache records from ldap");
    if (status != STATUS.OPEN) {
        return;
    }
    cancelFlag = false;
    reportStatus = new ReportStatusInfo(settings.getSettingsHash());
    reportStatus.setInProgress(true);
    reportStatus.setStartDate(new Date());
    try {
        final Queue<UserIdentity> allUsers = new LinkedList<>(getListOfUsers());
        reportStatus.setTotal(allUsers.size());
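        // drain the queue of users, refreshing each cached record until the service stops or the report is cancelled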
        while (status == STATUS.OPEN && !allUsers.isEmpty() && !cancelFlag) {
            final Date startUpdateTime = new Date();
            final UserIdentity userIdentity = allUsers.poll();
            try {
                if (updateCachedRecordFromLdap(userIdentity)) {
                    reportStatus.setUpdated(reportStatus.getUpdated() + 1);
                }
            } catch (Exception e) {
                String errorMsg = "error while updating report cache for " + userIdentity.toString()
                        + ", cause: ";
                errorMsg += e instanceof PwmException ? ((PwmException) e).getErrorInformation().toDebugStr()
                        : e.getMessage();
                final ErrorInformation errorInformation;
                errorInformation = new ErrorInformation(PwmError.ERROR_REPORTING_ERROR, errorMsg);
                LOGGER.error(PwmConstants.REPORTING_SESSION_LABEL, errorInformation.toDebugStr());
                reportStatus.setLastError(errorInformation);
                reportStatus.setErrors(reportStatus.getErrors() + 1);
            }
            reportStatus.setCount(reportStatus.getCount() + 1);
            reportStatus.getEventRateMeter().markEvents(1);
            final TimeDuration totalUpdateTime = TimeDuration.fromCurrent(startUpdateTime);
            if (settings.isAutoCalcRest()) {
                avgTracker.addSample(totalUpdateTime.getTotalMilliseconds());
                Helper.pause(avgTracker.avgAsLong());
            } else {
                Helper.pause(settings.getRestTime().getTotalMilliseconds());
            }
        }
        if (cancelFlag) {
            reportStatus.setLastError(
                    new ErrorInformation(PwmError.ERROR_SERVICE_NOT_AVAILABLE, "report cancelled by operator"));
        }
    } finally {
        reportStatus.setFinishDate(new Date());
        reportStatus.setInProgress(false);
    }
    LOGGER.debug(PwmConstants.REPORTING_SESSION_LABEL,
            "update user cache process completed: " + JsonUtil.serialize(reportStatus));
}

From source file: org.hyperic.hq.zevents.ZeventManager.java

private String getDiagnostics() {
    synchronized (INIT_LOCK) {
        StringBuffer res = new StringBuffer();

        res.append("ZEvent Manager Diagnostics:\n")
                .append("    Queue Size:        " + _eventQueue.size() + "\n")
                .append("    Events Handled:    " + _numEvents + "\n")
                .append("    Max Time In Queue: " + _maxTimeInQueue + "ms\n\n")
                .append("ZEvent Listener Diagnostics:\n");
        PrintfFormat timingFmt = new PrintfFormat("        %-30s max=%-7.2f avg=%-5.2f " + "num=%-5d\n");
        synchronized (_listenerLock) {

            for (Entry<Class<? extends Zevent>, List<TimingListenerWrapper<Zevent>>> ent : _listeners
                    .entrySet()) {
                List<TimingListenerWrapper<Zevent>> listeners = ent.getValue();
                res.append("    EventClass: " + ent.getKey() + "\n");
                for (TimingListenerWrapper<Zevent> l : listeners) {
                    Object[] args = new Object[] { l.toString(), new Double(l.getMaxTime()),
                            new Double(l.getAverageTime()), new Long(l.getNumEvents()) };

                    res.append(timingFmt.sprintf(args));
                }
                res.append("\n");
            }

            res.append("    Global Listeners:\n");
            for (TimingListenerWrapper<Zevent> l : _globalListeners) {
                Object[] args = new Object[] { l.toString(), new Double(l.getMaxTime()),
                        new Double(l.getAverageTime()), new Long(l.getNumEvents()), };

                res.append(timingFmt.sprintf(args));
            }
        }

        synchronized (_registeredBuffers) {
            PrintfFormat fmt = new PrintfFormat("    %-30s size=%d\n");
            res.append("\nZevent Registered Buffers:\n");
            for (Entry<Queue<?>, TimingListenerWrapper<Zevent>> ent : _registeredBuffers.entrySet()) {
                Queue<?> q = ent.getKey();
                TimingListenerWrapper<Zevent> targ = ent.getValue();
                res.append(fmt.sprintf(new Object[] { targ.toString(), new Integer(q.size()), }));
                res.append(timingFmt.sprintf(new Object[] { "", // Target
                        // already
                        // printed
                        // above
                        new Double(targ.getMaxTime()), new Double(targ.getAverageTime()),
                        new Long(targ.getNumEvents()), }));
            }
        }

        return res.toString();
    }
}