List of usage examples for java.util Queue size
int size();
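Before the examples taken from real projects, here is a minimal self-contained sketch of what size() reports on a java.util Queue. The class name QueueSizeExample and the sample values are illustrative only and do not come from any of the source files below; ArrayDeque is used simply as one common Queue implementation.

import java.util.ArrayDeque;
import java.util.Queue;

public class QueueSizeExample {
    public static void main(String[] args) {
        Queue<String> queue = new ArrayDeque<>();
        System.out.println(queue.size()); // 0 - the queue starts empty

        queue.offer("first");
        queue.offer("second");
        System.out.println(queue.size()); // 2 - size reflects both queued elements

        queue.poll();
        System.out.println(queue.size()); // 1 - polling removes the head and shrinks the size
    }
}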
From source file:org.mule.util.queue.AbstractTransactionQueueManagerTestCase.java
@Test
public void testRecoverColdRestart() throws Exception {
    TransactionalQueueManager mgr = createQueueManager();
    QueueSession s = mgr.getQueueSession();
    Queue q = s.getQueue("warmRecoverQueue");
    mgr.start();

    int toPopulate = 500;

    // Populate queue
    Random rnd = new Random();
    for (int j = 0; j < toPopulate; j++) {
        byte[] o = new byte[2048];
        rnd.nextBytes(o);
        q.put(o);
    }
    assertEquals(toPopulate, q.size());

    // Stop and recreate TransactionalQueueManager simulating a cold restart
    mgr.stop();
    mgr = createQueueManager();
    s = mgr.getQueueSession();
    q = s.getQueue("warmRecoverQueue");
    mgr.start();

    if (isPersistent()) {
        assertEquals(toPopulate, q.size());
    } else {
        assertEquals(0, q.size());
    }
}
From source file:com.barchart.http.server.TestHttpServer.java
@Test
public void testTooManyConnections() throws Exception {
    final Queue<Integer> status = new LinkedBlockingQueue<Integer>();

    final Runnable r = new Runnable() {
        @Override
        public void run() {
            try {
                final HttpResponse response = client
                        .execute(new HttpGet("http://localhost:" + port + "/client-disconnect"));
                status.add(response.getStatusLine().getStatusCode());
            } catch (final Exception e) {
                e.printStackTrace();
            }
        }
    };

    final Thread t1 = new Thread(r);
    t1.start();
    final Thread t2 = new Thread(r);
    t2.start();

    t1.join();
    t2.join();

    assertEquals(2, status.size());
    assertTrue(status.contains(200));
    assertTrue(status.contains(503));
}
From source file:org.apache.hadoop.hive.ql.QueryPlan.java
/**
 * Populate api.QueryPlan from exec structures. This includes constructing the
 * dependency graphs of stages and operators.
 *
 * @throws IOException
 */
private void populateQueryPlan() throws IOException {
    query.setStageGraph(new org.apache.hadoop.hive.ql.plan.api.Graph());
    query.getStageGraph().setNodeType(NodeType.STAGE);

    Queue<Task<? extends Serializable>> tasksToVisit = new LinkedList<Task<? extends Serializable>>();
    Set<Task<? extends Serializable>> tasksVisited = new HashSet<Task<? extends Serializable>>();
    tasksToVisit.addAll(rootTasks);

    while (tasksToVisit.size() != 0) {
        Task<? extends Serializable> task = tasksToVisit.remove();
        tasksVisited.add(task);

        // populate stage
        org.apache.hadoop.hive.ql.plan.api.Stage stage = new org.apache.hadoop.hive.ql.plan.api.Stage();
        stage.setStageId(task.getId());
        stage.setStageType(task.getType());
        query.addToStageList(stage);

        if (task instanceof ExecDriver) {
            // populate map task
            ExecDriver mrTask = (ExecDriver) task;
            org.apache.hadoop.hive.ql.plan.api.Task mapTask = new org.apache.hadoop.hive.ql.plan.api.Task();
            mapTask.setTaskId(stage.getStageId() + "_MAP");
            mapTask.setTaskType(TaskType.MAP);
            stage.addToTaskList(mapTask);
            populateOperatorGraph(mapTask, mrTask.getWork().getMapWork().getAliasToWork().values());

            // populate reduce task
            if (mrTask.hasReduce()) {
                org.apache.hadoop.hive.ql.plan.api.Task reduceTask = new org.apache.hadoop.hive.ql.plan.api.Task();
                reduceTask.setTaskId(stage.getStageId() + "_REDUCE");
                reduceTask.setTaskType(TaskType.REDUCE);
                stage.addToTaskList(reduceTask);
                Collection<Operator<? extends OperatorDesc>> reducerTopOps =
                        new ArrayList<Operator<? extends OperatorDesc>>();
                reducerTopOps.add(mrTask.getWork().getReduceWork().getReducer());
                populateOperatorGraph(reduceTask, reducerTopOps);
            }
        } else {
            org.apache.hadoop.hive.ql.plan.api.Task otherTask = new org.apache.hadoop.hive.ql.plan.api.Task();
            otherTask.setTaskId(stage.getStageId() + "_OTHER");
            otherTask.setTaskType(TaskType.OTHER);
            stage.addToTaskList(otherTask);
        }

        if (task instanceof ConditionalTask) {
            org.apache.hadoop.hive.ql.plan.api.Adjacency listEntry =
                    new org.apache.hadoop.hive.ql.plan.api.Adjacency();
            listEntry.setAdjacencyType(AdjacencyType.DISJUNCTIVE);
            listEntry.setNode(task.getId());
            ConditionalTask t = (ConditionalTask) task;

            for (Task<? extends Serializable> listTask : t.getListTasks()) {
                if (t.getChildTasks() != null) {
                    org.apache.hadoop.hive.ql.plan.api.Adjacency childEntry =
                            new org.apache.hadoop.hive.ql.plan.api.Adjacency();
                    childEntry.setAdjacencyType(AdjacencyType.DISJUNCTIVE);
                    childEntry.setNode(listTask.getId());
                    // done processing the task
                    for (Task<? extends Serializable> childTask : t.getChildTasks()) {
                        childEntry.addToChildren(childTask.getId());
                        if (!tasksVisited.contains(childTask)) {
                            tasksToVisit.add(childTask);
                        }
                    }
                    query.getStageGraph().addToAdjacencyList(childEntry);
                }
                listEntry.addToChildren(listTask.getId());
                if (!tasksVisited.contains(listTask)) {
                    tasksToVisit.add(listTask);
                }
            }
            query.getStageGraph().addToAdjacencyList(listEntry);
        } else if (task.getChildTasks() != null) {
            org.apache.hadoop.hive.ql.plan.api.Adjacency entry =
                    new org.apache.hadoop.hive.ql.plan.api.Adjacency();
            entry.setAdjacencyType(AdjacencyType.CONJUNCTIVE);
            entry.setNode(task.getId());
            // done processing the task
            for (Task<? extends Serializable> childTask : task.getChildTasks()) {
                entry.addToChildren(childTask.getId());
                if (!tasksVisited.contains(childTask)) {
                    tasksToVisit.add(childTask);
                }
            }
            query.getStageGraph().addToAdjacencyList(entry);
        }
    }
}
From source file:org.mule.util.queue.AbstractTransactionQueueManagerTestCase.java
@Test
public void testBench() throws Exception {
    TransactionalQueueManager mgr = createQueueManager();
    try {
        mgr.start();

        QueueSession s = mgr.getQueueSession();
        Queue q = s.getQueue("queue1");

        Random rnd = new Random();
        long t0 = System.currentTimeMillis();
        for (int i = 0; i < 1; i++) {
            for (int j = 0; j < 500; j++) {
                byte[] o = new byte[2048];
                rnd.nextBytes(o);
                q.put(o);
            }
            while (q.size() > 0) {
                q.take();
            }
        }
        long t1 = System.currentTimeMillis();
        logger.info("Time: " + (t1 - t0) + " ms");

        purgeQueue(q);
    } finally {
        mgr.stop(AbstractResourceManager.SHUTDOWN_MODE_NORMAL);
    }
}
From source file:org.mule.util.queue.AbstractTransactionQueueManagerTestCase.java
@Test
public void testTakePutOverCapacity() throws Exception {
    final TransactionalQueueManager mgr = createQueueManager();
    mgr.start();
    mgr.setDefaultQueueConfiguration(
            new QueueConfiguration(2, new QueueStoreAdapter<Serializable>(new SimpleMemoryObjectStore())));

    final Latch latch = new Latch();

    Thread t = new Thread() {
        @Override
        public void run() {
            try {
                latch.await();
                Thread.sleep(200);
                QueueSession s = mgr.getQueueSession();
                Queue q = s.getQueue("queue1");
                Object o = q.take();
                assertEquals("Queue content", "String1", o);
            } catch (Exception e) {
                // ignore, let test fail
            }
        }
    };
    t.start();

    QueueSession s = mgr.getQueueSession();
    Queue q = s.getQueue("queue1");
    assertEquals("Queue size", 0, q.size());

    q.put("String1");
    q.put("String2");

    latch.countDown();

    long t0 = System.currentTimeMillis();
    q.put("String3");
    long t1 = System.currentTimeMillis();

    t.join();

    assertEquals("Queue size", 2, q.size());
    assertTrue(t1 - t0 > 100);

    purgeQueue(q);

    mgr.stop(AbstractResourceManager.SHUTDOWN_MODE_NORMAL);
}
From source file:com.mtgi.analytics.XmlBehaviorEventPersisterImpl.java
public void persist(Queue<BehaviorEvent> events) {
    try {
        BehaviorEventSerializer serializer = new BehaviorEventSerializer();
        for (BehaviorEvent event : events) {
            if (event.getId() == null)
                event.setId(randomUUID());

            BehaviorEvent parent = event.getParent();
            if (parent != null && parent.getId() == null)
                parent.setId(randomUUID());

            synchronized (this) {
                serializer.serialize(writer, event);
                writer.writeCharacters("\n");
            }
        }
        synchronized (this) {
            writer.flush();
            stream.flush();
        }
    } catch (Exception error) {
        log.error("Error persisting events; discarding " + events.size() + " events without saving", error);
        events.clear();
    }
}
From source file:fr.landel.utils.commons.CastUtilsTest.java
/**
 * Check cast queue
 */
@Test
public void testGetQueue() {
    Queue<String> queue = new LinkedList<>();
    queue.add("value1");
    queue.add(null);
    queue.add("value2");

    assertTrue(CollectionUtils.isEmpty(CastUtils.getLinkedListAsQueue(null, String.class)));
    assertTrue(CollectionUtils.isEmpty(CastUtils.getLinkedTransferQueue(null, String.class)));
    assertTrue(CollectionUtils.isEmpty(CastUtils.getPriorityQueue(null, String.class)));
    assertTrue(CollectionUtils.isEmpty(CastUtils.getLinkedBlockingQueue(null, String.class)));
    assertTrue(CollectionUtils.isEmpty(CastUtils.getPriorityBlockingQueue(null, String.class)));
    assertTrue(CollectionUtils.isEmpty(CastUtils.getArrayBlockingQueue(null, String.class, queue.size())));

    Queue<String> result = CastUtils.getLinkedListAsQueue(queue, String.class);
    assertEquals("value1", result.poll());
    assertNull(result.poll());
    assertEquals("value2", result.poll());

    result = CastUtils.getLinkedTransferQueue(queue, String.class);
    assertEquals("value1", result.poll());
    assertEquals("value2", result.poll());

    result = CastUtils.getPriorityQueue(queue, String.class);
    assertEquals("value1", result.poll());
    assertEquals("value2", result.poll());

    result = CastUtils.getLinkedBlockingQueue(queue, String.class);
    assertEquals("value1", result.poll());
    assertEquals("value2", result.poll());

    result = CastUtils.getPriorityBlockingQueue(queue, String.class);
    assertEquals("value1", result.poll());
    assertEquals("value2", result.poll());

    result = CastUtils.getArrayBlockingQueue(queue, String.class, queue.size());
    assertEquals("value1", result.poll());
    assertEquals("value2", result.poll());

    assertEquals(0, CastUtils.getLinkedListAsQueue(12, String.class).size());

    Queue<Integer> queue2 = new LinkedList<>();
    queue2.add(2);
    assertEquals(0, CastUtils.getLinkedListAsQueue(queue2, String.class).size());
}
From source file:com.barchart.netty.server.http.TestHttpServer.java
@Test
public void testTooManyConnections() throws Exception {
    final Queue<Integer> status = new LinkedBlockingQueue<Integer>();

    final Runnable r = new Runnable() {
        @Override
        public void run() {
            try {
                final HttpResponse response = client
                        .execute(new HttpGet("http://localhost:" + port + "/client-disconnect"));
                status.add(response.getStatusLine().getStatusCode());
                EntityUtils.consume(response.getEntity());
            } catch (final Exception e) {
                e.printStackTrace();
            }
        }
    };

    final Thread t1 = new Thread(r);
    t1.start();
    final Thread t2 = new Thread(r);
    t2.start();

    t1.join();
    t2.join();

    assertEquals(2, status.size());
    assertTrue(status.contains(200));
    assertTrue(status.contains(503));
}
From source file:com.funambol.pushlistener.service.taskexecutor.ScheduledTaskExecutor.java
/**
 * Returns the health status of the executor.
 *
 * @return the health status of the executor
 */
public ScheduledTaskExecutorHealthStatus getScheduledTaskExecutorHealthStatus() {
    ScheduledTaskExecutorHealthStatus status = null;

    int poolSize = getPoolSize();
    long completedTaskCount = getCompletedTaskCount();
    int activeCount = getActiveCount();
    long taskCount = getTaskCount();
    Queue queue = getQueue();
    boolean shutDown = isShutdown();

    int taskQueued = 0;
    if (queue != null) {
        taskQueued = queue.size();
    }

    status = new ScheduledTaskExecutorHealthStatus(((Object) this).toString(), poolSize, activeCount,
            completedTaskCount, taskCount, taskQueued, shutDown);

    return status;
}
From source file:org.jolokia.client.request.J4pReadIntegrationTest.java
@Test
public void error404ConnectionTest() throws Exception {
    final J4pReadRequest req = new J4pReadRequest(itSetup.getAttributeMBean(), "LongSeconds");
    try {
        stop();
        startWithoutAgent();
        j4pClient.execute(req);
        fail();
    } catch (J4pRemoteException exp) {
        assertEquals(404, exp.getStatus());
    }
    stop();
    start();

    final CyclicBarrier barrier = new CyclicBarrier(10);
    final Queue errors = new ConcurrentLinkedQueue();

    Runnable run = new Runnable() {
        public void run() {
            try {
                j4pClient.execute(req);
            } catch (Exception e) {
                errors.add(1);
                System.err.println(e);
            }
            try {
                barrier.await();
            } catch (InterruptedException ex) {
                return;
            } catch (BrokenBarrierException ex) {
                return;
            }
        }
    };

    for (int i = 0; i < 10; i++) {
        new Thread(run).start();
    }

    if (barrier.await() == 0) {
        // System.err.println("Finished");
        assertEquals(0, errors.size(), "Concurrent calls should work");
    }
}