List of usage examples for java.util.concurrent BlockingQueue add
boolean add(E e);
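Before the harvested examples below, here is a minimal, self-contained sketch of the add contract (the class name QuickAddDemo and the capacity of 2 are illustrative, not taken from any project listed here): unlike offer, add throws an IllegalStateException when a capacity-bounded queue is full.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class QuickAddDemo {
    public static void main(String[] args) {
        // A bounded queue with room for two elements.
        BlockingQueue<String> queue = new ArrayBlockingQueue<String>(2);

        System.out.println(queue.add("first"));   // true
        System.out.println(queue.add("second"));  // true

        try {
            queue.add("third");                   // queue is full
        } catch (IllegalStateException expected) {
            // add() fails fast on a full queue; offer() would return false instead
            System.out.println("Queue full: " + expected.getMessage());
        }
    }
}

Most of the examples below call add from listener or callback code on an unbounded LinkedBlockingQueue, where add cannot fail for capacity reasons and behaves like offer.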
From source file:com.netflix.curator.framework.imps.TestFramework.java
@Test
public void testConnectionState() throws Exception {
    Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    try {
        final BlockingQueue<ConnectionState> queue = new LinkedBlockingQueue<ConnectionState>();
        ConnectionStateListener listener = new ConnectionStateListener() {
            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                queue.add(newState);
            }
        };
        client.getConnectionStateListenable().addListener(listener);
        client.start();

        Assert.assertEquals(queue.poll(timing.multiple(4).seconds(), TimeUnit.SECONDS), ConnectionState.CONNECTED);
        server.stop();
        Assert.assertEquals(queue.poll(timing.multiple(4).seconds(), TimeUnit.SECONDS), ConnectionState.SUSPENDED);
        Assert.assertEquals(queue.poll(timing.multiple(4).seconds(), TimeUnit.SECONDS), ConnectionState.LOST);
    } finally {
        IOUtils.closeQuietly(client);
    }
}
From source file:io.nats.client.ITClusterTest.java
/**
 * Ensures that if a ping is not ponged within the pingInterval, a disconnect/reconnect
 * takes place.
 * <p>
 * We test this by setting maxPingsOut < 0 and setting the pingInterval very small. After
 * the first disconnect, we measure the reconnect-to-disconnect time to ensure it isn't
 * greater than 2 * pingInterval.
 *
 * @throws Exception if anything goes wrong
 */
@Test
public void testPingReconnect() throws Exception {
    final int reconnects = 4;
    final AtomicInteger timesReconnected = new AtomicInteger();
    // setLogLevel(Level.DEBUG);
    try (NatsServer s1 = runServerOnPort(1222)) {
        Options opts = new Options.Builder(defaultOptions()).dontRandomize().reconnectWait(200)
                .pingInterval(50).maxPingsOut(-1).timeout(1000).build();
        opts.servers = Nats.processUrlArray(testServers);

        final CountDownLatch wg = new CountDownLatch(reconnects);
        final BlockingQueue<Long> rch = new LinkedBlockingQueue<Long>(reconnects);
        final BlockingQueue<Long> dch = new LinkedBlockingQueue<Long>(reconnects);

        opts.disconnectedCb = new DisconnectedCallback() {
            public void onDisconnect(ConnectionEvent event) {
                dch.add(System.nanoTime());
            }
        };
        opts.reconnectedCb = new ReconnectedCallback() {
            @Override
            public void onReconnect(ConnectionEvent event) {
                rch.add(System.nanoTime());
                wg.countDown();
            }
        };

        try (ConnectionImpl c = (ConnectionImpl) opts.connect()) {
            wg.await();
            s1.shutdown();

            // Throw away the first one
            dch.take();

            for (int i = 0; i < reconnects - 1; i++) {
                Long disconnectedAt = dch.take();
                Long reconnectedAt = rch.take();
                Long pingCycle = TimeUnit.NANOSECONDS.toMillis(disconnectedAt - reconnectedAt);
                assertFalse(String.format("Reconnect due to ping took %d msec", pingCycle),
                        pingCycle > 2 * c.getOptions().getPingInterval());
            }
        }
    }
}
From source file:org.opencastproject.videosegmenter.impl.VideoSegmenterServiceImpl.java
/**
 * Fills the look ahead buffer with the next <code>STABILITY_THRESHOLD</code> images.
 *
 * @param queue
 *          the buffer
 * @param currentBuffer
 *          the current buffer
 * @param dsh
 *          the data source handler
 * @throws IOException
 *           if reading from the data source fails
 */
private void fillLookAheadBuffer(BlockingQueue<Buffer> queue, Buffer currentBuffer, FrameGrabber dsh)
        throws IOException {
    queue.clear();
    queue.add(currentBuffer);
    for (int i = 0; i < stabilityThreshold - 1; i++) {
        Buffer b = dsh.getBuffer();
        if (b != null && !b.isEOM())
            queue.add(b);
        else
            return;
    }
}
From source file:com.netflix.curator.framework.recipes.leader.TestLeaderSelector.java
@SuppressWarnings({ "ForLoopReplaceableByForEach" }) @Test// w w w . j a v a2 s .c o m public void testRotatingLeadership() throws Exception { final int LEADER_QTY = 5; final int REPEAT_QTY = 3; final Timing timing = new Timing(); CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1)); client.start(); try { final BlockingQueue<Integer> leaderList = new LinkedBlockingQueue<Integer>(); List<LeaderSelector> selectors = Lists.newArrayList(); for (int i = 0; i < LEADER_QTY; ++i) { final int ourIndex = i; LeaderSelector leaderSelector = new LeaderSelector(client, PATH_NAME, new LeaderSelectorListener() { @Override public void takeLeadership(CuratorFramework client) throws Exception { timing.sleepABit(); leaderList.add(ourIndex); } @Override public void stateChanged(CuratorFramework client, ConnectionState newState) { } }); selectors.add(leaderSelector); } List<Integer> localLeaderList = Lists.newArrayList(); for (int i = 1; i <= REPEAT_QTY; ++i) { for (LeaderSelector leaderSelector : selectors) { if (i > 1) { leaderSelector.requeue(); } else { leaderSelector.start(); } } while (localLeaderList.size() != (i * selectors.size())) { Integer polledIndex = leaderList.poll(10, TimeUnit.SECONDS); Assert.assertNotNull(polledIndex); localLeaderList.add(polledIndex); } timing.sleepABit(); } for (LeaderSelector leaderSelector : selectors) { leaderSelector.close(); } System.out.println(localLeaderList); for (int i = 0; i < REPEAT_QTY; ++i) { Set<Integer> uniques = Sets.newHashSet(); for (int j = 0; j < selectors.size(); ++j) { Assert.assertTrue(localLeaderList.size() > 0); int thisIndex = localLeaderList.remove(0); Assert.assertFalse(uniques.contains(thisIndex)); uniques.add(thisIndex); } } } finally { client.close(); } }
From source file:org.kuali.rice.krad.data.provider.ProviderRegistryImplTest.java
/**
 * Verifies ProviderRegistryImpl is threadsafe.
 */
@Test
public void testConcurrency() throws InterruptedException {
    final Class<? extends Provider>[] TYPES = new Class[] { Provider.class, MetadataProvider.class,
            PersistenceProvider.class, CustomProvider.class };
    int providers = 50;
    int threads = providers * 2; // just use live threads for all consumers/producers to ensure no consumer deadlock

    final BlockingQueue<Provider> queue = new LinkedBlockingQueue<Provider>();
    ExecutorService threadpool = Executors.newFixedThreadPool(threads);

    Callable<Object>[] producers = new Callable[providers];
    Callable<Object>[] consumers = new Callable[providers];
    Callable<Object> producer = new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            // pick a random provider type to mock, bounded by the number of entries in TYPES
            Provider p = mock(TYPES[RandomUtils.nextInt(TYPES.length)]);
            registry.registerProvider(p);
            queue.add(p);
            return null;
        }
    };
    Callable<Object> consumer = new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            Provider p = queue.take();
            registry.unregisterProvider(p);
            return null;
        }
    };
    Arrays.fill(producers, producer);
    Arrays.fill(consumers, consumer);

    List<Callable<Object>> tasks = new ArrayList<Callable<Object>>(providers * 2);
    tasks.addAll(Arrays.asList(producers));
    tasks.addAll(Arrays.asList(consumers));
    Collections.shuffle(tasks);

    System.out.println("Registering and unregistering " + providers + " providers");
    threadpool.invokeAll(tasks, 10, TimeUnit.SECONDS);

    // all producers and consumers should have run, we should be back at 0 providers registered
    assertEquals(0, registry.getProviders().size());
}
From source file:org.apache.eagle.jobrunning.crawler.RunningJobCrawlerImpl.java
public void addIntoProcessingQueueAndList(Set<JobContext> jobSet, BlockingQueue<JobContext> queue,
        ResourceType type) {
    try {
        readWriteLock.writeLock().lock();
        LOG.info("Write lock acquired");
        List<String> processingList = zkStateManager.readProcessedJobs(type);
        processingList.addAll(extractJobList(type));
        for (JobContext context : jobSet) {
            String jobId = context.jobId;
            if (!processingList.contains(jobId)) {
                addIntoProcessingList(type, context);
                queue.add(context);
            }
        }
    } finally {
        try {
            readWriteLock.writeLock().unlock();
            LOG.info("Write lock released");
        } catch (Throwable t) {
            LOG.error("Fail to release Write lock", t);
        }
    }
}
From source file:eagle.jobrunning.crawler.RunningJobCrawlerImpl.java
public void addIntoProcessingQueueAndList(Set<String> jobIdSet, BlockingQueue<JobContext> queue,
        ResourceType type) {
    try {
        readWriteLock.writeLock().lock();
        LOG.info("Write lock acquired");
        List<String> processingList = zkStateManager.readProcessedJobs(type);
        processingList.addAll(extractJobList(type));
        for (String jobId : jobIdSet) {
            if (!processingList.contains(jobId)) {
                JobContext context = new JobContext(jobId, System.currentTimeMillis());
                addIntoProcessingList(type, context);
                queue.add(context);
            }
        }
    } finally {
        try {
            readWriteLock.writeLock().unlock();
            LOG.info("Write lock released");
        } catch (Throwable t) {
            LOG.error("Fail to release Write lock", t);
        }
    }
}
From source file:gridool.taskqueue.sender.SenderResponseTaskQueue.java
@Override
public void onResponse(@Nonnull GridTaskResponseMessage response) {
    final String jobId = response.getJobId();
    final GridNode senderNode = response.getSenderNode();
    final BlockingQueue<GridTaskResult> queue = queueMap.get(jobId);
    if (queue != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Received a GridTaskResponseMessage for a task: " + response.getTaskId() + " of Job ["
                    + jobId + "] that was executed on " + (senderNode == null ? "localhost" : senderNode));
        }
        byte[] resultBytes = response.getMessage();
        String deployGroup = response.getDeploymentGroup();
        ClassLoader ldr = registry.getDeploymentGroupClassLoader(deployGroup);
        final GridTaskResult result;
        try {
            result = marshaller.unmarshall(resultBytes, ldr);
        } catch (GridException e) {
            throw new IllegalStateException("failed to unmarshall message from node: " + senderNode, e);
        }
        queue.add(result);
    } else {
        LOG.error("SenderResponseQueue is not found for a task: " + response.getTaskId() + " of Job [" + jobId
                + "] that was executed on " + (senderNode == null ? "localhost" : senderNode));
    }
}
From source file:com.curecomp.primefaces.migrator.PrimefacesMigration.java
private static void findWidgetVarUsages(Path sourceFile, WidgetVarLocation widgetVarLocation,
        BlockingQueue<WidgetVarLocation> foundUsages, BlockingQueue<WidgetVarLocation> skippedUsages,
        BlockingQueue<WidgetVarLocation> unusedOrAmbiguous) throws IOException {
    try (BufferedReader br = Files.newBufferedReader(sourceFile, StandardCharsets.UTF_8)) {
        int lineNr = 0;
        String line;

        while ((line = br.readLine()) != null) {
            lineNr++;
            int startIndex = 0;
            int endIndex = -1;

            while ((startIndex = line.indexOf(widgetVarLocation.widgetVar, endIndex + 1)) > -1) {
                endIndex = startIndex + widgetVarLocation.widgetVar.length();

                if (sourceFile.equals(widgetVarLocation.location) && lineNr == widgetVarLocation.lineNr
                        && startIndex == widgetVarLocation.columnNr) {
                    continue;
                }

                WidgetVarLocation usage = new WidgetVarLocation(widgetVarLocation.widgetVar, sourceFile, lineNr,
                        startIndex, line);

                // Only look at lines that use the word as a whole and not just as a part
                if ((startIndex == 0 || !Character.isJavaIdentifierStart(line.charAt(startIndex - 1)))
                        && (line.length() == endIndex || !Character.isJavaIdentifierPart(line.charAt(endIndex)))) {
                    // We skip usages that occur as the last word of a line or usages that don't call methods directly
                    if (endIndex == line.length()
                            || endIndex < line.length() && line.charAt(endIndex) != '.') {
                        skippedUsages.add(usage);
                    } else {
                        foundUsages.add(usage);
                    }
                } else {
                    skippedUsages.add(usage);
                }

                unusedOrAmbiguous.remove(widgetVarLocation);
            }
        }
    }
}
From source file:com.jbrisbin.vpc.jobsched.SubmitClosure.java
@Override
public Object call(Object[] args) {
    log.debug("args: " + args);
    String exch = args[0].toString();
    String route = args[1].toString();
    final Object body = args[2];
    Map headers = null;

    final BlockingQueue<Object> resultsQueue = new LinkedBlockingQueue<Object>();
    Queue replyQueue = rabbitAdmin.declareQueue();
    SimpleMessageListenerContainer listener = new SimpleMessageListenerContainer();
    listener.setQueues(replyQueue);

    if (args.length > 3) {
        // inspect the optional trailing arguments for a callback and/or a header map
        for (int i = 3; i < args.length; i++) {
            if (args[i] instanceof MessageListener) {
                MessageListener callback = (MessageListener) args[i];
                listener.setMessageListener(callback);
            } else if (args[i] instanceof Map) {
                headers = (Map) args[i];
            }
        }
    } else {
        listener.setMessageListener(new MessageListener() {
            public void onMessage(Message message) {
                byte[] body = message.getBody();
                try {
                    resultsQueue.add(mapper.readValue(body, 0, body.length, Map.class));
                } catch (IOException e) {
                    log.error(e.getMessage(), e);
                }
            }
        });
    }

    final Map msgHdrs = headers;
    rabbitTemplate.send(exch, route, new MessageCreator() {
        public Message createMessage() {
            MessageProperties props = new RabbitMessageProperties();
            props.setContentType("application/json");
            if (null != msgHdrs) {
                props.getHeaders().putAll(msgHdrs);
            }
            String uuid = UUID.randomUUID().toString();
            props.setCorrelationId(uuid.getBytes());

            ByteArrayOutputStream out = new ByteArrayOutputStream();
            try {
                mapper.writeValue(out, body);
            } catch (IOException e) {
                log.error(e.getMessage(), e);
            }
            Message msg = new Message(out.toByteArray(), props);
            return msg;
        }
    });

    Object results = null;
    try {
        results = resultsQueue.poll(5, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        log.error(e.getMessage(), e);
    }
    listener.stop();

    return results;
}