List of usage examples for java.util.concurrent BlockingQueue.size()
int size();
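size() is inherited from java.util.Collection and returns the number of elements currently in the queue, not the remaining capacity. For orientation, here is a minimal self-contained sketch (not taken from any of the source files below; the class and variable names are illustrative only):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BlockingQueueSizeExample {
    public static void main(String[] args) throws InterruptedException {
        // Bounded queue with a capacity of 10 elements.
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(10);
        queue.put("a");
        queue.put("b");
        // size() reports how many elements are in the queue right now (2),
        // while remainingCapacity() reports how many more it can accept (8).
        System.out.println("size = " + queue.size());
        System.out.println("remaining = " + queue.remainingCapacity());
    }
}

In concurrent code the value returned by size() may already be stale by the time it is used; the examples below use it mainly for metrics gauges, progress logging, and pool-fill checks rather than for synchronization.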
From source file:com.attribyte.essem.ApplicationCache.java
ApplicationCache(final AsyncClient client, final RequestOptions requestOptions,
                 final ESEndpoint esEndpoint, final Logger logger) {
    this.client = client;
    this.requestOptions = requestOptions;
    this.esEndpoint = esEndpoint;
    this.logger = logger;

    final BlockingQueue<Runnable> requestQueue = new ArrayBlockingQueue<>(4096);
    final Gauge<Integer> requestQueueSize = new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return requestQueue.size();
        }
    };

    final ThreadPoolExecutor requestExecutor = new ThreadPoolExecutor(2, 8, 5L, TimeUnit.MINUTES, requestQueue,
            new ThreadFactoryBuilder().setNameFormat("application-cache-%d").build());
    requestExecutor.prestartAllCoreThreads();

    final Counter rejectedRequests = new Counter();
    requestExecutor.setRejectedExecutionHandler(new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(final Runnable r, final ThreadPoolExecutor executor) {
            rejectedRequests.inc();
        }
    });

    this.requestExecutor = MoreExecutors
            .listeningDecorator(MoreExecutors.getExitingExecutorService(requestExecutor));

    this.appRequestTimer = new Timer();
    this.appRequestErrors = new Counter();
    this.nameRequestTimer = new Timer();
    this.nameRequestErrors = new Counter();
    this.statsRequestTimer = new Timer();
    this.statsRequestErrors = new Counter();

    Gauge<Integer> appCacheSize = new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return appCache.size();
        }
    };

    this.metrics = ImmutableMap.<String, com.codahale.metrics.Metric>builder()
            .put("request-queue-size", requestQueueSize)
            .put("rejected-background-requests", rejectedRequests)
            .put("app-requests", appRequestTimer)
            .put("app-request-errors", appRequestErrors)
            .put("name-requests", nameRequestTimer)
            .put("name-request-errors", nameRequestErrors)
            .put("app-cache-size", appCacheSize)
            .put("stats-requests", statsRequestTimer)
            .put("stats-request-errors", statsRequestErrors)
            .build();
}
From source file:ubic.gemma.core.loader.genome.gene.ncbi.NcbiGeneLoader.java
void doLoad(final BlockingQueue<Gene> geneQueue) {
    StopWatch timer = new StopWatch();
    timer.start();
    while (!(converterDone.get() && geneQueue.isEmpty())) {
        Gene gene = null;
        try {
            // the converted genes.
            gene = geneQueue.poll();
            if (gene == null) {
                continue;
            }
            persisterHelper.persistOrUpdate(gene);
            if (++loadedGeneCount % 1000 == 0 || timer.getTime() > 30 * 1000) {
                NcbiGeneLoader.log.info("Processed " + loadedGeneCount + " genes. Queue has " + geneQueue.size()
                        + " items; last gene: " + gene);
                timer.reset();
                timer.start();
            }
        } catch (Exception e) {
            NcbiGeneLoader.log.error("Error while loading gene: " + gene + ": " + e.getMessage(), e);
            loaderDone.set(true);
            throw new RuntimeException(e);
        }
    }
    NcbiGeneLoader.log.info("Loaded " + loadedGeneCount + " genes. ");
    loaderDone.set(true);
}
From source file:com.splout.db.qnode.QNodeHandlerContext.java
/**
 * This method can be called to initialize a pool of connections to a dnode. This method may be called from
 * multiple threads so it should be safe to call it concurrently.
 */
public void initializeThriftClientCacheFor(String dnode) throws TTransportException, InterruptedException {
    // this lock is on the whole cache but we would actually be interested in a per-DNode lock...
    // there's only one lock for simplicity.
    thriftClientCacheLock.lock();
    try {
        // initialize queue for this DNode
        BlockingQueue<DNodeService.Client> dnodeQueue = thriftClientCache.get(dnode);
        if (dnodeQueue == null) {
            // this assures that the per-DNode queue is only created once and then reused.
            dnodeQueue = new LinkedBlockingDeque<DNodeService.Client>(thriftClientPoolSize);
        }
        if (dnodeQueue.isEmpty()) {
            try {
                for (int i = dnodeQueue.size(); i < thriftClientPoolSize; i++) {
                    dnodeQueue.put(DNodeClient.get(dnode));
                }
                // we only put the queue if all connections have been populated
                thriftClientCache.put(dnode, dnodeQueue);
            } catch (TTransportException e) {
                log.error("Error while trying to populate queue for " + dnode
                        + ", will discard created connections.", e);
                while (!dnodeQueue.isEmpty()) {
                    dnodeQueue.poll().getOutputProtocol().getTransport().close();
                }
                throw e;
            }
        } else {
            // it should be safe to call this method from different places concurrently
            // so we contemplate the case where another Thread already populated the queue
            // and only populate it if it's really empty.
            log.warn(Thread.currentThread().getName() + " : queue for [" + dnode
                    + "] is not empty - it was populated before.");
        }
    } finally {
        thriftClientCacheLock.unlock();
    }
}
From source file:org.batoo.jpa.benchmark.BenchmarkTest.java
private void waitUntilFinish(ThreadPoolExecutor executor) {
    final BlockingQueue<Runnable> workQueue = executor.getQueue();
    try {
        final long started = System.currentTimeMillis();
        int lastToGo = workQueue.size();
        final int total = workQueue.size();
        int performed = 0;
        int maxStatusMessageLength = 0;
        while (!workQueue.isEmpty()) {
            final float doneNow = lastToGo - workQueue.size();
            performed += doneNow;
            final float elapsed = (System.currentTimeMillis() - started) / 1000;
            lastToGo = workQueue.size();
            if (performed > 0) {
                final float throughput = performed / elapsed;
                final float eta = ((elapsed * total) / performed) - elapsed;
                final float percentDone = (100 * (float) lastToGo) / total;
                final int gaugeDone = (int) ((100 - percentDone) / 5);
                final String gauge = "[" + StringUtils.repeat("", gaugeDone)
                        + StringUtils.repeat("-", 20 - gaugeDone) + "]";
                final String sampling = this.profilingQueue.size() > 0
                        ? MessageFormat.format(" | Samples {0}", this.profilingQueue.size())
                        : "";
                if ((maxStatusMessageLength != 0) || (eta > 5)) {
                    String statusMessage = MessageFormat.format(
                            "\r{4} %{5,number,00.00} | ETA {2} | LAST TPS {0} ops / sec | AVG TPS {1,number,#.0} | LEFT {3}{6}", //
                            doneNow, throughput, this.etaToString((int) eta), workQueue.size(), gauge,
                            percentDone, sampling);
                    maxStatusMessageLength = Math.max(statusMessage.length(), maxStatusMessageLength);
                    statusMessage = StringUtils.leftPad(statusMessage,
                            maxStatusMessageLength - statusMessage.length());
                    System.out.print(statusMessage);
                }
            }
            if (elapsed > BenchmarkTest.MAX_TEST_TIME) {
                throw new IllegalStateException("Max allowed test time exceeded");
            }
            Thread.sleep(1000);
        }
        if (maxStatusMessageLength > 0) {
            System.out.print("\r" + StringUtils.repeat(" ", maxStatusMessageLength) + "\r");
        }
        executor.shutdown();
        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            BenchmarkTest.LOG.warn("Forcefully shutting down the thread pool");
            executor.shutdownNow();
        }
        BenchmarkTest.LOG.warn("Iterations completed");
    } catch (final InterruptedException e) {
        throw new RuntimeException(e);
    }
}
From source file:com.tyndalehouse.step.tools.modules.ConvertXmlToOSISModule.java
private void convert() throws Exception {
    final BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(1024);
    final ExecutorService executorService = new ThreadPoolExecutor(3, 3, 1, TimeUnit.DAYS, queue);
    final File[] files = SOURCE_DIRECTORY.listFiles();
    for (final File f : files) {
        if (f.isDirectory()) {
            final File[] unzippedFiles = f.listFiles();
            for (final File unzipped : unzippedFiles) {
                if (unzipped.getName().endsWith(".xml")) {
                    executorService.submit(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                convertToXml(f.getName(), unzipped);
                                LOGGER.debug("Finished [{}], [{}] remaining", f.getName(), queue.size());
                            } catch (Exception e) {
                                LOGGER.error("Failed to convert [{}]", f.getName(), e);
                            }
                        }
                    });
                    break;
                }
            }
            // break;
        }
    }
    executorService.shutdown();
}
From source file:org.apache.nifi.processor.util.listen.dispatcher.SocketChannelDispatcher.java
public SocketChannelDispatcher(final EventFactory<E> eventFactory,
        final ChannelHandlerFactory<E, AsyncChannelDispatcher> handlerFactory,
        final BlockingQueue<ByteBuffer> bufferPool, final BlockingQueue<E> events, final ComponentLog logger,
        final int maxConnections, final SSLContext sslContext, final SslContextFactory.ClientAuth clientAuth,
        final Charset charset) {
    this.eventFactory = eventFactory;
    this.handlerFactory = handlerFactory;
    this.bufferPool = bufferPool;
    this.events = events;
    this.logger = logger;
    this.maxConnections = maxConnections;
    this.keyQueue = new LinkedBlockingQueue<>(maxConnections);
    this.sslContext = sslContext;
    this.clientAuth = clientAuth;
    this.charset = charset;

    if (bufferPool == null || bufferPool.size() == 0 || bufferPool.size() != maxConnections) {
        throw new IllegalArgumentException(
                "A pool of available ByteBuffers equal to the maximum number of connections is required");
    }
}
From source file:org.springframework.integration.ip.tcp.connection.CachingClientConnectionFactoryTests.java
@Test
// @Repeat(1000) // INT-3722
public void gatewayIntegrationTest() throws Exception {
    final List<String> connectionIds = new ArrayList<String>();
    final AtomicBoolean okToRun = new AtomicBoolean(true);
    Executors.newSingleThreadExecutor().execute(() -> {
        while (okToRun.get()) {
            Message<?> m = inbound.receive(1000);
            if (m != null) {
                connectionIds.add((String) m.getHeaders().get(IpHeaders.CONNECTION_ID));
                replies.send(MessageBuilder.withPayload("foo:" + new String((byte[]) m.getPayload()))
                        .copyHeaders(m.getHeaders()).build());
            }
        }
    });
    TestingUtilities.waitListening(serverCf, null);
    new DirectFieldAccessor(this.clientGatewayCf).setPropertyValue("port", this.serverCf.getPort());
    this.toGateway.send(new GenericMessage<>("Hello, world!"));
    Message<?> m = fromGateway.receive(1000);
    assertNotNull(m);
    assertEquals("foo:" + "Hello, world!", new String((byte[]) m.getPayload()));
    BlockingQueue<?> connections = TestUtils.getPropertyValue(this.gatewayCF, "pool.available",
            BlockingQueue.class);
    // wait until the connection is returned to the pool
    int n = 0;
    while (n++ < 100 && connections.size() == 0) {
        Thread.sleep(100);
    }
    // assert we use the same connection from the pool
    toGateway.send(new GenericMessage<String>("Hello, world2!"));
    m = fromGateway.receive(1000);
    assertNotNull(m);
    assertEquals("foo:" + "Hello, world2!", new String((byte[]) m.getPayload()));
    assertEquals(2, connectionIds.size());
    assertEquals(connectionIds.get(0), connectionIds.get(1));
    okToRun.set(false);
}
From source file:ubic.gemma.loader.genome.gene.ncbi.NcbiGeneLoader.java
/**
 * @param geneQueue
 */
void doLoad(final BlockingQueue<Gene> geneQueue) {
    StopWatch timer = new StopWatch();
    timer.start();
    int skipped = 0;
    while (!(converterDone.get() && geneQueue.isEmpty())) {
        Gene gene = null;
        try {
            // the converted genes.
            gene = geneQueue.poll();
            if (gene == null) {
                continue;
            }
            if (gene.getProducts().isEmpty()) {
                // log.warn( gene + " has no products, skipping" ); // common!!!
                // skipped++;
            }
            persisterHelper.persistOrUpdate(gene);
            if (++loadedGeneCount % 1000 == 0 || timer.getTime() > 30 * 1000) {
                log.info("Processed " + loadedGeneCount + " genes. Queue has " + geneQueue.size()
                        + " items; last gene: " + gene);
                if (skipped > 0) {
                    log.info(skipped + " skipped because they had no gene products.");
                }
                timer.reset();
                timer.start();
            }
        } catch (Exception e) {
            log.error("Error while loading gene: " + gene + ": " + e.getMessage(), e);
            loaderDone.set(true);
            throw new RuntimeException(e);
        }
    }
    log.info("Loaded " + loadedGeneCount + " genes. ");
    loaderDone.set(true);
}
From source file:org.springframework.amqp.rabbit.connection.CachingConnectionFactoryTests.java
@Test
public void testWithConnectionFactoryCachedConnectionIdleAreClosed() throws Exception {
    com.rabbitmq.client.ConnectionFactory mockConnectionFactory = mock(
            com.rabbitmq.client.ConnectionFactory.class);
    final List<com.rabbitmq.client.Connection> mockConnections = new ArrayList<com.rabbitmq.client.Connection>();
    final List<Channel> mockChannels = new ArrayList<Channel>();

    doAnswer(new Answer<com.rabbitmq.client.Connection>() {

        private int connectionNumber;

        @Override
        public com.rabbitmq.client.Connection answer(InvocationOnMock invocation) throws Throwable {
            com.rabbitmq.client.Connection connection = mock(com.rabbitmq.client.Connection.class);
            doAnswer(new Answer<Channel>() {

                private int channelNumber;

                @Override
                public Channel answer(InvocationOnMock invocation) throws Throwable {
                    Channel channel = mock(Channel.class);
                    when(channel.isOpen()).thenReturn(true);
                    int channelNumber = ++this.channelNumber;
                    when(channel.toString()).thenReturn("mockChannel" + channelNumber);
                    mockChannels.add(channel);
                    return channel;
                }
            }).when(connection).createChannel();
            int connectionNumber = ++this.connectionNumber;
            when(connection.toString()).thenReturn("mockConnection" + connectionNumber);
            when(connection.isOpen()).thenReturn(true);
            mockConnections.add(connection);
            return connection;
        }
    }).when(mockConnectionFactory).newConnection(any(ExecutorService.class), anyString());

    CachingConnectionFactory ccf = new CachingConnectionFactory(mockConnectionFactory);
    ccf.setExecutor(mock(ExecutorService.class));
    ccf.setCacheMode(CacheMode.CONNECTION);
    ccf.setConnectionCacheSize(5);
    ccf.afterPropertiesSet();

    Set<?> allocatedConnections = TestUtils.getPropertyValue(ccf, "allocatedConnections", Set.class);
    assertEquals(0, allocatedConnections.size());
    BlockingQueue<?> idleConnections = TestUtils.getPropertyValue(ccf, "idleConnections", BlockingQueue.class);
    assertEquals(0, idleConnections.size());

    Connection conn1 = ccf.createConnection();
    Connection conn2 = ccf.createConnection();
    Connection conn3 = ccf.createConnection();
    assertEquals(3, allocatedConnections.size());
    assertEquals(0, idleConnections.size());
    conn1.close();
    conn2.close();
    conn3.close();
    assertEquals(3, allocatedConnections.size());
    assertEquals(3, idleConnections.size());

    when(mockConnections.get(0).isOpen()).thenReturn(false);
    when(mockConnections.get(1).isOpen()).thenReturn(false);
    Connection conn4 = ccf.createConnection();
    assertEquals(3, allocatedConnections.size());
    assertEquals(2, idleConnections.size());
    assertSame(conn3, conn4);
    conn4.close();
    assertEquals(3, allocatedConnections.size());
    assertEquals(3, idleConnections.size());
    assertEquals("1", ccf.getCacheProperties().get("openConnections"));

    ccf.destroy();
    assertEquals(3, allocatedConnections.size());
    assertEquals(3, idleConnections.size());
    assertEquals("0", ccf.getCacheProperties().get("openConnections"));
}
From source file:org.apache.falcon.service.FeedSLAMonitoringService.java
void addNewPendingFeedInstances(Date from, Date to) throws FalconException {
    Set<String> currentClusters = DeploymentUtil.getCurrentClusters();
    for (String feedName : monitoredFeeds) {
        Feed feed = EntityUtil.getEntity(EntityType.FEED, feedName);
        for (Cluster feedCluster : feed.getClusters().getClusters()) {
            if (currentClusters.contains(feedCluster.getName())) {
                Date nextInstanceTime = from;
                Pair<String, String> key = new Pair<>(feed.getName(), feedCluster.getName());
                BlockingQueue<Date> instances = pendingInstances.get(key);
                if (instances == null) {
                    instances = new LinkedBlockingQueue<>(queueSize);
                    Date feedStartTime = feedCluster.getValidity().getStart();
                    Frequency retentionFrequency = FeedHelper.getRetentionFrequency(feed, feedCluster);
                    ExpressionHelper evaluator = ExpressionHelper.get();
                    ExpressionHelper.setReferenceDate(new Date());
                    Date retention = new Date(evaluator.evaluate(retentionFrequency.toString(), Long.class));
                    if (feedStartTime.before(retention)) {
                        feedStartTime = retention;
                    }
                    nextInstanceTime = feedStartTime;
                }
                Set<Date> exists = new HashSet<>(instances);
                org.apache.falcon.entity.v0.cluster.Cluster currentCluster = EntityUtil
                        .getEntity(EntityType.CLUSTER, feedCluster.getName());
                nextInstanceTime = EntityUtil.getNextStartTime(feed, currentCluster, nextInstanceTime);
                while (nextInstanceTime.before(to)) {
                    if (instances.size() >= queueSize) {
                        // if no space, first make some space
                        LOG.debug("Removing instance={} for <feed,cluster>={}", instances.peek(), key);
                        exists.remove(instances.peek());
                        instances.remove();
                    }
                    LOG.debug("Adding instance={} for <feed,cluster>={}", nextInstanceTime, key);
                    if (exists.add(nextInstanceTime)) {
                        instances.add(nextInstanceTime);
                    }
                    nextInstanceTime = new Date(nextInstanceTime.getTime() + ONE_MS);
                    nextInstanceTime = EntityUtil.getNextStartTime(feed, currentCluster, nextInstanceTime);
                }
                pendingInstances.put(key, instances);
            }
        }
    }
}