List of usage examples for java.util.concurrent.LinkedBlockingQueue constructors
public LinkedBlockingQueue(Collection<? extends E> c)
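For orientation, here is a minimal sketch of this constructor alongside the int-capacity overload that several of the examples below actually use. The element type and seed values are illustrative only:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;

public class ConstructorDemo {
    public static void main(String[] args) throws InterruptedException {
        // Collection constructor: the queue starts pre-filled with the
        // collection's elements; capacity defaults to Integer.MAX_VALUE.
        List<String> seed = Arrays.asList("a", "b", "c");
        LinkedBlockingQueue<String> fromCollection = new LinkedBlockingQueue<String>(seed);

        // Capacity overload used by most examples on this page: a bounded queue.
        LinkedBlockingQueue<String> bounded = new LinkedBlockingQueue<String>(2);
        bounded.put("x");
        bounded.put("y");
        // bounded.put("z") would now block until a consumer calls take().

        System.out.println(fromCollection.take()); // prints "a" (FIFO order)
        System.out.println(bounded.size());        // prints 2
    }
}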
From source file:com.ebay.pulsar.metriccalculator.processor.MetricCassandraCollector.java
private void init() {
    workQueue = new LinkedBlockingQueue<Runnable>(m_workingQueueSize);
    worker = new ThreadPoolExecutor(m_workerThreadSize, m_workerThreadSize, 30, TimeUnit.SECONDS, workQueue,
            new NamedThreadFactory("CassandraRequestWorker"), new ThreadPoolExecutor.CallerRunsPolicy());
    timer = MCScheduler.getMCScheduler();
    timer.scheduleWithFixedDelay(new CassandraChecker(), ONE_MINUTE, ONE_MINUTE, TimeUnit.MILLISECONDS);
}
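The key idea in this example is pairing a bounded LinkedBlockingQueue with CallerRunsPolicy, so the submitting thread throttles itself when the workers fall behind. A self-contained sketch of that pattern follows; the pool sizes and queue capacity are illustrative, not the values this project uses:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolDemo {
    public static void main(String[] args) throws InterruptedException {
        // Bounded work queue: at most 100 pending tasks.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(4, 4, 30, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(100),
                new ThreadPoolExecutor.CallerRunsPolicy());

        for (int i = 0; i < 1000; i++) {
            // When the queue is full, CallerRunsPolicy runs the task on the
            // submitting thread instead of rejecting it: built-in back pressure.
            pool.execute(() -> doWork());
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }

    private static void doWork() { /* placeholder workload */ }
}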
From source file:ch.trivadis.sample.twitter.StreamCollector.java
public void start() {
    System.out.println("start() ...");

    // Create an appropriately sized blocking queue
    BlockingQueue<String> queue = new LinkedBlockingQueue<String>(10000);

    // create the endpoint
    DefaultStreamingEndpoint endpoint = createEndpoint();
    System.out.println("endpoint created ...");
    endpoint.stallWarnings(false);

    // create an authentication
    Authentication auth = new OAuth1(consumerKey, consumerSecret, accessToken, accessTokenSecret);

    // Create a new BasicClient. By default gzip is enabled.
    client = new ClientBuilder().name("sampleExampleClient").hosts(Constants.STREAM_HOST).endpoint(endpoint)
            .authentication(auth).processor(new StringDelimitedProcessor(queue)).build();
    System.out.println("client created ...");

    // Create an executor service which will spawn threads to do the actual
    // work of parsing the incoming messages and calling the listeners on each message
    ExecutorService service = Executors.newFixedThreadPool(this.numberOfProcessingThreads);

    // Wrap our BasicClient with the twitter4j client
    Twitter4jStatusClient t4jClient = new Twitter4jStatusClient(client, queue, Lists.newArrayList(listener2),
            service);

    // Establish a connection
    t4jClient.connect();
    System.out.println("connection established ...");

    for (int threads = 0; threads < this.numberOfProcessingThreads; threads++) {
        // This must be called once per processing thread
        t4jClient.process();
        System.out.println("thread " + threads + " started ...");
    }
}
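Here the bounded queue decouples the streaming client (producer) from the processing threads (consumers). The following sketch distills that role without the hbc/twitter4j classes; the message format and thread count are placeholders. It runs until interrupted, as a streaming consumer would:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;

public class QueueBufferDemo {
    public static void main(String[] args) {
        // The stream client above plays producer; processing threads play consumer.
        BlockingQueue<String> queue = new LinkedBlockingQueue<String>(10000);

        // Producer: stands in for the streaming client writing raw messages.
        new Thread(() -> {
            try {
                for (int i = 0; ; i++) {
                    queue.put("message-" + i); // blocks while the 10k buffer is full
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }).start();

        // Consumers: stand in for the per-thread process() calls.
        ExecutorService service = Executors.newFixedThreadPool(4);
        for (int t = 0; t < 4; t++) {
            service.execute(() -> {
                try {
                    while (!Thread.currentThread().isInterrupted()) {
                        String msg = queue.take(); // blocks while the buffer is empty
                        handle(msg);
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
    }

    private static void handle(String msg) { /* parse + dispatch to listeners */ }
}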
From source file:com.bleum.canton.jms.scheduler.AbstractJMSScheduler.java
/**
 * Run the tasks using threads.
 *
 * @param tasks
 */
private void executeTasks(List<JMSTask> tasks) {
    int tSize = tasks.size();
    if (tSize <= 0) {
        return;
    }
    ThreadPoolExecutor executor = new ThreadPoolExecutor(1, threads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(maxTasksPerThread * threads));
    for (final JMSTask task : tasks) {
        final int mRetry = this.maxRetry;
        final int mAckRetry = this.maxAckRetry;
        runFutures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    sendMessage(task);
                    if (clientAck == JMSTaskConstant.NO_ACKNOWLEDGE) {
                        jmsTaskDao.updateTaskCompeleted(task.getId());
                    } else if (clientAck == JMSTaskConstant.CLIENT_ACKNOWLEDGE) {
                        // if sent maxRetry times, won't wait for acknowledge, just complete it.
                        jmsTaskDao.updateTaskProcessed(task, mAckRetry);
                    }
                } catch (Exception e) {
                    // if retried sending maxRetry times, make it fatal, and no longer retry.
                    task.setLastError(e.getClass().getSimpleName() + ":" + e.getMessage());
                    jmsTaskDao.updateErrorTask(task, mRetry);
                }
            }
        }));
    }
}
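A detail worth noting in this example: the queue is sized to maxTasksPerThread * threads and the default AbortPolicy is left in place, so submitting more tasks than the pool plus queue can hold would throw RejectedExecutionException. A condensed sketch of the submit-and-collect-futures shape, with the JMS/DAO calls replaced by a trivial payload:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class SubmitCollectDemo {
    public static void main(String[] args) throws Exception {
        int threads = 4;
        int maxTasksPerThread = 10;
        ThreadPoolExecutor executor = new ThreadPoolExecutor(1, threads, 60, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(maxTasksPerThread * threads));

        List<Future<?>> futures = new ArrayList<>();
        for (int i = 0; i < 20; i++) {
            final int id = i;
            // submit() wraps the Runnable in a Future so callers can await or cancel it.
            futures.add(executor.submit(() -> System.out.println("task " + id)));
        }
        for (Future<?> f : futures) {
            f.get(); // propagate any task failure
        }
        executor.shutdown();
    }
}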
From source file:fr.xebia.springframework.concurrent.ThreadPoolExecutorFactory.java
@Override
protected ThreadPoolExecutor createInstance() throws Exception {
    Assert.isTrue(this.corePoolSize >= 0, "corePoolSize must be greater than or equal to zero");
    Assert.isTrue(this.maximumPoolSize > 0, "maximumPoolSize must be greater than zero");
    Assert.isTrue(this.maximumPoolSize >= this.corePoolSize,
            "maximumPoolSize must be greater than or equal to corePoolSize");
    Assert.isTrue(this.queueCapacity >= 0, "queueCapacity must be greater than or equal to zero");

    CustomizableThreadFactory threadFactory = new CustomizableThreadFactory(this.beanName + "-");
    threadFactory.setDaemon(true);

    BlockingQueue<Runnable> blockingQueue;
    if (queueCapacity == 0) {
        blockingQueue = new SynchronousQueue<Runnable>();
    } else {
        blockingQueue = new LinkedBlockingQueue<Runnable>(queueCapacity);
    }
    ThreadPoolExecutor instance = new SpringJmxEnabledThreadPoolExecutor(corePoolSize, //
            maximumPoolSize, //
            keepAliveTimeInSeconds, //
            TimeUnit.SECONDS, //
            blockingQueue, //
            threadFactory, //
            rejectedExecutionHandlerClass.newInstance(), //
            new ObjectName("java.util.concurrent:type=ThreadPoolExecutor,name=" + beanName));
    return instance;
}
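The interesting decision here is using the configured queue capacity to switch between direct hand-off and buffering. A stripped-down sketch of that branch, with SpringJmxEnabledThreadPoolExecutor replaced by a plain ThreadPoolExecutor:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class QueueChoiceDemo {
    static ThreadPoolExecutor build(int corePoolSize, int maximumPoolSize, int queueCapacity) {
        // capacity 0: SynchronousQueue hands each task directly to a thread, so
        // the pool grows toward maximumPoolSize under load instead of queueing.
        // capacity > 0: LinkedBlockingQueue buffers, and threads beyond
        // corePoolSize are only created once the queue is full.
        BlockingQueue<Runnable> workQueue = (queueCapacity == 0)
                ? new SynchronousQueue<Runnable>()
                : new LinkedBlockingQueue<Runnable>(queueCapacity);
        return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, 60, TimeUnit.SECONDS, workQueue);
    }

    public static void main(String[] args) {
        ThreadPoolExecutor direct = build(2, 8, 0);    // direct hand-off
        ThreadPoolExecutor buffered = build(2, 8, 50); // bounded buffer
        direct.shutdown();
        buffered.shutdown();
    }
}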
From source file:org.yamj.core.service.ScanningScheduler.java
@Scheduled(initialDelay = 15000, fixedDelay = 45000)
public void scanArtwork() throws Exception {
    int maxThreads = configService.getIntProperty("yamj3.scheduler.artworkscan.maxThreads", 1);
    if (maxThreads <= 0) {
        if (!messageDisabledArtwork) {
            messageDisabledArtwork = Boolean.TRUE;
            LOG.info("Artwork scanning is disabled");
        }
        return;
    } else {
        messageDisabledArtwork = Boolean.FALSE;
    }

    int maxResults = configService.getIntProperty("yamj3.scheduler.artworkscan.maxResults", 30);
    List<QueueDTO> queueElements = artworkStorageService.getArtworkQueueForScanning(maxResults);
    if (CollectionUtils.isEmpty(queueElements)) {
        LOG.debug("No artwork found to scan");
        return;
    }

    LOG.info("Found {} artwork objects to process; scan with {} threads", queueElements.size(), maxThreads);
    BlockingQueue<QueueDTO> queue = new LinkedBlockingQueue<QueueDTO>(queueElements);

    ExecutorService executor = Executors.newFixedThreadPool(maxThreads);
    for (int i = 0; i < maxThreads; i++) {
        ArtworkScannerRunner worker = new ArtworkScannerRunner(queue, artworkScannerService);
        executor.execute(worker);
    }
    executor.shutdown();

    // run until all workers have finished
    while (!executor.isTerminated()) {
        try {
            TimeUnit.SECONDS.sleep(5);
        } catch (InterruptedException ignore) {
        }
    }
    LOG.debug("Finished artwork scanning");
}
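This is the one example on this page that uses the LinkedBlockingQueue(Collection) constructor from the heading: the pending work items are loaded into the queue up front and a fixed pool of workers drains it. A minimal sketch of that fan-out, with the QueueDTO/scanner types replaced by plain Strings:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class DrainDemo {
    public static void main(String[] args) throws InterruptedException {
        List<String> work = Arrays.asList("a", "b", "c", "d", "e");

        // Collection constructor: the queue starts out pre-filled with the work items.
        BlockingQueue<String> queue = new LinkedBlockingQueue<String>(work);

        int maxThreads = 2;
        ExecutorService executor = Executors.newFixedThreadPool(maxThreads);
        for (int i = 0; i < maxThreads; i++) {
            executor.execute(() -> {
                // poll() returns null once the queue is empty, ending the worker.
                String item;
                while ((item = queue.poll()) != null) {
                    System.out.println(Thread.currentThread().getName() + " processed " + item);
                }
            });
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}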
From source file:azkaban.execapp.FlowRunnerManager.java
private TrackingThreadPool createExecutorService(int nThreads) {
    boolean useNewThreadPool = azkabanProps.getBoolean(EXECUTOR_USE_BOUNDED_THREADPOOL_QUEUE, false);
    logger.info("useNewThreadPool: " + useNewThreadPool);

    if (useNewThreadPool) {
        threadPoolQueueSize = azkabanProps.getInt(EXECUTOR_THREADPOOL_WORKQUEUE_SIZE, nThreads);
        logger.info("workQueueSize: " + threadPoolQueueSize);

        // using a bounded queue for the work queue. The default rejection policy
        // {@ThreadPoolExecutor.AbortPolicy} is used
        TrackingThreadPool executor = new TrackingThreadPool(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(threadPoolQueueSize), this);
        return executor;
    } else {
        // the old way of using an unbounded task queue.
        // if the running tasks take a long time or get stuck, this queue
        // can grow very long.
        return new TrackingThreadPool(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(), this);
    }
}
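The trade-off being toggled above: an unbounded LinkedBlockingQueue can grow without limit, while a bounded one makes the executor throw RejectedExecutionException (the default AbortPolicy) once it is full. A small sketch of the rejection behavior you opt into with the bounded variant, using a plain ThreadPoolExecutor in place of TrackingThreadPool:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedRejectionDemo {
    public static void main(String[] args) {
        // 1 thread, and room for only 1 queued task.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(1));

        Runnable slow = () -> {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };

        pool.execute(slow); // occupies the single thread
        pool.execute(slow); // fills the single queue slot
        try {
            pool.execute(slow); // third task: rejected by the default AbortPolicy
        } catch (RejectedExecutionException e) {
            System.out.println("rejected: queue full");
        }
        pool.shutdown();
    }
}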
From source file:com.clustercontrol.plugin.impl.AsyncWorkerPlugin.java
@Override
public void activate() {
    for (String worker : workers) {
        _executorLock.put(worker, new Object());
        _counterLock.put(worker, new Object());

        synchronized (_executorLock.get(worker)) {
            String defaultClassPrefix = "com.clustercontrol.notify.factory.";
            log.info("defaultClassPrefix=" + defaultClassPrefix);
            log.info("worker=" + worker);

            String className = null;
            if (worker.equals(CREATE_JOB_SESSION_TASK_FACTORY)) {
                className = HinemosPropertyUtil.getHinemosPropertyStr(
                        _keyPrefix + worker + _keyPostfixFactoryClassName,
                        "com.clustercontrol.jobmanagement.factory." + worker);
            } else {
                className = HinemosPropertyUtil.getHinemosPropertyStr(
                        _keyPrefix + worker + _keyPostfixFactoryClassName, defaultClassPrefix + worker);
            }
            if (className == null || "".equals(className)) {
                log.warn("class not defined. (" + _keyPrefix + worker + _keyPostfixFactoryClassName + ")");
            }

            try {
                Class<?> clazz = Class.forName(className);
                // instantiate once and reuse, instead of calling newInstance() twice
                Object instance = clazz.newInstance();
                if (instance instanceof AsyncTaskFactory) {
                    AsyncTaskFactory taskFactory = (AsyncTaskFactory) instance;
                    _factoryMap.put(worker, taskFactory);
                    _nextTaskIdMap.put(worker, 0L);
                    log.info("initialized task id for " + worker + " : " + HinemosManagerMain._instanceId);
                } else {
                    log.warn("class is not sub class of AsyncTaskFactory. (" + className + ")");
                    continue;
                }
            } catch (ClassNotFoundException e) {
                log.warn("class not found. (" + className + ")", e);
                continue;
            } catch (Exception e) {
                log.warn("instantiation failure. (" + className + ")", e);
                continue;
            }

            int threadSize;
            if (worker.equals(NOTIFY_STATUS_TASK_FACTORY) || worker.equals(CREATE_JOB_SESSION_TASK_FACTORY)) {
                threadSize = HinemosPropertyUtil.getHinemosPropertyNum(
                        _keyPrefix + worker + _keyPostfixThreadSize, Long.valueOf(_threadSizeDefault))
                        .intValue();
            } else {
                threadSize = HinemosPropertyUtil.getHinemosPropertyNum(
                        _keyPrefix + worker + _keyPostfixThreadSize, Long.valueOf(8)).intValue();
            }
            int queueSize = HinemosPropertyUtil.getHinemosPropertyNum(
                    _keyPrefix + worker + _keyPostfixQueueSize, Long.valueOf(_queueSizeDefault)).intValue();

            log.info("activating asynchronous worker. (worker = " + worker + ", class = " + className
                    + ", threadSize = " + threadSize + ", queueSize = " + queueSize + ")");
            ThreadPoolExecutor executor = new MonitoredThreadPoolExecutor(threadSize, threadSize, 0L,
                    TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(queueSize),
                    new AsyncThreadFactory(worker), new TaskRejectionHandler(worker));
            _executorMap.put(worker, executor);

            long shutdownTimeout = HinemosPropertyUtil.getHinemosPropertyNum(
                    _keyPrefix + worker + _keyPostfixShutdownTimeout, Long.valueOf(_shutdownTimeoutDefault));
            _shutdownTimeoutMap.put(worker, shutdownTimeout);
        }
    }

    _initializedLatch.countDown();

    for (String worker : workers) {
        if (HinemosManagerMain._startupMode != StartupMode.MAINTENANCE) {
            log.info("executing persisted task : " + worker);
            runPersistedTask(worker);
        }
    }

    if (HinemosManagerMain._startupMode == StartupMode.MAINTENANCE) {
        log.info("skipped persisted task execution (startup mode is MAINTENANCE)");
        HinemosManagerMain.addStartupTask(new AsyncWorkerStartupTask());
    }
}
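Two pieces of this example are worth isolating: a named ThreadFactory per worker and a custom RejectedExecutionHandler instead of one of the built-in policies. A compact sketch of both, with the Hinemos classes replaced by minimal stand-ins (the log-and-drop handler is an assumption; the real handler may persist or retry the task):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedWorkerDemo {
    static class AsyncThreadFactory implements ThreadFactory {
        private final String worker;
        private final AtomicInteger n = new AtomicInteger(0);
        AsyncThreadFactory(String worker) { this.worker = worker; }
        @Override public Thread newThread(Runnable r) {
            // Name threads after the worker so thread dumps identify the pool.
            return new Thread(r, worker + "-" + n.incrementAndGet());
        }
    }

    static class TaskRejectionHandler implements RejectedExecutionHandler {
        @Override public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
            // Custom policy: log and drop here; a real handler could persist the task.
            System.err.println("task rejected, queue full: " + r);
        }
    }

    public static void main(String[] args) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(8, 8, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(100),
                new AsyncThreadFactory("NotifyWorker"), new TaskRejectionHandler());
        executor.execute(() -> System.out.println(Thread.currentThread().getName()));
        executor.shutdown();
    }
}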
From source file:cx.fbn.nevernote.threads.SyncRunner.java
public SyncRunner(String logname, String u, String i, String r, String b, String uid, String pswd,
        String cpswd) {
    logger = new ApplicationLogger(logname);

    noteSignal = new NoteSignal();
    status = new StatusSignal();
    tagSignal = new TagSignal();
    notebookSignal = new NotebookSignal();
    noteIndexSignal = new NoteIndexSignal();
    noteSignal = new NoteSignal();
    searchSignal = new SavedSearchSignal();
    syncSignal = new SyncSignal();
    resourceSignal = new NoteResourceSignal();
    limitSignal = new LimitSignal();

    resourceUrl = r;
    indexUrl = i;
    behaviorUrl = b;
    dbuid = uid;
    dburl = u;
    dbpswd = pswd;
    dbcpswd = cpswd;

    // this.setAutoDelete(false);
    isConnected = false;
    syncNeeded = false;
    authRefreshNeeded = false;
    keepRunning = true;
    idle = true;
    disableUploads = false;
    ignoreTags = new TreeSet<String>();
    ignoreNotebooks = new TreeSet<String>();
    ignoreLinkedNotebooks = new TreeSet<String>();
    // setAutoDelete(false);

    workQueue = new LinkedBlockingQueue<String>(MAX_QUEUED_WAITING);
}
From source file:com.coinblesk.server.service.WalletService.java
/**
 * Adds a listener that fires when the confidence of a transaction we are
 * watching changes due to a new block.
 *
 * After the transaction is {bitcoin.minconf} blocks deep, we remove the tx
 * from the database, as it is considered safe.
 *
 * The method should only be called after the blockchain has been fully
 * downloaded, since the handler is called for every block and transaction
 * we are watching, which would result in high CPU and memory consumption
 * and might exceed the JVM memory limit. After the download is complete,
 * blocks arrive only sporadically and this is not a problem.
 */
private void addConfidenceChangedHandler() {
    // Use a custom thread pool to speed up the processing of transactions.
    // The queue is blocking and limited to 10'000 entries to avoid memory
    // exhaustion. Once the threshold is reached, CallerRunsPolicy() forces
    // blocking behavior.
    ContextPropagatingThreadFactory factory = new ContextPropagatingThreadFactory("listenerFactory");
    Executor listenerExecutor = new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(),
            Runtime.getRuntime().availableProcessors(), 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(10000), factory, new ThreadPoolExecutor.CallerRunsPolicy());

    wallet.addTransactionConfidenceEventListener(listenerExecutor, (wallet, tx) -> {
        if (tx.getConfidence().getDepthInBlocks() >= appConfig.getMinConf()
                && !removed.contains(tx.getHash())) {
            LOG.debug("remove tx we got from the network {}", tx);
            try {
                transactionService.removeTransaction(tx);
            } catch (EmptyResultDataAccessException e) {
                LOG.debug("tx was not in tx table {}", tx);
            }
            try {
                txQueueService.removeTx(tx);
            } catch (EmptyResultDataAccessException e) {
                LOG.debug("tx was not in txqueue table {}", tx);
            }
            removed.add(tx.getHash());
        }
    });
}
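The pattern here is passing a bounded, CPU-sized executor to an event-listener API, so that bursty callbacks (one per block during chain download) cannot exhaust memory. A generic sketch of that setup, independent of the bitcoinj wallet types; the EventSource interface is a hypothetical stand-in for any API that accepts the executor its listeners run on:

import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ListenerExecutorDemo {
    // Hypothetical event source that accepts the executor its listeners run on.
    interface EventSource {
        void addListener(Executor executor, Runnable listener);
    }

    static Executor buildListenerExecutor() {
        int cpus = Runtime.getRuntime().availableProcessors();
        // At most 10_000 pending callbacks; beyond that, CallerRunsPolicy makes
        // the event-delivery thread run the callback itself, slowing the source.
        return new ThreadPoolExecutor(cpus, cpus, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>(10_000),
                new ThreadPoolExecutor.CallerRunsPolicy());
    }

    public static void main(String[] args) {
        Executor listenerExecutor = buildListenerExecutor();
        listenerExecutor.execute(() -> System.out.println("callback handled off the delivery thread"));
    }
}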
From source file:com.nextdoor.bender.handler.BaseHandler.java
/**
 * Method called by Handler implementations to process records.
 *
 * @param context Lambda invocation context.
 * @throws HandlerException
 */
private void processInternal(Context context) throws HandlerException {
    Stat runtime = new Stat("runtime.ns");
    runtime.start();

    Source source = this.getSource();
    DeserializerProcessor deser = source.getDeserProcessor();
    List<OperationProcessor> operations = source.getOperationProcessors();
    List<String> containsStrings = source.getContainsStrings();
    List<Pattern> regexPatterns = source.getRegexPatterns();

    this.getIpcService().setContext(context);
    Iterator<InternalEvent> events = this.getInternalEventIterator();

    /*
     * For logging purposes log when the function started running
     */
    this.monitor.invokeTimeNow();

    AtomicLong eventCount = new AtomicLong(0);
    AtomicLong oldestArrivalTime = new AtomicLong(System.currentTimeMillis());
    AtomicLong oldestOccurrenceTime = new AtomicLong(System.currentTimeMillis());

    /*
     * eventQueue allows for InternalEvents to be pulled from the Iterator and published to a
     * stream. A Thread is created that loops through events in the iterator and offers them to the
     * queue. Note that offering will be blocked if the queue is full (back pressure being applied).
     * When the iterator reaches the end (hasNext = false) the queue is closed.
     */
    this.eventQueue = new Queue<InternalEvent>(new LinkedBlockingQueue<InternalEvent>(this.queueSize));

    /*
     * Thread will live for duration of invocation and supply Stream with events.
     */
    new Thread(new Runnable() {
        @Override
        public void run() {
            while (events.hasNext()) {
                try {
                    eventQueue.offer(events.next());
                } catch (Queue.ClosedQueueException e) {
                    break;
                }
            }
            try {
                eventQueue.close();
            } catch (Queue.ClosedQueueException e) {
            }
        }
    }).start();

    Stream<InternalEvent> input = this.eventQueue.jdkStream();

    /*
     * Filter out raw events
     */
    Stream<InternalEvent> filtered = input.filter(ievent -> {
        eventCount.incrementAndGet();
        String eventStr = ievent.getEventString();

        /*
         * Apply String contains filters before deserialization
         */
        for (String containsString : containsStrings) {
            if (eventStr.contains(containsString)) {
                return false;
            }
        }

        /*
         * Apply regex patterns before deserialization
         */
        for (Pattern regexPattern : regexPatterns) {
            Matcher m = regexPattern.matcher(eventStr);
            if (m.find()) {
                return false;
            }
        }
        return true;
    });

    /*
     * Deserialize
     */
    Stream<InternalEvent> deserialized = filtered.map(ievent -> {
        DeserializedEvent data = deser.deserialize(ievent.getEventString());
        if (data == null || data.getPayload() == null) {
            logger.warn("Failed to deserialize: " + ievent.getEventString());
            return null;
        }
        ievent.setEventObj(data);
        return ievent;
    }).filter(Objects::nonNull);

    /*
     * Perform Operations
     */
    Stream<InternalEvent> operated = deserialized;
    for (OperationProcessor operation : operations) {
        operated = operation.perform(operated);
    }

    /*
     * Serialize
     */
    Stream<InternalEvent> serialized = operated.map(ievent -> {
        try {
            String raw = this.ser.serialize(this.wrapper.getWrapped(ievent));
            ievent.setSerialized(raw);
            return ievent;
        } catch (SerializationException e) {
            return null;
        }
    }).filter(Objects::nonNull);

    /*
     * Transport
     */
    serialized.forEach(ievent -> {
        /*
         * Update times
         */
        updateOldest(oldestArrivalTime, ievent.getArrivalTime());
        updateOldest(oldestOccurrenceTime, ievent.getEventTime());

        try {
            this.getIpcService().add(ievent);
        } catch (TransportException e) {
            logger.warn("error adding event", e);
        }
    });

    /*
     * Wait for transporters to finish
     */
    try {
        this.getIpcService().flush();
    } catch (TransportException e) {
        throw new HandlerException("encountered TransportException while shutting down ipcService", e);
    } catch (InterruptedException e) {
        throw new HandlerException("thread was interrupted while shutting down ipcService", e);
    } finally {
        String evtSource = this.getSourceName();
        runtime.stop();

        if (!this.skipWriteStats) {
            writeStats(eventCount.get(), oldestArrivalTime.get(), oldestOccurrenceTime.get(), evtSource,
                    runtime);
        }

        if (logger.isTraceEnabled()) {
            getGCStats();
        }
    }
}
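The essential mechanism here, independent of Bender's Queue wrapper: a bounded LinkedBlockingQueue sits between the event iterator and the processing pipeline, so a slow pipeline blocks the producer thread (back pressure) instead of buffering the whole input in memory. A minimal sketch of that shape, using a poison-pill sentinel in place of the wrapper's close():

import java.util.Iterator;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.stream.Stream;

public class BackPressureDemo {
    private static final String POISON = "__END__"; // stands in for Queue.close()

    public static void main(String[] args) {
        Iterator<String> events = java.util.List.of("e1", "e2", "e3").iterator();
        BlockingQueue<String> queue = new LinkedBlockingQueue<>(2); // small buffer

        // Producer: drains the iterator; put() blocks while the pipeline lags.
        new Thread(() -> {
            try {
                while (events.hasNext()) {
                    queue.put(events.next());
                }
                queue.put(POISON); // signal end of input
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }).start();

        // Consumer: a sequential stream fed lazily from the queue until the
        // sentinel arrives.
        Stream.generate(() -> {
            try {
                return queue.take();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return POISON;
            }
        }).takeWhile(s -> !POISON.equals(s)).forEach(System.out::println);
    }
}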