List of usage examples for java.util.concurrent.LinkedBlockingQueue
public LinkedBlockingQueue()
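Before the project-specific examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) of what the no-argument constructor provides: a FIFO blocking queue whose capacity defaults to Integer.MAX_VALUE, so put() effectively never blocks while take() blocks until an element is available.

import java.util.concurrent.LinkedBlockingQueue;

public class LinkedBlockingQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // No capacity argument: the queue is effectively unbounded (Integer.MAX_VALUE).
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<String>();

        // Producer thread: put() would block only if the queue were full.
        Thread producer = new Thread(() -> {
            try {
                queue.put("hello");
                queue.put("world");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        // Consumer: take() blocks until an element becomes available.
        System.out.println(queue.take());
        System.out.println(queue.take());
        producer.join();
    }
}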
From source file:eu.stratosphere.nephele.services.iomanager.IOManager.java
/**
 * Creates a block channel writer that writes to the given channel. The writer writes asynchronously
 * (write-behind), accepting write requests, carrying them out at some later time and returning the
 * written segment to its return queue afterwards.
 *
 * @param channelID The descriptor for the channel to write to.
 * @return A block channel writer that writes to the given channel.
 * @throws IOException Thrown, if the channel for the writer could not be opened.
 */
public BlockChannelWriter createBlockChannelWriter(Channel.ID channelID) throws IOException {
    if (this.isClosed) {
        throw new IllegalStateException("I/O-Manager is closed.");
    }

    return new BlockChannelWriter(channelID, this.writers[channelID.getThreadNum()].requestQueue,
            new LinkedBlockingQueue<MemorySegment>(), 1);
}
From source file:com.spotify.heroic.HeroicCore.java
/**
 * Setup a fixed thread pool executor that correctly handles unhandled exceptions.
 *
 * @param threads Number of threads to configure.
 */
private ExecutorService setupExecutor(final int threads) {
    return new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(),
            new ThreadFactoryBuilder().setNameFormat("heroic-core-%d")
                    .setUncaughtExceptionHandler(uncaughtExceptionHandler).build()) {
        @Override
        protected void afterExecute(Runnable r, Throwable t) {
            super.afterExecute(r, t);

            if (t == null && (r instanceof Future<?>)) {
                try {
                    ((Future<?>) r).get();
                } catch (CancellationException e) {
                    t = e;
                } catch (ExecutionException e) {
                    t = e.getCause();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }

            if (t != null) {
                if (log.isErrorEnabled()) {
                    log.error("Unhandled exception caught in core executor", t);
                    log.error("Exiting (code=2)");
                } else {
                    System.err.println("Unhandled exception caught in core executor");
                    System.err.println("Exiting (code=2)");
                    t.printStackTrace(System.err);
                }

                System.exit(2);
            }
        }
    };
}
From source file:com.all.messengine.impl.DefaultMessEngine.java
public DefaultMessEngine() {
    queue = new LinkedBlockingQueue<Message<?>>();
    messageExecutor = Executors.newCachedThreadPool();
    engineExecutor = Executors.newSingleThreadExecutor();
    listeners = new HashMap<String, List<MessageListener<? extends Message<?>>>>();
    responseManager = new ResponseManager();
    shuttingDown = new AtomicBoolean(false);
}
From source file:org.archive.crawler.frontier.BdbFrontier.java
@Override
protected void initOtherQueues() throws DatabaseException {
    boolean recycle = (recoveryCheckpoint != null);

    // tiny risk of OutOfMemoryError: if giant number of snoozed
    // queues all wake-to-ready at once
    readyClassQueues = new LinkedBlockingQueue<String>();

    inactiveQueuesByPrecedence = new ConcurrentSkipListMap<Integer, Queue<String>>();

    retiredQueues = bdb.getStoredQueue("retiredQueues", String.class, recycle);

    // primary snoozed queues
    snoozedClassQueues = new DelayQueue<DelayedWorkQueue>();
    // just in case: overflow for extreme situations
    snoozedOverflow = bdb.getStoredMap("snoozedOverflow", Long.class, DelayedWorkQueue.class, true, false);

    this.futureUris = bdb.getStoredMap("futureUris", Long.class, CrawlURI.class, true,
            recoveryCheckpoint != null);

    // initialize master map in which other queues live
    this.pendingUris = createMultipleWorkQueues();
}
From source file:org.apache.axis2.transport.jms.JMSListener.java
/**
 * Start this JMS Listener (Transport Listener)
 *
 * @throws AxisFault
 */
public void start() throws AxisFault {
    // create thread pool of workers
    workerPool = new ThreadPoolExecutor(1, WORKERS_MAX_THREADS, WORKER_KEEP_ALIVE, TIME_UNIT,
            new LinkedBlockingQueue(),
            new org.apache.axis2.util.threadpool.DefaultThreadFactory(
                    new ThreadGroup("JMS Worker thread group"), "JMSWorker"));

    Iterator iter = connectionFactories.values().iterator();
    while (iter.hasNext()) {
        JMSConnectionFactory conFac = (JMSConnectionFactory) iter.next();
        JMSMessageReceiver msgRcvr = new JMSMessageReceiver(conFac, workerPool, configCtx);

        try {
            conFac.listen(msgRcvr);
        } catch (JMSException e) {
            handleException("Error starting connection factory : " + conFac.getName(), e);
        }
    }
}
From source file:com.all.backend.web.services.LocalPushService.java
public void pushMessage(long mail, AllMessage<?> message) {
    BlockingQueue<AllMessage<?>> queue;
    synchronized (messages) {
        queue = messages.get(mail);
        if (queue == null) {
            queue = new LinkedBlockingQueue<AllMessage<?>>();
            messages.put(mail, queue);
        }
        try {
            // log.info("pushing message to " + mail + " content:\n" + message);
            queue.put(message);
        } catch (InterruptedException e) {
            log.error(e, e);
        }
    }
}
From source file:com.taobao.datax.engine.schedule.Engine.java
private NamedThreadPoolExecutor initReaderPool(JobConf jobConf, StoragePool sp) throws Exception {
    JobPluginConf readerJobConf = jobConf.getReaderConf();
    String pluginName = readerJobConf.getName();
    logger.info("pluginName:" + pluginName);
    PluginParam sparam = readerJobConf.getPluginParams();

    // if the hdfs reader carries a hive_sql parameter, switch it to the hive reader
    logger.info("replace hdfs reader to hive reader check");
    if ("hdfsreader".equals(pluginName) && StringUtils.isNotBlank(sparam.getValue("hive_sql", ""))) {
        pluginName = "hivereader";
        //readerConf.setName("hivereader");
        //readerConf.setClassName("com.taobao.datax.plugins.reader.hivereader.HiveReader");
        logger.info("replace hdfs reader to hive reader");
    }

    PluginConf readerConf = pluginReg.get(pluginName);
    String pluginPath = readerConf.getPath();
    if (StringUtils.isEmpty(pluginPath)) {
        pluginPath = engineConf.getPluginRootPath() + "reader/" + pluginName;
        readerConf.setPath(pluginPath);
    }
    logger.info("path:" + pluginPath);

    Class<?> myClass = pluginClassCache.get(pluginPath);
    if (myClass == null) {
        logger.info(String.format("DataX Reader %s try to load path %s .", readerConf.getName(), pluginPath));
        /*JarLoader jarLoader = new JarLoader(new String[] { pluginPath });*/
        JarLoader jarLoader = getJarLoader(pluginPath);
        myClass = jarLoader.loadClass(readerConf.getClassName());
        pluginClassCache.put(pluginPath, myClass);
    }

    ReaderWorker readerWorkerForPreAndPost = new ReaderWorker(readerConf, myClass);
    readerWorkerForPreAndPost.setParam(sparam);
    readerWorkerForPreAndPost.init();

    logger.info("DataX Reader prepare work begins .");
    int code = readerWorkerForPreAndPost.prepare(sparam);
    if (code != 0) {
        throw new DataExchangeException("DataX Reader prepare work failed!");
    }
    logger.info("DataX Reader prepare work ends .");

    logger.info("DataX Reader split work begins .");
    List<PluginParam> readerSplitParams = readerWorkerForPreAndPost.doSplit(sparam);
    logger.info(String.format("DataX Reader splits this job into %d sub-jobs", readerSplitParams.size()));
    logger.info("DataX Reader split work ends .");

    int concurrency = readerJobConf.getConcurrency();
    if (concurrency <= 0 || concurrency > MAX_CONCURRENCY) {
        throw new IllegalArgumentException(
                String.format("Reader concurrency set to be %d, make sure it must be between [%d, %d] .",
                        concurrency, 1, MAX_CONCURRENCY));
    }
    concurrency = Math.min(concurrency, readerSplitParams.size());
    if (concurrency <= 0) {
        concurrency = 1;
    }
    readerJobConf.setConcurrency(concurrency);

    NamedThreadPoolExecutor readerPool = new NamedThreadPoolExecutor(readerJobConf.getId(),
            readerJobConf.getConcurrency(), readerJobConf.getConcurrency(), 1L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());

    readerPool.setPostWorker(readerWorkerForPreAndPost);
    readerPool.setParam(sparam);
    readerPool.prestartAllCoreThreads();

    logger.info("DataX Reader starts to read data .");
    for (PluginParam param : readerSplitParams) {
        ReaderWorker readerWorker = new ReaderWorker(readerConf, pluginClassCache.get(pluginPath));
        readerWorker.setParam(param);
        readerWorker.setLineSender(new BufferedLineExchanger(null, sp.getStorageForReader(),
                this.engineConf.getStorageBufferSize()));
        //readerPool.execute(readerWorker);
        readerPool.submitJob(readerWorker);
        readerMonitorPool.monitor(readerWorker);
    }

    return readerPool;
}
From source file:metlos.executors.batch.BatchExecutorTest.java
private long rapidFireSimpleExecutorTime(final int taskDurationMillis, int nofJobs, int nofThreads)
        throws Exception {
    ThreadPoolExecutor ex = new ThreadPoolExecutor(nofThreads, nofThreads, 0, TimeUnit.NANOSECONDS,
            new LinkedBlockingQueue<Runnable>());

    List<Callable<Void>> payload = getCallables(taskDurationMillis, nofJobs);

    return measureExecutionTime(System.currentTimeMillis(), ex.invokeAll(payload));
}
From source file:co.beem.project.beem.FacebookTextService.java
/**
 * {@inheritDoc}
 */
@Override
public void onCreate() {
    super.onCreate();
    smackAndroid = SmackAndroid.init(FacebookTextService.this);
    StrictMode.ThreadPolicy policy = new StrictMode.ThreadPolicy.Builder().permitAll().build();
    StrictMode.setThreadPolicy(policy);

    savingMessageQueue = new LinkedBlockingQueue<co.beem.project.beem.service.Message>();
    loadingUserAvatarQueue = new LinkedBlockingDeque<String>();
    stateChangeQueue = new LinkedBlockingQueue<User>();

    databaseHelper = getHelper();
    try {
        setupDatabaseConnection();
    } catch (Exception e) {
        e.printStackTrace();
    }

    isRunning = true;
    sessionManager = new SessionManager(FacebookTextService.this);
    savingMessageOnBackgroundThread(new SavingNewMessageTask());
    savingMessageOnBackgroundThread(new UpdateUserStateTask());

    handler = new Handler();
    registerReceiver(mReceiver, new IntentFilter(ConnectivityManager.CONNECTIVITY_ACTION));
    registerReceiver(mOnOffReceiver, new IntentFilter(FacebookTextApplication.GET_AVATAR));
    registerReceiver(mOnOffReceiver, new IntentFilter(FacebookTextApplication.UPDATE_USER_STATE));
    registerReceiver(mOnOffReceiver,
            new IntentFilter(FacebookTextApplication.PUSH_NOTIFICATION_FAVORITE_ONLINE));

    mSettings = PreferenceManager.getDefaultSharedPreferences(this);
    mSettings.registerOnSharedPreferenceChangeListener(mPreferenceListener);
    if (mSettings.getBoolean(FacebookTextApplication.USE_AUTO_AWAY_KEY, false)) {
        mOnOffReceiverIsRegistered = true;
        registerReceiver(mOnOffReceiver, new IntentFilter(Intent.ACTION_SCREEN_OFF));
        registerReceiver(mOnOffReceiver, new IntentFilter(Intent.ACTION_SCREEN_ON));
        // registerReceiver(sma, filter)
    }

    String tmpJid = mSettings.getString(FacebookTextApplication.ACCOUNT_USERNAME_KEY, "").trim();
    mLogin = StringUtils.parseName(tmpJid);
    boolean useSystemAccount = mSettings.getBoolean(FacebookTextApplication.USE_SYSTEM_ACCOUNT_KEY, false);

    mPort = DEFAULT_XMPP_PORT;
    mService = StringUtils.parseServer(tmpJid);
    mHost = mService;
    initMemorizingTrustManager();

    if (mSettings.getBoolean(FacebookTextApplication.ACCOUNT_SPECIFIC_SERVER_KEY, false)) {
        mHost = mSettings.getString(FacebookTextApplication.ACCOUNT_SPECIFIC_SERVER_HOST_KEY, "").trim();
        if ("".equals(mHost))
            mHost = mService;
        String tmpPort = mSettings.getString(FacebookTextApplication.ACCOUNT_SPECIFIC_SERVER_PORT_KEY, "5222");
        if (!"".equals(tmpPort))
            mPort = Integer.parseInt(tmpPort);
    }

    if (mSettings.getBoolean(FacebookTextApplication.FULL_JID_LOGIN_KEY, false) || "gmail.com".equals(mService)
            || "googlemail.com".equals(mService) || useSystemAccount) {
        mLogin = tmpJid;
    }

    configure(ProviderManager.getInstance());

    mNotificationManager = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);

    Roster.setDefaultSubscriptionMode(SubscriptionMode.manual);
    mBind = new XmppFacade(this);
    savingMessageOnBackgroundThread(new DownloadAvatarTask());
    Log.d(TAG, "Create FacebookTextService \t id: " + mLogin + " \t host: " + mHost + "\tmPort" + mPort
            + "\t service" + mService);
}
From source file:com.twitter.distributedlog.auditor.DLAuditor.java
private void collectLedgersFromAllocator(final URI uri,
        final com.twitter.distributedlog.DistributedLogManagerFactory factory,
        final List<String> allocationPaths, final Set<Long> ledgers) throws IOException {
    final LinkedBlockingQueue<String> poolQueue = new LinkedBlockingQueue<String>();
    for (String allocationPath : allocationPaths) {
        String rootPath = uri.getPath() + "/" + allocationPath;
        try {
            List<String> pools = getZooKeeperClient(factory).get().getChildren(rootPath, false);
            for (String pool : pools) {
                poolQueue.add(rootPath + "/" + pool);
            }
        } catch (KeeperException e) {
            throw new ZKException("Failed to get list of pools from " + rootPath, e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new DLInterruptedException("Interrupted on getting list of pools from " + rootPath, e);
        }
    }
    logger.info("Collecting ledgers from allocators for {} : {}", uri, poolQueue);
    executeAction(poolQueue, 10, new Action<String>() {
        @Override
        public void execute(String poolPath) throws IOException {
            try {
                collectLedgersFromPool(poolPath);
            } catch (InterruptedException e) {
                throw new DLInterruptedException(
                        "Interrupted on collecting ledgers from allocation pool " + poolPath, e);
            } catch (KeeperException e) {
                throw new ZKException("Failed to collect ledgers from allocation pool " + poolPath, e.code());
            }
        }

        private void collectLedgersFromPool(String poolPath)
                throws InterruptedException, ZooKeeperClient.ZooKeeperConnectionException, KeeperException {
            List<String> allocators = getZooKeeperClient(factory).get().getChildren(poolPath, false);
            for (String allocator : allocators) {
                String allocatorPath = poolPath + "/" + allocator;
                byte[] data = getZooKeeperClient(factory).get().getData(allocatorPath, false, new Stat());
                if (null != data && data.length > 0) {
                    try {
                        long ledgerId = DLUtils.bytes2LedgerId(data);
                        synchronized (ledgers) {
                            ledgers.add(ledgerId);
                        }
                    } catch (NumberFormatException nfe) {
                        logger.warn("Invalid ledger found in allocator path {} : ", allocatorPath, nfe);
                    }
                }
            }
        }
    });
    logger.info("Collected ledgers from allocators for {}.", uri);
}