List of usage examples for java.util.concurrent.LinkedBlockingDeque
public LinkedBlockingDeque()
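The no-argument constructor creates an unbounded deque (capacity Integer.MAX_VALUE), which is what most of the examples below use. As a quick orientation before the real-world examples, here is a minimal, self-contained sketch of the core blocking API; the class and variable names are illustrative:

import java.util.concurrent.LinkedBlockingDeque;

public class DequeBasics {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingDeque<String> deque = new LinkedBlockingDeque<>(); // unbounded

        deque.putLast("a");  // enqueue at the tail (FIFO use)
        deque.putFirst("b"); // enqueue at the head (LIFO / priority use)

        String head = deque.takeFirst(); // "b" - blocks while the deque is empty
        String tail = deque.takeLast();  // "a"
        System.out.println(head + " " + tail);
    }
}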
From source file:fr.gael.dhus.util.http.ParallelizedDownloadManager.java
/**
 * Creates a new Manager.
 *
 * @param core_pool_size  the number of threads to keep in the pool, even if they are idle.
 * @param max_pool_size   the maximum number of threads to allow in the pool.
 * @param keep_alive      when the number of threads is greater than the core, this is the
 *                        maximum time that excess idle threads will wait for new tasks before
 *                        terminating.
 * @param time_unit       the time unit for the keepAliveTime argument.
 * @param client_producer a custom http client provider to use custom http clients. may be null.
 * @param temp_dir        base path for incomplete files (temporary directory). may be null.
 */
public ParallelizedDownloadManager(int core_pool_size, int max_pool_size, long keep_alive,
        TimeUnit time_unit, HttpAsyncClientProducer client_producer, Path temp_dir) {
    BlockingQueue<Runnable> work_queue = new LinkedBlockingDeque<>();
    this.threadPool = new ThreadPoolExecutor(core_pool_size, max_pool_size, keep_alive, time_unit,
            work_queue, new DaemonThreadFactory());
    if (client_producer != null) {
        this.http_client = new InterruptibleHttpClient(client_producer);
    } else {
        this.http_client = new InterruptibleHttpClient();
    }
    if (temp_dir != null) {
        if (!Files.isDirectory(temp_dir)) {
            throw new IllegalArgumentException("Given temp dir is not a dir");
        }
        this.tempDir = temp_dir;
    } else {
        this.tempDir = null;
    }
}
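One subtlety in this pattern: because the work queue is unbounded, ThreadPoolExecutor will never grow past core_pool_size (it only spawns extra threads when the queue rejects an offer), so max_pool_size is effectively unused here. A minimal sketch of the bounded alternative, with illustrative names and sizes, assuming you actually want the pool to grow:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedQueuePool {
    public static void main(String[] args) {
        // With a *bounded* deque, the pool can grow toward maximumPoolSize once
        // the queue fills up; with an unbounded one, offer() never fails and the
        // pool stays at corePoolSize. Sizes below are illustrative.
        BlockingQueue<Runnable> workQueue = new LinkedBlockingDeque<>(100);
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2,                    // corePoolSize
                8,                    // maximumPoolSize, reachable only because the queue is bounded
                60, TimeUnit.SECONDS, // keep-alive for excess idle threads
                workQueue);
        pool.submit(() -> System.out.println("task ran on " + Thread.currentThread().getName()));
        pool.shutdown();
    }
}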
From source file:org.apache.hadoop.raid.DirectoryTraversal.java
public DirectoryTraversal(String friendlyName, Collection<Path> roots, FileSystem fs, Filter filter,
        int numThreads, boolean doShuffle, boolean allowUseStandby, boolean checkLeafDir) throws IOException {
    this.output = new ArrayBlockingQueue<FileStatus>(OUTPUT_QUEUE_SIZE);
    this.directories = new LinkedBlockingDeque<Path>();
    this.fs = fs;
    if (ENABLE_AVATAR_STANDBY && allowUseStandby && fs instanceof DistributedAvatarFileSystem) {
        avatarFs = (DistributedAvatarFileSystem) fs;
    } else {
        avatarFs = null;
    }
    this.filter = filter;
    this.totalDirectories = new AtomicInteger(roots.size());
    this.processors = new Processor[numThreads];
    this.activeThreads = new AtomicInteger(numThreads);
    this.doShuffle = doShuffle;
    this.allowStandby = allowUseStandby;
    if (doShuffle) {
        List<Path> toShuffleAndAdd = new ArrayList<Path>();
        toShuffleAndAdd.addAll(roots);
        Collections.shuffle(toShuffleAndAdd);
        this.directories.addAll(toShuffleAndAdd);
    } else {
        this.directories.addAll(roots);
    }
    LOG.info("Starting with directories:" + roots.toString() + " numThreads:" + numThreads);
    if (roots.isEmpty()) {
        try {
            output.put(FINISH_TOKEN);
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
        return;
    }
    for (int i = 0; i < processors.length; ++i) {
        if (checkLeafDir) {
            processors[i] = new LeafDirectoryProcessor();
        } else {
            processors[i] = new Processor();
        }
        processors[i].setName(friendlyName + i);
    }
    for (int i = 0; i < processors.length; ++i) {
        processors[i].start();
    }
}
From source file:com.mgmtp.perfload.core.console.status.StatusHandler.java
private void addThreadActivity(final Integer daemonId, final Integer processId, final int activeThreads) {
    StatusInfoKey key = new StatusInfoKey(daemonId, processId, null);
    Deque<ThreadActivity> taDeque = threadActivities.get(key);
    if (taDeque == null) {
        Deque<ThreadActivity> newDeque = new LinkedBlockingDeque<>();
        taDeque = threadActivities.putIfAbsent(key, newDeque);
        if (taDeque == null) {
            taDeque = newDeque;
        }
    }
    String timestamp = TIMESTAMP_FORMAT.format(Calendar.getInstance());
    taDeque.offerLast(new ThreadActivity(daemonId, processId, activeThreads, timestamp));
}
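The null-check plus putIfAbsent dance above is the classic pre-Java-8 idiom for lazily creating a per-key deque without losing a race to another thread. On Java 8+, assuming the map is a ConcurrentMap as it appears to be here, the same effect is a one-liner with computeIfAbsent; the map and key below are illustrative stand-ins:

import java.util.Deque;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.LinkedBlockingDeque;

public class ComputeIfAbsentDemo {
    public static void main(String[] args) {
        // illustrative stand-in for the threadActivities field above
        ConcurrentMap<String, Deque<Integer>> activities = new ConcurrentHashMap<>();

        // atomically creates the deque on first use, then appends at the tail
        activities.computeIfAbsent("daemon-1", k -> new LinkedBlockingDeque<>())
                  .offerLast(42);

        System.out.println(activities.get("daemon-1")); // [42]
    }
}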
From source file:com.edgenius.wiki.webapp.admin.action.AdvanceAdminAction.java
public String redeployShell() {
    User anonymous = userReadingService.getUserByName(null);
    // On the shell side, a page request also verifies whether the space exists; if not, it issues
    // a space request itself, so there is no need to send space requests here.
    final LinkedBlockingDeque<String[]> pageQ = new LinkedBlockingDeque<String[]>();
    // This makes GW request Shell.key again - so if the Shell site cleans its data, re-deploy
    // still works. If the shell data is still there, it returns the same key, and the GW
    // instanceID and address won't change.
    Shell.key = null;
    new Thread(new Runnable() {
        public void run() {
            int pageCount = 0;
            do {
                try {
                    String[] str = pageQ.take();
                    if (StringUtils.equalsIgnoreCase(SharedConstants.SYSTEM_SPACEUNAME, str[0]))
                        break;
                    Shell.notifyPageCreate(str[0], str[1], true);
                    pageCount++;
                    // don't flood Shell with too many concurrent requests!
                    Thread.sleep(1000);
                    if ((pageCount % QUOTA_RESET_COUNT) == 0) {
                        log.warn("Maximum page shell request count arrived {}, sleep another 24 hours",
                                QUOTA_RESET_COUNT);
                        // Google App Engine has a quota limitation: sleep 24 hours to wait for the
                        // next quota reset.
                        Thread.sleep(24 * 3600 * 1000);
                        log.warn("Maximum page shell request sleep is end, restart page request process");
                    }
                } catch (InterruptedException e) {
                    log.error("Thread interrupted for shell request", e);
                }
            } while (true);
            ActivityLog activity = new ActivityLog();
            activity.setType(ActivityType.Type.SYSTEM_EVENT.getCode());
            activity.setSubType(ActivityType.SubType.REDEPLOY_SHELL.getCode());
            activity.setTgtResourceName("SHELL-DEPLOYED"); // hardcoded
            activity.setCreatedDate(new Date());
            activityLog.save(activity);
            log.info("Shell page request is done for {} pages", pageCount);
        }
    }).start();

    int pageCount = 0;
    long start = System.currentTimeMillis();
    log.info("Shell redeploy request starting...");
    List<Space> spaces = spaceDAO.getObjects();
    if (spaces != null) {
        for (Space space : spaces) {
            if (space.isPrivate() || space.containExtLinkType(Space.EXT_LINK_SHELL_DISABLED)
                    || StringUtils.equalsIgnoreCase(SharedConstants.SYSTEM_SPACEUNAME, space.getUnixName()))
                continue;
            String spaceUname = space.getUnixName();
            List<String> pages = pageDAO.getPagesUuidInSpace(spaceUname);
            if (pages != null) {
                for (String puuid : pages) {
                    if (!securityService.isAllowPageReading(spaceUname, puuid, anonymous))
                        continue;
                    try {
                        pageQ.put(new String[] { spaceUname, puuid });
                        pageCount++;
                    } catch (InterruptedException e) {
                        log.error("Thread interrupted for shell Page Queue", e);
                    }
                }
            }
        }
        log.info("All shell request put into queue. Pages{}; Takes {}s",
                new Object[] { pageCount, (System.currentTimeMillis() - start) / 1000 });
    }
    try {
        pageQ.put(new String[] { SharedConstants.SYSTEM_SPACEUNAME, "" });
    } catch (InterruptedException e) {
        log.error("Thread interrupted for shell Page Queue - end sign", e);
    }
    getRequest().setAttribute("message", messageService.getMessage("redeploy.shell.invoked"));
    return MESSAGE;
}
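The sentinel element ({ SYSTEM_SPACEUNAME, "" }) that terminates the consumer loop above is the classic poison-pill shutdown pattern for blocking queues. A minimal, self-contained sketch of the same idea; the POISON constant and item names are illustrative, not from the source:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;

public class PoisonPillDemo {
    private static final String POISON = "__STOP__";

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new LinkedBlockingDeque<>();
        Thread consumer = new Thread(() -> {
            try {
                while (true) {
                    String item = queue.take();     // blocks until an item is available
                    if (POISON.equals(item)) break; // sentinel: stop consuming
                    System.out.println("processed " + item);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.start();
        queue.put("page-1");
        queue.put("page-2");
        queue.put(POISON); // signal end of stream
        consumer.join();
    }
}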
From source file:com.emc.ecs.sync.source.FilesystemSource.java
@Override
public SyncEstimate createEstimate() {
    SyncEstimate estimate = new SyncEstimate();
    final EnhancedThreadPoolExecutor dirExecutor = new EnhancedThreadPoolExecutor(8,
            new LinkedBlockingDeque<Runnable>(), "dirEstimator");
    final EnhancedThreadPoolExecutor fileExecutor = new EnhancedThreadPoolExecutor(8,
            new LinkedBlockingDeque<Runnable>(100), "fileEstimator");
    dirExecutor.submit(new EstimateTask(rootFile, estimate, dirExecutor, fileExecutor));
    new Thread(new Runnable() {
        @Override
        public void run() {
            while (true) {
                try {
                    if (dirExecutor.getActiveCount() + fileExecutor.getActiveCount() == 0) {
                        dirExecutor.shutdown();
                        fileExecutor.shutdown();
                        break;
                    }
                    Thread.sleep(1000);
                } catch (Throwable t) {
                    log.warn("unexpected exception in estimation monitor", t);
                }
            }
        }
    }).start();
    return estimate;
}
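Note the two constructors used above: the directory executor's deque is unbounded, while the file executor's is capped at 100, presumably so that pending file tasks apply backpressure to directory scanning. A minimal sketch of how a bounded LinkedBlockingDeque behaves at capacity; the sizes are illustrative:

import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;

public class BackpressureDemo {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingDeque<Runnable> deque = new LinkedBlockingDeque<>(2);
        Runnable task = () -> {};
        deque.put(task);                       // ok, 1 of 2 (put would block if full)
        deque.put(task);                       // ok, 2 of 2
        System.out.println(deque.offer(task)); // false: full, returns immediately
        System.out.println(deque.offer(task, 100, TimeUnit.MILLISECONDS)); // false after waiting
    }
}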
From source file:com.amazon.alexa.avs.http.AVSClient.java
/**
 * Constructor that takes a host, a {@link MultipartParserConsumer}, and a {@link SslContextFactory}.
 * The provided {@link SslContextFactory} may allow bypassing server certificates, or handling
 * TLS/SSL in different ways.
 *
 * @param host
 *            The URL of the AVS host.
 * @param multipartParserConsumer
 *            The {@link MultipartParserConsumer} where {@link DirectiveGroup}s will be passed to
 *            be processed.
 * @param sslContextFactory
 *            The {@link SslContextFactory} to use for validating certificates.
 * @param parsingFailedHandler
 *            The handler for handling parse failures.
 * @throws Exception
 */
public AVSClient(URL host, MultipartParserConsumer multipartParserConsumer,
        SslContextFactory sslContextFactory, ParsingFailedHandler parsingFailedHandler) throws Exception {
    http2Client = new HTTP2Client();
    this.host = host;
    this.sslContextFactory = sslContextFactory;
    requestQueue = new LinkedBlockingDeque<>();
    requestResponseParser = new MultipartParser(multipartParserConsumer);
    downchannelParser = new MultipartParser(multipartParserConsumer);
    this.parsingFailedHandler = parsingFailedHandler;
    createNewHttpClient();
    requestThread = new RequestThread(requestQueue);
    if (StringUtils.isNotBlank(accessToken)) {
        startRequestThread();
        startDownchannelThread();
    }
}
From source file:org.commoncrawl.service.listcrawler.DataTransferAgent.java
public static void main(String[] args) {
    Logger logger = Logger.getLogger("org.commoncrawl");
    logger.setLevel(Level.INFO);
    BasicConfigurator.configure();
    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    // set a big io buffer size ...
    conf.setInt("io.file.buffer.size", 4096 * 1024);

    final File transferLogDir = new File("/home/rana/ccprod/data/proxy_xfr_log");
    final Path hdfsCacheDataPath = new Path("crawl/proxy/cache/");
    final File shutdownFile = new File("/home/rana/ccprod/data/shutdown_xfr");

    // create a deque ..
    final LinkedBlockingDeque<ProxyTransferItem> itemQueue = new LinkedBlockingDeque<ProxyTransferItem>();
    final EventLoop eventLoop = new EventLoop();
    eventLoop.start();

    try {
        final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);
        Thread transferThreads[] = new Thread[TRANSFER_THREADS_PER_HOST * mappingsTable.size()];
        Semaphore shutdownSemaphore = new Semaphore(0);
        int threadIndex = 0;
        for (int i = 0; i < TRANSFER_THREADS_PER_HOST; ++i) {
            int serverIdx = 0;
            for (CCBridgeServerMapping mapping : mappingsTable) {
                transferThreads[(i * mappingsTable.size()) + serverIdx++] = startTransferThread(threadIndex++,
                        mapping, shutdownFile, fs, conf, itemQueue, eventLoop, shutdownSemaphore);
            }
        }

        Thread scannerThread = new Thread(new Runnable() {

            long _lastScanId = -1;
            long _lastOutOfOrderDataDirId = -1L;

            static final int SCAN_INTERVAL_MS = 500;

            @Override
            public void run() {
                while (true) {
                    try {
                        if (shutdownFile.exists()) {
                            LOG.info("Shutdown File Detected in ScanTimer Outer Loop. Exiting Scan Thread");
                            return;
                        }

                        LOG.info("Scanning For Files based on filter. Last Known Scan Id is:" + _lastScanId);
                        FileStatus fileList[] = fs.listStatus(hdfsCacheDataPath, new PathFilter() {
                            @Override
                            public boolean accept(Path path) {
                                try {
                                    if (path.getName().startsWith("cacheData-")) {
                                        // extract file id ...
                                        long currentFileId = Long
                                                .parseLong(path.getName().substring("cacheData-".length()));
                                        // figure out if we are going to process it ...
                                        if (_lastScanId == -1 || currentFileId > _lastScanId) {
                                            return true;
                                        }
                                    }
                                } catch (Exception e) {
                                    LOG.error("Caught Exception Processing Path Filter:"
                                            + CCStringUtils.stringifyException(e));
                                }
                                return false;
                            }
                        });
                        LOG.info("Scan returned:" + fileList.length + " Number of Valid Files");

                        long latestFileId = 0L;
                        for (FileStatus file : fileList) {
                            // extract file id ...
                            long currentFileId = Long
                                    .parseLong(file.getPath().getName().substring("cacheData-".length()));
                            // figure out if we are going to process it ...
                            if (_lastScanId == -1 || currentFileId > _lastScanId) {
                                // cache max latest id ..
                                latestFileId = Math.max(latestFileId, currentFileId);
                                File logFile = hdfsCacheFileToLogFileLocation(transferLogDir, file);
                                if (logFile != null) {
                                    if (logFile.exists()) {
                                        LOG.info("Skipping:" + file.getPath().getName());
                                    } else {
                                        LOG.info("Queueing File:" + file.getPath().getName());
                                        itemQueue.add(new ProxyTransferItem(file.getPath(), logFile,
                                                file.getPath().getName()));
                                    }
                                }
                            }
                        }
                        // ok update latest file id
                        _lastScanId = Math.max(_lastScanId, latestFileId);

                        FileStatus outofOrderDataDirs[] = fs
                                .globStatus(new Path("crawl/proxy/dtAgentOutOfOrderTransfers/*"));
                        for (FileStatus outOfOrderDataDir : outofOrderDataDirs) {
                            long dataDirId = Long.parseLong(outOfOrderDataDir.getPath().getName());
                            if (dataDirId > _lastOutOfOrderDataDirId) {
                                FileStatus candidates[] = fs
                                        .globStatus(new Path(outOfOrderDataDir.getPath(), "part-*"));
                                for (FileStatus candidate : candidates) {
                                    File logFile = outOfOrderFileToLogFileLocation(transferLogDir,
                                            candidate.getPath());
                                    if (logFile != null) {
                                        String candidateName = candidate.getPath().getParent().getName() + "-"
                                                + candidate.getPath().getName();
                                        if (logFile.exists()) {
                                            LOG.info("Skipping OOB FILE:" + candidateName);
                                        } else {
                                            LOG.info("Queueing OOB FILE:" + candidateName);
                                            itemQueue.add(new ProxyTransferItem(candidate.getPath(), logFile,
                                                    candidateName));
                                        }
                                    }
                                }
                                _lastOutOfOrderDataDirId = dataDirId;
                            }
                        }
                        LOG.info("Finish Scan. Last Known Scan Id is now:" + _lastScanId);
                    } catch (Exception e) {
                        LOG.error(CCStringUtils.stringifyException(e));
                    }
                    try {
                        Thread.sleep(SCAN_INTERVAL_MS);
                    } catch (InterruptedException e) {
                    }
                }
            }
        });

        // start scanner thread ...
        scannerThread.start();

        LOG.info("Waiting on Transfer Threads");
        shutdownSemaphore.acquireUninterruptibly(TRANSFER_THREADS_PER_HOST * mappingsTable.size());
        LOG.info("ALL Transfer Threads Dead.");
        // wait for scanner thread to die
        LOG.info("Waiting for Scanner Thread to Die.");
        try {
            scannerThread.join();
        } catch (InterruptedException e) {
        }
        LOG.info("Killing Event Loop");
        eventLoop.stop();
    } catch (IOException e) {
        LOG.error(CCStringUtils.stringifyException(e));
    }
}
From source file:com.vaporwarecorp.mirror.feature.alexa.AlexaCommandManagerImpl.java
@Override
public void start() {
    // check if AlexaManager is already running
    if (mAlexaManager != null) {
        return;
    }

    // get our AlexaManager instance for convenience
    Timber.d("Starting Alexa commands");

    // prepare our queue
    mAvsQueue = new LinkedBlockingDeque<>();

    // instantiate our audio player
    mAudioPlayer = AlexaAudioPlayer.getInstance(mAppManager.getAppContext());
    mAudioPlayer.addCallback(mAudioPlayerCallback);

    // create the AlexaManager
    mAlexaManager = AlexaManager.getInstance(mAppManager.getAppContext(), "MirrorApp");
}
From source file:com.amazon.alexa.avs.AVSController.java
public AVSController(ExpectSpeechListener listenHandler, AVSAudioPlayerFactory audioFactory,
        AlertManagerFactory alarmFactory, AVSClientFactory avsClientFactory,
        DialogRequestIdAuthority dialogRequestIdAuthority, boolean wakeWordAgentEnabled,
        WakeWordIPCFactory wakewordIPCFactory, WakeWordDetectedHandler wakeWakeDetectedHandler)
        throws Exception {

    this.wakeWordAgentEnabled = wakeWordAgentEnabled;
    this.wakeWordDetectedHandler = wakeWakeDetectedHandler;

    if (this.wakeWordAgentEnabled) {
        try {
            log.info("Creating Wake Word IPC | port number: " + WAKE_WORD_AGENT_PORT_NUMBER);
            this.wakeWordIPC = wakewordIPCFactory.createWakeWordIPC(this, WAKE_WORD_AGENT_PORT_NUMBER);
            this.wakeWordIPC.init();
            Thread.sleep(1000);
            log.info("Created Wake Word IPC ok.");
        } catch (IOException e) {
            log.error("Error creating Wake Word IPC ok.", e);
        }
    }

    initializeMicrophone();

    this.player = audioFactory.getAudioPlayer(this);
    this.player.registerAlexaSpeechListener(this);
    this.dialogRequestIdAuthority = dialogRequestIdAuthority;
    speechRequestAudioPlayerPauseController = new SpeechRequestAudioPlayerPauseController(player);

    expectSpeechListeners = new HashSet<ExpectSpeechListener>(
            Arrays.asList(listenHandler, speechRequestAudioPlayerPauseController));
    dependentQueue = new LinkedBlockingDeque<>();
    independentQueue = new LinkedBlockingDeque<>();

    DirectiveEnqueuer directiveEnqueuer = new DirectiveEnqueuer(dialogRequestIdAuthority, dependentQueue,
            independentQueue);

    avsClient = avsClientFactory.getAVSClient(directiveEnqueuer, this);

    alertManager = alarmFactory.getAlertManager(this, this, AlertsFileDataStore.getInstance());

    // Ensure that we have attempted to finish loading all alarms from file before sending
    // synchronize state
    alertManager.loadFromDisk(new ResultListener() {
        @Override
        public void onSuccess() {
            sendSynchronizeStateEvent();
        }

        @Override
        public void onFailure() {
            sendSynchronizeStateEvent();
        }
    });

    // ensure we notify AVS of playbackStopped on app exit
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            player.stop();
            avsClient.shutdown();
        }
    });

    dependentDirectiveThread = new BlockableDirectiveThread(dependentQueue, this, "DependentDirectiveThread");
    independentDirectiveThread = new BlockableDirectiveThread(independentQueue, this,
            "IndependentDirectiveThread");

    lastUserInteractionTimestampSeconds = new AtomicLong(System.currentTimeMillis() / MILLISECONDS_PER_SECOND);
    scheduledExecutor.scheduleAtFixedRate(new UserInactivityReport(), USER_INACTIVITY_REPORT_PERIOD_HOURS,
            USER_INACTIVITY_REPORT_PERIOD_HOURS, TimeUnit.HOURS);
}
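A common reason to choose a LinkedBlockingDeque over a plain BlockingQueue for directive queues like dependentQueue and independentQueue above is that a deque lets a consumer push an item back to the front, e.g. to retry a failed directive before newer ones. A minimal sketch of that idea (illustrative, not taken from AVSController):

import java.util.concurrent.LinkedBlockingDeque;

public class RetryFrontDemo {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingDeque<String> directives = new LinkedBlockingDeque<>();
        directives.putLast("first");
        directives.putLast("second");

        String d = directives.takeFirst(); // "first"
        boolean failed = true;             // pretend processing failed
        if (failed) {
            directives.putFirst(d);        // re-queue it ahead of "second"
        }
        System.out.println(directives.takeFirst()); // "first" again
    }
}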
From source file:com.linkedin.pinot.transport.perf.ScatterGatherPerfClient.java
private void setup() {
    MetricsRegistry registry = new MetricsRegistry();
    _timedExecutor = new ScheduledThreadPoolExecutor(1);
    _service = new ThreadPoolExecutor(10, 10, 10, TimeUnit.DAYS, new LinkedBlockingDeque<Runnable>());
    _eventLoopGroup = new NioEventLoopGroup(10);
    _timer = new HashedWheelTimer();

    NettyClientMetrics clientMetrics = new NettyClientMetrics(registry, "client_");
    PooledNettyClientResourceManager rm = new PooledNettyClientResourceManager(_eventLoopGroup, _timer,
            clientMetrics);
    _pool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(1, _maxActiveConnections, 300000, 10, rm,
            _timedExecutor, MoreExecutors.sameThreadExecutor(), registry);
    rm.setPool(_pool);

    _scatterGather = new ScatterGatherImpl(_pool, _service);

    for (AsyncReader r : _readerThreads) {
        r.start();
    }
}