List of usage examples for java.util.concurrent.ConcurrentLinkedQueue.poll()
public E poll()
Retrieves and removes the head of this queue, or returns null if this queue is empty.
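Before the project examples, a minimal self-contained sketch (not taken from any of the projects below) showing the poll() contract:

import java.util.concurrent.ConcurrentLinkedQueue;

public class PollDemo {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        queue.add("first");
        queue.add("second");

        // poll() removes and returns the head of the queue (FIFO order)
        System.out.println(queue.poll()); // first
        System.out.println(queue.poll()); // second

        // unlike remove(), poll() does not throw on an empty queue; it returns null
        System.out.println(queue.poll()); // null
    }
}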
From source file:com.ibm.crail.storage.StorageServer.java
public static void main(String[] args) throws Exception {
    Logger LOG = CrailUtils.getLogger();
    CrailConfiguration conf = new CrailConfiguration();
    CrailConstants.updateConstants(conf);
    CrailConstants.printConf();
    CrailConstants.verify();

    int splitIndex = 0;
    for (String param : args) {
        if (param.equalsIgnoreCase("--")) {
            break;
        }
        splitIndex++;
    }

    // default values
    StringTokenizer tokenizer = new StringTokenizer(CrailConstants.STORAGE_TYPES, ",");
    if (!tokenizer.hasMoreTokens()) {
        throw new Exception("No storage types defined!");
    }
    String storageName = tokenizer.nextToken();
    int storageType = 0;
    HashMap<String, Integer> storageTypes = new HashMap<String, Integer>();
    storageTypes.put(storageName, storageType);
    for (int type = 1; tokenizer.hasMoreElements(); type++) {
        String name = tokenizer.nextToken();
        storageTypes.put(name, type);
    }
    int storageClass = -1;

    // custom values
    if (args != null) {
        Option typeOption = Option.builder("t").desc("storage type to start").hasArg().build();
        Option classOption = Option.builder("c").desc("storage class the server will attach to").hasArg().build();
        Options options = new Options();
        options.addOption(typeOption);
        options.addOption(classOption);
        CommandLineParser parser = new DefaultParser();
        try {
            CommandLine line = parser.parse(options, Arrays.copyOfRange(args, 0, splitIndex));
            if (line.hasOption(typeOption.getOpt())) {
                storageName = line.getOptionValue(typeOption.getOpt());
                storageType = storageTypes.get(storageName).intValue();
            }
            if (line.hasOption(classOption.getOpt())) {
                storageClass = Integer.parseInt(line.getOptionValue(classOption.getOpt()));
            }
        } catch (ParseException e) {
            HelpFormatter formatter = new HelpFormatter();
            formatter.printHelp("Storage tier", options);
            System.exit(-1);
        }
    }
    if (storageClass < 0) {
        storageClass = storageType;
    }

    StorageTier storageTier = StorageTier.createInstance(storageName);
    if (storageTier == null) {
        throw new Exception("Cannot instantiate datanode of type " + storageName);
    }

    String[] extraParams = null;
    splitIndex++;
    if (args.length > splitIndex) {
        extraParams = new String[args.length - splitIndex];
        for (int i = splitIndex; i < args.length; i++) {
            extraParams[i - splitIndex] = args[i];
        }
    }
    storageTier.init(conf, extraParams);
    storageTier.printConf(LOG);

    RpcClient rpcClient = RpcClient.createInstance(CrailConstants.NAMENODE_RPC_TYPE);
    rpcClient.init(conf, args);
    rpcClient.printConf(LOG);

    // poll() consumes each configured namenode address as a connection is opened to it
    ConcurrentLinkedQueue<InetSocketAddress> namenodeList = CrailUtils.getNameNodeList();
    ConcurrentLinkedQueue<RpcConnection> connectionList = new ConcurrentLinkedQueue<RpcConnection>();
    while (!namenodeList.isEmpty()) {
        InetSocketAddress address = namenodeList.poll();
        RpcConnection connection = rpcClient.connect(address);
        connectionList.add(connection);
    }
    RpcConnection rpcConnection = connectionList.peek();
    if (connectionList.size() > 1) {
        rpcConnection = new RpcDispatcher(connectionList);
    }
    LOG.info("connected to namenode(s) " + rpcConnection.toString());

    StorageServer server = storageTier.launchServer();
    StorageRpcClient storageRpc = new StorageRpcClient(storageType, CrailStorageClass.get(storageClass),
            server.getAddress(), rpcConnection);

    HashMap<Long, Long> blockCount = new HashMap<Long, Long>();
    long sumCount = 0;
    while (server.isAlive()) {
        StorageResource resource = server.allocateResource();
        if (resource == null) {
            break;
        } else {
            storageRpc.setBlock(resource.getAddress(), resource.getLength(), resource.getKey());
            DataNodeStatistics stats = storageRpc.getDataNode();
            long newCount = stats.getFreeBlockCount();
            long serviceId = stats.getServiceId();
            long oldCount = 0;
            if (blockCount.containsKey(serviceId)) {
                oldCount = blockCount.get(serviceId);
            }
            long diffCount = newCount - oldCount;
            blockCount.put(serviceId, newCount);
            sumCount += diffCount;
            LOG.info("datanode statistics, freeBlocks " + sumCount);
        }
    }
    while (server.isAlive()) {
        DataNodeStatistics stats = storageRpc.getDataNode();
        long newCount = stats.getFreeBlockCount();
        long serviceId = stats.getServiceId();
        long oldCount = 0;
        if (blockCount.containsKey(serviceId)) {
            oldCount = blockCount.get(serviceId);
        }
        long diffCount = newCount - oldCount;
        blockCount.put(serviceId, newCount);
        sumCount += diffCount;
        LOG.info("datanode statistics, freeBlocks " + sumCount);
        Thread.sleep(2000);
    }
}
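This example uses both queue accessors: poll() consumes each namenode address as it connects, while peek() reads the first connection without removing it. A minimal sketch of the difference:

import java.util.concurrent.ConcurrentLinkedQueue;

public class PeekVsPollDemo {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        queue.add("head");

        // peek() reads the head without removing it; poll() removes it
        System.out.println(queue.peek()); // head (still in the queue)
        System.out.println(queue.poll()); // head (now removed)
        System.out.println(queue.poll()); // null (queue is empty)
    }
}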
From source file:Main.java
private static byte[] mergeBytes(byte[] in, ConcurrentLinkedQueue<byte[]> decompressUnfinishedDataQueue) {
    if (!decompressUnfinishedDataQueue.isEmpty()) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try {
            // drain all leftover fragments first, then append the new input
            while (!decompressUnfinishedDataQueue.isEmpty()) {
                out.write(decompressUnfinishedDataQueue.poll());
            }
            out.write(in);
            in = out.toByteArray();
        } catch (IOException e) {
            throw new RuntimeException(e);
        } finally {
            try {
                out.close();
            } catch (IOException e) {
            }
        }
    }
    return in;
}
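A hypothetical driver for mergeBytes(); the byte values are invented for illustration, and the method above is assumed to be in scope:

import java.util.concurrent.ConcurrentLinkedQueue;

// hypothetical usage; assumes mergeBytes() from the example above is in scope
public static void main(String[] args) {
    ConcurrentLinkedQueue<byte[]> unfinished = new ConcurrentLinkedQueue<byte[]>();
    unfinished.add(new byte[] { 0x01, 0x02 }); // leftover fragment from a previous pass
    byte[] merged = mergeBytes(new byte[] { 0x03, 0x04 }, unfinished);
    // merged is { 0x01, 0x02, 0x03, 0x04 }: the queued fragment first, then the new input
    System.out.println(java.util.Arrays.toString(merged)); // [1, 2, 3, 4]
}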
From source file:com.linkedin.pinot.perf.QueryRunner.java
/**
 * Use multiple threads to run queries at an increasing target QPS.
 *
 * Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start {numThreads} worker threads to fetch queries from the queue and send them.
 * We start with the start QPS, and keep adding delta QPS to the start QPS during the test. The main thread is
 * responsible for collecting the statistics and logging them periodically.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS.
 * @param deltaQPS delta QPS.
 * @throws Exception
 */
@SuppressWarnings("InfiniteLoopStatement")
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile, int numThreads,
        double startQPS, double deltaQPS) throws Exception {
    final long randomSeed = 123456789L;
    final Random random = new Random(randomSeed);
    final int timePerTargetQPSMillis = 60000;
    final int queueLengthThreshold = Math.max(20, (int) startQPS);

    final List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }
    final int numQueries = queries.size();

    final PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    final ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();

    double currentQPS = startQPS;
    int intervalMillis = (int) (MILLIS_PER_SECOND / currentQPS);

    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    // a null from poll() means the queue is empty: back off briefly and retry
                    String query = queryQueue.poll();
                    if (query == null) {
                        try {
                            Thread.sleep(1);
                            continue;
                        } catch (InterruptedException e) {
                            LOGGER.error("Interrupted.", e);
                            return;
                        }
                    }
                    long startTime = System.currentTimeMillis();
                    try {
                        driver.postQuery(query);
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(System.currentTimeMillis() - startTime);
                    } catch (Exception e) {
                        LOGGER.error("Caught exception while running query: {}", query, e);
                        return;
                    }
                }
            }
        });
    }

    LOGGER.info("Start with QPS: {}, delta QPS: {}", startQPS, deltaQPS);
    while (true) {
        long startTime = System.currentTimeMillis();
        while (System.currentTimeMillis() - startTime <= timePerTargetQPSMillis) {
            if (queryQueue.size() > queueLengthThreshold) {
                executorService.shutdownNow();
                throw new RuntimeException("Cannot achieve target QPS of: " + currentQPS);
            }
            queryQueue.add(queries.get(random.nextInt(numQueries)));
            Thread.sleep(intervalMillis);
        }
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.getAndSet(0);
        double avgResponseTime = ((double) totalResponseTime.getAndSet(0)) / count;
        LOGGER.info("Target QPS: {}, Interval: {}ms, Actual QPS: {}, Avg Response Time: {}ms", currentQPS,
                intervalMillis, count / timePassedSeconds, avgResponseTime);

        // find a new interval
        int newIntervalMillis;
        do {
            currentQPS += deltaQPS;
            newIntervalMillis = (int) (MILLIS_PER_SECOND / currentQPS);
        } while (newIntervalMillis == intervalMillis);
        intervalMillis = newIntervalMillis;
    }
}
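The worker above spins on poll() and sleeps for 1 ms whenever the queue is empty, which keeps producers non-blocking at the cost of a light busy-wait. An alternative sketch (not from the Pinot sources) using a blocking queue, where the consumer simply waits in take():

import java.util.concurrent.LinkedBlockingQueue;

public class BlockingConsumerDemo {
    public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<String> queryQueue = new LinkedBlockingQueue<>();
        queryQueue.put("select count(*) from myTable"); // invented demo query

        // take() blocks until an element arrives, so the consumer
        // needs no null check and no sleep loop
        String query = queryQueue.take();
        System.out.println(query);
    }
}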
From source file:io.pravega.controller.eventProcessor.impl.SerializedRequestHandler.java
/**
 * Run method is called only if the work queue is not empty, so we can safely do a workQueue.poll().
 * workQueue.poll() should only happen in the run method and nowhere else.
 *
 * @param key key for which we want to process the next event
 * @param workQueue work queue for the key
 */
private void run(String key, ConcurrentLinkedQueue<Work> workQueue) {
    // safe because the caller guarantees the queue is non-empty
    Work work = workQueue.poll();
    processEvent(work.getEvent()).whenComplete((r, e) -> {
        if (e != null && toPostpone(work.getEvent(), work.getPickupTime(), e)) {
            handleWorkPostpone(key, workQueue, work);
        } else {
            if (e != null) {
                work.getResult().completeExceptionally(e);
            } else {
                work.getResult().complete(r);
            }
            handleWorkComplete(key, workQueue, work);
        }
    });
}
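The Javadoc above carries the whole safety argument: because the caller guarantees the queue is non-empty, work can never be null here. Without that invariant, work.getEvent() would risk a NullPointerException, and the code would need the defensive check shown in this minimal sketch (not the Pravega code):

import java.util.concurrent.ConcurrentLinkedQueue;

public class GuardDemo {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<Runnable> workQueue = new ConcurrentLinkedQueue<>();

        // without a non-empty guarantee, poll() can return null and must be checked
        Runnable work = workQueue.poll();
        if (work == null) {
            System.out.println("queue empty, nothing to process");
            return;
        }
        work.run();
    }
}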
From source file:com.interacciones.mxcashmarketdata.driver.queue.QueueWriteFile.java
public void ReadQueue(ConcurrentLinkedQueue<String> msgQueue) {
    LOGGER.debug("Reading from queue... Sizing " + msgQueue.size());
    while (true) {
        Iterator<String> it = msgQueue.iterator();
        while (it.hasNext()) {
            String msg = (String) it.next();
            //messageProcessing.receive(msg.toString());
            msgQueue.poll();
        }
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            e.printStackTrace();
            LOGGER.error("Error: " + e.getMessage());
        }
    }
}
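A caveat on the pattern above: the element returned by it.next() and the element removed by msgQueue.poll() only match while this method is the queue's sole consumer; with a second consumer, poll() can remove a different element than the one just read. A minimal self-contained sketch of a poll()-only loop that processes exactly what it removes:

import java.util.concurrent.ConcurrentLinkedQueue;

public class PollLoopDemo {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> msgQueue = new ConcurrentLinkedQueue<>();
        msgQueue.add("msg-1");
        msgQueue.add("msg-2");

        // each pass handles exactly the element that poll() just removed
        String msg;
        while ((msg = msgQueue.poll()) != null) {
            System.out.println("processing " + msg);
        }
    }
}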
From source file:org.apache.storm.daemon.DrpcServer.java
@Override
public DRPCRequest fetchRequest(String functionName) throws AuthorizationException, TException {
    meterFetchRequestCalls.mark();
    Map<String, String> map = new HashMap<>();
    map.put(DRPCAuthorizerBase.FUNCTION_NAME, functionName);
    checkAuthorization(authorizer, map, "fetchRequest");
    ConcurrentLinkedQueue<DRPCRequest> queue = acquireQueue(functionName);
    DRPCRequest req = queue.poll();
    if (req != null) {
        LOG.debug("Fetched request for {} at {}", functionName, System.currentTimeMillis());
        return req;
    } else {
        // no pending request: return an empty DRPCRequest rather than null
        return new DRPCRequest("", "");
    }
}
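When poll() finds nothing, the method returns an empty DRPCRequest instead of null, so remote callers never have to handle a null result. The same null-object idea in miniature (names and values invented):

import java.util.concurrent.ConcurrentLinkedQueue;

public class NullObjectDemo {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> requests = new ConcurrentLinkedQueue<>();

        // map poll()'s null to a sentinel "empty" value so callers
        // never have to handle null themselves
        String request = requests.poll();
        String result = (request != null) ? request : "";
        System.out.println("[" + result + "]"); // [] is the empty sentinel
    }
}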
From source file:org.languagetool.server.PipelinePool.java
Pipeline getPipeline(PipelineSettings settings) throws Exception {
    if (pool != null) {
        // expire old pipelines in queues (where settings may be used, but some of the created pipelines are unused)
        long expireCheckDelta = System.currentTimeMillis() - pipelineExpireCheckTimestamp;
        if (expireCheckDelta > PIPELINE_EXPIRE_TIME) {
            AtomicInteger removed = new AtomicInteger();
            pipelineExpireCheckTimestamp = System.currentTimeMillis();
            //pool.asMap().forEach((s, queue) -> queue.removeIf(Pipeline::isExpired));
            pool.asMap().forEach((s, queue) -> queue.removeIf(pipeline -> {
                if (pipeline.isExpired()) {
                    removed.getAndIncrement();
                    return true;
                } else {
                    return false;
                }
            }));
            ServerTools.print("Removing " + removed.get() + " expired pipelines");
        }
        requests++;
        ConcurrentLinkedQueue<Pipeline> pipelines = pool.get(settings);
        if (requests % 1000 == 0) {
            ServerTools.print(String.format("Pipeline cache stats: %f hit rate", (double) pipelinesUsed / requests));
        }
        // a null from poll() is a pool miss: no idle pipeline exists for these settings
        Pipeline pipeline = pipelines.poll();
        if (pipeline == null) {
            //ServerTools.print(String.format("No prepared pipeline found for %s; creating one.", settings));
            pipeline = createPipeline(settings.lang, settings.motherTongue, settings.query, settings.globalConfig,
                    settings.user);
        } else {
            pipelinesUsed++;
            //ServerTools.print(String.format("Prepared pipeline found for %s; using it.", settings));
        }
        return pipeline;
    } else {
        return createPipeline(settings.lang, settings.motherTongue, settings.query, settings.globalConfig,
                settings.user);
    }
}
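Here poll() doubles as the cache lookup: a null result is a pool miss that triggers creating a fresh pipeline. A generic, self-contained sketch of that object-pool pattern (class and method names are invented):

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.Supplier;

class SimplePool<T> {
    private final ConcurrentLinkedQueue<T> idle = new ConcurrentLinkedQueue<>();
    private final Supplier<T> factory;

    SimplePool(Supplier<T> factory) {
        this.factory = factory;
    }

    T acquire() {
        T pooled = idle.poll(); // null means no idle instance: a pool miss
        return pooled != null ? pooled : factory.get();
    }

    void release(T instance) {
        idle.add(instance); // hand the instance back for reuse
    }
}

A caller would acquire() an instance, use it, and release() it back, mirroring how getPipeline() hands out pipelines that are later returned to the pool's queue.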
From source file:com.interacciones.mxcashmarketdata.mama.queue.QueueReader.java
public void ReadQueue(ConcurrentLinkedQueue<Parser> msgQueue) {
    LOGGER.debug("Reading from queue... Sizing " + msgQueue.size());
    while (true) {
        Iterator<Parser> it = msgQueue.iterator();
        while (it.hasNext()) {
            Parser msg = (Parser) it.next();
            LOGGER.debug("Message Type: " + msg.TypeMessage());
            LOGGER.debug("Symbol (Emisora):" + msg.Emisora());
            sendMessage.sendMessage(msg);
            msgQueue.poll();
        }
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            e.printStackTrace();
            LOGGER.error("Error: " + e.getMessage());
        }
    }
}
From source file:org.geowebcache.storage.JobObject.java
private void addLogs(ConcurrentLinkedQueue<JobLogObject> logs) {
    JobLogObject joblog;
    while (!logs.isEmpty()) {
        synchronized (logs) {
            joblog = logs.poll();
        }
        synchronized (newLogs) {
            newLogs.add(joblog);
        }
    }
}
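ConcurrentLinkedQueue.poll() is already atomic, so the synchronized blocks above are not needed for the queue operations themselves; they only matter if other code synchronizes on the same monitors for a wider critical section. A minimal sketch of the same queue-to-queue transfer without locks:

import java.util.concurrent.ConcurrentLinkedQueue;

public class TransferDemo {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> logs = new ConcurrentLinkedQueue<>();
        ConcurrentLinkedQueue<String> newLogs = new ConcurrentLinkedQueue<>();
        logs.add("log-1");
        logs.add("log-2");

        // poll() until null; each removal is atomic on its own
        String entry;
        while ((entry = logs.poll()) != null) {
            newLogs.add(entry);
        }
        System.out.println(newLogs); // [log-1, log-2]
    }
}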
From source file:cltestgrid.Upload2.java
private void saveBlobsInParallel(List<MyFileItem> items) {
    final int THREAD_COUNT = 20;
    final ConcurrentLinkedQueue<MyFileItem> tasks = new ConcurrentLinkedQueue<MyFileItem>(items);
    final CountDownLatch doneLatch = new CountDownLatch(THREAD_COUNT);
    for (int i = 0; i < THREAD_COUNT; i++) {
        ThreadManager.createThreadForCurrentRequest(new Runnable() {
            public void run() {
                MyFileItem item = null;
                try {
                    // each worker keeps polling until the shared queue is drained
                    while ((item = tasks.poll()) != null) {
                        try {
                            saveBlob(item.blobName, item.contentType, item.dataCollector.toByteArray());
                            // Saving blob may throw a LockException due to CloudStorage issue
                            // http://code.google.com/p/googleappengine/issues/detail?id=8592
                            // Therefore retry two times in case of LockException:
                        } catch (com.google.appengine.api.files.LockException e) {
                            try {
                                log.log(Level.WARNING, "retry saving blob " + item.blobName
                                        + " because of LockException when saving it first time", e);
                                saveBlob(item.blobName, item.contentType, item.dataCollector.toByteArray());
                            } catch (com.google.appengine.api.files.LockException e2) {
                                log.log(Level.WARNING, "second retry saving blob " + item.blobName
                                        + " because of LockException when saving it at first retry", e2);
                                saveBlob(item.blobName, item.contentType, item.dataCollector.toByteArray());
                            }
                        }
                    }
                } catch (Throwable t) {
                    if (item != null) {
                        item.saveError = t;
                    }
                    log.log(Level.SEVERE, "Error while saving blob", t);
                } finally {
                    doneLatch.countDown();
                }
            }
        }).start();
    }
    try {
        doneLatch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException("Interrupted while saving blobs", e);
    }
}
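The same fan-out pattern in a self-contained sketch (thread count and task values invented): worker threads drain a shared queue via poll() until it returns null, and a CountDownLatch lets the caller wait for all of them to finish.

import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;

public class FanOutDemo {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentLinkedQueue<Integer> tasks =
                new ConcurrentLinkedQueue<>(List.of(1, 2, 3, 4, 5, 6, 7, 8));
        int threadCount = 4;
        CountDownLatch done = new CountDownLatch(threadCount);

        for (int i = 0; i < threadCount; i++) {
            new Thread(() -> {
                try {
                    // a null from poll() means the shared queue is drained
                    Integer task;
                    while ((task = tasks.poll()) != null) {
                        System.out.println(Thread.currentThread().getName() + " handled " + task);
                    }
                } finally {
                    done.countDown();
                }
            }).start();
        }
        done.await(); // block until every worker has finished
    }
}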