List of usage examples for java.util.concurrent ConcurrentLinkedQueue add
public boolean add(E e)
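Before the real-world examples below, a minimal self-contained sketch (class and variable names are illustrative): add(e) inserts e at the tail of the queue and, because the queue is unbounded, always returns true; passing null throws NullPointerException.

import java.util.concurrent.ConcurrentLinkedQueue;

public class AddExample {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();

        // add() appends to the tail and always returns true
        // (the queue is unbounded, so it never rejects an element).
        boolean added = queue.add("first");
        queue.add("second");

        System.out.println(added);        // true
        System.out.println(queue.poll()); // "first" (FIFO order)
    }
}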
From source file:org.ramidore.logic.system.PointBattleLogic.java
/**
 * Aggregates per-stage points and mob counts for one run of the point battle.
 *
 * @param id       identifier of the run
 * @param dataList log entries for the run
 */
private void statData(String id, List<PbLogBean> dataList) {
    // key: stage number, value: cumulative points at the end of that stage
    Map<Integer, Integer> pointMap = new HashMap<>();
    // key: stage number, value: number of mobs defeated in that stage
    Map<Integer, Integer> mobCountMap = new HashMap<>();
    for (int i = 0; i < 7; i++) {
        // initialize every stage with zero
        pointMap.put(i, 0);
        mobCountMap.put(i, 0);
    }
    for (PbLogBean data : dataList) {
        pointMap.put(data.getStageNo(), data.getPoint());
        mobCountMap.put(data.getStageNo(), data.getStageSequentialNo());
    }
    for (PbLogBean data : dataList) {
        data.setPointOffset(pointMap.get(data.getStageNo() - 1));
        ConcurrentLinkedQueue<PbLogBean> dataQ = chartDataQList.get(data.getStageNo() - 1);
        ConcurrentLinkedQueue<PbLogBean> allDataQ = chartDataQList.get(chartDataQList.size() - 1);
        dataQ.add(data);
        allDataQ.add(data);
    }
    PbStatTable row = new PbStatTable();
    row.setId(id);
    row.setPoint1(pointMap.get(1));
    row.setMobCount1(mobCountMap.get(1));
    row.setStage1();
    row.setPoint2(pointMap.get(2) - pointMap.get(1));
    row.setMobCount2(mobCountMap.get(2));
    row.setStage2();
    row.setPoint3(pointMap.get(3) - pointMap.get(2));
    row.setMobCount3(mobCountMap.get(3));
    row.setStage3();
    row.setPoint4(pointMap.get(4) - pointMap.get(3));
    row.setMobCount4(mobCountMap.get(4));
    row.setStage4();
    row.setPoint5(pointMap.get(5) - pointMap.get(4));
    row.setMobCount5(mobCountMap.get(5));
    row.setStage5();
    row.setPoint2Total(row.getPoint1() + row.getPoint2());
    row.setPoint3Total(row.getPoint2Total() + row.getPoint3());
    row.setPoint4Total(row.getPoint3Total() + row.getPoint4());
    row.setPointTotal(pointMap.get(5));
    statTable.getItems().add(row);
}
From source file:ict.ocrabase.main.java.client.bulkload.LoadHFiles.java
public void doQuickBulkLoad(Map<String, Map<byte[], List<Path>>> loadMap)
        throws IOException, InterruptedException {
    Configuration config = getConf();
    int loadThreadNum = config.getInt("bulkload.loadthread.num", 10);
    for (Map.Entry<String, Map<byte[], List<Path>>> entry : loadMap.entrySet()) {
        String tableName = entry.getKey();
        LOG.info("Start loading table " + tableName);
        Map<byte[], List<Path>> regionMap = entry.getValue();
        // Queue of per-region load items shared by all worker threads.
        ConcurrentLinkedQueue<LoadItem> conQueue = new ConcurrentLinkedQueue<LoadHFiles.LoadItem>();
        for (Map.Entry<byte[], List<Path>> item : regionMap.entrySet()) {
            conQueue.add(new LoadItem(item.getKey(), item.getValue()));
        }
        // Never start more threads than there are regions to load.
        int threadNum = Math.min(regionMap.size(), loadThreadNum);
        LoadHFileThread[] threads = new LoadHFileThread[threadNum];
        for (int i = 0; i < threadNum; i++) {
            threads[i] = new LoadHFileThread(tableName, conQueue);
        }
        LOG.info("Starting threads");
        for (int i = 0; i < threadNum; i++) {
            threads[i].start();
        }
        LOG.info("Started threads!");
        for (int i = 0; i < threadNum; i++) {
            threads[i].join();
        }
        progress = 1;
    }
}
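The consumer side of the shared queue (LoadHFileThread) is not included in this listing. A plausible sketch, assuming each worker simply drains the queue with poll() until it returns null, might look like this; the load call here is an illustrative assumption, not the project's actual API:

// Hypothetical sketch of the consumer side of the shared queue above.
class LoadHFileThread extends Thread {
    private final String tableName;
    private final ConcurrentLinkedQueue<LoadItem> queue;

    LoadHFileThread(String tableName, ConcurrentLinkedQueue<LoadItem> queue) {
        this.tableName = tableName;
        this.queue = queue;
    }

    @Override
    public void run() {
        // poll() is atomic, so many workers can drain one queue safely;
        // it returns null once the queue is empty and the thread exits.
        LoadItem item;
        while ((item = queue.poll()) != null) {
            load(tableName, item);
        }
    }

    private void load(String tableName, LoadItem item) {
        // placeholder for the actual HFile load logic (not shown in the listing)
    }
}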
From source file:com.chinamobile.bcbsp.comm.BDBMap.java
/**
 * When duplicates are allowed, this method returns all values associated
 * with the key.
 */
public ConcurrentLinkedQueue<IMessage> getDupilcates(K key) {
    String tmp;
    int indexOf$;
    Collection<V> valueSet = storedMap.duplicates(key);
    ConcurrentLinkedQueue<IMessage> list = new ConcurrentLinkedQueue<IMessage>();
    Iterator<V> it = valueSet.iterator();
    while (it.hasNext()) {
        // Note: needs to be upgraded.
        IMessage tmpmsg = new BSPMessage();
        tmp = it.next().toString();
        indexOf$ = tmp.indexOf('$');
        tmpmsg.fromString(tmp.substring(indexOf$ + 1));
        list.add(tmpmsg);
    }
    storedMap.remove(key);
    bdbMapSize.addAndGet(-list.size());
    return list;
}
From source file:edu.cornell.mannlib.vitro.webapp.rdfservice.impl.jena.RDFServiceJena.java
private List<Statement> sort(List<Statement> stmts) {
    List<Statement> output = new ArrayList<Statement>();
    int originalSize = stmts.size();
    if (originalSize == 1) {
        return stmts;
    }
    List<Statement> remaining = stmts;
    ConcurrentLinkedQueue<Resource> subjQueue = new ConcurrentLinkedQueue<Resource>();
    // Seed the queue with the first named (URI) subject found.
    for (Statement stmt : remaining) {
        if (stmt.getSubject().isURIResource()) {
            subjQueue.add(stmt.getSubject());
            break;
        }
    }
    if (subjQueue.isEmpty()) {
        log.warn("No named subject in statement patterns");
        return stmts;
    }
    while (remaining.size() > 0) {
        if (subjQueue.isEmpty()) {
            subjQueue.add(remaining.get(0).getSubject());
        }
        while (!subjQueue.isEmpty()) {
            Resource subj = subjQueue.poll();
            List<Statement> temp = new ArrayList<Statement>();
            for (Statement stmt : remaining) {
                if (stmt.getSubject().equals(subj)) {
                    output.add(stmt);
                    // Follow resource objects so connected statements sort together.
                    if (stmt.getObject().isResource()) {
                        subjQueue.add((Resource) stmt.getObject());
                    }
                } else {
                    temp.add(stmt);
                }
            }
            remaining = temp;
        }
    }
    if (output.size() != originalSize) {
        throw new RuntimeException(
                "original list size was " + originalSize + " but sorted size is " + output.size());
    }
    return output;
}
From source file:org.apache.sysml.runtime.matrix.data.LibMatrixDNN.java
private static void addMatrixBlocks(int poolSize, TaskType type, ConvolutionParameters params,
        ConcurrentLinkedQueue<MatrixBlock> im2ColOutBlocks,
        ConcurrentLinkedQueue<MatrixBlock> doutReshapedBlocks,
        ConcurrentLinkedQueue<MatrixBlock> partialRetBlocks) {
    for (int i = 0; i < poolSize; i++) {
        if (type == TaskType.LoopedIm2ColConv2d || type == TaskType.LoopedIm2ColConv2dBwdFilter) {
            MatrixBlock im2ColOutBlock = new MatrixBlock(params.C * params.R * params.S,
                    params.P * params.Q, false);
            im2ColOutBlock.allocateDenseBlock(true);
            im2ColOutBlocks.add(im2ColOutBlock);
        }
        if (type == TaskType.LoopedIm2ColConv2dBwdFilter) {
            MatrixBlock partialRetBlock = new MatrixBlock(params.K, params.C * params.R * params.S, false);
            partialRetBlock.allocateDenseBlock(true);
            partialRetBlocks.add(partialRetBlock);
        }
        if (type == TaskType.LoopedIm2ColConv2dBwdData || type == TaskType.LoopedIm2ColConv2dBwdFilter) {
            MatrixBlock doutReshapedBlock = new MatrixBlock(params.P * params.Q, params.K, false);
            doutReshapedBlock.allocateDenseBlock(true);
            doutReshapedBlocks.add(doutReshapedBlock);
        }
    }
}
From source file:com.linkedin.pinot.core.query.scheduler.PrioritySchedulerTest.java
@Test
public void testMultiThreaded() throws InterruptedException {
    // Add queries from multiple threads and verify that all of them are executed.
    PropertiesConfiguration conf = new PropertiesConfiguration();
    conf.setProperty(ResourceManager.QUERY_WORKER_CONFIG_KEY, 60);
    conf.setProperty(ResourceManager.QUERY_RUNNER_CONFIG_KEY, 20);
    conf.setProperty(ResourceLimitPolicy.THREADS_PER_QUERY_PCT, 50);
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 60);
    conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_SOFT_LIMIT, 40);
    conf.setProperty(MultiLevelPriorityQueue.MAX_PENDING_PER_GROUP_KEY, 10);
    final TestPriorityScheduler scheduler = TestPriorityScheduler.create(conf);
    scheduler.start();
    final Random random = new Random();
    final ConcurrentLinkedQueue<ListenableFuture<byte[]>> results = new ConcurrentLinkedQueue<>();
    final int numThreads = 3;
    final int queriesPerThread = 10;
    numQueries = new CountDownLatch(numThreads * queriesPerThread);
    for (int i = 0; i < numThreads; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; j < queriesPerThread; j++) {
                    results.add(scheduler.submit(createServerQueryRequest(Integer.toString(index), metrics)));
                    Uninterruptibles.sleepUninterruptibly(random.nextInt(100), TimeUnit.MILLISECONDS);
                }
            }
        }).start();
    }
    numQueries.await();
    scheduler.stop();
}
From source file:com.predic8.membrane.core.interceptor.apimanagement.statistics.AMStatisticsCollector.java
public void addExchangeToQueue(Exchange exc) {
    String apiKey = (String) exc.getProperty(Exchange.API_KEY);
    if (apiKey != null) {
        ConcurrentLinkedQueue<Exchange> exchangeQueue = exchangesForApiKey.get(apiKey);
        // See SO 3752194 for an explanation of this pattern.
        if (exchangeQueue == null) {
            ConcurrentLinkedQueue<Exchange> newValue = new ConcurrentLinkedQueue<Exchange>();
            exchangeQueue = exchangesForApiKey.putIfAbsent(apiKey, newValue);
            if (exchangeQueue == null)
                exchangeQueue = newValue;
        }
        exchangeQueue.add(exc);
    }
}
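The null check around putIfAbsent above is the standard race-free lazy-initialization idiom for concurrent maps (the pattern the SO reference explains): if two threads miss on get() at the same time, putIfAbsent guarantees that only one new queue is installed, and the losing thread adopts the winner's queue instead of its own. A stripped-down sketch of the idiom, with illustrative names:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;

// Stripped-down sketch of the putIfAbsent idiom; names are illustrative.
class QueuePerKey<T> {
    private final ConcurrentMap<String, ConcurrentLinkedQueue<T>> queues =
            new ConcurrentHashMap<>();

    void addToKey(String key, T value) {
        ConcurrentLinkedQueue<T> queue = queues.get(key);
        if (queue == null) {
            ConcurrentLinkedQueue<T> candidate = new ConcurrentLinkedQueue<>();
            // putIfAbsent returns the previously mapped queue,
            // or null if our candidate won the race and was installed.
            queue = queues.putIfAbsent(key, candidate);
            if (queue == null) {
                queue = candidate;
            }
        }
        queue.add(value);
    }
}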
From source file:oculus.aperture.graph.aggregation.impl.ModularityAggregator.java
@Override
public void run() {
    logger.debug("Running modularity clustering algorithm on " + nodeMap.size() + " nodes and "
            + linkMap.size() + " links...");

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    HashMap<String, ModularityNode> linklookup = new HashMap<String, ModularityAggregator.ModularityNode>();

    for (Node n : nodeMap.values()) {
        ModularityNode mn = new ModularityNode(n);
        linklookup.put(n.getId(), mn);
        groups.add(mn);
    }
    links = new ArrayList<ModularityLink>();

    for (Link l : linkMap.values()) {
        if (linklookup.containsKey(l.getSourceId()) && linklookup.containsKey(l.getTargetId())) {
            // If this is not true we have links pointing to an invalid node...
            ModularityLink ml = new ModularityLink(linklookup.get(l.getSourceId()),
                    linklookup.get(l.getTargetId()));
            links.add(ml);

            ModularityNode start = linklookup.get(l.getSourceId());
            ModularityNode end = linklookup.get(l.getTargetId());
            start.addLink(ml);
            end.addLink(ml);
        }
    }

    boolean notterminate = true;
    int linksize;

    while (notterminate) {
        final List<Future<?>> futures = new ArrayList<Future<?>>();
        notterminate = false;
        final PriorityBlockingQueue<ModularityLink> linksort = new PriorityBlockingQueue<ModularityLink>();
        linksize = links.size();
        final int itrsize = linksize / nThreads;
        for (int i = 0; i < nThreads; i++) {
            final int passval = i;

            Future<?> foo = executor.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    boolean nt = false;
                    for (int lnknum = 0; lnknum < itrsize; lnknum++) {
                        ModularityLink ln = links.get(passval * itrsize + lnknum);
                        long nc = 0;
                        if (ln.source.neighbourcounts.containsKey(ln.target)) {
                            nc = ln.source.neighbourcounts.get(ln.target).intValue();
                        } else {
                            System.out.println("Oooops");
                        }
                        long q = nc - (ln.source.totalvolume * ln.target.totalvolume) / 2;
                        if (q > 0)
                            nt = true;
                        ln.q.set(q);
                        linksort.add(ln);
                    }
                    return nt;
                }
            });

            futures.add(foo);
        }

        for (Future<?> foo : futures) {
            try {
                notterminate = (Boolean) foo.get();
            } catch (InterruptedException interruptedCancellingAndSignalling) {
                Thread.currentThread().interrupt();
            } catch (ExecutionException wtf) {
                wtf.printStackTrace();
            }
        }

        if (!notterminate)
            break;

        // Now we take each link in the queue and add it to the maximal matching.
        ConcurrentLinkedQueue<ModularityLink> maximalmatching =
                new ConcurrentLinkedQueue<ModularityAggregator.ModularityLink>();
        ConcurrentSkipListSet<ModularityNode> vertexcheck =
                new ConcurrentSkipListSet<ModularityAggregator.ModularityNode>();
        ModularityLink top = linksort.poll();
        maximalmatching.add(top);
        vertexcheck.add(top.source);
        vertexcheck.add(top.target);
        while (!linksort.isEmpty()) {
            ModularityLink nlnk = linksort.poll();
            if (nlnk.q.intValue() < 0)
                continue;
            if (vertexcheck.contains(nlnk.source) || vertexcheck.contains(nlnk.target))
                continue;
            maximalmatching.add(nlnk);
            vertexcheck.add(nlnk.source);
            vertexcheck.add(nlnk.target);
        }

        // Now we take all the pairs in the maximal matching and fuse them.
        for (ModularityLink ln : maximalmatching) {
            ModularityNode so = ln.source;
            ModularityNode tr = ln.target;
            so.assimilate(tr);
            groups.remove(tr);
            links.remove(ln);
        }
        linksize = links.size();
        if (linksize == 1)
            notterminate = false;
    }

    /*
    final List<Future<?>> futures = new ArrayList<Future<?>>();
    Future<?> foo = executor.submit(new Runnable() {
        @Override
        public void run() {
        }
    });
    futures.add(foo);
    */

    clusterSet = new ArrayList<Set<Node>>();

    for (ModularityNode g : groups) {
        if (cancel) {
            setStatusWaiting();
            return;
        }
        Set<Node> set = new HashSet<Node>();
        clusterSet.add(set);

        for (Node n : g.nodes) {
            if (cancel) {
                setStatusWaiting();
                return;
            }
            set.add(n);
        }
    }
    if (clusterer != null) {
        graphResult = clusterer.convertClusterSet(clusterSet);
    }
    stopWatch.stop();
    System.out.println("Finished Modularity clustering algorithm.");
    System.out.println("Algorithm took " + stopWatch.toString()); // 30 = 33.487
    stopWatch.reset();
    this.result = result;
}
From source file:com.chinamobile.bcbsp.comm.CombinerTool.java
/**
 * Combines the messages in the outgoing queue, producing one combined
 * message per destination vertex.
 *
 * @param outgoingQueue the queue of outgoing messages to combine
 * @return a queue containing one combined message per destination vertex ID
 */
private ConcurrentLinkedQueue<IMessage> combine(ConcurrentLinkedQueue<IMessage> outgoingQueue) {
    // Map of outgoing queues indexed by destination vertex ID.
    TreeMap<String, ConcurrentLinkedQueue<IMessage>> outgoingQueues =
            new TreeMap<String, ConcurrentLinkedQueue<IMessage>>();
    ConcurrentLinkedQueue<IMessage> tempQueue = null;
    IMessage tempMessage = null;
    // Traverse the outgoing queue and put the messages with the same
    // dstVertexID into the same queue in the tree map.
    Iterator<IMessage> iter = outgoingQueue.iterator();
    String dstVertexID = null;
    // The result queue for return.
    ConcurrentLinkedQueue<IMessage> resultQueue = new ConcurrentLinkedQueue<IMessage>();
    while (iter.hasNext()) {
        tempMessage = iter.next();
        dstVertexID = tempMessage.getDstVertexID();
        tempQueue = outgoingQueues.get(dstVertexID);
        if (tempQueue == null) {
            tempQueue = new ConcurrentLinkedQueue<IMessage>();
        }
        tempQueue.add(tempMessage);
        outgoingQueues.put(dstVertexID, tempQueue);
    }
    // Do the combine operation for each of the outgoing queues.
    for (Entry<String, ConcurrentLinkedQueue<IMessage>> entry : outgoingQueues.entrySet()) {
        tempQueue = entry.getValue();
        tempMessage = (IMessage) this.combiner.combine(tempQueue.iterator());
        resultQueue.add(tempMessage);
    }
    outgoingQueue.clear();
    outgoingQueues.clear();
    return resultQueue;
}
From source file:com.linkedin.pinot.tools.perf.QueryRunner.java
/**
 * Use multiple threads to run queries as fast as possible.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue whenever the queue length is low, and start <code>numThreads</code> worker threads to fetch queries from the
 * queue and send them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *        them, 0 means never.
 * @throws Exception
 */
public static void multiThreadedQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);

            // Keep 20 queries inside the query queue.
            while (queryQueue.size() == 20) {
                Thread.sleep(1);

                long currentTime = System.currentTimeMillis();
                if (currentTime - reportStartTime >= reportIntervalMs) {
                    long timePassed = currentTime - startTime;
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info(
                            "Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                                    + "Average Client Time: {}ms.",
                            timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt);
                    reportStartTime = currentTime;
                    numReportIntervals++;

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to get executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                    + "Average Client Time: {}ms.",
            timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}