List of usage examples for java.util.concurrent.ConcurrentLinkedQueue
public ConcurrentLinkedQueue()
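The no-argument constructor creates an empty, unbounded, non-blocking FIFO queue that is safe for any number of producer and consumer threads. A minimal standalone example of the basic operations:

import java.util.concurrent.ConcurrentLinkedQueue;

// Minimal sketch: offer/add never block and always succeed (the queue is
// unbounded); poll/peek return null instead of throwing when the queue is empty.
public class ConcurrentLinkedQueueDemo {
    public static void main(String[] args) {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        queue.offer("first");
        queue.offer("second");
        System.out.println(queue.peek());  // "first" (head is not removed)
        System.out.println(queue.poll());  // "first" (head is removed)
        System.out.println(queue.poll());  // "second"
        System.out.println(queue.poll());  // null when empty (no exception)
    }
}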
From source file:com.numenta.taurus.TaurusBaseActivity.java
/**
 * Track background tasks, making sure to synchronize the background task
 * lifecycle with the activity lifecycle.
 *
 * @see #onPause()
 */
public void trackBackgroundTask(AsyncTask task) {
    if (_taskList == null) {
        _taskList = new ConcurrentLinkedQueue<AsyncTask>();
    }
    _taskList.add(task);
}
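Note that the lazy initialization of _taskList is only safe if trackBackgroundTask is always called from a single thread (on Android, the UI thread); two racing callers could each observe null and create separate queues. A hedged sketch of an eagerly initialized variant (TaskTracker and Runnable are stand-ins for the Android activity and AsyncTask types):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

// Illustrative variant: an eager, final field avoids the check-then-act race
// that lazy initialization has when the method can be called from several threads.
public class TaskTracker {
    private final Queue<Runnable> taskList = new ConcurrentLinkedQueue<>();

    public void trackBackgroundTask(Runnable task) {
        taskList.add(task); // ConcurrentLinkedQueue.add never blocks
    }

    public void cancelAll() {
        Runnable task;
        while ((task = taskList.poll()) != null) {
            // cancellation/cleanup logic would go here
        }
    }
}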
From source file:com.linkedin.pinot.tools.perf.QueryRunner.java
/**
 * Use multiple threads to run queries as fast as possible.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue whenever the queue length is low, and start <code>numThreads</code> worker threads to fetch queries from the
 * queue and send them.
 * <p>The main thread is responsible for collecting and logging statistics periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>The query runner stops when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void multiThreadedQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exceptions and are already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);

            // Keep 20 queries inside the query queue.
            while (queryQueue.size() == 20) {
                Thread.sleep(1);

                long currentTime = System.currentTimeMillis();
                if (currentTime - reportStartTime >= reportIntervalMs) {
                    long timePassed = currentTime - startTime;
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info("Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
                            timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt);
                    reportStartTime = currentTime;
                    numReportIntervals++;

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries to be executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info("Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                    + "Average Client Time: {}ms.",
            timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
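ConcurrentLinkedQueue is unbounded, so the runner above throttles the producer by polling size() until the workers drain the queue below 20. A stripped-down sketch of that backpressure loop (ThrottledProducerDemo is illustrative; note that size() is O(n) for this class, so a bounded BlockingQueue is usually a better fit when strict limits matter):

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Sketch of the producer-throttling pattern: workers poll a shared
// ConcurrentLinkedQueue, and the producer sleeps while the queue is "full".
public class ThrottledProducerDemo {
    private static final int MAX_BUFFERED = 20; // mirrors the "keep 20 queries" rule above

    public static void main(String[] args) throws InterruptedException {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        ExecutorService workers = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 4; i++) {
            workers.submit(() -> {
                while (!Thread.currentThread().isInterrupted()) {
                    String task = queue.poll(); // non-blocking; null when empty
                    if (task != null) {
                        // send the query here
                    }
                }
            });
        }
        for (int i = 0; i < 1000; i++) {
            while (queue.size() >= MAX_BUFFERED) { // crude backpressure
                Thread.sleep(1);
            }
            queue.add("query-" + i);
        }
        workers.shutdownNow(); // demo only: interrupts workers without draining
        workers.awaitTermination(5, TimeUnit.SECONDS);
    }
}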
From source file:org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector.java
/**
 * Sweep phase of gc candidate deletion.
 * <p>
 * Performs the following steps depending upon the type of the blob store, refer
 * {@link org.apache.jackrabbit.oak.plugins.blob.SharedDataStore.Type}:
 *
 * <ul>
 * <li>Shared
 * <ul>
 * <li>Merge all marked references (from the mark phase run independently) available in the data store meta
 * store (from all configured independent repositories).
 * <li>Retrieve all blob ids available.
 * <li>Diff the 2 sets above to retrieve the list of blob ids not used.
 * <li>Delete only blobs created after
 * (earliest time stamp of the marked references - #maxLastModifiedInterval) from the above set.
 * </ul>
 * </li>
 * <li>Default
 * <ul>
 * <li>Mark phase already run.
 * <li>Retrieve all blob ids available.
 * <li>Diff the 2 sets above to retrieve the list of blob ids not used.
 * <li>Delete only blobs created after
 * (time stamp of the marked references - #maxLastModifiedInterval).
 * </ul>
 * </li>
 * </ul>
 *
 * @param fs the garbage collector file state
 * @param markStart the start time of mark to take as reference for deletion
 * @return the number of blobs deleted
 * @throws Exception the exception
 */
protected long sweep(GarbageCollectorFileState fs, long markStart) throws Exception {
    long earliestRefAvailTime;
    // Merge all the blob references available from all the reference files in the data store meta store.
    // Only go ahead if the merge succeeded.
    try {
        earliestRefAvailTime = GarbageCollectionType.get(blobStore).mergeAllMarkedReferences(blobStore, fs);
        LOG.debug("Earliest reference available for timestamp [{}]", earliestRefAvailTime);
        earliestRefAvailTime = (earliestRefAvailTime < markStart ? earliestRefAvailTime : markStart);
    } catch (Exception e) {
        return 0;
    }

    // Find all blob references after iterating over the whole repository
    (new BlobIdRetriever(fs)).call();

    // Calculate the references not used
    difference(fs);
    long count = 0;
    long deleted = 0;

    long lastMaxModifiedTime = getLastMaxModifiedTime(earliestRefAvailTime);
    LOG.debug("Starting sweep phase of the garbage collector");
    LOG.debug("Sweeping blobs with modified time > than the configured max deleted time ({}). ",
            timestampToString(lastMaxModifiedTime));

    ConcurrentLinkedQueue<String> exceptionQueue = new ConcurrentLinkedQueue<String>();

    LineIterator iterator = FileUtils.lineIterator(fs.getGcCandidates(), Charsets.UTF_8.name());
    List<String> ids = newArrayList();

    while (iterator.hasNext()) {
        ids.add(iterator.next());

        if (ids.size() >= getBatchCount()) {
            count += ids.size();
            deleted += sweepInternal(ids, exceptionQueue, lastMaxModifiedTime);
            ids = newArrayList();
        }
    }
    if (!ids.isEmpty()) {
        count += ids.size();
        deleted += sweepInternal(ids, exceptionQueue, lastMaxModifiedTime);
    }

    BufferedWriter writer = null;
    try {
        if (!exceptionQueue.isEmpty()) {
            writer = Files.newWriter(fs.getGarbage(), Charsets.UTF_8);
            saveBatchToFile(newArrayList(exceptionQueue), writer);
        }
    } finally {
        LineIterator.closeQuietly(iterator);
        IOUtils.closeQuietly(writer);
    }

    if (!exceptionQueue.isEmpty()) {
        LOG.warn("Unable to delete some blob entries from the blob store. Details around such blob entries can "
                + "be found in [{}]", fs.getGarbage().getAbsolutePath());
    }
    if (count != deleted) {
        LOG.warn("Deleted only [{}] blob entries from the [{}] candidates identified. This may happen if the "
                + "blob modified time is > than the max deleted time ({})",
                deleted, count, timestampToString(lastMaxModifiedTime));
    }

    // Remove all the merged marked references
    GarbageCollectionType.get(blobStore).removeAllMarkedReferences(blobStore);
    LOG.debug("Ending sweep phase of the garbage collector");
    return deleted;
}
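The sweep above uses a ConcurrentLinkedQueue to collect failures from batch work without aborting the whole run: one bad entry is recorded and processing continues. A hedged, self-contained sketch of that error-collection pattern (BatchErrorCollector and delete are illustrative names, not Oak's API):

import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;

// Worker batches record failures in a shared ConcurrentLinkedQueue instead of
// throwing, so the failed ids can be reported or retried after the run.
public class BatchErrorCollector {
    private final ConcurrentLinkedQueue<String> failedIds = new ConcurrentLinkedQueue<>();

    /** Process one batch; failed ids land in the queue for later reporting. */
    public int processBatch(List<String> ids) {
        int succeeded = 0;
        for (String id : ids) {
            try {
                delete(id);
                succeeded++;
            } catch (Exception e) {
                failedIds.add(id); // safe to call from many worker threads at once
            }
        }
        return succeeded;
    }

    private void delete(String id) throws Exception {
        // real deletion would happen here
    }

    public boolean hadFailures() {
        return !failedIds.isEmpty();
    }
}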
From source file:com.huobi.demo.socketio.core.IOConnection.java
/**
 * Transport connected.
 *
 * {@link IOTransport} calls this when a connection is established.
 */
public synchronized void transportConnected() {
    setState(STATE_READY);
    if (reconnectTask != null) {
        reconnectTask.cancel();
        reconnectTask = null;
    }
    resetTimeout();
    if (transport.canSendBulk()) {
        ConcurrentLinkedQueue<String> outputBuffer = this.outputBuffer;
        this.outputBuffer = new ConcurrentLinkedQueue<String>();
        try {
            // DEBUG
            String[] texts = outputBuffer.toArray(new String[outputBuffer.size()]);
            logger.debug("Bulk start:");
            for (String text : texts) {
                logger.debug("> " + text);
            }
            logger.debug("Bulk end");
            // DEBUG END
            transport.sendBulk(texts);
        } catch (IOException e) {
            this.outputBuffer = outputBuffer;
        }
    } else {
        String text;
        while ((text = outputBuffer.poll()) != null) {
            sendPlain(text);
        }
    }
    this.keepAliveInQueue = false;
}
From source file:org.sunnycode.zkws.socketio.impl.IOConnection.java
/**
 * Transport connected.
 *
 * {@link IOTransport} calls this when a connection is established.
 */
public synchronized void transportConnected() {
    setState(STATE_READY);
    if (reconnectTask != null) {
        reconnectTask.cancel();
        reconnectTask = null;
    }
    resetTimeout();
    if (transport.canSendBulk()) {
        ConcurrentLinkedQueue<String> outputBuffer = this.outputBuffer;
        this.outputBuffer = new ConcurrentLinkedQueue<String>();
        try {
            // DEBUG
            String[] texts = outputBuffer.toArray(new String[outputBuffer.size()]);
            logger.info("Bulk start:");
            for (String text : texts) {
                logger.info("> " + text);
            }
            logger.info("Bulk end");
            // DEBUG END
            transport.sendBulk(texts);
        } catch (IOException e) {
            this.outputBuffer = outputBuffer;
        }
    } else {
        String text;
        while ((text = outputBuffer.poll()) != null) {
            sendPlain(text);
        }
    }
    this.keepAliveInQueue = false;
}
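Both IOConnection variants use the same move: swap the live buffer for a fresh queue, snapshot the old one with toArray, send the snapshot in bulk, and restore the old queue if the send fails. A reduced sketch of that swap-and-drain pattern (OutputBufferDemo and BulkSender are illustrative stand-ins for the socket.io types):

import java.io.IOException;
import java.util.concurrent.ConcurrentLinkedQueue;

// Writers keep adding to the current queue lock-free; flush() atomically (under
// the monitor) replaces it, so the snapshot it sends can no longer grow.
public class OutputBufferDemo {
    interface BulkSender {
        void sendBulk(String[] messages) throws IOException;
    }

    private volatile ConcurrentLinkedQueue<String> outputBuffer = new ConcurrentLinkedQueue<>();

    public void queueMessage(String text) {
        outputBuffer.add(text);
    }

    public synchronized void flush(BulkSender sender) {
        ConcurrentLinkedQueue<String> snapshot = outputBuffer;
        outputBuffer = new ConcurrentLinkedQueue<>(); // new writers go to the fresh queue
        String[] texts = snapshot.toArray(new String[0]);
        try {
            sender.sendBulk(texts);
        } catch (IOException e) {
            outputBuffer = snapshot; // restore unsent messages on failure
        }
    }
}

As in the originals, a failed send discards anything queued between the swap and the restore; the pattern accepts that small window in exchange for writers never taking a lock.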
From source file:com.ibm.crail.tools.CrailBenchmark.java
void readSequentialAsync(String filename, int size, int loop, int batch) throws Exception {
    System.out.println("readSequentialAsync, filename " + filename + ", size " + size + ", loop " + loop
            + ", batch " + batch);

    ConcurrentLinkedQueue<CrailBuffer> bufferQueue = new ConcurrentLinkedQueue<CrailBuffer>();
    for (int i = 0; i < batch; i++) {
        CrailBuffer buf = null;
        if (size == CrailConstants.BUFFER_SIZE) {
            buf = fs.allocateBuffer();
        } else if (size < CrailConstants.BUFFER_SIZE) {
            CrailBuffer _buf = fs.allocateBuffer();
            _buf.clear().limit(size);
            buf = _buf.slice();
        } else {
            buf = OffHeapBuffer.wrap(ByteBuffer.allocateDirect(size));
        }
        bufferQueue.add(buf);
    }

    // warmup
    warmUp(filename, warmup, bufferQueue);

    // benchmark
    System.out.println("starting benchmark...");
    double sumbytes = 0;
    double ops = 0;
    fs.getStatistics().reset();
    CrailFile file = fs.lookup(filename).get().asFile();
    CrailInputStream directStream = file.getDirectInputStream(file.getCapacity());
    HashMap<Integer, CrailBuffer> futureMap = new HashMap<Integer, CrailBuffer>();
    LinkedBlockingQueue<Future<CrailResult>> futureQueue = new LinkedBlockingQueue<Future<CrailResult>>();
    long start = System.currentTimeMillis();
    for (int i = 0; i < batch - 1 && ops < loop; i++) {
        CrailBuffer buf = bufferQueue.poll();
        buf.clear();
        Future<CrailResult> future = directStream.read(buf);
        futureQueue.add(future);
        futureMap.put(future.hashCode(), buf);
        ops = ops + 1.0;
    }
    while (ops < loop) {
        CrailBuffer buf = bufferQueue.poll();
        buf.clear();
        Future<CrailResult> future = directStream.read(buf);
        futureQueue.add(future);
        futureMap.put(future.hashCode(), buf);

        future = futureQueue.poll();
        CrailResult result = future.get();
        buf = futureMap.get(future.hashCode());
        bufferQueue.add(buf);

        sumbytes = sumbytes + result.getLen();
        ops = ops + 1.0;
    }
    while (!futureQueue.isEmpty()) {
        Future<CrailResult> future = futureQueue.poll();
        CrailResult result = future.get();
        futureMap.get(future.hashCode());
        sumbytes = sumbytes + result.getLen();
        ops = ops + 1.0;
    }
    long end = System.currentTimeMillis();
    double executionTime = ((double) (end - start)) / 1000.0;
    double throughput = 0.0;
    double latency = 0.0;
    double sumbits = sumbytes * 8.0;
    if (executionTime > 0) {
        throughput = sumbits / executionTime / 1000.0 / 1000.0;
        latency = 1000000.0 * executionTime / ops;
    }
    directStream.close();

    System.out.println("execution time " + executionTime);
    System.out.println("ops " + ops);
    System.out.println("sumbytes " + sumbytes);
    System.out.println("throughput " + throughput);
    System.out.println("latency " + latency);
    fs.getStatistics().print("close");
}
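The benchmark above recycles buffers through a ConcurrentLinkedQueue so a fixed pool of direct buffers services a pipeline of outstanding asynchronous reads. A simplified sketch of that recycling idea, with the Crail calls replaced by a simulated async read (BufferPoolDemo and read are hypothetical):

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;

// A ConcurrentLinkedQueue as a free list: take a buffer to issue a read, and
// when the pool is exhausted, wait for the oldest read and reuse its buffer.
public class BufferPoolDemo {
    private final ConcurrentLinkedQueue<ByteBuffer> freeBuffers = new ConcurrentLinkedQueue<>();

    public BufferPoolDemo(int poolSize, int bufferSize) {
        for (int i = 0; i < poolSize; i++) {
            freeBuffers.add(ByteBuffer.allocateDirect(bufferSize));
        }
    }

    public static void main(String[] args) throws Exception {
        BufferPoolDemo pool = new BufferPoolDemo(4, 1024);
        Queue<CompletableFuture<ByteBuffer>> inFlight = new ArrayDeque<>();
        for (int op = 0; op < 16; op++) {
            ByteBuffer buf = pool.freeBuffers.poll();   // take a free buffer
            if (buf == null) {                          // pool exhausted: wait for the oldest read
                ByteBuffer done = inFlight.remove().get();
                done.clear();
                buf = done;
            }
            inFlight.add(pool.read(buf));
        }
        for (CompletableFuture<ByteBuffer> f : inFlight) {
            pool.freeBuffers.add(f.get());              // recycle on completion
        }
        System.out.println("free buffers at end: " + pool.freeBuffers.size());
    }

    private CompletableFuture<ByteBuffer> read(ByteBuffer buf) {
        return CompletableFuture.supplyAsync(() -> buf); // stand-in for a real async read
    }
}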
From source file:io.hops.ha.common.TransactionStateImpl.java
private void persistApplicationStateToRemove() throws StorageException {
    if (!applicationsStateToRemove.isEmpty()) {
        ApplicationStateDataAccess DA = (ApplicationStateDataAccess) RMStorageFactory
                .getDataAccess(ApplicationStateDataAccess.class);
        Queue<ApplicationState> appToRemove = new ConcurrentLinkedQueue<ApplicationState>();
        for (ApplicationId appId : applicationsStateToRemove) {
            appToRemove.add(new ApplicationState(appId.toString()));
        }
        DA.removeAll(appToRemove);
        //TODO remove appattempts
    }
}
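Here the queue is built and consumed without concurrent access, so ConcurrentLinkedQueue is interchangeable with any Queue implementation; the data-access layer simply receives a collection of states to remove in one call. A small sketch of the same batch-building shape (removeAll here is a hypothetical bulk call, not the HopsFS API):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

// Collect items into a queue, then hand the whole batch to a bulk operation.
public class BatchRemovalDemo {
    public static void main(String[] args) {
        Queue<String> toRemove = new ConcurrentLinkedQueue<>();
        for (String appId : new String[] { "app_1", "app_2", "app_3" }) {
            toRemove.add(appId);
        }
        removeAll(toRemove); // hypothetical bulk-removal call
    }

    private static void removeAll(Queue<String> batch) {
        String id;
        while ((id = batch.poll()) != null) {
            System.out.println("removing " + id);
        }
    }
}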
From source file:com.p3authentication.preferences.Prefs.java
private void redownload_images() {
    // TODO Auto-generated method stub
    File path = getExternalFilesDir(null);
    String baseUrl = getResources().getString(R.string.base_URL);
    enqueue = new ConcurrentLinkedQueue<>();
    String[] imageNames = getResources().getStringArray(R.array.pic_name);
    boolean downloadManagerAvailable = Splashscreen.isDownloadManagerAvailable(getApplicationContext());
    for (int index = 0; index < imageNames.length; index++) {
        File image = new File(path + "/" + imageNames[index]);
        if (image.exists()) {
            image.delete();
            if (downloadManagerAvailable) {
                downloadFile(baseUrl, imageNames[index]);
            }
        }
    }
}
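The snippet does not show what the enqueue field holds, but the shape suggests download bookkeeping shared between the UI thread and a completion callback that may run elsewhere. A hedged sketch of that kind of cross-thread tracking (DownloadTracker and both callbacks are hypothetical, not taken from this app):

import java.util.concurrent.ConcurrentLinkedQueue;

// Download ids are added by one thread and removed by another; remove(Object)
// and isEmpty() on ConcurrentLinkedQueue are safe under that interleaving.
public class DownloadTracker {
    private final ConcurrentLinkedQueue<Long> pendingDownloads = new ConcurrentLinkedQueue<>();

    public void onDownloadStarted(long downloadId) {
        pendingDownloads.add(downloadId); // e.g. UI thread
    }

    public void onDownloadComplete(long downloadId) {
        pendingDownloads.remove(downloadId); // e.g. broadcast-receiver thread
        if (pendingDownloads.isEmpty()) {
            System.out.println("all downloads finished");
        }
    }
}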
From source file:dendroscope.autumn.hybridnumber.ComputeHybridNumber.java
/**
 * Recursively compute the hybrid number.
 *
 * @param root1
 * @param root2
 * @param isReduced
 * @param previousHybrid
 * @param retry
 * @param topLevel
 * @param scoreAbove
 * @param additionalAbove
 * @return hybrid number
 */
private int computeHybridNumberRec(final Root root1, final Root root2, boolean isReduced,
        Integer previousHybrid, BitSet retry, final boolean topLevel, final int scoreAbove,
        final ValuesList additionalAbove) throws IOException, CanceledException {
    if (System.currentTimeMillis() > nextTime) {
        synchronized (progressListener) {
            nextTime += waitTime;
            waitTime *= 1.5;
            progressListener.incrementProgress();
        }
    } else
        progressListener.checkForCancel();

    // System.err.println("computeHybridNumberRec: tree1=" + Basic.toString(root1.getTaxa()) + " tree2=" + Basic.toString(root2.getTaxa()));
    // root1.reorderSubTree();
    // root2.reorderSubTree();
    if (checking) {
        root1.checkTree();
        root2.checkTree();
    }

    BitSet taxa = root1.getTaxa();

    String key = root1.toStringTreeSparse() + root2.toStringTreeSparse();
    // System.err.println("Key: " + key);
    Integer value;
    synchronized (lookupTable) {
        value = (Integer) lookupTable.get(key);
        if (value != null)
            return value;
    }

    if (!root2.getTaxa().equals(taxa))
        throw new RuntimeException("Unequal taxon sets: X=" + Basic.toString(root1.getTaxa()) + " vs "
                + Basic.toString(root2.getTaxa()));

    if (!isReduced) {
        switch (SubtreeReduction.apply(root1, root2, null)) {
        case ISOMORPHIC:
            synchronized (lookupTable) {
                lookupTable.put(key, 0);
            }
            if (topLevel) {
                bestScore.lowerTo(0);
                progressListener.setSubtask("Best score: " + bestScore);
            }
            return 0; // two trees are isomorphic, no hybrid node needed
        case REDUCED: // a reduction was performed, cannot maintain lexicographical ordering in removal loop below
            previousHybrid = null;
            break;
        case IRREDUCIBLE:
            break;
        }

        Single<Integer> placeHolderTaxa = new Single<Integer>();
        final Pair<Root, Root> clusterTrees = ClusterReduction.apply(root1, root2, placeHolderTaxa);
        final boolean retryTop = false && (previousHybrid != null && placeHolderTaxa.get() < previousHybrid);
        // if the taxa involved in the cluster reduction come before the previously removed hybrid, do full retry
        // retryTop doesn't work
        final BitSet fRetry = retry;

        if (clusterTrees != null) // will perform cluster-reduction
        {
            final Value score1 = new Value(0);
            final Value score2 = new Value(1); // because the cluster could not be reduced using a subtree
                                               // reduction, we can assume that we will need one reticulation for this

            final boolean verbose = ProgramProperties.get("verbose-HL-parallel", false);
            if (verbose)
                System.err.println("Starting parallel loop");

            final CountDownLatch countDownLatch = new CountDownLatch(2);
            final Integer fPrevious = previousHybrid;

            // setup task:
            final Task task1 = new Task(); // first of two cluster-reduction tasks
            task1.setRunnable(new Runnable() {
                public void run() {
                    try {
                        if (verbose) {
                            System.err.println("Launching thread on cluster-reduction");
                            System.err.println("Active threads " + scheduledThreadPoolExecutor.getActiveCount());
                        }
                        final ValuesList additionalAbove1 = additionalAbove.copyWithAdditionalElement(score2);
                        if (scoreAbove + additionalAbove1.sum() < bestScore.get()) {
                            int h = computeHybridNumberRec(root1, root2, false, fPrevious, fRetry, false,
                                    scoreAbove, additionalAbove1);
                            score1.set(h);
                        } else {
                            score1.set(LARGE);
                        }
                        additionalAbove1.clear();
                    } catch (Exception ex) {
                        while (countDownLatch.getCount() > 0)
                            countDownLatch.countDown();
                    }
                    countDownLatch.countDown();
                }
            });

            final Task task2 = new Task(); // second of two cluster-reduction tasks
            task2.setRunnable(new Runnable() {
                public void run() {
                    try {
                        if (verbose) {
                            System.err.println("Launching thread on cluster-reduction");
                            System.err.println("Active threads " + scheduledThreadPoolExecutor.getActiveCount());
                        }
                        final ValuesList additionalAbove2 = additionalAbove.copyWithAdditionalElement(score1);
                        if (scoreAbove + additionalAbove2.sum() < bestScore.get()) {
                            int h = computeHybridNumberRec(clusterTrees.getFirst(), clusterTrees.getSecond(),
                                    true, fPrevious, fRetry, false, scoreAbove, additionalAbove2);
                            score2.set(h);
                        } else {
                            score2.set(LARGE);
                        }
                        additionalAbove2.clear();
                    } catch (Exception ex) {
                        while (countDownLatch.getCount() > 0)
                            countDownLatch.countDown();
                    }
                    countDownLatch.countDown();
                }
            });

            // start a task in this thread
            scheduledThreadPoolExecutor.execute(task1);
            task2.run();
            task1.run(); // try to run task1 in the current thread if it hasn't yet started execution.
                         // If the task is already running or has completed, it will simply return.

            try {
                if (verbose)
                    System.err.println("waiting...");
                // wait until all tasks have completed
                countDownLatch.await();
                if (verbose)
                    System.err.println("done");
            } catch (InterruptedException e) {
                Basic.caught(e);
            }

            clusterTrees.getFirst().deleteSubTree();
            clusterTrees.getSecond().deleteSubTree();

            int total = scoreAbove + additionalAbove.sum() + score1.get() + score2.get();

            if (topLevel && (total < bestScore.get())) // score above will be zero, but put this here anyway to avoid confusion
            {
                bestScore.lowerTo(total);
                progressListener.setSubtask("Current best score: " + bestScore);
            }

            synchronized (lookupTable) {
                Integer old = (Integer) lookupTable.get(key);
                if (old == null || total < old)
                    lookupTable.put(key, total);
            }
            return score1.get() + score2.get();
        }
    }

    List<Root> leaves1 = root1.getAllLeaves();

    if (leaves1.size() <= 2) // try 2 rather than one...
    {
        return 0;
    }

    final boolean verbose = ProgramProperties.get("verbose-HL-parallel", false);
    if (verbose)
        System.err.println("Starting parallel loop");

    final CountDownLatch countDownLatch = new CountDownLatch(leaves1.size());
    final Value bestSubH = new Value(LARGE);

    // schedule all tasks to be performed
    final ConcurrentLinkedQueue<Task> queue = new ConcurrentLinkedQueue<Task>();

    for (Node leaf2remove : leaves1) {
        final BitSet taxa2remove = ((Root) leaf2remove).getTaxa();

        if (previousHybrid == null || previousHybrid < taxa2remove.nextSetBit(0)) {
            if (scoreAbove + additionalAbove.sum() + 1 >= bestScore.get())
                return LARGE; // other thread has found a better result, abort

            // setup task:
            final Task task = new Task();
            task.setRunnable(new Runnable() {
                public void run() {
                    try {
                        if (verbose) {
                            System.err.println("Launching thread on " + Basic.toString(taxa2remove));
                            System.err.println("Active threads " + scheduledThreadPoolExecutor.getActiveCount());
                        }
                        queue.remove(task);
                        if (scoreAbove + additionalAbove.sum() + 1 < bestScore.get()) {
                            Root tree1X = CopyWithTaxaRemoved.apply(root1, taxa2remove);
                            Root tree2X = CopyWithTaxaRemoved.apply(root2, taxa2remove);

                            Refine.apply(tree1X, tree2X);

                            int scoreBelow = computeHybridNumberRec(tree1X, tree2X, false,
                                    taxa2remove.nextSetBit(0), null, false, scoreAbove + 1, additionalAbove) + 1;

                            if (topLevel && scoreBelow < bestScore.get()) {
                                bestScore.lowerTo(scoreBelow);
                                progressListener.setSubtask("Current best score: " + bestScore);
                            }

                            synchronized (bestSubH) {
                                if (scoreBelow < bestSubH.get())
                                    bestSubH.set(scoreBelow);
                            }

                            tree1X.deleteSubTree();
                            tree2X.deleteSubTree();
                        }
                    } catch (Exception ex) {
                        while (countDownLatch.getCount() > 0)
                            countDownLatch.countDown();
                    }
                    countDownLatch.countDown();
                }
            });
            queue.add(task);
        } else // no task for this item, count down
        {
            countDownLatch.countDown();
            progressListener.checkForCancel();
        }
    }

    // grab one task for the current thread:
    Task taskForCurrentThread = queue.size() > 0 ? queue.poll() : null;
    // launch all others in the executor
    for (Task task : queue)
        scheduledThreadPoolExecutor.execute(task);

    // start a task in this thread
    if (taskForCurrentThread != null)
        taskForCurrentThread.run();

    // try to run other tasks from the queue. Note that any task that is already running will return immediately
    while (queue.size() > 0) {
        Task task = queue.poll();
        if (task != null)
            task.run();
    }

    try {
        if (verbose)
            System.err.println("waiting...");
        // wait until all tasks have completed
        countDownLatch.await();
        if (verbose)
            System.err.println("done");
    } catch (InterruptedException e) {
        Basic.caught(e);
        return LARGE;
    }

    // return the best value
    synchronized (lookupTable) {
        Integer old = (Integer) lookupTable.get(key);
        if (old == null || old > bestSubH.get())
            lookupTable.put(key, bestSubH.get());
    }
    return bestSubH.get();
}
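The removal loop above depends on Task objects that can be handed to the executor and also run inline by the submitting thread, executing their body at most once either way. A reduced sketch of that run-at-most-once pattern, using an AtomicBoolean in place of whatever state the Task class keeps internally (SharedTaskQueueDemo and OnceTask are illustrative names):

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;

// Tasks live in a ConcurrentLinkedQueue; the executor races the submitting
// thread to run them, and compareAndSet guarantees each body runs exactly once.
public class SharedTaskQueueDemo {
    static class OnceTask implements Runnable {
        private final AtomicBoolean started = new AtomicBoolean(false);
        private final Runnable body;
        private final CountDownLatch latch;

        OnceTask(Runnable body, CountDownLatch latch) {
            this.body = body;
            this.latch = latch;
        }

        public void run() {
            if (started.compareAndSet(false, true)) { // already started? return immediately
                try {
                    body.run();
                } finally {
                    latch.countDown();
                }
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        int n = 8;
        CountDownLatch latch = new CountDownLatch(n);
        ConcurrentLinkedQueue<OnceTask> queue = new ConcurrentLinkedQueue<>();
        for (int i = 0; i < n; i++) {
            final int id = i;
            queue.add(new OnceTask(() -> System.out.println(
                    "task " + id + " on " + Thread.currentThread().getName()), latch));
        }

        ExecutorService pool = Executors.newFixedThreadPool(2);
        for (OnceTask task : queue) {
            pool.execute(task);            // the executor may run it...
        }
        OnceTask task;
        while ((task = queue.poll()) != null) {
            task.run();                    // ...or the current thread gets there first
        }
        latch.await();
        pool.shutdown();
    }
}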
From source file:io.hops.util.DBUtility.java
public static RMNode processHopRMNodeCompsForScheduler(RMNodeComps hopRMNodeComps, RMContext rmContext)
        throws InvalidProtocolBufferException {
    org.apache.hadoop.yarn.api.records.NodeId nodeId;
    RMNode rmNode = null;
    if (hopRMNodeComps != null) {
        nodeId = ConverterUtils.toNodeId(hopRMNodeComps.getRMNodeId());
        rmNode = rmContext.getRMNodes().get(nodeId);

        // The first time we receive the RMNode; this happens when the node registers
        if (rmNode == null) {
            // Retrieve heartbeat
            boolean nextHeartbeat = true;

            // Create Resource
            Resource resource = null;
            if (hopRMNodeComps.getHopResource() != null) {
                resource = Resource.newInstance(hopRMNodeComps.getHopResource().getMemory(),
                        hopRMNodeComps.getHopResource().getVirtualCores());
            } else {
                LOG.error("ResourceOption should not be null");
                resource = Resource.newInstance(0, 0);
            }
            /*rmNode = new RMNodeImplDist(nodeId, rmContext, hopRMNodeComps.getHopRMNode().getHostName(),
                    hopRMNodeComps.getHopRMNode().getCommandPort(),
                    hopRMNodeComps.getHopRMNode().getHttpPort(),
                    ResourceTrackerService.resolve(hopRMNodeComps.getHopRMNode().getHostName()),
                    resourceOption,
                    hopRMNodeComps.getHopRMNode().getNodemanagerVersion(),
                    hopRMNodeComps.getHopRMNode().getHealthReport(),
                    hopRMNodeComps.getHopRMNode().getLastHealthReportTime(),
                    nextHeartbeat);*/
            rmNode = new RMNodeImplDist(nodeId, rmContext, hopRMNodeComps.getHopRMNode().getHostName(),
                    hopRMNodeComps.getHopRMNode().getCommandPort(), hopRMNodeComps.getHopRMNode().getHttpPort(),
                    ResourceTrackerService.resolve(hopRMNodeComps.getHopRMNode().getHostName()), resource,
                    hopRMNodeComps.getHopRMNode().getNodemanagerVersion());

            // Force Java to put the host in cache
            NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
        }

        // Update the RMNode
        if (hopRMNodeComps.getHopRMNode() != null) {
            ((RMNodeImplDist) rmNode).setState(hopRMNodeComps.getHopRMNode().getCurrentState());
        }
        if (hopRMNodeComps.getHopUpdatedContainerInfo() != null) {
            List<io.hops.metadata.yarn.entity.UpdatedContainerInfo> hopUpdatedContainerInfoList = hopRMNodeComps
                    .getHopUpdatedContainerInfo();
            if (hopUpdatedContainerInfoList != null && !hopUpdatedContainerInfoList.isEmpty()) {
                ConcurrentLinkedQueue<org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo>
                        updatedContainerInfoQueue = new ConcurrentLinkedQueue<>();

                Map<Integer, org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo> ucis
                        = new HashMap<>();
                LOG.debug(hopRMNodeComps.getRMNodeId() + " getting ucis " + hopUpdatedContainerInfoList.size()
                        + " pending event " + hopRMNodeComps.getPendingEvent().getId().getEventId());

                for (io.hops.metadata.yarn.entity.UpdatedContainerInfo hopUCI : hopUpdatedContainerInfoList) {
                    if (!ucis.containsKey(hopUCI.getUpdatedContainerInfoId())) {
                        ucis.put(hopUCI.getUpdatedContainerInfoId(),
                                new org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo(
                                        new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>(),
                                        new ArrayList<org.apache.hadoop.yarn.api.records.ContainerStatus>(),
                                        hopUCI.getUpdatedContainerInfoId()));
                    }

                    ContainerId cid = ConverterUtils.toContainerId(hopUCI.getContainerId());
                    io.hops.metadata.yarn.entity.ContainerStatus hopContainerStatus = hopRMNodeComps
                            .getHopContainersStatusMap().get(hopUCI.getContainerId());

                    org.apache.hadoop.yarn.api.records.ContainerStatus conStatus
                            = org.apache.hadoop.yarn.api.records.ContainerStatus.newInstance(cid,
                                    ContainerState.valueOf(hopContainerStatus.getState()),
                                    hopContainerStatus.getDiagnostics(), hopContainerStatus.getExitstatus());

                    // Check the ContainerStatus state to add it to the appropriate list
                    if (conStatus != null) {
                        LOG.debug("add uci for container " + conStatus.getContainerId() + " status "
                                + conStatus.getState());
                        if (conStatus.getState().equals(ContainerState.RUNNING)) {
                            ucis.get(hopUCI.getUpdatedContainerInfoId()).getNewlyLaunchedContainers()
                                    .add(conStatus);
                        } else if (conStatus.getState().equals(ContainerState.COMPLETE)) {
                            ucis.get(hopUCI.getUpdatedContainerInfoId()).getCompletedContainers().add(conStatus);
                        }
                    }
                }

                for (org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo uci : ucis
                        .values()) {
                    updatedContainerInfoQueue.add(uci);
                }

                ((RMNodeImplDist) rmNode).setUpdatedContainerInfo(updatedContainerInfoQueue);
            } else {
                LOG.debug(hopRMNodeComps.getRMNodeId()
                        + " hopUpdatedContainerInfoList = null || hopUpdatedContainerInfoList.isEmpty() "
                        + hopRMNodeComps.getPendingEvent().getId().getEventId());
            }
        } else {
            LOG.debug(hopRMNodeComps.getRMNodeId() + " hopRMNodeFull.getHopUpdatedContainerInfo()=null "
                    + hopRMNodeComps.getPendingEvent().getId().getEventId());
        }
    }
    return rmNode;
}
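The method above packages container updates into a ConcurrentLinkedQueue and hands it to the RMNode, whose scheduler thread drains the queue later. A reduced sketch of that hand-off (ContainerUpdate and NodeState are illustrative stand-ins for the YARN types; record syntax needs Java 16+):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

// One thread groups updates into a queue and hands them over; a different
// thread drains them, so the shared structure must tolerate the interleaving.
public class UpdateHandOffDemo {
    record ContainerUpdate(String containerId, String state) {}

    static class NodeState {
        private final Queue<ContainerUpdate> pendingUpdates = new ConcurrentLinkedQueue<>();

        // called by the thread that reads updates from storage
        void setUpdatedContainerInfo(Queue<ContainerUpdate> updates) {
            pendingUpdates.addAll(updates);
        }

        // called later, e.g. by a scheduler thread on the next heartbeat
        void processHeartbeat() {
            ContainerUpdate update;
            while ((update = pendingUpdates.poll()) != null) {
                System.out.println("applying " + update.containerId() + " -> " + update.state());
            }
        }
    }

    public static void main(String[] args) {
        Queue<ContainerUpdate> batch = new ConcurrentLinkedQueue<>();
        batch.add(new ContainerUpdate("container_1", "RUNNING"));
        batch.add(new ContainerUpdate("container_2", "COMPLETE"));

        NodeState node = new NodeState();
        node.setUpdatedContainerInfo(batch);
        node.processHeartbeat();
    }
}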