Usage examples for java.util.Queue.size()
int size();
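Before the real-world examples, a minimal standalone sketch (not taken from any of the sources below) showing how size() tracks additions and removals:

import java.util.LinkedList;
import java.util.Queue;

public class QueueSizeDemo {
    public static void main(String[] args) {
        Queue<String> q = new LinkedList<>();
        System.out.println(q.size()); // 0: a new queue is empty
        q.add("a");
        q.add("b");
        System.out.println(q.size()); // 2: size grows with each add
        q.poll();                     // removes the head ("a")
        System.out.println(q.size()); // 1: size shrinks with each poll
    }
}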
From source file:com.googlesource.gerrit.plugins.supermanifest.JiriManifestParser.java
public static JiriProjects getProjects(GerritRemoteReader reader, String repoKey, String ref, String manifest)
        throws ConfigInvalidException, IOException {
    try (RepoMap<String, Repository> repoMap = new RepoMap<>()) {
        repoMap.put(repoKey, reader.openRepository(repoKey));
        Queue<ManifestItem> q = new LinkedList<>();
        q.add(new ManifestItem(repoKey, manifest, ref, "", false));
        HashMap<String, HashSet<String>> processedRepoFiles = new HashMap<>();
        HashMap<String, JiriProjects.Project> projectMap = new HashMap<>();
        // Breadth-first traversal of the manifest import graph.
        while (q.size() != 0) {
            ManifestItem mi = q.remove();
            Repository repo = repoMap.get(mi.repoKey);
            if (repo == null) {
                repo = reader.openRepository(mi.repoKey);
                repoMap.put(mi.repoKey, repo);
            }
            HashSet<String> processedFiles = processedRepoFiles.get(mi.repoKey);
            if (processedFiles == null) {
                processedFiles = new HashSet<String>();
                processedRepoFiles.put(mi.repoKey, processedFiles);
            }
            if (processedFiles.contains(mi.manifest)) {
                continue;
            }
            processedFiles.add(mi.manifest);
            JiriManifest m;
            try {
                m = parseManifest(repo, mi.ref, mi.manifest);
            } catch (JAXBException | XMLStreamException e) {
                throw new ConfigInvalidException("XML parse error", e);
            }
            for (JiriProjects.Project project : m.projects.getProjects()) {
                project.fillDefault();
                if (mi.revisionPinned && project.Key().equals(mi.projectKey)) {
                    project.setRevision(mi.ref);
                }
                if (projectMap.containsKey(project.Key())) {
                    if (!projectMap.get(project.Key()).equals(project)) {
                        throw new ConfigInvalidException(String.format(
                                "Duplicate conflicting project %s in manifest %s\n%s\n%s", project.Key(),
                                mi.manifest, project.toString(), projectMap.get(project.Key()).toString()));
                    }
                } else {
                    projectMap.put(project.Key(), project);
                }
            }
            URI parentURI;
            try {
                parentURI = new URI(mi.manifest);
            } catch (URISyntaxException e) {
                throw new ConfigInvalidException("Invalid parent URI", e);
            }
            for (JiriManifest.LocalImport l : m.imports.getLocalImports()) {
                ManifestItem tw = new ManifestItem(mi.repoKey, parentURI.resolve(l.getFile()).getPath(),
                        mi.ref, mi.projectKey, mi.revisionPinned);
                q.add(tw);
            }
            for (JiriManifest.Import i : m.imports.getImports()) {
                i.fillDefault();
                URI uri;
                try {
                    uri = new URI(i.getRemote());
                } catch (URISyntaxException e) {
                    throw new ConfigInvalidException("Invalid URI", e);
                }
                String iRepoKey = new Project.NameKey(StringUtils.strip(uri.getPath(), "/")).toString();
                String iRef = i.getRevision();
                boolean revisionPinned = true;
                if (iRef.isEmpty()) {
                    iRef = REFS_HEADS + i.getRemotebranch();
                    revisionPinned = false;
                }
                ManifestItem tmi = new ManifestItem(iRepoKey, i.getManifest(), iRef, i.Key(), revisionPinned);
                q.add(tmi);
            }
        }
        return new JiriProjects(projectMap.values().toArray(new JiriProjects.Project[0]));
    }
}
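The method above is a breadth-first worklist: `q.size() != 0` is the loop guard (equivalent to `!q.isEmpty()`), and a processed-files set keeps cyclic imports from looping forever. A stripped-down sketch of the same pattern, with a hypothetical Node type used only for illustration:

import java.util.*;

class Worklist {
    // Hypothetical node type; stands in for ManifestItem above.
    static class Node {
        List<Node> children = new ArrayList<>();
    }

    static int visitAll(Node root) {
        Queue<Node> q = new LinkedList<>();
        Set<Node> seen = new HashSet<>();
        q.add(root);
        int visited = 0;
        while (q.size() != 0) {      // same guard as getProjects above
            Node n = q.remove();
            if (!seen.add(n)) {
                continue;            // already processed; skip duplicates
            }
            visited++;
            q.addAll(n.children);    // enqueue newly discovered work
        }
        return visited;
    }
}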
From source file:com.clustercontrol.jobmanagement.util.JobMultiplicityCache.java
/**
 * Returns the number of jobs currently running on the given facility.
 *
 * @param facilityId the facility to look up
 * @return the size of the facility's running queue, or 0 if no queue exists
 */
public static Integer getRunningMultiplicity(String facilityId) {
    try {
        _lock.readLock();
        Map<String, Queue<JobSessionNodeEntityPK>> runningCache = getRunningCache();
        Queue<JobSessionNodeEntityPK> runningQueue = runningCache.get(facilityId);
        return runningQueue == null ? 0 : runningQueue.size();
    } finally {
        _lock.readUnlock();
    }
}
From source file:com.clustercontrol.jobmanagement.util.JobMultiplicityCache.java
/**
 * Returns the number of jobs currently waiting on the given facility.
 *
 * @param facilityId the facility to look up
 * @return the size of the facility's waiting queue, or 0 if no queue exists
 */
public static Integer getWaitMultiplicity(String facilityId) {
    try {
        _lock.readLock();
        Map<String, Queue<JobSessionNodeEntityPK>> waitingCache = getWaitingCache();
        Queue<JobSessionNodeEntityPK> waitingQueue = waitingCache.get(facilityId);
        return waitingQueue == null ? 0 : waitingQueue.size();
    } finally {
        _lock.readUnlock();
    }
}
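Both accessors above share one idiom: look the queue up in a per-facility map under a read lock and treat a missing queue as size 0. A generic sketch of that idiom (the map, key type, and lock here are hypothetical stand-ins):

import java.util.*;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class QueueSizes {
    private final Map<String, Queue<String>> cache = new HashMap<>();
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    int sizeFor(String key) {
        lock.readLock().lock();
        try {
            Queue<String> q = cache.get(key);
            return q == null ? 0 : q.size(); // an absent queue counts as empty
        } finally {
            lock.readLock().unlock();
        }
    }
}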
From source file:au.org.ala.delta.editor.EditorPreferences.java
/**
 * Adds the supplied filename to the top of the most recently used files.
 *
 * @param filename
 */
public static void addFileToMRU(String filename) {
    Queue<String> q = new LinkedList<String>();
    q.add(filename);
    String[] existingFiles = getPreviouslyUsedFiles();
    if (existingFiles != null) {
        for (String existingFile : existingFiles) {
            if (!q.contains(existingFile)) {
                q.add(existingFile);
            }
        }
    }
    StringBuilder b = new StringBuilder();
    for (int i = 0; i < MAX_SIZE_MRU && q.size() > 0; ++i) {
        if (i > 0) {
            b.append(MRU_SEPARATOR);
        }
        b.append(q.poll());
    }
    Preferences prefs = Preferences.userNodeForPackage(DeltaEditor.class);
    prefs.put(MRU_PREF_KEY, b.toString());
    try {
        prefs.sync();
    } catch (BackingStoreException e) {
        throw new RuntimeException(e);
    }
}
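The loop condition `i < MAX_SIZE_MRU && q.size() > 0` caps the persisted list at MAX_SIZE_MRU entries while draining the queue with poll(). A self-contained sketch of the same bounded drain (the cap and separator are placeholders):

import java.util.Queue;

class MruDemo {
    static final int MAX_SIZE_MRU = 3;       // placeholder cap

    static String joinFirst(Queue<String> q) {
        StringBuilder b = new StringBuilder();
        for (int i = 0; i < MAX_SIZE_MRU && q.size() > 0; ++i) {
            if (i > 0) {
                b.append(';');
            }
            b.append(q.poll());              // each poll shrinks q.size() by one
        }
        return b.toString();                 // at most MAX_SIZE_MRU entries
    }
}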
From source file:com.clustercontrol.jobmanagement.util.JobMultiplicityCache.java
/**
 * Checks whether another job may run on the given facility, i.e. whether the
 * number of running jobs is still below the facility's configured job multiplicity.
 *
 * @param facilityId the facility to check
 * @return true if another job may run now
 */
public static boolean isRunNow(String facilityId) {
    int multiplicity = 0;
    int queueSize = 0;
    try {
        NodeInfo nodeInfo = new RepositoryControllerBean().getNode(facilityId);
        multiplicity = nodeInfo.getJobMultiplicity();
    } catch (FacilityNotFound e) {
        m_log.warn("kick " + e.getMessage());
    } catch (HinemosUnknown e) {
        m_log.warn("kick " + e.getMessage(), e);
    }
    if (multiplicity == 0) {
        // A multiplicity of 0 means no limit is configured, so the job can always run.
        return true;
    }
    try {
        _lock.readLock();
        Map<String, Queue<JobSessionNodeEntityPK>> runningCache = getRunningCache();
        Queue<JobSessionNodeEntityPK> runningQueue = runningCache.get(facilityId);
        queueSize = runningQueue == null ? 0 : runningQueue.size();
        if (m_log.isDebugEnabled()) {
            m_log.debug("isRunNow runningQueue : " + runningQueue);
        }
        return queueSize < multiplicity;
    } finally {
        _lock.readUnlock();
    }
}
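Here size() acts as an admission check: the facility may take another job only while the running queue holds fewer entries than its multiplicity. The same check in miniature:

import java.util.Queue;

class AdmissionDemo {
    static boolean canRun(Queue<String> running, int multiplicity) {
        if (multiplicity == 0) {
            return true;                       // 0 is treated as "no limit", as above
        }
        int inFlight = (running == null) ? 0 : running.size();
        return inFlight < multiplicity;        // room for one more job
    }
}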
From source file:com.clustercontrol.jobmanagement.util.JobMultiplicityCache.java
/**
 * Tries to move the head of the facility's waiting queue into its running
 * queue, as long as the facility's job multiplicity allows another job to run.
 *
 * @param facilityId the facility whose queues should be kicked
 */
public static void kick(String facilityId) {
    m_log.debug("kick " + facilityId);
    boolean kickFlag = false;
    try {
        _lock.writeLock();
        HashMap<String, Queue<JobSessionNodeEntityPK>> waitingCache = getWaitingCache();
        HashMap<String, Queue<JobSessionNodeEntityPK>> runningCache = getRunningCache();
        Queue<JobSessionNodeEntityPK> waitingQueue = waitingCache.get(facilityId);
        Queue<JobSessionNodeEntityPK> runningQueue = runningCache.get(facilityId);
        if (waitingQueue == null || waitingQueue.size() == 0) {
            return;
        }
        if (runningQueue == null) {
            runningQueue = new LinkedList<JobSessionNodeEntityPK>();
            runningCache.put(facilityId, runningQueue);
        }
        if (isRunNow(facilityId)) {
            JpaTransactionManager jtm = new JpaTransactionManager();
            try {
                jtm.begin();
                // Peek at the head of the waiting queue without removing it yet.
                JobSessionNodeEntityPK pk = waitingQueue.peek();
                m_log.debug("kick remove waitQueue : " + pk);
                int status = new JobSessionNodeImpl().wait2running(pk);
                if (status == 0) {
                    // The node started: move it from the waiting queue to the running queue.
                    m_log.debug("kick add runningQueue : " + pk);
                    waitingQueue.poll();
                    runningQueue.offer(pk);
                    kickFlag = true;
                } else if (status == 1) {
                    // The node will not run: drop it from the waiting queue only.
                    m_log.debug("kick not add runningQueue : " + pk);
                    waitingQueue.poll();
                    kickFlag = true;
                }
                jtm.commit();
            } catch (Exception e) {
                m_log.warn("kick : " + e.getClass().getSimpleName() + ", " + e.getMessage(), e);
                jtm.rollback();
            } finally {
                jtm.close();
            }
        }
        storeWaitingCache(waitingCache);
        storeRunningCache(runningCache);
        if (m_log.isDebugEnabled()) {
            for (JobSessionNodeEntityPK q : runningQueue) {
                m_log.debug("kick runningQueue : " + q);
            }
            for (JobSessionNodeEntityPK q : waitingQueue) {
                m_log.debug("kick waitQueue : " + q);
            }
        }
        if (kickFlag) {
            kick(facilityId);
        }
    } finally {
        _lock.writeUnlock();
    }
}
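kick() moves work between two queues in a peek-validate-poll sequence, so the head stays in the waiting queue if the state transition fails. A reduced sketch of that hand-off, with a hypothetical validate predicate standing in for wait2running:

import java.util.Queue;
import java.util.function.Predicate;

class HandoffDemo {
    static <T> boolean kickOne(Queue<T> waiting, Queue<T> running, Predicate<T> validate) {
        if (waiting.size() == 0) {
            return false;              // nothing to promote
        }
        T head = waiting.peek();       // inspect without removing
        if (validate.test(head)) {
            waiting.poll();            // only now remove from waiting
            running.offer(head);       // and hand off to running
            return true;
        }
        return false;                  // head stays queued for a later kick
    }
}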
From source file:com.linkedin.pinot.core.startree.StarTreeSerDe.java
/**
 * Helper method to write the star tree nodes for Star Tree off-heap format
 *
 * @param starTree
 * @param mappedByteBuffer
 * @param offset
 */
private static void writeNodesOffHeap(StarTree starTree, MMapBuffer mappedByteBuffer, long offset) {
    int index = 0;
    Queue<StarTreeIndexNode> queue = new LinkedList<>();
    StarTreeIndexNode root = (StarTreeIndexNode) starTree.getRoot();
    queue.add(root);
    while (!queue.isEmpty()) {
        StarTreeIndexNode node = queue.poll();
        List<StarTreeIndexNode> children = getSortedChildren(node); // Returns empty list instead of null.
        int numChildren = children.size();
        int startChildrenIndex = (numChildren != 0) ? (index + queue.size() + 1)
                : StarTreeIndexNodeOffHeap.INVALID_INDEX;
        int endChildrenIndex = (numChildren != 0) ? (startChildrenIndex + numChildren - 1)
                : StarTreeIndexNodeOffHeap.INVALID_INDEX;
        offset = writeOneOffHeapNode(mappedByteBuffer, offset, node, startChildrenIndex, endChildrenIndex);
        for (StarTreeIndexNode child : children) {
            queue.add(child);
        }
        index++;
    }
}
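The interesting use of size() here is `index + queue.size() + 1`: in a breadth-first traversal, every node already sitting in the queue will be written before this node's children, so the first child's index is the current node's index plus the pending queue length plus one. A small check of that invariant on a toy tree (the node shape is hypothetical):

import java.util.*;

class BfsIndexDemo {
    static class Node {
        List<Node> children = new ArrayList<>();
    }

    static void printChildIndices(Node root) {
        Queue<Node> queue = new LinkedList<>();
        queue.add(root);
        int index = 0;
        while (!queue.isEmpty()) {
            Node node = queue.poll();
            int numChildren = node.children.size();
            // Nodes already queued are written first, so the first child of the
            // current node lands right after them in BFS order.
            int startChildrenIndex = (numChildren != 0) ? (index + queue.size() + 1) : -1;
            System.out.println("node " + index + " -> first child at " + startChildrenIndex);
            queue.addAll(node.children);
            index++;
        }
    }
}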
From source file:com.clustercontrol.jobmanagement.util.JobMultiplicityCache.java
public static String getJobQueueStr() {
    StringBuilder message = new StringBuilder();
    try {
        _lock.readLock();
        Map<String, Queue<JobSessionNodeEntityPK>> runningCache = getRunningCache();
        message.append("Running:");
        if (null != runningCache) {
            message.append('\n');
            for (Map.Entry<String, Queue<JobSessionNodeEntityPK>> facilityIdEntry : runningCache.entrySet()) {
                Queue<JobSessionNodeEntityPK> queue = facilityIdEntry.getValue();
                StringBuilder str = new StringBuilder();
                for (JobSessionNodeEntityPK pk : queue) {
                    str.append("[" + pk.getSessionId() + "," + pk.getJobunitId() + "," + pk.getJobId() + "]\n");
                }
                message.append(facilityIdEntry.getKey() + "(" + queue.size() + ")=\n" + str.toString());
            }
        } else {
            message.append(" null\n");
        }
    } finally {
        _lock.readUnlock();
    }
    try {
        _lock.readLock();
        Map<String, Queue<JobSessionNodeEntityPK>> waitingCache = getWaitingCache();
        message.append("Wait:\n");
        for (Map.Entry<String, Queue<JobSessionNodeEntityPK>> facilityIdEntry : waitingCache.entrySet()) {
            Queue<JobSessionNodeEntityPK> queue = facilityIdEntry.getValue();
            StringBuilder str = new StringBuilder();
            for (JobSessionNodeEntityPK pk : queue) {
                str.append("[" + pk.getSessionId() + "," + pk.getJobunitId() + "," + pk.getJobId() + "]\n");
            }
            message.append(facilityIdEntry.getKey() + "(" + queue.size() + ")=\n" + str);
        }
    } finally {
        _lock.readUnlock();
    }
    return message.toString();
}
From source file:com.mtgi.analytics.NullBehaviorEventPersisterImpl.java
public void persist(Queue<BehaviorEvent> events) {
    if (log.isDebugEnabled())
        log.debug("Discarded " + events.size() + " events");
}
From source file:org.jboss.aerogear.sync.jsonpatch.server.JsonPatchInMemoryDataStoreTest.java
@Test
public void getEdits() {
    final String documentId = "12345";
    final String clientId = "client1";
    final ServerInMemoryDataStore<JsonNode, JsonPatchEdit> dataStore = new ServerInMemoryDataStore<JsonNode, JsonPatchEdit>();
    final JsonPatchEdit editOne = JsonPatchEdit.withChecksum("bogus").clientVersion(0).build();
    final JsonPatchEdit editTwo = JsonPatchEdit.withChecksum("bogus").clientVersion(1).build();
    dataStore.saveEdits(editOne, documentId, clientId);
    dataStore.saveEdits(editTwo, documentId, clientId);
    final Queue<JsonPatchEdit> edits = dataStore.getEdits(documentId, clientId);
    assertThat(edits.size(), is(2));
    final Iterator<JsonPatchEdit> iterator = edits.iterator();
    assertThat(iterator.next().clientVersion(), is(0L));
    assertThat(iterator.next().clientVersion(), is(1L));
}