List of usage examples for java.util.Queue.isEmpty()
boolean isEmpty();
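Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the drain pattern that most of them share: use isEmpty() as the loop guard and poll() to remove the head until the queue reports empty.

import java.util.ArrayDeque;
import java.util.Queue;

public class QueueIsEmptyDemo {
    public static void main(String[] args) {
        Queue<String> queue = new ArrayDeque<>();
        queue.add("first");
        queue.add("second");

        // isEmpty() is the usual loop guard when draining a queue:
        // poll() removes and returns the head, so the loop ends once the queue is empty.
        while (!queue.isEmpty()) {
            String head = queue.poll();
            System.out.println("processing " + head);
        }

        System.out.println("queue drained: " + queue.isEmpty()); // prints true
    }
}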
From source file:password.pwm.svc.report.ReportService.java
private void updateCacheFromLdap()
        throws ChaiUnavailableException, ChaiOperationException, PwmOperationalException, PwmUnrecoverableException {
    LOGGER.debug(PwmConstants.REPORTING_SESSION_LABEL, "beginning process to updating user cache records from ldap");
    if (status != STATUS.OPEN) {
        return;
    }
    cancelFlag = false;
    reportStatus = new ReportStatusInfo(settings.getSettingsHash());
    reportStatus.setInProgress(true);
    reportStatus.setStartDate(new Date());
    try {
        final Queue<UserIdentity> allUsers = new LinkedList<>(getListOfUsers());
        reportStatus.setTotal(allUsers.size());
        while (status == STATUS.OPEN && !allUsers.isEmpty() && !cancelFlag) {
            final Date startUpdateTime = new Date();
            final UserIdentity userIdentity = allUsers.poll();
            try {
                if (updateCachedRecordFromLdap(userIdentity)) {
                    reportStatus.setUpdated(reportStatus.getUpdated() + 1);
                }
            } catch (Exception e) {
                String errorMsg = "error while updating report cache for " + userIdentity.toString() + ", cause: ";
                errorMsg += e instanceof PwmException ? ((PwmException) e).getErrorInformation().toDebugStr()
                        : e.getMessage();
                final ErrorInformation errorInformation;
                errorInformation = new ErrorInformation(PwmError.ERROR_REPORTING_ERROR, errorMsg);
                LOGGER.error(PwmConstants.REPORTING_SESSION_LABEL, errorInformation.toDebugStr());
                reportStatus.setLastError(errorInformation);
                reportStatus.setErrors(reportStatus.getErrors() + 1);
            }
            reportStatus.setCount(reportStatus.getCount() + 1);
            reportStatus.getEventRateMeter().markEvents(1);
            final TimeDuration totalUpdateTime = TimeDuration.fromCurrent(startUpdateTime);
            if (settings.isAutoCalcRest()) {
                avgTracker.addSample(totalUpdateTime.getTotalMilliseconds());
                Helper.pause(avgTracker.avgAsLong());
            } else {
                Helper.pause(settings.getRestTime().getTotalMilliseconds());
            }
        }
        if (cancelFlag) {
            reportStatus.setLastError(
                    new ErrorInformation(PwmError.ERROR_SERVICE_NOT_AVAILABLE, "report cancelled by operator"));
        }
    } finally {
        reportStatus.setFinishDate(new Date());
        reportStatus.setInProgress(false);
    }
    LOGGER.debug(PwmConstants.REPORTING_SESSION_LABEL,
            "update user cache process completed: " + JsonUtil.serialize(reportStatus));
}
From source file:org.commonjava.maven.ext.io.rest.DefaultTranslator.java
/**
 * Translate the versions.
 * <pre>{@code
 * [ {
 *     "groupId": "com.google.guava",
 *     "artifactId": "guava",
 *     "version": "13.0.1"
 * } ]
 * }</pre>
 * This equates to a List of ProjectVersionRef.
 *
 * <pre>{@code
 * {
 *     "productNames": [],
 *     "productVersionIds": [],
 *     "repositoryGroup": "",
 *     "gavs": [
 *         {
 *             "groupId": "com.google.guava",
 *             "artifactId": "guava",
 *             "version": "13.0.1"
 *         } ]
 * }
 * }</pre>
 * There may be a lot of them, possibly causing timeouts or other issues.
 * This is mitigated by splitting them into smaller chunks when an error occurs and retrying.
 */
public Map<ProjectVersionRef, String> translateVersions(List<ProjectVersionRef> projects) {
    init(rgm);

    final Map<ProjectVersionRef, String> result = new HashMap<>();
    final Queue<Task> queue = new ArrayDeque<>();

    if (initialRestMaxSize != 0) {
        // Presplit
        final List<List<ProjectVersionRef>> partition = ListUtils.partition(projects, initialRestMaxSize);
        for (List<ProjectVersionRef> p : partition) {
            queue.add(new Task(rgm, p, endpointUrl + REPORTS_LOOKUP_GAVS));
        }
        logger.debug("For initial sizing of {} have split the queue into {} ", initialRestMaxSize, queue.size());
    } else {
        queue.add(new Task(rgm, projects, endpointUrl + REPORTS_LOOKUP_GAVS));
    }

    while (!queue.isEmpty()) {
        Task task = queue.remove();
        task.executeTranslate();
        if (task.isSuccess()) {
            result.putAll(task.getResult());
        } else {
            if (task.canSplit() && task.getStatus() == 504) {
                List<Task> tasks = task.split();
                logger.warn(
                        "Failed to translate versions for task @{} due to {}, splitting and retrying. Chunk size was: {} and new chunk size {} in {} segments.",
                        task.hashCode(), task.getStatus(), task.getChunkSize(), tasks.get(0).getChunkSize(),
                        tasks.size());
                queue.addAll(tasks);
            } else {
                if (task.getStatus() < 0) {
                    logger.debug("Caught exception calling server with message {}", task.getErrorMessage());
                } else {
                    logger.debug("Did not get status {} but received {}", SC_OK, task.getStatus());
                }
                if (task.getStatus() > 0) {
                    throw new RestException("Received response status " + task.getStatus() + " with message: "
                            + task.getErrorMessage());
                } else {
                    throw new RestException("Received response status " + task.getStatus() + " with message "
                            + task.getErrorMessage());
                }
            }
        }
    }
    return result;
}
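The pattern worth noting here is the self-refilling work queue: a failed chunk is split and re-enqueued, and the isEmpty() guard keeps the loop alive until every chunk, including retries, has been handled. A stripped-down sketch of the same idea, with a hypothetical process() call standing in for the REST invocation (none of these names come from the project above):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

final class ChunkRetryDemo {
    // Hypothetical stand-in for the remote call; "fails" when a chunk is too large.
    private static boolean process(List<Integer> chunk) {
        return chunk.size() <= 2;
    }

    public static void main(String[] args) {
        Queue<List<Integer>> queue = new ArrayDeque<>();
        queue.add(new ArrayList<>(List.of(1, 2, 3, 4, 5, 6, 7, 8)));

        // The queue grows whenever a chunk fails and is split, so isEmpty()
        // is the only reliable termination condition for the loop.
        while (!queue.isEmpty()) {
            List<Integer> chunk = queue.remove();
            if (process(chunk)) {
                System.out.println("processed " + chunk);
            } else if (chunk.size() > 1) {
                int mid = chunk.size() / 2;
                queue.add(new ArrayList<>(chunk.subList(0, mid)));
                queue.add(new ArrayList<>(chunk.subList(mid, chunk.size())));
            } else {
                throw new IllegalStateException("chunk of size 1 still failing: " + chunk);
            }
        }
    }
}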
From source file:sadl.models.pdrta.PDRTA.java
public void toDOTLang(Appendable ap, double minP, boolean withInput, StateColoring sc) {
    // Write transitions with high probability
    final StringBuilder sb = new StringBuilder();
    final Queue<PDRTAState> q = new ArrayDeque<>();
    final Set<PDRTAState> found = new HashSet<>();
    q.add(root);
    found.add(root);
    while (!q.isEmpty()) {
        final PDRTAState s = q.remove();
        for (int i = 0; i < input.getAlphSize(); i++) {
            final Set<Entry<Integer, Interval>> ins = s.getIntervals(i).entrySet();
            for (final Entry<Integer, Interval> eIn : ins) {
                final Interval in = eIn.getValue();
                final double p = s.getStat().getTransProb(i, in);
                final PDRTAState t = in.getTarget();
                if (t != null && p >= minP) {
                    if (!found.contains(t)) {
                        q.add(t);
                        found.add(t);
                    }
                    // Write transition
                    sb.append(s.getIndex());
                    sb.append(" -> ");
                    sb.append(t.getIndex());
                    sb.append(" [ label = \"");
                    sb.append(getSymbol(i));
                    sb.append(" [");
                    sb.append(in.getBegin());
                    sb.append(", ");
                    sb.append(in.getEnd());
                    sb.append("] p=");
                    sb.append(p);
                    if (withInput) {
                        sb.append(" n=");
                        sb.append(in.getTails().size());
                    }
                    sb.append("\" ];\n");
                }
            }
        }
    }
    try {
        writeStatData(ap, found);
        // Write automaton in DOT language
        ap.append("digraph PDRTA {\n");
        ap.append("rankdir=LR;\n");
        ap.append("node[style = filled, fillcolor = white, shape = circle];\n");
        ap.append("\"\"[style = invis, shape = none, margin = 0, width = 0, heigth = 0];\n");
        ap.append("\"\" -> 0;\n");
        // Write states
        for (final PDRTAState s : states.valueCollection()) {
            if (found.contains(s)) {
                ap.append(Integer.toString(s.getIndex()));
                ap.append(" [ xlabel = \"");
                ap.append(Double.toString(s.getStat().getTailEndProb()));
                ap.append("\"");
                if (sc != null) {
                    if (sc.isRed(s)) {
                        ap.append(", fillcolor = \"#FFA9A9\"");
                    } else if (sc.isBlue(s)) {
                        ap.append(", fillcolor = \"#A9D1FF\"");
                    }
                }
                ap.append(" ];\n");
            }
        }
        // Add transitions
        ap.append(sb.toString());
        ap.append("}");
    } catch (final IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
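The queue here drives a breadth-first traversal of the automaton graph, with the found set guarding against revisiting states that are reachable along several paths. A minimal sketch of that traversal pattern, using a made-up adjacency map instead of PDRTA states:

import java.util.ArrayDeque;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;

final class BfsDemo {
    public static void main(String[] args) {
        // Hypothetical adjacency list standing in for the automaton's transitions (contains a cycle).
        Map<String, List<String>> edges = Map.of(
                "root", List.of("a", "b"),
                "a", List.of("b"),
                "b", List.of("root"));

        Queue<String> queue = new ArrayDeque<>();
        Set<String> found = new HashSet<>();
        queue.add("root");
        found.add("root");

        // Classic BFS: isEmpty() ends the loop once every reachable node has been dequeued,
        // and the 'found' set keeps the cycle from re-enqueueing nodes forever.
        while (!queue.isEmpty()) {
            String node = queue.remove();
            System.out.println("visit " + node);
            for (String next : edges.getOrDefault(node, List.of())) {
                if (found.add(next)) { // add() returns false if the node was already seen
                    queue.add(next);
                }
            }
        }
    }
}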
From source file:org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy.java
/**
 * Order executors based on how many in and out connections they will potentially need to make.
 * First order components by the number of in and out connections each will have. Then iterate through the
 * sorted list of components. For each component, sort its neighbors by how many connections they will have
 * to make with that component. Add an executor from this component and then from each neighboring component
 * in sorted order. Do this until there is nothing left to schedule.
 *
 * @param td The topology the executors belong to
 * @param unassignedExecutors a collection of unassigned executors that need to be assigned. Should only try to assign executors from this list
 * @return a list of executors in sorted order
 */
private List<ExecutorDetails> orderExecutors(TopologyDetails td,
        Collection<ExecutorDetails> unassignedExecutors) {
    Map<String, Component> componentMap = td.getComponents();
    List<ExecutorDetails> execsScheduled = new LinkedList<>();

    Map<String, Queue<ExecutorDetails>> compToExecsToSchedule = new HashMap<>();
    for (Component component : componentMap.values()) {
        compToExecsToSchedule.put(component.id, new LinkedList<ExecutorDetails>());
        for (ExecutorDetails exec : component.execs) {
            if (unassignedExecutors.contains(exec)) {
                compToExecsToSchedule.get(component.id).add(exec);
            }
        }
    }

    Set<Component> sortedComponents = sortComponents(componentMap);
    sortedComponents.addAll(componentMap.values());

    for (Component currComp : sortedComponents) {
        Map<String, Component> neighbors = new HashMap<String, Component>();
        for (String compId : (List<String>) ListUtils.union(currComp.children, currComp.parents)) {
            neighbors.put(compId, componentMap.get(compId));
        }
        Set<Component> sortedNeighbors = sortNeighbors(currComp, neighbors);
        Queue<ExecutorDetails> currCompExesToSched = compToExecsToSchedule.get(currComp.id);

        boolean flag = false;
        do {
            flag = false;
            if (!currCompExesToSched.isEmpty()) {
                execsScheduled.add(currCompExesToSched.poll());
                flag = true;
            }
            for (Component neighborComp : sortedNeighbors) {
                Queue<ExecutorDetails> neighborCompExesToSched = compToExecsToSchedule.get(neighborComp.id);
                if (!neighborCompExesToSched.isEmpty()) {
                    execsScheduled.add(neighborCompExesToSched.poll());
                    flag = true;
                }
            }
        } while (flag);
    }
    return execsScheduled;
}
From source file:jef.database.DbUtils.java
/**
 * Executes the given database tasks in parallel and waits for all of them to complete.
 *
 * @param tasks the tasks to execute
 * @throws SQLException if any task fails
 */
public static void parallelExecute(List<DbTask> tasks) throws SQLException {
    CountDownLatch latch = new CountDownLatch(tasks.size());
    Queue<SQLException> exceptions = new ConcurrentLinkedQueue<SQLException>();
    Queue<Throwable> throwables = new ConcurrentLinkedQueue<Throwable>();
    for (DbTask task : tasks) {
        task.prepare(latch, exceptions, throwables);
        DbUtils.es.execute(task);
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        throw new SQLException(e);
    }
    if (!exceptions.isEmpty()) {
        throw DbUtils.wrapExceptions(exceptions);
    }
    if (!throwables.isEmpty()) {
        throw DbUtils.toRuntimeException(throwables.peek());
    }
}
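Here isEmpty() is used after the latch releases, to check whether any worker pushed a failure into a concurrent queue. A distilled sketch of the same coordination idea, with plain Runnables and a thread pool in place of the project's DbTask and executor (all names here are made up for illustration):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class ParallelErrorCollectionDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        int taskCount = 8;
        CountDownLatch latch = new CountDownLatch(taskCount);
        // ConcurrentLinkedQueue lets worker threads report failures without extra locking.
        Queue<Exception> errors = new ConcurrentLinkedQueue<>();

        for (int i = 0; i < taskCount; i++) {
            final int id = i;
            pool.execute(() -> {
                try {
                    if (id % 3 == 0) {
                        throw new IllegalStateException("task " + id + " failed");
                    }
                } catch (Exception e) {
                    errors.add(e);
                } finally {
                    latch.countDown();
                }
            });
        }

        latch.await();
        pool.shutdown();

        // Once every worker has counted down, isEmpty() tells the coordinator whether anything went wrong.
        if (!errors.isEmpty()) {
            System.out.println(errors.size() + " task(s) failed, first: " + errors.peek().getMessage());
        } else {
            System.out.println("all tasks succeeded");
        }
    }
}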
From source file:it.scoppelletti.mobilepower.app.FragmentLayoutController.java
/**
 * Rebuilds the sequence in which the fragments are laid out in the panels.
 *
 * @param fragmentMgr Fragment manager.
 * @param fragmentQueue Fragments.
 * @return Identifier of the last element pushed onto the back stack.
 */
private int arrangeFragments(FragmentManager fragmentMgr,
        Queue<FragmentLayoutController.FragmentEntry> fragmentQueue) {
    int i;
    int frameCount, tnId, lastTnId;
    FragmentLayoutController.FragmentEntry entry;
    FragmentSupport newFragment, oldFragment;
    FragmentLayoutController.FragmentEntry[] frames;
    FragmentTransaction fragmentTn = null;

    frameCount = 1;
    frames = new FragmentLayoutController.FragmentEntry[myFrameCount];
    Arrays.fill(frames, null);
    lastTnId = -1;

    while (!fragmentQueue.isEmpty()) {
        tnId = -1;
        entry = fragmentQueue.remove();

        try {
            fragmentTn = fragmentMgr.beginTransaction();

            if (frameCount == myFrameCount) {
                // All panels are occupied:
                // move each fragment to the previous panel in order to
                // free the last one.
                for (i = 0; i < frameCount; i++) {
                    if (frames[i] == null) {
                        // Initially the first panel appears empty even though
                        // it is actually occupied by the main (non-detail)
                        // fragment.
                        continue;
                    }

                    oldFragment = frames[i].getFragment();
                    newFragment = (i > 0) ? oldFragment.cloneFragment() : null;
                    fragmentTn.remove(oldFragment.asFragment());
                    frames[i] = null;

                    if (newFragment != null) {
                        fragmentTn.replace(myFrameIds[i - 1], newFragment.asFragment(), entry.getTag());
                        frames[i - 1] = new FragmentLayoutController.FragmentEntry(newFragment, entry.getTag());
                    }
                }
                frameCount--;
            }

            fragmentTn.add(myFrameIds[frameCount], entry.getFragment().asFragment(), entry.getTag());
            frames[frameCount++] = entry;
            fragmentTn.addToBackStack(null);
        } finally {
            if (fragmentTn != null) {
                tnId = fragmentTn.commit();
                fragmentTn = null;
            }
        }

        if (tnId >= 0) {
            lastTnId = tnId;
        }
    }
    return lastTnId;
}
From source file:org.dspace.app.xmlui.aspect.administrative.group.EditGroupForm.java
/**
 * Method to extensively check whether the first group has the second group as a distant
 * parent. This is used to avoid creating cycles like A->B, B->C, C->D, D->A which leads
 * all the groups involved to essentially include themselves.
 */
private boolean isDescendant(Group descendant, Group ancestor, List<UUID> memberGroupIDs) throws SQLException {
    Queue<Group> toVisit = new LinkedList<Group>();
    Group currentGroup;

    toVisit.offer(ancestor);

    // Initialize by adding a list of our current list of group members.
    for (UUID groupid : memberGroupIDs) {
        Group member = groupService.find(context, groupid);
        toVisit.offer(member);
    }

    while (!toVisit.isEmpty()) {
        // 1. Grab a group from the queue
        currentGroup = toVisit.poll();

        // 2. See if it's the descendant we're looking for
        if (currentGroup.equals(descendant)) {
            return true;
        }

        // 3. If not, add that group's children to the queue
        for (Group nextBatch : currentGroup.getMemberGroups()) {
            toVisit.offer(nextBatch);
        }
    }
    return false;
}
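The same reachability check can be written against a plain membership map; a small sketch under that assumption (group names and the MEMBERS map are hypothetical, and a seen set is added so the sketch also terminates on graphs that already contain cycles):

import java.util.ArrayDeque;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;

final class CycleCheckDemo {
    // Hypothetical membership map: group -> groups it directly contains.
    static final Map<String, List<String>> MEMBERS = Map.of(
            "A", List.of("B"),
            "B", List.of("C"));

    // Returns true if 'descendant' is reachable from 'ancestor', i.e. adding
    // 'ancestor' as a member of 'descendant' would close a cycle.
    static boolean isDescendant(String descendant, String ancestor) {
        Queue<String> toVisit = new ArrayDeque<>();
        Set<String> seen = new HashSet<>();
        toVisit.offer(ancestor);

        while (!toVisit.isEmpty()) {
            String current = toVisit.poll();
            if (current.equals(descendant)) {
                return true;
            }
            for (String member : MEMBERS.getOrDefault(current, List.of())) {
                if (seen.add(member)) {
                    toVisit.offer(member);
                }
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(isDescendant("C", "A")); // true: A -> B -> C
        System.out.println(isDescendant("A", "C")); // false
    }
}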
From source file:org.onebusaway.uk.network_rail.gtfs_realtime.graph.PositionBerthToStanoxGraphMain.java
private void explore(Set<RawBerthNode> connections, RawStanoxNode stanoxNode) {
    Queue<OrderedRawBerthNode> queue = new PriorityQueue<OrderedRawBerthNode>();
    Set<RawBerthNode> visited = new HashSet<RawBerthNode>();
    int openCount = 0;
    for (RawBerthNode connection : connections) {
        queue.add(new OrderedRawBerthNode(connection, null, 0.0));
        openCount++;
    }
    Map<RawBerthNode, RawBerthNode> parents = new HashMap<RawBerthNode, RawBerthNode>();

    while (!queue.isEmpty()) {
        OrderedRawBerthNode currentNode = queue.poll();
        RawBerthNode node = currentNode.getNode();

        boolean isOpen = currentNode.isOpen();
        if (isOpen) {
            openCount--;
        } else if (openCount == 0) {
            return;
        }
        if (visited.contains(node)) {
            continue;
        }
        visited.add(node);
        parents.put(node, currentNode.getParent());

        Set<RawStanoxNode> stanoxes = node.getStanox();
        if (stanoxes.size() > 0 && !stanoxes.contains(stanoxNode)) {
            _log.info(node + " stanoxes=" + stanoxes + " " + currentNode.getDistance() + " open=" + openCount);
            RawBerthNode c = node;
            while (c != null) {
                _log.info(" " + c);
                c = parents.get(c);
            }
            isOpen = false;
        }

        for (Map.Entry<RawBerthNode, List<Integer>> entry : node.getOutgoing().entrySet()) {
            RawBerthNode outgoing = entry.getKey();
            int avgDuration = RawNode.average(entry.getValue());
            queue.add(new OrderedRawBerthNode(outgoing, node, currentNode.getDistance() + avgDuration, isOpen));
            if (isOpen) {
                openCount++;
            }
        }
    }
}
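Unlike the earlier FIFO examples, this one drains a PriorityQueue, so the isEmpty()/poll() loop visits nodes in distance order rather than insertion order (a Dijkstra-style expansion). A minimal sketch of that difference, with a hypothetical Node record playing the role of OrderedRawBerthNode:

import java.util.PriorityQueue;
import java.util.Queue;

final class PriorityDrainDemo {
    // Hypothetical node type; ordering by distance mirrors the ordered-node idea above.
    record Node(String name, double distance) implements Comparable<Node> {
        public int compareTo(Node other) {
            return Double.compare(distance, other.distance);
        }
    }

    public static void main(String[] args) {
        Queue<Node> queue = new PriorityQueue<>();
        queue.add(new Node("b", 4.5));
        queue.add(new Node("a", 1.0));
        queue.add(new Node("c", 2.5));

        // With a PriorityQueue, the usual isEmpty()/poll() drain returns elements
        // in priority order (here: smallest distance first), not insertion order.
        while (!queue.isEmpty()) {
            Node next = queue.poll();
            System.out.println(next.name() + " at distance " + next.distance());
        }
    }
}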
From source file:org.apache.kylin.metadata.model.DataModelDesc.java
private void reorderJoins(Map<String, TableDesc> tables) {
    if (joinTables.length == 0) {
        return;
    }

    Map<String, List<JoinTableDesc>> fkMap = Maps.newHashMap();
    for (JoinTableDesc joinTable : joinTables) {
        JoinDesc join = joinTable.getJoin();
        String fkSideName = join.getFKSide().getAlias();
        if (fkMap.containsKey(fkSideName)) {
            fkMap.get(fkSideName).add(joinTable);
        } else {
            List<JoinTableDesc> joinTableList = Lists.newArrayList();
            joinTableList.add(joinTable);
            fkMap.put(fkSideName, joinTableList);
        }
    }

    JoinTableDesc[] orderedJoinTables = new JoinTableDesc[joinTables.length];
    int orderedIndex = 0;

    Queue<JoinTableDesc> joinTableBuff = new ArrayDeque<JoinTableDesc>();
    TableDesc rootDesc = tables.get(rootFactTable);
    joinTableBuff.addAll(fkMap.get(rootDesc.getName()));
    while (!joinTableBuff.isEmpty()) {
        JoinTableDesc head = joinTableBuff.poll();
        orderedJoinTables[orderedIndex++] = head;
        String headAlias = head.getJoin().getPKSide().getAlias();
        if (fkMap.containsKey(headAlias)) {
            joinTableBuff.addAll(fkMap.get(headAlias));
        }
    }

    joinTables = orderedJoinTables;
}
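The dequeue-and-append loop here is effectively a breadth-first flattening of an FK-side -> join-tables map into an order where every table appears after the table it joins against. A small sketch of that idea under the same assumption (the table names and map below are made up):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Queue;

final class JoinOrderDemo {
    public static void main(String[] args) {
        // Hypothetical FK-side -> joined-tables map, analogous to fkMap above.
        Map<String, List<String>> joinsByFkSide = Map.of(
                "FACT", List.of("DIM_DATE", "DIM_USER"),
                "DIM_USER", List.of("DIM_COUNTRY"));

        List<String> ordered = new ArrayList<>();
        Queue<String> buffer = new ArrayDeque<>(joinsByFkSide.get("FACT"));

        // Draining the queue while appending each table's own joins guarantees that
        // a table never appears before the table it joins against.
        while (!buffer.isEmpty()) {
            String table = buffer.poll();
            ordered.add(table);
            buffer.addAll(joinsByFkSide.getOrDefault(table, List.of()));
        }

        System.out.println(ordered); // [DIM_DATE, DIM_USER, DIM_COUNTRY]
    }
}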
From source file:org.kuali.kra.award.awardhierarchy.AwardHierarchyServiceImpl.java
public Map<String, AwardHierarchy> getAwardHierarchy(AwardHierarchy anyNode, List<String> order) {
    Map<String, AwardHierarchy> result = new HashMap<String, AwardHierarchy>();
    if (anyNode == null) {
        return result;
    }

    Map<String, Object> values = new HashMap<String, Object>();
    // Find all hierarchy BOs for the root award number. If the node we were given is the root, its root award
    // number will be 'DEFAULT_AWARD_NUMBER', so we use its own award number; otherwise we use the root award number.
    String rootAwardNumber = StringUtils.equals(Award.DEFAULT_AWARD_NUMBER, anyNode.getRootAwardNumber())
            ? anyNode.getAwardNumber()
            : anyNode.getRootAwardNumber();
    values.put("rootAwardNumber", rootAwardNumber);
    values.put("active", true);
    List<AwardHierarchy> hierarchyList = (List<AwardHierarchy>) legacyDataAdapter
            .findMatchingOrderBy(AwardHierarchy.class, values, "awardNumber", true);

    if (!hierarchyList.isEmpty()) {
        for (AwardHierarchy hierarchy : hierarchyList) {
            result.put(hierarchy.getAwardNumber(), hierarchy);
            // Clear children in case this was already called and cached BOs were returned from OJB.
            hierarchy.getChildren().clear();
        }
        AwardHierarchy rootNode = result.get(rootAwardNumber);
        for (AwardHierarchy hierarchy : result.values()) {
            hierarchy.setRoot(rootNode);
            AwardHierarchy parent = result.get(hierarchy.getParentAwardNumber());
            if (parent != null) {
                parent.getChildren().add(hierarchy);
                hierarchy.setParent(parent);
            }
        }
        for (AwardHierarchy hierarchy : result.values()) {
            Collections.sort(hierarchy.getChildren(), new Comparator<AwardHierarchy>() {
                public int compare(AwardHierarchy arg0, AwardHierarchy arg1) {
                    return arg0.getAwardNumber().compareTo(arg1.getAwardNumber());
                }
            });
        }

        Queue<AwardHierarchy> queue = new LinkedList<AwardHierarchy>();
        queue.add(rootNode);
        while (!queue.isEmpty()) {
            AwardHierarchy node = queue.poll();
            order.add(node.getAwardNumber());
            queue.addAll(node.getChildren());
        }
    }
    return result;
}