List of usage examples for java.util.Queue#addAll
boolean addAll(Collection<? extends E> c);
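Queue inherits addAll from java.util.Collection: it appends every element of the argument collection and returns true if the queue changed as a result; capacity-restricted implementations may throw IllegalStateException instead of returning false. Before the project examples below, here is a minimal self-contained sketch (the class name and the toy data are illustrative, not taken from any of the projects) of the two roles addAll plays throughout this page: seeding a work queue from a whole collection, and bulk-enqueueing newly discovered items during a breadth-first traversal.

import java.util.ArrayDeque;
import java.util.List;
import java.util.Map;
import java.util.Queue;

public class QueueAddAllDemo {
    public static void main(String[] args) {
        // A toy tree: each key's children are the mapped values (hypothetical data).
        Map<String, List<String>> children = Map.of(
                "root", List.of("a", "b"),
                "a", List.of("c"),
                "b", List.of(),
                "c", List.of());

        // Seed the work queue with an entire collection at once.
        Queue<String> toVisit = new ArrayDeque<>();
        toVisit.addAll(children.get("root"));

        // Drain the queue, bulk-enqueueing each node's children (BFS); prints a, b, c.
        while (!toVisit.isEmpty()) {
            String node = toVisit.poll();
            System.out.println(node);
            toVisit.addAll(children.get(node));
        }
    }
}

Note that ArrayDeque rejects null elements, so addAll throws NullPointerException if the source collection contains null. The visited-set guards seen in several examples below are what keep such traversals from looping when the structure is a graph rather than a tree.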
From source file:org.apache.kylin.metadata.model.DataModelDesc.java
private void reorderJoins(Map<String, TableDesc> tables) {
    if (joinTables.length == 0) {
        return;
    }

    // group join tables by the alias of their FK side
    Map<String, List<JoinTableDesc>> fkMap = Maps.newHashMap();
    for (JoinTableDesc joinTable : joinTables) {
        JoinDesc join = joinTable.getJoin();
        String fkSideName = join.getFKSide().getAlias();
        if (fkMap.containsKey(fkSideName)) {
            fkMap.get(fkSideName).add(joinTable);
        } else {
            List<JoinTableDesc> joinTableList = Lists.newArrayList();
            joinTableList.add(joinTable);
            fkMap.put(fkSideName, joinTableList);
        }
    }

    JoinTableDesc[] orderedJoinTables = new JoinTableDesc[joinTables.length];
    int orderedIndex = 0;

    // breadth-first traversal starting from the root fact table: addAll()
    // seeds the queue, then bulk-enqueues the joins hanging off each PK side
    Queue<JoinTableDesc> joinTableBuff = new ArrayDeque<JoinTableDesc>();
    TableDesc rootDesc = tables.get(rootFactTable);
    joinTableBuff.addAll(fkMap.get(rootDesc.getName()));
    while (!joinTableBuff.isEmpty()) {
        JoinTableDesc head = joinTableBuff.poll();
        orderedJoinTables[orderedIndex++] = head;
        String headAlias = head.getJoin().getPKSide().getAlias();
        if (fkMap.containsKey(headAlias)) {
            joinTableBuff.addAll(fkMap.get(headAlias));
        }
    }

    joinTables = orderedJoinTables;
}
From source file:it.unibo.alchemist.model.implementations.environments.AbstractEnvironment.java
/**
 * After a node movement, recomputes the neighborhood, also notifying the
 * running simulation about the modifications. This allows movement actions
 * to be defined as LOCAL (they would normally have to be considered GLOBAL).
 *
 * @param node
 *            the node that has been moved
 */
protected final void updateNeighborhood(final Node<T> node) {
    /*
     * The following optimization allows the context of reactions that
     * actually include a move to be defined as local, even though such
     * reactions would normally be considered global. This works because for
     * each node that is detached, all the dependencies are updated,
     * ensuring soundness.
     */
    if (Objects.requireNonNull(rule).isLocallyConsistent()) {
        final Neighborhood<T> newNeighborhood = rule.computeNeighborhood(Objects.requireNonNull(node), this);
        final Neighborhood<T> oldNeighborhood = neighCache.put(node.getId(), newNeighborhood);
        if (oldNeighborhood != null) {
            final Iterator<Node<T>> iter = oldNeighborhood.iterator();
            while (iter.hasNext()) {
                final Node<T> neighbor = iter.next();
                if (!newNeighborhood.contains(neighbor)) {
                    /*
                     * Neighbor lost
                     */
                    iter.remove();
                    final Neighborhood<T> neighborsNeighborhood = neighCache.get(neighbor.getId());
                    neighborsNeighborhood.removeNeighbor(node);
                    ifEngineAvailable(s -> s.neighborRemoved(node, neighbor));
                }
            }
        }
        for (final Node<T> n : newNeighborhood) {
            if (oldNeighborhood == null || !oldNeighborhood.contains(n)) {
                /*
                 * New neighbor acquired
                 */
                neighCache.get(n.getId()).addNeighbor(node);
                ifEngineAvailable(s -> s.neighborAdded(node, n));
            }
        }
    } else {
        // non-local rule: propagate updates transitively, using addAll() to
        // bulk-enqueue the operations generated by each processed node
        final Queue<Operation> operations = recursiveOperation(node);
        final TIntSet processed = new TIntHashSet(getNodesNumber());
        processed.add(node.getId());
        while (!operations.isEmpty()) {
            final Operation next = operations.poll();
            final Node<T> dest = next.destination;
            final int destId = dest.getId();
            if (!processed.contains(destId)) {
                operations.addAll(recursiveOperation(next.origin, next.destination, next.isAdd));
                processed.add(destId);
            }
        }
    }
}
From source file:com.dell.asm.asmcore.asmmanager.util.template.adjuster.ClusterAdjuster.java
/**
 * If the cluster has NO server, create predefined VDS and port groups:
 * PXE VDS - [user selects from VDS available in the datacenter]
 * PXE Port Group - [user selects from available port groups on the PXE VDS]
 * Workload VDS - [user selects from VDS available in the datacenter]
 *
 * @param cluster
 * @param allNetworks
 * @param hasServer
 */
private void refineClusterByServerNetworks(ServiceTemplateComponent cluster,
        List<PartitionNetworks> allNetworks, boolean hasServer) {
    // check if it is a vCenter cluster
    ServiceTemplateCategory vdsCategory = cluster
            .getTemplateResource(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID);
    if (vdsCategory == null)
        return;

    int v = 1;
    ServiceTemplateSetting vdsNameZero = cluster.getParameter(
            ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
            ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_VDS_NAME_ID);

    // newly added VDS members
    List<ServiceTemplateSetting> vdsAdded = new ArrayList<>();
    ServiceTemplateSetting vdsNew = null;

    if (hasServer) {
        // restore option "Enable VMware vSAN" if a server is associated with the cluster
        ServiceTemplateSetting enableVmwareVsan = cluster.getParameter(
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_ID,
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_CLUSTER_VSAN_ID);
        if (enableVmwareVsan != null) {
            enableVmwareVsan.setHideFromTemplate(false);
        }

        // first need to count some networks
        List<Network> iscsiNets = new ArrayList<>();
        List<Network> vsanNets = new ArrayList<>();
        for (PartitionNetworks pn : allNetworks) {
            for (Network nConfig : pn.getNetworks()) {
                if (NetworkType.STORAGE_ISCSI_SAN.equals(nConfig.getType())) {
                    // replace "iscsi" in the network ID by a combination of sorted iSCSI net IDs
                    List<String> sortedNetIDs = pn.sortISCSINetworks();
                    nConfig.setId(StringUtils.join(sortedNetIDs, "-"));
                    // will need to count later
                    if (!iscsiNets.contains(nConfig)) {
                        iscsiNets.add(nConfig);
                    }
                }
            }
        }

        for (PartitionNetworks pn : allNetworks) {
            pn.sortById();
            ServiceTemplateSetting vdsName = cluster.getParameter(
                    ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
                    ServiceTemplateClientUtil.createVDSID(pn.getId()));
            String uiGroupName = "VDS " + v;
            if (vdsName == null) {
                vdsName = ServiceTemplateClientUtil.createVDSNameSetting(cluster, vdsCategory,
                        ServiceTemplateClientUtil.createVDSID(pn.getId()), "VDS Name", uiGroupName,
                        ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
            } else {
                // upgrade options only
                vdsName.setOptions(ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
            }
            // hard reset for UI group
            vdsName.setGroup(uiGroupName);
            vdsName.setHideFromTemplate(false);
            vdsAdded.add(vdsName);

            // $new$
            vdsNew = vdsCategory.getParameter(
                    ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsName.getId());
            if (vdsNew != null) {
                vdsNew.setGroup(uiGroupName);
                vdsAdded.add(vdsNew);
            }

            if (pn.hasManagementNetwork()) {
                vdsName.setRequired(true);
                if (vdsNew != null)
                    vdsNew.setRequired(true);
            }

            // for each network find or create a port group; addAll() seeds the
            // FIFO of iSCSI networks whose names/IDs are consumed below
            Queue<NetworkObject> iscsiNetworkFIFO = new LinkedList<>();
            iscsiNetworkFIFO.addAll(pn.getIscsiNetworks());
            for (Network nConfig : pn.getNetworks()) {
                Queue<NetworkObject> currentQueue = null;
                String portGroupName = nConfig.getName() + " Port Group";
                int cnt = 1;
                if (NetworkType.STORAGE_ISCSI_SAN.equals(nConfig.getType())) {
                    currentQueue = iscsiNetworkFIFO;
                    if (iscsiNets.size() == 1) {
                        cnt = 2; // 2 PGs, but only if we have one iSCSI network
                    }
                }

                boolean incrementPortGroup = (cnt > 1 && currentQueue.size() == 1);
                // multiple PGs for certain networks
                for (int j = 1; j <= cnt; j++) {
                    String currGroupName = portGroupName;
                    String portGroupSuffix = "";
                    if (incrementPortGroup) {
                        portGroupSuffix = " " + j;
                    }
                    String pgNetworkID = nConfig.getId();
                    // there can be only 1 or 2 iSCSI networks, but we always
                    // need 2 port groups for such networks; names and IDs have
                    // to be picked from the dedicated list
                    if (pgNetworkID.contains("-") && currentQueue != null) {
                        NetworkObject networkObject = currentQueue.remove();
                        if (networkObject != null) {
                            pgNetworkID = networkObject.getId();
                            currGroupName = networkObject.getName() + " Port Group";
                        }
                    }
                    currGroupName += portGroupSuffix;

                    ServiceTemplateSetting vdsPG = ServiceTemplateClientUtil.getPortGroup(cluster, pn.getId(),
                            currGroupName, pgNetworkID, j, true);
                    if (vdsPG == null) {
                        // unexpected...
                        LOGGER.error("getPortGroup returned null for VDS ID=" + pn.getId() + ", PG=" + currGroupName);
                        throw new LocalizedWebApplicationException(Response.Status.INTERNAL_SERVER_ERROR,
                                AsmManagerMessages.internalError());
                    }
                    vdsPG.setDisplayName(currGroupName);
                    vdsPG.setHideFromTemplate(false);
                    vdsPG.setGroup(uiGroupName);
                    vdsAdded.add(vdsPG);

                    // $new$
                    vdsNew = vdsCategory.getParameter(
                            ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsPG.getId());
                    if (vdsNew != null) {
                        vdsNew.setGroup(uiGroupName);
                        vdsAdded.add(vdsNew);
                    }

                    if (NetworkType.PXE.equals(nConfig.getType())
                            || NetworkType.HYPERVISOR_MANAGEMENT.equals(nConfig.getType())) {
                        vdsPG.setRequired(true);
                        if (vdsNew != null)
                            vdsNew.setRequired(true);
                    }
                }
            }
            v++;
        }
    } else {
        // remove option "Enable VMware vSAN" if no server is associated with the cluster
        ServiceTemplateSetting enableVmwareVsan = cluster.getParameter(
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_ID,
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_CLUSTER_VSAN_ID);
        if (enableVmwareVsan != null) {
            enableVmwareVsan.setHideFromTemplate(true);
        }

        ServiceTemplateSetting vdsName = cluster.getParameter(
                ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
                ServiceTemplateClientUtil.createVDSID("pxe"));
        if (vdsName == null) {
            vdsName = ServiceTemplateClientUtil.createVDSNameSetting(cluster, vdsCategory,
                    ServiceTemplateClientUtil.createVDSID("pxe"), "VDS Name", "PXE VDS",
                    ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
            vdsName.setHideFromTemplate(false);
        } else {
            vdsName.setOptions(ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
        }
        vdsAdded.add(vdsName);

        vdsNew = vdsCategory
                .getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsName.getId());
        if (vdsNew != null) {
            vdsAdded.add(vdsNew);
        }

        // PXE Port Group
        ServiceTemplateSetting vdsPG = ServiceTemplateClientUtil.getPortGroup(cluster, "pxe", "PXE Port Group",
                "pxe", 1, true);
        if (vdsPG == null) {
            // unexpected...
            LOGGER.error("getPortGroup returned null for VDS ID=pxe" + ", PG=PXE Port Group");
            throw new LocalizedWebApplicationException(Response.Status.INTERNAL_SERVER_ERROR,
                    AsmManagerMessages.internalError());
        }
        vdsPG.setDisplayName("PXE Port Group");
        vdsPG.setHideFromTemplate(false);
        vdsAdded.add(vdsPG);

        vdsNew = vdsCategory
                .getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsPG.getId());
        if (vdsNew != null) {
            vdsAdded.add(vdsNew);
        }

        vdsName = cluster.getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_ESX_CLUSTER_COMP_VDS_ID,
                ServiceTemplateClientUtil.createVDSID("workload"));
        if (vdsName == null) {
            vdsName = ServiceTemplateClientUtil.createVDSNameSetting(cluster, vdsCategory,
                    ServiceTemplateClientUtil.createVDSID("workload"), "VDS Name", "Workload VDS",
                    ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
            vdsName.setHideFromTemplate(false);
            vdsName.setRequired(true);
        } else {
            vdsName.setOptions(ServiceTemplateClientUtil.copyOptions(vdsNameZero.getOptions(), null));
        }
        vdsAdded.add(vdsName);

        vdsNew = vdsCategory
                .getParameter(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CREATE_NEW_PREFIX + vdsName.getId());
        if (vdsNew != null) {
            vdsNew.setRequired(true);
            vdsAdded.add(vdsNew);
        }
    }

    // remove old VDS names / PGs
    List<ServiceTemplateSetting> toRemove = new ArrayList<>();
    for (ServiceTemplateSetting vdsName : vdsCategory.getParameters()) {
        if (!vdsName.getId().contains(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_VDS_NAME_ID + "::")
                && !vdsName.getId()
                        .contains(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_CLUSTER_VDS_PG_ID + "::"))
            continue;
        toRemove.add(vdsName);
    }
    vdsCategory.getParameters().removeAll(toRemove);
    // re-add new parameters
    vdsCategory.getParameters().addAll(vdsAdded);
}
From source file:org.apache.hadoop.hive.ql.QueryPlan.java
/**
 * Generate the operator graph and operator list for the given task based on
 * the operators corresponding to that task.
 *
 * @param task
 *          api.Task which needs its operator graph populated
 * @param topOps
 *          the set of top operators from which the operator graph for the
 *          task is hanging
 */
private void populateOperatorGraph(org.apache.hadoop.hive.ql.plan.api.Task task,
        Collection<Operator<? extends OperatorDesc>> topOps) {

    task.setOperatorGraph(new org.apache.hadoop.hive.ql.plan.api.Graph());
    task.getOperatorGraph().setNodeType(NodeType.OPERATOR);

    Queue<Operator<? extends OperatorDesc>> opsToVisit = new LinkedList<Operator<? extends OperatorDesc>>();
    Set<Operator<? extends OperatorDesc>> opsVisited = new HashSet<Operator<? extends OperatorDesc>>();
    opsToVisit.addAll(topOps);
    while (opsToVisit.peek() != null) {
        Operator<? extends OperatorDesc> op = opsToVisit.remove();
        opsVisited.add(op);
        // populate the operator
        org.apache.hadoop.hive.ql.plan.api.Operator operator =
                new org.apache.hadoop.hive.ql.plan.api.Operator();
        operator.setOperatorId(op.getOperatorId());
        operator.setOperatorType(op.getType());
        task.addToOperatorList(operator);
        // done processing the operator
        if (op.getChildOperators() != null) {
            org.apache.hadoop.hive.ql.plan.api.Adjacency entry =
                    new org.apache.hadoop.hive.ql.plan.api.Adjacency();
            entry.setAdjacencyType(AdjacencyType.CONJUNCTIVE);
            entry.setNode(op.getOperatorId());
            for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
                entry.addToChildren(childOp.getOperatorId());
                if (!opsVisited.contains(childOp)) {
                    opsToVisit.add(childOp);
                }
            }
            task.getOperatorGraph().addToAdjacencyList(entry);
        }
    }
}
From source file:org.apache.hadoop.hive.ql.QueryPlan.java
/**
 * Extract all the counters from tasks and operators.
 */
private void extractCounters() throws IOException {
    Queue<Task<? extends Serializable>> tasksToVisit = new LinkedList<Task<? extends Serializable>>();
    Set<Task<? extends Serializable>> tasksVisited = new HashSet<Task<? extends Serializable>>();
    tasksToVisit.addAll(rootTasks);
    while (tasksToVisit.peek() != null) {
        Task<? extends Serializable> task = tasksToVisit.remove();
        tasksVisited.add(task);
        // add children to tasksToVisit
        if (task.getChildTasks() != null) {
            for (Task<? extends Serializable> childTask : task.getChildTasks()) {
                if (!tasksVisited.contains(childTask)) {
                    tasksToVisit.add(childTask);
                }
            }
        }
        if (task.getId() == null) {
            continue;
        }
        if (started.contains(task.getId()) && done.contains(task.getId())) {
            continue;
        }
        // get the counters for the task
        counters.put(task.getId(), task.getCounters());
        // check if task is started
        if (task.started()) {
            started.add(task.getId());
        }
        if (task.done()) {
            done.add(task.getId());
        }
        if (task instanceof ExecDriver) {
            ExecDriver mrTask = (ExecDriver) task;
            if (mrTask.mapStarted()) {
                started.add(task.getId() + "_MAP");
            }
            if (mrTask.mapDone()) {
                done.add(task.getId() + "_MAP");
            }
            if (mrTask.hasReduce()) {
                if (mrTask.reduceStarted()) {
                    started.add(task.getId() + "_REDUCE");
                }
                if (mrTask.reduceDone()) {
                    done.add(task.getId() + "_REDUCE");
                }
            }
        } else if (task instanceof ConditionalTask) {
            ConditionalTask cTask = (ConditionalTask) task;
            for (Task<? extends Serializable> listTask : cTask.getListTasks()) {
                if (!tasksVisited.contains(listTask)) {
                    tasksToVisit.add(listTask);
                }
            }
        }
    }
}
From source file:org.opendatakit.aggregate.format.structure.KmlGeoTraceNGeoShapeGenerator.java
private void recursiveElementSearchToFindRepeats(SubmissionSet submissionSet,
        Queue<SubmissionSet> submissionSetLevelsToExamine, StringBuilder placemarks) {
    List<SubmissionValue> values = submissionSet.getSubmissionValues();
    if (values == null || values.isEmpty()) {
        return;
    }
    for (SubmissionValue value : values) {
        if (value instanceof SubmissionRepeat) {
            SubmissionRepeat repeat = (SubmissionRepeat) value;
            List<SubmissionSet> repeatSets = repeat.getSubmissionSets();
            if (getGeoElementParent().equals(repeat.getElement())) {
                // found the correct repeat, generate placemarks
                for (SubmissionSet set : repeatSets) {
                    placemarks.append(generatePlacemark(set));
                }
            } else {
                submissionSetLevelsToExamine.addAll(repeatSets);
            }
        }
    }
}
From source file:org.apache.hadoop.hive.ql.QueryPlan.java
/**
 * Populate api.QueryPlan from exec structures. This includes constructing the
 * dependency graphs of stages and operators.
 *
 * @throws IOException
 */
private void populateQueryPlan() throws IOException {
    query.setStageGraph(new org.apache.hadoop.hive.ql.plan.api.Graph());
    query.getStageGraph().setNodeType(NodeType.STAGE);

    Queue<Task<? extends Serializable>> tasksToVisit = new LinkedList<Task<? extends Serializable>>();
    Set<Task<? extends Serializable>> tasksVisited = new HashSet<Task<? extends Serializable>>();
    tasksToVisit.addAll(rootTasks);
    while (tasksToVisit.size() != 0) {
        Task<? extends Serializable> task = tasksToVisit.remove();
        tasksVisited.add(task);
        // populate stage
        org.apache.hadoop.hive.ql.plan.api.Stage stage = new org.apache.hadoop.hive.ql.plan.api.Stage();
        stage.setStageId(task.getId());
        stage.setStageType(task.getType());
        query.addToStageList(stage);

        if (task instanceof ExecDriver) {
            // populate map task
            ExecDriver mrTask = (ExecDriver) task;
            org.apache.hadoop.hive.ql.plan.api.Task mapTask = new org.apache.hadoop.hive.ql.plan.api.Task();
            mapTask.setTaskId(stage.getStageId() + "_MAP");
            mapTask.setTaskType(TaskType.MAP);
            stage.addToTaskList(mapTask);
            populateOperatorGraph(mapTask, mrTask.getWork().getMapWork().getAliasToWork().values());

            // populate reduce task
            if (mrTask.hasReduce()) {
                org.apache.hadoop.hive.ql.plan.api.Task reduceTask =
                        new org.apache.hadoop.hive.ql.plan.api.Task();
                reduceTask.setTaskId(stage.getStageId() + "_REDUCE");
                reduceTask.setTaskType(TaskType.REDUCE);
                stage.addToTaskList(reduceTask);
                Collection<Operator<? extends OperatorDesc>> reducerTopOps =
                        new ArrayList<Operator<? extends OperatorDesc>>();
                reducerTopOps.add(mrTask.getWork().getReduceWork().getReducer());
                populateOperatorGraph(reduceTask, reducerTopOps);
            }
        } else {
            org.apache.hadoop.hive.ql.plan.api.Task otherTask = new org.apache.hadoop.hive.ql.plan.api.Task();
            otherTask.setTaskId(stage.getStageId() + "_OTHER");
            otherTask.setTaskType(TaskType.OTHER);
            stage.addToTaskList(otherTask);
        }

        if (task instanceof ConditionalTask) {
            org.apache.hadoop.hive.ql.plan.api.Adjacency listEntry =
                    new org.apache.hadoop.hive.ql.plan.api.Adjacency();
            listEntry.setAdjacencyType(AdjacencyType.DISJUNCTIVE);
            listEntry.setNode(task.getId());
            ConditionalTask t = (ConditionalTask) task;
            for (Task<? extends Serializable> listTask : t.getListTasks()) {
                if (t.getChildTasks() != null) {
                    org.apache.hadoop.hive.ql.plan.api.Adjacency childEntry =
                            new org.apache.hadoop.hive.ql.plan.api.Adjacency();
                    childEntry.setAdjacencyType(AdjacencyType.DISJUNCTIVE);
                    childEntry.setNode(listTask.getId());
                    // done processing the task
                    for (Task<? extends Serializable> childTask : t.getChildTasks()) {
                        childEntry.addToChildren(childTask.getId());
                        if (!tasksVisited.contains(childTask)) {
                            tasksToVisit.add(childTask);
                        }
                    }
                    query.getStageGraph().addToAdjacencyList(childEntry);
                }
                listEntry.addToChildren(listTask.getId());
                if (!tasksVisited.contains(listTask)) {
                    tasksToVisit.add(listTask);
                }
            }
            query.getStageGraph().addToAdjacencyList(listEntry);
        } else if (task.getChildTasks() != null) {
            org.apache.hadoop.hive.ql.plan.api.Adjacency entry =
                    new org.apache.hadoop.hive.ql.plan.api.Adjacency();
            entry.setAdjacencyType(AdjacencyType.CONJUNCTIVE);
            entry.setNode(task.getId());
            // done processing the task
            for (Task<? extends Serializable> childTask : task.getChildTasks()) {
                entry.addToChildren(childTask.getId());
                if (!tasksVisited.contains(childTask)) {
                    tasksToVisit.add(childTask);
                }
            }
            query.getStageGraph().addToAdjacencyList(entry);
        }
    }
}
From source file:io.cloudslang.lang.tools.build.verifier.SlangContentVerifier.java
public PreCompileResult createModelsAndValidate(String directoryPath, boolean shouldValidateDescription,
        boolean shouldValidateCheckstyle) {
    Validate.notEmpty(directoryPath, "You must specify a path");
    Validate.isTrue(new File(directoryPath).isDirectory(),
            "Directory path argument '" + directoryPath + "' does not lead to a directory");

    Map<String, Executable> slangModels = new HashMap<>();
    Collection<File> slangFiles = slangCompilationService.listSlangFiles(new File(directoryPath), true);
    loggingService.logEvent(Level.INFO, "Start compiling all slang files under: " + directoryPath);
    loggingService.logEvent(Level.INFO, slangFiles.size() + " .sl files were found");
    loggingService.logEvent(Level.INFO, "");

    Queue<RuntimeException> exceptions = new ArrayDeque<>();
    String errorMessagePrefixMetadata = "";
    for (File slangFile : slangFiles) {
        Executable sourceModel = null;
        try {
            errorMessagePrefixMetadata = "Failed to extract metadata for file: '"
                    + slangFile.getAbsoluteFile() + "'.\n";
            String errorMessagePrefixCompilation = "Failed to compile file: '"
                    + slangFile.getAbsoluteFile() + "'.\n";

            Validate.isTrue(slangFile.isFile(),
                    "file path '" + slangFile.getAbsolutePath() + "' must lead to a file");

            SlangSource slangSource = SlangSource.fromFile(slangFile);
            ExecutableModellingResult preCompileResult = slangCompiler.preCompileSource(slangSource);
            sourceModel = preCompileResult.getExecutable();
            // collect (rather than throw) the errors from each phase
            exceptions.addAll(prependPrefix(preCompileResult.getErrors(), errorMessagePrefixCompilation));

            MetadataModellingResult metadataResult = metadataExtractor
                    .extractMetadataModellingResult(slangSource, shouldValidateCheckstyle);
            Metadata sourceMetadata = metadataResult.getMetadata();
            exceptions.addAll(prependPrefix(metadataResult.getErrors(), errorMessagePrefixMetadata));

            if (sourceModel != null) {
                int size = exceptions.size();
                staticValidator.validateSlangFile(slangFile, sourceModel, sourceMetadata,
                        shouldValidateDescription, exceptions);
                if (size == exceptions.size()) {
                    slangModels.put(getUniqueName(sourceModel), sourceModel);
                }
            }
        } catch (Exception e) {
            String errorMessage = errorMessagePrefixMetadata + e.getMessage();
            loggingService.logEvent(Level.ERROR, errorMessage);
            exceptions.add(new RuntimeException(errorMessage, e));
            if (e instanceof MetadataMissingException && sourceModel != null) {
                slangModels.put(getUniqueName(sourceModel), sourceModel);
            }
        }
    }
    if (slangFiles.size() != slangModels.size()) {
        exceptions.add(new RuntimeException("Some Slang files were not pre-compiled.\nFound: "
                + slangFiles.size() + " executable files in path: '" + directoryPath
                + "' But managed to create slang models for only: " + slangModels.size()));
    }
    PreCompileResult preCompileResult = new PreCompileResult();
    preCompileResult.addExceptions(exceptions);
    preCompileResult.addResults(slangModels);
    return preCompileResult;
}
From source file:org.geoserver.wms.legendgraphic.ColorMapLegendCreator.java
public synchronized BufferedImage getLegend() {
    // do we already have a legend?
    if (legend == null) {
        // init all the values
        init();

        // now build the individual legends
        //
        // header
        //
        // XXX no header for the moment

        //
        // body
        //
        final Queue<BufferedImage> body = createBody();

        //
        // footer
        //
        if (bandInformation) {
            final Queue<BufferedImage> footer = createFooter();
            body.addAll(footer);
        }

        // now merge them
        legend = mergeRows(body);
    }
    return legend;
}
From source file:org.kuali.kra.award.awardhierarchy.AwardHierarchyServiceImpl.java
public Map<String, AwardHierarchy> getAwardHierarchy(AwardHierarchy anyNode, List<String> order) {
    Map<String, AwardHierarchy> result = new HashMap<String, AwardHierarchy>();
    if (anyNode == null) {
        return result;
    }

    Map<String, Object> values = new HashMap<String, Object>();
    // Find all hierarchy BOs for the root award number. If the node we were
    // given is itself the root, its root award number is 'DEFAULT_AWARD_NUMBER',
    // so we use its own award number; otherwise we use its root award number.
    String rootAwardNumber = StringUtils.equals(Award.DEFAULT_AWARD_NUMBER, anyNode.getRootAwardNumber())
            ? anyNode.getAwardNumber()
            : anyNode.getRootAwardNumber();
    values.put("rootAwardNumber", rootAwardNumber);
    values.put("active", true);
    List<AwardHierarchy> hierarchyList = (List<AwardHierarchy>) legacyDataAdapter
            .findMatchingOrderBy(AwardHierarchy.class, values, "awardNumber", true);

    if (!hierarchyList.isEmpty()) {
        for (AwardHierarchy hierarchy : hierarchyList) {
            result.put(hierarchy.getAwardNumber(), hierarchy);
            // clear children in case this was already called and cached BOs were returned from OJB
            hierarchy.getChildren().clear();
        }
        AwardHierarchy rootNode = result.get(rootAwardNumber);
        for (AwardHierarchy hierarchy : result.values()) {
            hierarchy.setRoot(rootNode);
            AwardHierarchy parent = result.get(hierarchy.getParentAwardNumber());
            if (parent != null) {
                parent.getChildren().add(hierarchy);
                hierarchy.setParent(parent);
            }
        }
        for (AwardHierarchy hierarchy : result.values()) {
            Collections.sort(hierarchy.getChildren(), new Comparator<AwardHierarchy>() {
                public int compare(AwardHierarchy arg0, AwardHierarchy arg1) {
                    return arg0.getAwardNumber().compareTo(arg1.getAwardNumber());
                }
            });
        }
        // breadth-first walk of the hierarchy: addAll() enqueues each node's
        // children, so 'order' lists the award numbers level by level
        Queue<AwardHierarchy> queue = new LinkedList<AwardHierarchy>();
        queue.add(rootNode);
        while (!queue.isEmpty()) {
            AwardHierarchy node = queue.poll();
            order.add(node.getAwardNumber());
            queue.addAll(node.getChildren());
        }
    }
    return result;
}