List of usage examples for java.util.HashSet.size()
public int size()
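Before the project examples below, a minimal self-contained sketch (not taken from any of the listed source files) of the basic contract of size(): it returns the number of distinct elements, so duplicate adds do not increase the count.

import java.util.HashSet;

public class HashSetSizeDemo {
    public static void main(String[] args) {
        HashSet<String> set = new HashSet<>();
        System.out.println(set.size());  // 0: a new set is empty

        set.add("a");
        set.add("b");
        set.add("a");                    // duplicate, ignored by the set
        System.out.println(set.size());  // 2: size counts distinct elements only

        set.remove("b");
        System.out.println(set.size());  // 1: size shrinks as elements are removed
    }
}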
From source file:ml.shifu.shifu.core.dtrain.nn.NNMaster.java
private HashSet<Integer> dropoutNodes() {
    Random random = new Random(System.currentTimeMillis());
    HashSet<Integer> droppedNodeIndices = new HashSet<Integer>();

    // from input to last hidden layer (exclude output layer)
    for (int i = this.flatNetwork.getLayerIndex().length - 1; i > 0; i--) {
        int beginNeuronIndex = this.flatNetwork.getLayerIndex()[i];
        // exclude constant neuron
        int neuronCount = this.flatNetwork.getLayerFeedCounts()[i];

        // from first neuron to last neuron in current layer
        for (int j = 0; j < neuronCount; j++) {
            if (random.nextDouble() < this.flatNetwork.getLayerDropoutRates()[i]) {
                // drop this node by adding it to the set; this set will be passed to workers
                droppedNodeIndices.add(beginNeuronIndex + j);
            }
        }
    }

    LOG.info("layerIndex:{}; layerCounts:{}; dropoutNodes:{}",
            Arrays.toString(this.flatNetwork.getLayerIndex()),
            Arrays.toString(this.flatNetwork.getLayerCounts()),
            Arrays.toString(droppedNodeIndices.toArray(new Integer[droppedNodeIndices.size()])));
    return droppedNodeIndices;
}
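The logging call above uses size() to presize the array handed to toArray, a common idiom. A standalone sketch of the same pattern (identifiers are illustrative, not from the project); note that on modern JVMs the alternative toArray(new Integer[0]) is commonly reported to perform at least as well.

import java.util.Arrays;
import java.util.HashSet;

public class SetToArray {
    public static void main(String[] args) {
        HashSet<Integer> dropped = new HashSet<>(Arrays.asList(3, 7, 11));
        // size() presizes the destination array so toArray can fill it directly
        Integer[] asArray = dropped.toArray(new Integer[dropped.size()]);
        System.out.println(Arrays.toString(asArray));
    }
}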
From source file:org.hyperic.hq.authz.server.session.RoleManagerImpl.java
private List<Role> getRolesByIds(AuthzSubject whoami, Integer[] ids, PageControl pc) throws PermissionException {
    permissionManager.check(whoami.getId(), AuthzConstants.roleResourceTypeName, AuthzConstants.rootResourceId,
            AuthzConstants.roleOpViewRole);

    Collection<Role> all = getAllRoles(whoami, pc.getSortattribute(), pc.isAscending());

    // build an index of ids
    HashSet<Integer> index = new HashSet<Integer>();
    for (int i = 0; i < ids.length; i++) {
        Integer id = ids[i];
        index.add(id);
    }
    int numToFind = index.size();

    // find the requested roles
    List<Role> roles = new ArrayList<Role>(ids.length);
    Iterator<Role> i = all.iterator();
    while (i.hasNext() && roles.size() < numToFind) {
        Role r = i.next();
        if (index.contains(r.getId())) {
            roles.add(r);
        }
    }
    return roles;
}
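Here size() is read after deduplicating the requested ids, so the scan over all roles can stop as soon as that many matches have been collected. A compact sketch of the same bounded-scan idea (types simplified to plain integers):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;

public class BoundedScan {
    public static void main(String[] args) {
        Integer[] ids = { 4, 2, 4 };                 // duplicates in the request
        List<Integer> all = Arrays.asList(1, 2, 3, 4, 5, 6);

        HashSet<Integer> index = new HashSet<>(Arrays.asList(ids));
        int numToFind = index.size();                // 2, not 3: duplicates collapse

        List<Integer> found = new ArrayList<>(numToFind);
        for (Iterator<Integer> it = all.iterator(); it.hasNext() && found.size() < numToFind;) {
            Integer candidate = it.next();
            if (index.contains(candidate)) {
                found.add(candidate);
            }
        }
        System.out.println(found); // [2, 4] -- the scan stopped once both were found
    }
}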
From source file:com.odoo.core.orm.OModel.java
private String[] updateProjection(String[] projection) {
    HashSet<String> names = new HashSet<>();
    String[] allProjection = projection;
    if (allProjection == null) {
        allProjection = projection();
    } else {
        for (String col : projection) {
            OColumn column = getColumn(col);
            if (column.isFunctionalColumn() && column.canFunctionalStore()) {
                names.add(column.getName());
            }
        }
    }
    names.addAll(Arrays.asList(allProjection));
    names.addAll(Arrays.asList(new String[] { OColumn.ROW_ID, "id", "_write_date", "_is_dirty", "_is_active" }));
    return names.toArray(new String[names.size()]);
}
From source file:nl.umcg.westrah.binarymetaanalyzer.BinaryMetaAnalysis.java
private void createSNPIndex(String outdir) throws IOException {

    HashSet<String> confineToTheseSNPs = null;
    HashSet<String> snpPreSelection = null;
    if (settings.getSNPProbeSelection() != null) {
        System.out.println("Getting SNPs from SNP/Probe selection file: " + settings.getSNPProbeSelection());
        snpPreSelection = new HashSet<String>();
        TextFile tf = new TextFile(settings.getSNPProbeSelection(), TextFile.R);
        String[] elems = tf.readLineElems(TextFile.tab);
        while (elems != null) {
            String snp = elems[0];
            snpPreSelection.add(snp);
            elems = tf.readLineElems(TextFile.tab);
        }
        tf.close();
        System.out.println("Found " + snpPreSelection.size() + " unique snps in SNP/Probe selection file.");
        if (snpPreSelection.isEmpty()) {
            System.err.println("Error: SNP/Probe selection file defined, but no SNPs found.");
            System.exit(-1);
        }
    }

    if (settings.getSNPSelection() != null) {
        System.out.println("Selecting SNPs from file: " + settings.getSNPSelection());
        confineToTheseSNPs = new HashSet<String>();
        TextFile tf = new TextFile(settings.getSNPSelection(), TextFile.R);
        ArrayList<String> snps = tf.readAsArrayList();
        tf.close();
        if (snpPreSelection == null) {
            confineToTheseSNPs.addAll(snps);
        } else {
            System.out.println("Intersecting with SNP/Probe selection.");
            for (String snp : snps) {
                if (snpPreSelection.contains(snp)) {
                    confineToTheseSNPs.add(snp);
                }
            }
        }
        System.out.println(confineToTheseSNPs.size() + " SNPs loaded.");
    } else if (snpPreSelection != null) {
        confineToTheseSNPs = snpPreSelection;
    }

    // create a list of all available SNPs
    HashSet<String> allSNPs = new HashSet<String>();
    for (BinaryMetaAnalysisDataset dataset : datasets) {
        String[] snps = dataset.getSNPs();
        for (String snp : snps) {
            if (confineToTheseSNPs == null || confineToTheseSNPs.contains(snp)) {
                allSNPs.add(snp);
            }
        }
        System.out.println(snps.length + " in dataset " + dataset.getName() + "\t" + allSNPs.size()
                + " unique SNPs found");
    }

    if (allSNPs.isEmpty()) {
        System.err.println("Error: no SNPs found that match your request");
        System.exit(-1);
    }

    // create a temporary map that maps each SNP to a meta-analysis position
    int ctr = 0;
    TObjectIntHashMap<String> snpMap = new TObjectIntHashMap<String>(allSNPs.size(), 0.85f, -9);
    snpList = new String[allSNPs.size()];
    for (String s : allSNPs) {
        snpMap.put(s, ctr);
        snpList[ctr] = s;
        ctr++;
    }

    // TODO: for faster disk access, we would need to sort the SNPs by dataset ID...

    // fill index
    snpIndex = new int[allSNPs.size()][datasets.length];
    for (int d = 0; d < datasets.length; d++) {
        for (int s = 0; s < allSNPs.size(); s++) {
            snpIndex[s][d] = -9;
        }
    }
    for (int d = 0; d < datasets.length; d++) {
        String[] snps = datasets[d].getSNPs();
        for (int s = 0; s < snps.length; s++) {
            String snp = snps[s];
            int id = snpMap.get(snp);
            if (id != -9) {
                snpIndex[id][d] = s;
            }
        }
    }

    TextFile tf = new TextFile(outdir + "snpindex.txt", TextFile.W);
    String header = "metaID";
    for (int d = 0; d < datasets.length; d++) {
        header += "\t" + datasets[d].getName() + "-sid";
    }
    tf.writeln(header);
    for (int s = 0; s < snpList.length; s++) {
        String ln = snpList[s];
        for (int d = 0; d < datasets.length; d++) {
            ln += "\t" + snpIndex[s][d];
        }
        tf.writeln(ln);
    }
    tf.close();
}
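In the example above, allSNPs.size() fixes the dimensions of snpList and snpIndex before the set is walked, and also seeds the hash map's initial capacity. A reduced sketch of assigning dense indices to set members (the Trove map is swapped for a plain HashMap to keep the sketch dependency-free):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;

public class DenseIndex {
    public static void main(String[] args) {
        HashSet<String> allSNPs = new HashSet<>(Arrays.asList("rs1", "rs2", "rs3"));

        // size() fixes both the array length and the map capacity up front
        String[] snpList = new String[allSNPs.size()];
        HashMap<String, Integer> snpMap = new HashMap<>(allSNPs.size());

        int ctr = 0;
        for (String s : allSNPs) {
            snpMap.put(s, ctr);   // set member -> dense position
            snpList[ctr] = s;     // dense position -> set member
            ctr++;
        }
        System.out.println(snpMap.size() + " SNPs indexed into an array of length " + snpList.length);
    }
}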
From source file:it.iit.genomics.cru.simsearch.bundle.utils.PantherBridge.java
public static Collection<String[]> getEnrichment(String organism, String fileName, double threshold) {
    ArrayList<String[]> results = new ArrayList<>();

    ArrayListMultimap<String, String> genes = ArrayListMultimap.create();
    ArrayListMultimap<Double, String> pvalues = ArrayListMultimap.create();
    HashSet<String> uniqueGenes = new HashSet<>();

    try {
        String[] enrichmentTypes = { "process", "pathway" };

        for (String enrichmentType : enrichmentTypes) {
            HttpClient client = new HttpClient();
            MultipartPostMethod method = new MultipartPostMethod(
                    "http://pantherdb.org/webservices/garuda/tools/enrichment/VER_2/enrichment.jsp?");

            // Define name-value pairs to set into the QueryString
            method.addParameter("organism", organism);
            method.addParameter("type", "enrichment");
            method.addParameter("enrichmentType", enrichmentType);
            // other valid values: "function", "process", "cellular_location", "protein_class", "pathway"

            File inputFile = new File(fileName);
            method.addPart(new FilePart("geneList", inputFile, "text/plain", "ISO-8859-1"));

            // PANTHER does not use the ID type
            // method.addParameter("IdType", "UniProt");

            // Execute and print response
            client.executeMethod(method);
            String response = method.getResponseBodyAsString();

            for (String line : response.split("\n")) {
                if (false == "".equals(line.trim())) {
                    String[] row = line.split("\t");
                    // Id Name GeneId P-value
                    if ("Id".equals(row[0])) {
                        // header
                        continue;
                    }
                    // if (row.length > 1) {
                    String name = row[1];
                    String gene = row[2];
                    Double pvalue = Double.valueOf(row[3]);
                    uniqueGenes.add(gene);
                    if (pvalue < threshold) {
                        if (false == genes.containsKey(name)) {
                            pvalues.put(pvalue, name);
                        }
                        genes.put(name, gene);
                    }
                    // } else {
                    //     System.out.println("oups: " + row[0]);
                    // }
                }
            }

            method.releaseConnection();
        }

        // collect the distinct p-values, then sort them once
        ArrayList<Double> pvalueList = new ArrayList<>(pvalues.keySet());
        Collections.sort(pvalueList);

        int numGenes = uniqueGenes.size();
        for (Double pvalue : pvalueList) {
            for (String name : pvalues.get(pvalue)) {
                String geneList = String.join(",", genes.get(name));
                String[] result = { name, "" + pvalue, genes.get(name).size() + "/" + numGenes, geneList };
                results.add(result);
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }

    return results;
}
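The example above pours every gene identifier into uniqueGenes and later reads uniqueGenes.size() to report ratios like "5/120". A minimal sketch of this count-distinct pattern (the sample data is made up):

import java.util.HashSet;

public class CountDistinct {
    public static void main(String[] args) {
        String[] genes = { "TP53", "BRCA1", "TP53", "EGFR", "BRCA1" };
        HashSet<String> unique = new HashSet<>();
        for (String g : genes) {
            unique.add(g); // add is a no-op for duplicates
        }
        // size() gives the distinct count without any manual duplicate tracking
        System.out.println(unique.size() + " distinct genes out of " + genes.length);
    }
}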
From source file:edu.ucla.cs.scai.canali.core.index.BuildIndex.java
private HashSet<Integer> findCommonLowestAncestor(Set<Integer> classes) {
    HashSet<Integer> finalClasses = new HashSet<>(classes);
    while (finalClasses.size() > 1) {
        Iterator<Integer> it = finalClasses.iterator();
        int c1 = it.next();
        it.remove();
        int c2 = it.next();
        it.remove();
        HashSet<Integer> cas = findCommonLowestAncestor(c1, c2);
        for (int ca : cas) {
            finalClasses.add(ca);
        }
    }
    return finalClasses;
}
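Here size() drives a reduction loop: two members are pulled out, combined, and the result re-enters the working set until only one element is left. A minimal analogue of that loop shape, using gcd as the combiner instead of the project's ancestor lookup (the combiner choice is purely illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;

public class ReduceUntilOne {
    // Repeatedly replaces two members with their combination until size() == 1,
    // mirroring the loop structure of findCommonLowestAncestor above.
    // Precondition: the set must be non-empty.
    static int reduceByGcd(HashSet<Integer> values) {
        while (values.size() > 1) {
            Iterator<Integer> it = values.iterator();
            int a = it.next(); it.remove();
            int b = it.next(); it.remove();
            values.add(gcd(a, b)); // the combined value re-enters the working set
        }
        return values.iterator().next();
    }

    static int gcd(int a, int b) {
        return b == 0 ? a : gcd(b, a % b);
    }

    public static void main(String[] args) {
        HashSet<Integer> v = new HashSet<>(Arrays.asList(12, 18, 30));
        System.out.println(reduceByGcd(v)); // 6, regardless of iteration order
    }
}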
From source file:org.ourbeehive.mbp.builder.ResultMapBuilder.java
private void populateResultMap(ResultMap resultMap, MapperProfile mapperProfile,
        ResultMapConfig ancesResultMapConfig, OneToOneIdx descOneToOneIdx, String attrNameChain)
        throws AppException {

    try {
        // If the given oneToOne is not null, then consider it as the current resultMapConfig.
        // String ancesClassName = ancesResultMapConfig.getClassName();
        RelConfig descOneToOne = null;
        ResultMapConfig currResultMapConfig = null;
        // String refToSon = null;
        if (descOneToOneIdx != null) {
            descOneToOne = descOneToOneIdx.getOneToOne();
            currResultMapConfig = descOneToOne.getResultMapConfig();
            // refToSon = descOneToOne.getRefToSon();
        } else {
            currResultMapConfig = ancesResultMapConfig;
        }
        String tableName = currResultMapConfig.getTableName();
        String tableAlias = currResultMapConfig.getTableAlias();

        // Find OrmTable according to the table name.
        OrmTable ormTable = CtxCacheFacade.lookupOrmTable(tableName);
        if (ormTable == null) {
            logger.error("!!! NO TABLE !!!: No table found with table name: " + tableName);
            return;
        }

        List<Result> resultList = resultMap.getResult();
        Result result = null;
        OrmColumn ormColumn = null;
        String columnName = null;
        String attrName = null;
        // OrmAttr ormAttr = null;

        logger.debug("ATTR NAME CHAIN: The given attrNameChain is: " + attrNameChain);
        if (attrNameChain == null) {
            attrNameChain = "";
        } else {
            attrNameChain = attrNameChain + JavaSrcElm.DOT;
        }

        HashSet<String> includedAttrs = ProfileHelper.getIncludedAttrName(currResultMapConfig);
        HashSet<String> excludedAttrs = ProfileHelper.getExcludedAttrName(currResultMapConfig);

        // Populate 'result' elements according to the ClassEntity definition.
        List<OrmColumn> ormColumnList = ormTable.getColumnList();
        for (int i = 0; i < ormColumnList.size(); i++) {
            // Check inclusion and exclusion; inclusion takes higher precedence.
            ormColumn = ormColumnList.get(i);
            columnName = ormColumn.getName();

            // Translate column name to java attribute name.
            attrName = JavaFormatter.getJavaStyle(columnName, false);
            if (includedAttrs.size() != 0) {
                if (includedAttrs.contains(attrName) == false) {
                    logger.debug("EXCLUDE ATTRIBUTE: Property '" + attrName
                            + "' is NOT in the inclusion list, skipped.");
                    continue;
                }
            } else if (excludedAttrs.size() != 0) {
                if (excludedAttrs.contains(attrName) == true) {
                    logger.debug("EXCLUDE ATTRIBUTE: Property '" + attrName
                            + "' is in the exclusion list, skipped.");
                    continue;
                }
            }

            result = mapperObjFactory.createResult();
            result.setProperty(attrNameChain + attrName);
            result.setColumn(MapperFormatter.getColumnAlias(mapperProfile, tableName, tableAlias, columnName));
            result.setJdbcType(ormColumn.getJdbcTypeName());
            resultList.add(result);
            logger.debug("FIND MAPPING: Find mapping between attribute '" + attrNameChain + attrName
                    + "' and column '" + columnName + "'");
        }
    } catch (Throwable t) {
        ExceptionUtil.handleException(t, logger);
    }
}
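The inclusion/exclusion checks above use size() != 0 as an emptiness test; isEmpty() expresses the same condition and always agrees with it. A tiny sketch of the equivalence (the attribute name is illustrative):

import java.util.HashSet;

public class EmptinessCheck {
    public static void main(String[] args) {
        HashSet<String> includedAttrs = new HashSet<>();
        // size() != 0 and !isEmpty() always agree; isEmpty() states the intent directly
        System.out.println(includedAttrs.size() != 0); // false
        System.out.println(!includedAttrs.isEmpty());  // false
        includedAttrs.add("name");
        System.out.println(includedAttrs.size() != 0); // true
    }
}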
From source file:gedi.riboseq.inference.orf.OrfFinder.java
/**
 * Coordinates are in codonsRegion space!
 * @param index
 * @param sequence
 * @param sg
 * @param codonsRegion
 * @return
 */
public ArrayList<OrfWithCodons> findOrfs(int index, String sequence, SpliceGraph sg,
        ImmutableReferenceGenomicRegion<IntervalTreeSet<Codon>> codonsRegion) {
    SimpleDirectedGraph<Codon> fg = new SimpleDirectedGraph<Codon>("Codongraph");

    // if (!codonsRegion.getReference().toString().equals("chr4+") || !codonsRegion.getRegion().contains(140_283_087))
    //     return 0;

    LeftMostInFrameAndClearList buff = new LeftMostInFrameAndClearList();

    IntervalTreeSet<Codon> codons = codonsRegion.getData();
    codons.removeIf(c -> c.getTotalActivity() < minCodonActivity);
    if (codons.size() == 0)
        return new ArrayList<OrfWithCodons>();

    // add stop codons for easy orf inference
    HashSet<Codon> stopCodons = new HashSet<Codon>();
    Trie<String> stop = new Trie<String>();
    stop.put("TAG", "TAG");
    stop.put("TGA", "TGA");
    stop.put("TAA", "TAA");
    stop.iterateAhoCorasick(sequence)
            .map(r -> new Codon(new ArrayGenomicRegion(r.getStart(), r.getEnd()), r.getValue()))
            .toCollection(stopCodons);

    for (Intron intr : sg.iterateIntrons().loop()) {
        ArrayGenomicRegion reg = new ArrayGenomicRegion(intr.getStart() - 2, intr.getStart(), intr.getEnd(),
                intr.getEnd() + 1);
        String cod = stop.get(SequenceUtils.extractSequence(reg, sequence));
        if (cod != null)
            stopCodons.add(new Codon(reg, cod));

        reg = new ArrayGenomicRegion(intr.getStart() - 1, intr.getStart(), intr.getEnd(), intr.getEnd() + 2);
        cod = stop.get(SequenceUtils.extractSequence(reg, sequence));
        if (cod != null)
            stopCodons.add(new Codon(reg, cod));
    }
    stopCodons.removeAll(codons);
    codons.addAll(stopCodons);

    ArrayList<OrfWithCodons> re = new ArrayList<OrfWithCodons>();
    HashSet<Codon> usedForAnno = new HashSet<Codon>();

    if (assembleAnnotationFirst) {
        // new: first use annotated transcripts in a greedy fashion
        ArrayList<ImmutableReferenceGenomicRegion<Transcript>> transcripts = annotation.ei(codonsRegion)
                .filter(t -> t.getData().isCoding()).map(t -> codonsRegion.induce(t, "T")).list();

        int acount = 0;
        LinkedList<OrfWithCodons> orfs = new LinkedList<OrfWithCodons>();

        GenomicRegion best;
        HashSet<Codon> aremoved = new HashSet<Codon>();

        do {
            best = null;
            double bestSum = 0;
            for (ImmutableReferenceGenomicRegion<Transcript> tr : transcripts) {
                double[] a = new double[tr.getRegion().getTotalLength()];
                for (Codon c : codons) {
                    if (tr.getRegion().containsUnspliced(c)) {
                        int p = tr.induce(c.getStart());
                        assert a[p] == 0;
                        if (!aremoved.contains(c))
                            a[p] = c.totalActivity;
                        if (c.isStop())
                            a[p] = -1;
                    }
                }
                for (int f = 0; f < 3; f++) {
                    int s = -1;
                    double sum = 0;
                    for (int p = f; p < a.length; p += 3) {
                        if (a[p] == -1) { // stop
                            if (sum > bestSum) {
                                bestSum = sum;
                                best = tr.getRegion().map(new ArrayGenomicRegion(s, p + 3));
                            }
                            s = -1;
                            sum = 0;
                        } else
                            sum += a[p];
                        if (a[p] > 0 && s == -1)
                            s = p;
                    }
                }
            }
            if (best != null) {
                ArrayList<Codon> cods = new ArrayList<>();
                int uniqueCodons = 0;
                double uniqueActivity = 0;
                double totalActivity = 0;

                for (Codon c : codons) {
                    if (best.containsUnspliced(c) && best.induce(c.getStart()) % 3 == 0) {
                        if (aremoved.add(c)) {
                            uniqueActivity += c.totalActivity;
                            uniqueCodons++;
                        }
                        totalActivity += c.totalActivity;
                        if (c.totalActivity > 0)
                            cods.add(c);
                    }
                }
                // System.out.println(codonsRegion.map(best));
                if ((uniqueCodons >= minUniqueCodons || uniqueCodons == cods.size())
                        && uniqueActivity > minUniqueActivity && totalActivity > minOrfTotalActivity) {
                    Collections.sort(cods);
                    usedForAnno.addAll(cods);

                    OrfWithCodons orf = new OrfWithCodons(index, 0, acount++, best.toArrayGenomicRegion(),
                            cods, true);
                    orfs.add(orf);
                }
            }
        } while (best != null);

        if (orfs.size() > 1) {
            // they are not necessarily connected!
            LinkedList<OrfWithCodons>[] connected = findConnectedOrfs(orfs);
            orfs.clear();

            for (LinkedList<OrfWithCodons> corfs : connected) {
                for (boolean changed = true; changed && corfs.size() > 1;) {
                    changed = false;

                    if (useEM)
                        inferOverlappingOrfActivitiesEM(corfs);
                    else
                        overlapUniqueCoverage(corfs);

                    Iterator<OrfWithCodons> it = corfs.iterator();
                    while (it.hasNext()) {
                        OrfWithCodons orf = it.next();
                        if (orf.getEstimatedTotalActivity() < minOrfTotalActivity) {
                            it.remove();
                            changed = true;
                        }
                    }
                }
                if (corfs.size() > 1)
                    distributeCodons(corfs);
                orfs.addAll(corfs);
            }
        }
        re.addAll(orfs);
    }

    // as edges only are represented in the splice graph, singleton codons are discarded (which does make sense anyway)
    for (Codon c : codons) {
        if (!c.isStop()) {
            // find unspliced successors (can be more than one, when the successor codon itself is spliced!
            // all of them have the same start!)
            int max = c.getEnd() + maxAminoDist * 3;
            for (Codon n : codons
                    .getIntervalsIntersecting(c.getEnd(), c.getEnd() + maxAminoDist * 3, buff.startAndClear(c))
                    .get()) {
                if (!containsInframeStop(sequence.substring(c.getEnd(), n.getStart())))
                    fg.addInteraction(c, n);
                max = n.getStart() + 2;
            }

            // find all spliced successors for each splice junction that comes before n or maxAminoDist
            sg.forEachIntronStartingBetween(c.getEnd(), max + 1, intron -> {
                for (Codon n : codons.getIntervalsIntersecting(intron.getEnd(),
                        intron.getEnd() + maxAminoDist * 3 - (intron.getStart() - c.getEnd()),
                        buff.startAndClear(c, intron)).get())
                    if (!containsInframeStop(SequenceUtils.extractSequence(new ArrayGenomicRegion(c.getStart(),
                            intron.getStart(), intron.getEnd(), n.getStart()), sequence)))
                        fg.addInteraction(c, n, intron);
            });
        }
    }

    int cc = 1;
    for (SimpleDirectedGraph<Codon> g : fg.getWeaklyConnectedComponents()) {
        if (EI.wrap(g.getSources()).mapToDouble(c -> c.getTotalActivity()).sum() == 0)
            continue;

        // iterate longest paths in g
        LinkedList<Codon> topo = g.getTopologicalOrder();
        HashSet<Codon> remInTopo = new HashSet<Codon>(topo);
        remInTopo.removeIf(c -> !stopCodons.contains(c) && !usedForAnno.contains(c));
        HashSet<Codon> removed = new HashSet<Codon>(remInTopo);

        // double maxPathScore = 0;

        LinkedList<OrfWithCodons> orfs = new LinkedList<OrfWithCodons>();

        int count = 0;
        while (removed.size() < topo.size()) {
            HashMap<Codon, MutablePair<GenomicRegion, Double>> longestPrefixes = new HashMap<Codon, MutablePair<GenomicRegion, Double>>();
            for (Codon c : topo)
                longestPrefixes.put(c,
                        new MutablePair<GenomicRegion, Double>(c, removed.contains(c) ? 0 : (c.getTotalActivity())));

            Codon longestEnd = null;
            HashMap<Codon, Codon> backtracking = new HashMap<Codon, Codon>();

            for (Codon c : topo) {
                // if (codonsRegion.map(c).getStart() == 100_466_118)
                //     System.out.println(c);
                // if (codonsRegion.map(c).getStart() == 100_465_842)
                //     System.out.println(c);

                double len = longestPrefixes.get(c).Item2;
                for (AdjacencyNode<Codon> n = g.getTargets(c); n != null; n = n.next) {
                    MutablePair<GenomicRegion, Double> pref = longestPrefixes.get(n.node);

                    double nnact = removed.contains(n.node) ? 0 : (n.node.getTotalActivity());
                    if (pref.Item2 <= len + nnact) {
                        pref.set(extendFullPath(longestPrefixes.get(c).Item1, c, n.node, n.getLabel()),
                                len + nnact);
                        backtracking.put(n.node, c);
                    }
                }
                if (longestEnd == null || longestPrefixes.get(longestEnd).Item2 <= len)
                    longestEnd = c;
            }

            // determine longest path by backtracking and mark all codons on the path as removed
            ArrayList<Codon> orfCodons = new ArrayList<Codon>();
            double totalActivity = 0;
            double uniqueActivity = 0;
            int uniqueCodons = 0;
            for (Codon c = longestEnd; c != null; c = backtracking.get(c)) {
                if (removed.add(c) && c.getTotalActivity() > 0) {
                    uniqueCodons++;
                    uniqueActivity += c.getTotalActivity();
                }

                if (c.getTotalActivity() > 0) // to remove dummy stop codons
                    orfCodons.add(c);
                totalActivity += c.getTotalActivity();
            }

            // System.out.println(codonsRegion.map(longestPrefixes.get(longestEnd).Item1));

            if ((uniqueCodons >= minUniqueCodons || uniqueCodons == orfCodons.size())
                    && uniqueActivity > minUniqueActivity && totalActivity > minOrfTotalActivity) {

                Collections.reverse(orfCodons);

                MutablePair<GenomicRegion, Double> triple = longestPrefixes.get(longestEnd);
                ArrayGenomicRegion region = triple.Item1.toArrayGenomicRegion();
                String lastCodon = SequenceUtils.extractSequence(
                        region.map(new ArrayGenomicRegion(region.getTotalLength() - 3, region.getTotalLength())),
                        sequence);

                OrfWithCodons orf = new OrfWithCodons(index, cc, count++, region, orfCodons,
                        stop.containsKey(lastCodon));
                orfs.add(orf);
            }

            // maxPathScore = Math.max(maxPathScore, totalActivity);
        }

        if (orfs.size() > 1) {
            // they are not necessarily connected!
            LinkedList<OrfWithCodons>[] connected = findConnectedOrfs(orfs);
            orfs.clear();

            for (LinkedList<OrfWithCodons> corfs : connected) {
                for (boolean changed = true; changed && corfs.size() > 1;) {
                    changed = false;

                    if (useEM)
                        inferOverlappingOrfActivitiesEM(corfs);
                    else
                        overlapUniqueCoverage(corfs);

                    Iterator<OrfWithCodons> it = corfs.iterator();
                    while (it.hasNext()) {
                        OrfWithCodons orf = it.next();
                        if (orf.getEstimatedTotalActivity() < minOrfTotalActivity) {
                            it.remove();
                            changed = true;
                        }
                    }
                }
                if (corfs.size() > 1)
                    distributeCodons(corfs);
                orfs.addAll(corfs);
            }
        }
        re.addAll(orfs);

        cc++;
    }

    return re;
}
From source file:org.kuali.kfs.fp.document.BudgetAdjustmentDocument.java
/**
 * Determines if this document can be auto-approved or not. The conditions for auto-approval are:
 * 1) Single account used on document
 * 2) Initiator is fiscal officer or primary delegate for the account
 * 3) Only current adjustments are being made
 * 4) The fund group for the account is not contracts and grants
 * 5) Current income/expense decrease amount must equal increase amount
 *
 * @return false if auto-approval can occur (and therefore, full approval is not required); true if a full approval is required
 */
protected boolean requiresFullApproval() {
    List<BudgetAdjustmentAccountingLine> accountingLines = new ArrayList<BudgetAdjustmentAccountingLine>();
    accountingLines.addAll(getSourceAccountingLines());
    accountingLines.addAll(getTargetAccountingLines());

    HashSet<String> distinctAccts = new HashSet<String>();
    HashSet<String> distinctObjs = new HashSet<String>();
    String accountKey = "";
    String objCdKey = "";
    for (BudgetAdjustmentAccountingLine account : accountingLines) {
        if (account.getBaseBudgetAdjustmentAmount().isNonZero()) {
            return true;
        }
        accountKey = account.getChartOfAccountsCode() + "-" + account.getAccountNumber();
        objCdKey = account.getPostingYear() + "-" + account.getChartOfAccountsCode() + "-"
                + account.getFinancialObjectCode();
        distinctAccts.add(accountKey);
        distinctObjs.add(objCdKey);
        if (distinctAccts.size() > 1 || distinctObjs.size() > 1) {
            return true;
        }
    }

    // check remaining conditions
    // initiator should be fiscal officer or primary delegate for account
    Person initiator = KimApiServiceLocator.getPersonService()
            .getPerson(getDocumentHeader().getWorkflowDocument().getInitiatorPrincipalId());
    AccountService acctService = SpringContext.getBean(AccountService.class);
    for (Iterator iter1 = accountingLines.iterator(); iter1.hasNext();) {
        BudgetAdjustmentAccountingLine line = (BudgetAdjustmentAccountingLine) iter1.next();
        Account account = acctService.getByPrimaryId(line.getChartOfAccountsCode(), line.getAccountNumber());
        boolean hasResponsibilityOnAccount = acctService.hasResponsibilityOnAccount(initiator, account);

        Account userAccount = null;
        if (hasResponsibilityOnAccount) {
            userAccount = account;
        }

        if (userAccount == null) {
            return true;
        } else {
            // fund group should not be CG
            if (userAccount.isForContractsAndGrants()) {
                return true;
            }

            // current income/expense decrease amount must equal increase amount
            if (!getSourceCurrentBudgetIncomeTotal().equals(getTargetCurrentBudgetIncomeTotal())
                    || !getSourceCurrentBudgetExpenseTotal().equals(getTargetCurrentBudgetExpenseTotal())) {
                return true;
            }
        }
    }

    return false;
}
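Both this method and the OLE variant below rely on the same trick: add each key to a HashSet and treat size() > 1 as proof that a second distinct value has appeared, allowing an early exit. A stripped-down sketch of that guard (the key format is invented for illustration):

import java.util.HashSet;

public class SingleValueGuard {
    /** Returns true as soon as a second distinct key is seen. */
    static boolean hasMultipleDistinct(String[] keys) {
        HashSet<String> distinct = new HashSet<>();
        for (String key : keys) {
            distinct.add(key);
            if (distinct.size() > 1) {
                return true; // early exit: no need to scan the rest
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(hasMultipleDistinct(new String[] { "BL-123", "BL-123" })); // false
        System.out.println(hasMultipleDistinct(new String[] { "BL-123", "UA-456" })); // true
    }
}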
From source file:org.kuali.ole.fp.document.BudgetAdjustmentDocument.java
/**
 * Determines if this document can be auto-approved or not. The conditions for auto-approval are:
 * 1) Single account used on document
 * 2) Initiator is fiscal officer or primary delegate for the account
 * 3) Only current adjustments are being made
 * 4) The fund group for the account is not contracts and grants
 * 5) Current income/expense decrease amount must equal increase amount
 *
 * @return false if auto-approval can occur (and therefore, full approval is not required); true if a full approval is required
 */
// MSU Contribution DTT-3059, DTT-3235 OLEMI-8689 OLECNTRB-941 - Re-implemented this method to execute the rules correctly
protected boolean requiresFullApproval() {
    List<BudgetAdjustmentAccountingLine> accountingLines = new ArrayList<BudgetAdjustmentAccountingLine>();
    accountingLines.addAll(getSourceAccountingLines());
    accountingLines.addAll(getTargetAccountingLines());

    HashSet<String> distinctAccts = new HashSet<String>();
    HashSet<String> distinctObjs = new HashSet<String>();
    String accountKey = "";
    String objCdKey = "";
    for (BudgetAdjustmentAccountingLine account : accountingLines) {
        if (account.getBaseBudgetAdjustmentAmount().isNonZero()) {
            return true;
        }
        accountKey = account.getChartOfAccountsCode() + "-" + account.getAccountNumber();
        objCdKey = account.getPostingYear() + "-" + account.getChartOfAccountsCode() + "-"
                + account.getFinancialObjectCode();
        distinctAccts.add(accountKey);
        distinctObjs.add(objCdKey);
        if (distinctAccts.size() > 1 || distinctObjs.size() > 1) {
            return true;
        }
    }

    String chart = "";
    String accountNumber = "";

    // check remaining conditions
    // initiator should be fiscal officer or primary delegate for account
    Person initiator = KimApiServiceLocator.getPersonService()
            .getPersonByPrincipalName(getDocumentHeader().getWorkflowDocument().getInitiatorPrincipalId());
    List userAccounts = SpringContext.getBean(AccountService.class)
            .getAccountsThatUserIsResponsibleFor(initiator);

    // DTT:3059 - Loop over all the accounts present on the document and see if the user's account responsibility includes them
    for (Iterator iter1 = accountingLines.iterator(); iter1.hasNext();) {
        BudgetAdjustmentAccountingLine line = (BudgetAdjustmentAccountingLine) iter1.next();
        chart = line.getChartOfAccountsCode();
        accountNumber = line.getAccountNumber();

        Account userAccount = null;
        for (Iterator iter2 = userAccounts.iterator(); iter2.hasNext();) {
            AccountResponsibility account = (AccountResponsibility) iter2.next();
            if (chart.equals(account.getAccount().getChartOfAccountsCode())
                    && accountNumber.equals(account.getAccount().getAccountNumber())) {
                userAccount = account.getAccount();
                break;
            }
        }

        if (userAccount == null) {
            return true;
        } else {
            // fund group should not be CG
            if (userAccount.isForContractsAndGrants()) {
                return true;
            }

            // current income/expense decrease amount must equal increase amount
            if (!getSourceCurrentBudgetIncomeTotal().equals(getTargetCurrentBudgetIncomeTotal())
                    || !getSourceCurrentBudgetExpenseTotal().equals(getTargetCurrentBudgetExpenseTotal())) {
                return true;
            }
        }
    }

    return false;
}