List of usage examples for java.util.Collections.max
public static <T extends Object & Comparable<? super T>> T max(Collection<? extends T> coll)
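All of the examples below rely on the same contract: the elements must be mutually comparable and the collection must be non-empty, otherwise Collections.max throws a ClassCastException or a NoSuchElementException respectively. A minimal, self-contained sketch with hypothetical values, also showing the two-argument Comparator overload:

import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class CollectionsMaxDemo {
    public static void main(String[] args) {
        // Natural ordering: elements must implement Comparable.
        List<Integer> numbers = Arrays.asList(3, 11, 7);
        System.out.println(Collections.max(numbers)); // 11

        // Two-argument overload with an explicit Comparator:
        List<String> words = Arrays.asList("pear", "fig", "banana");
        System.out.println(Collections.max(words, Comparator.comparingInt(String::length))); // banana

        // Collections.max(Collections.emptyList()) would throw NoSuchElementException.
    }
}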
From source file:info.magnolia.cms.beans.config.ModuleRegistration.java
/**
 * Calculates the level of dependency. 0 means no dependency. If none of the
 * dependencies has dependencies itself, the level is 1. If one or more of the
 * dependencies has a dependency which itself has a dependency, it returns 2.
 * And so on ...
 * @param def module definition
 * @return the level
 */
private int calcDependencyLevel(ModuleDefinition def) {
    if (def.getDependencies() == null || def.getDependencies().size() == 0) {
        return 0;
    }
    List dependencyLevels = new ArrayList();
    for (Iterator iter = def.getDependencies().iterator(); iter.hasNext();) {
        DependencyDefinition dep = (DependencyDefinition) iter.next();
        ModuleDefinition depDef = this.getModuleDefinition(dep.getName());
        dependencyLevels.add(new Integer(calcDependencyLevel(depDef)));
    }
    return ((Integer) Collections.max(dependencyLevels)).intValue() + 1;
}
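For contrast, a hypothetical rewrite of the same recursion using generics (assuming getDependencies() returns a typed Collection<DependencyDefinition>, which the raw-typed original does not guarantee); the casts and manual boxing around Collections.max disappear:

// Hypothetical typed variant; assumes getDependencies() returns Collection<DependencyDefinition>.
private int calcDependencyLevel(ModuleDefinition def) {
    if (def.getDependencies() == null || def.getDependencies().isEmpty()) {
        return 0;
    }
    List<Integer> dependencyLevels = new ArrayList<Integer>();
    for (DependencyDefinition dep : def.getDependencies()) {
        dependencyLevels.add(calcDependencyLevel(this.getModuleDefinition(dep.getName())));
    }
    return Collections.max(dependencyLevels) + 1; // unboxed automatically
}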
From source file:org.jahia.modules.wiki.WikiURLInterceptor.java
String replaceRefsByPlaceholders(final String originalValue, final Map<String, Long> newRefs,
        final Map<String, Long> oldRefs, String workspace) throws RepositoryException {
    if (logger.isDebugEnabled()) {
        logger.debug("Before replaceRefsByPlaceholders : " + originalValue);
    }
    String pathPart = originalValue;
    if (pathPart.startsWith(dmsContext)) {
        // Remove DOC context part
        pathPart = StringUtils.substringAfter(StringUtils.substringAfter(pathPart, dmsContext), "/");
    } else {
        return originalValue;
    }
    final String path = "/" + WebUtils.urlDecode(pathPart);
    return JCRTemplate.getInstance().doExecuteWithSystemSession(null, workspace, null,
            new JCRCallback<String>() {
                public String doInJCR(JCRSessionWrapper session) throws RepositoryException {
                    String value = originalValue;
                    JCRNodeWrapper reference;
                    try {
                        String currentPath = path;
                        // retrieve path
                        while (true) {
                            if (StringUtils.contains(currentPath, '/')) {
                                currentPath = StringUtils.substringAfter(currentPath, "/");
                            } else {
                                throw new PathNotFoundException("not found in " + path);
                            }
                            try {
                                reference = session.getNode(JCRContentUtils.escapeNodePath("/" + currentPath));
                                break;
                            } catch (PathNotFoundException e) {
                                // continue
                            }
                        }
                        value = DOC_CONTEXT_PLACEHOLDER + StringUtils.substringAfter(value, dmsContext);
                    } catch (PathNotFoundException e) {
                        throw new ConstraintViolationException("Invalid link : " + path, e);
                    }
                    String id = reference.getIdentifier();
                    if (!newRefs.containsKey(id)) {
                        if (oldRefs.containsKey(id)) {
                            newRefs.put(id, oldRefs.get(id));
                        } else {
                            Long max = Math.max(oldRefs.isEmpty() ? 0 : Collections.max(oldRefs.values()),
                                    newRefs.isEmpty() ? 0 : Collections.max(newRefs.values()));
                            newRefs.put(id, max + 1);
                        }
                    }
                    Long index = newRefs.get(id);
                    String link = "/##ref:link" + index + "##";
                    value = WebUtils.urlDecode(value).replace(path, link);
                    if (logger.isDebugEnabled()) {
                        logger.debug("After replaceRefsByPlaceholders : " + value);
                    }
                    return value;
                }
            });
}
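The guards around Collections.max are the detail to copy here: Collections.max throws NoSuchElementException on an empty collection, so each map is checked before its values are reduced. The "next free reference index" idiom in isolation (method and parameter names are hypothetical):

// Next free index across two hypothetical reference tables.
static long nextRefIndex(Map<String, Long> oldRefs, Map<String, Long> newRefs) {
    long max = Math.max(oldRefs.isEmpty() ? 0 : Collections.max(oldRefs.values()),
            newRefs.isEmpty() ? 0 : Collections.max(newRefs.values()));
    return max + 1;
}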
From source file:org.apache.hadoop.hbase.client.TestMultipleTimestamps.java
/**
 * Uses the TimestampFilter on a Get to request a specified list of
 * versions for the row/column specified by rowIdx & colIdx.
 */
private Cell[] getNVersions(HTable ht, byte[] cf, int rowIdx, int colIdx, List<Long> versions)
        throws IOException {
    byte[] row = Bytes.toBytes("row:" + rowIdx);
    byte[] column = Bytes.toBytes("column:" + colIdx);
    Get get = new Get(row);
    get.addColumn(cf, column);
    get.setMaxVersions();
    get.setTimeRange(Collections.min(versions), Collections.max(versions) + 1);
    Result result = ht.get(get);
    return result.rawCells();
}
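HBase time ranges are half-open ([min, max)), which is why the code passes Collections.max(versions) + 1: without the + 1, cells written at exactly the largest requested version would be excluded. The boundary computation in isolation, with hypothetical timestamps:

List<Long> versions = Arrays.asList(10L, 30L, 20L); // hypothetical timestamps
long minInclusive = Collections.min(versions);      // 10
long maxExclusive = Collections.max(versions) + 1;  // 31, so version 30 is still matched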
From source file:dk.nsi.haiba.minipasconverter.dao.impl.MinipasDAOImpl.java
public <T extends MinipasRowWithRecnum> void buildCache(Map<Integer, Collection<T>> destination,
        RowMapper<T> rowMapper, String tableName, int currentRecnum) {
    Monitor mon = MonitorFactory.start("MinipasDAOImpl.buildCache");
    List<T> query = jdbc.query("SELECT * FROM " + minipasPrefix + tableName
            + " WHERE V_RECNUM >= ? ORDER BY V_RECNUM FETCH FIRST " + cacheSize + " ROWS ONLY",
            rowMapper, currentRecnum);
    // be sure to tell that we have already been here for this recnum, even if there is no data in the loop
    destination.put(currentRecnum, new ArrayList<T>());
    for (T t : query) {
        // store in cache
        Collection<T> collection = destination.get(t.getV_recnum());
        if (collection == null) {
            collection = new ArrayList<T>();
            destination.put(t.getV_recnum(), collection);
        }
        collection.add(t);
    }
    if (query.size() == cacheSize) {
        if (destination.size() == 1) {
            // fatal error, we can never retrieve all data for this recnum
            throw new RuntimeException("cache size of " + cacheSize + " is not enough to contain data for recnum "
                    + currentRecnum + " from table " + tableName);
        }
        // drop the collection with the largest recnum as this may have been cut short (by the fetch limit)
        Integer max_recnum = Collections.max(destination.keySet());
        destination.remove(max_recnum);
    }
    mon.stop();
}
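Evicting the largest key is the crux of the refill strategy: the FETCH FIRST limit may have truncated exactly the rows of the highest recnum, so that bucket is dropped and re-read on the next pass. The eviction step in isolation (map contents are hypothetical):

Map<Integer, Collection<String>> cache = new HashMap<Integer, Collection<String>>();
cache.put(100, new ArrayList<String>());
cache.put(200, new ArrayList<String>()); // possibly cut short by the fetch limit
Integer maxRecnum = Collections.max(cache.keySet()); // 200
cache.remove(maxRecnum);                 // will be re-read on the next pass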
From source file:fNIRs.FNIRsStats.java
public static void oldWriteANOVAs(File outFile, GroupedChannels data, List<String> groupNames,
        List<Integer> conditions, int numChunks, int precision) {
    // open file for writing with a nice print stream object:
    PrintWriter ostream = makePWriter(outFile); // OKAY VAR NAME?
    // get all condition-group sequences:
    ArrayList<GroupedChannels.TaggedDataSequence> allTDSs = data.getAllSelectedTDSs(groupNames, conditions);
    // chunkData(allTDSs, numChunks); // COMMENT THIS LATER
    // calculate required widths for printed names and condition numbers:
    int nameWidth = longestLength(groupNames); // length of longest name
    int conditionWidth = // number of digits in the largest condition number
            String.valueOf(Collections.max(conditions)).length();
    // make sure the fields will be wide enough to hold the ANOVA values,
    // which will consist of a 0 or 1 followed by a . and precision 0s:
    int idFieldWidth = nameWidth + 2 + conditionWidth; // 2 == " c".length()
    if (idFieldWidth < precision + 2) { // 2 == "1.".length()
        // if not, increase the condition width so the total field width is large enough:
        // System.out.println("ANOVA values are wider than identifiers.");
        idFieldWidth = precision + 2;
    }
    String idFieldFormat = "%-" + idFieldWidth + "s"; // format string
    // output the first row, containing identifying information for each
    // group-condition combination:
    // first, output proper-width placeholder for the identifier column:
    ostream.printf("%" + idFieldWidth + "s ", ""); // TOO HACKY??
    // then, output all tds identifiers:
    for (GroupedChannels.TaggedDataSequence tds : allTDSs) {
        ostream.printf(idFieldFormat + " ", tds.getGroupName() + " c" + tds.getCondition());
        // ostream.printf(idFieldFormat + " ", tds.getGroupName(), tds.getCondition());
    }
    ostream.println(); // print newline
    // output ANOVA values line by line:
    OneWayAnova myANOVA = new OneWayAnova();
    for (GroupedChannels.TaggedDataSequence first : allTDSs) {
        // output tds identifier in first column:
        ostream.printf(idFieldFormat + " ", first.getGroupName() + " c" + first.getCondition());
        // create Collection to send to the ANOVA object:
        LinkedList<double[]> dataSets = new LinkedList<double[]>();
        // convert first's data sequence to an array, then add it to dataSets
        dataSets.add(toPrimitiveDoubleArray(first.getData()));
        dataSets.add(null); // placeholder for second's data sequence
        for (GroupedChannels.TaggedDataSequence second : allTDSs) {
            // convert and add second's data sequence to position one in dataSets:
            dataSets.set(1, toPrimitiveDoubleArray(second.getData()));
            double result = -1; // not a valid ANOVA value, so we know if something went wrong
            try {
                result = myANOVA.anovaPValue(dataSets);
                // if (first == second) { // if the two TDSs are the same TDS,
                //     result = 1; // then the ANOVA value should be 1, even though a divide-by-zero
                // }
            } catch (Exception ex) {
                ostream.println();
                localError("unknown problem calculating ANOVA: " + ex.getMessage());
            }
            if (result != result) { // if result is NaN
                System.out.println("NaN on " + first.getGroupName() + " c" + first.getCondition()
                        + " and " + second.getGroupName() + " c" + second.getCondition());
            }
            ostream.printf("%-" + idFieldWidth + "." + precision + "f ", result);
        }
        ostream.println(); // print newline
    }
    ostream.close();
}
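The single Collections.max call here only sizes a printf column: it finds the largest condition number and counts its decimal digits. In isolation, with hypothetical conditions:

List<Integer> conditions = Arrays.asList(3, 12, 7); // hypothetical
int conditionWidth = String.valueOf(Collections.max(conditions)).length(); // 2, digits in "12"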
From source file:org.apache.hadoop.hbase.client.TestMultipleTimestamps.java
private ResultScanner scan(HTable ht, byte[] cf, Integer[] rowIndexes, Integer[] columnIndexes,
        Long[] versions, int maxVersions) throws IOException {
    byte[] startRow = Bytes.toBytes("row:" + Collections.min(Arrays.asList(rowIndexes)));
    // Note: this is string concatenation, not arithmetic -- for a max index of 5 the
    // end row is "row:51", which still sorts after the largest requested row key.
    byte[] endRow = Bytes.toBytes("row:" + Collections.max(Arrays.asList(rowIndexes)) + 1);
    Scan scan = new Scan(startRow, endRow);
    for (Integer colIdx : columnIndexes) {
        byte[] column = Bytes.toBytes("column:" + colIdx);
        scan.addColumn(cf, column);
    }
    scan.setMaxVersions(maxVersions);
    scan.setTimeRange(Collections.min(Arrays.asList(versions)), Collections.max(Arrays.asList(versions)) + 1);
    ResultScanner scanner = ht.getScanner(scan);
    return scanner;
}
From source file:org.squashtest.tm.domain.library.structures.LibraryTree.java
/**
 * Returns the depth of the tree, i.e. how many layers the tree contains.
 * @return the depth
 */
public int getDepth() {
    return Collections.max(layers.keySet()) + 1;
}
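This only works because the layers map is keyed by a zero-based depth index, so the deepest key plus one is the layer count. A sketch under that assumption (map contents are hypothetical):

Map<Integer, List<String>> layers = new HashMap<Integer, List<String>>();
layers.put(0, Arrays.asList("root"));
layers.put(1, Arrays.asList("a", "b"));
layers.put(2, Arrays.asList("leaf"));
int depth = Collections.max(layers.keySet()) + 1; // 3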
From source file:org.apache.lens.cube.metadata.CubeFactTable.java
public Date getStartTime() {
    return Collections.max(Lists.newArrayList(getRelativeStartTime(), getAbsoluteStartTime()));
}
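java.util.Date implements Comparable<Date>, so Collections.max simply returns the later of the two boundary dates; no custom comparator is needed. The same idea without the Guava Lists helper, using hypothetical dates:

Date relative = new Date(1000000L); // hypothetical
Date absolute = new Date(2000000L); // hypothetical
Date start = Collections.max(Arrays.asList(relative, absolute)); // the later of the two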
From source file:org.libreplan.importers.JiraOrderElementSynchronizer.java
/**
 * Loop through all <code>workLogItems</code> and get the latest date.
 *
 * @param workLogItems
 *            list of workLogItems
 * @return latest date
 */
private Date getTheLatestWorkLoggedDate(List<WorkLogItemDTO> workLogItems) {
    List<Date> dates = new ArrayList<Date>();
    for (WorkLogItemDTO workLogItem : workLogItems) {
        if (workLogItem.getStarted() != null) {
            dates.add(workLogItem.getStarted());
        }
    }
    return Collections.max(dates);
}
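One caveat: if every work log entry has a null start date, dates stays empty and Collections.max throws NoSuchElementException. A defensive variant (hypothetical, not part of the original class) returns null instead:

// Hypothetical defensive variant of the method above.
private Date getTheLatestWorkLoggedDateOrNull(List<WorkLogItemDTO> workLogItems) {
    List<Date> dates = new ArrayList<Date>();
    for (WorkLogItemDTO workLogItem : workLogItems) {
        if (workLogItem.getStarted() != null) {
            dates.add(workLogItem.getStarted());
        }
    }
    return dates.isEmpty() ? null : Collections.max(dates);
}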
From source file:com.turn.griffin.GriffinLibCacheUtil.java
public Map<String, String> getLocalFileLatestVersion() {
    Map<String, String> filenameAndVersion = new ArrayMap<>();
    Map<String, File> localFileMap = getLocalFileMap();
    for (Map.Entry<String, File> entry : localFileMap.entrySet()) {
        List<File> fileVersions = Arrays
                .asList(entry.getValue().listFiles((FileFilter) DirectoryFileFilter.DIRECTORY));
        List<String> versions = new ArrayList<>(
                Collections2.transform(fileVersions, new Function<File, String>() {
                    @Override
                    public String apply(File file) {
                        return file.getName();
                    }
                }));
        // Collections.max compares the version strings lexicographically.
        filenameAndVersion.put(entry.getKey(), Collections.max(versions));
    }
    return filenameAndVersion;
}
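Because the versions are plain strings, the natural-ordering overload compares them lexicographically, where "9" sorts above "10". If the version directory names are purely numeric (an assumption, not guaranteed by the code above), the Comparator overload gives the numeric maximum:

List<String> versions = Arrays.asList("9", "10", "2"); // hypothetical directory names
String lexicographicMax = Collections.max(versions); // "9"
String numericMax = Collections.max(versions, Comparator.comparingLong(Long::parseLong)); // "10"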