List of usage examples for java.util.Collections.max
public static <T extends Object & Comparable<? super T>> T max(Collection<? extends T> coll)
From source file:uk.ac.ebi.atlas.bioentity.properties.BioEntityPropertyService.java
private List<PropertyLink> fetchGoLinksOrderedByDepth() { List<PropertyLink> propertyLinks = Lists.newArrayList(); if (!depthToGoTerms.isEmpty()) { for (int i = Collections.max(depthToGoTerms.keySet()); i >= 1; i--) { for (GoPoTerm goPoTerm : depthToGoTerms.get(i)) { Optional<PropertyLink> link = linkBuilder.createLink(identifier, "go", goPoTerm.accession(), species);//from w ww . ja v a 2 s. c o m if (link.isPresent()) { propertyLinks.add(link.get()); } } } } return propertyLinks; }
From source file:org.encuestame.core.cron.CalculateHashTagSize.java
/** * Calculate all hashtag size./*ww w . j a v a 2 s .com*/ */ // @Scheduled(cron = "${cron.calculateReindex}") public void calculate() { if (EnMePlaceHolderConfigurer.getSystemInitialized()) { log.info("calculate hastahg rating ..."); double average = 0; int total = 0; double score = 0; double scoreRank = 0; double averageHashTagRanking = 0; Date currentDate = DateUtil.getCurrentCalendarDate(); //store the max min values final List<Long> maxMinTotal = new ArrayList<Long>(); final List<HashTag> tags = getHashTagDao().getHashTags(null, 0, ""); log.debug("HashTag to process " + tags.size()); total = tags.size(); final List<Object[]> maxMin = getHashTagDao().getMaxMinTagFrecuency(); long maxFrecuency = 0; long minFrecuency = 0; if (maxMin.get(0) != null) { maxFrecuency = (Long) maxMin.get(0)[0]; // Max minFrecuency = (Long) maxMin.get(0)[1]; // Min } List<HashTagRanking> hashTagRankingList = getHashTagDao().getHashTagRankStats(currentDate); for (HashTag hashTag : tags) { final HashTagRanking tagRanking; log.debug("Calculate for: " + hashTag.getHashTag() + " size after calculate: " + hashTag.getSize()); long tagFrecuency = getHashTagFrecuency(hashTag.getHashTag(), this.INIT_RESULTS, this.MAX_RESULTS); log.debug("-------- tag frecuency: " + tagFrecuency); long relevance = (tagFrecuency + (hashTag.getHits() == null ? 0 : hashTag.getHits())); long logFrecuency = Math.round(EnMeUtils.calculateSizeTag(relevance, maxFrecuency, minFrecuency)); score += logFrecuency; scoreRank = Math.round((double) relevance / (double) total); averageHashTagRanking = scoreRank < 1 ? 
1 : Math.round(scoreRank); maxMinTotal.add(logFrecuency); hashTag.setSize(Long.valueOf(logFrecuency)); log.debug("Calculate for: " + hashTag.getHashTag() + " size before calculate: " + logFrecuency); hashTag.setCreatedAt(Calendar.getInstance().getTime()); getHashTagDao().saveOrUpdate(hashTag); // Save table if (hashTagRankingList.size() == 0) { tagRanking = this.createHashTagRanking(averageHashTagRanking, hashTag, currentDate); getHashTagDao().saveOrUpdate(tagRanking); } else { log.debug("Process has been executed today`s date"); } } average = (double) score / (double) total; log.info("*******************************"); log.info("******* Resume of Process *****"); log.info("-------------------------------"); log.info("| Max Frec : " + maxFrecuency + " |"); log.info("| Min Frec : " + minFrecuency + " |"); log.info("| Total : " + total + " |"); log.info("| Score : " + Math.round(score) + " |"); log.info("| Average : " + Math.round(average) + " |"); log.info("| Max : " + Collections.max(maxMinTotal) + " |"); log.info("| Min : " + Collections.min(maxMinTotal) + " |"); log.info("-------------------------------"); log.info("*******************************"); log.info("************ Finished Start hashtag calculate job **************"); } }
From source file:org.gephi.io.importer.plugin.file.spreadsheet.ImporterSpreadsheetCSV.java
private void autoDetectFieldDelimiter() { //Very simple naive detector but should work in most cases: try (LineNumberReader reader = ImportUtils.getTextReader(FileUtil.toFileObject(file))) { String line = reader.readLine(); //Check for typical delimiter chars in the header int commaCount = 0; int semicolonCount = 0; int tabCount = 0; int spaceCount = 0; boolean inQuote = false; for (char c : line.toCharArray()) { if (c == '"' || c == '\'') { inQuote = !inQuote;/*from ww w. j ava 2 s.co m*/ } if (!inQuote) { switch (c) { case ',': commaCount++; break; case ';': semicolonCount++; break; case '\t': tabCount++; break; case ' ': spaceCount++; break; } } } int max = Collections.max(Arrays.asList(commaCount, semicolonCount, tabCount, spaceCount)); if (commaCount == max) { fieldDelimiter = ','; } else if (semicolonCount == max) { fieldDelimiter = ';'; } else if (tabCount == max) { fieldDelimiter = '\t'; } else if (spaceCount == max) { fieldDelimiter = ' '; } } catch (IOException ex) { } }
From source file:de.teamgrit.grit.entities.Controller.java
/**
 * Restores GRIT from a persisted {@link State}: rebuilds the connection and
 * course maps and advances the id counters past the highest restored ids so
 * newly created entities never collide with restored ones.
 *
 * @param state the State storing the state information
 * @throws ConfigurationException if restoring the connections fails due to an
 *         invalid stored structure
 */
public void restoreState(State state) throws ConfigurationException {
    m_state = state;
    try {
        m_connections = state.restoreConnections();
    } catch (InvalidStructureException e) {
        throw new ConfigurationException(e);
    }
    if (!m_connections.isEmpty()) {
        // next id = one past the largest restored connection id
        m_nextConnectionId = Collections.max(m_connections.keySet()) + 1;
    }
    m_courses = state.restoreCourses();
    if (!m_courses.isEmpty()) {
        // next id = one past the largest restored course id
        m_nextCourseId = Collections.max(m_courses.keySet()) + 1;
    }
}
From source file:be.ugent.maf.cellmissy.analysis.singlecell.preprocessing.impl.SingleCellWellPreProcessorImpl.java
@Override public void generateRawCoordinatesRanges(SingleCellWellDataHolder singleCellWellDataHolder) { Double[][] transposedMatrix = AnalysisUtils .transpose2DArray(singleCellWellDataHolder.getRawTrackCoordinatesMatrix()); // compute the min and the max coordinates Double xMin = Collections.min(Arrays.asList(transposedMatrix[0])); Double xMax = Collections.max(Arrays.asList(transposedMatrix[0])); Double yMin = Collections.min(Arrays.asList(transposedMatrix[1])); Double yMax = Collections.max(Arrays.asList(transposedMatrix[1])); Double[][] rawCoordinatesRanges = new Double[2][2]; rawCoordinatesRanges[0] = new Double[] { xMin, xMax }; rawCoordinatesRanges[1] = new Double[] { yMin, yMax }; singleCellWellDataHolder.setRawCoordinatesRanges(rawCoordinatesRanges); }
From source file:org.libreplan.business.calendars.entities.CombinedWorkHours.java
@Override
protected EffortDuration updateCapacity(EffortDuration current, EffortDuration each) {
    // Keep the larger of the two durations. On a tie the current value is
    // returned, matching Collections.max semantics for equal elements.
    return current.compareTo(each) >= 0 ? current : each;
}
From source file:com.thoughtworks.gauge.Table.java
private Integer getMaxStringLength() { List<Integer> maxs = new ArrayList<Integer>(); maxs.add(getMaxStringSize(headers)); for (TableRow tableRow : tableRows) { maxs.add(getMaxStringSize(tableRow.getCellValues())); }// w w w . ja va 2 s.c o m return Collections.max(maxs); }
From source file:com.marvelution.jira.plugins.hudson.charts.BuildTestResultsRatioChartGenerator.java
/**
 * {@inheritDoc}
 *
 * Renders each build's passed/failed/skipped test percentages as a stacked XY
 * area chart, with a dot overlay marking the pass percentage per build. The
 * range axis is fixed to 0–100 and the domain axis spans the observed build
 * numbers with integer ticks.
 */
@Override
public ChartHelper generateChart() {
    buildMap = new HashMap<Integer, Build>();
    final CategoryTableXYDataset dataset = new CategoryTableXYDataset();
    for (Build build : builds) {
        final TestResult testResult = build.getTestResult();
        double passPercentage = 0.0D;
        double failPercentage = 0.0D;
        double skipPercentage = 0.0D;
        if (testResult != null && testResult.getTotal() > 0) {
            final double totalTests = Double.valueOf(testResult.getTotal());
            passPercentage = Double.valueOf(testResult.getPassed()) / totalTests * 100.0D;
            failPercentage = Double.valueOf(testResult.getFailed()) / totalTests * 100.0D;
            skipPercentage = Double.valueOf(testResult.getSkipped()) / totalTests * 100.0D;
        }
        final double buildNumber = Double.valueOf(build.getBuildNumber());
        dataset.add(buildNumber, passPercentage, seriesNames[0]);
        dataset.add(buildNumber, failPercentage, seriesNames[1]);
        dataset.add(buildNumber, skipPercentage, seriesNames[2]);
        buildMap.put(Integer.valueOf(build.getBuildNumber()), build);
    }
    final JFreeChart chart = ChartFactory.createStackedXYAreaChart("", "",
            getI18n().getText("hudson.charts.tests"), dataset, PlotOrientation.VERTICAL,
            false, false, false);
    chart.setBackgroundPaint(Color.WHITE);
    chart.setBorderVisible(false);
    final XYPlot plot = chart.getXYPlot();
    plot.setDataset(1, dataset);
    if (dataset.getItemCount() > 0) {
        // Dot overlay: shapes only for series 0 (pass percentage), others hidden.
        final XYLineAndShapeRenderer dotRenderer = new XYLineAndShapeRenderer(false, true);
        dotRenderer.setSeriesShapesVisible(1, false);
        dotRenderer.setSeriesLinesVisible(1, false);
        dotRenderer.setSeriesShapesVisible(2, false);
        dotRenderer.setSeriesLinesVisible(2, false);
        dotRenderer.setSeriesShape(0, new Ellipse2D.Double(-3.0D, -3.0D, 6.0D, 6.0D));
        dotRenderer.setSeriesPaint(0, GREEN_PAINT);
        dotRenderer.setSeriesShapesFilled(0, true);
        dotRenderer.setBaseToolTipGenerator(this);
        dotRenderer.setBaseItemLabelFont(ChartDefaults.defaultFont);
        dotRenderer.setBaseItemLabelsVisible(false);
        plot.setRenderer(0, dotRenderer);
        // Stacked areas: pass (green), fail (red), skipped (yellow).
        final StackedXYAreaRenderer2 areaRenderer = new StackedXYAreaRenderer2();
        areaRenderer.setSeriesPaint(0, GREEN_PAINT);
        areaRenderer.setSeriesPaint(1, RED_PAINT);
        areaRenderer.setSeriesPaint(2, YELLOW_PAINT);
        areaRenderer.setBaseItemLabelFont(ChartDefaults.defaultFont);
        areaRenderer.setBaseItemLabelsVisible(false);
        plot.setRenderer(1, areaRenderer);
        areaRenderer.setBaseToolTipGenerator(this);
    }
    // Percentages always span 0..100.
    final ValueAxis rangeAxis = plot.getRangeAxis();
    rangeAxis.setLowerBound(0.0D);
    rangeAxis.setUpperBound(100.0D);
    // Domain axis covers the observed build-number range with integer ticks.
    final NumberAxis domainAxis = new NumberAxis();
    domainAxis.setLowerBound(Collections.min(buildMap.keySet()));
    domainAxis.setUpperBound(Collections.max(buildMap.keySet()));
    final TickUnitSource integerTicks = NumberAxis.createIntegerTickUnits();
    domainAxis.setStandardTickUnits(integerTicks);
    plot.setDomainAxis(domainAxis);
    ChartUtil.setupPlot(plot);
    return new ChartHelper(chart);
}
From source file:fr.ericlab.mabed.structure.Corpus.java
public void loadCorpus(boolean parallelized) { output = "";/*from w w w. j av a2 s .c om*/ if (configuration.prepareCorpus) { prepareCorpus(); } String[] fileArray = new File("input/").list(); nbTimeSlices = 0; NumberFormat formatter = new DecimalFormat("00000000"); ArrayList<Integer> list = new ArrayList<>(); for (String filename : fileArray) { if (filename.endsWith(".text")) { try { list.add(formatter.parse(filename.substring(0, 8)).intValue()); } catch (ParseException ex) { Logger.getLogger(Corpus.class.getName()).log(Level.SEVERE, null, ex); } nbTimeSlices++; } } int a = Collections.min(list), b = Collections.max(list); distribution = new int[nbTimeSlices]; messageCount = 0; LineIterator it = null; try { it = FileUtils.lineIterator(new File("input/" + formatter.format(a) + ".time"), "UTF-8"); if (it.hasNext()) { SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S"); Date parsedDate = dateFormat.parse(it.nextLine()); startTimestamp = new java.sql.Timestamp(parsedDate.getTime()); } it = FileUtils.lineIterator(new File("input/" + formatter.format(b) + ".time"), "UTF-8"); String timestamp = ""; while (it.hasNext()) { timestamp = it.nextLine(); } SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S"); Date parsedDate = dateFormat.parse(timestamp); endTimestamp = new java.sql.Timestamp(parsedDate.getTime()); } catch (IOException | ParseException ex) { Logger.getLogger(Corpus.class.getName()).log(Level.SEVERE, null, ex); } finally { LineIterator.closeQuietly(it); } try { // Global index FileInputStream fisMatrix = new FileInputStream("input/indexes/frequencyMatrix.dat"); ObjectInputStream oisMatrix = new ObjectInputStream(fisMatrix); frequencyMatrix = (short[][]) oisMatrix.readObject(); FileInputStream fisVocabulary = new FileInputStream("input/indexes/vocabulary.dat"); ObjectInputStream oisVocabulary = new ObjectInputStream(fisVocabulary); vocabulary = (ArrayList<String>) oisVocabulary.readObject(); // Mention index 
FileInputStream fisMentionMatrix = new FileInputStream("input/indexes/mentionFrequencyMatrix.dat"); ObjectInputStream oisMentionMatrix = new ObjectInputStream(fisMentionMatrix); mentionFrequencyMatrix = (short[][]) oisMentionMatrix.readObject(); FileInputStream fisMentionVocabulary = new FileInputStream("input/indexes/mentionVocabulary.dat"); ObjectInputStream oisMentionVocabulary = new ObjectInputStream(fisMentionVocabulary); mentionVocabulary = (ArrayList<String>) oisMentionVocabulary.readObject(); // Message count String messageCountStr = FileUtils.readFileToString(new File("input/indexes/messageCount.txt")); messageCount = Integer.parseInt(messageCountStr); // Message count distribution FileInputStream fisDistribution = new FileInputStream("input/indexes/messageCountDistribution.dat"); ObjectInputStream oisDistribution = new ObjectInputStream(fisDistribution); distribution = (int[]) oisDistribution.readObject(); } catch (FileNotFoundException ex) { Logger.getLogger(Corpus.class.getName()).log(Level.SEVERE, null, ex); } catch (IOException | ClassNotFoundException ex) { Logger.getLogger(Corpus.class.getName()).log(Level.SEVERE, null, ex); } DecimalFormat df = new DecimalFormat("#,###"); System.out.println(Util.getDate() + " Loaded corpus:"); output += Util.getDate() + " Loaded corpus:\n"; info = " - time-slices: " + df.format(nbTimeSlices) + " time-slices of " + configuration.timeSliceLength + " minutes each\n"; info += " - first message: " + startTimestamp + "\n"; double datasetLength = (nbTimeSlices * configuration.timeSliceLength) / 60 / 24; info += " - last message: " + endTimestamp + " (" + datasetLength + " days)\n"; info += " - number of messages: " + df.format(messageCount); output += info; System.out.println(info); }
From source file:org.apache.cassandra.config.CFMetaData.java
public static void fixMaxId() { // never set it to less than 1000. this ensures that we have enough system CFids for future use. idGen.set(cfIdMap.size() == 0 ? MIN_CF_ID : Math.max(Collections.max(cfIdMap.values()) + 1, MIN_CF_ID)); }