List of usage examples for the `java.util.Collections.min` method.
public static <T extends Object & Comparable<? super T>> T min(Collection<? extends T> coll)
From source file:org.wallerlab.yoink.cube.service.CubeBuilderImpl.java
private void getMinMax(List<Double> xCoordOfAllMolecules, List<Double> yCoordOfAllMolecules, List<Double> zCoordOfAllMolecules, Cube cube, double[] xyzMinimumOfCube, double[] xyzMaximumOfCube) { double xMinimumOfCube = Collections.min(xCoordOfAllMolecules); double yMinimumOfCube = Collections.min(yCoordOfAllMolecules); double zMinimumOfCube = Collections.min(zCoordOfAllMolecules); double xMaximumOfCube = Collections.max(xCoordOfAllMolecules); double yMaximumOfCube = Collections.max(yCoordOfAllMolecules); double zMaximumOfCube = Collections.max(zCoordOfAllMolecules); // build a larger cube to write cube files if (cube.getDensityTypes().size() != 0) { xMinimumOfCube -= 2;/*from w w w . java 2 s . c om*/ yMinimumOfCube -= 2; zMinimumOfCube -= 2; xMaximumOfCube += 2; yMaximumOfCube += 2; zMaximumOfCube += 2; } xyzMinimumOfCube[0] = xMinimumOfCube; xyzMinimumOfCube[1] = yMinimumOfCube; xyzMinimumOfCube[2] = zMinimumOfCube; xyzMaximumOfCube[0] = xMaximumOfCube; xyzMaximumOfCube[1] = yMaximumOfCube; xyzMaximumOfCube[2] = zMaximumOfCube; }
From source file:fr.ericlab.mabed.structure.Corpus.java
public void loadCorpus(boolean parallelized) { output = "";// w ww.j a va 2s.c o m if (configuration.prepareCorpus) { prepareCorpus(); } String[] fileArray = new File("input/").list(); nbTimeSlices = 0; NumberFormat formatter = new DecimalFormat("00000000"); ArrayList<Integer> list = new ArrayList<>(); for (String filename : fileArray) { if (filename.endsWith(".text")) { try { list.add(formatter.parse(filename.substring(0, 8)).intValue()); } catch (ParseException ex) { Logger.getLogger(Corpus.class.getName()).log(Level.SEVERE, null, ex); } nbTimeSlices++; } } int a = Collections.min(list), b = Collections.max(list); distribution = new int[nbTimeSlices]; messageCount = 0; LineIterator it = null; try { it = FileUtils.lineIterator(new File("input/" + formatter.format(a) + ".time"), "UTF-8"); if (it.hasNext()) { SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S"); Date parsedDate = dateFormat.parse(it.nextLine()); startTimestamp = new java.sql.Timestamp(parsedDate.getTime()); } it = FileUtils.lineIterator(new File("input/" + formatter.format(b) + ".time"), "UTF-8"); String timestamp = ""; while (it.hasNext()) { timestamp = it.nextLine(); } SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.S"); Date parsedDate = dateFormat.parse(timestamp); endTimestamp = new java.sql.Timestamp(parsedDate.getTime()); } catch (IOException | ParseException ex) { Logger.getLogger(Corpus.class.getName()).log(Level.SEVERE, null, ex); } finally { LineIterator.closeQuietly(it); } try { // Global index FileInputStream fisMatrix = new FileInputStream("input/indexes/frequencyMatrix.dat"); ObjectInputStream oisMatrix = new ObjectInputStream(fisMatrix); frequencyMatrix = (short[][]) oisMatrix.readObject(); FileInputStream fisVocabulary = new FileInputStream("input/indexes/vocabulary.dat"); ObjectInputStream oisVocabulary = new ObjectInputStream(fisVocabulary); vocabulary = (ArrayList<String>) oisVocabulary.readObject(); // Mention index FileInputStream 
fisMentionMatrix = new FileInputStream("input/indexes/mentionFrequencyMatrix.dat"); ObjectInputStream oisMentionMatrix = new ObjectInputStream(fisMentionMatrix); mentionFrequencyMatrix = (short[][]) oisMentionMatrix.readObject(); FileInputStream fisMentionVocabulary = new FileInputStream("input/indexes/mentionVocabulary.dat"); ObjectInputStream oisMentionVocabulary = new ObjectInputStream(fisMentionVocabulary); mentionVocabulary = (ArrayList<String>) oisMentionVocabulary.readObject(); // Message count String messageCountStr = FileUtils.readFileToString(new File("input/indexes/messageCount.txt")); messageCount = Integer.parseInt(messageCountStr); // Message count distribution FileInputStream fisDistribution = new FileInputStream("input/indexes/messageCountDistribution.dat"); ObjectInputStream oisDistribution = new ObjectInputStream(fisDistribution); distribution = (int[]) oisDistribution.readObject(); } catch (FileNotFoundException ex) { Logger.getLogger(Corpus.class.getName()).log(Level.SEVERE, null, ex); } catch (IOException | ClassNotFoundException ex) { Logger.getLogger(Corpus.class.getName()).log(Level.SEVERE, null, ex); } DecimalFormat df = new DecimalFormat("#,###"); System.out.println(Util.getDate() + " Loaded corpus:"); output += Util.getDate() + " Loaded corpus:\n"; info = " - time-slices: " + df.format(nbTimeSlices) + " time-slices of " + configuration.timeSliceLength + " minutes each\n"; info += " - first message: " + startTimestamp + "\n"; double datasetLength = (nbTimeSlices * configuration.timeSliceLength) / 60 / 24; info += " - last message: " + endTimestamp + " (" + datasetLength + " days)\n"; info += " - number of messages: " + df.format(messageCount); output += info; System.out.println(info); }
From source file:org.dspace.content.ItemComparator.java
/** * Choose the canonical value from an item for comparison. If there are no * values, null is returned. If there is exactly one value, then it is * returned. Otherwise, either the maximum or minimum lexicographical value * is returned; the parameter to the constructor says which. * /* w w w .ja va2s . c o m*/ * @param item * The item to check * @return The chosen value, or null */ private String getValue(Item item) { // The overall array and each element are guaranteed non-null DCValue[] dcvalues = item.getDC(element, qualifier, language); if (dcvalues.length == 0) { return null; } if (dcvalues.length == 1) { return normalizeTitle(dcvalues[0]); } // We want to sort using Strings, but also keep track of // which DCValue the value came from. Map<String, Integer> values = new HashMap<String, Integer>(); for (int i = 0; i < dcvalues.length; i++) { String value = dcvalues[i].value; if (value != null) { values.put(value, Integer.valueOf(i)); } } if (values.size() == 0) { return null; } Set<String> valueSet = values.keySet(); String chosen = max ? Collections.max(valueSet) : Collections.min(valueSet); int index = (values.get(chosen)).intValue(); return normalizeTitle(dcvalues[index]); }
From source file:dkpro.similarity.algorithms.lsr.LexSemResourceComparator.java
/**
 * Picks the best relatedness score from a list of candidate values.
 * Negative entries are treated as "not found" and filtered out; if nothing
 * remains, NOT_FOUND is used. For distance measures the smallest remaining
 * value is best, otherwise the largest.
 *
 * @param relatednessValues candidate scores (may be empty)
 * @return the best score, or NOT_FOUND when no usable score exists
 * @throws SimilarityException declared for interface compatibility
 */
protected double getBestRelatedness(List<Double> relatednessValues) throws SimilarityException {
    if (relatednessValues.isEmpty()) {
        return NOT_FOUND;
    }
    // Keep only non-negative scores; negative values mark missing entries.
    List<Double> validScores = new ArrayList<Double>();
    for (double score : relatednessValues) {
        if (score >= 0.0) {
            validScores.add(score);
        }
    }
    if (validScores.isEmpty()) {
        validScores.add(NOT_FOUND);
    }
    // Distance measures: smaller is better; similarity measures: larger is better.
    if (isDistanceMeasure()) {
        return Collections.min(validScores);
    }
    return Collections.max(validScores);
}
From source file:org.apache.accumulo.test.functional.RegexGroupBalanceIT.java
private boolean checkGroup(Table<String, String, MutableInt> groupLocationCounts, String group, int min, int max, int tsevers) { Collection<MutableInt> counts = groupLocationCounts.row(group).values(); if (counts.size() == 0) { return min == 0 && max == 0 && tsevers == 0; }// ww w . j ava2 s . co m return min == Collections.min(counts).intValue() && max == Collections.max(counts).intValue() && counts.size() == tsevers; }
From source file:TwitterClustering.java
public static Long findClosestNumber(List list, Long num) { if (list.size() > 0) { // Check list does not empty Long smaller = (Long) Collections.min(list); // get min number from // the list Long larger = (Long) Collections.max(list); // get max number from // the list for (int i = 0; i < list.size(); i++) { // Traverse list if (num == (Long) list.get(i)) // if find the passed number in // the list {/* w w w . java 2 s . co m*/ return num; // than return num } if (num > (Long) list.get(i) && smaller < (Long) list.get(i)) // find // nearest // smaller { smaller = (Long) list.get(i); } if (num < (Long) list.get(i) && larger > (Long) list.get(i)) // find // nearest // larger { larger = (Long) list.get(i); } } return (num - smaller < larger - num ? smaller : larger); // return // closest // number } return new Long(0); }
From source file:eu.annocultor.tagger.postprocessors.PeopleTermFilter.java
private boolean checkDates(Set<Integer> ulanBirthYears, Set<Integer> ulanDeathYears, int reqBirthYear, int reqDeathYear, int toleranceMultiplier, boolean lifeDate) { // tolerance of 1 year per 100 years back int toleranceOnDeathYear = (2000 - reqDeathYear) * toleranceMultiplier / 100; int toleranceOnBirthYear = (2000 - reqBirthYear) * toleranceMultiplier / 100; if (lifeDate) { return (reqBirthYear >= (Collections.min(ulanBirthYears) - toleranceOnBirthYear)) && (ulanDeathYears.isEmpty() || reqBirthYear <= (Collections.max(ulanDeathYears) + toleranceOnDeathYear)); } else {//from www . ja v a2 s . co m // old people should have death year if (reqBirthYear <= allDeadYear && (ulanDeathYears == null || ulanDeathYears.isEmpty())) return false; // if present, birth year should match if (reqBirthYear > 0) { if (!(ulanBirthYears.isEmpty() || (reqBirthYear >= (Collections.min(ulanBirthYears) - toleranceOnBirthYear)) && (reqBirthYear <= (Collections.max(ulanBirthYears) + toleranceOnBirthYear)))) return false; } // if present, death year should match if (reqDeathYear != 0) { // young guys have their right to be alive if (!(reqBirthYear > allDeadYear && ulanDeathYears.isEmpty())) { if (!(ulanDeathYears.isEmpty() || (reqDeathYear >= (Collections.min(ulanDeathYears) - toleranceOnDeathYear)) && (reqDeathYear <= (Collections.max(ulanDeathYears) + toleranceOnDeathYear)))) return false; } } } return true; }
From source file:be.ugent.maf.cellmissy.analysis.singlecell.preprocessing.impl.SingleCellWellPreProcessorImpl.java
@Override public void generateShiftedCoordinatesRanges(SingleCellWellDataHolder singleCellWellDataHolder) { Double[][] transposedMatrix = AnalysisUtils .transpose2DArray(singleCellWellDataHolder.getShiftedTrackCoordinatesMatrix()); // compute the min and the max coordinates Double xMin = Collections.min(Arrays.asList(transposedMatrix[0])); Double xMax = Collections.max(Arrays.asList(transposedMatrix[0])); Double yMin = Collections.min(Arrays.asList(transposedMatrix[1])); Double yMax = Collections.max(Arrays.asList(transposedMatrix[1])); Double[][] shiftedCoordinatesRanges = new Double[2][2]; shiftedCoordinatesRanges[0] = new Double[] { xMin, xMax }; shiftedCoordinatesRanges[1] = new Double[] { yMin, yMax }; singleCellWellDataHolder.setShiftedCoordinatesRanges(shiftedCoordinatesRanges); }
From source file:be.ugent.maf.cellmissy.gui.controller.analysis.doseresponse.area.AreaDRInitialController.java
/** * Prepare data for fitting starting from the analysis group. * * @param dRAnalysisGroup//from w ww . ja v a 2 s .com * @return LinkedHashMap That maps the concentration (log-transformed!) to * the replicate velocities */ private List<DoseResponsePair> prepareFittingData(AreaDoseResponseAnalysisGroup dRAnalysisGroup) { List<DoseResponsePair> result = new ArrayList<>(); List<List<Double>> allVelocities = new ArrayList<>(); List<Double> allLogConcentrations = new ArrayList<>(); //put concentrations of treatment to analyze (control not included!) in list LinkedHashMap<Double, String> nestedMap = dRAnalysisGroup.getConcentrationsMap() .get(dRAnalysisGroup.getTreatmentToAnalyse()); for (Double concentration : nestedMap.keySet()) { //key can only be linked with a single value, if one concentration is setup to have more than one associated concentration unit, only the last will remain String unit = nestedMap.get(concentration); Double logConcentration = AnalysisUtils.logTransform(concentration, unit); allLogConcentrations.add(logConcentration); } Double lowestLogConc = Collections.min(allLogConcentrations); //iterate through conditions int x = 0; for (PlateCondition plateCondition : dRAnalysisGroup.getVelocitiesMap().keySet()) { List<Double> replicateVelocities = dRAnalysisGroup.getVelocitiesMap().get(plateCondition); //check if this platecondition is the control for (Treatment treatment : plateCondition.getTreatmentList()) { if (treatment.getTreatmentType().getName().contains("ontrol")) { allLogConcentrations.add(x, lowestLogConc - 1.0); } } allVelocities.add(replicateVelocities); x++; } for (int i = 0; i < allVelocities.size(); i++) { result.add(new DoseResponsePair(allLogConcentrations.get(i), allVelocities.get(i))); } return result; }
From source file:org.apache.kylin.source.kafka.hadoop.KafkaFlatTableJob.java
private void setupMapper(CubeSegment cubeSeg) throws IOException { // set the segment's offset info to job conf Map<Integer, Long> offsetStart = cubeSeg.getSourcePartitionOffsetStart(); Map<Integer, Long> offsetEnd = cubeSeg.getSourcePartitionOffsetEnd(); Integer minPartition = Collections.min(offsetStart.keySet()); Integer maxPartition = Collections.max(offsetStart.keySet()); job.getConfiguration().set(CONFIG_KAFKA_PARITION_MIN, minPartition.toString()); job.getConfiguration().set(CONFIG_KAFKA_PARITION_MAX, maxPartition.toString()); for (Integer partition : offsetStart.keySet()) { job.getConfiguration().set(CONFIG_KAFKA_PARITION_START + partition, offsetStart.get(partition).toString()); job.getConfiguration().set(CONFIG_KAFKA_PARITION_END + partition, offsetEnd.get(partition).toString()); }/*from ww w . j av a 2 s .c o m*/ job.setMapperClass(KafkaFlatTableMapper.class); job.setInputFormatClass(KafkaInputFormat.class); job.setOutputKeyClass(Text.class); job.setOutputValueClass(Text.class); job.setOutputFormatClass(SequenceFileOutputFormat.class); job.setNumReduceTasks(0); }