Usage examples for java.util.Collections.max
@SuppressWarnings({ "unchecked", "rawtypes" })
public static <T> T max(Collection<? extends T> coll, Comparator<? super T> comp)
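Before the project-level examples below, a minimal, self-contained sketch of this overload (the class and data here are illustrative, not taken from any of the sources that follow): Collections.max returns the element that the supplied Comparator orders last, and throws NoSuchElementException when the collection is empty.

import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class CollectionsMaxDemo {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("pear", "fig", "banana");
        // The longest word according to the Comparator; an empty list
        // would make Collections.max throw NoSuchElementException.
        String longest = Collections.max(words, Comparator.comparingInt(String::length));
        System.out.println(longest); // prints "banana"
    }
}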
From source file:de.bund.bfr.knime.gis.GisUtils.java
public static Point2D getCenterOfLargestPolygon(MultiPolygon poly) {
    Map<Polygon, Double> areas = new LinkedHashMap<>();

    getPolygons(poly).forEach(p -> areas.put(p, p.getArea()));

    Point center = Collections.max(areas.entrySet(),
            (p1, p2) -> Double.compare(p1.getValue(), p2.getValue())).getKey().getCentroid();

    return new Point2D.Double(center.getX(), center.getY());
}
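On Java 8 and later the entry-by-value comparator above can also be written with Map.Entry.comparingByValue(); this is merely an equivalent alternative, not part of the original GisUtils source:

// equivalent to the lambda comparator, since Double is Comparable
Point center = Collections.max(areas.entrySet(), Map.Entry.comparingByValue()).getKey().getCentroid();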
From source file:xxx.web.comments.clustering.debatefiltering.DebateRanker.java
@Override
public void process(JCas aJCas) throws AnalysisEngineProcessException {
    // document topic vector - there must be exactly one topic annotation for the document
    TopicDistribution topicDistribution = JCasUtil.selectSingle(aJCas, TopicDistribution.class);

    // create vector (deep copy)
    double[] doubles = topicDistribution.getTopicProportions().toArray();

    // distances (JS-divergences) to all domains
    Map<String, Double> distancesToDomains = new HashMap<>();

    for (String domain : this.domainTopicVectorMap.keySet()) {
        // existing domain vector
        Vector averageTopicVectorForDomain = this.domainTopicVectorMap.get(domain);

        // compute JS divergence
        // double jsDivergence = Maths.jensenShannonDivergence(doubles,
        //         VectorUtils.toDoubleArray(averageTopicVectorForDomain));
        double cosineSimilarity = VectorUtils.cosineSimilarity(new DenseVector(doubles),
                averageTopicVectorForDomain);

        // distancesToDomains.put(domain, jsDivergence);
        distancesToDomains.put(domain, cosineSimilarity);
    }

    System.out.println(distancesToDomains);

    // find the "closest" domain (here: maximum cosine similarity)
    Map.Entry<String, Double> max = Collections.max(distancesToDomains.entrySet(),
            new Comparator<Map.Entry<String, Double>>() {
                public int compare(Map.Entry<String, Double> entry1, Map.Entry<String, Double> entry2) {
                    return entry1.getValue().compareTo(entry2.getValue());
                }
            });

    // TMP add to the list
    domainDivergencesMap.get(max.getKey()).add(max.getValue());

    // copy the debate to the appropriate output folder
    try {
        copyDebate(aJCas, max.getKey(), max.getValue());
    } catch (IOException e) {
        throw new AnalysisEngineProcessException(e);
    }
}
From source file:org.apache.drill.exec.server.rest.profile.OperatorWrapper.java
public void addSummary(TableBuilder tb) {
    String path = new OperatorPathBuilder().setMajor(major).setOperator(firstProfile).build();
    tb.appendCell(path, null);
    tb.appendCell(operatorType == null ? "UNKNOWN_OPERATOR" : operatorType.toString(), null);

    double setupSum = 0.0;
    double processSum = 0.0;
    double waitSum = 0.0;
    double memSum = 0.0;
    for (ImmutablePair<OperatorProfile, Integer> ip : ops) {
        OperatorProfile profile = ip.getLeft();
        setupSum += profile.getSetupNanos();
        processSum += profile.getProcessNanos();
        waitSum += profile.getWaitNanos();
        memSum += profile.getPeakLocalMemoryAllocated();
    }

    final ImmutablePair<OperatorProfile, Integer> shortSetup = Collections.min(ops, Comparators.setupTime);
    final ImmutablePair<OperatorProfile, Integer> longSetup = Collections.max(ops, Comparators.setupTime);
    tb.appendNanos(shortSetup.getLeft().getSetupNanos(), String.format(format, shortSetup.getRight()));
    tb.appendNanos(Math.round(setupSum / size), null);
    tb.appendNanos(longSetup.getLeft().getSetupNanos(), String.format(format, longSetup.getRight()));

    final ImmutablePair<OperatorProfile, Integer> shortProcess = Collections.min(ops, Comparators.processTime);
    final ImmutablePair<OperatorProfile, Integer> longProcess = Collections.max(ops, Comparators.processTime);
    tb.appendNanos(shortProcess.getLeft().getProcessNanos(), String.format(format, shortProcess.getRight()));
    tb.appendNanos(Math.round(processSum / size), null);
    tb.appendNanos(longProcess.getLeft().getProcessNanos(), String.format(format, longProcess.getRight()));

    final ImmutablePair<OperatorProfile, Integer> shortWait = Collections.min(ops, Comparators.waitTime);
    final ImmutablePair<OperatorProfile, Integer> longWait = Collections.max(ops, Comparators.waitTime);
    tb.appendNanos(shortWait.getLeft().getWaitNanos(), String.format(format, shortWait.getRight()));
    tb.appendNanos(Math.round(waitSum / size), null);
    tb.appendNanos(longWait.getLeft().getWaitNanos(), String.format(format, longWait.getRight()));

    final ImmutablePair<OperatorProfile, Integer> peakMem = Collections.max(ops, Comparators.operatorPeakMemory);
    tb.appendBytes(Math.round(memSum / size), null);
    tb.appendBytes(peakMem.getLeft().getPeakLocalMemoryAllocated(), null);
}
From source file:net.sf.jdmf.algorithms.classification.util.AttributeValuePartitioner.java
/**
 * Partitions attribute value pairs using the algorithm described above.
 *
 * This method needs heavy refactoring.
 */
public List<Rule> partitionValues(String firstAttributeName, String secondAttributeName,
        List<AttributeValuePair> valuePairs, int minimumMajorityClassExamples) {
    Collections.sort(valuePairs);

    AttributeValueIncidenceComparator incidenceComparator = new AttributeValueIncidenceComparator();

    List<AttributeValuePair> breakpoints = new ArrayList<AttributeValuePair>();
    List<AttributeValueIncidence> majorityClassExamples = new ArrayList<AttributeValueIncidence>();

    AttributeValueIncidence valueWithHighestIncidence = null;

    for (AttributeValuePair valuePair : valuePairs) {
        log.debug("valuePair: " + valuePair.toString());

        Comparable secondAttributeValue = valuePair.getSecondValue();

        if ((valueWithHighestIncidence != null)
                && (valueWithHighestIncidence.getCurrentIncidence() >= minimumMajorityClassExamples)
                && (valueWithHighestIncidence.getAttributeValue().equals(secondAttributeValue) == false)) {
            AttributeValuePair lastBreakpoint = breakpoints.get(breakpoints.size() - 1);

            Comparable finalBreakpointValue = calculateBreakpointValue(lastBreakpoint.getFirstValue(),
                    valuePair.getFirstValue());
            lastBreakpoint.setFirstValue(finalBreakpointValue);

            majorityClassExamples.clear();
        }

        AttributeValueIncidence valueIncidence = new AttributeValueIncidence(secondAttributeValue);
        int valueIncidenceIndex = majorityClassExamples.indexOf(valueIncidence);

        if (valueIncidenceIndex >= 0) {
            AttributeValueIncidence currentValueIncidence = majorityClassExamples.get(valueIncidenceIndex);
            currentValueIncidence.checkAttributeValue(secondAttributeValue);
        } else {
            majorityClassExamples.add(valueIncidence);
            valueIncidence.checkAttributeValue(secondAttributeValue);
        }

        valueWithHighestIncidence = Collections.max(majorityClassExamples, incidenceComparator);
        log.debug("attributeValueWithHighestIncidence: " + valueWithHighestIncidence.toString());

        Integer highestIncidence = valueWithHighestIncidence.getCurrentIncidence();

        if (valueIncidence.equals(valueWithHighestIncidence)) {
            if (highestIncidence == minimumMajorityClassExamples) {
                AttributeValuePair breakpoint = new AttributeValuePair(valuePair.getFirstValue(),
                        valueWithHighestIncidence.getAttributeValue());
                breakpoints.add(breakpoint);
            } else if (highestIncidence > minimumMajorityClassExamples) {
                AttributeValuePair lastBreakpoint = breakpoints.get(breakpoints.size() - 1);
                lastBreakpoint.setFirstValue(valuePair.getFirstValue());
            }
        }
    }

    Comparable lastPartitionAttributeValue = determineLastPartitionAttributeValue(majorityClassExamples,
            incidenceComparator);
    log.debug("lastPartitionAttributeValue=" + lastPartitionAttributeValue);

    mergePartitionsWithIdenticalAttributeValue(breakpoints, lastPartitionAttributeValue);
    log.debug("breakpoints: " + breakpoints.toString());

    List<Rule> partitioningRules = new ArrayList<Rule>();

    for (int i = 0; i < breakpoints.size(); ++i) {
        Comparable firstAttributeValue = breakpoints.get(i).getFirstValue();
        Comparable secondAttributeValue = breakpoints.get(i).getSecondValue();

        if (i == 0) {
            Rule firstRule = new Rule();
            firstRule.defineIf().attribute(firstAttributeName).isLowerThanOrEqualTo(firstAttributeValue);
            firstRule.defineThen().attribute(secondAttributeName).equals(secondAttributeValue);

            partitioningRules.add(firstRule);
            continue;
        }

        Comparable previousBreakpointAttributeValue = breakpoints.get(i - 1).getFirstValue();

        Rule partitioningRule = new Rule();
        partitioningRule.defineIf().attribute(firstAttributeName)
                .isGreaterThan(previousBreakpointAttributeValue).and(partitioningRule)
                .attribute(firstAttributeName).isLowerThanOrEqualTo(firstAttributeValue);
        partitioningRule.defineThen().attribute(secondAttributeName).equals(secondAttributeValue);

        partitioningRules.add(partitioningRule);
    }

    if (breakpoints.isEmpty()) {
        Rule globalRule = new Rule();
        globalRule.defineThen().attribute(secondAttributeName).equals(lastPartitionAttributeValue);

        partitioningRules.add(globalRule);
    } else {
        Comparable lastBreakpointValue = breakpoints.get(breakpoints.size() - 1).getFirstValue();

        Rule lastRule = new Rule();
        lastRule.defineIf().attribute(firstAttributeName).isGreaterThan(lastBreakpointValue);
        lastRule.defineThen().attribute(secondAttributeName).equals(lastPartitionAttributeValue);

        partitioningRules.add(lastRule);
    }

    return partitioningRules;
}
From source file:com.lumata.lib.lupa.extractor.internal.HtmlBiggestImageExtractor.java
@Override
public Image extractBestImage(URL sourceUrl, Elements htmlSection, ImageExtractionRequirements requirements) {
    Map<String, Image> imagesToExplore = new HashMap<String, Image>();
    Set<ImageDownloadTask> imagesToDownload = new HashSet<ImageDownloadTask>();
    Iterator<org.jsoup.nodes.Element> it = htmlSection.iterator();

    // collect valid images
    while (it.hasNext() && imagesToExplore.size() < requirements.getMaxImagesToExplore()) {
        Element imageElement = it.next();
        String imageUrl = imageElement.absUrl("src");

        // Do not process empty img tags, duplicated images or tracking
        // pixels and other assorted ads
        if (imageUrl == null || imagesToExplore.containsKey(imageUrl) || isTrackingPixelOrAd(imageUrl)) {
            continue;
        }

        // remember this image
        Image imageContent = new Image(imageUrl);
        if (imageElement.hasAttr(WIDTH_ATTRIBUTE)) {
            // TODO: We need to convert other picture size units supported by html (there must be a lib for this)
            imageContent.setWidth(Integer.parseInt(imageElement.attr(WIDTH_ATTRIBUTE).replace("px", "")));
        }
        if (imageElement.hasAttr(HEIGHT_ATTRIBUTE)) {
            imageContent.setHeight(Integer.parseInt(imageElement.attr(HEIGHT_ATTRIBUTE).replace("px", "")));
        }
        if (imageContent.getWidth() == null || imageContent.getHeight() == null) { // mark image to download
            imagesToDownload.add(new ImageDownloadTask(imageContent));
        }
        imagesToExplore.put(imageUrl, imageContent);
    }

    // if dimensions are empty -> download image
    if (CollectionUtils.isNotEmpty(imagesToDownload)) {
        try {
            ExecutorService pool = Executors.newFixedThreadPool(imagesToDownload.size(),
                    getThreadFactory(sourceUrl));
            pool.invokeAll(imagesToDownload);
            pool.shutdown();
        } catch (InterruptedException e) {
            LOG.error("InterruptedException while downloading images", e);
        }
    }

    // select biggest image
    Image biggestImage = null;
    try {
        biggestImage = Collections.max(imagesToExplore.values(), new Comparator<Image>() {
            @Override
            public int compare(Image o1, Image o2) {
                return getSquarePixels(o1) - getSquarePixels(o2);
            }
        });
    } catch (NoSuchElementException e) {
        return null;
    }

    // if image is too small, discard
    return (biggestImage.getWidth() < requirements.getMinImageSize()
            || biggestImage.getHeight() < requirements.getMinImageSize()) ? null : biggestImage;
}
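An aside on the comparator in this example, not part of the original source: a subtraction-based int comparator can overflow for very large pixel counts and invert the ordering; Integer.compare avoids that. Assuming a Java 8+ target, the selection could be written as:

// overflow-safe equivalent of the subtraction-based comparator
biggestImage = Collections.max(imagesToExplore.values(),
        (o1, o2) -> Integer.compare(getSquarePixels(o1), getSquarePixels(o2)));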
From source file:org.datacleaner.user.upgrade.DataCleanerHomeUpgrader.java
private FileObject findUpgradeCandidate(FileObject target) throws FileSystemException {
    FileObject parentFolder = target.getParent();

    List<FileObject> versionFolders = new ArrayList<>();
    FileObject[] allFoldersInParent = parentFolder.findFiles(new FileDepthSelector(1, 1));
    for (FileObject folderInParent : allFoldersInParent) {
        final String folderInParentName = folderInParent.getName().getBaseName();
        if (folderInParent.getType().equals(FileType.FOLDER)
                && (!folderInParentName.equals(target.getName().getBaseName()))
                && (!candidateBlacklist.contains(folderInParentName))) {
            versionFolders.add(folderInParent);
        }
    }

    List<FileObject> validatedVersionFolders = validateVersionFolders(versionFolders);

    if (!validatedVersionFolders.isEmpty()) {
        List<String> versions = new ArrayList<>();
        for (FileObject validatedVersionFolder : validatedVersionFolders) {
            String baseName = validatedVersionFolder.getName().getBaseName();
            versions.add(baseName);
        }

        final Comparator<String> comp = new VersionComparator();
        String latestVersion = Collections.max(versions, comp);
        FileObject latestVersionFolder = null;
        for (FileObject validatedVersionFolder : validatedVersionFolders) {
            if (validatedVersionFolder.getName().getBaseName().equals(latestVersion)) {
                latestVersionFolder = validatedVersionFolder;
            }
        }
        return latestVersionFolder;
    } else {
        return null;
    }
}
From source file:us.mn.state.dot.tms.client.camera.FTPStream.java
protected FTPFile getMaxLastModified(FTPFile[] ftpFiles) {
    return Collections.max(Arrays.asList(ftpFiles), new FTPComparator());
}
From source file:ru.bmstu.iu6.producthuntsimpleclient.activities.CategoryActivity.java
private void dispatchNewPosts() {
    adapter.setPosts(postList);

    if (postList.size() > 0) {
        textHelper.setVisibility(View.GONE);
    } else {
        textHelper.setVisibility(View.VISIBLE);
        textHelper.setText(R.string.no_posts_today);
    }

    SharedPreferences.Editor editor = sharedPreferences.edit();
    if (postList.size() > 0) {
        Post maxIdPost = Collections.max(postList, (pf, ps) -> pf.getId().compareTo(ps.getId()));
        editor.putInt(PREF_NEWEST, maxIdPost.getId());
    } else {
        editor.remove(PREF_NEWEST);
    }
    editor.apply();
}
From source file:org.nuclos.server.statemodel.valueobject.StateModelUsages.java
/**
 * @todo refactor: use this algorithm in UsageCriteria.getBestMatchingUsageCriteria - it's clearer.
 * <br>
 * @param usagecriteria
 * @return the usage for the state model with the given usagecriteria
 */
public StateModelUsage getStateModelUsage(final UsageCriteria usagecriteria) {
    // 1. find matching usages (candidates):
    final List<StateModelUsage> lstCandidates = CollectionUtils.select(lstStateUsage,
            new Predicate<StateModelUsage>() {
                @Override
                public boolean evaluate(StateModelUsage o) {
                    return o.getUsageCriteria().isMatchFor(usagecriteria);
                }
            });

    final StateModelUsage result;
    if (lstCandidates.isEmpty()) {
        /** @todo rather throw an exception here */
        result = null;
    } else {
        // 2. These candidates are totally ordered with respect to isLessOrEqual(). The result is the greatest
        // of these candidates.
        result = Collections.max(lstCandidates, new Comparator<StateModelUsage>() {
            @Override
            public int compare(StateModelUsage su1, StateModelUsage su2) {
                return su1.getUsageCriteria().compareTo(su2.getUsageCriteria());
            }
        });
    }
    return result;
}
From source file:br.upe.ecomp.doss.algorithm.apso.APSO.java
private double calculateEvolutionaryFactor() {
    double evolFactor;
    APSOParticle[] particles = (APSOParticle[]) getParticles();
    List<APSOParticle> parciclesList = Arrays.asList(particles);

    APSOParticle particleAux = Collections.min(parciclesList, new ComparatorMeanDistance());
    double dmin = particleAux.getMeanDistanceToOthersParticles();

    particleAux = Collections.max(parciclesList, new ComparatorMeanDistance());
    double dmax = particleAux.getMeanDistanceToOthersParticles();

    particleAux = Collections.max(parciclesList, new ComparatorMaximumFitness());
    double dg = particleAux.getMeanDistanceToOthersParticles();

    evolFactor = (dg - dmin) / (dmax - dmin);
    return evolFactor;
}