List of usage examples for java.util.Comparator.comparing
public static <T, U extends Comparable<? super U>> Comparator<T> comparing(Function<? super T, ? extends U> keyExtractor)
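Given a key-extracting function, comparing returns a Comparator<T> that orders elements by the natural order of their extracted keys. A minimal standalone sketch (the list contents are illustrative):

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class ComparingDemo {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("pear", "fig", "apple");
        // Key extractor String::length yields Integer keys, compared by natural order.
        words.sort(Comparator.comparing(String::length));
        System.out.println(words); // [fig, pear, apple]
    }
}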
From source file:com.naver.divideandconquer.closestpair.ClosestPair.java
private Pair getCenterClosestPair(Point[] points, double minDistance) {
    Point[] closestPairCandidatePoints = new Point[points.length];
    int count = 0;
    int mid = points.length / 2;
    Point standardPoint = points[mid];
    // Collect the points whose x-distance to the dividing line is below minDistance.
    for (int i = 0; i < points.length; i++) {
        double distance = standardPoint.getX() > points[i].getX()
                ? standardPoint.getX() - points[i].getX()
                : points[i].getX() - standardPoint.getX();
        if (distance < minDistance) {
            closestPairCandidatePoints[count++] = points[i];
        }
    }
    closestPairCandidatePoints = Arrays.copyOfRange(closestPairCandidatePoints, 0, count);
    // Sort the strip by y so only a bounded window of neighbors must be checked.
    Arrays.sort(closestPairCandidatePoints, Comparator.comparing(Point::getY));
    Pair centerClosestPair = null;
    for (int i = 0; i < closestPairCandidatePoints.length - 1; i++) {
        for (int j = i + 1; j < i + 6 && j < closestPairCandidatePoints.length; j++) {
            double distance = calculateDistance(closestPairCandidatePoints[i], closestPairCandidatePoints[j]);
            if (distance < minDistance) {
                centerClosestPair = new Pair(closestPairCandidatePoints[i],
                        closestPairCandidatePoints[j], distance);
                minDistance = distance; // track the running minimum so the closest pair wins
            }
        }
    }
    return centerClosestPair;
}
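Comparator.comparing(Point::getY) boxes each y-coordinate into a Double. If Point.getY returns a primitive double (an assumption about this codebase), the specialized Comparator.comparingDouble avoids that boxing. A minimal sketch with an illustrative Point stand-in:

import java.util.Arrays;
import java.util.Comparator;

public class StripSortDemo {
    // Illustrative stand-in for the example's Point class.
    static final class Point {
        final double x, y;
        Point(double x, double y) { this.x = x; this.y = y; }
        double getY() { return y; }
        public String toString() { return "(" + x + "," + y + ")"; }
    }

    public static void main(String[] args) {
        Point[] strip = { new Point(1, 3.0), new Point(2, 1.0), new Point(3, 2.0) };
        // Primitive-specialized key extractor: no Double boxing per comparison.
        Arrays.sort(strip, Comparator.comparingDouble(Point::getY));
        System.out.println(Arrays.toString(strip)); // [(2.0,1.0), (3.0,2.0), (1.0,3.0)]
    }
}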
From source file:org.apache.nifi.toolkit.cli.impl.result.BucketsResult.java
public BucketsResult(final ResultType resultType, final List<Bucket> buckets) {
    super(resultType);
    this.buckets = buckets;
    Validate.notNull(buckets);
    // NOTE: it is important that the order the buckets are printed is the same order for the ReferenceResolver
    this.buckets.sort(Comparator.comparing(Bucket::getName));
}
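Comparator.comparing(Bucket::getName) throws a NullPointerException during the sort if any extracted name is null. When null keys are possible, the two-argument comparing overload combined with Comparator.nullsFirst is the usual guard. A minimal sketch over plain strings standing in for the keys:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class NullSafeKeyDemo {
    public static void main(String[] args) {
        // Keys (here, the strings themselves) may be null: order null keys first.
        List<String> names = Arrays.asList("beta", null, "alpha");
        names.sort(Comparator.comparing((String s) -> s,
                Comparator.nullsFirst(Comparator.<String>naturalOrder())));
        System.out.println(names); // [null, alpha, beta]
    }
}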
From source file:com.simiacryptus.mindseye.test.PCAUtil.java
/**
 * Pca features inv tensor [ ].
 *
 * @param covariance        the covariance
 * @param components        the components
 * @param featureDimensions the feature dimensions
 * @param power             the power
 * @return the tensor [ ]
 */
public static Tensor[] pcaFeatures(final RealMatrix covariance, final int components,
        final int[] featureDimensions, final double power) {
    @Nonnull
    final EigenDecomposition decomposition = new EigenDecomposition(covariance);
    // Order the eigenvector indices by descending eigenvalue (negated key, ascending sort).
    final int[] orderedVectors = IntStream.range(0, components).boxed()
            .sorted(Comparator.comparing(x -> -decomposition.getRealEigenvalue(x)))
            .mapToInt(x -> x).toArray();
    return IntStream.range(0, orderedVectors.length).mapToObj(i -> {
        @Nonnull
        final Tensor src = new Tensor(decomposition.getEigenvector(orderedVectors[i]).toArray(),
                featureDimensions).copy();
        return src.scale(1.0 / src.rms())
                .scale(Math.pow(decomposition.getRealEigenvalue(orderedVectors[i])
                        / decomposition.getRealEigenvalue(orderedVectors[0]), power));
    }).toArray(i -> new Tensor[i]);
}
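The snippet sorts indices in descending eigenvalue order by negating the key. An equivalent and arguably clearer form uses Comparator.comparingDouble with reversed(); note the explicit lambda parameter type, which chaining requires. A minimal standalone sketch (the values array is illustrative):

import java.util.Comparator;
import java.util.stream.IntStream;

public class DescendingIndexSort {
    public static void main(String[] args) {
        double[] values = {0.5, 2.0, 1.25};
        // Indices sorted so that values[index] is descending.
        int[] order = IntStream.range(0, values.length).boxed()
                .sorted(Comparator.comparingDouble((Integer i) -> values[i]).reversed())
                .mapToInt(Integer::intValue).toArray();
        System.out.println(java.util.Arrays.toString(order)); // [1, 2, 0]
    }
}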
From source file:org.talend.dataprep.transformation.cache.CacheKeyGenerator.java
/**
 * Build a cache key with additional parameters.
 * When the source type is HEAD, the user id is not included in the cache key, as the HEAD sample is common for all users.
 */
public TransformationCacheKey generateContentKey(final String datasetId, final String preparationId,
        final String stepId, final String format, final ExportParameters.SourceType sourceType,
        final Map<String, String> parameters) {
    final String actualParameters = parameters == null ? StringUtils.EMPTY
            : parameters.entrySet().stream() //
                    .sorted(Comparator.comparing(Map.Entry::getKey)) //
                    .map(Map.Entry::getValue) //
                    .reduce((s1, s2) -> s1 + s2) //
                    .orElse(StringUtils.EMPTY);
    final ExportParameters.SourceType actualSourceType = sourceType == null ? HEAD : sourceType;
    final String actualUserId = actualSourceType == HEAD ? null : security.getUserId();
    return new TransformationCacheKey(preparationId, datasetId, format, stepId, actualParameters,
            actualSourceType, actualUserId);
}
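For sorting map entries by key, the JDK also provides the equivalent Map.Entry.comparingByKey(). A minimal sketch of the same concatenate-values-in-key-order idea:

import java.util.Map;
import java.util.stream.Collectors;

public class EntrySortDemo {
    public static void main(String[] args) {
        Map<String, String> parameters = Map.of("b", "2", "a", "1");
        // Concatenate values in key order, as the cache key generator does.
        String joined = parameters.entrySet().stream()
                .sorted(Map.Entry.comparingByKey())
                .map(Map.Entry::getValue)
                .collect(Collectors.joining());
        System.out.println(joined); // 12
    }
}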
From source file:org.apache.nifi.toolkit.cli.impl.result.VersionedFlowsResult.java
public VersionedFlowsResult(final ResultType resultType, final List<VersionedFlow> flows) {
    super(resultType);
    this.versionedFlows = flows;
    Validate.notNull(this.versionedFlows);
    // NOTE: it is important that the order the flows are printed is the same order for the ReferenceResolver
    this.versionedFlows.sort(Comparator.comparing(VersionedFlow::getName));
}
From source file:net.morimekta.idltool.cmd.RemoteList.java
private Comparator<Map.Entry<String, Remote>> comparator() {
    switch (sorting) {
        case alphabetical:
            return Comparator.comparing(Map.Entry::getKey);
        case newest_first:
            // Negating the key sorts descending by time.
            return Comparator.comparing(e -> -e.getValue().getTime());
        case oldest_first:
            return Comparator.comparing(e -> e.getValue().getTime());
    }
    throw new ArgumentException("Wooops!");
}
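Negating the extracted key only works for numeric keys; the general way to flip an ordering is reversed(). Chaining breaks target-type inference, so the lambda parameter then needs an explicit type. A minimal sketch (Map.Entry<String, Long> stands in for the example's Remote entries):

import java.util.Comparator;
import java.util.Map;

public class ReversedDemo {
    // Newest first, without negating the key: build the ascending comparator, then reverse it.
    static Comparator<Map.Entry<String, Long>> newestFirst() {
        return Comparator.comparing((Map.Entry<String, Long> e) -> e.getValue()).reversed();
    }

    public static void main(String[] args) {
        // Positive result: the newer entry ("b") sorts first.
        System.out.println(newestFirst().compare(Map.entry("a", 1L), Map.entry("b", 2L)));
    }
}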
From source file:org.apache.nifi.toolkit.cli.impl.result.RegistryClientsResult.java
@Override
protected void writeSimpleResult(final PrintStream output) {
    final Set<RegistryClientEntity> clients = registryClients.getRegistries();
    if (clients == null || clients.isEmpty()) {
        return;
    }

    final List<RegistryDTO> registries = clients.stream().map(RegistryClientEntity::getComponent)
            .sorted(Comparator.comparing(RegistryDTO::getName)).collect(Collectors.toList());

    final Table table = new Table.Builder().column("#", 3, 3, false).column("Name", 20, 36, true)
            .column("Id", 36, 36, false).column("Uri", 3, Integer.MAX_VALUE, false).build();

    for (int i = 0; i < registries.size(); i++) {
        RegistryDTO r = registries.get(i);
        table.addRow("" + (i + 1), r.getName(), r.getId(), r.getUri());
    }

    final TableWriter tableWriter = new DynamicTableWriter();
    tableWriter.write(table, output);
}
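comparing also has a two-argument overload taking a key comparator, handy when names should sort case-insensitively rather than by natural String order. A minimal sketch (plain strings stand in for RegistryDTO names):

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class CaseInsensitiveSortDemo {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("beta", "Alpha", "gamma");
        // The extracted key is the string itself; keys are compared case-insensitively.
        names.sort(Comparator.comparing((String s) -> s, String.CASE_INSENSITIVE_ORDER));
        System.out.println(names); // [Alpha, beta, gamma]
    }
}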
From source file:software.reinvent.dependency.parser.service.CsvWriter.java
/**
 * Creates the csv files
 * <ul>
 * <li>Internal_{date}.csv</li>
 * <li>External_{date}.csv</li>
 * <li>Artifacts_{date}.csv</li>
 * </ul>
 * with all important information about the {@link Artifact}s and their {@link ArtifactDependency}s.
 *
 * @param internalGroupId the internal maven group id
 * @param resultDir       the dir where the CSV files will be written
 * @param prefix          any optional prefix for the CSV files
 * @throws IOException
 */
public void writeDependencies(final String internalGroupId, final File resultDir, final String prefix)
        throws IOException {
    final Set<ArtifactDependency> allDependencies = artifacts.stream().map(Artifact::getDependencies)
            .flatMap(Collection::stream).collect(Collectors.toSet());
    final Set<ArtifactDependency> internalDependencies = allDependencies.stream()
            .filter(isInternalPredicate(internalGroupId))
            .sorted(Comparator.comparing(ArtifactDependency::getGroupId)).collect(toSet());
    final Set<ArtifactDependency> externalDependencies = Sets
            .newHashSet(CollectionUtils.subtract(allDependencies, internalDependencies));

    final Multimap<ArtifactDependency, Artifact> dependencyToArtifact = HashMultimap.create();
    allDependencies.forEach(dependency -> artifacts.stream()
            .filter(artifact -> artifact.getDependencies().contains(dependency))
            .forEach(x -> dependencyToArtifact.put(dependency, x)));

    CSVWriter internalWriter = null;
    CSVWriter externalWriter = null;
    CSVWriter artifactWriter = null;
    try {
        resultDir.mkdirs();
        final File internalResultFile = new File(resultDir,
                prefix + "Internal_" + LocalDate.now().toString() + ".csv");
        final File externalResultFile = new File(resultDir,
                prefix + "External_" + LocalDate.now().toString() + ".csv");
        final File artifactResultFile = new File(resultDir,
                prefix + "Artifacts_" + LocalDate.now().toString() + ".csv");
        logger.info("Will write results to {} and {}.", internalResultFile, externalResultFile);

        internalWriter = new CSVWriter(new FileWriter(internalResultFile), separator);
        writeDependencyHeader(internalWriter);
        externalWriter = new CSVWriter(new FileWriter(externalResultFile), separator);
        writeDependencyHeader(externalWriter);
        artifactWriter = new CSVWriter(new FileWriter(artifactResultFile), separator);
        artifactWriter.writeNext(("groupId#artifactId#version#package#internalDependencies"
                + "#externalDependencies").split("#"));

        final CSVWriter finalInternalWriter = internalWriter;
        final CSVWriter finalExternalWriter = externalWriter;
        dependencyToArtifact.keySet().stream()
                .sorted(Comparator.comparing(ArtifactDependency::getGroupId)
                        .thenComparing(ArtifactDependency::getArtifactId))
                .forEach(dependency -> {
                    final List<String> dependentArtifacts = dependencyToArtifact.get(dependency).stream()
                            .map(Artifact::getArtifactId).sorted().collect(toList());
                    final String artifactLicenses = defaultIfBlank(
                            Joiner.on("\n").join(dependency.getArtifactLicenses()), "n/a in pom");
                    final ArrayList<String> newLine = Lists.newArrayList(dependency.getGroupId(),
                            dependency.getArtifactId(), Joiner.on("\n").join(dependency.getVersions()),
                            artifactLicenses, dependency.getDescription(),
                            Joiner.on("\n").join(dependentArtifacts));
                    final String[] csvLine = Iterables.toArray(newLine, String.class);
                    if (isInternal(internalGroupId, dependency)) {
                        finalInternalWriter.writeNext(csvLine);
                    } else {
                        finalExternalWriter.writeNext(csvLine);
                    }
                });

        final CSVWriter finalArtifactWriter = artifactWriter;
        artifacts.stream()
                .sorted(Comparator.comparing(Artifact::getGroupId).thenComparing(Artifact::getArtifactId))
                .forEachOrdered(artifact -> {
                    final String intDependencies = getDependencyColumn(artifact, internalDependencies,
                            ArtifactDependency::getArtifactId);
                    final String extDependencies = getDependencyColumn(artifact, externalDependencies,
                            ArtifactDependency::toString);
                    final ArrayList<String> newLine = Lists.newArrayList(artifact.getGroupId(),
                            artifact.getArtifactId(), Joiner.on(",").join(artifact.getVersions()),
                            defaultString(artifact.getPackaging()), intDependencies, extDependencies);
                    final String[] csvLine = Iterables.toArray(newLine, String.class);
                    finalArtifactWriter.writeNext(csvLine);
                });
    } catch (IOException e) {
        logger.error("Could not write csv.", e);
    } finally {
        if (internalWriter != null) {
            internalWriter.close();
        }
        if (externalWriter != null) {
            externalWriter.close();
        }
        if (artifactWriter != null) {
            artifactWriter.close();
        }
    }
    logger.info("Found {} dependencies. {} internal and {} external", allDependencies.size(),
            internalDependencies.size(), externalDependencies.size());
}
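The two-level ordering above (group id, then artifact id) is the standard thenComparing chain. A minimal standalone sketch over illustrative Maven-style coordinates:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class MultiKeySort {
    public static void main(String[] args) {
        List<String> ids = Arrays.asList("org.b:core", "com.a:util", "com.a:core");
        // Primary key: the group id before ':'; tie-break on the full coordinate.
        ids.sort(Comparator.comparing((String s) -> s.split(":")[0])
                .thenComparing(Comparator.naturalOrder()));
        System.out.println(ids); // [com.a:core, com.a:util, org.b:core]
    }
}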
From source file:alluxio.cli.fs.command.AbstractFileSystemCommand.java
/**
 * Runs the command for a particular URI that may contain a wildcard in its path.
 *
 * @param wildCardPath an AlluxioURI that may or may not contain a wildcard
 * @param cl object containing the original commandLine
 * @throws AlluxioException
 * @throws IOException
 */
protected void runWildCardCmd(AlluxioURI wildCardPath, CommandLine cl) throws IOException {
    List<AlluxioURI> paths = FileSystemShellUtils.getAlluxioURIs(mFileSystem, wildCardPath);
    if (paths.size() == 0) { // A unified sanity check on the paths
        throw new IOException(wildCardPath + " does not exist.");
    }
    paths.sort(Comparator.comparing(AlluxioURI::getPath));

    List<String> errorMessages = new ArrayList<>();
    for (AlluxioURI path : paths) {
        try {
            runPlainPath(path, cl);
        } catch (AlluxioException | IOException e) {
            errorMessages.add(e.getMessage() != null ? e.getMessage() : e.toString());
        }
    }
    if (errorMessages.size() != 0) {
        throw new IOException(Joiner.on('\n').join(errorMessages));
    }
}
From source file:org.fenixedu.academic.util.UniqueAcronymCreator.java
public UniqueAcronymCreator(Function<T, String> slotAccessor, Function<T, String> acronymAccessor,
        Set<T> objects) throws Exception {
    this.slotAccessor = slotAccessor;
    this.acronymAccessor = acronymAccessor;
    this.objects = new TreeSet<T>(Comparator.comparing(slotAccessor));
    this.objects.addAll(objects);
}
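A TreeSet built this way treats elements whose extracted keys compare equal as duplicates and keeps only the first, since the comparator stands in for equals. A minimal sketch over plain strings keyed by length:

import java.util.Arrays;
import java.util.Comparator;
import java.util.TreeSet;

public class KeyedTreeSetDemo {
    public static void main(String[] args) {
        TreeSet<String> byLength = new TreeSet<>(Comparator.comparing(String::length));
        byLength.addAll(Arrays.asList("fig", "pear", "plum", "apple"));
        // "pear" and "plum" have equal keys (length 4), so only the first added survives.
        System.out.println(byLength); // [fig, pear, apple]
    }
}

If distinct elements must never collide, chain a tie-breaking key, e.g. Comparator.comparing(slotAccessor).thenComparing(acronymAccessor).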