Example usage for java.util Comparator comparing

Introduction

This page collects usage examples for java.util.Comparator.comparing.

Prototype

public static <T, U extends Comparable<? super U>> Comparator<T> comparing(
        Function<? super T, ? extends U> keyExtractor) 

Documentation

Accepts a function that extracts a Comparable sort key from a type T, and returns a Comparator<T> that compares by that sort key.
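
A minimal sketch of the basic pattern (hypothetical word list): String::length works as a key extractor because the boxed Integer key is Comparable.

List<String> words = new ArrayList<>(Arrays.asList("banana", "fig", "pear"));
words.sort(Comparator.comparing(String::length));
// words is now [fig, pear, banana]

For primitive int keys, Comparator.comparingInt(String::length) compares without boxing.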

Usage

From source file:org.apache.nifi.minifi.c2.provider.nifi.rest.NiFiRestConfigurationProvider.java

private Pair<String, Integer> getMaxIdAndVersion(String filenamePattern) throws ConfigurationProviderException {
    try {
        Pair<Stream<Pair<String, Integer>>, Closeable> streamCloseablePair = getIdAndVersionStream(
                filenamePattern);
        try {
            return streamCloseablePair.getFirst()
                    .sorted(Comparator.comparing(p -> ((Pair<String, Integer>) p).getSecond()).reversed())
                    .findFirst().orElseThrow(() -> new ConfigurationProviderException(
                            "Didn't find any templates that matched " + filenamePattern));
        } finally {
            streamCloseablePair.getSecond().close();
        }
    } catch (IOException | TemplatesIteratorException e) {
        throw new ConfigurationProviderException("Unable to retrieve template list", e);
    }
}
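
The cast inside the lambda above is needed because chaining .reversed() directly onto comparing() keeps the target type from reaching the lambda, so its parameter would otherwise be inferred as Object. A cast-free sketch of the same comparator (assuming the same Pair type), using an explicit lambda parameter type:

Comparator<Pair<String, Integer>> byVersionDesc =
        Comparator.comparing((Pair<String, Integer> p) -> p.getSecond()).reversed();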

From source file:com.epam.catgenome.manager.externaldb.ncbi.NCBIGeneManager.java

private void parseJsonFromBio(final JsonNode biosystemsResultRoot, final JsonNode biosystemsEntries,
        final NCBIGeneVO ncbiGeneVO) throws JsonProcessingException {
    if (biosystemsResultRoot.isArray()) {
        ncbiGeneVO.setPathwaysNumber(biosystemsResultRoot.size());
        List<NCBISummaryVO> pathways = new ArrayList<>(biosystemsResultRoot.size());
        for (final JsonNode objNode : biosystemsResultRoot) {
            JsonNode jsonNode = biosystemsEntries.path(RESULT_PATH).get(objNode.asText());
            NCBISummaryVO biosystemsReference = mapper.treeToValue(jsonNode, NCBISummaryVO.class);
            biosystemsReference.setLink(
                    String.format(NCBI_BIOSYSTEM_URL, biosystemsReference.getUid(), ncbiGeneVO.getGeneId()));
            pathways.add(biosystemsReference);
        }
        ncbiGeneVO.setBiosystemsReferences(pathways.stream()
                .sorted(Comparator.comparing(summary -> summary.getBiosystem().getBiosystemname()))
                .collect(Collectors.toList()));
    }
}
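
The key extractor above navigates a nested object, which is fine as long as no link in the chain is null (comparing() throws a NullPointerException on null keys). A hedged variant that also breaks ties on a second key, assuming getUid() returns a Comparable type:

ncbiGeneVO.setBiosystemsReferences(pathways.stream()
        .sorted(Comparator
                .comparing((NCBISummaryVO s) -> s.getBiosystem().getBiosystemname())
                .thenComparing(NCBISummaryVO::getUid))
        .collect(Collectors.toList()));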

From source file:org.apache.samza.container.grouper.task.GroupByContainerIds.java

/**
 * {@inheritDoc}
 *
 * When there are `t` tasks and `p` processors, where t &lt;= p, a fair task distribution should ideally assign
 * (t / p) tasks to each processor. In addition to guaranteeing a fair distribution, this {@link TaskNameGrouper}
 * implementation generates a locationId-aware task assignment, making a best effort to assign
 * tasks to processors with the same locality.
 *
 * Task assignment to processors is accomplished through the following two phases:
 *
 * 1. In the first phase, each task(T) is assigned to a processor(P) that satisfies the following constraints:
 *    A. The processor(P) should have the same locality as the task(T).
 *    B. The number of tasks already assigned to the processor should be less than (number of tasks / number of processors).
 *
 * 2. Each task left unassigned by phase 1 is then mapped to any processor with a task count less than
 * (number of tasks / number of processors). When no such processor exists, the unassigned
 * task is mapped to any of the available processors in round-robin fashion.
 */
@Override
public Set<ContainerModel> group(Set<TaskModel> taskModels, GrouperMetadata grouperMetadata) {
    Map<TaskName, LocationId> taskLocality = grouperMetadata.getTaskLocality();

    // Validate that the task models are not empty.
    Preconditions.checkArgument(!taskModels.isEmpty(),
            "No tasks found. Likely due to no input partitions. Can't run a job with no tasks.");

    // Invoke the default grouper when the processor locality does not exist.
    if (MapUtils.isEmpty(grouperMetadata.getProcessorLocality())) {
        LOG.info("ProcessorLocality is empty. Generating with the default group method.");
        return group(taskModels, new ArrayList<>());
    }

    Map<String, LocationId> processorLocality = new TreeMap<>(grouperMetadata.getProcessorLocality());
    /**
     * When there are more processors than task models, choose the lexicographically least `x` processors (where x = tasks.size()).
     */
    if (processorLocality.size() > taskModels.size()) {
        processorLocality = processorLocality.entrySet().stream().limit(taskModels.size())
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }

    Map<LocationId, List<String>> locationIdToProcessors = new HashMap<>();
    Map<String, TaskGroup> processorIdToTaskGroup = new HashMap<>();

    // Generate the {@see LocationId} to processors mapping and processorId to {@see TaskGroup} mapping.
    processorLocality.forEach((processorId, locationId) -> {
        List<String> processorIds = locationIdToProcessors.getOrDefault(locationId, new ArrayList<>());
        processorIds.add(processorId);
        locationIdToProcessors.put(locationId, processorIds);
        processorIdToTaskGroup.put(processorId, new TaskGroup(processorId, new ArrayList<>()));
    });

    int numTasksPerProcessor = taskModels.size() / processorLocality.size();
    Set<TaskName> assignedTasks = new HashSet<>();

    /**
     * A processor is considered under-assigned when the number of tasks assigned to it is less than
     * (number of tasks / number of processors).
     * Map each task to an under-assigned processor with the same locality.
     */
    for (TaskModel taskModel : taskModels) {
        LocationId taskLocationId = taskLocality.get(taskModel.getTaskName());
        if (taskLocationId != null) {
            List<String> processorIds = locationIdToProcessors.getOrDefault(taskLocationId, new ArrayList<>());
            for (String processorId : processorIds) {
                TaskGroup taskGroup = processorIdToTaskGroup.get(processorId);
                if (taskGroup.size() < numTasksPerProcessor) {
                    taskGroup.addTaskName(taskModel.getTaskName().getTaskName());
                    assignedTasks.add(taskModel.getTaskName());
                    break;
                }
            }
        }
    }

    /**
     * In some scenarios, a task either might not have any previous locality or might not have any
     * processor that maps to its previous locality. This cyclic iterator over processorIds helps
     * assign processors to such tasks in round-robin fashion.
     */
    Iterator<String> processorIdsCyclicIterator = Iterators.cycle(processorLocality.keySet());

    // Order the taskGroups to choose a task group in a deterministic fashion for unassigned tasks.
    List<TaskGroup> taskGroups = new ArrayList<>(processorIdToTaskGroup.values());
    taskGroups.sort(Comparator.comparing(TaskGroup::getContainerId));

    /**
     * For the tasks left over from the previous phase, map them to any under-assigned processor.
     * When no under-assigned processor exists, map them to any of the available processors in
     * round-robin fashion.
     */
    for (TaskModel taskModel : taskModels) {
        if (!assignedTasks.contains(taskModel.getTaskName())) {
            Optional<TaskGroup> underAssignedTaskGroup = taskGroups.stream()
                    .filter(taskGroup -> taskGroup.size() < numTasksPerProcessor).findFirst();
            if (underAssignedTaskGroup.isPresent()) {
                underAssignedTaskGroup.get().addTaskName(taskModel.getTaskName().getTaskName());
            } else {
                TaskGroup taskGroup = processorIdToTaskGroup.get(processorIdsCyclicIterator.next());
                taskGroup.addTaskName(taskModel.getTaskName().getTaskName());
            }
            assignedTasks.add(taskModel.getTaskName());
        }
    }

    return TaskGroup.buildContainerModels(taskModels, taskGroups);
}
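
One wrinkle in the "lexicographically least `x` processors" step above: Collectors.toMap returns an unordered HashMap, which discards the order the TreeMap established. A sketch of the same selection with an explicit comparator and an order-preserving result map (same names as in the method; whether downstream code relies on that iteration order is an assumption):

Map<String, LocationId> leastProcessors = grouperMetadata.getProcessorLocality()
        .entrySet().stream()
        .sorted(Comparator.comparing(Map.Entry::getKey))
        .limit(taskModels.size())
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                (a, b) -> a, LinkedHashMap::new));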

From source file:org.polymap.p4.data.importer.ImporterContext.java

protected Optional<Severity> maxNotOkPromptSeverity() {
    return prompts == null ? Optional.empty()
            : prompts.values().stream().filter(prompt -> !prompt.ok.get()).map(prompt -> prompt.severity.get())
                    .max(Comparator.comparing(s -> s.ordinal()));
}
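
The use of ordinal() suggests Severity is an enum, and enums already implement Comparable in ordinal order, so the key extractor is redundant. An equivalent sketch under that assumption:

return prompts == null ? Optional.empty()
        : prompts.values().stream()
                .filter(prompt -> !prompt.ok.get())
                .map(prompt -> prompt.severity.get())
                .max(Comparator.naturalOrder());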

From source file:org.fenixedu.qubdocs.ui.documenttemplates.AcademicServiceRequestTemplateController.java

@RequestMapping(value = _CREATESTANDARDTEMPLATE_URI, method = RequestMethod.GET)
public String createstandardtemplate(Model model) {
    model.addAttribute("AcademicServiceRequestTemplate_language_options", CoreConfiguration.supportedLocales());
    model.addAttribute("AcademicServiceRequestTemplate_serviceRequestType_options",
            org.fenixedu.academic.domain.serviceRequests.ServiceRequestType.findActive()
                    .sorted(Comparator.comparing(ServiceRequestType::getName)).collect(Collectors.toList()));
    model.addAttribute("AcademicServiceRequestTemplate_degreeType_options",
            DegreeType.all().collect(Collectors.toList()));
    model.addAttribute("AcademicServiceRequestTemplate_degree_options",
            new ArrayList<org.fenixedu.academic.domain.Degree>());
    model.addAttribute("AcademicServiceRequestTemplate_programConclusion_options",
            new ArrayList<org.fenixedu.academic.domain.degreeStructure.ProgramConclusion>());

    AcademicServiceRequestTemplateBean bean = new AcademicServiceRequestTemplateBean();
    bean.setLanguageDataSource(new ArrayList<Locale>(CoreConfiguration.supportedLocales()));
    bean.setServiceRequestTypeDataSource(ServiceRequestType.findActive()
            .sorted(Comparator.comparing(ServiceRequestType::getName)).collect(Collectors.toList()));
    bean.setDegreeTypeDataSource(DegreeType.all().sorted().collect(Collectors.toList()));
    this.setAcademicServiceRequestTemplateBean(bean, model);

    return "qubdocsreports/documenttemplates/academicservicerequesttemplate/angularcreatestandardtemplate";
}
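
The same name comparator is built twice in this handler. A small refactoring sketch (BY_NAME is a hypothetical constant name; getName() is assumed to return a Comparable type, as the original sorts imply):

private static final Comparator<ServiceRequestType> BY_NAME =
        Comparator.comparing(ServiceRequestType::getName);

// both call sites then shrink to:
ServiceRequestType.findActive().sorted(BY_NAME).collect(Collectors.toList());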

From source file:org.eclipse.sw360.licenseinfo.outputGenerators.OutputGenerator.java

/**
 * Helper function to sort a set by the given key extractor. Falls back to the
 * elements' natural ordering if sorting by the extracted key would squash values.
 *
 * @param unsorted
 *            set to be sorted
 * @param keyExtractor
 *            function to extract the key to use for sorting
 *
 * @return the sorted set
 */
private static <U, K extends Comparable<K>> SortedSet<U> sortSet(Set<U> unsorted, Function<U, K> keyExtractor) {
    if (unsorted == null || unsorted.isEmpty()) {
        return Collections.emptySortedSet();
    }
    SortedSet<U> sorted = new TreeSet<>(Comparator.comparing(keyExtractor));
    sorted.addAll(unsorted);
    if (sorted.size() != unsorted.size()) {
        // there were key collisions and some data was lost -> throw away the sorted set
        // and sort by U's natural order
        sorted = new TreeSet<>();
        sorted.addAll(unsorted);
    }
    return sorted;
}
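
An alternative sketch that avoids the collision check entirely: break ties with the elements' natural order up front. This assumes U is itself Comparable and that its natural order is consistent with equals, so distinct elements are never squashed (sortSetStrict is a hypothetical name):

private static <U extends Comparable<U>, K extends Comparable<K>> SortedSet<U> sortSetStrict(
        Set<U> unsorted, Function<U, K> keyExtractor) {
    SortedSet<U> sorted = new TreeSet<>(
            Comparator.comparing(keyExtractor).thenComparing(Comparator.naturalOrder()));
    if (unsorted != null) {
        sorted.addAll(unsorted);
    }
    return sorted;
}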

From source file:org.apache.sysml.hops.codegen.opt.PlanSelectionFuseCostBased.java

private void createAndAddMultiAggPlans(CPlanMemoTable memo, ArrayList<Hop> roots) {
    //collect full aggregations as initial set of candidates
    HashSet<Long> fullAggs = new HashSet<>();
    Hop.resetVisitStatus(roots);
    for (Hop hop : roots)
        rCollectFullAggregates(hop, fullAggs);
    Hop.resetVisitStatus(roots);

    //remove operators with assigned multi-agg plans
    fullAggs.removeIf(p -> memo.contains(p, TemplateType.MAGG));

    //check applicability for further analysis
    if (fullAggs.size() <= 1)
        return;

    if (LOG.isTraceEnabled()) {
        LOG.trace("Found across-partition ua(RC) aggregations: "
                + Arrays.toString(fullAggs.toArray(new Long[0])));
    }

    //collect information for all candidates 
    //(subsumed aggregations, and inputs to fused operators) 
    List<AggregateInfo> aggInfos = new ArrayList<>();
    for (Long hopID : fullAggs) {
        Hop aggHop = memo.getHopRefs().get(hopID);
        AggregateInfo tmp = new AggregateInfo(aggHop);
        for (int i = 0; i < aggHop.getInput().size(); i++) {
            Hop c = HopRewriteUtils.isMatrixMultiply(aggHop) && i == 0
                    ? aggHop.getInput().get(0).getInput().get(0)
                    : aggHop.getInput().get(i);
            rExtractAggregateInfo(memo, c, tmp, TemplateType.CELL);
        }
        if (tmp._fusedInputs.isEmpty()) {
            if (HopRewriteUtils.isMatrixMultiply(aggHop)) {
                tmp.addFusedInput(aggHop.getInput().get(0).getInput().get(0).getHopID());
                tmp.addFusedInput(aggHop.getInput().get(1).getHopID());
            } else
                tmp.addFusedInput(aggHop.getInput().get(0).getHopID());
        }
        aggInfos.add(tmp);
    }

    if (LOG.isTraceEnabled()) {
        LOG.trace("Extracted across-partition ua(RC) aggregation info: ");
        for (AggregateInfo info : aggInfos)
            LOG.trace(info);
    }

    //sort aggregations by num dependencies to simplify merging
    //clusters of aggregations with parallel dependencies
    aggInfos = aggInfos.stream().sorted(Comparator.comparing(a -> a._inputAggs.size()))
            .collect(Collectors.toList());

    //greedy grouping of multi-agg candidates
    boolean converged = false;
    while (!converged) {
        AggregateInfo merged = null;
        for (int i = 0; i < aggInfos.size(); i++) {
            AggregateInfo current = aggInfos.get(i);
            for (int j = i + 1; j < aggInfos.size(); j++) {
                AggregateInfo that = aggInfos.get(j);
                if (current.isMergable(that)) {
                    merged = current.merge(that);
                    aggInfos.remove(j);
                    j--;
                }
            }
        }
        converged = (merged == null);
    }

    if (LOG.isTraceEnabled()) {
        LOG.trace("Merged across-partition ua(RC) aggregation info: ");
        for (AggregateInfo info : aggInfos)
            LOG.trace(info);
    }

    //construct and add multiagg template plans (w/ max 3 aggregations)
    for (AggregateInfo info : aggInfos) {
        if (info._aggregates.size() <= 1)
            continue;
        Long[] aggs = info._aggregates.keySet().toArray(new Long[0]);
        MemoTableEntry me = new MemoTableEntry(TemplateType.MAGG, aggs[0], aggs[1],
                (aggs.length > 2) ? aggs[2] : -1, aggs.length);
        for (int i = 0; i < aggs.length; i++) {
            memo.add(memo.getHopRefs().get(aggs[i]), me);
            addBestPlan(aggs[i], me);
            if (LOG.isTraceEnabled())
                LOG.trace("Added multiagg* plan: " + aggs[i] + " " + me);

        }
    }
}
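
A minor variant of the sort in this method: the key is a primitive int, so Comparator.comparingInt compares without boxing each key (same names as above):

aggInfos = aggInfos.stream()
        .sorted(Comparator.comparingInt(a -> a._inputAggs.size()))
        .collect(Collectors.toList());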

From source file:org.codice.ddf.catalog.content.monitor.DavAlterationObserver.java

/**
 * List the contents of a directory.
 *
 * @param file The file to list the contents of
 * @return the directory contents, or a zero-length array if the directory is empty or the file is not a
 *     directory
 */
private DavResource[] listFiles(final String file) {
    DavResource[] children = null;
    try {
        List<DavResource> list = sardine.list(file);
        // the returned list includes the parent
        if (list.size() > 1 && list.get(0).isDirectory()) {
            List<DavResource> resourceList = list.subList(1, list.size());
            // lexicographical sorting
            resourceList.sort(Comparator.comparing(DavResource::getName));
            children = resourceList.toArray(new DavResource[resourceList.size()]);
        }
    } catch (IOException e) {
        // if it doesn't exist it can't have children
        children = EMPTY_RESOURCES;
    }
    if (children == null) {
        children = EMPTY_RESOURCES;
    }
    return children;
}
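
The sort above is strictly lexicographic, so "Z" orders before "a". If case-insensitive ordering were wanted instead (an assumption, not what the monitor requires), the two-argument comparing(keyExtractor, keyComparator) overload accepts a comparator for the extracted key:

resourceList.sort(
        Comparator.comparing(DavResource::getName, String.CASE_INSENSITIVE_ORDER));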

From source file:com.strider.datadefender.FileDiscoverer.java

@SuppressWarnings("unchecked")
public List<FileMatchMetaData> discover(final Properties fileDiscoveryProperties)
        throws FileDiscoveryException, DatabaseDiscoveryException, IOException, SAXException, TikaException {
    log.info("Data discovery in process");

    // Get the probability threshold from property file
    final double probabilityThreshold = parseDouble(
            fileDiscoveryProperties.getProperty("probability_threshold"));

    log.info("Probability threshold [" + probabilityThreshold + "]");

    // Get list of models used in data discovery
    final String models = fileDiscoveryProperties.getProperty("models");

    modelList = models.split(",");
    log.info("Model list [" + Arrays.toString(modelList) + "]");

    List<FileMatchMetaData> finalList = new ArrayList<>();

    for (final String model : modelList) {
        log.info("********************************");
        log.info("Processing model " + model);
        log.info("********************************");

        final Model modelPerson = createModel(fileDiscoveryProperties, model);

        fileMatches = discoverAgainstSingleModel(fileDiscoveryProperties, modelPerson, probabilityThreshold);
        finalList = ListUtils.union(finalList, fileMatches);
    }

    // Special case
    String[] specialCaseFunctions = null;
    boolean specialCase = false;
    final String extentionList = fileDiscoveryProperties.getProperty("extentions");

    final String directories = fileDiscoveryProperties.getProperty("directories");
    final String exclusions = fileDiscoveryProperties.getProperty("exclusions");
    String[] exclusionList = null;
    if ((exclusions == null) || exclusions.equals("")) {
        log.info("exclusions property is empty in firediscovery.properties file");
    } else {
        exclusionList = exclusions.split(",");
        log.info("File types not considered for analysis: " + exclusions);
    }
    log.info("Directories to analyze: " + directories);

    if ((directories == null) || directories.equals("")) {
        log.error("directories property is empty in firediscovery.properties file");

        throw new DatabaseDiscoveryException("directories property is empty in firediscovery.properties file");
    }

    final String[] directoryList = directories.split(",");
    if (!CommonUtils.isEmptyString(extentionList)) {
        log.info("***** Extension list:" + extentionList);
        specialCaseFunctions = extentionList.split(",");

        if ((specialCaseFunctions != null) && (specialCaseFunctions.length > 0)) {
            log.debug("special case:" + specialCase);
            File node;
            Metadata metadata;

            try {
                log.info("**************" + specialCaseFunctions.toString());
                for (int j = 0; j < specialCaseFunctions.length; j++) {
                    for (final String directory : directoryList) {

                        node = new File(directory);
                        final List<File> files = (List<File>) FileUtils.listFiles(node, null, true);

                        for (final File fich : files) {
                            final String file = fich.getName();
                            final String recursivedir = fich.getParent();

                            log.info("Analyzing [" + fich.getCanonicalPath() + "]");
                            final String ext = CommonUtils.getFileExtension(fich).toLowerCase(Locale.ENGLISH);
                            log.debug("Extension: " + ext);

                            if ((exclusionList != null) && Arrays.asList(exclusionList).contains(ext)) {
                                log.info("Ignoring type " + ext);
                                continue;
                            }

                            final BodyContentHandler handler = new BodyContentHandler(-1);
                            final AutoDetectParser parser = new AutoDetectParser();

                            metadata = new Metadata();

                            String handlerString = "";
                            // try-with-resources closes the stream; the null check the original
                            // carried was dead code, since FileInputStream never returns null
                            try (InputStream stream = new FileInputStream(fich.getCanonicalPath())) {
                                log.debug("Loading data into the stream");
                                parser.parse(stream, handler, metadata);
                                handlerString = handler.toString().replaceAll("( )+", " ")
                                        .replaceAll("[\\t\\n\\r]+", " ");

                                String[] tokens = handlerString.split(" ");

                                for (int t = 0; t < tokens.length; t++) {
                                    String token = tokens[t];
                                    if (token.trim().length() < 1) {
                                        continue;
                                    }
                                    String specialFunction = specialCaseFunctions[j];
                                    log.info(specialFunction);
                                    FileMatchMetaData returnData = null;
                                    try {
                                        returnData = (FileMatchMetaData) callExtention(
                                                new FileMatchMetaData(recursivedir, file), specialFunction,
                                                token);
                                    } catch (InvocationTargetException e) {
                                        continue;
                                    }
                                    if (returnData != null) {
                                        returnData.setModel("sin");
                                        returnData.setAverageProbability(1.0);
                                        List<FileMatchMetaData> specialFileMatches = new ArrayList<>();
                                        specialFileMatches.add(returnData);

                                        finalList = ListUtils.union(finalList, specialFileMatches);
                                    }
                                    log.debug(tokens[t]);
                                }
                            } catch (IOException e) {
                                log.info("Unable to read " + fich.getCanonicalPath() + ". Ignoring...");
                            }
                            log.info("Finish processing " + fich.getCanonicalPath());
                        }
                        log.info("Finish speclai case " + specialCaseFunctions[j]);
                    }
                }
            } catch (IOException | IllegalAccessException | IllegalArgumentException | NoSuchMethodException
                    | SecurityException | SQLException | TikaException | SAXException e) {
                log.error(e.toString());
            }
        }
    }

    final DecimalFormat decimalFormat = new DecimalFormat("#.##");

    log.info("List of suspects:");
    log.info(String.format("%40s %20s %20s %20s", "Directory*", "File*", "Probability*", "Model*"));

    finalList = uniqueList(finalList);

    Collections.sort(finalList, Comparator.comparing(FileMatchMetaData::getFileName));

    for (final FileMatchMetaData data : finalList) {
        String result = "";
        final String probability = decimalFormat.format(data.getAverageProbability());
        result = String.format("%40s %20s %20s %20s", data.getDirectory(), data.getFileName(), probability,
                data.getModel());
        log.info(result);
    }

    // finalList holds the combined, de-duplicated, sorted matches across all models
    return Collections.unmodifiableList(finalList);
}
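
comparing() throws a NullPointerException when an extracted key is null. If getFileName() could ever return null here (an assumption; the original does not guard against it), the key-comparator overload with nullsLast keeps such entries at the end instead of failing:

Collections.sort(finalList, Comparator.comparing(FileMatchMetaData::getFileName,
        Comparator.nullsLast(Comparator.naturalOrder())));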

From source file:org.phoenicis.repository.types.LocalRepository.java

private List<ApplicationDTO> fetchApplications(String typeId, String categoryId, File categoryDirectory) {
    final File[] applicationDirectories = categoryDirectory.listFiles();
    if (applicationDirectories == null) {
        return Collections.emptyList();
    }

    final List<ApplicationDTO> results = new ArrayList<>();

    for (File applicationDirectory : applicationDirectories) {
        if (applicationDirectory.isDirectory()) {
            final ApplicationDTO.Builder applicationDTOBuilder;
            final File applicationJson = new File(applicationDirectory, "application.json");
            if (applicationJson.exists()) {
                applicationDTOBuilder = new ApplicationDTO.Builder(unSerializeApplication(applicationJson));
            } else {
                applicationDTOBuilder = new ApplicationDTO.Builder();
            }

            applicationDTOBuilder.withTypeId(typeId).withCategoryId(categoryId);

            if (StringUtils.isBlank(applicationDTOBuilder.getId())) {
                if (!StringUtils.isBlank(applicationDTOBuilder.getName())) {
                    applicationDTOBuilder.withId(applicationDTOBuilder.getName().replaceAll(ID_REGEX, ""));
                } else {
                    applicationDTOBuilder.withId(applicationDirectory.getName().replaceAll(ID_REGEX, ""));
                }
            }

            final File miniaturesDirectory = new File(applicationDirectory, "miniatures");

            if (miniaturesDirectory.exists() && miniaturesDirectory.isDirectory()) {
                try {
                    applicationDTOBuilder.withMiniatures(fetchMiniatures(miniaturesDirectory));
                } catch (IOException e) {
                    LOGGER.warn("Unable to read miniatures", e);
                }
            }

            applicationDTOBuilder.withScripts(fetchScripts(applicationDTOBuilder.getTypeId(),
                    applicationDTOBuilder.getCategoryId(), applicationDTOBuilder.getId(), applicationDirectory))
                    .withResources(fetchResources(applicationDirectory));

            ApplicationDTO app = applicationDTOBuilder.build();
            results.add(app);
        }
    }

    Collections.sort(results, Comparator.comparing(ApplicationDTO::getName));
    return results;
}
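
A locale-aware sketch: java.text.Collator implements Comparator, so it can serve as the key comparator for human-friendly ordering of names with accents or mixed case. Whether that ordering is wanted here is an assumption:

// requires java.text.Collator
Collator collator = Collator.getInstance(Locale.getDefault());
results.sort(Comparator.comparing(ApplicationDTO::getName, collator));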