Example usage for java.util.List.sort

List of usage examples for java.util.List.sort

Introduction

On this page you can find usage examples for java.util.List.sort collected from open-source projects.

Prototype

@SuppressWarnings({ "unchecked", "rawtypes" })
default void sort(Comparator<? super E> c) 

Document

Sorts this list according to the order induced by the specified Comparator.
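
Before the project examples below, here is a minimal, self-contained sketch of the call. The list contents and comparator choices are illustrative only and are not taken from any of the projects listed under Usage.

import java.util.ArrayList;
import java.util.List;

public class ListSortExample {
    public static void main(String[] args) {
        List<String> names = new ArrayList<>(List.of("Carol", "alice", "Bob"));

        // Sort in place with an explicit Comparator (case-insensitive here).
        names.sort(String.CASE_INSENSITIVE_ORDER);
        System.out.println(names); // [alice, Bob, Carol]

        // A null comparator sorts by the elements' natural ordering (they must be Comparable).
        names.sort(null);
        System.out.println(names); // [Bob, Carol, alice]
    }
}

Note that sort mutates the list in place and throws UnsupportedOperationException on unmodifiable lists, which is why the sketch copies List.of(...) into an ArrayList first.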

Usage

From source file:org.structr.web.entity.feed.DataFeed.java

/**
 * Clean-up feed items which are either too old or too many.
 */
@Export
public void cleanUp() {

    final Integer maxItemsToRetain = getProperty(maxItems);
    final Long maxItemAge = getProperty(maxAge);

    int i = 0;

    // Don't do anything if neither maxItems nor maxAge is set
    if (maxItemsToRetain != null || maxItemAge != null) {

        final List<FeedItem> feedItems = getProperty(items);

        // Sort by publication date, youngest items first
        feedItems.sort(new GraphObjectComparator(FeedItem.pubDate, GraphObjectComparator.DESCENDING));

        for (final FeedItem item : feedItems) {

            i++;

            final Date itemDate = item.getProperty(FeedItem.pubDate);

            if ((maxItemsToRetain != null && i > maxItemsToRetain)
                    || (maxItemAge != null && itemDate.before(new Date(new Date().getTime() - maxItemAge)))) {

                try {
                    StructrApp.getInstance().delete(item);

                } catch (FrameworkException ex) {
                    logger.log(Level.SEVERE, "Error while deleting old/surplus feed item " + item, ex);
                }
            }
        }

    }

}

From source file:ummisco.gama.ui.views.inspectors.ExperimentParametersView.java

@Override
public void addItem(final IExperimentPlan exp) {
    if (exp != null) {
        experiment = exp;
        if (!exp.hasParametersOrUserCommands()) {
            return;
        }
        reset();
        final List<IExperimentDisplayable> params = new ArrayList<>(exp.getParameters().values());
        params.addAll(exp.getExplorableParameters().values());
        params.addAll(exp.getUserCommands());
        // A null comparator sorts the elements by their natural ordering.
        params.sort(null);
        editors = new ExperimentsParametersList(exp.getAgent().getScope(), params);
        final String expInfo = "Model " + experiment.getModel().getDescription().getTitle() + " / "
                + StringUtils.capitalize(experiment.getDescription().getTitle());
        this.setPartName(expInfo);
        displayItems();
    } else {
        experiment = null;
    }
}

From source file:com.netflix.spinnaker.igor.jenkins.JenkinsCache.java

public List<String> getJobNames(String master) {
    List<String> jobs = new ArrayList<>();
    redisClientDelegate.withKeyScan(prefix() + ":" + master + ":*", 1000, page -> jobs
            .addAll(page.getResults().stream().map(JenkinsCache::extractJobName).collect(Collectors.toList())));
    jobs.sort(Comparator.naturalOrder());
    return jobs;
}

From source file:fi.vm.sade.eperusteet.ylops.service.dokumentti.impl.DokumenttiServiceImpl.java

@Override
@Transactional(noRollbackFor = DokumenttiException.class)
@Async(value = "docTaskExecutor")
public void autogenerate(Long id, Kieli kieli) throws DokumenttiException {
    Dokumentti dokumentti;
    List<Dokumentti> dokumentit = dokumenttiRepository.findByOpsIdAndKieli(id, kieli);
    if (!dokumentit.isEmpty()) {
        dokumentit.sort((a, b) -> new Long(a.getId()).compareTo(b.getId()));
        dokumentti = dokumentit.get(0);
    } else {
        dokumentti = new Dokumentti();
    }

    dokumentti.setTila(DokumenttiTila.LUODAAN);
    dokumentti.setAloitusaika(new Date());
    dokumentti.setLuoja(SecurityUtil.getAuthenticatedPrincipal().getName());
    dokumentti.setKieli(kieli);
    dokumentti.setOpsId(id);

    Opetussuunnitelma ops = opetussuunnitelmaRepository.findOne(id);
    if (ops != null) {
        try {
            dokumentti.setData(builder.generatePdf(ops, kieli));
            dokumentti.setTila(DokumenttiTila.VALMIS);
            dokumentti.setValmistumisaika(new Date());
            dokumentti.setVirhekoodi("");
            dokumenttiRepository.save(dokumentti);
        } catch (Exception ex) {
            dokumentti.setTila(DokumenttiTila.EPAONNISTUI);
            dokumentti.setVirhekoodi(ExceptionUtils.getStackTrace(ex));
            dokumenttiRepository.save(dokumentti);

            throw new DokumenttiException(ex.getMessage(), ex);
        }
    } else {
        dokumentti.setTila(DokumenttiTila.EPAONNISTUI);
        dokumenttiRepository.save(dokumentti);
    }
}

From source file:fi.vm.sade.eperusteet.ylops.service.dokumentti.impl.DokumenttiServiceImpl.java

@Override
@Transactional(readOnly = true)
public DokumenttiDto getDto(Long opsId, Kieli kieli) {
    List<Dokumentti> dokumentit = dokumenttiRepository.findByOpsIdAndKieli(opsId, kieli);

    // If one is found
    if (!dokumentit.isEmpty()) {
        dokumentit.sort((a, b) -> new Long(a.getId()).compareTo(b.getId()));
        return mapper.map(dokumentit.get(0), DokumenttiDto.class);
    }

    return null;
}

From source file:org.apache.samza.container.grouper.task.GroupByContainerIds.java

/**
 * {@inheritDoc}
 *
 * When there are `t` tasks and `p` processors, where t <= p, a fair task distribution should ideally assign
 * (t / p) tasks to each processor. In addition to guaranteeing a fair distribution, this {@link TaskNameGrouper}
 * implementation generates a locationId aware task assignment to processors where it makes best efforts in assigning
 * the tasks to processors with the same locality.
 *
 * Task assignment to processors is accomplished through the following two phases:
 *
 * 1. In the first phase, each task(T) is assigned to a processor(P) that satisfies the following constraints:
 *    A. The processor(P) should have the same locality of the task(T).
 *    B. Number of tasks already assigned to the processor should be less than the (number of tasks / number of processors).
 *
 * 2. Each unassigned task from phase 1 is then mapped to any processor with task count less than the
 * (number of tasks / number of processors). When no such processor exists, then the unassigned
 * task is mapped to any processor from available processors in a round robin fashion.
 */
@Override
public Set<ContainerModel> group(Set<TaskModel> taskModels, GrouperMetadata grouperMetadata) {
    // Validate that the task models are not empty.
    Map<TaskName, LocationId> taskLocality = grouperMetadata.getTaskLocality();
    Preconditions.checkArgument(!taskModels.isEmpty(),
            "No tasks found. Likely due to no input partitions. Can't run a job with no tasks.");

    // Invoke the default grouper when the processor locality does not exist.
    if (MapUtils.isEmpty(grouperMetadata.getProcessorLocality())) {
        LOG.info("ProcessorLocality is empty. Generating with the default group method.");
        return group(taskModels, new ArrayList<>());
    }

    Map<String, LocationId> processorLocality = new TreeMap<>(grouperMetadata.getProcessorLocality());
    /**
     * When there are more processors than task models, choose the lexicographically least `x` processors (where x = tasks.size()).
     */
    if (processorLocality.size() > taskModels.size()) {
        processorLocality = processorLocality.entrySet().stream().limit(taskModels.size())
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }

    Map<LocationId, List<String>> locationIdToProcessors = new HashMap<>();
    Map<String, TaskGroup> processorIdToTaskGroup = new HashMap<>();

    // Generate the {@see LocationId} to processors mapping and processorId to {@see TaskGroup} mapping.
    processorLocality.forEach((processorId, locationId) -> {
        List<String> processorIds = locationIdToProcessors.getOrDefault(locationId, new ArrayList<>());
        processorIds.add(processorId);
        locationIdToProcessors.put(locationId, processorIds);
        processorIdToTaskGroup.put(processorId, new TaskGroup(processorId, new ArrayList<>()));
    });

    int numTasksPerProcessor = taskModels.size() / processorLocality.size();
    Set<TaskName> assignedTasks = new HashSet<>();

    /**
     * A processor is considered under-assigned when number of tasks assigned to it is less than
     * (number of tasks / number of processors).
     * Map the tasks to the under-assigned processors with same locality.
     */
    for (TaskModel taskModel : taskModels) {
        LocationId taskLocationId = taskLocality.get(taskModel.getTaskName());
        if (taskLocationId != null) {
            List<String> processorIds = locationIdToProcessors.getOrDefault(taskLocationId, new ArrayList<>());
            for (String processorId : processorIds) {
                TaskGroup taskGroup = processorIdToTaskGroup.get(processorId);
                if (taskGroup.size() < numTasksPerProcessor) {
                    taskGroup.addTaskName(taskModel.getTaskName().getTaskName());
                    assignedTasks.add(taskModel.getTaskName());
                    break;
                }
            }
        }
    }

    /**
     * In some scenarios, the task either might not have any previous locality or might not have any
     * processor that maps to its previous locality. This cyclic processorId's iterator helps us in
     * those scenarios to assign the processorIds to those kind of tasks in a round robin fashion.
     */
    Iterator<String> processorIdsCyclicIterator = Iterators.cycle(processorLocality.keySet());

    // Order the taskGroups to choose a task group in a deterministic fashion for unassigned tasks.
    List<TaskGroup> taskGroups = new ArrayList<>(processorIdToTaskGroup.values());
    taskGroups.sort(Comparator.comparing(TaskGroup::getContainerId));

    /**
     * For the tasks left over from the previous stage, map them to any under-assigned processor.
     * When no under-assigned processor exists, then map them to any processor from the
     * available processors in a round robin manner.
     */
    for (TaskModel taskModel : taskModels) {
        if (!assignedTasks.contains(taskModel.getTaskName())) {
            Optional<TaskGroup> underAssignedTaskGroup = taskGroups.stream()
                    .filter(taskGroup -> taskGroup.size() < numTasksPerProcessor).findFirst();
            if (underAssignedTaskGroup.isPresent()) {
                underAssignedTaskGroup.get().addTaskName(taskModel.getTaskName().getTaskName());
            } else {
                TaskGroup taskGroup = processorIdToTaskGroup.get(processorIdsCyclicIterator.next());
                taskGroup.addTaskName(taskModel.getTaskName().getTaskName());
            }
            assignedTasks.add(taskModel.getTaskName());
        }
    }

    return TaskGroup.buildContainerModels(taskModels, taskGroups);
}

From source file:com.evolveum.midpoint.model.impl.trigger.TriggerScannerTaskHandler.java

private List<TriggerType> getSortedTriggers(List<PrismContainerValue<TriggerType>> triggerCVals) {
    List<TriggerType> rv = new ArrayList<>();
    triggerCVals.forEach(cval -> rv.add(cval.clone().asContainerable()));
    rv.sort(Comparator.comparingLong(t -> XmlTypeConverter.toMillis(t.getTimestamp())));
    return rv;
}

From source file:org.neo4j.nlp.impl.util.VectorUtil.java

public static Map<String, List<LinkedHashMap<String, Object>>> similarDocumentMapForVector(
        GraphDatabaseService db, GraphManager graphManager, String input, DecisionTree<Long> decisionTree) {
    Map<String, List<LinkedHashMap<String, Object>>> documents;
    Map<String, List<LinkedHashMap<String, Object>>> results = new HashMap<>();
    List<Integer> featureIndexList;

    VsmCacheModel vsmCacheModel = new VsmCacheModel(db).invoke();
    featureIndexList = vsmCacheModel.getFeatureIndexList();
    documents = vsmCacheModel.getDocuments();

    List<Double> features = getFeatureVector(db, graphManager, input, featureIndexList, decisionTree);

    List<LinkedHashMap<String, Object>> resultList = new ArrayList<>();
    LinkedHashMap<String, Double> classMap = new LinkedHashMap<>();

    documents.keySet().stream().forEach(otherKey -> {
        List<Double> v2 = getWeightVectorForClass(documents, otherKey, featureIndexList, db);
        classMap.put(otherKey, cosineSimilarity(features, v2));
    });

    classMap.keySet().stream().forEach(ks -> {
        if (classMap.get(ks) > 0.0) {
            LinkedHashMap<String, Object> localMap = new LinkedHashMap<>();
            localMap.put("class", ks);
            localMap.put("similarity", classMap.get(ks));
            resultList.add(localMap);
        }
    });

    try {
        resultList.sort((a, b) -> {
            Double diff = (((double) a.get("similarity")) - ((double) b.get("similarity")));
            return diff > 0 ? -1 : diff.equals(0.0) ? 0 : 1;
        });
    } catch (NullPointerException ex) {
        // Ignored: a missing (null) similarity value would cause an unboxing NPE during the sort.
    }

    results.put("classes", resultList);

    return results;
}

From source file:org.fenixedu.start.service.APISyncService.java

public Map<String, List<Version>> syncVersions() {
    logger.info("Synchronizing with the Github API");
    Map<String, List<Version>> versions = new HashMap<>();
    for (String project : Arrays.asList("bennu", "bennu-spring", "fenixedu-maven")) {
        try {
            ResponseEntity<String> resp = new RestTemplate()
                    .getForEntity("https://api.github.com/repos/FenixEdu/" + project + "/tags", String.class);
            JsonArray array = new JsonParser().parse(resp.getBody()).getAsJsonArray();
            List<Version> ownVersions = new ArrayList<>();
            versions.put(project, ownVersions);
            for (JsonElement element : array) {
                ownVersions
                        .add(Version.parse(element.getAsJsonObject().get("name").getAsString().substring(1)));
            }
            ownVersions.sort(Comparator.reverseOrder());
        } catch (ResourceAccessException e) {
            logger.warn("Could not determine versions for {} due to an exception: {}", project, e);
        }
    }
    return versions;
}

From source file:com.joyent.manta.client.MantaObjectDepthComparatorTest.java

public void verifyOrderingWithSmallDataSet() {
    List<MantaObject> objects = new ArrayList<>();
    List<MantaObject> dirs = dirObjects(12);

    for (MantaObject dir : dirs) {
        objects.add(dir);
        objects.addAll(fileObjects(dir, 3));
    }

    Collections.shuffle(objects);

    objects.sort(MantaObjectDepthComparator.INSTANCE);

    assertOrdering(objects);
}