Example usage for java.util.List.sort

List of usage examples for java.util.List.sort

Introduction

This page collects usage examples for the java.util.List.sort method, drawn from the source files listed below.

Prototype

@SuppressWarnings({ "unchecked", "rawtypes" })
default void sort(Comparator<? super E> c) 

Document

Sorts this list according to the order induced by the specified Comparator.
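
Before the project examples, here is a minimal, self-contained sketch of the two forms of the call (the list contents and class name are illustrative, not taken from the examples below): passing null falls back to the elements' natural ordering, while an explicit Comparator imposes any other order. The list must be mutable; calling sort on an unmodifiable list such as the result of List.of throws UnsupportedOperationException.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class ListSortExample {
    public static void main(String[] args) {
        // Mutable copy; sorting List.of(...) directly would throw UnsupportedOperationException.
        List<String> names = new ArrayList<>(List.of("Carol", "alice", "Bob"));

        // Natural ordering: a null comparator means the elements' Comparable order.
        names.sort(null);                               // [Bob, Carol, alice]

        // Explicit comparator: case-insensitive order.
        names.sort(String.CASE_INSENSITIVE_ORDER);      // [alice, Bob, Carol]

        // Key-extractor comparator, reversed: longest strings first.
        names.sort(Comparator.comparing(String::length).reversed());

        System.out.println(names);
    }
}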

Usage

From source file:de.dkfz.roddy.core.Analysis.java

public Map<DataSet, Boolean> checkStatus(List<String> pids, boolean suppressInfo) {
    List<DataSet> dataSets = getRuntimeService().loadDatasetsWithFilter(this, pids, suppressInfo);
    Map<DataSet, Boolean> results = new LinkedHashMap<>();
    dataSets.parallelStream().forEach(ds -> {
        boolean result = checkStatusForDataset(ds);
        synchronized (results) {
            results.put(ds, result);
        }
    });
    List<DataSet> sortedKeys = new LinkedList<>(results.keySet());
    sortedKeys.sort((ds1, ds2) -> ds1.getId().compareTo(ds2.getId()));
    Map<DataSet, Boolean> sortedMap = new LinkedHashMap<>();
    for (DataSet ds : sortedKeys) {
        sortedMap.put(ds, results.get(ds));
    }
    return sortedMap;
}
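
The same idiom in isolation, sketched with plain String keys instead of the project-specific DataSet type: sort a copy of the key set, then copy the entries into a LinkedHashMap so iteration follows the sorted order.

import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

public class SortedKeysExample {
    public static void main(String[] args) {
        Map<String, Boolean> results = new LinkedHashMap<>();
        results.put("pid-3", true);
        results.put("pid-1", false);
        results.put("pid-2", true);

        // Sort the keys, then rebuild an insertion-ordered map in that order.
        List<String> sortedKeys = new LinkedList<>(results.keySet());
        sortedKeys.sort(Comparator.naturalOrder());

        Map<String, Boolean> sortedMap = new LinkedHashMap<>();
        for (String key : sortedKeys) {
            sortedMap.put(key, results.get(key));
        }
        System.out.println(sortedMap);   // {pid-1=false, pid-2=true, pid-3=true}
    }
}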

From source file:net.sf.jabref.exporter.BibDatabaseWriter.java

/**
 * Write all strings in alphabetical order, modified to produce a safe (for
 * BibTeX) order of the strings if they reference each other.
 *
 * @param fw       The Writer to send the output to.
 * @param database The database whose strings we should write.
 * @param reformatFile
 * @throws IOException If anything goes wrong in writing.
 */
private void writeStrings(Writer fw, BibDatabase database, Boolean reformatFile) throws IOException {
    List<BibtexString> strings = database.getStringKeySet().stream().map(database::getString)
            .collect(Collectors.toList());
    strings.sort(new BibtexStringComparator(true));
    // First, make a Map of all entries:
    Map<String, BibtexString> remaining = new HashMap<>();
    int maxKeyLength = 0;
    for (BibtexString string : strings) {
        remaining.put(string.getName(), string);
        maxKeyLength = Math.max(maxKeyLength, string.getName().length());
    }

    for (BibtexString.Type t : BibtexString.Type.values()) {
        isFirstStringInType = true;
        for (BibtexString bs : strings) {
            if (remaining.containsKey(bs.getName()) && (bs.getType() == t)) {
                writeString(fw, bs, remaining, maxKeyLength, reformatFile);
                isFirstStringInType = false;
            }
        }
    }
}
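
BibtexStringComparator is JabRef-specific; the general pattern is sorting with a dedicated Comparator class whose constructor carries configuration. A sketch with a hypothetical comparator:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Hypothetical stand-in for a dedicated comparator class such as BibtexStringComparator.
class NameLengthComparator implements Comparator<String> {
    private final boolean considerCase;

    NameLengthComparator(boolean considerCase) {
        this.considerCase = considerCase;
    }

    @Override
    public int compare(String a, String b) {
        int byLength = Integer.compare(a.length(), b.length());
        if (byLength != 0) {
            return byLength;
        }
        return considerCase ? a.compareTo(b) : a.compareToIgnoreCase(b);
    }
}

public class ComparatorClassExample {
    public static void main(String[] args) {
        List<String> strings = new ArrayList<>(List.of("beta", "Alpha", "pi"));
        strings.sort(new NameLengthComparator(true));
        System.out.println(strings);   // [pi, beta, Alpha]
    }
}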

From source file:com.evolveum.midpoint.wf.impl.processors.primary.policy.ApprovalSchemaBuilder.java

private void processFragmentGroup(List<Fragment> fragments, ApprovalSchemaType resultingSchemaType,
        SchemaAttachedPolicyRulesType attachedRules, ModelInvocationContext ctx, OperationResult result)
        throws SchemaException {
    Fragment firstFragment = fragments.get(0);
    appendAddOnFragments(fragments);
    List<ApprovalStageDefinitionType> fragmentStageDefs = cloneAndMergeStages(fragments);
    if (fragmentStageDefs.isEmpty()) {
        return; // probably shouldn't occur
    }
    fragmentStageDefs.sort(Comparator.comparing(s -> getNumber(s), Comparator.nullsLast(naturalOrder())));
    RelationResolver relationResolver = primaryChangeAspect.createRelationResolver(firstFragment.target,
            result);
    ReferenceResolver referenceResolver = primaryChangeAspect.createReferenceResolver(ctx.modelContext,
            ctx.taskFromModel, result);
    int from = getStages(resultingSchemaType).size() + 1;
    int i = from;
    for (ApprovalStageDefinitionType stageDef : fragmentStageDefs) {
        stageDef.asPrismContainerValue().setId(null); // to avoid ID collision
        stageDef.setOrder(null);
        stageDef.setNumber(i++);
        approvalSchemaHelper.prepareStage(stageDef, relationResolver, referenceResolver);
        resultingSchemaType.getStage().add(stageDef);
    }
    if (firstFragment.policyRule != null) {
        List<EvaluatedPolicyRuleType> rules = new ArrayList<>();
        firstFragment.policyRule.addToEvaluatedPolicyRuleTypes(rules,
                new PolicyRuleExternalizationOptions(FULL, false, true));
        for (EvaluatedPolicyRuleType rule : rules) {
            SchemaAttachedPolicyRuleType attachedRule = new SchemaAttachedPolicyRuleType();
            attachedRule.setStageMin(from);
            attachedRule.setStageMax(i - 1);
            attachedRule.setRule(rule);
            attachedRules.getEntry().add(attachedRule);
        }
    }
}
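
Note that Comparator.nullsLast above wraps the key comparator, so stages whose number is null sort to the end (null list elements would still fail). A sketch of the same pattern with a hypothetical stage record in place of ApprovalStageDefinitionType:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

import static java.util.Comparator.naturalOrder;
import static java.util.Comparator.nullsLast;

public class NullsLastExample {
    // Hypothetical stage with a possibly-null number.
    record Stage(String name, Integer number) { }

    public static void main(String[] args) {
        List<Stage> stages = new ArrayList<>(Arrays.asList(
                new Stage("review", 2),
                new Stage("triage", null),
                new Stage("approve", 1)));

        // nullsLast wraps the *key* comparator, so stages whose number is null go last.
        stages.sort(Comparator.comparing(Stage::number, nullsLast(naturalOrder())));

        stages.forEach(s -> System.out.println(s.name() + " -> " + s.number()));
        // order: approve, review, triage
    }
}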

From source file:org.dkpro.core.io.nif.internal.Nif2DKPro.java

public void convert(Statement aContext, JCas aJCas) {
    Model m = aContext.getModel();

    final Resource tSentence = m.createResource(NIF.TYPE_SENTENCE);
    final Resource tWord = m.createResource(NIF.TYPE_WORD);
    final Resource tTitle = m.createResource(NIF.TYPE_TITLE);
    final Resource tParagraph = m.createResource(NIF.TYPE_PARAGRAPH);

    final Property pReferenceContext = m.createProperty(NIF.PROP_REFERENCE_CONTEXT);
    final Property pIsString = m.createProperty(NIF.PROP_IS_STRING);
    final Property pBeginIndex = m.createProperty(NIF.PROP_BEGIN_INDEX);
    final Property pEndIndex = m.createProperty(NIF.PROP_END_INDEX);
    final Property pLemma = m.createProperty(NIF.PROP_LEMMA);
    final Property pStem = m.createProperty(NIF.PROP_STEM);
    final Property pPosTag = m.createProperty(NIF.PROP_POS_TAG);
    final Property pTaIdentRef = m.createProperty(ITS.PROP_TA_IDENT_REF);
    final Property pTaClassRef = m.createProperty(ITS.PROP_TA_CLASS_REF);

    // Convert context node -> document text
    String text = m.getProperty(aContext.getSubject(), pIsString).getString();
    aJCas.setDocumentText(text);

    // Convert headings/titles
    Iterator<Resource> headingIterator = m.listResourcesWithProperty(RDF.type, tTitle)
            .filterKeep(res -> res.getProperty(pReferenceContext).getResource().equals(aContext.getSubject()));
    for (Resource nifTitle : new IteratorIterable<Resource>(headingIterator)) {
        int begin = nifTitle.getProperty(pBeginIndex).getInt();
        int end = nifTitle.getProperty(pEndIndex).getInt();
        Heading uimaHeading = new Heading(aJCas, begin, end);
        uimaHeading.addToIndexes();

        assert assertSanity(nifTitle, uimaHeading);
    }

    // Convert paragraphs
    Iterator<Resource> paragraphIterator = m.listResourcesWithProperty(RDF.type, tParagraph)
            .filterKeep(res -> res.getProperty(pReferenceContext).getResource().equals(aContext.getSubject()));
    for (Resource nifParagraph : new IteratorIterable<Resource>(paragraphIterator)) {
        int begin = nifParagraph.getProperty(pBeginIndex).getInt();
        int end = nifParagraph.getProperty(pEndIndex).getInt();
        Paragraph uimaParagraph = new Paragraph(aJCas, begin, end);
        uimaParagraph.addToIndexes();

        assert assertSanity(nifParagraph, uimaParagraph);
    }

    // Convert sentences
    List<Resource> nifSentences = m.listResourcesWithProperty(RDF.type, tSentence)
            .filterKeep(res -> res.getProperty(pReferenceContext).getResource().equals(aContext.getSubject()))
            .toList();
    nifSentences.sort((a, b) -> a.getProperty(pBeginIndex).getInt() - b.getProperty(pBeginIndex).getInt());
    for (Resource nifSentence : nifSentences) {
        int begin = nifSentence.getProperty(pBeginIndex).getInt();
        int end = nifSentence.getProperty(pEndIndex).getInt();
        Sentence uimaSentence = new Sentence(aJCas, begin, end);
        uimaSentence.addToIndexes();

        assert assertSanity(nifSentence, uimaSentence);
    }

    // Convert tokens
    Iterator<Resource> tokenIterator = m.listResourcesWithProperty(RDF.type, tWord)
            .filterKeep(res -> res.getProperty(pReferenceContext).getResource().equals(aContext.getSubject()));
    for (Resource nifWord : new IteratorIterable<Resource>(tokenIterator)) {
        int begin = nifWord.getProperty(pBeginIndex).getInt();
        int end = nifWord.getProperty(pEndIndex).getInt();
        Token uimaToken = new Token(aJCas, begin, end);
        uimaToken.addToIndexes();

        assert assertSanity(nifWord, uimaToken);

        // Convert lemma
        if (nifWord.hasProperty(pLemma)) {
            Lemma uimaLemma = new Lemma(aJCas, uimaToken.getBegin(), uimaToken.getEnd());
            uimaLemma.setValue(nifWord.getProperty(pLemma).getString());
            uimaLemma.addToIndexes();
            uimaToken.setLemma(uimaLemma);
        }

        // Convert stem
        if (nifWord.hasProperty(pStem)) {
            Stem uimaStem = new Stem(aJCas, uimaToken.getBegin(), uimaToken.getEnd());
            uimaStem.setValue(nifWord.getProperty(pStem).getString());
            uimaStem.addToIndexes();
            uimaToken.setStem(uimaStem);
        }

        // Convert posTag (this is discouraged, the better alternative should be oliaLink)
        if (nifWord.hasProperty(pPosTag)) {
            String tag = nifWord.getProperty(pPosTag).getString();
            Type posTag = posMappingProvider.getTagType(tag);
            POS uimaPos = (POS) aJCas.getCas().createAnnotation(posTag, uimaToken.getBegin(),
                    uimaToken.getEnd());
            uimaPos.setPosValue(tag.intern());
            uimaPos.setCoarseValue(
                    uimaPos.getClass().equals(POS.class) ? null : uimaPos.getType().getShortName().intern());
            uimaPos.addToIndexes();
            uimaToken.setPos(uimaPos);
        }
    }

    // Convert named entities
    //
    // NIF uses taIdentRef to link to a unique instance of an entity and taClassRef to identify
    // the category of the entity. Named entity recognizers in DKPro Core just categorize the
    // entity, e.g. as a person or a location. For what NIF expects, we would need a named
    // entity linker, not just a recognizer. Furthermore, the DKPro Core named entity
    // recognizers are not mapped to a common tag set (unlike e.g. POS, which is mapped to
    // the universal POS tags).
    //
    // So what we do here is treat the URI of the taClassRef in NIF simply as the
    // named entity category and store it.
    //
    // We use duck-typing: if a resource has a taClassRef property, then it is likely a named
    // entity. NIF 2.1 [1] appears to introduce a representation of named entities using the
    // class "EntityOccurrence", but e.g. kore50 [2] doesn't seem to use that - it uses "Phrase"
    // instead.
    //
    // [1] http://nif.readthedocs.io/en/2.1-rc/prov-and-conf.html
    // [2] https://datahub.io/dataset/kore-50-nif-ner-corpus
    // Collect resources in the current context that carry either an entity link (taIdentRef)
    // or an entity category (taClassRef).
    Set<Resource> nifNamedEntities1 = m.listResourcesWithProperty(pTaIdentRef)
            .filterKeep(res -> res.getProperty(pReferenceContext).getResource().equals(aContext.getSubject()))
            .toSet();
    Set<Resource> nifNamedEntities2 = m.listResourcesWithProperty(pTaClassRef)
            .filterKeep(res -> res.getProperty(pReferenceContext).getResource().equals(aContext.getSubject()))
            .toSet();
    Set<Resource> nifNamedEntities = new HashSet<Resource>();
    nifNamedEntities.addAll(nifNamedEntities1);
    nifNamedEntities.addAll(nifNamedEntities2);
    for (Resource nifNamedEntity : nifNamedEntities) {
        int begin = nifNamedEntity.getProperty(pBeginIndex).getInt();
        int end = nifNamedEntity.getProperty(pEndIndex).getInt();
        NamedEntity uimaNamedEntity = new NamedEntity(aJCas, begin, end);
        if (nifNamedEntity.hasProperty(pTaClassRef)) {
            uimaNamedEntity.setValue(nifNamedEntity.getProperty(pTaClassRef).getResource().getURI());
        }
        if (nifNamedEntity.hasProperty(pTaIdentRef)) {
            uimaNamedEntity.setIdentifier(nifNamedEntity.getProperty(pTaIdentRef).getResource().getURI());
        }
        uimaNamedEntity.addToIndexes();

        assert assertSanity(nifNamedEntity, uimaNamedEntity);
    }
}
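
The sentence sort above compares begin offsets by subtraction, which is fine for non-negative character offsets but can overflow for arbitrary ints; Comparator.comparingInt expresses the same ordering safely. A sketch with a hypothetical span record in place of the Jena Resource:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class ComparingIntExample {
    // Hypothetical stand-in for a NIF resource with a begin offset.
    record Span(String text, int beginIndex) { }

    public static void main(String[] args) {
        List<Span> spans = new ArrayList<>(List.of(
                new Span("second", 42),
                new Span("first", 0),
                new Span("third", 100)));

        // Equivalent to (a, b) -> a.beginIndex() - b.beginIndex(), but immune to int overflow.
        spans.sort(Comparator.comparingInt(Span::beginIndex));

        spans.forEach(s -> System.out.println(s.beginIndex() + ": " + s.text()));
    }
}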

From source file:com.orange.clara.cloud.servicedbdumper.service.DbDumperServiceInstanceBindingService.java

private Map<String, Object> extractCredentials(List<DbDumperCredential> dbDumperCredentials) {
    SimpleDateFormat dateFormater = new SimpleDateFormat(this.dateFormat);
    Map<String, Object> credentials = Maps.newHashMap();
    List<Map<String, Object>> dumpFiles = new ArrayList<>();
    Map<String, Object> dumpFile;
    Comparator<DbDumperCredential> comparator = (d1, d2) -> d1.getCreatedAt().compareTo(d2.getCreatedAt());
    dbDumperCredentials.sort(comparator.reversed());
    for (DbDumperCredential dbDumperCredential : dbDumperCredentials) {
        dumpFile = Maps.newHashMap();
        dumpFile.put("download_url", dbDumperCredential.getDownloadUrl());
        dumpFile.put("show_url", dbDumperCredential.getShowUrl());
        dumpFile.put("filename", dbDumperCredential.getFilename());
        dumpFile.put("created_at", dateFormater.format(dbDumperCredential.getCreatedAt()));
        dumpFile.put("dump_id", dbDumperCredential.getId());
        dumpFile.put("size", dbDumperCredential.getSize());
        dumpFile.put("deleted", dbDumperCredential.getDeleted());
        dumpFile.put("tags", dbDumperCredential.getTags());
        dumpFiles.add(dumpFile);
    }
    credentials.put("dumps", dumpFiles);
    return credentials;
}
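
A sketch of the newest-first ordering used above, with a hypothetical dump record and plain dates in place of DbDumperCredential: build the natural (oldest-first) comparator, then call reversed().

import java.time.LocalDate;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class ReversedComparatorExample {
    // Hypothetical dump descriptor.
    record Dump(String filename, LocalDate createdAt) { }

    public static void main(String[] args) {
        List<Dump> dumps = new ArrayList<>(List.of(
                new Dump("a.sql", LocalDate.of(2023, 1, 10)),
                new Dump("b.sql", LocalDate.of(2024, 6, 1)),
                new Dump("c.sql", LocalDate.of(2022, 3, 5))));

        // Oldest-first comparator, flipped so the newest dump comes first.
        Comparator<Dump> byCreation = Comparator.comparing(Dump::createdAt);
        dumps.sort(byCreation.reversed());

        dumps.forEach(d -> System.out.println(d.createdAt() + " " + d.filename()));
    }
}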

From source file:com.searchcode.app.service.CodeMatcher.java

/**
 * Actually does the matching for a single code result given the match terms
 */
public List<CodeMatchResult> matchResults(List<String> code, List<String> matchTerms, boolean highlightLine) {
    List<CodeMatchResult> resultLines = findMatchingLines(code, matchTerms, highlightLine);
    List<CodeMatchResult> newResultLines = new ArrayList<>();

    // get the top matching lines for this result
    resultLines.sort((p1, p2) -> Integer.valueOf(p2.getLineMatches()).compareTo(p1.getLineMatches()));

    // gets the best snippets based on number of matches
    for (int i = 0; i < resultLines.size(); i++) {
        CodeMatchResult match = resultLines.get(i);
        match.setLineNumber(match.getLineNumber() + 1);

        if (!resultExists(newResultLines, match.getLineNumber())) {
            newResultLines.add(match);
        }

        CodeMatchResult resultBefore = getResultByLineNumber(resultLines, match.getLineNumber() - 1);
        CodeMatchResult resultAfter = getResultByLineNumber(resultLines, match.getLineNumber() + 1);

        if (resultBefore != null && !resultExists(newResultLines, match.getLineNumber() - 1)) {
            newResultLines.add(resultBefore);
        }
        if (resultAfter != null && !resultExists(newResultLines, match.getLineNumber() + 1)) {
            newResultLines.add(resultAfter);
        }

        if (newResultLines.size() >= MATCHLINES) {
            break;
        }
    }

    newResultLines.sort((p1, p2) -> Integer.valueOf(p1.getLineNumber()).compareTo(p2.getLineNumber()));

    if (!newResultLines.isEmpty()) {
        newResultLines.get(0).addBreak = false;
        return newResultLines;
    }

    return null;
}
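
A compact sketch of the two-pass pattern above, with a hypothetical match record in place of CodeMatchResult: sort descending by match count to select the best lines, then re-sort the selection ascending by line number for display. Integer.compare avoids the boxing in the original lambdas.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class TwoPassSortExample {
    // Hypothetical match result.
    record Match(int lineNumber, int lineMatches) { }

    public static void main(String[] args) {
        List<Match> matches = new ArrayList<>(List.of(
                new Match(12, 1), new Match(3, 4), new Match(7, 2), new Match(5, 4)));

        // Pass 1: best-scoring lines first.
        matches.sort((p1, p2) -> Integer.compare(p2.lineMatches(), p1.lineMatches()));
        List<Match> top = new ArrayList<>(matches.subList(0, 3));

        // Pass 2: present the selected lines in file order.
        top.sort(Comparator.comparingInt(Match::lineNumber));

        top.forEach(m -> System.out.println("line " + m.lineNumber() + " (" + m.lineMatches() + " matches)"));
    }
}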

From source file:com.navercorp.pinpoint.common.server.bo.grpc.GrpcSpanFactory.java

private List<AnnotationBo> buildAnnotationList(List<PAnnotation> pAnnotationList) {
    if (CollectionUtils.isEmpty(pAnnotationList)) {
        return new ArrayList<>();
    }
    List<AnnotationBo> boList = new ArrayList<>(pAnnotationList.size());
    for (PAnnotation tAnnotation : pAnnotationList) {
        final AnnotationBo annotationBo = newAnnotationBo(tAnnotation);
        boList.add(annotationBo);
    }

    boList.sort(AnnotationComparator.INSTANCE);
    return boList;
}
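
AnnotationComparator.INSTANCE is Pinpoint-specific; a stateless, shared comparator like it is often implemented as an enum singleton. A sketch with a hypothetical annotation record:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class SharedComparatorExample {
    // Hypothetical annotation type.
    record Annotation(int key, String value) { }

    // Stateless comparator shared as an enum singleton, mirroring AnnotationComparator.INSTANCE.
    enum AnnotationComparator implements Comparator<Annotation> {
        INSTANCE;

        @Override
        public int compare(Annotation a, Annotation b) {
            return Integer.compare(a.key(), b.key());
        }
    }

    public static void main(String[] args) {
        List<Annotation> annotations = new ArrayList<>(List.of(
                new Annotation(40, "http.url"), new Annotation(12, "api"), new Annotation(-1, "args")));
        annotations.sort(AnnotationComparator.INSTANCE);
        annotations.forEach(a -> System.out.println(a.key() + " " + a.value()));
    }
}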

From source file:org.cloudsimplus.sla.responsetime.CloudletResponseTimeMinimizationExperiment.java

@Override
public final void printResults() {
    DatacenterBroker broker0 = getFirstBroker();
    List<Cloudlet> finishedCloudlets = broker0.getCloudletsFinishedList();
    Comparator<Cloudlet> sortByVmId = comparingDouble(c -> c.getVm().getId());
    Comparator<Cloudlet> sortByStartTime = comparingDouble(c -> c.getExecStartTime());
    finishedCloudlets.sort(sortByVmId.thenComparing(sortByStartTime));

    new CloudletsTableBuilder(finishedCloudlets).build();
}

From source file:org.devathon.contest2016.npc.NPCController.java

public void updateEquipment() {
    for (ArmorCategory category : ArmorCategory.values()) {
        List<Pair<ItemStack, Double>> weighted = itemStacks.stream()
                .map(itemStack -> Pair.of(itemStack, getGenericDefense(category, itemStack.getType())))
                .filter(pair -> pair.getRight() > 0).collect(Collectors.toList());

        weighted.sort((a, b) -> -Double.compare(a.getRight(), b.getRight()));

        if (weighted.size() > 0) {
            category.applyTo(getBukkitEntity(), weighted.get(0).getLeft());
        }
    }

    List<Pair<ItemStack, Double>> weighted = itemStacks.stream()
            .map(itemStack -> Pair.of(itemStack, getGenericAttackDamage(itemStack.getType())))
            .filter(pair -> pair.getRight() > 0).collect(Collectors.toList());

    weighted.sort((a, b) -> -Double.compare(a.getRight(), b.getRight()));

    if (weighted.size() > 0) {
        getBukkitEntity().getEquipment().setItemInMainHand(weighted.get(0).getLeft());
    }
}
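
Negating Double.compare, as above, is equivalent to reversing a key-based comparator. A sketch with a hypothetical weighted-item record in place of Pair<ItemStack, Double>:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class DescendingWeightExample {
    // Hypothetical weighted item.
    record Weighted(String name, double weight) { }

    public static void main(String[] args) {
        List<Weighted> weighted = new ArrayList<>(List.of(
                new Weighted("leather", 1.0), new Weighted("diamond", 3.0), new Weighted("iron", 2.0)));

        // Same ordering as (a, b) -> -Double.compare(a.weight(), b.weight()): heaviest first.
        weighted.sort(Comparator.comparingDouble(Weighted::weight).reversed());

        if (!weighted.isEmpty()) {
            System.out.println("best: " + weighted.get(0).name());   // best: diamond
        }
    }
}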

From source file:mesclasses.view.TimetableController.java

private List<Cours> getSimultaneousCours(Cours theCours) {
    List<Cours> liste = cours.stream().filter(c -> {
        return c.getDay().equals(theCours.getDay())
                && Math.max(start(c), start(theCours)) < Math.min(end(c), end(theCours));
    }).collect(Collectors.toList());
    liste.sort((Cours t, Cours t1) -> {
        if (t.getWeek().equals(t1.getWeek())) {
            return classes.indexOf(t.getClasse()) - classes.indexOf(t1.getClasse());
        }
        return t.getWeek().compareTo(t1.getWeek());
    });
    return liste;
}
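
The hand-written two-level comparison above can also be expressed with comparator composition. A sketch with a hypothetical lesson record in which the week and the class position are plain fields:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class ThenComparingExample {
    // Hypothetical lesson, with the class position already resolved to an index.
    record Lesson(String week, int classIndex) { }

    public static void main(String[] args) {
        List<Lesson> lessons = new ArrayList<>(List.of(
                new Lesson("B", 2), new Lesson("A", 1), new Lesson("A", 0)));

        // Primary key: week; tie-breaker: position of the class, as in the lambda above.
        lessons.sort(Comparator.comparing(Lesson::week)
                .thenComparingInt(Lesson::classIndex));

        lessons.forEach(l -> System.out.println(l.week() + " #" + l.classIndex()));
        // A #0, A #1, B #2
    }
}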