Example usage for java.util SortedMap containsKey

List of usage examples for java.util SortedMap containsKey

Introduction

On this page you can find example usage for java.util.SortedMap.containsKey.

Prototype

boolean containsKey(Object key);

Document

Returns true if this map contains a mapping for the specified key.
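
Before the project examples, here is a minimal, self-contained sketch of the method in isolation (the map and its contents are hypothetical, not taken from any of the projects below). It shows the check-then-act pattern that most of the usage examples on this page share: call containsKey before deciding whether to update an existing entry or create a new one.

import java.util.ArrayList;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;

public class ContainsKeyDemo {
    public static void main(String[] args) {
        // Hypothetical frequency-to-words index, as used in several examples below.
        SortedMap<Integer, List<String>> byFrequency = new TreeMap<Integer, List<String>>();

        int freq = 3;
        if (byFrequency.containsKey(freq)) {
            // Key already mapped: extend the existing list.
            byFrequency.get(freq).add("word");
        } else {
            // No mapping yet: create the list and register it.
            List<String> words = new ArrayList<String>();
            words.add("word");
            byFrequency.put(freq, words);
        }

        System.out.println(byFrequency.containsKey(3)); // true
        System.out.println(byFrequency.containsKey(7)); // false
    }
}

On Java 8 and later, this containsKey/get/put sequence can usually be collapsed into a single computeIfAbsent call; the examples below use the older explicit idiom.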

Usage

From source file:marytts.language.de.JPhonemiser.java

public void shutdown() {
    if (logUnknownFileName != null || logEnglishFileName != null) {
        try {
            /* print unknown words */

            //open file
            PrintWriter logUnknown = new PrintWriter(
                    new OutputStreamWriter(new FileOutputStream(logUnknownFileName), "UTF-8"));
            //sort the words
            Set<String> unknownWords = unknown2Frequency.keySet();
            SortedMap<Integer, List<String>> freq2Unknown = new TreeMap<Integer, List<String>>();

            for (String nextUnknown : unknownWords) {
                int nextFreq = unknown2Frequency.get(nextUnknown);
                //logUnknown.println(nextFreq+" "+nextUnknown);
                if (freq2Unknown.containsKey(nextFreq)) {
                    List<String> unknowns = freq2Unknown.get(nextFreq);
                    unknowns.add(nextUnknown);
                } else {
                    List<String> unknowns = new ArrayList<String>();
                    unknowns.add(nextUnknown);
                    freq2Unknown.put(nextFreq, unknowns);
                }
            }
            //print the words
            for (int nextFreq : freq2Unknown.keySet()) {
                List<String> unknowns = freq2Unknown.get(nextFreq);
                for (int i = 0; i < unknowns.size(); i++) {
                String unknownWord = unknowns.get(i);
                    logUnknown.println(nextFreq + " " + unknownWord);
                }

            }
            //close file
            logUnknown.flush();
            logUnknown.close();

            /* print english words */
            //open the file
            PrintWriter logEnglish = new PrintWriter(
                    new OutputStreamWriter(new FileOutputStream(logEnglishFileName), "UTF-8"));
            //sort the words
            SortedMap<Integer, List<String>> freq2English = new TreeMap<Integer, List<String>>();
            for (String nextEnglish : english2Frequency.keySet()) {
                int nextFreq = english2Frequency.get(nextEnglish);
                if (freq2English.containsKey(nextFreq)) {
                    List<String> englishWords = freq2English.get(nextFreq);
                    englishWords.add(nextEnglish);
                } else {
                    List<String> englishWords = new ArrayList<String>();
                    englishWords.add(nextEnglish);
                    freq2English.put(nextFreq, englishWords);
                }

            }
            //print the words
            for (int nextFreq : freq2English.keySet()) {
                List<String> englishWords = freq2English.get(nextFreq);
                for (int i = 0; i < englishWords.size(); i++) {
                    logEnglish.println(nextFreq + " " + englishWords.get(i));
                }
            }
            //close file
            logEnglish.flush();
            logEnglish.close();

        } catch (Exception e) {
            logger.info("Error printing log files for english and unknown words", e);
        }
    }
}

From source file:uk.org.sappho.applications.transcript.service.registry.WorkingCopy.java

public void putProperties(String environment, String application, SortedMap<String, Object> properties,
        boolean isMerge) throws TranscriptException {

    Gson gson = new Gson();
    synchronized (getLock()) {
        SortedMap<String, Object> oldProperties = getJsonProperties(environment, application);
        SortedMap<String, Object> newProperties;
        if (isMerge && !oldProperties.isEmpty()) {
            newProperties = new TreeMap<String, Object>();
            for (String key : oldProperties.keySet()) {
                newProperties.put(key, oldProperties.get(key));
            }
            for (String key : properties.keySet()) {
                Object newValue = properties.get(key);
                if (transcriptParameters.isFailOnValueChange() && oldProperties.containsKey(key)
                        && !gson.toJson(newValue).equals(gson.toJson(oldProperties.get(key)))) {
                    throw new TranscriptException("Value of property " + environment + ":" + application + ":"
                            + key + " would change");
                }
                newProperties.put(key, newValue);
            }
        } else {
            newProperties = properties;
        }
        String oldJson = gson.toJson(oldProperties);
        String newJson = gson.toJson(newProperties);
        if (!newJson.equals(oldJson)) {
            putProperties(environment, application, newProperties);
        }
    }
}

From source file:org.apache.cassandra.db.index.sasi.disk.TokenTreeTest.java

@Test
public void buildWithMultipleMapsAndIterate() throws Exception {
    final SortedMap<Long, LongSet> merged = new TreeMap<>();
    final TokenTreeBuilder builder = new TokenTreeBuilder(simpleTokenMap).finish();
    builder.add(collidingTokensMap);

    merged.putAll(collidingTokensMap);
    for (Map.Entry<Long, LongSet> entry : simpleTokenMap.entrySet()) {
        if (merged.containsKey(entry.getKey())) {
            LongSet mergingOffsets = entry.getValue();
            LongSet existingOffsets = merged.get(entry.getKey());

            if (mergingOffsets.equals(existingOffsets))
                continue;

            Set<Long> mergeSet = new HashSet<>();
            for (LongCursor merging : mergingOffsets)
                mergeSet.add(merging.value);

            for (LongCursor existing : existingOffsets)
                mergeSet.add(existing.value);

            LongSet mergedResults = new LongOpenHashSet();
            for (Long result : mergeSet)
                mergedResults.add(result);

            merged.put(entry.getKey(), mergedResults);
        } else {
            merged.put(entry.getKey(), entry.getValue());
        }
    }

    final Iterator<Pair<Long, LongSet>> tokenIterator = builder.iterator();
    final Iterator<Map.Entry<Long, LongSet>> listIterator = merged.entrySet().iterator();
    while (tokenIterator.hasNext() && listIterator.hasNext()) {
        Pair<Long, LongSet> tokenNext = tokenIterator.next();
        Map.Entry<Long, LongSet> listNext = listIterator.next();

        Assert.assertEquals(listNext.getKey(), tokenNext.left);
        Assert.assertEquals(listNext.getValue(), tokenNext.right);
    }

    Assert.assertFalse("token iterator not finished", tokenIterator.hasNext());
    Assert.assertFalse("list iterator not finished", listIterator.hasNext());

}

From source file:org.apache.hcatalog.hcatmix.load.HCatMapper.java

@Override
public void map(LongWritable longWritable, Text text, OutputCollector<LongWritable, IntervalResult> collector,
        final Reporter reporter) throws IOException {
    LOG.info(MessageFormat.format("Input: {0}={1}", longWritable, text));
    final List<Future<SortedMap<Long, IntervalResult>>> futures = new ArrayList<Future<SortedMap<Long, IntervalResult>>>();

    // Initialize tasks
    List<org.apache.hcatalog.hcatmix.load.tasks.Task> tasks;
    try {
        tasks = initializeTasks(jobConf);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    ThreadCreatorTimer createNewThreads = new ThreadCreatorTimer(new TimeKeeper(timeKeeper), tasks,
            threadIncrementCount, futures, reporter);

    // Create timer thread to automatically keep on increasing threads at fixed interval
    Timer newThreadCreator = new Timer(true);
    newThreadCreator.scheduleAtFixedRate(createNewThreads, 0, threadIncrementIntervalInMillis);

    // Sleep and let the tasks get expired
    long remainingTime = timeKeeper.getRemainingTimeIncludingBuffer();
    final long sleepPeriod = 2000;
    for (long i = remainingTime; i > 0; i = i - sleepPeriod) {
        try {
            Thread.sleep(sleepPeriod);
            reporter.progress();
        } catch (InterruptedException e) {
            LOG.error("Got interrupted while sleeping for timer thread to finish");
        }
    }

    newThreadCreator.cancel();
    LOG.info("Time is over, will collect the futures now. Total number of threads: " + futures.size());
    SortedMap<Long, IntervalResult> stopWatchAggregatedTimeSeries = new TreeMap<Long, IntervalResult>();

    // Merge the corresponding time interval results received from all the threads for each time interval
    for (TaskExecutor taskExecutor : createNewThreads.getTaskExecutors()) {
        try {
            SortedMap<Long, IntervalResult> threadTimeSeries = taskExecutor.getTimeSeriesResult();
            for (Map.Entry<Long, IntervalResult> entry : threadTimeSeries.entrySet()) {
                Long timeStamp = entry.getKey();
                IntervalResult intervalResult = entry.getValue();

                if (stopWatchAggregatedTimeSeries.containsKey(timeStamp)) {
                    stopWatchAggregatedTimeSeries.get(timeStamp).addIntervalResult(intervalResult);
                } else {
                    stopWatchAggregatedTimeSeries.put(timeStamp, intervalResult);
                }
                LOG.info(MessageFormat.format("{0}: Added {1} stopwatches. Current stopwatch number: {2}",
                        timeStamp, intervalResult.getStopWatchList().size(),
                        stopWatchAggregatedTimeSeries.get(timeStamp).getStopWatchList().size()));
            }
        } catch (Exception e) {
            LOG.error("Error while getting thread results", e);
        }
    }

    // Output the consolidated result for this map along with the number of threads against time
    LOG.info("Collected all the statistics for #threads: " + createNewThreads.getThreadCount());
    SortedMap<Long, Integer> threadCountTimeSeries = createNewThreads.getThreadCountTimeSeries();
    int threadCount = 0;
    for (Map.Entry<Long, IntervalResult> entry : stopWatchAggregatedTimeSeries.entrySet()) {
        long timeStamp = entry.getKey();
        IntervalResult intervalResult = entry.getValue();
        if (threadCountTimeSeries.containsKey(timeStamp)) {
            threadCount = threadCountTimeSeries.get(timeStamp);
        }
        intervalResult.setThreadCount(threadCount);
        collector.collect(new LongWritable(timeStamp), intervalResult);
    }
}

From source file:org.formix.dsx.serialization.XmlSerializer.java

private Object getValue(XmlElement elem, Class<?> paramType, SortedMap<String, Method> parentMethods,
        Object parent) throws XmlException {
    try {
        SortedMap<String, Method> methods = this.createMethodMap(paramType);

        Method paramTypeValueOfMethod = null;
        if (methods.containsKey("valueOf-String"))
            paramTypeValueOfMethod = methods.get("valueOf-String");

        if (elem.getChilds().size() == 0) {
            if (Collection.class.isAssignableFrom(paramType)) {
                return this.getCollectionValue(elem, paramType, parentMethods, parent);
            } else {
                return null;
            }
        }

        Object value = null;
        Iterator<XmlContent> contentIterator = elem.getChilds().iterator();
        XmlContent firstChild = contentIterator.next();
        while (firstChild.toString().trim().equals("") && contentIterator.hasNext()) {
            // skip blank lines
            firstChild = contentIterator.next();
        }

        if (paramType.equals(String.class)) {
            XmlText text = (XmlText) firstChild;
            value = text.getText();

        } else if (paramType.equals(Timestamp.class)) {
            value = new Timestamp(this.parseDate(firstChild.toString()).getTime());

        } else if (paramType.equals(Date.class)) {
            value = this.parseDate(firstChild.toString());

        } else if (Calendar.class.isAssignableFrom(paramType)) {
            value = new GregorianCalendar();
            ((GregorianCalendar) value).setTime(this.parseDate(firstChild.toString()));

        } else if (paramTypeValueOfMethod != null) {
            value = paramTypeValueOfMethod.invoke(null, new Object[] { firstChild.toString() });

        } else if (Collection.class.isAssignableFrom(paramType)) {
            value = this.getCollectionValue(elem, paramType, parentMethods, parent);

        } else if (Map.class.isAssignableFrom(paramType)) {
            throw new XmlException("The Map deserialization is not yet implemented.");
        } else if (this.isSetter(parentMethods, elem.getName()) && (firstChild instanceof XmlElement)) {
            XmlElement elemFirstChild = (XmlElement) firstChild;
            Class<?> specifiedType = this.getType(this.capitalize(elemFirstChild.getName()));
            value = this.deserialize(elemFirstChild, specifiedType);
        } else {
            value = this.deserialize(elem, paramType);
        }

        return value;
    } catch (Exception e) {
        throw new XmlException("Problem getting value of " + elem + " of type " + paramType.getName(), e);
    }
}

From source file:com.facebook.presto.execution.resourceGroups.TestResourceGroups.java

@Test(timeOut = 10_000)
public void testPriorityScheduling() {
    RootInternalResourceGroup root = new RootInternalResourceGroup("root", (group, export) -> {
    }, directExecutor());
    root.setSoftMemoryLimit(new DataSize(1, MEGABYTE));
    root.setMaxQueuedQueries(100);
    // Start with zero capacity, so that nothing starts running until we've added all the queries
    root.setMaxRunningQueries(0);
    root.setSchedulingPolicy(QUERY_PRIORITY);
    InternalResourceGroup group1 = root.getOrCreateSubGroup("1");
    group1.setSoftMemoryLimit(new DataSize(1, MEGABYTE));
    group1.setMaxQueuedQueries(100);
    group1.setMaxRunningQueries(1);
    InternalResourceGroup group2 = root.getOrCreateSubGroup("2");
    group2.setSoftMemoryLimit(new DataSize(1, MEGABYTE));
    group2.setMaxQueuedQueries(100);
    group2.setMaxRunningQueries(1);

    SortedMap<Integer, MockQueryExecution> queries = new TreeMap<>();

    Random random = new Random();
    for (int i = 0; i < 100; i++) {
        int priority;
        do {
            priority = random.nextInt(1_000_000) + 1;
        } while (queries.containsKey(priority));

        MockQueryExecution query = new MockQueryExecution(0, "query_id", priority);
        if (random.nextBoolean()) {
            group1.run(query);
        } else {
            group2.run(query);
        }
        queries.put(priority, query);
    }

    root.setMaxRunningQueries(1);

    List<MockQueryExecution> orderedQueries = new ArrayList<>(queries.values());
    reverse(orderedQueries);

    for (MockQueryExecution query : orderedQueries) {
        root.processQueuedQueries();
        assertEquals(query.getState(), RUNNING);
        query.complete();
    }
}

From source file:ubic.gemma.web.controller.common.description.bibref.BibliographicReferenceControllerImpl.java

@Override
public ModelAndView showAllForExperiments(HttpServletRequest request, HttpServletResponse response) {
    Map<ExpressionExperiment, BibliographicReference> eeToBibRefs = bibliographicReferenceService
            .getAllExperimentLinkedReferences();

    // map sorted in natural order of the keys
    SortedMap<CitationValueObject, Collection<ExpressionExperimentValueObject>> citationToEEs = new TreeMap<>();
    for (Entry<ExpressionExperiment, BibliographicReference> entry : eeToBibRefs.entrySet()) {
        if (entry.getValue().getTitle() == null || entry.getValue().getTitle().isEmpty()
                || entry.getValue().getAuthorList() == null || entry.getValue().getAuthorList().isEmpty()) {
            continue;
        }
        CitationValueObject cvo = CitationValueObject.convert2CitationValueObject(entry.getValue());
        if (!citationToEEs.containsKey(cvo)) {
            citationToEEs.put(cvo, new ArrayList<ExpressionExperimentValueObject>());
        }
        ExpressionExperiment ee = entry.getKey();
        ee.setBioAssays(null);
        ee.setAccession(null);
        ee.setExperimentalDesign(null);
        citationToEEs.get(cvo).add(new ExpressionExperimentValueObject(ee));

    }

    return new ModelAndView("bibRefAllExperiments").addObject("citationToEEs", citationToEEs);
}

From source file:net.sourceforge.fenixedu.presentationTier.Action.publico.ViewHomepageDA.java

public ActionForward listAlumni(ActionMapping mapping, ActionForm actionForm, HttpServletRequest request,
        HttpServletResponse response) throws Exception {
    final SortedMap<Degree, SortedSet<Homepage>> homepages = new TreeMap<Degree, SortedSet<Homepage>>(
            Degree.COMPARATOR_BY_DEGREE_TYPE_AND_NAME_AND_ID);
    for (final Registration registration : rootDomainObject.getRegistrationsSet()) {

        if (registration.getActiveState().getStateType().equals(RegistrationStateType.CONCLUDED)) {

            final Degree degree = registration.getActiveStudentCurricularPlan().getDegreeCurricularPlan()
                    .getDegree();

            final SortedSet<Homepage> degreeHomepages;
            if (homepages.containsKey(degree)) {
                degreeHomepages = homepages.get(degree);
            } else {
                degreeHomepages = new TreeSet<Homepage>(Homepage.HOMEPAGE_COMPARATOR_BY_NAME);
                homepages.put(degree, degreeHomepages);
            }

            final Homepage homepage = registration.getPerson().getHomepage();
            if (homepage != null && homepage.getActivated()) {
                degreeHomepages.add(homepage);
            }
        }

    }

    request.setAttribute("homepages", homepages);

    final String selectedPage = request.getParameter("selectedPage");
    if (selectedPage != null) {
        request.setAttribute("selectedPage", selectedPage);
    }

    return mapping.findForward("list-homepages-alumni");
}

From source file:org.mitre.ccv.canopy.CcvCanopyCluster.java

/**
 * Sets the thresholds 1 and 2 using MaxLike profile.
 *
 * Issues/Pitfalls:
 * <ol>
 * <li>t2 might be too small and nothing is removed from the list</li>
 * <li>t1 might be too large and everything is added to a canopy</li>
 * </ol>
 * @todo: figure out how to select threshold1 (not too big, not too small)
 */
public double[] autoThreshold() throws Exception {
    LOG.info("autoThreshold: Generating distance distribution");
    //SortedMap<Double, Integer> sortMap = new TreeMap<Double, Integer>(new ReverseDoubleComparator());
    SortedMap<Double, Integer> sortMap = new TreeMap<Double, Integer>();
    // generate all the pairwise distances
    final int size = completeMatrix.getMatrix().getColumnDimension();
    for (int i = 0; i < size; ++i) {
        for (int j = i + 1; j < size; ++j) {
            // only calculate one triangle not full!
            Double d = this.cheapMetric.distance(i, j);
            //set.add(this.cheapMetric.distance(i, j));
            if (sortMap.containsKey(d)) {
                sortMap.put(d, sortMap.get(d) + 1);
            } else {
                sortMap.put(d, 1);
            }
        }
    }

    /**
    * $gnuplot
    * > set nokey
    * > set xlabel "Pairwise distance"
    * > set ylabel "Number of samples"
    * > plot "output.txt" using 1:2
    */
    /* */
    for (Iterator<Entry<Double, Integer>> i = sortMap.entrySet().iterator(); i.hasNext();) {
        Entry<Double, Integer> entry = i.next();

        //System.out.printf("%f\t%d\n", entry.getKey(), entry.getValue());
    }
    /* */

    /**
     * How many bins per samples do we want?
     * Using the two end cases at lower and upper bounds.
     */
    TH1D hist = new TH1D(completeMatrix.getMatrix().getColumnDimension() * 2, sortMap.firstKey(),
            sortMap.lastKey());
    LOG.info(String.format("autoThreshold: Packing into histogram with %d bins (%f, %f)", hist.getBins().length,
            hist.getLower(), hist.getUpper()));
    hist.pack(sortMap);
    int[] bins = hist.getBins();
    if (LOG.isDebugEnabled()) {
        if (hist.getNumberOverflows() != 0) {
            LOG.debug(
                    String.format("autoThreshold: Have %d overflows in histogram!", hist.getNumberOverflows()));
        }
        if (hist.getNumberUnderflows() != 0) {
            LOG.debug(String.format("autoThreshold: Have %d underflows in histogram!",
                    hist.getNumberUnderflows()));
        }
    }

    // print out histogram bins
    for (int i = 0; i < bins.length; i++) {
        //System.out.printf("%f\t%d\n", hist.getBinCenter(i), hist.getBinContent(i));
    }
    TSpectrum spectrum = new TSpectrum(); // use default values (sigma = 1, threshold = 0.5)
    int numFound = spectrum.search(hist);
    LOG.info(String.format("autoThreshold: Found %d peaks", numFound));
    if (numFound == 0) {
        LOG.fatal("autoThreshold: No peaks found in data!");
        throw new Exception();
    }
    double xpeaks[] = spectrum.getPostionX();
    double[] rtn = new double[2]; // t1, t2
    if (numFound == 1) {
        int bin = hist.findBin(xpeaks[0]);
        // is this in the top or bottom half?
        // @todo: must be better way than this hack
        if (bin > 0) {
            bin--;
        }
        rtn[0] = hist.getBinCenter(bin); // threshold1 is only peak
        rtn[1] = (hist.getLower() + rtn[0]) / 2;
        return rtn;
    }

    // more than one peak
    /**
     * Several possible options:
     * - select t1 first, then find a good t2
     * - select t2 first, then find a good t1
     * 
     * make sure that there is enough samples below t2 and above t1
             
    if (xpeaks[0] > xpeaks[1]) {
    // what about sigma value: how many are between these two
    rtn[0] = xpeaks[0]; // t1
    rtn[1] = xpeaks[1];  //t2
    } else {
    rtn[0] = xpeaks[1];
    rtn[1] = xpeaks[0];
    }
    */

    // find the peak with the smallest this will be the basis for t2
    double minPeakX = hist.getUpper();
    int minPeakI = -1;
    for (int i = 0; i < numFound; i++) {
        final double x = xpeaks[i];
        if (x < minPeakX) {
            minPeakX = x;
            minPeakI = i;
        }
    }
    //System.err.printf("minPeakX=%f (%d)\n", minPeakX, minPeakI);

    // find next peak above the smallest
    // should try using something about the average and standard deviation
    // of the distribution of entries in picking this
    double min2PeakX = hist.getUpper();
    int min2PeakI = -1;
    for (int i = 0; i < numFound; i++) {
        final double x = xpeaks[i];
        if (i != minPeakI && x < min2PeakX) { // should check that it isn't equal or within sigma
            min2PeakX = x;
            min2PeakI = i;
        }
    }
    //System.err.printf("min2PeakX=%f (%d)\n", min2PeakX, min2PeakI);
    /**
    if (minPeakI + 1 < min2PeakI - 1) {
    rtn[0] = hist.getBinCenter(min2PeakI - 1);         // t1
    rtn[1] = hist.getBinCenter(minPeakI + 1);          // t2
    } else {
    // really close not good - these should be the centers
    LOG.info("autoThreshold: t1 and t2 are possbily from adjacent bins!");
    rtn[0] = min2PeakX;
    rtn[1] = minPeakX;
    }
    int t2bin = hist.findBin(minPeakX);
    if (t2bin - 1 > 0 ) {
    rtn[1] = hist.getBinCenter(t2bin - 1); // don't want the first bin?
    } else {
    rtn[1] = minPeakX;
    }
    int t1bin = hist.findBin(min2PeakX);
    if (t1bin + 1 < bins.length - 1) {  // don't want the last bin?
    rtn[0] = hist.getBinCenter(t1bin + 1);
    } else {
    rtn[0] = min2PeakX;
    }*/

    rtn[0] = min2PeakX;
    rtn[1] = minPeakX;

    /*
    double t1 = hist.getUpper();
    double t2 = hist.getLower(); */
    // print out what we found
    for (int p = 0; p < numFound; p++) {
        double xp = xpeaks[p];
        int bin = hist.findBin(xp);
        int yp = hist.getBinContent(bin); // double yp
        System.err.printf("%d\t%f\t%d\n", bin, xp, yp);
        // if(yp- Math.sqrt(yp) < fline.eval(xp)) continue
    }

    return rtn;
}

From source file:net.sourceforge.pmd.docs.RuleDocGenerator.java

private Map<Language, List<RuleSet>> sortRulesets(Iterator<RuleSet> rulesets) throws RuleSetNotFoundException {
    SortedMap<Language, List<RuleSet>> rulesetsByLanguage = new TreeMap<>();

    while (rulesets.hasNext()) {
        RuleSet ruleset = rulesets.next();
        Language language = getRuleSetLanguage(ruleset);

        if (!rulesetsByLanguage.containsKey(language)) {
            rulesetsByLanguage.put(language, new ArrayList<RuleSet>());
        }
        rulesetsByLanguage.get(language).add(ruleset);
    }

    for (List<RuleSet> rulesetsOfOneLanguage : rulesetsByLanguage.values()) {
        Collections.sort(rulesetsOfOneLanguage, new Comparator<RuleSet>() {
            @Override
            public int compare(RuleSet o1, RuleSet o2) {
                return o1.getName().compareToIgnoreCase(o2.getName());
            }
        });
    }
    return rulesetsByLanguage;
}