Example usage for java.util SortedMap containsKey

List of usage examples for java.util SortedMap containsKey

Introduction

On this page you can find example usage for java.util SortedMap containsKey.

Prototype

boolean containsKey(Object key);

Document

Returns true if this map contains a mapping for the specified key.
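
A minimal sketch of the call on its own, using TreeMap (the usual SortedMap implementation):

import java.util.SortedMap;
import java.util.TreeMap;

public class ContainsKeyExample {
    public static void main(String[] args) {
        SortedMap<String, Integer> map = new TreeMap<>();
        map.put("alpha", 1);

        // containsKey tests for a mapping without retrieving the value
        System.out.println(map.containsKey("alpha")); // true
        System.out.println(map.containsKey("beta"));  // false
    }
}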

Usage

From source file:de.tudarmstadt.ukp.experiments.argumentation.sequence.feature.coreference.CoreferenceFeatures.java

/**
 * Returns a sorted map of [sentencePos, list[coreferenceLink]] for the given chain.
 * The keys are not continuous; only sentences that contain a coreference link from
 * the given chain are stored.
 *
 * @param chain the coreference chain to process
 * @param jCas  the JCas to read sentences from
 * @return a map from sentence index to the chain's links in that sentence
 */
private static SortedMap<Integer, List<CoreferenceLink>> extractSentencesAndLinksFromChain(
        List<CoreferenceLink> chain, JCas jCas) {
    SortedMap<Integer, List<CoreferenceLink>> result = new TreeMap<>();

    // iterate over sentences
    List<Sentence> sentences = new ArrayList<>(JCasUtil.select(jCas, Sentence.class));
    for (int sentenceNo = 0; sentenceNo < sentences.size(); sentenceNo++) {
        Sentence sentence = sentences.get(sentenceNo);

        for (CoreferenceLink link : chain) {
            // does this link fall within the current sentence?
            if (link.getBegin() >= sentence.getBegin() && link.getEnd() <= sentence.getEnd()) {
                // put it into the map
                if (!result.containsKey(sentenceNo)) {
                    result.put(sentenceNo, new ArrayList<CoreferenceLink>());
                }
                result.get(sentenceNo).add(link);
            }
        }
    }

    return result;
}
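
On Java 8 and later, the containsKey/put pair inside the inner loop can be collapsed with computeIfAbsent; a sketch of just that grouping step, with the same types as above:

// equivalent to the containsKey check followed by put and get
result.computeIfAbsent(sentenceNo, k -> new ArrayList<>()).add(link);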

From source file:org.fede.util.Util.java

private static MoneyAmountSeries read(String name) {

    try (InputStream is = Util.class.getResourceAsStream("/" + name)) {

        final JSONSeries series = CONSULTATIO_SERIES.contains(name) ? readConsultatioSeries(is, OM)
                : OM.readValue(is, JSONSeries.class);

        final SortedMap<YearMonth, MoneyAmount> interpolatedData = new TreeMap<>();
        final String currency = series.getCurrency();
        for (JSONDataPoint dp : series.getData()) {
            if (interpolatedData.put(new YearMonth(dp.getYear(), dp.getMonth()),
                    new MoneyAmount(dp.getValue(), currency)) != null) {
                throw new IllegalArgumentException("Series " + name + " has two values for year " + dp.getYear()
                        + " and month " + dp.getMonth());
            }
        }

        final InterpolationStrategy strategy = InterpolationStrategy.valueOf(series.getInterpolation());

        YearMonth ym = interpolatedData.firstKey();
        final YearMonth last = interpolatedData.lastKey();
        while (ym.monthsUntil(last) > 0) {
            YearMonth next = ym.next();
            if (!interpolatedData.containsKey(next)) {
                interpolatedData.put(next, strategy.interpolate(interpolatedData.get(ym), ym, currency));
            }
            ym = ym.next();
        }
        return new SortedMapMoneyAmountSeries(currency, interpolatedData);

    } catch (IOException ioEx) {
        throw new IllegalArgumentException("Could not read series named " + name, ioEx);
    }

}
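
The containsKey check in the interpolation loop is what keeps observed data points from being overwritten by interpolated ones. A simplified stand-in for that loop, using java.time.YearMonth and plain BigDecimal values instead of the project's own YearMonth/MoneyAmount types and InterpolationStrategy (an assumption made only for illustration):

import java.math.BigDecimal;
import java.time.YearMonth;
import java.util.SortedMap;
import java.util.TreeMap;

public class GapFillSketch {
    public static void main(String[] args) {
        SortedMap<YearMonth, BigDecimal> data = new TreeMap<>();
        data.put(YearMonth.of(2020, 1), new BigDecimal("100"));
        data.put(YearMonth.of(2020, 4), new BigDecimal("130"));

        YearMonth ym = data.firstKey();
        YearMonth last = data.lastKey();
        while (ym.isBefore(last)) {
            YearMonth next = ym.plusMonths(1);
            // only months without an observed value get filled in
            if (!data.containsKey(next)) {
                data.put(next, data.get(ym)); // carry the last value forward as a stand-in strategy
            }
            ym = next;
        }
        System.out.println(data); // {2020-01=100, 2020-02=100, 2020-03=100, 2020-04=130}
    }
}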

From source file:com.kolich.aws.services.s3.impl.KolichS3Signer.java

/**
  * Calculate the canonical string for a REST/HTTP request to S3.
  */
private static final String getS3CanonicalString(final AwsHttpRequest request) {
    // A few standard headers we extract for convenience.
    final String contentType = CONTENT_TYPE.toLowerCase(), contentMd5 = CONTENT_MD5.toLowerCase(),
            date = DATE.toLowerCase();
    // Start with the empty string ("").
    final StringBuilder buf = new StringBuilder();
    // Next is the HTTP verb and a newline.
    buf.append(request.getMethod() + LINE_SEPARATOR_UNIX);
    // Add all interesting headers to a list, then sort them.
    // "Interesting" is defined as Content-MD5, Content-Type, Date,
    // and x-amz-... headers.
    final Map<String, String> headersMap = getHeadersAsMap(request);
    final SortedMap<String, String> interesting = Maps.newTreeMap();
    if (!headersMap.isEmpty()) {
        Iterator<Map.Entry<String, String>> it = headersMap.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, String> entry = it.next();
            final String key = entry.getKey(), value = entry.getValue();
            if (key == null) {
                continue;
            }
            final String lk = key.toLowerCase(Locale.getDefault());
            // Ignore any headers that are not interesting.
            if (lk.equals(contentType) || lk.equals(contentMd5) || lk.equals(date)
                    || lk.startsWith(AMAZON_PREFIX)) {
                interesting.put(lk, value);
            }
        }
    }
    // Blank out the default Date header value if "x-amz-date" is set.
    if (interesting.containsKey(S3_ALTERNATE_DATE)) {
        interesting.put(date, "");
    }
    // These headers require that we still put a new line in after them,
    // even if they don't exist.
    if (!interesting.containsKey(contentType)) {
        interesting.put(contentType, "");
    }
    if (!interesting.containsKey(contentMd5)) {
        interesting.put(contentMd5, "");
    }
    // Add all the interesting headers
    for (Iterator<Map.Entry<String, String>> i = interesting.entrySet().iterator(); i.hasNext();) {
        final Map.Entry<String, String> entry = i.next();
        final String key = entry.getKey();
        final Object value = entry.getValue();
        if (key.startsWith(AMAZON_PREFIX)) {
            buf.append(key).append(':').append(value);
        } else {
            buf.append(value);
        }
        buf.append(LINE_SEPARATOR_UNIX);
    }
    // The CanonicalizedResource this request is working with.
    // If the request specifies a bucket using the HTTP Host header
    // (virtual hosted-style), append the bucket name preceded by a
    // "/" (e.g., "/bucketname"). For path-style requests and requests
    // that don't address a bucket, do nothing.
    if (request.getResource() != null) {
        buf.append("/" + request.getResource() + request.getURI().getRawPath());
    } else {
        buf.append(request.getURI().getRawPath());
    }
    // Amazon requires us to sort the query string parameters.
    final List<SortableBasicNameValuePair> params = sortParams(URLEncodedUtils.parse(request.getURI(), UTF_8));
    String separator = "?";
    for (final NameValuePair pair : params) {
        final String name = pair.getName(), value = pair.getValue();
        // Skip any parameters that aren't part of the
        // canonical signed string.
        if (!INTERESTING_PARAMETERS.contains(name)) {
            continue;
        }
        buf.append(separator).append(name);
        if (value != null) {
            buf.append("=").append(value);
        }
        separator = "&";
    }
    return buf.toString();
}
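
The two containsKey checks for Content-Type and Content-MD5 only insert an empty placeholder when the header is missing; on Java 8+ the same effect could be written with putIfAbsent:

// same placeholder behaviour as the containsKey checks above
interesting.putIfAbsent(contentType, "");
interesting.putIfAbsent(contentMd5, "");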

From source file:de.tudarmstadt.ukp.experiments.argumentation.sequence.evaluation.F1ScoreTableAggregator.java

public static void evaluatePredictionsFoldersDomain(File masterFolder, String folderContains) throws Exception {
    SortedMap<String, SortedMap<String, String>> featureSetsResults = new TreeMap<>();

    File[] foldersFeatureSets = masterFolder.listFiles(EvalHelper.DIRECTORY_FILTER);

    for (File folderFeatureSet : foldersFeatureSets) {
        String[] split = folderFeatureSet.getName().split("_");
        String featureSet = split[0];
        String paramE = split[1];
        String paramT = split[2];

        if ("e0".equals(paramE) && "t1".equals(paramT)) {

            Map<String, File> foldersData = EvalHelper.listSubFoldersAndRemoveUUID(folderFeatureSet);
            for (Map.Entry<String, File> folderData : foldersData.entrySet()) {

                String data = folderData.getKey();

                if (data.contains(folderContains)) {
                    File resultSummary = new File(folderData.getValue(), "resultSummary.txt");

                    List<String> values = extractValues(resultSummary);
                    String macroF1 = values.get(0);

                    if (!featureSetsResults.containsKey(featureSet)) {
                        featureSetsResults.put(featureSet, new TreeMap<String, String>());
                    }

                    String domainName = data.split("_")[2];

                    featureSetsResults.get(featureSet).put(domainName, macroF1);
                }
            }
        }
    }

    // print results
    int rows = featureSetsResults.values().iterator().next().size();
    System.out.printf("\t");

    for (String featureSet : featureSetsResults.keySet()) {
        System.out.printf("%s\t", featureSet);
    }
    System.out.println();
    for (int i = 0; i < rows; i++) {
        //            Set<String> keySet = featureSetsResults.values().iterator().next().keySet();
        SortedMap<String, String> firstColumn = featureSetsResults.values().iterator().next();
        List<String> keys = new ArrayList<>(firstColumn.keySet());
        System.out.printf("%s\t", keys.get(i));

        for (SortedMap<String, String> values : featureSetsResults.values()) {
            System.out.printf("%s\t", values.get(keys.get(i)));
        }

        System.out.println();
    }
}

From source file:org.openmrs.module.drughistory.api.impl.DrugSnapshotServiceImpl.java

private Map<Person, SortedMap<Date, List<DrugEvent>>> groupByPersonAndDate(List<DrugEvent> events) {
    Map<Person, SortedMap<Date, List<DrugEvent>>> m = new HashMap<Person, SortedMap<Date, List<DrugEvent>>>();
    for (DrugEvent de : events) {
        if (!m.containsKey(de.getPerson())) {
            m.put(de.getPerson(), new TreeMap<Date, List<DrugEvent>>());
        }
        SortedMap<Date, List<DrugEvent>> mm = m.get(de.getPerson());
        if (!mm.containsKey(de.getDateOccurred())) {
            mm.put(de.getDateOccurred(), new ArrayList<DrugEvent>());
        }
        mm.get(de.getDateOccurred()).add(de);
    }
    return m;
}
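
The same two-level grouping can also be expressed with streams, requesting a TreeMap at the inner level so the result still satisfies SortedMap. A sketch assuming only the getPerson()/getDateOccurred() accessors used above (requires java.util.stream.Collectors):

// equivalent grouping with Collectors.groupingBy (Java 8+)
Map<Person, SortedMap<Date, List<DrugEvent>>> grouped = events.stream()
        .collect(Collectors.groupingBy(DrugEvent::getPerson,
                Collectors.groupingBy(DrugEvent::getDateOccurred,
                        TreeMap::new, Collectors.toList())));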

From source file:uk.ac.ucl.excites.sapelli.shared.io.text.CharsetHelpers.java

/**
 * Parses all .charsetinfo files in the input folder and works out the maximum
 * "maxBytesPerChar" value for each known Charset across all input files.
 * If this summarised information is in any way different from the information in
 * {@link #CharsetMaxMaxBytesPerCharBundle} (or if {@code force} is {@code true}),
 * then a new CharsetMaxMaxBytesPerChar.properties file is created (or overwritten!)
 * in the output folder.
 * 
 * This method is called from a Groovy script in the pom.xml of the Sapelli Library.
 * 
 * @param inputFolderPath path of a directory containing CharsetInfo files (with *.charsetinfo extension!) to process
 * @param outputFolderPath path of a (resource) directory in which to create the new/updated CharsetMaxMaxBytesPerChar.properties file
 * @param force when {@code true} a new CharsetMaxMaxBytesPerChar.properties file will always be generated
 * @return whether or not a new or updated CharsetMaxMaxBytesPerChar.properties file was created
 * @throws IOException
 */
static public final boolean GeneratePropertiesFile(String inputFolderPath, String outputFolderPath,
        boolean force) throws IOException {
    File inputFolder = new File(inputFolderPath);
    if (!inputFolder.isDirectory() || !inputFolder.exists())
        throw new IllegalArgumentException("Please provide a valid and existing folder!");

    SortedMap<String, String> fileHeaders = new TreeMap<String, String>();
    SortedMap<String, Float> tempCN2MBPC = new TreeMap<String, Float>();

    // Process all charsetinfo files in the folder:
    for (File charsetFile : inputFolder
            .listFiles((FileFilter) new WildcardFileFilter("*." + CHARSETINFO_FILE_EXTENSION))) {
        try (UnicodeBOMInputStream input = new UnicodeBOMInputStream(new FileInputStream(charsetFile));
                BufferedReader reader = new BufferedReader(input.getReader(CHARSETINFO_FILE_CHARSET))) {
            String line;
            while ((line = reader.readLine()) != null) {
                if (line.isEmpty())
                    continue; // skip blank lines
                if (line.charAt(0) == COMMENT_LINE_MARKER) {
                    if (!fileHeaders.containsKey(charsetFile.getName()))
                        fileHeaders.put(charsetFile.getName(), line.substring(1).trim());
                    continue; // skip comment lines
                }
                // Parse Charset info:
                CharsetInfo csInfo = CharsetInfo.Parse(line);
                // Store/replace max maxBytesPerChar value:
                if (csInfo.maxBytesPerChar > 0.0f && (!tempCN2MBPC.containsKey(csInfo.name)
                        || tempCN2MBPC.get(csInfo.name).floatValue() < csInfo.maxBytesPerChar))
                    tempCN2MBPC.put(csInfo.name, csInfo.maxBytesPerChar);
            }
            if (!fileHeaders.containsKey(charsetFile.getName()))
                fileHeaders.put(charsetFile.getName(), "");
        }
    }

    // Compare information loaded from charsetinfo files with that in the resource bundle:
    boolean different = force || CharsetMaxMaxBytesPerCharBundle == null
            || tempCN2MBPC.size() != getNumberOfKnownCharsets();
    if (!different)
        for (Map.Entry<String, Float> mapping : tempCN2MBPC.entrySet())
            try {
                if (!Float.valueOf(CharsetMaxMaxBytesPerCharBundle.getString(mapping.getKey()))
                        .equals(mapping.getValue())) // getString throws Exception if key is not found
                    throw new Exception("maxBytesPerChar mismatch!");
            } catch (Exception e) {
                different = true;
                break;
            }

    // If the information is different...
    if (different) { // Write new properties file (in package-specific subfolder of the given output folder):
        File outputFolder = new File(new File(outputFolderPath),
                ClassHelpers.classPackageAsResourcePath(CharsetHelpers.class));
        FileHelpers.createDirectory(outputFolder);
        try (FileOutputStream fos = new FileOutputStream(
                new File(outputFolder, CMMBPC_PROPERTIES_FILE_NAME + "." + PROPERTIES_FILE_EXTENSION));
                BufferedWriter writer = new BufferedWriter(
                        new OutputStreamWriter(fos, PROPERTIES_FILE_CHARSET))) {
            // Header:
            writer.write(COMMENT_LINE_MARKER + " Generated on "
                    + TimeUtils.getISOTimestamp(System.currentTimeMillis(), false) + " from input files:"
                    + EOL);
            for (Map.Entry<String, String> fileAndHeader : fileHeaders.entrySet()) {
                writer.write(COMMENT_LINE_MARKER + "\t- " + fileAndHeader.getKey() + ":" + EOL);
                writer.write(COMMENT_LINE_MARKER + "\t\t" + fileAndHeader.getValue() + EOL);
            }
            writer.write(EOL);
            // Body:
            for (Map.Entry<String, Float> mapping : tempCN2MBPC.entrySet())
                writer.write(mapping.getKey() + "=" + mapping.getValue().toString() + EOL);
        }
    }

    return different;
}
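
The containsKey-plus-comparison that keeps the largest maxBytesPerChar per Charset could also be written with Map.merge (Java 8+); a sketch of just that step:

// keep the larger of the existing and the newly parsed value
if (csInfo.maxBytesPerChar > 0.0f)
    tempCN2MBPC.merge(csInfo.name, csInfo.maxBytesPerChar, Math::max);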

From source file:cz.muni.fi.mir.controllers.StatisticsController.java

private ModelMap prepareStatisticsModelMap(Statistics stat, Map<Long, DateTime> dropdownMap) {
    Map<Pair<Configuration, Revision>, SortedMap<String, Integer>> map = new HashMap<>();
    Map<String, Integer> graph = new HashMap<>();
    SortedSet<String> columns = new TreeSet<>();

    for (StatisticsHolder sh : stat.getStatisticsHolders()) {
        Pair<Configuration, Revision> key = new Pair<>(sh.getConfiguration(), sh.getRevision());

        SortedMap<String, Integer> keyValues = null;

        if (map.containsKey(key)) {
            keyValues = map.get(key);

            addOrIncrement(sh.getAnnotation(), sh.getCount(), keyValues);
            addOrIncrement(sh.getAnnotation(), sh.getCount(), graph);
        } else {
            keyValues = new TreeMap<>();
            keyValues.put(sh.getAnnotation(), sh.getCount());
            addOrIncrement(sh.getAnnotation(), sh.getCount(), graph);
        }

        map.put(key, keyValues);
        columns.add(sh.getAnnotation());
    }

    //postprocessing to fill empty columns
    for (Pair<Configuration, Revision> pair : map.keySet()) {
        SortedMap<String, Integer> values = map.get(pair);
        for (String s : columns) {
            if (!values.containsKey(s)) {
                values.put(s, 0);
            }
        }

        map.put(pair, values);
    }

    ModelMap mm = new ModelMap();

    mm.addAttribute("statisticsMap", map);
    mm.addAttribute("statisticsColumns", columns);
    mm.addAttribute("statisticsDate", stat.getCalculationDate());
    mm.addAttribute("formulaCount", stat.getTotalFormulas());
    mm.addAttribute("coCount", stat.getTotalCanonicOutputs());
    mm.addAttribute("graph", graph);
    mm.addAttribute("statisticsDropdown", dropdownMap);

    return mm;
}

From source file:org.wso2.carbon.metrics.impl.ReporterTest.java

public void testJMXReporter() {
    AttributeList meterAttributes = getAttributes(meterName, "Count");
    SortedMap<String, Object> meterMap = values(meterAttributes);
    assertTrue("Meter is available", meterMap.containsKey("Count"));
    assertTrue("Meter count is one", meterMap.containsValue(1L));

    AttributeList gaugeAttributes = getAttributes(gaugeName, "Value");
    SortedMap<String, Object> gaugeMap = values(gaugeAttributes);
    assertTrue("Gauge is available", gaugeMap.containsKey("Value"));
    assertTrue("Gauge value is one", gaugeMap.containsValue(1));
}

From source file:de.tudarmstadt.ukp.dkpro.tc.svmhmm.writer.SVMHMMDataWriterTest.java

@Test
public void testMetDataFeatures() throws Exception {
    String longText = "rO0ABXNyABNqYXZhLnV0aWwuQXJyYXlMaXN0eIHSHZnHYZ0DAAFJAARzaXpleHAAAAAedwQAAAAedAABT3EAfgACcQB%2BAAJxAH4AAnEAfgACcQB%2BAAJxAH4AAnEAfgACcQB%2BAAJxAH4AAnEAfgACcQB%2BAAJxAH4AAnEAfgACcQB%2BAAJxAH4AAnEAfgACcQB%2BAAJxAH4AAnEAfgACcQB%2BAAJxAH4AAnEAfgACcQB%2BAAJxAH4AAnEAfgACcQB%2BAAJxAH4AAnEAfgACcQB%2BAAJ4";

    featureStore = new SparseFeatureStore();
    Feature f1 = new Feature(OriginalTextHolderFeatureExtractor.ORIGINAL_TEXT, "multi line \n text");
    Feature f2 = new Feature(SVMHMMDataWriter.META_DATA_FEATURE_PREFIX + "someFeature", longText);

    Instance instance = new Instance(Arrays.asList(f1, f2), "outcome");
    featureStore.addInstance(instance);

    SVMHMMDataWriter svmhmmDataWriter = new SVMHMMDataWriter();
    System.out.println(featureStore.getNumberOfInstances());
    svmhmmDataWriter.write(temporaryFolder.getRoot(), featureStore, false, null, false);

    File featureVectorsFile = new File(temporaryFolder.getRoot(), "feature-vectors.txt");
    List<String> lines = IOUtils.readLines(new FileInputStream(featureVectorsFile));
    System.out.println(lines);

    assertEquals("outcome",
            SVMHMMUtils.extractOutcomeLabelsFromFeatureVectorFiles(featureVectorsFile).iterator().next());

    assertEquals(Integer.valueOf(0),
            SVMHMMUtils.extractOriginalSequenceIDs(featureVectorsFile).iterator().next());

    SortedMap<String, String> metaDataFeatures = SVMHMMUtils.extractMetaDataFeatures(featureVectorsFile).get(0);

    assertTrue(metaDataFeatures.containsKey(SVMHMMDataWriter.META_DATA_FEATURE_PREFIX + "someFeature"));
    assertEquals(longText, metaDataFeatures.get(SVMHMMDataWriter.META_DATA_FEATURE_PREFIX + "someFeature"));

}

From source file:com.octo.mbo.domain.ApproachingMatcher.java

/**
 * Sort the list of keys according to their distance from the reference key.
 * @param keysOfSlides List of keys
 * @param key A reference key that will be compared to each String
 * @return A map whose keys are the distances and whose values are the sets of keys at the corresponding distance.
 */
public SortedMap<Integer, Set<String>> sortByDistanceWithKey(Collection<String> keysOfSlides, String key) {
    assert keysOfSlides != null;
    assert key != null;

    SortedMap<Integer, Set<String>> keysSortedByDistance = new TreeMap<>();

    for (String slideKey : keysOfSlides) {
        int distK2k = levenshteinDistance.apply(normalize(key), normalize(slideKey));
        if (keysSortedByDistance.containsKey(distK2k)) {
            keysSortedByDistance.get(distK2k).add(slideKey);
        } else {
            keysSortedByDistance.put(distK2k, new HashSet<>((Collections.singletonList(slideKey))));
        }
    }

    log.trace("Sort by least distance to '{}' after normalization : {}", key, keysSortedByDistance);

    return keysSortedByDistance;
}
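
Because the returned map is sorted by distance, the closest candidates are simply the set stored under firstKey(). A usage sketch, assuming an ApproachingMatcher instance named matcher and a collection keysOfSlides (both names hypothetical here):

SortedMap<Integer, Set<String>> byDistance = matcher.sortByDistanceWithKey(keysOfSlides, "Some title");
if (!byDistance.isEmpty()) {
    // the smallest distance comes first in the sorted map
    Set<String> closest = byDistance.get(byDistance.firstKey());
    System.out.println("Closest keys: " + closest);
}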