Example usage for java.util SortedMap containsKey

Introduction

On this page you can find example usages of java.util.SortedMap#containsKey, collected from open-source projects.

Prototype

boolean containsKey(Object key);

Document

Returns true if this map contains a mapping for the specified key.
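
Before the project examples below, here is a minimal, self-contained sketch of the call; the class name and map contents are made up for illustration:

import java.util.SortedMap;
import java.util.TreeMap;

public class ContainsKeyDemo {

    public static void main(String[] args) {
        // TreeMap is the standard SortedMap implementation in java.util
        SortedMap<String, Integer> counts = new TreeMap<>();
        counts.put("alpha", 1);

        // containsKey reports whether a mapping exists for the given key
        System.out.println(counts.containsKey("alpha")); // true
        System.out.println(counts.containsKey("beta"));  // false
    }
}

Note that the parameter is typed Object; for a TreeMap using natural ordering, passing a key that cannot be compared to the existing keys throws a ClassCastException rather than returning false.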

Usage

From source file:uk.org.sappho.applications.transcript.service.registry.WorkingCopy.java

public void deleteProperty(String environment, String application, String key) throws TranscriptException {

    synchronized (getLock()) {
        SortedMap<String, Object> properties = getJsonProperties(environment, application);
        if (properties.containsKey(key)) {
            properties.remove(key);
            putProperties(environment, application, properties);
        }
    }
}

From source file:org.codetrack.database.file.FileProject.java

/**
 * Find a ProjectItem instance by its id
 * @param clazz the class of the ProjectItem to look up
 * @param id the id of the ProjectItem
 * @return the ProjectItem instance, or null if not found
 */
//@Override
public ProjectItem findById(Class clazz, String id) {

    if (itemsMap.containsKey(clazz)) {

        SortedMap<String, ProjectItem> map = itemsMap.get(clazz);
        if (map.containsKey(id))
            return map.get(id);
    }

    return null;

}

From source file:org.codetrack.database.file.FileProject.java

/**
 * Add a ProjectItem instance to the project
 * @param projectItem the item to add
 * @return this Project
 */
//@Override
public Project add(ProjectItem projectItem) {

    SortedMap<String, ProjectItem> map = lazyMap(projectItem.getClass());

    if (!map.containsKey(projectItem.getId()))
        map.put(projectItem.getId(), projectItem);

    return this;

}

From source file:org.fcrepo.kernel.impl.rdf.impl.RootRdfContext.java

private void concatRepositoryTriples() throws RepositoryException {
    LOGGER.trace("Creating RDF triples for repository description");
    final Repository repository = resource().getNode().getSession().getRepository();

    final ImmutableSet.Builder<Triple> b = builder();

    for (final String key : repository.getDescriptorKeys()) {
        final String descriptor = repository.getDescriptor(key);
        if (descriptor != null) {
            // Create a URI from the jcr.Repository constant values,
            // converting them from dot notation (identifier.stability)
            // to the camel case that is more common in RDF properties.
            final StringBuilder uri = new StringBuilder(REPOSITORY_NAMESPACE);
            uri.append("repository");
            for (final String segment : key.split("\\.")) {
                uri.append(StringUtils.capitalize(segment));
            }
            b.add(create(subject(), createURI(uri.toString()), createLiteral(descriptor)));
        }
    }
    final NodeTypeManager nodeTypeManager = resource().getNode().getSession().getWorkspace()
            .getNodeTypeManager();

    final NodeTypeIterator nodeTypes = nodeTypeManager.getAllNodeTypes();
    while (nodeTypes.hasNext()) {
        final NodeType nodeType = nodeTypes.nextNodeType();
        b.add(create(subject(), HAS_NODE_TYPE.asNode(), createLiteral(nodeType.getName())));
    }

    /*
    FIXME: removing due to performance problems, esp. w/ many files on federated filesystem
    see: https://www.pivotaltracker.com/story/show/78647248
            
    b.add(create(subject(), HAS_OBJECT_COUNT.asNode(), createLiteral(String
            .valueOf(getRepositoryCount(repository)))));
    b.add(create(subject(), HAS_OBJECT_SIZE.asNode(), createLiteral(String
            .valueOf(getRepositorySize(repository)))));
    */

    // Get the cluster configuration, if available
    // this ugly test checks to see whether this is an ordinary JCR
    // repository or a ModeShape repo, which will possess the extra info
    if (JcrRepository.class.isAssignableFrom(repository.getClass())) {
        final Map<String, String> config = new GetClusterConfiguration().apply(repository);
        assert (config != null);

        for (final Map.Entry<String, String> entry : config.entrySet()) {
            b.add(create(subject(), createURI(REPOSITORY_NAMESPACE + entry.getKey()),
                    createLiteral(entry.getValue())));
        }
    }

    // retrieve the metrics from the service
    final SortedMap<String, Counter> counters = registryService.getMetrics().getCounters();
    // and add the repository metrics to the RDF model; each lookup is guarded
    // with containsKey, and the guard and the get() must use the same registry key
    final String fixityCheckKey = "LowLevelStorageService.fixity-check-counter";
    if (counters.containsKey(fixityCheckKey)) {
        b.add(create(subject(), HAS_FIXITY_CHECK_COUNT.asNode(),
                createTypedLiteral(counters.get(fixityCheckKey).getCount()).asNode()));
    }

    final String fixityErrorKey = "LowLevelStorageService.fixity-error-counter";
    if (counters.containsKey(fixityErrorKey)) {
        b.add(create(subject(), HAS_FIXITY_ERROR_COUNT.asNode(),
                createTypedLiteral(counters.get(fixityErrorKey).getCount()).asNode()));
    }

    final String fixityRepairedKey = "LowLevelStorageService.fixity-repaired-counter";
    if (counters.containsKey(fixityRepairedKey)) {
        b.add(create(subject(), HAS_FIXITY_REPAIRED_COUNT.asNode(),
                createTypedLiteral(counters.get(fixityRepairedKey).getCount()).asNode()));
    }

    // offer all these accumulated triples
    concat(b.build());
}

From source file:de.tudarmstadt.ukp.experiments.argumentation.convincingness.sampling.Step6GraphTransitivityCleaner.java

public static SortedMap<String, DescriptiveStatistics> collectStatisticsOverGraphCleaningResults(
        Collection<GraphCleaningResults> results) throws IllegalAccessException {

    SortedMap<String, DescriptiveStatistics> result = new TreeMap<>();

    for (GraphCleaningResults r : results) {
        Field[] declaredFields = GraphCleaningResults.class.getDeclaredFields();
        //            System.out.println(Arrays.toString(declaredFields));
        for (Field field : declaredFields) {
            String fieldName = field.getName();

            if (!result.containsKey(fieldName)) {
                result.put(fieldName, new DescriptiveStatistics());
            }

            Object value = field.get(r);

            double doubleVal;

            if (value instanceof Integer) {
                doubleVal = ((Integer) value).doubleValue();
            } else if (value instanceof Double) {
                doubleVal = (Double) value;
            } else {
                throw new IllegalStateException("Unkown type " + value.getClass());
            }

            //                System.out.println(doubleVal);

            result.get(fieldName).addValue(doubleVal);
        }

    }

    return result;
}

From source file:at.christophwurst.orm.service.BurnDownServiceImpl.java

@Override
public Map<Date, Float> getBurnDownData(Long sprintId) {
    Map<Date, Float> result = new HashMap<>();
    Sprint sprint = sprintRepository.getSprintAndWorklogs(sprintId);

    int totalTime = sprint.getRequirements().stream().mapToInt((s) -> {
        return s.getTasks().stream().mapToInt(Task::getEstimatedTime).sum();
    }).sum();

    SortedMap<Date, List<LogbookEntry>> workLogs = new TreeMap<>();
    sprint.getRequirements().forEach((r) -> {
        r.getTasks().forEach((t) -> {
            t.getLogbookEntries().forEach((e) -> {
                Date day = stripDate(e.getStartTime());
                if (!workLogs.containsKey(day)) {
                    workLogs.put(day, new ArrayList<>());
                }
                workLogs.get(day).add(e);
            });
        });
    });

    int left = totalTime;
    for (Map.Entry<Date, List<LogbookEntry>> entry : workLogs.entrySet()) {
        long doneMillis = entry.getValue().stream().mapToLong(LogbookEntry::getTotalTime).sum();
        int done = (int) (doneMillis / (1000 * 3600)); // convert milliseconds to hours
        left -= done;
        result.put(entry.getKey(), (float) left / totalTime);
    }
    return result;
}

From source file:net.ripe.rpki.commons.crypto.rfc3779.ResourceExtensionParser.java

/**
 * Parses the IP address blocks extension and merges all address families
 * into a single {@link IpResourceSet} containing both IPv4 and IPv6
 * addresses. Maps an {@link AddressFamily} to <code>null</code> when the
 * resources of this type are inherited. If no resources are specified, the
 * family is mapped to an empty resource set.
 */
public SortedMap<AddressFamily, IpResourceSet> parseIpAddressBlocks(byte[] extension) {
    ASN1Primitive octetString = decode(extension);
    expect(octetString, ASN1OctetString.class);
    ASN1OctetString o = (ASN1OctetString) octetString;
    SortedMap<AddressFamily, IpResourceSet> map = derToIpAddressBlocks(decode(o.getOctets()));

    for (AddressFamily family : SUPPORTED_ADDRESS_FAMILIES) {
        if (!map.containsKey(family)) {
            map.put(family, new IpResourceSet());
        }
    }

    for (AddressFamily addressFamily : map.keySet()) {
        Validate.isTrue(!addressFamily.hasSubsequentAddressFamilyIdentifier(), "SAFI not supported");
    }

    return map;
}

From source file:org.apache.hadoop.hbase.regionserver.tableindexed.IndexedRegion.java

/** Ask whether this update applies to the index.
 *
 * @param indexSpec the index specification to check against
 * @param columnValues the column values of the update
 * @return true if the update may apply to the index.
 */
private boolean doesApplyToIndex(IndexSpecification indexSpec, SortedMap<byte[], byte[]> columnValues) {

    for (byte[] neededCol : indexSpec.getIndexedColumns()) {
        if (!columnValues.containsKey(neededCol)) {
            LOG.debug("Index [" + indexSpec.getIndexId() + "] can't be updated because ["
                    + Bytes.toString(neededCol) + "] is missing");
            return false;
        }
    }
    return true;
}

From source file:com.illustrationfinder.process.post.HtmlPostProcessor.java

@Override
public List<String> generateKeywords() {
    // TODO If two words are always close to each other, they should be considered as an expression and managed like one word
    if (this.url == null)
        return null;

    try {
        // Retrieve the document and store it temporary
        try (final InputStream stream = this.url.openStream()) {
            final String rawText = IOUtils.toString(stream);

            // Retrieve useful HTML data
            final Document document = Jsoup.parse(rawText);

            String htmlTitle = document.title();
            String htmlKeywords = document.select("meta[name=keywords]").text();
            String htmlDescription = document.select("meta[name=description]").text();

            // Extract the content of the raw text
            String content = ArticleExtractor.getInstance().getText(rawText);

            // Now we apply a simple algorithm to get keywords
            //  1) We remove all punctuation marks from the title
            //  2) We remove all words with less than 4 characters
            //  3) We remove excessive spacing and tabulations

            htmlTitle = htmlTitle.toLowerCase();
            htmlTitle = htmlTitle.replaceAll(PUNCTUATION_REGEX, "");
            htmlTitle = htmlTitle.replaceAll(WORD_WITH_LESS_THAN_4_CHARACTERS_REGEX, "");
            htmlTitle = htmlTitle.replaceAll(EXCESSIVE_SPACING_REGEX, " ");

            final List<String> keywords = new ArrayList<>();
            final List<String> keywordsList = Arrays.asList(htmlTitle.split(" "));
            for (String tmp : keywordsList) {
                if (tmp.length() >= MINIMUM_WORD_LENGTH) {
                    keywords.add(tmp);
                }
            }

            // If there is enough keywords, we return
            if (keywords.size() >= MINIMUM_KEYWORDS_COUNT) {
                return keywords;
            } else {
                // Otherwise, we look for more keywords from the text by taking the more frequent words
                content = content.toLowerCase();
                content = content.replaceAll(PUNCTUATION_REGEX, "");
                content = content.replaceAll(WORD_WITH_LESS_THAN_4_CHARACTERS_REGEX, "");
                content = content.replaceAll(EXCESSIVE_SPACING_REGEX, " ");

                final Map<String, Integer> frequencies = new HashMap<>();
                final String[] words = content.split(" ");

                // Count word frequencies
                for (final String word : words) {
                    if (frequencies.containsKey(word)) {
                        frequencies.put(word, frequencies.get(word) + 1);
                    } else {
                        frequencies.put(word, 1);
                    }
                }

                // Sort the words per frequency
                final SortedMap<Integer, HashSet<String>> sortedWords = new TreeMap<>();

                for (Map.Entry<String, Integer> entry : frequencies.entrySet()) {
                    if (sortedWords.containsKey(entry.getValue())) {
                        sortedWords.get(entry.getValue()).add(entry.getKey());
                    } else {
                        final HashSet<String> set = new HashSet<>();
                        set.add(entry.getKey());
                        sortedWords.put(entry.getValue(), set);
                    }
                }

                // Add the most frequent words until we reach the minimum keywords count
                while (keywords.size() < MINIMUM_KEYWORDS_COUNT && !sortedWords.isEmpty()) {
                    final HashSet<String> set = sortedWords.get(sortedWords.lastKey());
                    final String keyword = set.iterator().next();

                    set.remove(keyword);
                    if (set.size() == 0) {
                        sortedWords.remove(sortedWords.lastKey());
                    }

                    if (keyword.length() > MINIMUM_WORD_LENGTH) {
                        keywords.add(keyword);
                    }
                }

                return keywords;
            }
        }
    } catch (BoilerpipeProcessingException e) {
        // TODO
        e.printStackTrace();
    } catch (IOException e) {
        // TODO
        e.printStackTrace();
    }

    return null;
}

From source file:voldemort.routing.ConsistentRoutingStrategy.java

public ConsistentRoutingStrategy(HashFunction hash, Collection<Node> nodes, int numReplicas) {
    this.numReplicas = numReplicas;
    this.hash = hash;
    // sanity check that we don't assign the same partition to multiple nodes
    SortedMap<Integer, Node> m = new TreeMap<Integer, Node>();
    for (Node n : nodes) {
        for (Integer partition : n.getPartitionIds()) {
            if (m.containsKey(partition))
                throw new IllegalArgumentException(
                        "Duplicate partition id " + partition + " in cluster configuration " + nodes);
            m.put(partition, n);
        }
    }

    this.partitionToNode = new Node[m.size()];
    for (int i = 0; i < m.size(); i++) {
        if (!m.containsKey(i))
            throw new IllegalArgumentException("Invalid configuration, missing partition " + i);
        this.partitionToNode[i] = m.get(i);
    }
}