Example usage for org.apache.commons.lang3 StringUtils capitalize

List of usage examples for org.apache.commons.lang3 StringUtils capitalize

Introduction

In this page you can find the example usage for org.apache.commons.lang3 StringUtils capitalize.

Prototype

public static String capitalize(final String str) 

Source Link

Document

Capitalizes a String, changing the first letter to title case as per Character#toTitleCase(char).

Usage

From source file:org.gbif.ipt.model.Resource.java

/**
 * Construct the resource citation from its various parts for the version specified.
 * <br/>
 * The citation format is:
 * Creators (PublicationYear): Title. Version. Publisher. ResourceType. Identifier
 *
 * @param version  resource version to use in citation
 * @param homepage homepage URI, used as the identifier of last resort
 *
 * @return generated resource citation string
 */
public String generateResourceCitation(@NotNull BigDecimal version, @NotNull URI homepage) {
    // collect verified authors only (those having both first and last name)
    List<String> authors = Lists.newArrayList();
    for (Agent creator : getEml().getCreators()) {
        String authorName = getAuthorName(creator);
        if (authorName != null) {
            authors.add(authorName);
        }
    }

    StringBuilder citation = new StringBuilder();

    // comma-separated list of authors
    citation.append(StringUtils.join(authors, ", "));

    // year the resource was first published (captured in EML dateStamp)
    int publicationYear = getPublicationYear(getEml().getDateStamp());
    if (publicationYear > 0) {
        citation.append(" (");
        citation.append(publicationYear);
        citation.append("): ");
    }

    // title, falling back to the shortname when the title is blank
    String trimmedTitle = StringUtils.trimToNull(getTitle());
    citation.append(trimmedTitle == null ? getShortname() : trimmedTitle);
    citation.append(". ");

    // version, e.g. "v1.2"
    citation.append("v");
    citation.append(version.toPlainString());
    citation.append(". ");

    // publishing organisation, when one is set and non-blank
    String publisher = (getOrganisation() == null) ? null : StringUtils.trimToNull(getOrganisation().getName());
    if (publisher != null) {
        citation.append(publisher);
        citation.append(". ");
    }

    // ResourceTypeGeneral/ResourceType, e.g. Dataset/Occurrence, Dataset/Checklist
    citation.append("Dataset");
    if (getCoreType() != null) {
        citation.append("/");
        citation.append(StringUtils.capitalize(getCoreType().toLowerCase()));
    }
    citation.append(". ");

    // identifier: prefer the DOI URL (DataCite recommends a linkable, permanent URL),
    // then the citation identifier, and finally the IPT homepage
    if (getDoi() != null) {
        citation.append(getDoi().getUrl());
    } else if (getEml().getCitation() != null && !Strings.isNullOrEmpty(getEml().getCitation().getIdentifier())) {
        citation.append(getEml().getCitation().getIdentifier());
    } else {
        citation.append(homepage.toString());
    }
    return citation.toString();
}

From source file:org.gbif.ipt.service.manage.impl.ResourceManagerImplTest.java

/**
 * Test resource retrieval from resource.xml file. The loadFromDir method is responsible for this retrieval.
 * Creates a resource with an SQL source, saves it, reloads it from its directory on disk, and verifies
 * that the resource and source properties survive the round trip.
 */
@Test
public void testLoadFromDir()
        throws IOException, SAXException, ParserConfigurationException, AlreadyExistingException {
    ResourceManagerImpl resourceManager = getResourceManagerImpl();

    String shortName = "ants";

    // create a new resource.
    resourceManager.create(shortName, DATASET_TYPE_OCCURRENCE_IDENTIFIER, creator);
    // get added resource.
    Resource addedResource = resourceManager.get(shortName);
    // indicate it is a dataset subtype Specimen
    addedResource.setSubtype(DATASET_SUBTYPE_SPECIMEN_IDENTIFIER);

    // add SQL source, and save resource
    SqlSource source = new SqlSource();
    // connection/db params (dummy test fixture values only)
    source.setName("danbif_db_source");
    source.setDatabase("DanBIF");
    source.setHost("50.19.64.6");
    source.setPassword("Dan=bif=17=5321");
    source.setUsername("DanBIFUser");
    source.setColumns(44);

    // query
    source.setSql("SELECT * FROM occurrence_record where datasetID=1");

    // other params
    source.setEncoding("UTF-8");
    source.setDateFormat("YYYY-MM-DD");
    source.setReadable(true);

    // rdbms param
    JdbcSupport.JdbcInfo info = support.get("mysql");
    source.setRdbms(info);

    // set resource on source
    source.setResource(addedResource);

    // add source to resource
    addedResource.addSource(source, true);

    // save
    resourceManager.save(addedResource);

    // retrieve resource file
    File resourceFile = mockedDataDir.resourceFile(shortName, "resource.xml");
    assertTrue(resourceFile.exists());

    // retrieve resource directory
    File resourceDir = resourceFile.getParentFile();
    assertTrue(resourceDir.exists());

    // load resource
    Resource persistedResource = resourceManager.loadFromDir(resourceDir);

    // make some assertions about resource
    assertEquals(shortName, persistedResource.getShortname());
    assertEquals(DATASET_TYPE_OCCURRENCE_IDENTIFIER, persistedResource.getCoreType());
    assertEquals(PublicationStatus.PRIVATE, persistedResource.getStatus());
    assertEquals(1, persistedResource.getSources().size());
    assertEquals(0, persistedResource.getEmlVersion());
    assertEquals(0, persistedResource.getRecordsPublished());
    // should be 2 KeywordSets: one for the Dataset Type vocabulary, one for the Dataset Subtype vocabulary
    assertEquals(2, persistedResource.getEml().getKeywords().size());
    assertEquals(StringUtils.capitalize(DATASET_TYPE_OCCURRENCE_IDENTIFIER),
            persistedResource.getEml().getKeywords().get(0).getKeywordsString());
    assertEquals(StringUtils.capitalize(DATASET_SUBTYPE_SPECIMEN_IDENTIFIER),
            persistedResource.getEml().getKeywords().get(1).getKeywordsString());

    // make some assertions about SQL source
    SqlSource persistedSource = (SqlSource) persistedResource.getSources().get(0);
    assertEquals("Dan=bif=17=5321", persistedSource.getPassword());
    assertEquals("danbif_db_source", persistedSource.getName());
    assertEquals("DanBIF", persistedSource.getDatabase());
    assertEquals("50.19.64.6", persistedSource.getHost());
    assertEquals("DanBIFUser", persistedSource.getUsername());
    assertEquals(44, persistedSource.getColumns());
    assertEquals("SELECT * FROM occurrence_record where datasetID=1", persistedSource.getSql());
    assertEquals("com.mysql.jdbc.Driver", persistedSource.getJdbcDriver());
    assertEquals("UTF-8", persistedSource.getEncoding());
    assertEquals("YYYY-MM-DD", persistedSource.getDateFormat());
    assertTrue(persistedSource.isReadable());

}

From source file:org.gbif.ipt.task.Eml2Rtf.java

/**
 * Add taxonomic coverages, writing in this order: description, ranks, then common names.
 *
 * @param doc Document to append to
 * @param eml EML object holding the taxonomic coverages
 * @throws DocumentException if an error occurred adding to the Document
 */
private void addTaxonomicCoverages(Document doc, Eml eml) throws DocumentException {
    // proceed, provided there is at least 1 Taxonomic Coverage to iterate over
    if (exists(eml.getTaxonomicCoverages()) && !eml.getTaxonomicCoverages().isEmpty()) {

        // begin new paragraph
        Paragraph p = new Paragraph();
        p.setAlignment(Element.ALIGN_JUSTIFIED);
        p.setFont(font);
        boolean firstTaxon = true;
        for (TaxonomicCoverage taxcoverage : eml.getTaxonomicCoverages()) {
            // separate successive coverages with a blank line
            if (!firstTaxon) {
                p.add(Chunk.NEWLINE);
            }
            firstTaxon = false;
            p.add(new Phrase(getText("rtf.taxcoverage"), fontTitle));
            p.add(Chunk.NEWLINE);
            p.add(Chunk.NEWLINE);
            // optional free-text description, with Windows line endings normalised
            if (exists(taxcoverage.getDescription())) {
                p.add(new Phrase(getText("rtf.taxcoverage.description") + ": ", fontTitle));
                p.add(taxcoverage.getDescription().replace("\r\n", "\n"));
                p.add(Chunk.NEWLINE);
            }
            Map<String, String> ranks = vocabManager.getI18nVocab(Constants.VOCAB_URI_RANKS,
                    Locale.getDefault().getLanguage(), false);
            boolean firstRank = true;
            // iterate ranks in vocabulary order; for each rank list all matching keywords
            for (String rank : ranks.keySet()) {
                boolean wroteRank = false;
                for (TaxonKeyword keyword : taxcoverage.getTaxonKeywords()) {
                    if (exists(keyword.getRank()) && keyword.getRank().equals(rank)) {
                        if (!wroteRank) {
                            // write the section title once, before the very first rank written
                            if (firstRank) {
                                p.add(new Phrase(getText("rtf.taxcoverage.rank"), fontTitle));
                            }
                            p.add(Chunk.NEWLINE);
                            p.add(StringUtils.capitalize(rank) + ": ");
                            p.add(keyword.getScientificName());
                            wroteRank = true;
                            firstRank = false;
                        } else {
                            // further names for the same rank are comma-separated on the same line
                            p.add(", " + keyword.getScientificName());
                        }
                    }
                }
            }
            p.add(Chunk.NEWLINE);
            // comma-separated list of common names, with a title before the first one
            boolean isFirst = true;
            for (TaxonKeyword keyword : taxcoverage.getTaxonKeywords()) {
                if (exists(keyword.getCommonName())) {
                    if (isFirst) {
                        p.add(new Phrase(getText("rtf.taxcoverage.common") + ": ", fontTitle));
                    } else {
                        p.add(", ");
                    }
                    isFirst = false;
                    p.add(keyword.getCommonName());
                }
            }
        }
        p.add(Chunk.NEWLINE);
        doc.add(p);
        p.clear();
    }
}

From source file:org.gbif.ipt.utils.DataCiteMetadataBuilder.java

/**
 * Retrieve the ResourceType, using the formula Dataset/Resource Type=Resource Core Type.
 *
 * @param resource resource to derive the type from
 *
 * @return DataCite ResourceType, with a value only when the resource has a core type
 */
protected static DataCiteMetadata.ResourceType getResourceType(Resource resource) {
    final DataCiteMetadata.ResourceType resourceType = FACTORY.createDataCiteMetadataResourceType();
    resourceType.setResourceTypeGeneral(ResourceType.DATASET);
    final String coreType = resource.getCoreType();
    if (coreType != null) {
        // NOTE(review): toLowerCase() is locale-sensitive (e.g. Turkish 'I') — confirm a fixed Locale isn't needed
        resourceType.setValue(StringUtils.capitalize(coreType.toLowerCase()));
    }
    return resourceType;
}

From source file:org.gbif.nub.lookup.HigherTaxaLookup.java

/**
 * Sets the synonym lookup map for a given rank.
 * Names will be normalised and checked for existence of the same entry as key or value:
 * an entry whose accepted name is itself listed as a synonym is dropped with a warning.
 *
 * @param rank     rank the synonyms apply to
 * @param synonyms map of synonym name to accepted name
 */
public void setSynonyms(Rank rank, Map<String, String> synonyms) {
    Map<String, String> synonymsNormed = Maps.newHashMap();

    // normalise keys
    for (Entry<String, String> entry : synonyms.entrySet()) {
        synonymsNormed.put(norm(entry.getKey()), entry.getValue());
    }

    // test if synonyms show up as accepted too; iterate over a copy so removal is safe
    Collection<String> candidates = Sets.newHashSet(synonymsNormed.keySet());
    for (String candidate : candidates) {
        if (synonymsNormed.containsKey(synonymsNormed.get(candidate))) {
            log.warn(candidate + " is both synonym and accepted - ignore synonym.");
            synonymsNormed.remove(candidate);
        }
    }

    syn.put(rank, synonymsNormed);
    log.debug("Loaded " + synonyms.size() + " " + rank.name() + " synonyms ");

    // also insert kingdom enum lookup in case of kingdom synonyms
    if (Rank.KINGDOM == rank) {
        Map<String, String> map = syn.get(Rank.KINGDOM);
        if (map != null) {
            // loop variable renamed so it no longer shadows the 'syn' field used above
            for (Entry<String, String> kingdomSynonym : map.entrySet()) {
                Kingdom k = null;
                String key = kingdomSynonym.getValue();
                if (key != null) {
                    key = key.toLowerCase();
                    key = StringUtils.capitalize(key);
                    try {
                        k = Kingdom.valueOf(key);
                    } catch (IllegalArgumentException ignored) {
                        // not a known Kingdom constant — deliberately map this synonym to null
                    }
                }
                this.kingdoms.put(norm(kingdomSynonym.getKey()), k);
            }
        }
        for (Kingdom k : Kingdom.values()) {
            this.kingdoms.put(norm(k.name()), k);
        }
    }

}

From source file:org.gerzog.jstataggr.core.collector.impl.StatisticsCollector.java

/**
 * Generates a new statistics holder class named after {@code className} and returns its
 * collector class info.
 *
 * @param className base name for the generated class (capitalized and package-prefixed)
 * @param fieldInfo statistics fields that contribute members to the generated class
 * @return collector class info for the freshly generated class
 */
protected static CollectorClassInfo generateClassInfo(final String className,
        final Map<IStatisticsField, FieldInfo> fieldInfo) {
    // create an empty class under the package prefix, e.g. "foo" -> <prefix>Foo
    final CtClass generated = ClassPool.getDefault()
            .makeClass(PACKAGE_PREFIX + StringUtils.capitalize(className));

    // let every statistics field generate its members into the new class
    for (final IStatisticsField field : fieldInfo.keySet()) {
        propogate(() -> field.generate(generated));
    }

    // materialize the class and build the class info from it
    return propogate(() -> generateClassInfo(generated.toClass(), fieldInfo));
}

From source file:org.gerzog.jstataggr.core.utils.FieldUtils.java

/**
 * Builds the JavaBean getter name for a field, using the boolean prefix ("is...")
 * for boolean and Boolean fields and the default prefix ("get...") otherwise.
 *
 * @param name field name
 * @param type field type
 * @return getter method name
 */
public static String getGetterName(final String name, final Class<?> type) {
    final boolean isBoolean = type.equals(Boolean.class) || type.equals(boolean.class);
    return (isBoolean ? BOOLEAN_GETTER_PREFIX : DEFAULT_GETTER_PREFIX) + StringUtils.capitalize(name);
}

From source file:org.gerzog.jstataggr.core.utils.FieldUtils.java

/**
 * Builds the JavaBean setter name ("set...") for a field.
 *
 * @param name field name
 * @return setter method name
 */
public static String getSetterName(final String name) {
    return DEFAULT_SETTER_PREFIX + StringUtils.capitalize(name);
}

From source file:org.gerzog.jstataggr.core.utils.FieldUtils.java

/**
 * Builds the updater method name for an aggregated field.
 *
 * @param name            field name
 * @param aggregationType aggregation applied to the field
 * @return updater method name
 */
public static String getUpdaterName(final String name, final AggregationType aggregationType) {
    final String aggregatedField = getAggregationFieldName(name, aggregationType);
    return UPDATER_PREFIX + StringUtils.capitalize(aggregatedField);
}

From source file:org.gerzog.jstataggr.core.utils.FieldUtils.java

/**
 * Derives the name postfix for an aggregation type, e.g. MIN -> "Min".
 *
 * @param aggregationType aggregation type
 * @return capitalized, lower-cased form of the enum constant name
 */
private static String getAggregationPostfix(final AggregationType aggregationType) {
    final String lowered = StringUtils.lowerCase(aggregationType.name());
    return StringUtils.capitalize(lowered);
}