Example usage for org.apache.commons.lang StringUtils abbreviate

Introduction

On this page you can find example usages of org.apache.commons.lang StringUtils#abbreviate, drawn from open-source projects.

Prototype

public static String abbreviate(String str, int maxWidth) 

Document

Abbreviates a String using ellipses.
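For orientation, here is a minimal, self-contained sketch of the call (the demo class and sample strings are ours; the behaviour shown follows the commons-lang javadoc): a String no longer than maxWidth comes back unchanged, a longer one is cut to maxWidth - 3 characters with "..." appended, null passes through as null, and a maxWidth below 4 throws IllegalArgumentException.

import org.apache.commons.lang.StringUtils;

public class AbbreviateDemo {
    public static void main(String[] args) {
        // Shorter than maxWidth: returned unchanged.
        System.out.println(StringUtils.abbreviate("abcdef", 10));                               // abcdef

        // Longer than maxWidth: truncated to maxWidth - 3 characters plus "...".
        System.out.println(StringUtils.abbreviate("Now is the time for all good men", 12));     // Now is th...

        // null input is passed through as null.
        System.out.println(StringUtils.abbreviate(null, 10));                                   // null
    }
}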

Usage

From source file:org.apache.ambari.server.controller.internal.UpgradeResourceProvider.java

private void makeServerSideStage(UpgradeContext context, RequestStageContainer request,
        UpgradeItemEntity entity, ServerSideActionTask task, boolean skippable, boolean allowRetry)
        throws AmbariException {

    Cluster cluster = context.getCluster();

    Map<String, String> commandParams = new HashMap<String, String>();
    commandParams.put(COMMAND_PARAM_CLUSTER_NAME, cluster.getClusterName());
    commandParams.put(COMMAND_PARAM_VERSION, context.getVersion());
    commandParams.put(COMMAND_PARAM_DIRECTION, context.getDirection().name().toLowerCase());

    String itemDetail = entity.getText();
    String stageText = StringUtils.abbreviate(entity.getText(), 255);

    switch (task.getType()) {
    case MANUAL: {
        ManualTask mt = (ManualTask) task;
        itemDetail = mt.message;
        if (null != mt.summary) {
            stageText = mt.summary;
        }
        entity.setText(itemDetail);

        if (null != mt.structuredOut) {
            commandParams.put(COMMAND_PARAM_STRUCT_OUT, mt.structuredOut);
        }

        break;
    }
    case CONFIGURE: {
        ConfigureTask ct = (ConfigureTask) task;
        Map<String, String> configProperties = ct.getConfigurationProperties(cluster);

        // if the properties are empty it means that the conditions in the
        // task did not pass;
        if (configProperties.isEmpty()) {
            stageText = "No conditions were met for this configuration task.";
            itemDetail = stageText;
        } else {
            commandParams.putAll(configProperties);

            // extract the config type, key and value to use to build the
            // summary and detail
            String configType = configProperties.get(ConfigureTask.PARAMETER_CONFIG_TYPE);
            String key = configProperties.get(ConfigureTask.PARAMETER_KEY);
            String value = configProperties.get(ConfigureTask.PARAMETER_VALUE);

            itemDetail = String.format("Updating config %s/%s to %s", configType, key, value);

            if (null != ct.summary) {
                stageText = ct.summary;
            } else {
                stageText = String.format("Updating Config %s", configType);
            }
        }

        entity.setText(itemDetail);
        break;
    }
    default:
        break;
    }

    ActionExecutionContext actionContext = new ActionExecutionContext(cluster.getClusterName(),
            Role.AMBARI_SERVER_ACTION.toString(), Collections.<RequestResourceFilter>emptyList(),
            commandParams);

    actionContext.setTimeout(Short.valueOf((short) -1));
    actionContext.setIgnoreMaintenance(true);

    ExecuteCommandJson jsons = s_commandExecutionHelper.get().getCommandJson(actionContext, cluster);

    Stage stage = s_stageFactory.get().createNew(request.getId().longValue(), "/tmp/ambari",
            cluster.getClusterName(), cluster.getClusterId(), stageText, jsons.getClusterHostInfo(),
            jsons.getCommandParamsForStage(), jsons.getHostParamsForStage());

    stage.setSkippable(skippable);

    long stageId = request.getLastStageId() + 1;
    if (0L == stageId) {
        stageId = 1L;
    }
    stage.setStageId(stageId);
    entity.setStageId(Long.valueOf(stageId));

    // !!! hack hack hack
    String host = cluster.getAllHostsDesiredConfigs().keySet().iterator().next();

    stage.addServerActionCommand(task.getImplementationClass(), getManagementController().getAuthName(),
            Role.AMBARI_SERVER_ACTION, RoleCommand.EXECUTE, cluster.getClusterName(), host,
            new ServiceComponentHostServerActionEvent(StageUtils.getHostName(), System.currentTimeMillis()),
            commandParams, itemDetail, null, Integer.valueOf(1200), allowRetry);

    request.addStages(Collections.singletonList(stage));
}

From source file:org.apache.crunch.impl.mr.plan.DotfileWriter.java

/**
 * Limit a node name length down to {@link #MAX_NODE_NAME_LENGTH}, to ensure valid (and readable) dot files. If the
 * name is already less than or equal to the maximum length, it will be returned untouched.
 *
 * @param nodeName node name to be limited in length
 * @return the abbreviated node name if it was longer than the given maximum allowable length
 */
static String limitNodeNameLength(String nodeName) {
    if (nodeName.length() <= MAX_NODE_NAME_LENGTH) {
        return nodeName;
    }
    String hashString = Integer.toString(nodeName.hashCode());
    return String.format("%s@%s",
            StringUtils.abbreviate(nodeName, MAX_NODE_NAME_LENGTH - (hashString.length() + 1)), hashString);
}

From source file:org.apache.eagle.alert.utils.JsonUtils.java

public static List<String> jsonStringToList(String message) {
    List<String> result = new ArrayList<>();
    try {
        if (!message.isEmpty()) {
            JSONArray jsonArray = new JSONArray(message);
            for (int i = 0; i < jsonArray.length(); ++i) {
                result.add(jsonArray.getString(i));
            }
        }
    } catch (Exception e) {
        LOG.warn("illegal json array message: {}, ignored", StringUtils.abbreviate(message, 50));
    }

    return result;
}

From source file:org.apache.hadoop.hbase.rsgroup.RSGroupAdminServer.java

@Override
public boolean balanceRSGroup(String groupName) throws IOException {
    ServerManager serverManager = master.getServerManager();
    AssignmentManager assignmentManager = master.getAssignmentManager();
    LoadBalancer balancer = master.getLoadBalancer();

    boolean balancerRan;
    synchronized (balancer) {
        if (master.getMasterCoprocessorHost() != null) {
            master.getMasterCoprocessorHost().preBalanceRSGroup(groupName);
        }
        if (getRSGroupInfo(groupName) == null) {
            throw new ConstraintException("Group does not exist: " + groupName);
        }
        // Only allow one balance run at a time.
        Map<String, RegionState> groupRIT = rsGroupGetRegionsInTransition(groupName);
        if (groupRIT.size() > 0) {
            LOG.debug("Not running balancer because " + groupRIT.size() + " region(s) in transition: "
                    + StringUtils.abbreviate(
                            master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(),
                            256));
            return false;
        }
        if (serverManager.areDeadServersInProgress()) {
            LOG.debug("Not running balancer because processing dead regionserver(s): "
                    + serverManager.getDeadServers());
            return false;
        }

        //We balance per group instead of per table
        List<RegionPlan> plans = new ArrayList<RegionPlan>();
        for (Map.Entry<TableName, Map<ServerName, List<HRegionInfo>>> tableMap : getRSGroupAssignmentsByTable(
                groupName).entrySet()) {
            LOG.info("Creating partial plan for table " + tableMap.getKey() + ": " + tableMap.getValue());
            List<RegionPlan> partialPlans = balancer.balanceCluster(tableMap.getValue());
            LOG.info("Partial plan for table " + tableMap.getKey() + ": " + partialPlans);
            if (partialPlans != null) {
                plans.addAll(partialPlans);
            }
        }
        long startTime = System.currentTimeMillis();
        balancerRan = plans != null;
        if (plans != null && !plans.isEmpty()) {
            LOG.info("Group balance " + groupName + " starting with plan count: " + plans.size());
            for (RegionPlan plan : plans) {
                LOG.info("balance " + plan);
                assignmentManager.balance(plan);
            }
            LOG.info("Group balance " + groupName + " completed after "
                    + (System.currentTimeMillis() - startTime) + " seconds");
        }
        if (master.getMasterCoprocessorHost() != null) {
            master.getMasterCoprocessorHost().postBalanceRSGroup(groupName, balancerRan);
        }
    }
    return balancerRan;
}

From source file:org.apache.hadoop.hbase.zookeeper.ZKUtil.java

private static void logRetrievedMsg(final ZooKeeperWatcher zkw, final String znode, final byte[] data,
        final boolean watcherSet) {
    if (!LOG.isTraceEnabled())
        return;
    LOG.trace(zkw.prefix("Retrieved " + ((data == null) ? 0 : data.length) + " byte(s) of data from znode "
            + znode + (watcherSet ? " and set watcher; " : "; data=")
            + (data == null ? "null"
                    : data.length == 0 ? "empty"
                            : (znode.startsWith(zkw.assignmentZNode) ? ZKAssign.toString(data) : // We should not be doing this reaching into another class
                                    znode.startsWith(zkw.metaServerZNode) ? getServerNameOrEmptyString(data)
                                            : znode.startsWith(zkw.backupMasterAddressesZNode)
                                                    ? getServerNameOrEmptyString(data)
                                                    : StringUtils.abbreviate(Bytes.toStringBinary(data),
                                                            32)))));
}

From source file:org.apache.hadoop.hive.ql.exec.Utilities.java

/**
 * convert "From src insert blah blah" to "From src insert ... blah"
 */
public static String abbreviate(String str, int max) {
    str = str.trim();

    int len = str.length();
    int suffixlength = 20;

    if (len <= max) {
        return str;
    }

    suffixlength = Math.min(suffixlength, (max - 3) / 2);
    String rev = StringUtils.reverse(str);

    // get the last few words
    String suffix = WordUtils.abbreviate(rev, 0, suffixlength, "");
    suffix = StringUtils.reverse(suffix);

    // first few ..
    String prefix = StringUtils.abbreviate(str, max - suffix.length());

    return prefix + suffix;
}

From source file:org.apache.myfaces.custom.convertStringUtils.StringUtilsConverter.java

private String format(String val, boolean duringOutput) throws ConverterException {

    String str;
    if (BooleanUtils.isTrue(trim)) {
        str = val.trim();
    } else {
        str = val;
    }
    // Any decorations first
    if (StringUtils.isNotEmpty(format)) {
        if ("uppercase".equalsIgnoreCase(format)) {
            str = StringUtils.upperCase(str);
        } else if ("lowercase".equalsIgnoreCase(format)) {
            str = StringUtils.lowerCase(str);
        } else if ("capitalize".equalsIgnoreCase(format)) {
            str = WordUtils.capitalizeFully(str);
        } else {
            throw new ConverterException("Invalid format '" + format + "'");
        }
    }

    boolean appendEllipses = ((duringOutput)
            && ((null != appendEllipsesDuringOutput) && (appendEllipsesDuringOutput.booleanValue())))
            || ((false == duringOutput)
                    && ((null != appendEllipsesDuringInput) && (appendEllipsesDuringInput.booleanValue())));

    if (appendEllipses) {
        // See if we need to abbreviate/truncate this string
        if (null != maxLength && maxLength.intValue() > 4) {
            str = StringUtils.abbreviate(str, maxLength.intValue());
        }
    } else {
        // See if we need to truncate this string
        if (null != maxLength) {
            str = str.substring(0, maxLength.intValue());
        }
    }
    return str;
}

From source file:org.apache.stanbol.enhancer.engines.opennlp.impl.NEREngineCore.java

public void computeEnhancements(ContentItem ci) throws EngineException {
    // first check the language before processing the content (text)
    String language = extractLanguage(ci);
    if (language == null) {
        throw new IllegalStateException("Unable to extract Language for " + "ContentItem " + ci.getUri()
                + ": This is also checked in the canEnhance "
                + "method! -> This indicated an Bug in the implementation of the " + "EnhancementJobManager!");
    }
    if (!isNerModel(language)) {
        throw new IllegalStateException("For the language '" + language + "' of ContentItem " + ci.getUri()
                + " no NER model is configured: This is also checked in the canEnhance "
                + "method! -> This indicated an Bug in the implementation of the " + "EnhancementJobManager!");
    }
    final AnalysedText at = AnalysedTextUtils.getAnalysedText(ci);
    //validate data in the AnalysedText
    final String text;
    if (at != null && at.getTokens().hasNext()) { //if the AnalysedText is present and tokens are present
        if (log.isDebugEnabled()) {
            log.debug("computeEnhancements from AnalysedText ContentPart of ContentItem {}: text={}",
                    ci.getUri().getUnicodeString(), StringUtils.abbreviate(at.getSpan(), 100));
        }
        text = null;
    } else { //no AnalysedText with tokens ...
        //fallback to processing the plain text is still supported
        Entry<IRI, Blob> contentPart = ContentItemHelper.getBlob(ci, SUPPORTED_MIMETYPES);
        if (contentPart == null) {
            throw new IllegalStateException(
                    "No ContentPart with Mimetype '" + TEXT_PLAIN_MIMETYPE + "' found for ContentItem "
                            + ci.getUri() + ": This is also checked in the canEnhance method! -> This "
                            + "indicated an Bug in the implementation of the " + "EnhancementJobManager!");
        }
        try {
            text = ContentItemHelper.getText(contentPart.getValue());
        } catch (IOException e) {
            throw new InvalidContentException(this, ci, e);
        }
        if (text.trim().length() == 0) {
            // TODO: make the length of the data a field of the ContentItem
            // interface to be able to filter out empty items in the canEnhance
            // method
            log.warn("ContentPart {} of ContentItem {} does not contain any text"
                    + "to extract knowledge from in ContentItem {}", contentPart.getKey(), ci);
            return;
        }
        if (log.isDebugEnabled()) {
            log.debug("computeEnhancements from ContentPart {} of ContentItem {}: text={}", new Object[] {
                    contentPart.getKey(), ci.getUri().getUnicodeString(), StringUtils.abbreviate(text, 100) });
        }
    }
    try {
        if (config.isProcessedLangage(language)) {
            for (String defaultModelType : config.getDefaultModelTypes()) {
                TokenNameFinderModel nameFinderModel = openNLP.getNameModel(defaultModelType, language);
                if (nameFinderModel == null) {
                    log.info("No NER Model for {} and language {} available!", defaultModelType, language);
                } else {
                    findNamedEntities(ci, at, text, language, nameFinderModel);
                }
            }
        } //else do not use default models for languages other than the processed one
          //process for additional models
        for (String additionalModel : config.getSpecificNerModles(language)) {
            TokenNameFinderModel nameFinderModel;
            try {
                nameFinderModel = openNLP.getModel(TokenNameFinderModel.class, additionalModel, null);
                findNamedEntities(ci, at, text, language, nameFinderModel);
            } catch (IOException e) {
                log.warn("Unable to load TokenNameFinderModel model for language '" + language + "' (model: "
                        + additionalModel + ")", e);
            } catch (RuntimeException e) {
                log.warn("Error while creating ChunkerModel for language '" + language + "' (model: "
                        + additionalModel + ")", e);
            }
        }
    } catch (Exception e) {
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        } else {
            throw new EngineException(this, ci, e);
        }
    }
}

From source file:org.apache.stanbol.enhancer.engines.opennlp.impl.NEREngineCore.java

protected void findNamedEntities(final ContentItem ci, final AnalysedText at, final String text,
        final String lang, final TokenNameFinderModel nameFinderModel) {

    if (ci == null) {
        throw new IllegalArgumentException("Parsed ContentItem MUST NOT be NULL");
    }
    if (at == null && text == null) {
        log.warn("NULL was parsed as AnalysedText AND Text for content item " + ci.getUri()
                + ". One of the two MUST BE present! -> call ignored");
        return;
    }
    final Language language;
    if (lang != null && !lang.isEmpty()) {
        language = new Language(lang);
    } else {
        language = null;
    }
    if (log.isDebugEnabled()) {
        log.debug("findNamedEntities model={},  language={}, text=", new Object[] { nameFinderModel, language,
                StringUtils.abbreviate(at != null ? at.getSpan() : text, 100) });
    }
    LiteralFactory literalFactory = LiteralFactory.getInstance();
    Graph g = ci.getMetadata();
    Map<String, List<NameOccurrence>> entityNames;
    if (at != null) {
        entityNames = extractNameOccurrences(nameFinderModel, at, lang);
    } else {
        entityNames = extractNameOccurrences(nameFinderModel, text, lang);
    }
    //lock the ContentItem while writing the RDF data for found Named Entities
    ci.getLock().writeLock().lock();
    try {
        Map<String, IRI> previousAnnotations = new LinkedHashMap<String, IRI>();
        for (Map.Entry<String, List<NameOccurrence>> nameInContext : entityNames.entrySet()) {

            String name = nameInContext.getKey();
            List<NameOccurrence> occurrences = nameInContext.getValue();

            IRI firstOccurrenceAnnotation = null;

            for (NameOccurrence occurrence : occurrences) {
                IRI textAnnotation = EnhancementEngineHelper.createTextEnhancement(ci, this);
                g.add(new TripleImpl(textAnnotation, ENHANCER_SELECTED_TEXT,
                        new PlainLiteralImpl(name, language)));
                g.add(new TripleImpl(textAnnotation, ENHANCER_SELECTION_CONTEXT,
                        new PlainLiteralImpl(occurrence.context, language)));
                if (occurrence.type != null) {
                    g.add(new TripleImpl(textAnnotation, DC_TYPE, occurrence.type));
                }
                if (occurrence.confidence != null) {
                    g.add(new TripleImpl(textAnnotation, ENHANCER_CONFIDENCE,
                            literalFactory.createTypedLiteral(occurrence.confidence)));
                }
                if (occurrence.start != null && occurrence.end != null) {
                    g.add(new TripleImpl(textAnnotation, ENHANCER_START,
                            literalFactory.createTypedLiteral(occurrence.start)));
                    g.add(new TripleImpl(textAnnotation, ENHANCER_END,
                            literalFactory.createTypedLiteral(occurrence.end)));
                }

                // add the subsumption relationship among occurrences of the same
                // name
                if (firstOccurrenceAnnotation == null) {
                    // check already extracted annotations to find a first most
                    // specific occurrence
                    for (Map.Entry<String, IRI> entry : previousAnnotations.entrySet()) {
                        if (entry.getKey().contains(name)) {
                            // we have found a most specific previous
                            // occurrence, use it as subsumption target
                            firstOccurrenceAnnotation = entry.getValue();
                            g.add(new TripleImpl(textAnnotation, DC_RELATION, firstOccurrenceAnnotation));
                            break;
                        }
                    }
                    if (firstOccurrenceAnnotation == null) {
                        // no most specific previous occurrence, I am the first,
                        // most specific occurrence to be later used as a target
                        firstOccurrenceAnnotation = textAnnotation;
                        previousAnnotations.put(name, textAnnotation);
                    }
                } else {
                    // I am referring to a most specific first occurrence of the
                    // same name
                    g.add(new TripleImpl(textAnnotation, DC_RELATION, firstOccurrenceAnnotation));
                }
            }
        }
    } finally {
        ci.getLock().writeLock().unlock();
    }
}

From source file:org.apache.tajo.cli.tools.TajoAdmin.java

private void processList(Writer writer) throws ParseException, IOException, ServiceException, SQLException {

    List<BriefQueryInfo> queryList = tajoClient.getRunningQueryList();
    SimpleDateFormat df = new SimpleDateFormat(DATE_FORMAT);
    StringBuilder builder = new StringBuilder();

    /* print title */
    builder.append(StringUtils.rightPad("QueryId", 21));
    builder.append(StringUtils.rightPad("State", 20));
    builder.append(StringUtils.rightPad("StartTime", 20));
    builder.append(StringUtils.rightPad("Query", 30)).append("\n");

    builder.append(StringUtils.rightPad(StringUtils.repeat("-", 20), 21));
    builder.append(StringUtils.rightPad(StringUtils.repeat("-", 19), 20));
    builder.append(StringUtils.rightPad(StringUtils.repeat("-", 19), 20));
    builder.append(StringUtils.rightPad(StringUtils.repeat("-", 29), 30)).append("\n");
    writer.write(builder.toString());

    builder = new StringBuilder();
    for (BriefQueryInfo queryInfo : queryList) {
        builder.append(StringUtils.rightPad(new QueryId(queryInfo.getQueryId()).toString(), 21));
        builder.append(StringUtils.rightPad(queryInfo.getState().name(), 20));
        builder.append(StringUtils.rightPad(df.format(queryInfo.getStartTime()), 20));
        builder.append(StringUtils.abbreviate(queryInfo.getQuery(), 30)).append("\n");
    }
    writer.write(builder.toString());
}