com.vmware.bdd.cli.commands.ClusterCommands.java Source code

Introduction

Here is the source code for com.vmware.bdd.cli.commands.ClusterCommands.java
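
ClusterCommands implements the Serengeti CLI's cluster lifecycle commands (create, list, export, delete, start, stop, resize, update, setParam, resetParam, config, fix, and upgrade) as Spring Shell @CliCommand methods backed by REST clients. Based on the annotations in the source below, a typical interactive session might look like the following sketch; the cluster, distro, network, and node group names are hypothetical placeholders:

    cluster create --name myCluster --distro apache --networkName defaultNetwork
    cluster list --name myCluster --detail
    cluster resize --name myCluster --nodeGroup worker --instanceNum 5
    cluster stop --name myCluster
    cluster delete --name myCluster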

Source

/*****************************************************************************
 *   Copyright (c) 2012-2015 VMware, Inc. All Rights Reserved.
 *   Licensed under the Apache License, Version 2.0 (the "License");
 *   you may not use this file except in compliance with the License.
 *   You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS,
 *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *   See the License for the specific language governing permissions and
 *   limitations under the License.
 ****************************************************************************/
package com.vmware.bdd.cli.commands;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import jline.console.ConsoleReader;

import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.BooleanUtils;
import org.apache.commons.lang.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.shell.core.CommandMarker;
import org.springframework.shell.core.annotation.CliAvailabilityIndicator;
import org.springframework.shell.core.annotation.CliCommand;
import org.springframework.shell.core.annotation.CliOption;
import org.springframework.stereotype.Component;

import com.vmware.bdd.apitypes.AppManagerRead;
import com.vmware.bdd.apitypes.ClusterCreate;
import com.vmware.bdd.apitypes.ClusterRead;
import com.vmware.bdd.apitypes.ClusterType;
import com.vmware.bdd.apitypes.DistroRead;
import com.vmware.bdd.apitypes.ElasticityRequestBody;
import com.vmware.bdd.apitypes.ElasticityRequestBody.ElasticityMode;
import com.vmware.bdd.apitypes.FixDiskRequestBody;
import com.vmware.bdd.apitypes.NetConfigInfo.NetTrafficType;
import com.vmware.bdd.apitypes.NetworkRead;
import com.vmware.bdd.apitypes.NodeGroupAdd;
import com.vmware.bdd.apitypes.NodeGroupCreate;
import com.vmware.bdd.apitypes.NodeGroupRead;
import com.vmware.bdd.apitypes.NodeRead;
import com.vmware.bdd.apitypes.Priority;
import com.vmware.bdd.apitypes.ResourceScale;
import com.vmware.bdd.apitypes.TaskRead;
import com.vmware.bdd.apitypes.TaskRead.NodeStatus;
import com.vmware.bdd.apitypes.TopologyType;
import com.vmware.bdd.apitypes.VcResourceMap;
import com.vmware.bdd.cli.rest.AppManagerRestClient;
import com.vmware.bdd.cli.rest.CliRestException;
import com.vmware.bdd.cli.rest.ClusterRestClient;
import com.vmware.bdd.cli.rest.NetworkRestClient;
import com.vmware.bdd.exception.WarningMessageException;
import com.vmware.bdd.usermgmt.UserMgmtConstants;
import com.vmware.bdd.utils.AppConfigValidationUtils;
import com.vmware.bdd.utils.AppConfigValidationUtils.ValidationType;
import com.vmware.bdd.utils.CommonUtil;
import com.vmware.bdd.utils.ListToStringConverter;
import com.vmware.bdd.utils.ValidateResult;

@Component
public class ClusterCommands implements CommandMarker {
    private static LinkedHashMap<String, List<String>> USER_MGMT_COLUMN_FORMAT = null;

    @Autowired
    private NetworkRestClient networkRestClient;

    @Autowired
    private ClusterRestClient restClient;

    @Autowired
    private AppManagerRestClient appManagerRestClient;

    @CliAvailabilityIndicator({ "cluster help" })
    public boolean isCommandAvailable() {
        return true;
    }

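    /**
     * Implements "cluster create": validates the cluster name, resolves the
     * application manager and distro, builds a ClusterCreate object from the
     * command-line options and the optional spec file, validates the network
     * and configuration settings, and submits the request via the REST client.
     */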
    @CliCommand(value = "cluster create", help = "Create a new cluster")
    public void createCluster(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String name,
            @CliOption(key = {
                    "appManager" }, mandatory = false, help = "The application manager name") final String appManager,
            @CliOption(key = {
                    "type" }, mandatory = false, help = "The cluster type is Hadoop or HBase") final String type,
            @CliOption(key = { "distro" }, mandatory = false, help = "The distro name") final String distro,
            @CliOption(key = {
                    "specFile" }, mandatory = false, help = "The spec file name path") final String specFilePath,
            @CliOption(key = {
                    "rpNames" }, mandatory = false, help = "Resource Pools for the cluster: use \",\" among names.") final String rpNames,
            @CliOption(key = {
                    "dsNames" }, mandatory = false, help = "Datastores for the cluster: use \",\" among names.") final String dsNames,
            @CliOption(key = {
                    "networkName" }, mandatory = false, help = "Network Name used for management") final String networkName,
            @CliOption(key = {
                    "hdfsNetworkName" }, mandatory = false, help = "Network Name for HDFS traffic.") final String hdfsNetworkName,
            @CliOption(key = {
                    "mapredNetworkName" }, mandatory = false, help = "Network Name for MapReduce traffic") final String mapredNetworkName,
            @CliOption(key = {
                    "topology" }, mandatory = false, help = "You must specify the topology type: HVE or RACK_AS_RACK or HOST_AS_RACK") final String topology,
            @CliOption(key = {
                    "resume" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "flag to resume cluster creation") final boolean resume,
            @CliOption(key = {
                    "skipConfigValidation" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", help = "Skip cluster configuration validation. ") final boolean skipConfigValidation,
            @CliOption(key = {
                    "yes" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", help = "Answer 'yes' to all Y/N questions. ") final boolean alwaysAnswerYes,
            @CliOption(key = {
                    "password" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "Answer 'yes' to set password for all VMs in this cluster.") final boolean setClusterPassword,
            @CliOption(key = {
                    "localRepoURL" }, mandatory = false, help = "Local yum server URL for application managers, ClouderaManager/Ambari.") final String localRepoURL,
            @CliOption(key = {
                    "adminGroupName" }, mandatory = false, help = "AD/LDAP Admin Group Name.") final String adminGroupName,
            @CliOption(key = {
                    "userGroupName" }, mandatory = false, help = "AD/LDAP User Group Name.") final String userGroupName,
            @CliOption(key = {
                    "disableLocalUsers" }, mandatory = false, help = "Disable local users") final Boolean disableLocalUsersFlag,
            @CliOption(key = {
                    "skipVcRefresh" }, mandatory = false, help = "flag to skip refreshing VC resources") final Boolean skipVcRefresh,
            @CliOption(key = {
                    "template" }, mandatory = false, help = "The node template name") final String templateName) {
        // validate the name
        if (name.indexOf("-") != -1) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL,
                    Constants.PARAM_CLUSTER + Constants.PARAM_NOT_CONTAIN_HORIZONTAL_LINE);
            return;
        } else if (name.indexOf(" ") != -1) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL,
                    Constants.PARAM_CLUSTER + Constants.PARAM_NOT_CONTAIN_BLANK_SPACE);
            return;
        }

        // process resume
        if (resume && setClusterPassword) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, Constants.RESUME_DONOT_NEED_SET_PASSWORD);
            return;
        } else if (resume) {
            resumeCreateCluster(name, skipVcRefresh);
            return;
        }

        // build ClusterCreate object
        ClusterCreate clusterCreate = new ClusterCreate();
        clusterCreate.setName(name);

        if (!CommandsUtils.isBlank(appManager) && !Constants.IRONFAN.equalsIgnoreCase(appManager)) {
            AppManagerRead appManagerRead = appManagerRestClient.get(appManager);
            if (appManagerRead == null) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        appManager + " cannot be found in the list of application managers.");
                return;
            }
        }

        if (CommandsUtils.isBlank(appManager)) {
            clusterCreate.setAppManager(Constants.IRONFAN);
        } else {
            clusterCreate.setAppManager(appManager);
            // Local yum repo URL for 3rd-party app managers such as Cloudera Manager and Ambari.
            if (!CommandsUtils.isBlank(localRepoURL)) {
                clusterCreate.setLocalRepoURL(localRepoURL);
            }
        }

        if (setClusterPassword) {
            String password = getPassword();
            //The user chose to set a password but failed to enter
            //a valid one; quit cluster create.
            if (password == null) {
                return;
            } else {
                clusterCreate.setPassword(password);
            }
        }

        if (type != null) {
            ClusterType clusterType = ClusterType.getByDescription(type);
            if (clusterType == null) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                        Constants.OUTPUT_OP_RESULT_FAIL, Constants.INVALID_VALUE + " " + "type=" + type);
                return;
            }
            clusterCreate.setType(clusterType);
        } else if (specFilePath == null) {
            // create Hadoop (HDFS + MapReduce) cluster as default
            clusterCreate.setType(ClusterType.HDFS_MAPRED);
        }

        TopologyType policy = null;
        if (topology != null) {
            policy = validateTopologyValue(name, topology);
            if (policy == null) {
                return;
            }
        } else {
            policy = TopologyType.NONE;
        }
        clusterCreate.setTopologyPolicy(policy);

        DistroRead distroRead4Create;
        try {
            if (distro != null) {
                DistroRead[] distroReads = appManagerRestClient.getDistros(clusterCreate.getAppManager());
                distroRead4Create = getDistroByName(distroReads, distro);

                if (distroRead4Create == null) {
                    CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                            Constants.OUTPUT_OP_RESULT_FAIL,
                            Constants.PARAM_DISTRO + Constants.PARAM_NOT_SUPPORTED + getDistroNames(distroReads));
                    return;
                }
            } else {
                distroRead4Create = appManagerRestClient.getDefaultDistro(clusterCreate.getAppManager());
                if (distroRead4Create == null || CommandsUtils.isBlank(distroRead4Create.getName())) {
                    CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                            Constants.OUTPUT_OP_RESULT_FAIL, Constants.PARAM_NO_DEFAULT_DISTRO);
                    return;
                }
            }
        } catch (CliRestException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            return;
        }

        Map<String, Map<String, String>> infraConfigs = new HashMap<String, Map<String, String>>();

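        // AD/LDAP user management: adminGroupName and userGroupName must be
        // supplied together; supplying only one of them is rejected below.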
        if (StringUtils.isBlank(adminGroupName) && StringUtils.isBlank(userGroupName)) {
            //Both adminGroupName and userGroupName are blank, so assume LDAP need not be enabled.
        } else if (!StringUtils.isBlank(adminGroupName) && !StringUtils.isBlank(userGroupName)) {
            if (MapUtils.isEmpty(infraConfigs.get(UserMgmtConstants.LDAP_USER_MANAGEMENT))) {
                initInfraConfigs(infraConfigs, disableLocalUsersFlag);
            }
            Map<String, String> userMgmtConfig = infraConfigs.get(UserMgmtConstants.LDAP_USER_MANAGEMENT);
            userMgmtConfig.put(UserMgmtConstants.ADMIN_GROUP_NAME, adminGroupName);
            userMgmtConfig.put(UserMgmtConstants.USER_GROUP_NAME, userGroupName);
            clusterCreate.setInfrastructure_config(infraConfigs);
        } else {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, "You need to supply both AdminGroupName and UserGroupName.");
            return;
        }

        clusterCreate.setDistro(distroRead4Create.getName());
        clusterCreate.setDistroVendor(distroRead4Create.getVendor());
        clusterCreate.setDistroVersion(distroRead4Create.getVersion());

        clusterCreate.setTemplateName(templateName);

        if (rpNames != null) {
            List<String> rpNamesList = CommandsUtils.inputsConvert(rpNames);
            if (rpNamesList.isEmpty()) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        Constants.INPUT_RPNAMES_PARAM + Constants.MULTI_INPUTS_CHECK);
                return;
            } else {
                clusterCreate.setRpNames(rpNamesList);
            }
        }
        if (dsNames != null) {
            List<String> dsNamesList = CommandsUtils.inputsConvert(dsNames);
            if (dsNamesList.isEmpty()) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        Constants.INPUT_DSNAMES_PARAM + Constants.MULTI_INPUTS_CHECK);
                return;
            } else {
                clusterCreate.setDsNames(dsNamesList);
            }
        }
        List<String> failedMsgList = new ArrayList<String>();
        List<String> warningMsgList = new ArrayList<String>();
        Set<String> allNetworkNames = new HashSet<String>();
        try {
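            // A minimal spec file consumed via --specFile might look like the
            // following sketch (the node group names and roles are hypothetical):
            // {
            //   "nodeGroups": [
            //     { "name": "master", "roles": ["hadoop_namenode"], "instanceNum": 1 },
            //     { "name": "worker", "roles": ["hadoop_datanode"], "instanceNum": 3 }
            //   ],
            //   "configuration": {}
            // }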
            if (specFilePath != null) {
                ClusterCreate clusterSpec = CommandsUtils.getObjectByJsonString(ClusterCreate.class,
                        CommandsUtils.dataFromFile(specFilePath));
                clusterCreate.setSpecFile(true);
                clusterCreate.setExternalHDFS(clusterSpec.getExternalHDFS());
                clusterCreate.setExternalMapReduce(clusterSpec.getExternalMapReduce());
                clusterCreate.setExternalNamenode(clusterSpec.getExternalNamenode());
                clusterCreate.setExternalSecondaryNamenode(clusterSpec.getExternalSecondaryNamenode());
                clusterCreate.setExternalDatanodes(clusterSpec.getExternalDatanodes());
                clusterCreate.setNodeGroups(clusterSpec.getNodeGroups());
                clusterCreate.setConfiguration(clusterSpec.getConfiguration());
                // TODO: We'd better merge validateConfiguration with validateClusterSpec to avoid repeated validation.
                if (CommandsUtils.isBlank(appManager) || Constants.IRONFAN.equalsIgnoreCase(appManager)) {
                    validateConfiguration(clusterCreate, skipConfigValidation, warningMsgList, failedMsgList);
                }
                clusterCreate.validateNodeGroupNames();
                if (!validateHAInfo(clusterCreate.getNodeGroups())) {
                    CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                            Constants.OUTPUT_OP_RESULT_FAIL, Constants.PARAM_CLUSTER_SPEC_HA_ERROR + specFilePath);
                    return;
                }

                Map<String, Map<String, String>> specInfraConfigs = clusterSpec.getInfrastructure_config();
                if (!MapUtils.isEmpty(specInfraConfigs)) { // spec infra config is not empty
                    if (MapUtils.isNotEmpty(infraConfigs)) {
                        System.out.println(
                                "adminGroup and userGroup has been specified as commandline parameters, so the values inside spec file will be ignored.");
                    } else {
                        clusterCreate.setInfrastructure_config(specInfraConfigs);
                    }
                }
                Map<String, Object> configuration = clusterSpec.getConfiguration();
                if (MapUtils.isNotEmpty(configuration)) {
                    Map<String, Map<String, String>> serviceUserConfig = (Map<String, Map<String, String>>) configuration
                            .get(UserMgmtConstants.SERVICE_USER_CONFIG_IN_SPEC_FILE);
                    if (MapUtils.isNotEmpty(serviceUserConfig)) {
                        //The user didn't specify LDAP on the command line or in the spec file, but did specify an LDAP user as a service user.
                        if (hasLdapServiceUser(serviceUserConfig)
                                && (clusterCreate.getInfrastructure_config() == null)) {
                            Map<String, Map<String, String>> infraConfig = new HashMap<>();
                            initInfraConfigs(infraConfig, disableLocalUsersFlag);
                            clusterCreate.setInfrastructure_config(infraConfig);
                        }
                        validateServiceUserConfigs(appManager, clusterSpec, failedMsgList);
                    }
                }

            }
            allNetworkNames = getAllNetworkNames();
        } catch (Exception e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            return;
        }

        if (allNetworkNames.isEmpty()) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, Constants.PARAM_CANNOT_FIND_NETWORK);
            return;
        }

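        // Build the per-traffic-type network configuration: the management
        // network is required (or inferred when exactly one network exists),
        // and HDFS/MapReduce traffic fall back to the management network
        // unless dedicated networks are specified.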
        LinkedHashMap<NetTrafficType, List<String>> networkConfig = new LinkedHashMap<NetTrafficType, List<String>>();
        if (networkName == null) {
            if (allNetworkNames.size() == 1) {
                networkConfig.put(NetTrafficType.MGT_NETWORK, new ArrayList<String>());
                networkConfig.get(NetTrafficType.MGT_NETWORK).addAll(allNetworkNames);
            } else {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        Constants.PARAM_NETWORK_NAME + Constants.PARAM_NOT_SPECIFIED);
                return;
            }
        } else {
            if (!allNetworkNames.contains(networkName)
                    || (hdfsNetworkName != null && !allNetworkNames.contains(hdfsNetworkName))
                    || (mapredNetworkName != null && !allNetworkNames.contains(mapredNetworkName))) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        Constants.PARAM_NETWORK_NAME + Constants.PARAM_NOT_SUPPORTED + allNetworkNames.toString());
                return;
            }

            networkConfig.put(NetTrafficType.MGT_NETWORK, new ArrayList<String>());
            networkConfig.get(NetTrafficType.MGT_NETWORK).add(networkName);

            if (hdfsNetworkName != null) {
                networkConfig.put(NetTrafficType.HDFS_NETWORK, new ArrayList<String>());
                networkConfig.get(NetTrafficType.HDFS_NETWORK).add(hdfsNetworkName);
            }

            if (mapredNetworkName != null) {
                networkConfig.put(NetTrafficType.MAPRED_NETWORK, new ArrayList<String>());
                networkConfig.get(NetTrafficType.MAPRED_NETWORK).add(mapredNetworkName);
            }
        }
        notifyNetsUsage(networkConfig, warningMsgList);
        clusterCreate.setNetworkConfig(networkConfig);

        clusterCreate.validateCDHVersion(warningMsgList);

        // Validate that the specified file is in correct JSON format and contains proper values.
        //TODO(qjin): 1, in validateClusterCreate, implement roles check and validation
        //            2, consider use service to validate configuration for different appManager
        if (specFilePath != null) {
            validateClusterSpec(clusterCreate, failedMsgList, warningMsgList);
        }

        // give a warning message if both type and specFilePath are specified
        if (type != null && specFilePath != null) {
            warningMsgList.add(Constants.TYPE_SPECFILE_CONFLICT);
        }

        if (!failedMsgList.isEmpty()) {
            showFailedMsg(clusterCreate.getName(), Constants.OUTPUT_OP_CREATE, failedMsgList);
            return;
        }

        // rest invocation
        try {
            if (!CommandsUtils.showWarningMsg(clusterCreate.getName(), Constants.OUTPUT_OBJECT_CLUSTER,
                    Constants.OUTPUT_OP_CREATE, warningMsgList, alwaysAnswerYes, null)) {
                return;
            }
            restClient.create(clusterCreate, BooleanUtils.toBoolean(skipVcRefresh));
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_CREAT);
        } catch (CliRestException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, CommandsUtils.getExceptionMessage(e));
            return;
        }

        // Check the instant-clone type and the HA configuration for the node groups.
        // There are currently limitations on HA support with instant clone, so we
        // display a warning message when instant clone is combined with HA.
        ClusterRead cluster = restClient.get(name, false);
        if (cluster != null) {
            String cloneType = cluster.getClusterCloneType();
            String INSTANT_CLONE = com.vmware.bdd.utils.Constants.CLUSTER_CLONE_TYPE_INSTANT_CLONE;
            if (INSTANT_CLONE.equals(cloneType)) {
                String warningMsg = validateInstantCloneWithHA(specFilePath, clusterCreate);
                if (!CommonUtil.isBlank(warningMsg)) {
                    System.out.println(warningMsg);
                }
            }
        }
    }

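    // Returns true when any service user declared in the spec file is of type LDAP.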
    private boolean hasLdapServiceUser(Map<String, Map<String, String>> serviceUserConfigs) {
        for (Map<String, String> serviceUserConfig : serviceUserConfigs.values()) {
            // Constant-first comparison avoids an NPE when the service user type is absent.
            if ("LDAP".equalsIgnoreCase(serviceUserConfig.get(UserMgmtConstants.SERVICE_USER_TYPE))) {
                return true;
            }
        }
        return false;
    }

    private void initInfraConfigs(Map<String, Map<String, String>> infraConfigs, Boolean disableLocalUsersFlag) {
        Map<String, String> userMgmtConfigs = new HashMap<>();
        //Disable local accounts by default.
        userMgmtConfigs.put(UserMgmtConstants.DISABLE_LOCAL_USER_FLAG,
                disableLocalUsersFlag == null ? Boolean.TRUE.toString() : disableLocalUsersFlag.toString());
        infraConfigs.put(UserMgmtConstants.LDAP_USER_MANAGEMENT, userMgmtConfigs);
    }

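    // Validates the service user section of the spec file against the keys
    // supported by the target application manager type; Ironfan does not
    // support service user configuration at all.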
    protected void validateServiceUserConfigs(String appMangerName, ClusterCreate clusterSpec,
            List<String> failedMsgList) {
        if (clusterSpec.getConfiguration() == null) {
            return;
        }
        Map<String, Map<String, String>> serviceUserConfigs = (Map<String, Map<String, String>>) clusterSpec
                .getConfiguration().get(UserMgmtConstants.SERVICE_USER_CONFIG_IN_SPEC_FILE);
        if (MapUtils.isEmpty(serviceUserConfigs)) {
            return;
        }
        String appManagerType = appManagerRestClient.get(appMangerName).getType();
        String[] ambariSupportedConfigs = { UserMgmtConstants.SERVICE_USER_NAME,
                UserMgmtConstants.SERVICE_USER_TYPE };
        String[] clouderaSupportedConfigs = { UserMgmtConstants.SERVICE_USER_NAME,
                UserMgmtConstants.SERVICE_USER_TYPE, UserMgmtConstants.SERVICE_USER_GROUP };
        if (appManagerType.equals(Constants.IRONFAN)) {
            failedMsgList.add("Ironfan deployed cluster doesn't support config service user");
        } else if (appManagerType.equals(com.vmware.bdd.utils.Constants.AMBARI_PLUGIN_TYPE)) {
            validateServiceUserConfigHelper(appManagerType, serviceUserConfigs,
                    Arrays.asList(ambariSupportedConfigs), failedMsgList);
        } else if (appManagerType.equals(com.vmware.bdd.utils.Constants.CLOUDERA_MANAGER_PLUGIN_TYPE)) {
            validateServiceUserConfigHelper(appManagerType, serviceUserConfigs,
                    Arrays.asList(clouderaSupportedConfigs), failedMsgList);
        }
    }

    private void validateServiceUserConfigHelper(String appManagerType,
            Map<String, Map<String, String>> serviceUserConfigs, List<String> supportedConfigs,
            List<String> failedMsgList) {
        ArrayList<String> unSupportedKeys = new ArrayList<>();
        for (Map<String, String> config : serviceUserConfigs.values()) {
            for (String key : config.keySet()) {
                if (!supportedConfigs.contains(key) && !unSupportedKeys.contains(key)) {
                    unSupportedKeys.add(key);
                }
            }
        }
        if (!unSupportedKeys.isEmpty()) {
            failedMsgList.add(
                    appManagerType + " deployed cluster doesn't support following keys when config service user: "
                            + unSupportedKeys.toString());
        }
    }

    private DistroRead getDistroByName(DistroRead[] distroReads, String distroName) {
        if (distroReads != null && distroName != null) {
            for (DistroRead distroRead : distroReads) {
                if (distroName.equals(distroRead.getName())) {
                    return distroRead;
                }
            }
        }
        return null;
    }

    private void validateClusterSpec(ClusterCreate clusterCreate, List<String> failedMsgList,
            List<String> warningMsgList) {
        clusterCreate.validateClusterCreate(failedMsgList, warningMsgList);
        //validate roles and configuration.
        com.vmware.bdd.apitypes.ValidateResult vr = restClient.validateBlueprint(clusterCreate);
        if (!vr.isValidated()) {
            failedMsgList.addAll(vr.getFailedMsgList());
            warningMsgList.addAll(vr.getWarningMsgList());
        }
    }

    /**
     * Notify the user which networks Serengeti will pick for management, HDFS,
     * and MapReduce traffic.
     *
     * @param networkConfig
     * @param warningMsgList
     */
    private void notifyNetsUsage(Map<NetTrafficType, List<String>> networkConfig, List<String> warningMsgList) {
        if (!networkConfig.containsKey(NetTrafficType.HDFS_NETWORK)
                && !networkConfig.containsKey(NetTrafficType.MAPRED_NETWORK)) {
            return;
        }
        String mgtNetwork = networkConfig.get(NetTrafficType.MGT_NETWORK).get(0);
        String hdfsNetwork = mgtNetwork;
        String mapredNetwork = mgtNetwork;
        if (networkConfig.containsKey(NetTrafficType.HDFS_NETWORK)
                && !networkConfig.get(NetTrafficType.HDFS_NETWORK).isEmpty()) {
            hdfsNetwork = networkConfig.get(NetTrafficType.HDFS_NETWORK).get(0);
        }
        if (networkConfig.containsKey(NetTrafficType.MAPRED_NETWORK)
                && !networkConfig.get(NetTrafficType.MAPRED_NETWORK).isEmpty()) {
            mapredNetwork = networkConfig.get(NetTrafficType.MAPRED_NETWORK).get(0);
        }

        StringBuilder netsUsage = new StringBuilder().append("The cluster will use network ").append(mgtNetwork)
                .append(" for management, ").append(hdfsNetwork).append(" for HDFS traffic, and ")
                .append(mapredNetwork).append(" for MapReduce traffic.");
        warningMsgList.add(netsUsage.toString());
    }

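    /**
     * Prompts twice for the cluster password and returns it only when both
     * entries match and satisfy the password requirements; otherwise returns null.
     */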
    private String getPassword() {
        System.out.println("Hint: " + com.vmware.bdd.utils.Constants.PASSWORD_REQUIREMENT);
        String firstPassword = getInputedPassword(Constants.ENTER_PASSWORD);
        if (firstPassword == null) {
            return null;
        }

        String secondPassword = getInputedPassword(Constants.CONFIRM_PASSWORD);
        if (secondPassword == null) {
            return null;
        }

        if (!firstPassword.equals(secondPassword)) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, Constants.PASSWORD_CONFIRMATION_FAILED);
            return null;
        }

        return firstPassword;
    }

    private String getInputedPassword(String promptMsg) {
        try {
            ConsoleReader reader = CommandsUtils.getConsoleReader();
            reader.setPrompt(promptMsg);
            String password = reader.readLine(Character.valueOf('*'));
            if (isValidPassword(password)) {
                return password;
            } else {
                return null;
            }
        } catch (IOException e) {
            return null;
        }
    }

    private boolean isValidPassword(String password) {
        if (!CommonUtil.validateClusterPassword(password)) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, com.vmware.bdd.utils.Constants.PASSWORD_REQUIREMENT);
            return false;
        }
        return true;
    }

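    // Implements "cluster list": prints either all clusters (sorted) or a
    // single cluster, optionally with per-node detail.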
    @CliCommand(value = "cluster list", help = "Show cluster information")
    public void getCluster(
            @CliOption(key = { "name" }, mandatory = false, help = "The cluster name") final String name,
            @CliOption(key = {
                    "detail" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "flag to show node information") final boolean detail) {

        // rest invocation
        try {
            if (name == null) {
                ClusterRead[] clusters = restClient.getAll(detail);
                if (clusters != null && clusters.length > 0) {
                    Arrays.sort(clusters);
                    prettyOutputClustersInfo(clusters, detail);
                }
            } else {
                ClusterRead cluster = restClient.get(name, detail);
                if (cluster != null) {
                    prettyOutputClusterInfo(cluster, detail);
                    printSeperator();
                }
            }
        } catch (CliRestException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_LIST,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
        }
    }

    @CliCommand(value = "cluster export", help = "Export cluster data")
    public void exportClusterData(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String name,
            @CliOption(key = {
                    "specFile" }, mandatory = false, help = "The cluster spec file path") final String specFileName,
            @CliOption(key = {
                    "type" }, mandatory = false, help = "The data type to export: SPEC or RACK or IP2FQDN") final String type,
            @CliOption(key = {
                    "topology" }, mandatory = false, help = "The topology type: HVE or RACK_AS_RACK or HOST_AS_RACK") final String topology,
            @CliOption(key = {
                    "delimiter" }, mandatory = false, help = "The string used to separate each line") final String delimeter,
            @CliOption(key = {
                    "output" }, mandatory = false, help = "The path to the output file") final String output) {

        // When no output file is resolved below, path stays null and the export
        // is written to System.out instead of a file.
        String path = null;
        if (!CommandsUtils.isBlank(specFileName)) {
            if (!CommandsUtils.isBlank(type)) {
                System.out.println(Constants.TYPE_SPECFILE_CONFLICT);
                return;
            }
            path = specFileName;
        } else if (!CommandsUtils.isBlank(type)) {
            if (!CommandsUtils.isBlank(output)) {
                path = output;
            }
        }

        if (topology != null && validateTopologyValue(name, topology) == null) {
            return;
        }

        try {
            if ((CommandsUtils.isBlank(specFileName) && CommandsUtils.isBlank(type))
                    || !CommandsUtils.isBlank(specFileName) || Constants.EXPORT_TYPE_SPEC.equalsIgnoreCase(type)) {
                ClusterCreate cluster = restClient.getSpec(name);
                CommandsUtils.prettyJsonOutput(cluster, path);
            } else if (Constants.EXPORT_TYPE_RACK.equalsIgnoreCase(type)) {
                Map<String, String> rackTopology = restClient.getRackTopology(name, topology);
                CommandsUtils.gracefulRackTopologyOutput(rackTopology, path, delimeter);
            } else if (Constants.EXPORT_TYPE_IP.equalsIgnoreCase(type)) {
                ClusterRead cluster = restClient.get(name, true);
                prettyOutputClusterIPs(cluster, path, delimeter);
            } else if (Constants.EXPORT_TYPE_IP_2_FQDN.equalsIgnoreCase(type)) {
                ClusterRead cluster = restClient.get(name, true);
                prettyOutputClusterIp2FqdnMapping(cluster, path, delimeter);
            } else {
                System.out.println(Constants.UNKNOWN_EXPORT_TYPE);
            }
        } catch (Exception e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_EXPORT,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
        }
    }

    @CliCommand(value = "cluster delete", help = "Delete a cluster")
    public void deleteCluster(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String name) {

        // rest invocation
        try {
            restClient.delete(name);
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_DELETE);
        } catch (CliRestException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_DELETE,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
        }
    }

    @CliCommand(value = "cluster start", help = "Start a cluster")
    public void startCluster(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String clusterName,
            @CliOption(key = {
                    "force" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "Force start cluster") final Boolean forceStart) {

        Map<String, String> queryStrings = new HashMap<String, String>();
        queryStrings.put(Constants.QUERY_ACTION_KEY, Constants.QUERY_ACTION_START);
        queryStrings.put(Constants.FORCE_CLUSTER_OPERATION_KEY, forceStart.toString());
        // rest invocation
        try {
            restClient.actionOps(clusterName, queryStrings);
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_NODES_IN_CLUSTER,
                    Constants.OUTPUT_OP_RESULT_START);

        } catch (CliRestException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_NODES_IN_CLUSTER, Constants.OUTPUT_OP_START,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
        }
    }

    @CliCommand(value = "cluster stop", help = "Stop a cluster")
    public void stopCluster(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String clusterName) {
        Map<String, String> queryStrings = new HashMap<String, String>();
        queryStrings.put(Constants.QUERY_ACTION_KEY, Constants.QUERY_ACTION_STOP);

        // rest invocation
        try {
            restClient.actionOps(clusterName, queryStrings);
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_NODES_IN_CLUSTER,
                    Constants.OUTPUT_OP_RESULT_STOP);
        } catch (CliRestException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_NODES_IN_CLUSTER, Constants.OUTPUT_OP_STOP,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
        }
    }

    @CliCommand(value = "cluster resize", help = "Resize a cluster")
    public void resizeCluster(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String name,
            @CliOption(key = {
                    "nodeGroup" }, mandatory = true, help = "The node group name") final String nodeGroup,
            @CliOption(key = {
                    "instanceNum" }, mandatory = false, unspecifiedDefaultValue = "0", help = "The new instance number, should be larger than 0") final int instanceNum,
            @CliOption(key = {
                    "cpuNumPerNode" }, mandatory = false, unspecifiedDefaultValue = "0", help = "The number of vCPU for the nodes in this group") final int cpuNumber,
            @CliOption(key = {
                    "memCapacityMbPerNode" }, mandatory = false, unspecifiedDefaultValue = "0", help = "The number of memory size in Mb for the nodes in this group") final long memory,
            @CliOption(key = {
                    "force" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "Ignore errors during resizing cluster") final Boolean force,
            @CliOption(key = {
                    "skipVcRefresh" }, mandatory = false, help = "flag to skip refreshing VC resources") final Boolean skipVcRefresh) {

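        // Scale out/in (instanceNum) and scale up/down (cpuNumber/memory) are
        // mutually exclusive; exactly one of the two option groups may be used.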
        if ((instanceNum > 0 && cpuNumber == 0 && memory == 0)
                || (instanceNum == 0 && (cpuNumber > 0 || memory > 0))) {
            try {
                ClusterRead cluster = restClient.get(name, false);
                if (cluster == null) {
                    CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                            Constants.OUTPUT_OP_RESULT_FAIL, "cluster " + name + " does not exist.");
                    return;
                }
                // Verify that the specified node group exists (the ZooKeeper scale-out check below is disabled).
                List<NodeGroupRead> ngs = cluster.getNodeGroups();
                boolean found = false;
                for (NodeGroupRead ng : ngs) {
                    if (ng.getName().equals(nodeGroup)) {
                        found = true;
                        /* Disabled check: prevent scale-out of the ZooKeeper node group.
                        if (ng.getRoles() != null
                                && ng.getRoles().contains(HadoopRole.ZOOKEEPER_ROLE.toString())
                                && instanceNum > 1) {
                            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, name,
                                    Constants.OUTPUT_OP_RESIZE, Constants.OUTPUT_OP_RESULT_FAIL,
                                    Constants.ZOOKEEPER_NOT_RESIZE);
                            return;
                        } */
                        // TODO emma: do not check, as the client does not know which group runs ZooKeeper.
                        break;
                    }
                }

                if (!found) {
                    CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                            Constants.OUTPUT_OP_RESULT_FAIL, "node group " + nodeGroup + " does not exist.");
                    return;
                }
                TaskRead taskRead = null;
                if (instanceNum > 0) {
                    Map<String, String> queryStrings = new HashMap<String, String>();
                    queryStrings.put(Constants.FORCE_CLUSTER_OPERATION_KEY, force.toString());
                    queryStrings.put(Constants.REST_PARAM_SKIP_REFRESH_VC,
                            Boolean.toString(BooleanUtils.toBoolean(skipVcRefresh)));
                    restClient.resize(name, nodeGroup, instanceNum, queryStrings);
                } else if (cpuNumber > 0 || memory > 0) {
                    if (!cluster.getStatus().isActiveServiceStatus()) {
                        CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                                Constants.OUTPUT_OP_RESULT_FAIL,
                                "Cluster must be in 'RUNNING' state to scale up/down");
                        return;
                    }
                    ResourceScale resScale = new ResourceScale(name, nodeGroup, cpuNumber, memory);
                    taskRead = restClient.scale(resScale);
                }
                CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_RESIZE);
                if (taskRead != null) {
                    System.out.println();
                    printScaleReport(taskRead, name, nodeGroup);
                }
            } catch (CliRestException e) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                        Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            }
        } else {
            if (instanceNum > 0 && (cpuNumber > 0 || memory > 0)) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        "Can not scale out/in and scale up/down at the same time, you have to run those commands separately");
            } else if (instanceNum == 0 && cpuNumber == 0 && memory == 0) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        "You must specify one positive value for instanceNum/cpuNumPerNode/memCapacityMbPerNode");

            } else {
                List<String> invalidParams = new ArrayList<String>();
                if (instanceNum < 0) {
                    invalidParams.add("instanceNum=" + instanceNum);
                }
                if (cpuNumber < 0) {
                    invalidParams.add("cpuNumPerNode=" + cpuNumber);
                }
                if (memory < 0) {
                    invalidParams.add("memCapacityMbPerNode=" + memory);
                }
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESIZE,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        Constants.INVALID_VALUE + " " + StringUtils.join(invalidParams, ", "));
            }
        }
    }

    @CliCommand(value = "cluster update", help = "Update resourcepools or datastores used by cluster")
    public void updateCluster(
            @CliOption(key = { "name" }, mandatory = true, help = "the cluster name") final String name,
            @CliOption(key = {
                    "rpNames" }, mandatory = false, help = "Resource Pools for the cluster: use \",\" among names.") final String rpNames,
            @CliOption(key = {
                    "dsNames" }, mandatory = false, help = "Datastores for the cluster: use \",\" among names.") final String dsNames,
            @CliOption(key = {
                    "append" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "Append the specified rpNames or dsNames to current value.") final Boolean append,
            @CliOption(key = {
                    "yes" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", help = "Answer 'yes' to all Y/N questions. ") final boolean alwaysAnswerYes) {

        ClusterRead cluster = null;
        try {
            cluster = restClient.get(name, false);
            if (cluster == null) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_UPDATE,
                        Constants.OUTPUT_OP_RESULT_FAIL, "cluster " + name + " does not exist.");
                return;
            }
        } catch (CliRestException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_UPDATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            return;
        }
        boolean ignoreWarning = alwaysAnswerYes;
        boolean onlyAppend = append;
        List<String> rpNamesList = new ArrayList<String>();
        List<String> dsNamesList = new ArrayList<String>();
        List<String> warningMsgList = new ArrayList<String>();
        ClusterCreate clusterUpdate = new ClusterCreate();
        clusterUpdate.setName(name);

        //Check whether the new rpNames include all resource pools the cluster already uses
        if (!CommonUtil.isBlank(rpNames)) {
            rpNamesList.addAll(CommandsUtils.inputsConvertSet(rpNames));
            clusterUpdate.setRpNames(rpNamesList);
        }

        //Check whether the new dsNames include all datastores the cluster already uses
        if (!CommonUtil.isBlank(dsNames)) {
            dsNamesList.addAll(CommandsUtils.inputsConvertSet(dsNames));
            clusterUpdate.setDsNames(dsNamesList);
        }

        if (!CommonUtil.isBlank(rpNames) || !CommonUtil.isBlank(dsNames)) {
            try {
                restClient.updateCluster(clusterUpdate, ignoreWarning, onlyAppend);
                CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_UPDATE);
            } catch (WarningMessageException e) {
                warningMsgList.add(CommonUtil.formatWarningMsg(e.getMessage()));
                if (!CommandsUtils.showWarningMsg(cluster.getName(), Constants.OUTPUT_OBJECT_CLUSTER,
                        Constants.OUTPUT_OP_UPDATE, warningMsgList, ignoreWarning, null)) {
                    return;
                } else {
                    ignoreWarning = true;
                    restClient.updateCluster(clusterUpdate, ignoreWarning, onlyAppend);
                    CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER,
                            Constants.OUTPUT_OP_RESULT_UPDATE);
                }
            } catch (CliRestException e) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_UPDATE,
                        Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            }
        } else {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_UPDATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, Constants.PARAM_SHOULD_SPECIFY_RP_DS);
        }
    }

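    // Prints a per-node report (IP, name, CPU, memory, status, notes) for the
    // nodes that succeeded and failed during a scale operation.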
    private void printScaleReport(TaskRead taskRead, String clusterName, String nodeGroupName) {
        ClusterRead cluster = restClient.get(clusterName, true);
        List<NodeGroupRead> nodeGroups = cluster.getNodeGroups();
        List<NodeStatus> succeedNodes = taskRead.getSucceedNodes();
        List<NodeStatus> failedNodes = taskRead.getFailNodes();
        setNodeStatusInfo(succeedNodes, nodeGroups);
        setNodeStatusInfo(failedNodes, nodeGroups);
        LinkedHashMap<String, List<String>> columnNamesWithGetMethodNames = new LinkedHashMap<String, List<String>>();
        columnNamesWithGetMethodNames.put("IP", Arrays.asList("getIp"));
        columnNamesWithGetMethodNames.put("NAME", Arrays.asList("getNodeName"));
        columnNamesWithGetMethodNames.put("CPU", Arrays.asList("getCpuNumber"));
        columnNamesWithGetMethodNames.put("MEM(MB)", Arrays.asList("getMemory"));
        columnNamesWithGetMethodNames.put("STATUS", Arrays.asList("getStatus"));
        columnNamesWithGetMethodNames.put("NOTES", Arrays.asList("getErrorMessage"));
        try {
            System.out.println("The resized node group: " + nodeGroupName);
            System.out.println("The current resized nodes: " + succeedNodes.size());
            CommandsUtils.printInTableFormat(columnNamesWithGetMethodNames, succeedNodes.toArray(),
                    Constants.OUTPUT_INDENT);
            System.out.println("The failed resized nodes: " + failedNodes.size());
            CommandsUtils.printInTableFormat(columnNamesWithGetMethodNames, failedNodes.toArray(),
                    Constants.OUTPUT_INDENT);
        } catch (Exception e) {
            throw new CliRestException(e.getMessage());
        }
    }

    private void setNodeStatusInfo(List<NodeStatus> nodes, List<NodeGroupRead> nodeGroups) {
        for (NodeStatus nodeStatus : nodes) {
            NodeRead node = getNodeRead(nodeStatus.getNodeName(), nodeGroups);
            if (node != null) {
                // Only the management IP is shown currently.
                nodeStatus.setIp(node.fetchMgtIp());
                nodeStatus.setStatus(node.getStatus());
                nodeStatus.setCpuNumber(node.getCpuNumber());
                nodeStatus.setMemory(node.getMemory());
            }
        }
    }

    @CliCommand(value = "cluster setParam", help = "set cluster parameters")
    public void setParam(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String clusterName,
            @CliOption(key = {
                    "ioShares" }, mandatory = false, help = "The relative disk I/O priorities: HIGH, NORNAL, LOW") final String ioShares) {
        try {
            //validate if the cluster exists
            ClusterRead cluster = restClient.get(clusterName, false);
            if (cluster == null) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_SET_PARAM,
                        Constants.OUTPUT_OP_RESULT_FAIL, "cluster " + clusterName + " does not exist.");
                return;
            }

            if (ioShares == null) {
                // No parameter other than the cluster name was specified; return directly.
                System.out.println("There is nothing to adjust, please specify more parameters.");
                return;
            }

            //validate the input of ioShares (known to be non-null at this point)
            Priority ioPriority;
            try {
                ioPriority = Priority.valueOf(ioShares.toUpperCase());
            } catch (IllegalArgumentException ex) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_SET_PARAM,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        Constants.INVALID_VALUE + " " + "ioShares = " + ioShares);
                return;
            }

            ElasticityRequestBody requestBody = new ElasticityRequestBody();
            requestBody.setIoPriority(ioPriority);

            restClient.setParam(cluster, requestBody);
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_ADJUST);
        } catch (CliRestException e) {
            if (e.getMessage() != null) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_SET_PARAM,
                        Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            }
        }
    }

    @CliCommand(value = "cluster resetParam", help = "reset cluster parameters")
    public void resetParam(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String clusterName,
            @CliOption(key = {
                    "ioShares" }, mandatory = true, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "reset disk I/O priorities to LOW") final boolean ioShares) {
        try {
            //validate if the cluster exists
            ClusterRead cluster = restClient.get(clusterName, false);
            if (cluster == null) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESET_PARAM,
                        Constants.OUTPUT_OP_RESULT_FAIL, "cluster " + clusterName + " does not exist.");
                return;
            }

            // ioShares: normal
            ElasticityRequestBody requestBody = new ElasticityRequestBody();
            if (ioShares) {
                requestBody.setIoPriority(Priority.NORMAL);
            }
            restClient.setParam(cluster, requestBody);
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_RESET);
        } catch (CliRestException e) {
            if (e.getMessage() != null) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESET_PARAM,
                        Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            }
        }
    }

    @CliCommand(value = "cluster config", help = "Config an existing cluster")
    public void configCluster(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String name,
            @CliOption(key = {
                    "specFile" }, mandatory = true, help = "The spec file name path") final String specFilePath,
            @CliOption(key = {
                    "skipConfigValidation" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", help = "Skip cluster configuration validation. ") final boolean skipConfigValidation,
            @CliOption(key = {
                    "yes" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", help = "Answer 'yes' to all Y/N questions. ") final boolean alwaysAnswerYes) {
        // validate the name
        if (name.indexOf("-") != -1) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CONFIG,
                    Constants.OUTPUT_OP_RESULT_FAIL,
                    Constants.PARAM_CLUSTER + Constants.PARAM_NOT_CONTAIN_HORIZONTAL_LINE);
            return;
        }
        try {
            ClusterRead clusterRead = restClient.get(name, false);
            // build ClusterCreate object
            ClusterCreate clusterConfig = new ClusterCreate();
            clusterConfig.setName(clusterRead.getName());
            ClusterCreate clusterSpec = CommandsUtils.getObjectByJsonString(ClusterCreate.class,
                    CommandsUtils.dataFromFile(specFilePath));
            clusterConfig.setNodeGroups(clusterSpec.getNodeGroups());
            clusterConfig.setConfiguration(clusterSpec.getConfiguration());
            clusterConfig.setExternalHDFS(clusterSpec.getExternalHDFS());
            List<String> warningMsgList = new ArrayList<String>();
            List<String> failedMsgList = new ArrayList<String>();
            validateConfiguration(clusterConfig, skipConfigValidation, warningMsgList, failedMsgList);
            // add a confirm message for running job
            warningMsgList.add("Warning: " + Constants.PARAM_CLUSTER_CONFIG_RUNNING_JOB_WARNING);
            if (!CommandsUtils.showWarningMsg(clusterConfig.getName(), Constants.OUTPUT_OBJECT_CLUSTER,
                    Constants.OUTPUT_OP_CONFIG, warningMsgList, alwaysAnswerYes, null)) {
                return;
            }

            if (!failedMsgList.isEmpty()) {
                showFailedMsg(clusterConfig.getName(), Constants.OUTPUT_OP_CONFIG, failedMsgList);
                return;
            }

            restClient.configCluster(clusterConfig);
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_CONFIG);
        } catch (Exception e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CONFIG,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            return;
        }
    }

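    // Only disk failure recovery is currently supported, so the --disk flag
    // must be specified for the fix operation to proceed.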
    @CliCommand(value = "cluster fix", help = "Fix a cluster failure")
    public void fixCluster(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String clusterName,
            @CliOption(key = {
                    "disk" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", help = "Recover a disk failure") final boolean isDiskFailure,
            @CliOption(key = {
                    "parallel" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", help = "Whether to recover the nodes in parallel") final boolean parallel,
            @CliOption(key = {
                    "nodeGroup" }, mandatory = false, help = "The node group name to which the failure belongs") final String nodeGroupName) {
        try {
            if (!isDiskFailure) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_FIX,
                        Constants.OUTPUT_OP_RESULT_FAIL, Constants.PARAM_SHOULD_SPECIFY_DISK);
                return;
            }
            FixDiskRequestBody requestBody = new FixDiskRequestBody();
            requestBody.setParallel(parallel);
            if (!CommandsUtils.isBlank(nodeGroupName)) {
                requestBody.setNodeGroupName(nodeGroupName);
            }
            TaskRead taskRead = restClient.fixDisk(clusterName, requestBody);
            if (taskRead == null) {
                return;
            }
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_FIX);
            System.out.println();
            printClusterFixReport(taskRead, clusterName);
        } catch (Exception e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_FIX,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            return;
        }
    }

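    /**
     * Upgrades a cluster created by an earlier version, after the user
     * confirms the upgrade warning (pass --yes to skip the prompt). Example
     * invocation (name is a placeholder):
     *   cluster upgrade --name myCluster
     */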
    @CliCommand(value = "cluster upgrade", help = "Upgrade an old cluster")
    public void upgradeCluster(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String name,
            @CliOption(key = {
                    "yes" }, mandatory = false, unspecifiedDefaultValue = "false", specifiedDefaultValue = "true", help = "Answer 'yes' to all Y/N questions. ") final boolean alwaysAnswerYes)
            throws IOException {
        // validate the name
        if (name.indexOf("-") != -1) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_UPGRADE,
                    Constants.OUTPUT_OP_RESULT_FAIL,
                    Constants.PARAM_CLUSTER + Constants.PARAM_NOT_CONTAIN_HORIZONTAL_LINE);
            return;
        }

        // rest invocation
        try {
            // warn the user and ask for confirmation before upgrading
            List<String> warningMsgList = new ArrayList<String>();
            warningMsgList.add("Warning: " + Constants.PARAM_PROMPT_UPGRADE_CLUSTER_WARNING);
            if (!CommandsUtils.showWarningMsg(name, Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_UPGRADE,
                    warningMsgList, alwaysAnswerYes, null)) {
                return;
            }

            restClient.upgradeCluster(name);
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_UPGRADE);
        } catch (CliRestException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_UPGRADE,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
        }
    }

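    // Prints the per-node report of a "cluster fix" task: a table of the fixed
    // nodes, then, when any nodes failed, a table of those nodes with their
    // error messages.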
    private void printClusterFixReport(TaskRead taskRead, String clusterName) throws Exception {
        ClusterRead cluster = restClient.get(clusterName, true);
        List<NodeGroupRead> nodeGroups = cluster.getNodeGroups();
        List<NodeStatus> succeedNodes = taskRead.getSucceedNodes();
        List<NodeStatus> failedNodes = taskRead.getFailNodes();
        setNodeStatusInfo(succeedNodes, nodeGroups);
        System.out.println("The fixed nodes: " + succeedNodes.size());
        LinkedHashMap<String, List<String>> columnNamesWithGetMethodNames = new LinkedHashMap<String, List<String>>();
        columnNamesWithGetMethodNames.put("IP", Arrays.asList("getIp"));
        columnNamesWithGetMethodNames.put("NAME", Arrays.asList("getNodeName"));
        columnNamesWithGetMethodNames.put("STATUS", Arrays.asList("getStatus"));
        CommandsUtils.printInTableFormat(columnNamesWithGetMethodNames, succeedNodes.toArray(),
                Constants.OUTPUT_INDENT);
        if (!failedNodes.isEmpty()) {
            System.out.println("The failed nodes: " + failedNodes.size());
            setNodeStatusInfo(failedNodes, nodeGroups);
            columnNamesWithGetMethodNames.put("Error Message", Arrays.asList("getErrorMessage"));
            CommandsUtils.printInTableFormat(columnNamesWithGetMethodNames, failedNodes.toArray(),
                    Constants.OUTPUT_INDENT);
        }
    }

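    // Looks up a node by name across all node groups; returns null when no
    // node matches.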
    private NodeRead getNodeRead(String nodeName, List<NodeGroupRead> nodeGroups) {
        for (NodeGroupRead nodeGroup : nodeGroups) {
            List<NodeRead> nodes = nodeGroup.getInstances();
            for (NodeRead node : nodes) {
                if (node.getName().equals(nodeName)) {
                    return node;
                }
            }
        }
        return null;
    }

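    // Resumes a failed cluster creation through the REST "action" API,
    // optionally skipping the vCenter inventory refresh.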
    private void resumeCreateCluster(final String name, Boolean skipVcRefresh) {
        Map<String, String> queryStrings = new HashMap<String, String>();
        queryStrings.put(Constants.QUERY_ACTION_KEY, Constants.QUERY_ACTION_RESUME);
        queryStrings.put(Constants.REST_PARAM_SKIP_REFRESH_VC,
                Boolean.toString(BooleanUtils.toBoolean(skipVcRefresh)));

        try {
            restClient.actionOps(name, queryStrings);
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_RESUME);
        } catch (CliRestException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESUME,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
        }
    }

    private Set<String> getAllNetworkNames() {
        Set<String> allNetworks = new HashSet<String>();
        NetworkRead[] networks = networkRestClient.getAll(false);
        if (networks != null) {
            for (NetworkRead network : networks) {
                allNetworks.add(network.getName());
            }
        }
        return allNetworks;
    }

    private List<String> getDistroNames(DistroRead[] distroReads) {
        List<String> distroNames = new ArrayList<String>();
        if (distroReads != null) {
            for (DistroRead distroRead : distroReads) {
                distroNames.add(distroRead.getName());
            }
        }
        return distroNames;
    }

    private boolean validName(String inputName, List<String> validNames) {
        return validNames.contains(inputName);
    }

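    // Prints the cluster-level parameters as a name/value list, then a node
    // group summary table; in detail mode it also prints per-node tables and
    // the AD/LDAP user management settings.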
    private void prettyOutputClusterInfo(ClusterRead cluster, boolean detail) {
        Map<String, Map<String, String>> infraCfg = cluster.getInfrastructure_config();
        Map<String, String> userMgmtCfg = null;
        if (MapUtils.isNotEmpty(infraCfg)) {
            userMgmtCfg = infraCfg.get(UserMgmtConstants.LDAP_USER_MANAGEMENT);
        }

        TopologyType topology = cluster.getTopologyPolicy();
        printSeparator();

        // list cluster level params
        LinkedHashMap<String, String> clusterParams = new LinkedHashMap<String, String>();
        clusterParams.put("CLUSTER NAME", cluster.getName());
        clusterParams.put("AGENT VERSION", cluster.getVersion());
        clusterParams.put("APP MANAGER", cluster.getAppManager());
        clusterParams.put("DISTRO", cluster.getDistro());
        clusterParams.put("NODE TEMPLATE", cluster.getTemplateName());
        String cloneType = cluster.getClusterCloneType();
        if (!CommandsUtils.isBlank(cloneType)) {
            clusterParams.put("CLUSTER CLONE TYPE", cloneType.toUpperCase());
        }
        if (topology != null && topology != TopologyType.NONE) {
            clusterParams.put("TOPOLOGY", topology.toString());
        }
        clusterParams.put("IO SHARES", cluster.getIoShares() == null ? "" : cluster.getIoShares().toString());
        clusterParams.put("STATUS", cluster.getStatus() == null ? "" : cluster.getStatus().toString());
        if (cluster.getExternalHDFS() != null && !cluster.getExternalHDFS().isEmpty()) {
            clusterParams.put("EXTERNAL HDFS", cluster.getExternalHDFS());
        }
        // external MapReduce cluster (compute burst-out), if configured
        if (!CommandsUtils.isBlank(cluster.getExternalMapReduce())) {
            clusterParams.put("EXTERNAL MAPREDUCE", cluster.getExternalMapReduce());
        }

        clusterParams.put("AD/LDAP ENABLED", Boolean.toString(MapUtils.isNotEmpty(userMgmtCfg)));

        for (Entry<String, String> param : clusterParams.entrySet()) {
            System.out.printf(Constants.OUTPUT_INDENT + "%-26s:" + Constants.OUTPUT_INDENT + "%s\n", param.getKey(),
                    param.getValue());
        }
        System.out.println();

        LinkedHashMap<String, List<String>> ngColumnNamesWithGetMethodNames = new LinkedHashMap<String, List<String>>();
        List<NodeGroupRead> nodegroups = cluster.getNodeGroups();
        if (nodegroups != null) {
            ngColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_GROUP_NAME, Arrays.asList("getName"));
            ngColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_ROLES, Arrays.asList("getRoles"));
            ngColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_INSTANCE,
                    Arrays.asList("getInstanceNum"));
            ngColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_CPU, Arrays.asList("getCpuNum"));
            ngColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_MEM,
                    Arrays.asList("getMemCapacityMB"));
            ngColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_TYPE,
                    Arrays.asList("getStorage", "getType"));
            ngColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_SIZE,
                    Arrays.asList("getStorage", "getSizeGB"));

            try {
                if (detail) {
                    prettyOutputDetailNodegroups(topology, ngColumnNamesWithGetMethodNames, nodegroups);
                } else {
                    CommandsUtils.printInTableFormat(ngColumnNamesWithGetMethodNames, nodegroups.toArray(),
                            Constants.OUTPUT_INDENT);
                }
            } catch (Exception e) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_LIST,
                        Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            }

            if (detail) {
                prettyOutputDetailedUserMgmt(cluster.getName(), userMgmtCfg);
            }
        }
    }

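    // Prints the AD/LDAP user management settings as a table, lazily caching
    // the column layout in USER_MGMT_COLUMN_FORMAT.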
    protected static void prettyOutputDetailedUserMgmt(String clusterName, Map<String, String> userMgmtCfg) {
        try {
            if (MapUtils.isNotEmpty(userMgmtCfg)) {
                if (MapUtils.isEmpty(USER_MGMT_COLUMN_FORMAT)) {
                    USER_MGMT_COLUMN_FORMAT = new LinkedHashMap<>();
                    USER_MGMT_COLUMN_FORMAT.put("ADMINISTRATORS GROUP",
                            Arrays.asList(UserMgmtConstants.ADMIN_GROUP_NAME));
                    USER_MGMT_COLUMN_FORMAT.put("USERS GROUP", Arrays.asList(UserMgmtConstants.USER_GROUP_NAME));
                    USER_MGMT_COLUMN_FORMAT.put("LOCAL USERS DISABLED",
                            Arrays.asList(UserMgmtConstants.DISABLE_LOCAL_USER_FLAG));
                }
                CommandsUtils.printInTableFormat(USER_MGMT_COLUMN_FORMAT, Arrays.asList(((Map) userMgmtCfg)),
                        Constants.OUTPUT_INDENT);
            }
        } catch (Exception e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_LIST,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
        }
    }

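    // For each node group, prints the group summary followed by a table of its
    // nodes; the HDFS/MAPRED IP columns are dropped when the first node has no
    // separate HDFS or MapReduce network configured.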
    private void prettyOutputDetailNodegroups(TopologyType topology,
            LinkedHashMap<String, List<String>> ngColumnNamesWithGetMethodNames, List<NodeGroupRead> nodegroups)
            throws Exception {
        LinkedHashMap<String, List<String>> nColumnNamesWithGetMethodNames = new LinkedHashMap<String, List<String>>();
        nColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_NODE_NAME, Arrays.asList("getName"));
        nColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_NODE_VERSION, Arrays.asList("getVersion"));
        nColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_HOST, Arrays.asList("getHostName"));
        if (topology == TopologyType.RACK_AS_RACK || topology == TopologyType.HVE) {
            nColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_RACK, Arrays.asList("getRack"));
        }
        nColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_IP, Arrays.asList("fetchMgtIp"));
        nColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_HDFS_IP, Arrays.asList("fetchHdfsIp"));
        nColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_MAPRED_IP, Arrays.asList("fetchMapredIp"));
        nColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_STATUS, Arrays.asList("getStatus"));
        nColumnNamesWithGetMethodNames.put(Constants.FORMAT_TABLE_COLUMN_TASK, Arrays.asList("getAction"));

        for (NodeGroupRead nodegroup : nodegroups) {
            CommandsUtils.printInTableFormat(ngColumnNamesWithGetMethodNames, new NodeGroupRead[] { nodegroup },
                    Constants.OUTPUT_INDENT);
            List<NodeRead> nodes = nodegroup.getInstances();
            if (nodes != null) {
                // copy the column layout so per-group changes do not leak between groups
                LinkedHashMap<String, List<String>> nColumnNamesWithGetMethodNamesClone = new LinkedHashMap<String, List<String>>(
                        nColumnNamesWithGetMethodNames);
                if (!nodes.isEmpty() && (nodes.get(0).getIpConfigs() == null
                        || (!nodes.get(0).getIpConfigs().containsKey(NetTrafficType.HDFS_NETWORK)
                                && !nodes.get(0).getIpConfigs().containsKey(NetTrafficType.MAPRED_NETWORK)))) {
                    nColumnNamesWithGetMethodNamesClone.remove(Constants.FORMAT_TABLE_COLUMN_HDFS_IP);
                    nColumnNamesWithGetMethodNamesClone.remove(Constants.FORMAT_TABLE_COLUMN_MAPRED_IP);
                }
                System.out.println();
                CommandsUtils.printInTableFormat(nColumnNamesWithGetMethodNamesClone, nodes.toArray(),
                        Constants.OUTPUT_INDENT + Constants.OUTPUT_INDENT);
            }
            System.out.println();
        }

        CommandsUtils.prettyOutputErrorNode(nodegroups);
    }

    private void prettyOutputClustersInfo(ClusterRead[] clusters, boolean detail) {
        for (ClusterRead cluster : clusters) {
            prettyOutputClusterInfo(cluster, detail);
        }
        printSeparator();
    }

    private void printSeparator() {
        StringBuilder separator = new StringBuilder().append(Constants.OUTPUT_INDENT);
        for (int i = 0; i < Constants.SEPERATOR_LEN; i++) {
            separator.append("=");
        }
        System.out.println(separator.toString());
        System.out.println();
    }

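    // Collects the management IP of every node in the cluster and hands the
    // list to CommandsUtils.prettyOutputStrings for delimited output.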
    public static void prettyOutputClusterIPs(ClusterRead cluster, String filename, String delimiter)
            throws Exception {
        List<Object> list = new ArrayList<Object>();
        for (NodeGroupRead nodegroup : cluster.getNodeGroups()) {
            List<NodeRead> nodes = nodegroup.getInstances();
            if (nodes != null && !nodes.isEmpty()) {
                for (NodeRead node : nodes) {
                    list.add(node.fetchMgtIp());
                }
            }
        }
        CommandsUtils.prettyOutputStrings(list, filename, delimiter);
    }

    private void showFailedMsg(String name, String op, List<String> failedMsgList) {
        // build the failure message listing every invalid value.
        StringBuilder failedMsg = new StringBuilder();
        failedMsg.append(Constants.INVALID_VALUE);
        if (failedMsgList.size() > 1) {
            failedMsg.append("s");
        }
        failedMsg.append(".\n");

        failedMsg.append(new ListToStringConverter<String>(failedMsgList, '\n'));

        CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, op, Constants.OUTPUT_OP_RESULT_FAIL,
                failedMsg.toString());
    }

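    // Always runs the black list check; unless validation is skipped, also
    // checks supported configuration types and the white list, collecting
    // warnings and hard failures into the two message lists.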
    private void validateConfiguration(ClusterCreate cluster, boolean skipConfigValidation,
            List<String> warningMsgList, List<String> failedMsgList) {

        // validate blacklist
        ValidateResult blackListResult = validateBlackList(cluster);
        addBlackListWarning(blackListResult, warningMsgList);

        if (!skipConfigValidation) {
            // validate config type
            AppConfigValidationUtils.validateSupportType(cluster.getConfiguration(), warningMsgList);
            // validate whitelist
            ValidateResult whiteListResult = validateWhiteList(cluster);
            addWhiteListWarningOrFailure(cluster.getName(), whiteListResult, warningMsgList, failedMsgList);
        } else {
            cluster.setValidateConfig(false);
        }
    }

    private ValidateResult validateBlackList(ClusterCreate cluster) {
        return validateConfiguration(cluster, ValidationType.BLACK_LIST);
    }

    private ValidateResult validateWhiteList(ClusterCreate cluster) {
        return validateConfiguration(cluster, ValidationType.WHITE_LIST);
    }

    /*
     * Validate the cluster-level configuration first, then the configuration of
     * each node group, and merge the failure info produced at the cluster level
     * with the failure info produced at the node group level.
     */
    private ValidateResult validateConfiguration(ClusterCreate cluster, ValidationType validationType) {
        ValidateResult validateResult = new ValidateResult();
        // validate cluster level Configuration
        ValidateResult vr = null;
        if (cluster.getConfiguration() != null && !cluster.getConfiguration().isEmpty()) {
            vr = AppConfigValidationUtils.validateConfig(validationType, cluster.getConfiguration());
            if (vr.getType() != ValidateResult.Type.VALID) {
                validateResult.setType(vr.getType());
                if (!vr.getFailureNames().isEmpty()) {
                    validateResult.setFailureNames(vr.getFailureNames());
                }
                if (!vr.getFailureValues().isEmpty()) {
                    validateResult.setFailureValues(vr.getFailureValues());
                }
                if (!vr.getNoExistFileNames().isEmpty()) {
                    validateResult.setNoExistFileNames(vr.getNoExistFileNames());
                }
            }
        }
        List<String> failureNames = new ArrayList<String>();
        Map<String, List<String>> noExistingFileNamesMap = new HashMap<String, List<String>>();
        List<String> failureValues = new ArrayList<String>();
        if (!validateResult.getFailureNames().isEmpty()) {
            failureNames.addAll(validateResult.getFailureNames());
        }
        if (!validateResult.getNoExistFileNames().isEmpty()) {
            noExistingFileNamesMap.putAll(validateResult.getNoExistFileNames());
        }
        if (!validateResult.getFailureValues().isEmpty()) {
            failureValues.addAll(validateResult.getFailureValues());
        }

        // validate nodegroup level Configuration
        for (NodeGroupCreate nodeGroup : cluster.getNodeGroups()) {
            if (nodeGroup.getConfiguration() != null && !nodeGroup.getConfiguration().isEmpty()) {
                vr = AppConfigValidationUtils.validateConfig(validationType, nodeGroup.getConfiguration());
                if (vr.getType() != ValidateResult.Type.VALID) {
                    // an invalid value takes priority over an invalid name, since it causes a hard failure
                    if (validateResult.getType() != ValidateResult.Type.WHITE_LIST_INVALID_VALUE) {
                        validateResult.setType(vr.getType());
                    }
                    // merge failed names between cluster level and node group level.
                    for (String failureName : vr.getFailureNames()) {
                        if (!failureNames.contains(failureName)) {
                            failureNames.add(failureName);
                        }
                    }

                    // merge failed values between cluster level and node group level.
                    for (String failureValue : vr.getFailureValues()) {
                        if (!failureValues.contains(failureValue)) {
                            failureValues.add(failureValue);
                        }
                    }

                    // merge non-existing file names between cluster level and
                    // node group level
                    for (Entry<String, List<String>> noExistingFileNames : vr.getNoExistFileNames().entrySet()) {
                        String configType = noExistingFileNames.getKey();
                        if (noExistingFileNamesMap.containsKey(configType)) {
                            List<String> noExistingFilesTemp = noExistingFileNames.getValue();
                            List<String> noExistingFiles = noExistingFileNamesMap.get(configType);
                            for (String fileName : noExistingFilesTemp) {
                                if (!noExistingFiles.contains(fileName)) {
                                    noExistingFiles.add(fileName);
                                }
                            }
                            noExistingFileNamesMap.put(configType, noExistingFiles);
                        } else {
                            noExistingFileNamesMap.put(configType, noExistingFileNames.getValue());
                        }
                    }
                }
            }
        }
        validateResult.setFailureNames(failureNames);
        validateResult.setNoExistFileNames(noExistingFileNamesMap);
        validateResult.setFailureValues(failureValues);
        return validateResult;
    }

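    // White list violations on names only produce warnings, while invalid
    // values become hard failures.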
    private void addWhiteListWarningOrFailure(final String clusterName, ValidateResult whiteListResult,
            List<String> warningMsgList, List<String> failedMsgList) {
        if (whiteListResult.getType() == ValidateResult.Type.WHITE_LIST_INVALID_NAME) {
            String noExistingWarningMsg = getValidateWarningMsg(whiteListResult.getNoExistFileNames());
            String failureNameWarningMsg = getValidateWarningMsg(whiteListResult.getFailureNames(),
                    Constants.PARAM_CLUSTER_NOT_IN_WHITE_LIST_WARNING);
            if (warningMsgList != null) {
                if (!CommonUtil.isBlank(noExistingWarningMsg)) {
                    warningMsgList.add(noExistingWarningMsg);
                }
                if (!CommonUtil.isBlank(failureNameWarningMsg)) {
                    warningMsgList.add(failureNameWarningMsg);
                }
            }
        } else if (whiteListResult.getType() == ValidateResult.Type.WHITE_LIST_INVALID_VALUE) {
            if (!whiteListResult.getFailureValues().isEmpty()) {
                failedMsgList.addAll(whiteListResult.getFailureValues());
            }
        }
    }

    private void addBlackListWarning(ValidateResult blackListResult, List<String> warningList) {
        if (blackListResult.getType() == ValidateResult.Type.NAME_IN_BLACK_LIST) {
            String warningMsg = getValidateWarningMsg(blackListResult.getFailureNames(),
                    Constants.PARAM_CLUSTER_IN_BLACK_LIST_WARNING + Constants.PARAM_CLUSTER_NOT_TAKE_EFFECT);
            if (warningList != null) {
                if (!CommandsUtils.isBlank(warningMsg)) {
                    warningList.add(warningMsg);
                }
            }
        }
    }

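    // Builds a warning of the form "Warning: a, b are <warningMsg>" ("... is
    // ..." for a single name); returns an empty string when the list is empty.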
    private String getValidateWarningMsg(List<String> failureNames, String warningMsg) {
        StringBuilder warningMsgBuff = new StringBuilder();
        if (failureNames != null && !failureNames.isEmpty()) {
            warningMsgBuff.append("Warning: ");
            for (String failureName : failureNames) {
                warningMsgBuff.append(failureName).append(", ");
            }
            warningMsgBuff.delete(warningMsgBuff.length() - 2, warningMsgBuff.length());
            if (failureNames.size() > 1) {
                warningMsgBuff.append(" are ");
            } else {
                warningMsgBuff.append(" is ");
            }
            warningMsgBuff.append(warningMsg);
        }
        return warningMsgBuff.toString();
    }

    private String getValidateWarningMsg(Map<String, List<String>> noExistingFilesMap) {
        StringBuilder warningMsgBuff = new StringBuilder();
        if (noExistingFilesMap != null && !noExistingFilesMap.isEmpty()) {
            warningMsgBuff.append("Warning: ");
            for (Entry<String, List<String>> noExistingFilesEntry : noExistingFilesMap.entrySet()) {
                List<String> noExistingFileNames = noExistingFilesEntry.getValue();
                for (String noExistingFileName : noExistingFileNames) {
                    warningMsgBuff.append(noExistingFileName).append(", ");
                }
                warningMsgBuff.delete(warningMsgBuff.length() - 2, warningMsgBuff.length());
                if (noExistingFileNames.size() > 1) {
                    warningMsgBuff.append(" are ");
                } else {
                    warningMsgBuff.append(" is ");
                }
                warningMsgBuff.append("not existing in ");
                warningMsgBuff.append(noExistingFilesEntry.getKey() + " scope , ");
            }
            warningMsgBuff.replace(warningMsgBuff.length() - 2, warningMsgBuff.length(), ". ");
            warningMsgBuff.append(Constants.PARAM_CLUSTER_NOT_TAKE_EFFECT);
        }
        return warningMsgBuff.toString();
    }

    private boolean validateHAInfo(NodeGroupCreate[] nodeGroups) {
        List<String> haFlagList = Arrays.asList("off", "on", "ft");
        if (nodeGroups != null) {
            for (NodeGroupCreate group : nodeGroups) {
                String haFlag = group.getHaFlag();
                // guard against an unset haFlag: null is treated as the default and is valid
                if (haFlag != null && !haFlagList.contains(haFlag.toLowerCase())) {
                    return false;
                }
            }
        }
        return true;
    }

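    // Parses the topology option into a TopologyType, printing a "create"
    // failure and returning null when the value is not a valid constant.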
    private TopologyType validateTopologyValue(String clusterName, String topology) {
        TopologyType value = null;
        try {
            value = TopologyType.valueOf(topology);
        } catch (IllegalArgumentException ex) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_CREATE,
                    Constants.OUTPUT_OP_RESULT_FAIL, Constants.INVALID_VALUE + " " + "topologyType=" + topology);
        }
        return value;
    }

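    // Emits one "<ip> <fqdn>" line per traffic type per node, de-duplicated,
    // through CommandsUtils.prettyOutputStrings.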
    public static void prettyOutputClusterIp2FqdnMapping(ClusterRead cluster, String filename, String delimiter)
            throws Exception {
        List<Object> list = new ArrayList<Object>();
        for (NodeGroupRead nodegroup : cluster.getNodeGroups()) {
            List<NodeRead> nodes = nodegroup.getInstances();
            if (nodes != null && !nodes.isEmpty()) {
                for (NodeRead node : nodes) {
                    if (node.getIpConfigs() != null) {
                        for (NetTrafficType trafficType : node.getIpConfigs().keySet()) {
                            String ip2Fqdn = String.format("%-15s", node.fetchIpOf(trafficType)) + " "
                                    + node.fetchFqdnOf(trafficType);
                            if (!list.contains(ip2Fqdn)) {
                                list.add(ip2Fqdn);
                            }
                        }
                    }
                }
            }
        }
        CommandsUtils.prettyOutputStrings(list, filename, delimiter);
    }

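    // Builds the instant-clone-with-HA warning: it names every node group
    // whose haFlag is not "off", or the "master" group when no spec file was
    // given (the default master HA flag is "on").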
    private String validateInstantCloneWithHA(String specFilePath, ClusterCreate cluster) {
        String warningMsg = null;
        ArrayList<String> ngs = new ArrayList<String>();
        if (null != specFilePath) {
            NodeGroupCreate[] nodeGroups = cluster.getNodeGroups();
            if (null != nodeGroups) {
                for (NodeGroupCreate ngc : nodeGroups) {
                    String haFlag = ngc.getHaFlag();
                    if (null != haFlag && !haFlag.equals(com.vmware.bdd.utils.Constants.HA_FLAG_OFF)) {
                        ngs.add(ngc.getName());
                    }
                }
            }
        } else {
            // if the user does not provide a spec file, the default HA option for the
            // master group is 'on'
            ngs.add("master");
        }

        if (!ngs.isEmpty()) {
            warningMsg = String.format(Constants.WARNING_INSTANT_CLONE_WITH_HA, ngs.toString());
        }
        return warningMsg;
    }

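    /**
     * Adds the node groups defined in a JSON spec file to an existing
     * cluster. Example invocation (name and path are placeholders):
     *   cluster expand --name myCluster --specFile /path/to/nodegroups.json
     */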
    @CliCommand(value = "cluster expand", help = "Expand element for existed cluster")
    public void expandCluster(
            @CliOption(key = { "name" }, mandatory = true, help = "The cluster name") final String name,
            @CliOption(key = {
                    "specFile" }, mandatory = true, help = "The spec file path") final String specFilePath) {
        try {
            if (specFilePath == null) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_EXPAND,
                        Constants.OUTPUT_OP_RESULT_FAIL, Constants.PARAM_NOT_CONTAIN_SPECFILE);
                return;
            }
            ClusterRead cluster = restClient.get(name, false);

            if (cluster == null) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_EXPAND,
                        Constants.OUTPUT_OP_RESULT_FAIL, "cluster " + name + " does not exist.");
                return;
            }

            NodeGroupAdd nodeGroupAdd = new NodeGroupAdd();
            NodeGroupAdd nodeGroupAddSpec = CommandsUtils.getObjectByJsonString(NodeGroupAdd.class,
                    CommandsUtils.dataFromFile(specFilePath));
            nodeGroupAdd.setSpecFile(true);
            nodeGroupAdd.setNodeGroups(nodeGroupAddSpec.getNodeGroups());

            // validate the cluster name
            if (name.contains("-")) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_EXPAND,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        Constants.PARAM_CLUSTER + Constants.PARAM_NOT_CONTAIN_HORIZONTAL_LINE);
                return;
            } else if (name.contains(" ")) {
                CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_EXPAND,
                        Constants.OUTPUT_OP_RESULT_FAIL,
                        Constants.PARAM_CLUSTER + Constants.PARAM_NOT_CONTAIN_BLANK_SPACE);
                return;
            }

            restClient.addNodeGroups(name, nodeGroupAdd);
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_EXPAND);
        } catch (Exception e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_EXPAND,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
            return;
        }

    }

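    /**
     * Recovers clusters, optionally remapping vCenter resources through a
     * JSON resource map file. Example invocation (path is a placeholder):
     *   cluster recover --resMapFile /path/to/resmap.json
     */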
    @CliCommand(value = "cluster recover", help = "Recover clusters")
    public void recoverCluster(@CliOption(key = {
            "resMapFile" }, mandatory = false, help = "The resource map file name path") final String mapFilePath) {
        try {
            VcResourceMap vcResMap = new VcResourceMap();
            if (null != mapFilePath) {
                vcResMap = CommandsUtils.getObjectByJsonString(VcResourceMap.class,
                        CommandsUtils.dataFromFile(mapFilePath));
            }
            restClient.recover(vcResMap);
            CommandsUtils.printCmdSuccess(Constants.OUTPUT_OBJECT_CLUSTER_ALL,
                    Constants.OUTPUT_OP_RESULT_RECOVER_SUCC);
        } catch (CliRestException | IOException e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER, Constants.OUTPUT_OP_RESULT_RECOVER,
                    Constants.OUTPUT_OP_RESULT_FAIL, e.getMessage());
        }
    }
}