Example usage for org.apache.hadoop.conf Configuration addResource

Introduction

On this page you can find example usage for org.apache.hadoop.conf Configuration addResource.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource.
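
The usage examples below exercise several addResource overloads: a classpath resource name (String), a Path, an InputStream, and another Configuration (the overload shown in the prototype above). The following minimal sketch was written for this page rather than taken from any of the quoted projects; the file locations and the fs.defaultFS lookup are illustrative assumptions.

import java.io.FileInputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddResourceExample {
    public static void main(String[] args) throws Exception {
        Configuration base = new Configuration();

        // Classpath resource, loaded by name.
        base.addResource("core-site.xml");

        // Local or HDFS file, referenced by Path (assumed location).
        base.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));

        // Arbitrary InputStream (assumed location).
        base.addResource(new FileInputStream("/etc/hadoop/conf/yarn-site.xml"));

        // Another Configuration: the resources registered on 'base' are added to 'merged'.
        Configuration merged = new Configuration();
        merged.addResource(base);

        System.out.println("fs.defaultFS = " + merged.get("fs.defaultFS"));
    }
}

Note that resources added later override values from earlier ones for the same property, unless the property is marked final.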

Usage

From source file:hydrograph.engine.cascading.assembly.InputFileHiveTextAssembly.java

License:Apache License

@Override
protected void prepareScheme() {
    LOG.debug("Applying HiveTextScheme to read data from Hive");

    // HiveTextTableDescriptor is developed specifically for handling
    // Text File format with Hive. Hence, the object of table descriptor
    // is created in its respective assembly and not in its base class.

    Configuration conf = new Configuration();
    conf.addResource(new Path(HiveConfigurationMapping.getHiveConf("path_to_hive_site_xml")));

    HiveTableDescriptor.Factory factory = new HiveTableDescriptor.Factory(conf);
    HiveTableDescriptor tb = factory.newInstance(inputHiveFileEntity.getDatabaseName(),
            inputHiveFileEntity.getTableName());

    tableDesc = new HiveTextTableDescriptor(tb.getDatabaseName(), tb.getTableName(), tb.getColumnNames(),
            tb.getColumnTypes(), tb.getPartitionKeys(), tb.getDelimiter(), "",
            getHiveExternalTableLocationPath(), false);

    Fields fields = getFieldsToWrite(tb);
    HydrographDelimitedParser delimitedParser = new HydrographDelimitedParser(
            inputHiveFileEntity.getDelimiter() != null ? inputHiveFileEntity.getDelimiter() : "\1",
            inputHiveFileEntity.getQuote(), null, inputHiveFileEntity.isStrict(), inputHiveFileEntity.isSafe());

    scheme = new TextDelimited(fields, null, false, false, "UTF-8", delimitedParser);

    // scheme = new
    // TextDelimited(fields,inputHiveFileEntity.getDelimiter());
    scheme.setSourceFields(fields);
    scheme.setSinkFields(fields);
}

From source file:hydrograph.engine.utilities.HiveMetastoreTokenProvider.java

License:Apache License

public static void obtainTokenForHiveMetastore(Configuration conf) throws TException, IOException {
    conf.addResource(new Path(HiveConfigurationMapping.getHiveConf("path_to_hive_site_xml")));
    HiveConf hiveConf = new HiveConf();
    hiveConf.addResource(conf);
    try {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        HiveMetaStoreClient hiveMetaStoreClient = new HiveMetaStoreClient(hiveConf);

        if (UserGroupInformation.isSecurityEnabled()) {
            String metastore_uri = conf.get("hive.metastore.uris");

            LOG.trace("Metastore URI:" + metastore_uri);

            // Check for local metastore
            if (metastore_uri != null && metastore_uri.length() > 0) {
                String principal = conf.get("hive.metastore.kerberos.principal");
                String username = ugi.getUserName();

                if (principal != null && username != null) {
                    LOG.debug("username: " + username);
                    LOG.debug("principal: " + principal);

                    String tokenStr;
                    try {
                        // Get a delegation token from the Metastore.
                        tokenStr = hiveMetaStoreClient.getDelegationToken(username, principal);
                        // LOG.debug("Token String: " + tokenStr);
                    } catch (TException e) {
                        LOG.error(e.getMessage(), e);
                        throw new RuntimeException(e);
                    }

                    // Create the token from the token string.
                    Token<DelegationTokenIdentifier> hmsToken = new Token<DelegationTokenIdentifier>();
                    hmsToken.decodeFromUrlString(tokenStr);
                    // LOG.debug("Hive Token: " + hmsToken);

                    // Add the token to the credentials.
                    ugi.addToken(new Text("hive.metastore.delegation.token"), hmsToken);
                    LOG.trace("Added hive.metastore.delegation.token to conf.");
                } else {
                    LOG.debug("Username or principal == NULL");
                    LOG.debug("username= " + username);
                    LOG.debug("principal= " + principal);
                    throw new IllegalArgumentException("username and/or principal is equal to null!");
                }

            } else {
                LOG.info("HiveMetaStore configured in local mode");
            }
        }
    } catch (IOException e) {
        LOG.error(e.getMessage(), e);
        throw new RuntimeException(e);
    } catch (MetaException e) {
        LOG.error(e.getMessage(), e);
        throw new RuntimeException(e);
    }
}

From source file:hydrograph.server.metadata.strategy.HiveMetadataStrategy.java

License:Apache License

/**
 * Used to set the connection for Hive.
 *
 * @param connectionProperties - contains request parameter details
 * @throws ClassNotFoundException
 * @throws SQLException
 */
@SuppressWarnings("unchecked")
@Override
public void setConnection(Map connectionProperties) {
    String userId = connectionProperties
            .getOrDefault(Constants.USERNAME,
                    new ParamsCannotBeNullOrEmpty(Constants.USERNAME + " not found in request parameter"))
            .toString();
    String service_pwd = connectionProperties
            .getOrDefault(Constants.SERVICE_PWD,
                    new ParamsCannotBeNullOrEmpty(Constants.SERVICE_PWD + " not found in request parameter"))
            .toString();
    String databaseName = connectionProperties.getOrDefault(Constants.DATABASE_NAME,
            new ParamsCannotBeNullOrEmpty(Constants.DATABASE_NAME + " name not found in request parameter"))
            .toString();
    String tableName = connectionProperties
            .getOrDefault(Constants.TABLENAME,
                    new ParamsCannotBeNullOrEmpty(Constants.TABLENAME + " not found in request parameter"))
            .toString();

    KerberosUtilities kerberosUtilities = new KerberosUtilities();
    Configuration conf = new Configuration();

    // load hdfs-site.xml and core-site.xml
    String hdfsConfigPath = ServiceUtilities.getServiceConfigResourceBundle()
            .getString(Constants.HDFS_SITE_CONFIG_PATH);
    String coreSiteConfigPath = ServiceUtilities.getServiceConfigResourceBundle()
            .getString(Constants.CORE_SITE_CONFIG_PATH);
    LOG.debug("Loading hdfs-site.xml:" + hdfsConfigPath);
    conf.addResource(new Path(hdfsConfigPath));
    LOG.debug("Loading hdfs-site.xml:" + coreSiteConfigPath);
    conf.addResource(new Path(coreSiteConfigPath));

    try {
        kerberosUtilities.applyKerberosToken(userId, service_pwd, conf);
    } catch (LoginException e1) {
        throw new RuntimeException("Unable to login " + e1.getMessage());
    } catch (IOException e1) {
        throw new RuntimeException("Login failed : " + e1.getMessage());
    }
    this.hiveConf = new HiveConf();
    String pathToHiveSiteXml = ServiceUtilities.getServiceConfigResourceBundle()
            .getString(Constants.HIVE_SITE_CONFIG_PATH);

    if (pathToHiveSiteXml == null || pathToHiveSiteXml.isEmpty()) {
        LOG.error("Error loading hive-site.xml: Path to hive-site.xml should not be null or empty.");
        throw new RuntimeException(
                "Error loading hive-site.xml: Path to hive-site.xml should not be null or empty.");
    }
    LOG.debug("Loading hive-site.xml: " + pathToHiveSiteXml);
    hiveConf.addResource(new Path(pathToHiveSiteXml));

    HiveMetaStoreClient client;
    try {
        client = new HiveMetaStoreClient(hiveConf);
        this.table = client.getTable(databaseName, tableName);
        this.storageDescriptor = table.getSd();
    } catch (MetaException e) {
        throw new RuntimeException(e.getMessage());
    } catch (NoSuchObjectException e) {
        throw new RuntimeException(e.getMessage());
    } catch (TException e) {
        throw new RuntimeException(e.getMessage());
    }

}

From source file:hydrograph.server.service.HydrographService.java

License:Apache License

private void start() {
    int portNumber = Constants.DEFAULT_PORT_NUMBER;
    try {
        portNumber = Integer
                .parseInt(ServiceUtilities.getServiceConfigResourceBundle().getString(Constants.PORT_ID));
        LOG.debug("Port number '" + portNumber + "' fetched from properties file");
    } catch (Exception e) {
        LOG.error("Error fetching port number. Defaulting to " + Constants.DEFAULT_PORT_NUMBER, e);
    }

    /**
     * Setting Port number to the server
     */
    Spark.port(portNumber);

    /**
     * Creating Websocket on Server for Execution tracking service.
     */
    webSocket("/executionTracking", ExecutionTrackingWebsocketHandler.class);

    Spark.post("/getConnectionStatus", new Route() {
        @SuppressWarnings({ "unchecked", "rawtypes", "unused" })
        @Override
        public Object handle(Request request, Response response)
                throws InstantiationException, IllegalAccessException, ClassNotFoundException, JSONException {
            LOG.info("************************getConnectionStatus endpoint - started************************");
            LOG.info("+++ Start: " + new Timestamp((new Date()).getTime()));
            ObjectMapper objectMapper = new ObjectMapper();
            String requestParameters = request.queryParams(Constants.REQUEST_PARAMETERS), dbClassName = null,
                    objectAsString = null;
            JSONObject requestParameterValues = new JSONObject(requestParameters);
            Map metadataProperties = extractingJsonObjects(requestParameterValues);
            String dbTypeToTest = metadataProperties
                    .getOrDefault(Constants.dbType,
                            new ParamsCannotBeNullOrEmpty(Constants.dbType + " Cannot be null or empty"))
                    .toString();
            switch (dbTypeToTest.toLowerCase()) {
            case Constants.ORACLE:
                try {
                    if (ServiceUtilities.getConnectionStatus(metadataProperties,
                            Constants.ORACLE_JDBC_CLASSNAME, Constants.QUERY_TO_TEST)) {
                        LOG.trace("Connection Successful");
                        objectAsString = objectMapper
                                .writeValueAsString("Connection To Oracle database is Successful");
                    } else {
                        LOG.trace("Connection UnSuccessful");
                        objectAsString = objectMapper
                                .writeValueAsString("Connection To Oracle database UnSuccessful");
                    }
                } catch (Exception e) {
                    LOG.error("Connection fails with exception : " + e);
                    objectAsString = e.getLocalizedMessage();
                }
                break;
            case Constants.MYSQL:
                try {
                    if (ServiceUtilities.getConnectionStatus(metadataProperties, Constants.MYSQL_JDBC_CLASSNAME,
                            Constants.QUERY_TO_TEST)) {
                        LOG.trace("Connection Successful");
                        objectAsString = objectMapper
                                .writeValueAsString("Connection To MySQL database is Successful");
                    } else {
                        LOG.trace("Connection UnSuccessful");
                        objectAsString = objectMapper
                                .writeValueAsString("Connection To MySQL database UnSuccessful");
                    }
                } catch (Exception e) {
                    LOG.error("Connection fails with exception : " + e);
                    objectAsString = e.getLocalizedMessage();
                }
                break;

            case Constants.REDSHIFT:
                try {
                    if (ServiceUtilities.getConnectionStatus(metadataProperties,
                            Constants.REDSHIFT_JDBC_CLASSNAME, Constants.DEFAULT_REDRESHIFT_QUERY_TO_TEST)) {
                        LOG.trace("Connection Successful");
                        objectAsString = objectMapper
                                .writeValueAsString("Connection To Redshift database is Successful");
                    } else {
                        LOG.trace("Connection UnSuccessful");
                        objectAsString = objectMapper
                                .writeValueAsString("Connection To Redshift database UnSuccessful");
                    }
                } catch (Exception e) {
                    LOG.error("Connection fails with exception : " + e);
                    objectAsString = e.getLocalizedMessage();
                }
                break;
            case Constants.TERADATA:
                try {
                    if (ServiceUtilities.getConnectionStatus(metadataProperties,
                            Constants.TERADATA_JDBC_CLASSNAME, Constants.QUERY_TO_TEST_TERADATA)) {
                        LOG.trace("Connection Successful");
                        objectAsString = objectMapper
                                .writeValueAsString("Connection To Teradata database is Successful");
                    } else {
                        LOG.trace("Connection UnSuccessful");
                        objectAsString = objectMapper
                                .writeValueAsString("Connection To Teradata database UnSuccessful");
                    }
                } catch (Exception e) {
                    LOG.error("Connection fails with exception : " + e);
                    objectAsString = e.getLocalizedMessage();
                }
                break;
            }
            return objectAsString;
        }

        @SuppressWarnings({ "unchecked", "rawtypes" })
        private Map extractingJsonObjects(JSONObject requestParameterValues) throws JSONException {

            String dbType = null, userId = null, password = null, host = null, port = null, sid = null,
                    driverType = null, query = null, tableName = null, database = null;
            Map metadataProperties = new HashMap();
            if (!requestParameterValues.isNull(Constants.dbType)) {
                dbType = requestParameterValues.getString(Constants.dbType);
                metadataProperties.put(Constants.dbType, dbType);
            }
            if (!requestParameterValues.isNull(Constants.USERNAME)) {
                userId = requestParameterValues.getString(Constants.USERNAME);
                metadataProperties.put(Constants.USERNAME, userId);
            }
            if (!requestParameterValues.isNull(Constants.SERVICE_PWD)) {
                password = requestParameterValues.getString(Constants.SERVICE_PWD);
                metadataProperties.put(Constants.SERVICE_PWD, password);
            }
            if (!requestParameterValues.isNull(Constants.HOST_NAME)) {
                host = requestParameterValues.getString(Constants.HOST_NAME);
                metadataProperties.put(Constants.HOST_NAME, host);
            }
            if (!requestParameterValues.isNull(Constants.PORT_NUMBER)) {
                port = requestParameterValues.getString(Constants.PORT_NUMBER);
                metadataProperties.put(Constants.PORT_NUMBER, port);
            } else {
                if (metadataProperties.get(Constants.dbType).toString().equalsIgnoreCase("mysql")) {
                    port = Constants.MYSQL_DEFAULT_PORT;
                    metadataProperties.put(Constants.PORT_NUMBER, port);

                } else if (metadataProperties.get(Constants.dbType).toString().equalsIgnoreCase("oracle")) {
                    port = Constants.ORACLE_DEFAULT_PORT;
                    metadataProperties.put(Constants.PORT_NUMBER, port);
                }
                LOG.info("Connecting " + dbType + " port is not provided using default port : " + port);
            }
            if (!requestParameterValues.isNull(Constants.SID)) {
                sid = requestParameterValues.getString(Constants.SID);
                metadataProperties.put(Constants.SID, sid);
            }
            if (!requestParameterValues.isNull(Constants.DRIVER_TYPE)) {
                driverType = requestParameterValues.getString(Constants.DRIVER_TYPE);
                metadataProperties.put(Constants.DRIVER_TYPE, driverType);
            }
            if (!requestParameterValues.isNull(Constants.QUERY)) {
                query = requestParameterValues.getString(Constants.QUERY);
                metadataProperties.put(Constants.QUERY, query);
            }
            if (!requestParameterValues.isNull(Constants.TABLENAME)) {
                tableName = requestParameterValues.getString(Constants.TABLENAME);
                metadataProperties.put(Constants.TABLENAME, tableName);
            }
            if (!requestParameterValues.isNull(Constants.DATABASE_NAME)) {
                database = requestParameterValues.getString(Constants.DATABASE_NAME);
                metadataProperties.put(Constants.DATABASE_NAME, database);
            }

            LOG.info("Fetched request parameters are: " + Constants.dbType + " => " + dbType + " "
                    + Constants.USERNAME + " => " + userId + " " + Constants.HOST_NAME + " => " + host + " "
                    + Constants.PORT_NUMBER + " => " + port + " " + Constants.SID + " => " + sid + " "
                    + Constants.DRIVER_TYPE + " => " + driverType + " " + Constants.QUERY + " => " + query + " "
                    + Constants.TABLENAME + " => " + tableName + " " + Constants.DATABASE_NAME + " => "
                    + database + " ");
            return metadataProperties;
        }
    });

    Spark.post("readFromMetastore", new Route() {

        @Override
        public Object handle(Request request, Response response)
                throws ParamsCannotBeNullOrEmpty, ClassNotFoundException, IllegalAccessException, JSONException,
                JsonProcessingException, TableOrQueryParamNotFound, SQLException, InstantiationException {
            LOG.info("************************readFromMetastore endpoint - started************************");
            LOG.info("+++ Start: " + new Timestamp((new Date()).getTime()));
            ObjectMapper objectMapper = new ObjectMapper();
            String requestParameters = request.queryParams(Constants.REQUEST_PARAMETERS), objectAsString = null,
                    dbClassName = null;
            JSONObject requestParameterValues = new JSONObject(requestParameters);
            // Method to extracting request parameter details from input
            // json.
            Map metadataProperties = extractingJsonObjects(requestParameterValues);

            String dbType = metadataProperties
                    .getOrDefault(Constants.dbType,
                            new ParamsCannotBeNullOrEmpty(Constants.dbType + " Cannot be null or empty"))
                    .toString();
            LOG.info("Retrieving schema for " + dbType + " Database.");
            try {
                switch (dbType.toLowerCase()) {
                case Constants.ORACLE:
                    dbClassName = Constants.oracle;
                    OracleMetadataStrategy oracleMetadataHelper = (OracleMetadataStrategy) Class
                            .forName(dbClassName).newInstance();
                    oracleMetadataHelper.setConnection(metadataProperties);
                    objectAsString = objectMapper
                            .writeValueAsString(oracleMetadataHelper.fillComponentSchema(metadataProperties));
                    LOG.trace("Schema json for oracle : " + objectAsString);
                    LOG.info("+++ Stop: " + new Timestamp((new Date()).getTime()));
                    break;
                case Constants.HIVE:
                    dbClassName = Constants.hive;
                    HiveMetadataStrategy hiveMetadataHelper = (HiveMetadataStrategy) Class.forName(dbClassName)
                            .newInstance();
                    hiveMetadataHelper.setConnection(metadataProperties);
                    objectAsString = objectMapper
                            .writeValueAsString(hiveMetadataHelper.fillComponentSchema(metadataProperties));
                    LOG.trace("Schema json for hive : " + objectAsString);
                    LOG.info("+++ Stop: " + new Timestamp((new Date()).getTime()));
                    break;
                case Constants.REDSHIFT:
                    dbClassName = Constants.redshift;
                    RedshiftMetadataStrategy redShiftMetadataHelper = (RedshiftMetadataStrategy) Class
                            .forName(dbClassName).newInstance();
                    redShiftMetadataHelper.setConnection(metadataProperties);
                    objectAsString = objectMapper
                            .writeValueAsString(redShiftMetadataHelper.fillComponentSchema(metadataProperties));
                    LOG.trace("Schema json for redshift : " + objectAsString);
                    LOG.info("+++ Stop: " + new Timestamp((new Date()).getTime()));
                    break;
                case Constants.MYSQL:
                    dbClassName = Constants.mysql;
                    MysqlMetadataStrategy mysqlMetadataHelper = (MysqlMetadataStrategy) Class
                            .forName(dbClassName).newInstance();
                    mysqlMetadataHelper.setConnection(metadataProperties);
                    objectAsString = objectMapper
                            .writeValueAsString(mysqlMetadataHelper.fillComponentSchema(metadataProperties));
                    LOG.trace("Schema json for mysql : " + objectAsString);
                    LOG.info("+++ Stop: " + new Timestamp((new Date()).getTime()));
                    break;
                case Constants.TERADATA:
                    dbClassName = Constants.teradata;
                    TeradataMetadataStrategy teradataMetadataHelper = (TeradataMetadataStrategy) Class
                            .forName(dbClassName).newInstance();
                    teradataMetadataHelper.setConnection(metadataProperties);
                    objectAsString = objectMapper
                            .writeValueAsString(teradataMetadataHelper.fillComponentSchema(metadataProperties));
                    LOG.trace("Schema json for teradata : " + objectAsString);
                    LOG.info("+++ Stop: " + new Timestamp((new Date()).getTime()));
                    break;
                }
            } catch (Exception e) {
                LOG.error("Metadata read for database  '" + dbType + "' not completed.");
                LOG.error("Exception : " + e);
                response.status(400);
                return "Metadata read for database '" + dbType + "' not completed.";
            }
            LOG.info("Class Name used for " + dbType + " Is : " + dbClassName);
            LOG.debug("Json for " + dbType + " : " + objectAsString);
            return objectAsString;
        }

        @SuppressWarnings({ "unchecked", "rawtypes" })
        private Map extractingJsonObjects(JSONObject requestParameterValues) throws JSONException {

            String dbType = null, userId = null, password = null, host = null, port = null, sid = null,
                    driverType = null, query = null, tableName = null, database = null;
            Map metadataProperties = new HashMap();
            if (!requestParameterValues.isNull(Constants.dbType)) {
                dbType = requestParameterValues.getString(Constants.dbType);
                metadataProperties.put(Constants.dbType, dbType);
            }
            if (!requestParameterValues.isNull(Constants.USERNAME)) {
                userId = requestParameterValues.getString(Constants.USERNAME);
                metadataProperties.put(Constants.USERNAME, userId);
            }
            if (!requestParameterValues.isNull(Constants.SERVICE_PWD)) {
                password = requestParameterValues.getString(Constants.SERVICE_PWD);
                metadataProperties.put(Constants.SERVICE_PWD, password);
            }
            if (!requestParameterValues.isNull(Constants.HOST_NAME)) {
                host = requestParameterValues.getString(Constants.HOST_NAME);
                metadataProperties.put(Constants.HOST_NAME, host);
            }
            if (!requestParameterValues.isNull(Constants.PORT_NUMBER)) {
                port = requestParameterValues.getString(Constants.PORT_NUMBER);
                metadataProperties.put(Constants.PORT_NUMBER, port);
            } else {
                if (metadataProperties.get(Constants.dbType).toString().equalsIgnoreCase("mysql")) {
                    port = Constants.MYSQL_DEFAULT_PORT;
                    metadataProperties.put(Constants.PORT_NUMBER, port);

                } else if (metadataProperties.get(Constants.dbType).toString().equalsIgnoreCase("oracle")) {
                    port = Constants.ORACLE_DEFAULT_PORT;
                    metadataProperties.put(Constants.PORT_NUMBER, port);
                }
                LOG.info("Connecting " + dbType + " port is not provided using default port : " + port);
            }
            if (!requestParameterValues.isNull(Constants.SID)) {
                sid = requestParameterValues.getString(Constants.SID);
                metadataProperties.put(Constants.SID, sid);
            }
            if (!requestParameterValues.isNull(Constants.DRIVER_TYPE)) {
                driverType = requestParameterValues.getString(Constants.DRIVER_TYPE);
                metadataProperties.put(Constants.DRIVER_TYPE, driverType);
            }
            if (!requestParameterValues.isNull(Constants.QUERY)) {
                query = requestParameterValues.getString(Constants.QUERY);
                metadataProperties.put(Constants.QUERY, query);
            }
            if (!requestParameterValues.isNull(Constants.TABLENAME)) {
                tableName = requestParameterValues.getString(Constants.TABLENAME);
                metadataProperties.put(Constants.TABLENAME, tableName);
            }
            if (!requestParameterValues.isNull(Constants.DATABASE_NAME)) {
                database = requestParameterValues.getString(Constants.DATABASE_NAME);
                metadataProperties.put(Constants.DATABASE_NAME, database);
            }

            LOG.info("Fetched request parameters are: " + Constants.dbType + " => " + dbType + " "
                    + Constants.USERNAME + " => " + userId + " " + Constants.HOST_NAME + " => " + host + " "
                    + Constants.PORT_NUMBER + " => " + port + " " + Constants.SID + " => " + sid + " "
                    + Constants.DRIVER_TYPE + " => " + driverType + " " + Constants.QUERY + " => " + query + " "
                    + Constants.TABLENAME + " => " + tableName + " " + Constants.DATABASE_NAME + " => "
                    + database + " ");
            return metadataProperties;
        }
    });

    Spark.post("/read", new Route() {
        @Override
        public Object handle(Request request, Response response) {
            LOG.info("************************read endpoint - started************************");
            LOG.info("+++ Start: " + new Timestamp((new Date()).getTime()));
            String jobId = request.queryParams(Constants.JOB_ID);
            String componentId = request.queryParams(Constants.COMPONENT_ID);
            String socketId = request.queryParams(Constants.SOCKET_ID);
            String basePath = request.queryParams(Constants.BASE_PATH);

            // String host = request.queryParams(Constants.HOST);
            String userID = request.queryParams(Constants.USER_ID);
            String password = request.queryParams(Constants.SERVICE_PWD);

            double sizeOfData = Double.parseDouble(request.queryParams(Constants.FILE_SIZE)) * 1024 * 1024;
            LOG.info("Base Path: {}, Job Id: {}, Component Id: {}, Socket ID: {}, User ID:{}, DataSize:{}",
                    basePath, jobId, componentId, socketId, userID, sizeOfData);

            String batchID = jobId + "_" + componentId + "_" + socketId;
            String tempLocationPath = ServiceUtilities.getServiceConfigResourceBundle()
                    .getString(Constants.TEMP_LOCATION_PATH);
            String filePath = tempLocationPath + "/" + batchID + ".csv";
            try {
                readFileFromHDFS(basePath + "/debug/" + jobId + "/" + componentId + "_" + socketId, sizeOfData,
                        filePath, userID, password);
                LOG.info("+++ Stop: " + new Timestamp((new Date()).getTime()));
            } catch (Exception e) {
                LOG.error("Error in reading debug files", e);
                return "error";
            }
            return filePath;
        }

        /**
         * This method will read the HDFS file, fetch the records from it
         * and write its records to a local file on edge node with size <=
         * {@code sizeOfData} passed in parameter.
         *
         * @param hdfsFilePath   path of HDFS file from where records to be read
         * @param sizeOfData     defines the size of data (in bytes) to be read from
         *                       HDFS file
         * @param remoteFileName after reading the data of {@code sizeOfData} bytes
         *                       from HDFS file, it will be written to local file on
         *                       edge node with file name {@code remoteFileName}
         * @param userId
         * @param password
         */
        private void readFileFromHDFS(String hdfsFilePath, double sizeOfData, String remoteFileName,
                String userId, String password) {
            try {
                Path path = new Path(hdfsFilePath);
                LOG.debug("Reading Debug file:" + hdfsFilePath);
                Configuration conf = new Configuration();

                // load hdfs-site.xml and core-site.xml
                String hdfsConfigPath = ServiceUtilities.getServiceConfigResourceBundle()
                        .getString(Constants.HDFS_SITE_CONFIG_PATH);
                String coreSiteConfigPath = ServiceUtilities.getServiceConfigResourceBundle()
                        .getString(Constants.CORE_SITE_CONFIG_PATH);
                LOG.debug("Loading hdfs-site.xml:" + hdfsConfigPath);
                conf.addResource(new Path(hdfsConfigPath));
                LOG.debug("Loading hdfs-site.xml:" + coreSiteConfigPath);
                conf.addResource(new Path(coreSiteConfigPath));

                KerberosUtilities kerberosUtilities = new KerberosUtilities();
                // apply kerberos token
                kerberosUtilities.applyKerberosToken(userId, password, conf);

                listAndWriteFiles(remoteFileName, path, conf, sizeOfData);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }

        /**
         * This method will list all files for {@code path}, read all files
         * and writes its data to a local file on edge node with size <=
         * {@code sizeOfData} passed in parameter.
         *
         * @param remoteFileName
         * @param path
         * @param conf
         * @param sizeOfData
         * @throws IOException
         */
        private void listAndWriteFiles(String remoteFileName, Path path, Configuration conf, double sizeOfData)
                throws IOException {
            FileSystem fs = FileSystem.get(conf);
            FileStatus[] status = fs.listStatus(path);
            File remoteFile = new File(remoteFileName);

            OutputStream os = new FileOutputStream(remoteFileName);
            try {

                int numOfBytes = 0;
                for (int i = 0; i < status.length; i++) {
                    BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(status[i].getPath())));
                    String line = "";
                    line = br.readLine();
                    if (line != null) {
                        // header will only get fetch from first part file
                        // and it
                        // will skip header from remaining files
                        if (numOfBytes == 0) {
                            os.write((line + "\n").toString().getBytes());
                            numOfBytes += line.toString().length();
                        }
                        while ((line = br.readLine()) != null) {
                            numOfBytes += line.toString().length();
                            // line = br.readLine();
                            if (numOfBytes <= sizeOfData) {
                                os.write((line + "\n").toString().getBytes());
                            } else {
                                break;
                            }
                        }
                    }
                    br.close();
                    remoteFile.setReadable(true, false);
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            } finally {
                os.close();
                fs.close();
            }
        }

    });

    Spark.post("/delete", new Route() {
        @Override
        public Object handle(Request request, Response response) {
            LOG.info("************************delete endpoint - started************************");
            LOG.info("+++ Start: " + new Timestamp((new Date()).getTime()));
            response.type("text/json");
            String jobId = request.queryParams(Constants.JOB_ID);
            String basePath = request.queryParams(Constants.BASE_PATH);
            String componentId = request.queryParams(Constants.COMPONENT_ID);
            String socketId = request.queryParams(Constants.SOCKET_ID);
            String userID = request.queryParams(Constants.USER_ID);
            String password = request.queryParams(Constants.SERVICE_PWD);

            LOG.info("Base Path: {}, Job Id: {}, Component Id: {}, Socket ID: {}, User ID:{}", basePath, jobId,
                    componentId, socketId, userID);

            try {
                removeDebugFiles(basePath, jobId, componentId, socketId, userID, password);
                LOG.info("+++ Stop: " + new Timestamp((new Date()).getTime()));
            } catch (Exception e) {
                LOG.error("Error in deleting debug files", e);
            }
            return "error";
        }

        private void removeDebugFiles(String basePath, String jobId, String componentId, String socketId,
                String userID, String password) {
            try {
                // DebugFilesReader debugFilesReader = new
                // DebugFilesReader(basePath, jobId, componentId, socketId,
                // userID,
                // password);
                delete(basePath, jobId, componentId, socketId, userID, password);
            } catch (Exception e) {
                LOG.error("Error while deleting the debug file", e);
                throw new RuntimeException(e);
            }
        }

        /**
         * Deletes the jobId directory
         *
         * @param password
         * @param userID
         * @param socketId
         * @param componentId
         * @param jobId
         * @param basePath
         * @throws IOException
         */
        public void delete(String basePath, String jobId, String componentId, String socketId, String userID,
                String password) throws IOException {
            LOG.trace("Entering method delete()");
            String deletePath = basePath + "/debug/" + jobId;
            Configuration configuration = new Configuration();
            FileSystem fileSystem = FileSystem.get(configuration);
            Path deletingFilePath = new Path(deletePath);
            if (!fileSystem.exists(deletingFilePath)) {
                throw new PathNotFoundException(deletingFilePath.toString());
            } else {
                // Delete file
                fileSystem.delete(deletingFilePath, true);
                LOG.info("Deleted path : " + deletePath);
            }
            fileSystem.close();
        }
    });

    Spark.post("/deleteLocalDebugFile", new Route() {
        @Override
        public Object handle(Request request, Response response) {
            String error = "";
            LOG.info("+++ Start: " + new Timestamp((new Date()).getTime()));
            LOG.info("************************deleteLocalDebugFile endpoint - started************************");
            try {
                String jobId = request.queryParams(Constants.JOB_ID);
                String componentId = request.queryParams(Constants.COMPONENT_ID);
                String socketId = request.queryParams(Constants.SOCKET_ID);
                String batchID = jobId + "_" + componentId + "_" + socketId;
                String tempLocationPath = ServiceUtilities.getServiceConfigResourceBundle()
                        .getString(Constants.TEMP_LOCATION_PATH);

                LOG.info("Job Id: {}, Component Id: {}, Socket ID: {}, TemporaryPath: {}", jobId, componentId,
                        socketId, tempLocationPath);
                LOG.debug("File to be deleted: " + tempLocationPath + "/" + batchID + ".csv");
                File file = new File(tempLocationPath + "/" + batchID + ".csv");
                file.delete();
                LOG.trace("Local debug file deleted successfully.");
                return "Success";
            } catch (Exception e) {
                LOG.error("Error in deleting local debug file.", e);
                error = e.getMessage();
            }
            LOG.info("+++ Stop: " + new Timestamp((new Date()).getTime()));
            return "Local file delete failed. Error: " + error;
        }
    });

    // TODO : Keep this for test
    Spark.post("/post", new Route() {

        @Override
        public Object handle(Request request, Response response) {
            LOG.info("****TEST SPARK POST STARTED**********");
            response.type("text/json");
            return "calling post...";
        }
    });

    // TODO : Keep this for test
    Spark.get("/test", new Route() {

        @Override
        public Object handle(Request request, Response response) {
            LOG.info("****TEST SPARK GET STARTED**********");
            response.type("text/json");
            response.status(200);
            response.body("Test successful!");
            return "Test successful!";
        }
    });

    Spark.post("/filter", new Route() {
        @Override
        public Object handle(Request request, Response response) {

            LOG.info("************************filter - started************************");
            LOG.info("+++ Start: " + new Timestamp((new Date()).getTime()));

            Gson gson = new Gson();
            String json = request.queryParams(Constants.REQUEST_PARAMETERS);
            RemoteFilterJson remoteFilterJson = gson.fromJson(json, RemoteFilterJson.class);

            String jobId = remoteFilterJson.getJobDetails().getUniqueJobID();
            String componentId = remoteFilterJson.getJobDetails().getComponentID();
            String socketId = remoteFilterJson.getJobDetails().getComponentSocketID();
            String basePath = remoteFilterJson.getJobDetails().getBasepath();
            String username = remoteFilterJson.getJobDetails().getUsername();
            String password = remoteFilterJson.getJobDetails().getService_pwd();
            double outputFileSizeInMB = remoteFilterJson.getFileSize();
            double sizeOfDataInByte = outputFileSizeInMB * 1024 * 1024;

            String condition = parseSQLQueryToLingualQuery(remoteFilterJson);

            LOG.info("Base Path: {}, Job Id: {}, Component Id: {}, Socket ID: {}, User ID:{}, DataSize:{}",
                    basePath, jobId, componentId, socketId, username, sizeOfDataInByte);

            String batchID = jobId + "_" + componentId + "_" + socketId;

            String tempLocationPath = ServiceUtilities.getServiceConfigResourceBundle()
                    .getString(Constants.TEMP_LOCATION_PATH);

            String filePath = tempLocationPath + "/" + batchID + ".csv";
            String UUID = generateUUID();
            String uniqueId = batchID + "_" + UUID;
            String linugalMetaDataPath = basePath + "/filter/" + UUID;

            String fieldNames[] = getHeader(basePath + "/debug/" + jobId + "/" + componentId + "_" + socketId,
                    username, password);
            try {
                HashMap<String, Type> fieldNameAndDatatype = getFieldNameAndType(remoteFilterJson);
                Type[] fieldTypes = getFieldTypeFromMap(fieldNames, fieldNameAndDatatype);
                Configuration conf = getConfiguration(username, password);

                new LingualFilter().filterData(linugalMetaDataPath, uniqueId,
                        basePath + "/debug/" + jobId + "/" + componentId + "_" + socketId, sizeOfDataInByte,
                        filePath, condition, fieldNames, fieldTypes, conf);

                LOG.info("debug output path : " + filePath);
                LOG.info("+++ Stop: " + new Timestamp((new Date()).getTime()));
            } catch (Exception e) {
                LOG.error("Error in reading debug files", e);
                return "error";
            } finally {
                try {
                    System.gc();
                    deleteLingualResult(linugalMetaDataPath);
                } catch (Exception e) {
                    LOG.error("Error in deleting lingual result", e);
                    return "Error in deleting lingual result: " + e.getMessage();
                }
            }

            return filePath;
        }

        private Type[] getFieldTypeFromMap(String[] fieldNames, HashMap<String, Type> fieldNameAndDatatype) {
            Type[] type = new Type[fieldNameAndDatatype.size()];
            int i = 0;
            for (String eachFieldName : fieldNames) {
                type[i++] = fieldNameAndDatatype.get(eachFieldName);
            }
            return type;
        }

        private String[] getHeader(String path, String username, String password) {
            String[] header = readFile(path, username, password);
            return header;
        }

        private String[] readFile(String hdfsFilePath, String username, String password) {
            String[] header = null;
            try {
                Path path = new Path(hdfsFilePath);
                LOG.debug("Reading Debug file:" + hdfsFilePath);
                Configuration conf = getConfiguration(username, password);

                header = getHeaderArray(path, conf);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
            return header;
        }

        private Path filterOutSuccessFile(FileStatus[] fileStatus) {
            for (FileStatus status : fileStatus) {
                if (status.getPath().getName().toUpperCase().contains("_SUCCESS"))
                    continue;
                else
                    return status.getPath();
            }
            return null;
        }

        private String[] getHeaderArray(Path path, Configuration conf) throws IOException {
            FileSystem fs = FileSystem.get(conf);
            FileStatus[] status = fs.listStatus(path);
            String line = "";
            try {
                BufferedReader br = new BufferedReader(
                        new InputStreamReader(fs.open(filterOutSuccessFile(status))));

                line = br.readLine();
                br.close();

            } catch (Exception e) {
                throw new RuntimeException(e);
            } finally {
                fs.close();
            }
            return line.split(",");
        }

        private Configuration getConfiguration(String userId, String password)
                throws LoginException, IOException {
            Configuration conf = new Configuration();

            // load hdfs-site.xml and core-site.xml
            String hdfsConfigPath = ServiceUtilities.getServiceConfigResourceBundle()
                    .getString(Constants.HDFS_SITE_CONFIG_PATH);
            String coreSiteConfigPath = ServiceUtilities.getServiceConfigResourceBundle()
                    .getString(Constants.CORE_SITE_CONFIG_PATH);
            LOG.debug("Loading hdfs-site.xml:" + hdfsConfigPath);
            conf.addResource(new Path(hdfsConfigPath));
            LOG.debug("Loading hdfs-site.xml:" + coreSiteConfigPath);
            conf.addResource(new Path(coreSiteConfigPath));

            KerberosUtilities kerberosUtilities = new KerberosUtilities();
            // apply kerberos token
            kerberosUtilities.applyKerberosToken(userId, password, conf);
            return conf;
        }

        private void deleteLingualResult(String deletePath) throws IOException {
            Configuration configuration = new Configuration();
            FileSystem fileSystem = FileSystem.get(configuration);
            Path deletingFilePath = new Path(deletePath);

            if (!fileSystem.exists(deletingFilePath)) {
                throw new PathNotFoundException(deletingFilePath.toString());
            } else {
                boolean isDeleted = fileSystem.delete(deletingFilePath, true);
                if (isDeleted) {
                    fileSystem.deleteOnExit(deletingFilePath);
                }
                LOG.info("Deleted path : " + deletePath);
            }

            fileSystem.close();
        }

        private String generateUUID() {
            return String.valueOf(UUID.randomUUID());
        }

        private String parseSQLQueryToLingualQuery(RemoteFilterJson remoteFilterJson) {
            ANTLRInputStream stream = new ANTLRInputStream(remoteFilterJson.getCondition());
            QueryParserLexer lexer = new QueryParserLexer(stream);
            CommonTokenStream tokenStream = new CommonTokenStream(lexer);
            QueryParserParser parser = new QueryParserParser(tokenStream);
            parser.removeErrorListeners();
            LingualQueryCreator customVisitor = new LingualQueryCreator(remoteFilterJson.getSchema());
            String condition = customVisitor.visit(parser.eval());
            return condition;
        }

        private HashMap<String, Type> getFieldNameAndType(RemoteFilterJson remoteFilterJson)
                throws ClassNotFoundException {
            HashMap<String, Type> fieldDataTypeMap = new HashMap<>();
            Type type;
            for (int i = 0; i < remoteFilterJson.getSchema().size(); i++) {
                Class clazz = Class.forName(remoteFilterJson.getSchema().get(i).getDataTypeValue());
                if (clazz.getSimpleName().toString().equalsIgnoreCase("Date")) {
                    type = new SQLTimestampCoercibleType();
                } else {
                    type = clazz;
                }
                fieldDataTypeMap.put(remoteFilterJson.getSchema().get(i).getFieldName(), type);
            }
            return fieldDataTypeMap;
        }

    });
}

From source file:io.amient.yarn1.YarnClient.java

License:Open Source License

/**
 * This method should be called by the implementing application static main
 * method. It does all the work around creating a yarn application and
 * submitting the request to the yarn resource manager. The class given in
 * the masterClass argument will be run inside the yarn-allocated master
 * container.
 */
public static void submitApplicationMaster(Properties appConfig, Class<? extends YarnMaster> masterClass,
        String[] args, Boolean awaitCompletion) throws Exception {
    log.info("Yarn1 App Configuration:");
    for (Object param : appConfig.keySet()) {
        log.info(param.toString() + " = " + appConfig.get(param).toString());
    }
    String yarnConfigPath = appConfig.getProperty("yarn1.site", "/etc/hadoop");
    String masterClassName = masterClass.getName();
    appConfig.setProperty("yarn1.master.class", masterClassName);
    String applicationName = appConfig.getProperty("yarn1.application.name", masterClassName);
    log.info("--------------------------------------------------------------");

    if (Boolean.valueOf(appConfig.getProperty("yarn1.local.mode", "false"))) {
        YarnMaster.run(appConfig, args);
        return;
    }

    int masterPriority = Integer.valueOf(
            appConfig.getProperty("yarn1.master.priority", String.valueOf(YarnMaster.DEFAULT_MASTER_PRIORITY)));
    int masterMemoryMb = Integer.valueOf(appConfig.getProperty("yarn1.master.memory.mb",
            String.valueOf(YarnMaster.DEFAULT_MASTER_MEMORY_MB)));
    int masterNumCores = Integer.valueOf(
            appConfig.getProperty("yarn1.master.num.cores", String.valueOf(YarnMaster.DEFAULT_MASTER_CORES)));
    String queue = appConfig.getProperty("yarn1.queue");

    Configuration yarnConfig = new YarnConfiguration();
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/core-site.xml"));
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/hdfs-site.xml"));
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/yarn-site.xml"));
    for (Map.Entry<Object, Object> entry : appConfig.entrySet()) {
        yarnConfig.set(entry.getKey().toString(), entry.getValue().toString());
    }

    final org.apache.hadoop.yarn.client.api.YarnClient yarnClient = org.apache.hadoop.yarn.client.api.YarnClient
            .createYarnClient();
    yarnClient.init(yarnConfig);
    yarnClient.start();

    for (NodeReport report : yarnClient.getNodeReports(NodeState.RUNNING)) {
        log.debug("Node report:" + report.getNodeId() + " @ " + report.getHttpAddress() + " | "
                + report.getCapability());
    }

    log.info("Submitting application master class " + masterClassName);

    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    final ApplicationId appId = appResponse.getApplicationId();
    if (appId == null) {
        System.exit(111);
    } else {
        appConfig.setProperty("am.timestamp", String.valueOf(appId.getClusterTimestamp()));
        appConfig.setProperty("am.id", String.valueOf(appId.getId()));
    }

    YarnClient.distributeResources(yarnConfig, appConfig, applicationName);

    String masterJvmArgs = appConfig.getProperty("yarn1.master.jvm.args", "");
    YarnContainerContext masterContainer = new YarnContainerContext(yarnConfig, appConfig, masterJvmArgs,
            masterPriority, masterMemoryMb, masterNumCores, applicationName, YarnMaster.class, args);

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName(masterClassName);
    appContext.setResource(masterContainer.capability);
    appContext.setPriority(masterContainer.priority);
    appContext.setQueue(queue);
    appContext.setApplicationType(appConfig.getProperty("yarn1.application.type", "YARN"));
    appContext.setAMContainerSpec(masterContainer.createContainerLaunchContext());

    log.info("Master container spec: " + masterContainer.capability);

    yarnClient.submitApplication(appContext);

    ApplicationReport report = yarnClient.getApplicationReport(appId);
    log.info("Tracking URL: " + report.getTrackingUrl());

    if (awaitCompletion) {
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                if (!yarnClient.isInState(Service.STATE.STOPPED)) {
                    log.info("Killing yarn application in shutdown hook");
                    try {
                        yarnClient.killApplication(appId);
                    } catch (Throwable e) {
                        log.error("Failed to kill yarn application - please check YARN Resource Manager", e);
                    }
                }
            }
        });

        float lastProgress = -0.0f;
        while (true) {
            try {
                Thread.sleep(10000);
                report = yarnClient.getApplicationReport(appId);
                if (lastProgress != report.getProgress()) {
                    lastProgress = report.getProgress();
                    log.info(report.getApplicationId() + " " + (report.getProgress() * 100.00) + "% "
                            + (System.currentTimeMillis() - report.getStartTime()) + "(ms) "
                            + report.getDiagnostics());
                }
                if (!report.getFinalApplicationStatus().equals(FinalApplicationStatus.UNDEFINED)) {
                    log.info(report.getApplicationId() + " " + report.getFinalApplicationStatus());
                    log.info("Tracking url: " + report.getTrackingUrl());
                    log.info("Finish time: " + ((System.currentTimeMillis() - report.getStartTime()) / 1000)
                            + "(s)");
                    break;
                }
            } catch (Throwable e) {
                log.error("Master Heart Beat Error - terminating", e);
                yarnClient.killApplication(appId);
                Thread.sleep(2000);
            }
        }
        yarnClient.stop();

        if (!report.getFinalApplicationStatus().equals(FinalApplicationStatus.SUCCEEDED)) {
            System.exit(112);
        }
    }
    yarnClient.stop();
}

From source file:io.aos.t4f.hadoop.mapreduce.WordCountMapReduceTest2.java

License:Apache License

public static int main(String... args) throws Exception {

    // Get the default configuration object
    Configuration conf = new Configuration();

    // Add resources
    conf.addResource("hdfs-default.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("mapred-default.xml");
    conf.addResource("mapred-site.xml");

    Job job = new Job(conf);
    job.setJobName("WordCount");

    List<String> other_args = parseArguments(args, job);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);

    // the keys are words (strings)
    job.setOutputKeyClass(Text.class);
    // the values are counts (ints)
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(MapClass.class);
    job.setCombinerClass(ReduceClass.class);
    job.setReducerClass(ReduceClass.class);

    // Set the input format class
    job.setInputFormatClass(TextInputFormat.class);
    // Set the output format class
    job.setOutputFormatClass(TextOutputFormat.class);
    // Set the input path
    TextInputFormat.setInputPaths(job, other_args.get(0));
    // Set the output path
    TextOutputFormat.setOutputPath(job, new Path(other_args.get(1)));

    /*
     * Set the minimum and maximum split sizes. This parameter helps to
     * specify the number of map tasks: for each input split, there will be
     * a separate map task. In this example each split is of size 32 MB.
     */
    TextInputFormat.setMinInputSplitSize(job, 32 * MEGABYTES);
    TextInputFormat.setMaxInputSplitSize(job, 32 * MEGABYTES);

    // Set the jar file to run
    job.setJarByClass(WordCountMapReduceTest2.class);

    // Submit the job
    Date startTime = new Date();
    System.out.println("Job started: " + startTime);
    int exitCode = job.waitForCompletion(true) ? 0 : 1;

    if (exitCode == 0) {
        Date end_time = new Date();
        System.out.println("Job ended: " + end_time);
        System.out.println("The job took " + (end_time.getTime() - startTime.getTime()) / 1000 + " seconds.");
    } else {
        System.out.println("Job Failed!!!");
    }

    return exitCode;

}

From source file:io.fluo.stress.trie.NumberIngest.java

License:Apache License

private static void setupHdfs(String hadoopPrefix, String testDir, int numMappers, int numPerMapper)
        throws IllegalArgumentException, IOException {
    Configuration config = new Configuration();
    config.addResource(new Path(hadoopPrefix + "/conf/core-site.xml"));
    config.addResource(new Path(hadoopPrefix + "/conf/hdfs-site.xml"));
    @SuppressWarnings("resource")
    FileSystem hdfs = FileSystem.get(config);

    String inputDir = testDir + "/input";

    hdfs.mkdirs(new Path(inputDir));
    try (FSDataOutputStream fos = hdfs.create(new Path(inputDir + "/data"))) {
        for (int i = 0; i < numMappers; i++) {
            fos.writeUTF(Integer.toString(numPerMapper) + "\n");
        }
    }
}

From source file:io.fluo.webindex.data.spark.IndexEnv.java

License:Apache License

public static FileSystem getHDFS(String hadoopConfDir) throws IOException {
    Configuration config = new Configuration();
    config.addResource(hadoopConfDir);
    return FileSystem.get(config);
}

From source file:io.hops.hopsworks.common.security.BaseHadoopClientsService.java

License:Open Source License

@PostConstruct
public void init() {
    String confDir = settings.getHadoopConfDir();
    File coreSite = new File(confDir, "core-site.xml");
    if (!coreSite.exists()) {
        handleMissingConf("core-site.xml", confDir);
    }

    Configuration conf = new Configuration();
    conf.addResource(new Path(coreSite.getAbsolutePath()));

    sslConf = new Configuration(false);
    String hadoopConfDir = settings.getHadoopConfDir();
    File serverSSLConf = new File(hadoopConfDir, conf.get(SSLFactory.SSL_SERVER_CONF_KEY, "ssl-server.xml"));
    sslConf.addResource(new Path(serverSSLConf.getAbsolutePath()));
    superKeystorePath = sslConf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY));
    superKeystorePassword = sslConf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY));
    superTrustStorePath = sslConf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY));
    superTrustStorePassword = sslConf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY));
    try {
        superuser = UserGroupInformation.getLoginUser().getUserName();
    } catch (IOException ex) {
        throw new IllegalStateException("Could not identify login user");
    }
}

From source file:io.hops.security.HopsUtil.java

License:Apache License

private static Configuration generateSSLServerConf(Configuration conf, String cryptoMaterialPassword) {
    Configuration sslConf = new Configuration(false);
    sslConf.set(
            FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
                    FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY),
            HopsSSLSocketFactory.LOCALIZED_KEYSTORE_FILE_NAME);
    sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), cryptoMaterialPassword);
    sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY), cryptoMaterialPassword);

    sslConf.set(
            FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
                    FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY),
            HopsSSLSocketFactory.LOCALIZED_TRUSTSTORE_FILE_NAME);
    sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), cryptoMaterialPassword);

    sslConf.set(
            FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
                    FileBasedKeyStoresFactory.SSL_PASSWORDFILE_LOCATION_TPL_KEY),
            HopsSSLSocketFactory.LOCALIZED_PASSWD_FILE_NAME);

    Configuration sslClientConf = new Configuration(false);
    String sslClientResource = conf.get(SSLFactory.SSL_CLIENT_CONF_KEY, "ssl-client.xml");
    sslClientConf.addResource(sslClientResource);
    long keyStoreReloadInterval = sslClientConf.getLong(
            FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.CLIENT,
                    FileBasedKeyStoresFactory.SSL_KEYSTORE_RELOAD_INTERVAL_TPL_KEY),
            FileBasedKeyStoresFactory.DEFAULT_SSL_KEYSTORE_RELOAD_INTERVAL);
    String timeUnitStr = sslClientConf.get(
            FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.CLIENT,
                    FileBasedKeyStoresFactory.SSL_KEYSTORE_RELOAD_TIMEUNIT_TPL_KEY),
            FileBasedKeyStoresFactory.DEFAULT_SSL_KEYSTORE_RELOAD_TIMEUNIT);
    long trustStoreReloadInterval = sslClientConf.getLong(
            FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.CLIENT,
                    FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY),
            FileBasedKeyStoresFactory.DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL);

    sslConf.setLong(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_KEYSTORE_RELOAD_INTERVAL_TPL_KEY), keyStoreReloadInterval);
    sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
            FileBasedKeyStoresFactory.SSL_KEYSTORE_RELOAD_TIMEUNIT_TPL_KEY), timeUnitStr);
    sslConf.setLong(
            FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
                    FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY),
            trustStoreReloadInterval);

    return sslConf;
}