Example usage for org.apache.hadoop.conf Configuration addResource

Introduction

This page shows example usage of the org.apache.hadoop.conf.Configuration method addResource.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource.

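Before the per-project examples, here is a minimal, self-contained sketch of the common addResource overloads. It is illustrative only: the file names, classpath resources, and the example.key property are placeholders rather than values taken from the examples below.

import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddResourceSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Classpath name: the resource is looked up on the classpath by name.
        conf.addResource("hbase-site.xml");

        // Path: loads an XML configuration file from the file system (placeholder path).
        conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));

        // InputStream: e.g. an XML resource bundled with the application (placeholder resource).
        InputStream in = AddResourceSketch.class.getResourceAsStream("/my-conf.xml");
        if (in != null) {
            conf.addResource(in);
        }

        // Configuration: merges the properties of another Configuration object,
        // matching the prototype shown above.
        Configuration other = new Configuration(false);
        other.set("example.key", "example.value");
        conf.addResource(other);

        // Resources are loaded lazily, when a property is first read.
        System.out.println(conf.get("example.key"));
    }
}

Resources added later override values loaded from earlier ones, unless a property was marked final in an earlier resource.
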
Usage

From source file:com.streamsets.pipeline.stage.destination.hbase.HBaseTarget.java

License:Apache License

private Configuration getHBaseConfiguration(List<ConfigIssue> issues) {
    Configuration hbaseConf = HBaseConfiguration.create();
    if (hbaseConfDir != null && !hbaseConfDir.isEmpty()) {
        File hbaseConfigDir = new File(hbaseConfDir);
        if (getContext().isClusterMode() && hbaseConfigDir.isAbsolute()) {
            //Do not allow absolute hbase config directory in cluster mode
            issues.add(getContext().createConfigIssue(Groups.HBASE.name(), HBASE_CONF_DIR_CONFIG,
                    Errors.HBASE_24, hbaseConfDir));
        } else {
            if (!hbaseConfigDir.isAbsolute()) {
                hbaseConfigDir = new File(getContext().getResourcesDirectory(), hbaseConfDir).getAbsoluteFile();
            }
            if (!hbaseConfigDir.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HBASE.name(), HBASE_CONF_DIR_CONFIG,
                        Errors.HBASE_19, hbaseConfDir));
            } else if (!hbaseConfigDir.isDirectory()) {
                issues.add(getContext().createConfigIssue(Groups.HBASE.name(), HBASE_CONF_DIR_CONFIG,
                        Errors.HBASE_20, hbaseConfDir));
            } else {
                File hbaseSiteXml = new File(hbaseConfigDir, "hbase-site.xml");
                if (hbaseSiteXml.exists()) {
                    if (!hbaseSiteXml.isFile()) {
                        issues.add(getContext().createConfigIssue(Groups.HBASE.name(), HBASE_CONF_DIR_CONFIG,
                                Errors.HBASE_21, hbaseConfDir, "hbase-site.xml"));
                    }
                    hbaseConf.addResource(new Path(hbaseSiteXml.getAbsolutePath()));
                }
            }
        }
    }
    for (Map.Entry<String, String> config : hbaseConfigs.entrySet()) {
        hbaseConf.set(config.getKey(), config.getValue());
    }
    return hbaseConf;
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.HdfsTarget.java

License:Apache License

Configuration getHadoopConfiguration(List<ConfigIssue> issues) {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "hdfs/_HOST@" + KerberosUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_28,
                        ex.toString()));
            }
        }
    }
    if (hadoopConfDir != null && !hadoopConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hadoopConfDir);
        if (getContext().isClusterMode() && hadoopConfigDir.isAbsolute()) {
            //Do not allow absolute hadoop config directory in cluster mode
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                    Errors.HADOOPFS_45, hadoopConfDir));
        } else {
            if (!hadoopConfigDir.isAbsolute()) {
                hadoopConfigDir = new File(getContext().getResourcesDirectory(), hadoopConfDir)
                        .getAbsoluteFile();
            }
            if (!hadoopConfigDir.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                        Errors.HADOOPFS_25, hadoopConfigDir.getPath()));
            } else if (!hadoopConfigDir.isDirectory()) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                        Errors.HADOOPFS_26, hadoopConfigDir.getPath()));
            } else {
                File coreSite = new File(hadoopConfigDir, "core-site.xml");
                if (coreSite.exists()) {
                    if (!coreSite.isFile()) {
                        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                                Errors.HADOOPFS_27, coreSite.getPath()));
                    }
                    conf.addResource(new Path(coreSite.getAbsolutePath()));
                }
                File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
                if (hdfsSite.exists()) {
                    if (!hdfsSite.isFile()) {
                        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                                Errors.HADOOPFS_27, hdfsSite.getPath()));
                    }
                    conf.addResource(new Path(hdfsSite.getAbsolutePath()));
                }
            }
        }
    }
    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }
    return conf;
}

From source file:com.streamsets.pipeline.stage.destination.hdfs.HdfsTargetConfigBean.java

License:Apache License

private Configuration getHadoopConfiguration(Stage.Context context, List<Stage.ConfigIssue> issues) {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
                    "hdfs/_HOST@" + HadoopSecurityUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_28,
                        ex.toString()));
            }
        }
    }
    if (hdfsConfDir != null && !hdfsConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hdfsConfDir);
        if ((context.getExecutionMode() == ExecutionMode.CLUSTER_BATCH
                || context.getExecutionMode() == ExecutionMode.CLUSTER_YARN_STREAMING
                || context.getExecutionMode() == ExecutionMode.CLUSTER_MESOS_STREAMING)
                && hadoopConfigDir.isAbsolute()) {
            //Do not allow absolute hadoop config directory in cluster mode
            issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                    HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsConfDir", Errors.HADOOPFS_45, hdfsConfDir));
        } else {
            if (!hadoopConfigDir.isAbsolute()) {
                hadoopConfigDir = new File(context.getResourcesDirectory(), hdfsConfDir).getAbsoluteFile();
            }
            if (!hadoopConfigDir.exists()) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                        HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsConfDir", Errors.HADOOPFS_25,
                        hadoopConfigDir.getPath()));
            } else if (!hadoopConfigDir.isDirectory()) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                        HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsConfDir", Errors.HADOOPFS_26,
                        hadoopConfigDir.getPath()));
            } else {
                File coreSite = new File(hadoopConfigDir, "core-site.xml");
                if (coreSite.exists()) {
                    if (!coreSite.isFile()) {
                        issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                                HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsConfDir", Errors.HADOOPFS_27,
                                coreSite.getPath()));
                    }
                    conf.addResource(new Path(coreSite.getAbsolutePath()));
                }
                File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
                if (hdfsSite.exists()) {
                    if (!hdfsSite.isFile()) {
                        issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                                HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsConfDir", Errors.HADOOPFS_27,
                                hdfsSite.getPath()));
                    }
                    conf.addResource(new Path(hdfsSite.getAbsolutePath()));
                }
            }
        }
    }
    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }
    return conf;
}

From source file:com.streamsets.pipeline.stage.destination.hive.HiveMetastoreTarget.java

License:Apache License

private void initConfDirAndHDFS(final List<ConfigIssue> issues) {
    String hiveConfDirString = conf.hiveConfigBean.confDir;
    File hiveConfDir = new File(hiveConfDirString);
    final Configuration configuration = new Configuration();

    if (!hiveConfDir.isAbsolute()) {
        hiveConfDir = new File(getContext().getResourcesDirectory(), conf.hiveConfigBean.confDir)
                .getAbsoluteFile();
    }

    if (hiveConfDir.exists()) {
        File coreSite = new File(hiveConfDir.getAbsolutePath(), "core-site.xml");
        File hiveSite = new File(hiveConfDir.getAbsolutePath(), "hive-site.xml");
        File hdfsSite = new File(hiveConfDir.getAbsolutePath(), "hdfs-site.xml");

        if (!coreSite.exists()) {
            issues.add(getContext().createConfigIssue(Groups.HIVE.name(),
                    JOINER.join(CONF, HIVE_CONFIG_BEAN, CONF_DIR), Errors.HIVE_06, coreSite.getName(),
                    hiveConfDirString));
        } else {
            configuration.addResource(new Path(coreSite.getAbsolutePath()));
        }

        if (!hdfsSite.exists()) {
            issues.add(getContext().createConfigIssue(Groups.HIVE.name(),
                    JOINER.join(CONF, HIVE_CONFIG_BEAN, CONF_DIR), Errors.HIVE_06, hdfsSite.getName(),
                    hiveConfDirString));
        } else {
            configuration.addResource(new Path(hdfsSite.getAbsolutePath()));
        }
    } else {
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(),
                JOINER.join(CONF, HIVE_CONFIG_BEAN, CONF_DIR), Errors.HIVE_07, hiveConfDirString));
    }

    // Add any additional configuration overrides
    for (Map.Entry<String, String> entry : conf.hiveConfigBean.additionalConfigProperties.entrySet()) {
        configuration.set(entry.getKey(), entry.getValue());
    }

    if (!issues.isEmpty()) {
        return;
    }

    if (!conf.useAsAvro) {
        try {
            // forcing UGI to initialize with the security settings from the stage
            loginUgi = HadoopSecurityUtil.getLoginUser(configuration);
            if (conf.hdfsKerberos) {
                LOG.info("HDFS Using Kerberos");
                if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                    issues.add(getContext().createConfigIssue(Groups.ADVANCED.name(),
                            JOINER.join(CONF, HDFS_KERBEROS), Errors.HIVE_01,
                            loginUgi.getAuthenticationMethod(),
                            UserGroupInformation.AuthenticationMethod.KERBEROS));
                }
            } else {
                LOG.info("HDFS Using Simple");
                configuration.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                        UserGroupInformation.AuthenticationMethod.SIMPLE.name());
            }
        } catch (Exception ex) {
            LOG.info("Validation Error: " + ex.toString(), ex);
            issues.add(getContext().createConfigIssue(Groups.ADVANCED.name(), JOINER.join(CONF, HDFS_KERBEROS),
                    Errors.HIVE_01, "Exception in configuring HDFS"));
        }

        //use ugi.
        try {
            loginUgi.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() {
                    try {
                        fs = FileSystem.get(configuration);
                    } catch (IOException e) {
                        LOG.error("Error with HDFS File System Configuration.", e);

                    }
                    return null;
                }
            });
        } catch (Exception e) {
            issues.add(getContext().createConfigIssue(Groups.HIVE.name(),
                    JOINER.join(CONF, HIVE_CONFIG_BEAN, CONF_DIR), Errors.HIVE_01, e.getMessage()));
        }
    }
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHdfsSource.java

License:Apache License

Configuration getHadoopConfiguration(List<ConfigIssue> issues) {
    Configuration conf = new Configuration();
    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "hdfs/_HOST@" + KerberosUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_28,
                        ex.getMessage()));
            }
        }
    }
    if (hadoopConfDir != null && !hadoopConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hadoopConfDir);
        if (hadoopConfigDir.isAbsolute()) {
            // Do not allow absolute hadoop config directory in cluster mode
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                    Errors.HADOOPFS_29, hadoopConfDir));
        } else {
            hadoopConfigDir = new File(getContext().getResourcesDirectory(), hadoopConfDir).getAbsoluteFile();
        }
        if (!hadoopConfigDir.exists()) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                    Errors.HADOOPFS_25, hadoopConfigDir.getPath()));
        } else if (!hadoopConfigDir.isDirectory()) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                    Errors.HADOOPFS_26, hadoopConfigDir.getPath()));
        } else {
            File coreSite = new File(hadoopConfigDir, "core-site.xml");
            if (coreSite.exists()) {
                if (!coreSite.isFile()) {
                    issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                            Errors.HADOOPFS_27, coreSite.getPath()));
                }
                conf.addResource(new Path(coreSite.getAbsolutePath()));
            }
            File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
            if (hdfsSite.exists()) {
                if (!hdfsSite.isFile()) {
                    issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                            Errors.HADOOPFS_27, hdfsSite.getPath()));
                }
                conf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }
            File yarnSite = new File(hadoopConfigDir, "yarn-site.xml");
            if (yarnSite.exists()) {
                if (!yarnSite.isFile()) {
                    issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                            Errors.HADOOPFS_27, yarnSite.getPath()));
                }
                conf.addResource(new Path(yarnSite.getAbsolutePath()));
            }
            File mapredSite = new File(hadoopConfigDir, "mapred-site.xml");
            if (mapredSite.exists()) {
                if (!mapredSite.isFile()) {
                    issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                            Errors.HADOOPFS_27, mapredSite.getPath()));
                }
                conf.addResource(new Path(mapredSite.getAbsolutePath()));
            }
        }
    }
    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }
    return conf;
}

From source file:com.stumbleupon.hbaseadmin.HBaseCompact.java

License:Open Source License

/**
 * Main entry point
 * @param args command line arguments
 * @throws Exception 
 */
public static void main(String[] args) throws Exception {
    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;
    String hbaseSite = null;
    String jmxRemotePasswordFile = null;
    String jmxPort = null;
    Date startDate = null;
    Date endDate = null;
    int throttleFactor = 1;
    int numCycles = 1;
    int pauseInterval = DEFAULT_PAUSE_INTERVAL;
    int waitInterval = DEFAULT_WAIT_INTERVAL;
    int filesKeep = DEFAULT_FILES_KEEP;
    long regionCompactWaitTime = DEFAULT_REGION_COMPACT_WAIT_TIME;
    long maxStoreFileAge = 0;
    boolean excludeTables = false;
    String tableNamesString = "";
    List<String> tableNames = new ArrayList<String>();
    SimpleDateFormat sdf = new SimpleDateFormat("HH:mm");

    // Parse command line options
    try {
        cmd = parser.parse(getOptions(), args);
    } catch (org.apache.commons.cli.ParseException e) {
        System.out.println(e.getMessage());
        printOptions();
        System.exit(-1);
    }

    for (Option option : cmd.getOptions()) {
        switch (option.getId()) {
        case 'c':
            hbaseSite = option.getValue();
            break;
        case 'j':
            jmxRemotePasswordFile = option.getValue();
            break;
        case 't':
            throttleFactor = Integer.parseInt(option.getValue());
            break;
        case 'n':
            numCycles = Integer.parseInt(option.getValue());
            break;
        case 'p':
            pauseInterval = Integer.parseInt(option.getValue());
            break;
        case 'w':
            waitInterval = Integer.parseInt(option.getValue());
            break;
        case 's':
            startDate = sdf.parse(option.getValue());
            break;
        case 'e':
            endDate = sdf.parse(option.getValue());
            break;
        case 'b':
            tableNamesString = option.getValue();
            tableNames = Arrays.asList(option.getValue().split(","));
            break;
        case 'f':
            filesKeep = Integer.parseInt(option.getValue());
            break;
        case 'r':
            jmxPort = option.getValue();
            break;
        case 'x':
            excludeTables = true;
            break;
        case 'm':
            regionCompactWaitTime = Long.parseLong(option.getValue());
            break;
        case 'a':
            maxStoreFileAge = Long.parseLong(option.getValue());
            break;
        default:
            throw new IllegalArgumentException("unexpected option " + option);
        }
    }

    LOG.info("Starting compactor");
    LOG.info("--------------------------------------------------");
    LOG.info("HBase site              : {}", hbaseSite);
    LOG.info("RegionServer Jmx port   : {}", jmxPort);
    LOG.info("Jmx password file       : {}", jmxRemotePasswordFile);
    LOG.info("Compact interval        : {}", pauseInterval);
    LOG.info("Check interval          : {}", waitInterval);
    LOG.info("Throttle factor         : {}", throttleFactor);
    LOG.info("Number of cycles        : {}", numCycles);
    LOG.info("Off-peak start time     : {}", Utils.dateString(startDate, "HH:mm"));
    LOG.info("Off-peak end time       : {}", Utils.dateString(endDate, "HH:mm"));
    LOG.info("Minimum store files     : {}", filesKeep);
    LOG.info("Table names             : {}", tableNamesString);
    LOG.info("Exclude tables          : {}", excludeTables);
    LOG.info("Region compact wait time: {}", regionCompactWaitTime);
    LOG.info("Max store file age      : {}", maxStoreFileAge);
    LOG.info("--------------------------------------------------");

    // Create the HBase configuration and add the hbase-site.xml supplied on the command line
    final Configuration conf = HBaseConfiguration.create();
    conf.addResource(new Path(hbaseSite));

    HBaseCompact compact = new HBaseCompact();
    ClusterUtils clusterUtils = new ClusterUtils(compact, regionCompactWaitTime);

    compact.setClusterUtils(clusterUtils);
    compact.setAdmin(new HBaseAdmin(conf));
    compact.setSleepBetweenCompacts(pauseInterval);
    compact.setSleepBetweenChecks(waitInterval);
    compact.setThrottleFactor(throttleFactor);
    compact.setNumCycles(numCycles);
    compact.setStartDate(startDate);
    compact.setEndDate(endDate);
    compact.setNumStoreFiles(filesKeep);
    compact.setTableNames(tableNames);
    compact.setExcludeTables(excludeTables);
    compact.setMaxStoreFileAge(maxStoreFileAge);

    clusterUtils.setJmxPort(jmxPort);
    clusterUtils.setJmxPasswordFile(jmxRemotePasswordFile);

    compact.runCompactions();
}

From source file:com.synerzip.apexelasticsearchapp.ApplicationTest.java

@Test
public void testApplication() throws IOException, Exception {
    try {
        LocalMode lma = LocalMode.newInstance();
        Configuration conf = new Configuration(false);
        conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
        lma.prepareDAG(new Application(), conf);
        LocalMode.Controller lc = lma.getController();
        lc.run(900000); // runs for 900,000 ms (15 minutes) and quits
    } catch (ConstraintViolationException e) {
        Assert.fail("constraint violations: " + e.getConstraintViolations());
    }
}
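
As a variant of the stream-based call above, here is a hedged sketch of the two-argument addResource(InputStream, String) overload, which attaches a name to the stream so that error messages can identify the resource. The classpath location mirrors the test above; the class name is a placeholder.

import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;

public class NamedStreamResource {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // Assumes /META-INF/properties.xml is on the classpath, as in the test above.
        InputStream in = NamedStreamResource.class.getResourceAsStream("/META-INF/properties.xml");
        if (in != null) {
            // The second argument names the resource so diagnostics can point at it.
            conf.addResource(in, "META-INF/properties.xml");
        }
    }
}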

From source file:com.talis.hbase.rdf.connection.HBaseRdfConnectionFactory.java

License:Apache License

public static HBaseAdmin createHBaseAdmin(HBaseRdfConnectionDesc desc) {
    Configuration config = HBaseConfiguration.create();
    config.addResource(desc.getConfig());
    try {
        return new HBaseAdmin(config);
    } catch (Exception e) {
        throw new HBaseRdfException("HBase exception while creating admin");
    }
}

From source file:com.talis.hbase.rdf.connection.HBaseRdfConnectionFactory.java

License:Apache License

public static HBaseAdmin createHBaseAdmin(String configFile) {
    Configuration config = HBaseConfiguration.create();
    config.addResource(new Path(configFile));
    try {
        return new HBaseAdmin(config);
    } catch (Exception e) {
        throw new HBaseRdfException("HBase exception while creating admin");
    }
}

From source file:com.talis.hbase.rdf.connection.HBaseRdfConnectionFactory.java

License:Apache License

public static Configuration createHBaseConfiguration(String configFile) {
    Configuration config = HBaseConfiguration.create();
    config.addResource(new Path(configFile));
    return config;
}