Example usage for org.apache.hadoop.conf Configuration addResource

Introduction

On this page you can find usage examples for org.apache.hadoop.conf.Configuration#addResource.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource.
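
A minimal, self-contained sketch of this overload (the property name example.key is made up for illustration): properties of a resource added later override those of earlier resources, unless they were marked final.

import org.apache.hadoop.conf.Configuration;

public class AddResourceExample {
    public static void main(String[] args) {
        // Two throwaway configurations that disagree on the same property.
        Configuration first = new Configuration(false);
        first.set("example.key", "first-value");

        Configuration second = new Configuration(false);
        second.set("example.key", "second-value");

        // Merge both into an empty configuration. The resource added
        // later wins for non-final properties.
        Configuration merged = new Configuration(false);
        merged.addResource(first);
        merged.addResource(second);

        System.out.println(merged.get("example.key")); // prints "second-value"
    }
}

The Phoenix example below relies on the same layering: it clones a base configuration and then merges a caller-supplied one over it.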

Usage

From source file:org.apache.phoenix.monitoring.PhoenixMetricsDisabledIT.java

License:Apache License
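
This test fixture registers a ConfigurationFactory whose getConfiguration(Configuration) clones the shared test configuration and then merges the caller's configuration over it with addResource, so caller-supplied properties take precedence.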

@BeforeClass
public static void doSetup() throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    conf.set(QueryServices.GLOBAL_METRICS_ENABLED, String.valueOf(false));
    conf.set(QueryServices.RENEW_LEASE_ENABLED, String.valueOf(false));
    // Clear the cached singletons so we can inject our own.
    InstanceResolver.clearSingletons();
    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
        @Override
        public Configuration getConfiguration() {
            return conf;
        }

        @Override
        public Configuration getConfiguration(Configuration confToClone) {
            Configuration copy = new Configuration(conf);
            copy.addResource(confToClone);
            return copy;
        }
    });

    Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));

    DriverManager.registerDriver(PhoenixDriver.INSTANCE);
}

From source file:org.apache.phoenix.queryserver.client.SqllineWrapper.java

License:Apache License
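
Here the String overload, addResource("hbase-site.xml"), loads HBase's site configuration from the classpath into an otherwise empty Configuration, which is then consulted to decide whether SPNEGO authentication and a Kerberos login are needed before launching SqlLine.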

public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration(false);
    conf.addResource("hbase-site.xml");

    // Check if the server config says SPNEGO auth is actually disabled.
    final boolean disableSpnego = conf.getBoolean(QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB,
            DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED);
    if (disableSpnego) {
        SqlLine.main(args);
    }

    UserGroupInformation ugi = loginIfNecessary(conf);

    if (null != ugi) {
        final String[] updatedArgs = updateArgsForKerberos(args);
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                SqlLine.main(updatedArgs);
                return null;
            }
        });
    } else {
        SqlLine.main(args);
    }
}

From source file:org.apache.pig.backend.hadoop.datastorage.ConfigurationUtil.java

License:Apache License
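
In local mode this helper builds the local-file-system Configuration from scratch, adding only core-default.xml so that no cluster-specific resources leak in.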

public static Properties getLocalFSProperties() {
    Configuration localConf;
    if (PigMapReduce.sJobContext != null
            && PigMapReduce.sJobContext.getConfiguration().get("exectype").equals(ExecType.LOCAL.toString())) {
        localConf = new Configuration(false);
        localConf.addResource("core-default.xml");
    } else {
        localConf = new Configuration(true);
        // Hacky workaround to get the unit tests running under Hadoop 23.
        // Hadoop 23's MiniMRCluster currently needs the distributed cache set up
        // before it starts, so build/classes/hadoop-site.xml contains such an entry.
        // That entry keeps some tests from passing (they expect those files in HDFS),
        // so unset it for Hadoop 23.
        // This should go away once MiniMRCluster fixes the distributed cache issue.
        HadoopShims.unsetConf(localConf, MRConfiguration.JOB_CACHE_FILES);
    }
    localConf.set(MapRedUtil.FILE_SYSTEM_NAME, "file:///");
    Properties props = ConfigurationUtil.toProperties(localConf);
    return props;
}

From source file:org.apache.pig.test.TezMiniCluster.java

License:Apache License
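
This mini-cluster setup layers core-default.xml and hdfs-default.xml into an empty Configuration to bootstrap a MiniDFSCluster, then writes the effective, programmatically set properties back out as *-site.xml files for the cluster's conf directory.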

@Override
public void setupMiniDfsAndMrClusters() {
    try {
        deleteConfFiles();
        CONF_DIR.mkdirs();

        // Build mini DFS cluster
        Configuration hdfsConf = new Configuration(false);
        hdfsConf.addResource("core-default.xml");
        hdfsConf.addResource("hdfs-default.xml");
        m_dfs = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(2).format(true).racks(null).build();
        m_fileSys = m_dfs.getFileSystem();
        m_dfs_conf = m_dfs.getConfiguration(0);
        //Create user home directory
        m_fileSys.mkdirs(m_fileSys.getWorkingDirectory());

        // Write core-site.xml
        Configuration core_site = new Configuration(false);
        core_site.set(FileSystem.FS_DEFAULT_NAME_KEY, m_dfs_conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
        core_site.writeXml(new FileOutputStream(CORE_CONF_FILE));

        Configuration hdfs_site = new Configuration(false);
        for (Entry<String, String> conf : m_dfs_conf) {
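            // "programatically" (sic) is the literal source tag Hadoop records
            // for properties set in code, so the misspelling is intentional here.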
            if (ArrayUtils.contains(m_dfs_conf.getPropertySources(conf.getKey()), "programatically")) {
                hdfs_site.set(conf.getKey(), m_dfs_conf.getRaw(conf.getKey()));
            }
        }
        hdfs_site.writeXml(new FileOutputStream(HDFS_CONF_FILE));

        // Build mini YARN cluster
        m_mr = new MiniMRYarnCluster("PigMiniCluster", 2);
        m_mr.init(m_dfs_conf);
        m_mr.start();
        m_mr_conf = m_mr.getConfig();
        m_mr_conf.set(MRConfiguration.FRAMEWORK_NAME, "yarn-tez");
        m_mr_conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, System.getProperty("java.class.path"));
        m_mr_conf.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx2048m");
        m_mr_conf.set(MRJobConfig.REDUCE_JAVA_OPTS, "-Xmx2048m");

        Configuration mapred_site = new Configuration(false);
        Configuration yarn_site = new Configuration(false);
        for (Entry<String, String> conf : m_mr_conf) {
            if (ArrayUtils.contains(m_mr_conf.getPropertySources(conf.getKey()), "programatically")) {
                if (conf.getKey().contains("yarn")) {
                    yarn_site.set(conf.getKey(), m_mr_conf.getRaw(conf.getKey()));
                } else if (!conf.getKey().startsWith("dfs")) {
                    mapred_site.set(conf.getKey(), m_mr_conf.getRaw(conf.getKey()));
                }
            }
        }

        mapred_site.writeXml(new FileOutputStream(MAPRED_CONF_FILE));
        yarn_site.writeXml(new FileOutputStream(YARN_CONF_FILE));

        // Write tez-site.xml
        Configuration tez_conf = new Configuration(false);
        // TODO PIG-3659 - Remove this once memory management is fixed
        tez_conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_IO_SORT_MB, "20");
        tez_conf.set("tez.lib.uris", "hdfs:///tez,hdfs:///tez/lib");
        // Set to a lower value so that tests don't get stuck for long because of 1 AM running at a time
        tez_conf.set(TezConfiguration.TEZ_SESSION_AM_DAG_SUBMIT_TIMEOUT_SECS, "20");
        // Lower the max task attempts to 2 so that negative tests fail
        // faster. By default, tasks retry 4 times
        tez_conf.set(TezConfiguration.TEZ_AM_TASK_MAX_FAILED_ATTEMPTS, "2");
        tez_conf.writeXml(new FileOutputStream(TEZ_CONF_FILE));

        // Copy tez jars to hdfs
        m_fileSys.mkdirs(new Path("/tez/lib"));
        FileFilter fileFilter = new RegexFileFilter("tez-.+\\.jar$");
        File[] tezJars = TEZ_LIB_DIR.listFiles(fileFilter);
        for (int i = 0; i < tezJars.length; i++) {
            if (tezJars[i].getName().startsWith("tez-api")) {
                m_fileSys.copyFromLocalFile(new Path(tezJars[i].getAbsoluteFile().toString()),
                        new Path("/tez"));
            } else {
                m_fileSys.copyFromLocalFile(new Path(tezJars[i].getAbsoluteFile().toString()),
                        new Path("/tez/lib"));
            }
        }

        m_conf = m_mr_conf;
        // Turn FetchOptimizer off so that we can actually test Tez
        m_conf.set(PigConfiguration.OPT_FETCH, System.getProperty("test.opt.fetch", "false"));

        System.setProperty("junit.hadoop.conf", CONF_DIR.getPath());
        System.setProperty("hadoop.log.dir", "build/test/logs");
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.pulsar.io.hdfs.AbstractHdfsConnector.java

License:Apache License
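
This connector accepts a comma-separated list of resource paths and adds each one with the Path overload of addResource; if none are supplied, it falls back to checking that at least one non-default resource is already on the classpath.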

private static Configuration getConfig(final Configuration config, String res) throws IOException {
    boolean foundResources = false;
    if (null != res) {
        String[] resources = res.split(",");
        for (String resource : resources) {
            config.addResource(new Path(resource.trim()));
            foundResources = true;
        }
    }

    if (!foundResources) {
        // check that at least 1 non-default resource is available on the classpath
        String configStr = config.toString();
        for (String resource : configStr.substring(configStr.indexOf(":") + 1).split(",")) {
            if (!resource.contains("default") && config.getResource(resource.trim()) != null) {
                foundResources = true;
                break;
            }
        }
    }

    if (!foundResources) {
        throw new IOException("Could not find any of the " + res + " on the classpath");
    }
    return config;
}