Example usage for org.apache.hadoop.conf Configuration addResource

List of usage examples for org.apache.hadoop.conf Configuration addResource

Introduction

On this page you can find example usage for org.apache.hadoop.conf.Configuration.addResource.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource. The properties of this resource will override properties of previously added resources, unless they were marked final.
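
Below is a minimal sketch (not taken from the examples on this page) of how this overload can be used to merge a caller-supplied override into a copy of a base configuration. The helper name withOverrides is hypothetical; the pattern mirrors the ConfigurationFactory.getConfiguration(Configuration confToClone) implementations in the Phoenix tests further down.

import org.apache.hadoop.conf.Configuration;

public class AddResourceSketch {

    // Hypothetical helper: copy the base configuration and add the override
    // as a resource, leaving the base untouched.
    public static Configuration withOverrides(Configuration base, Configuration overrides) {
        Configuration copy = new Configuration(base);
        if (overrides != null) {
            // Properties of the added resource override those of previously
            // added resources, unless they were marked final.
            copy.addResource(overrides);
        }
        return copy;
    }
}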

Usage

From source file:org.apache.oozie.action.hadoop.PigMain.java

License:Apache License

@Override
protected void run(String[] args) throws Exception {
    System.out.println();
    System.out.println("Oozie Pig action configuration");
    System.out.println("=================================================================");

    // loading action conf prepared by Oozie
    Configuration actionConf = new Configuration(false);

    String actionXml = System.getProperty("oozie.action.conf.xml");

    if (actionXml == null) {
        throw new RuntimeException("Missing Java System Property [oozie.action.conf.xml]");
    }
    if (!new File(actionXml).exists()) {
        throw new RuntimeException("Action Configuration XML file [" + actionXml + "] does not exist");
    }

    actionConf.addResource(new Path("file:///", actionXml));
    setYarnTag(actionConf);

    Properties pigProperties = new Properties();
    for (Map.Entry<String, String> entry : actionConf) {
        pigProperties.setProperty(entry.getKey(), entry.getValue());
    }

    // propagate delegation related props from launcher job to Pig job
    String jobTokenFile = getFilePathFromEnv("HADOOP_TOKEN_FILE_LOCATION");
    if (jobTokenFile != null) {
        pigProperties.setProperty("mapreduce.job.credentials.binary", jobTokenFile);
        pigProperties.setProperty("tez.credentials.path", jobTokenFile);
        System.out.println("------------------------");
        System.out.println("Setting env property for mapreduce.job.credentials.binary to:" + jobTokenFile);
        System.out.println("------------------------");
        System.setProperty("mapreduce.job.credentials.binary", jobTokenFile);
    } else {
        System.out.println("Non-kerberoes execution");
    }

    OutputStream os = new FileOutputStream("pig.properties");
    pigProperties.store(os, "");
    os.close();

    logMasking("pig.properties:", Arrays.asList("password"), pigProperties.entrySet());

    List<String> arguments = new ArrayList<String>();
    String script = actionConf.get(PigActionExecutor.PIG_SCRIPT);

    if (script == null) {
        throw new RuntimeException("Action Configuration does not have [oozie.pig.script] property");
    }

    if (!new File(script).exists()) {
        throw new RuntimeException("Error: Pig script file [" + script + "] does not exist");
    }

    System.out.println("Pig script [" + script + "] content: ");
    System.out.println("------------------------");
    BufferedReader br = new BufferedReader(new FileReader(script));
    String line = br.readLine();
    while (line != null) {
        System.out.println(line);
        line = br.readLine();
    }
    br.close();
    System.out.println("------------------------");
    System.out.println();

    arguments.add("-file");
    arguments.add(script);
    String[] params = MapReduceMain.getStrings(actionConf, PigActionExecutor.PIG_PARAMS);
    for (String param : params) {
        arguments.add("-param");
        arguments.add(param);
    }

    String hadoopJobId = System.getProperty("oozie.launcher.job.id");
    if (hadoopJobId == null) {
        throw new RuntimeException("Launcher Hadoop Job ID system property not set");
    }

    String logFile = new File("pig-oozie-" + hadoopJobId + ".log").getAbsolutePath();

    URL log4jFile = Thread.currentThread().getContextClassLoader().getResource("log4j.properties");
    if (log4jFile != null) {

        String pigLogLevel = actionConf.get("oozie.pig.log.level", "INFO");
        String rootLogLevel = actionConf.get("oozie.action." + LauncherMapper.ROOT_LOGGER_LEVEL, "INFO");

        // append required PIG properties to the default hadoop log4j file
        Properties hadoopProps = new Properties();
        hadoopProps.load(log4jFile.openStream());
        hadoopProps.setProperty("log4j.rootLogger", rootLogLevel + ", A, B");
        hadoopProps.setProperty("log4j.logger.org.apache.pig", pigLogLevel + ", A, B");
        hadoopProps.setProperty("log4j.additivity.org.apache.pig", "false");
        hadoopProps.setProperty("log4j.appender.A", "org.apache.log4j.ConsoleAppender");
        hadoopProps.setProperty("log4j.appender.A.layout", "org.apache.log4j.PatternLayout");
        hadoopProps.setProperty("log4j.appender.A.layout.ConversionPattern", "%d [%t] %-5p %c %x - %m%n");
        hadoopProps.setProperty("log4j.appender.B", "org.apache.log4j.FileAppender");
        hadoopProps.setProperty("log4j.appender.B.file", logFile);
        hadoopProps.setProperty("log4j.appender.B.layout", "org.apache.log4j.PatternLayout");
        hadoopProps.setProperty("log4j.appender.B.layout.ConversionPattern", "%d [%t] %-5p %c %x - %m%n");

        String localProps = new File("piglog4j.properties").getAbsolutePath();
        OutputStream os1 = new FileOutputStream(localProps);
        hadoopProps.store(os1, "");
        os1.close();

        arguments.add("-log4jconf");
        arguments.add(localProps);

        // print out current directory
        File localDir = new File(localProps).getParentFile();
        System.out.println("Current (local) dir = " + localDir.getAbsolutePath());
    } else {
        System.out.println("log4jfile is null");
    }

    String pigLog = "pig-" + hadoopJobId + ".log";
    arguments.add("-logfile");
    arguments.add(pigLog);

    String[] pigArgs = MapReduceMain.getStrings(actionConf, PigActionExecutor.PIG_ARGS);
    for (String pigArg : pigArgs) {
        if (DISALLOWED_PIG_OPTIONS.contains(pigArg)) {
            throw new RuntimeException("Error: Pig argument " + pigArg + " is not supported");
        }
        arguments.add(pigArg);
    }

    System.out.println("Pig command arguments :");
    for (String arg : arguments) {
        System.out.println("             " + arg);
    }

    LauncherMainHadoopUtils.killChildYarnJobs(actionConf);

    System.out.println("=================================================================");
    System.out.println();
    System.out.println(">>> Invoking Pig command line now >>>");
    System.out.println();
    System.out.flush();

    System.out.println();
    runPigJob(new String[] { "-version" }, null, true, false);
    System.out.println();
    System.out.flush();
    boolean hasStats = Boolean.parseBoolean(actionConf.get(EXTERNAL_STATS_WRITE));
    runPigJob(arguments.toArray(new String[arguments.size()]), pigLog, false, hasStats);

    System.out.println();
    System.out.println("<<< Invocation of Pig command completed <<<");
    System.out.println();

    // For embedded python or for version of pig lower than 0.8, pig stats are not supported.
    // So retrieving hadoop Ids here
    File file = new File(System.getProperty(EXTERNAL_CHILD_IDS));
    if (!file.exists()) {
        writeExternalChildIDs(logFile, PIG_JOB_IDS_PATTERNS, "Pig");
    }
}

From source file:org.apache.oozie.action.hadoop.PigMainWithOldAPI.java

License:Apache License

protected void run(String[] args) throws Exception {
    System.out.println();
    System.out.println("Oozie Pig action configuration");
    System.out.println("=================================================================");

    // loading action conf prepared by Oozie
    Configuration actionConf = new Configuration(false);

    String actionXml = System.getProperty("oozie.action.conf.xml");

    if (actionXml == null) {
        throw new RuntimeException("Missing Java System Property [oozie.action.conf.xml]");
    }
    if (!new File(actionXml).exists()) {
        throw new RuntimeException("Action Configuration XML file [" + actionXml + "] does not exist");
    }

    actionConf.addResource(new Path("file:///", actionXml));

    Properties pigProperties = new Properties();
    for (Map.Entry<String, String> entry : actionConf) {
        pigProperties.setProperty(entry.getKey(), entry.getValue());
    }

    //propagate delegation related props from launcher job to Pig job
    String jobTokenFile = getFilePathFromEnv("HADOOP_TOKEN_FILE_LOCATION");
    if (jobTokenFile != null) {
        pigProperties.setProperty("mapreduce.job.credentials.binary", jobTokenFile);
        System.out.println("------------------------");
        System.out.println("Setting env property for mapreduce.job.credentials.binary to:" + jobTokenFile);
        System.out.println("------------------------");
        System.setProperty("mapreduce.job.credentials.binary", jobTokenFile);
    } else {
        System.out.println("Non-kerberoes execution");
    }

    OutputStream os = new FileOutputStream("pig.properties");
    pigProperties.store(os, "");
    os.close();

    System.out.println();
    System.out.println("pig.properties content:");
    System.out.println("------------------------");
    pigProperties.store(System.out, "");
    System.out.flush();
    System.out.println("------------------------");
    System.out.println();

    List<String> arguments = new ArrayList<String>();
    String script = actionConf.get("oozie.pig.script");

    if (script == null) {
        throw new RuntimeException("Action Configuration does not have [oozie.pig.script] property");
    }

    if (!new File(script).exists()) {
        throw new RuntimeException("Error: Pig script file [" + script + "] does not exist");
    }

    System.out.println("Pig script [" + script + "] content: ");
    System.out.println("------------------------");
    BufferedReader br = new BufferedReader(new FileReader(script));
    String line = br.readLine();
    while (line != null) {
        System.out.println(line);
        line = br.readLine();
    }
    br.close();
    System.out.println("------------------------");
    System.out.println();

    arguments.add("-file");
    arguments.add(script);
    String[] params = MapReduceMain.getStrings(actionConf, "oozie.pig.params");
    for (String param : params) {
        arguments.add("-param");
        arguments.add(param);
    }

    String hadoopJobId = System.getProperty("oozie.launcher.job.id");
    if (hadoopJobId == null) {
        throw new RuntimeException("Launcher Hadoop Job ID system property not set");
    }

    String logFile = new File("pig-oozie-" + hadoopJobId + ".log").getAbsolutePath();

    URL log4jFile = Thread.currentThread().getContextClassLoader().getResource("log4j.properties");
    if (log4jFile != null) {

        String pigLogLevel = actionConf.get("oozie.pig.log.level", "INFO");
        String rootLogLevel = actionConf.get("oozie.action." + LauncherMapper.ROOT_LOGGER_LEVEL, "INFO");

        // append required PIG properties to the default hadoop log4j file
        Properties hadoopProps = new Properties();
        hadoopProps.load(log4jFile.openStream());
        hadoopProps.setProperty("log4j.rootLogger", rootLogLevel + ", A, B");
        hadoopProps.setProperty("log4j.logger.org.apache.pig", pigLogLevel + ", A, B");
        hadoopProps.setProperty("log4j.appender.A", "org.apache.log4j.ConsoleAppender");
        hadoopProps.setProperty("log4j.appender.A.layout", "org.apache.log4j.PatternLayout");
        hadoopProps.setProperty("log4j.appender.A.layout.ConversionPattern", "%-4r [%t] %-5p %c %x - %m%n");
        hadoopProps.setProperty("log4j.appender.B", "org.apache.log4j.FileAppender");
        hadoopProps.setProperty("log4j.appender.B.file", logFile);
        hadoopProps.setProperty("log4j.appender.B.layout", "org.apache.log4j.PatternLayout");
        hadoopProps.setProperty("log4j.appender.B.layout.ConversionPattern", "%-4r [%t] %-5p %c %x - %m%n");

        String localProps = new File("piglog4j.properties").getAbsolutePath();
        OutputStream os1 = new FileOutputStream(localProps);
        hadoopProps.store(os1, "");
        os1.close();

        arguments.add("-log4jconf");
        arguments.add(localProps);

        // print out current directory
        File localDir = new File(localProps).getParentFile();
        System.out.println("Current (local) dir = " + localDir.getAbsolutePath());
    } else {
        System.out.println("log4jfile is null");
    }

    String pigLog = "pig-" + hadoopJobId + ".log";
    arguments.add("-logfile");
    arguments.add(pigLog);

    String[] pigArgs = MapReduceMain.getStrings(actionConf, "oozie.pig.args");
    for (String pigArg : pigArgs) {
        if (DISALLOWED_PIG_OPTIONS.contains(pigArg)) {
            throw new RuntimeException("Error: Pig argument " + pigArg + " is not supported");
        }
        arguments.add(pigArg);
    }

    System.out.println("Pig command arguments :");
    for (String arg : arguments) {
        System.out.println("             " + arg);
    }

    System.out.println("=================================================================");
    System.out.println();
    System.out.println(">>> Invoking Pig command line now >>>");
    System.out.println();
    System.out.flush();

    try {
        System.out.println();
        runPigJob(new String[] { "-version" });
    } catch (SecurityException ex) {
        LauncherSecurityManager.reset();
    }
    System.out.println();
    System.out.flush();
    try {
        runPigJob(arguments.toArray(new String[arguments.size()]));
    } catch (SecurityException ex) {
        if (LauncherSecurityManager.getExitInvoked()) {
            if (LauncherSecurityManager.getExitCode() != 0) {
                System.err.println();
                System.err.println("Pig logfile dump:");
                System.err.println();
                try {
                    BufferedReader reader = new BufferedReader(new FileReader(pigLog));
                    line = reader.readLine();
                    while (line != null) {
                        System.err.println(line);
                        line = reader.readLine();
                    }
                    reader.close();
                } catch (FileNotFoundException e) {
                    System.err.println("pig log file: " + pigLog + "  not found.");
                }
                throw ex;
            }
        }
    }

    System.out.println();
    System.out.println("<<< Invocation of Pig command completed <<<");
    System.out.println();

    // harvesting and recording Hadoop Job IDs
    Properties jobIds = getHadoopJobIds(logFile);
    File file = new File(
            System.getProperty(LauncherMapper.ACTION_PREFIX + LauncherMapper.ACTION_DATA_OUTPUT_PROPS));
    os = new FileOutputStream(file);
    jobIds.store(os, "");
    os.close();
    System.out.println(" Hadoop Job IDs executed by Pig: " + jobIds.getProperty(HADOOP_JOBS));
    System.out.println();
}

From source file:org.apache.oozie.action.hadoop.SqoopMain.java

License:Apache License

private static Configuration initActionConf() {
    // loading action conf prepared by Oozie
    Configuration sqoopConf = new Configuration(false);

    String actionXml = System.getProperty("oozie.action.conf.xml");

    if (actionXml == null) {
        throw new RuntimeException("Missing Java System Property [oozie.action.conf.xml]");
    }
    if (!new File(actionXml).exists()) {
        throw new RuntimeException("Action Configuration XML file [" + actionXml + "] does not exist");
    }

    sqoopConf.addResource(new Path("file:///", actionXml));
    setYarnTag(sqoopConf);

    String delegationToken = getFilePathFromEnv("HADOOP_TOKEN_FILE_LOCATION");
    if (delegationToken != null) {
        sqoopConf.setBoolean("sqoop.hbase.security.token.skip", true);
        sqoopConf.set("mapreduce.job.credentials.binary", delegationToken);
        System.out.println("------------------------");
        System.out.println("Setting env property for mapreduce.job.credentials.binary to: " + delegationToken);
        System.out.println("------------------------");
        System.setProperty("mapreduce.job.credentials.binary", delegationToken);
    } else {
        System.out.println("Non-Kerberos execution");
    }

    return sqoopConf;
}
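
The three Oozie mains above all load the action configuration the same way. The following minimal sketch distills that pattern; the method name loadActionConf and the use of IllegalArgumentException are illustrative choices, not part of the Oozie code.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class LoadActionConfSketch {

    // Load a local action XML file into a Configuration that starts empty
    // (no Hadoop default resources), using the Path-based addResource overload.
    public static Configuration loadActionConf(String actionXmlPath) {
        if (actionXmlPath == null || !new File(actionXmlPath).exists()) {
            throw new IllegalArgumentException("Action XML not found: " + actionXmlPath);
        }
        Configuration actionConf = new Configuration(false);
        // Resolve the local path against the file:/// scheme, as the Oozie mains do
        actionConf.addResource(new Path("file:///", actionXmlPath));
        return actionConf;
    }
}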

From source file:org.apache.oozie.action.hadoop.TestMapReduceActionExecutor.java

License:Apache License

public void testMapReduceWithConfigClass() throws Exception {
    FileSystem fs = getFileSystem();

    Path inputDir = new Path(getFsTestCaseDir(), "input");
    Path outputDir = new Path(getFsTestCaseDir(), "output");

    Writer w = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
    w.write("dummy\n");
    w.write("dummy\n");
    w.close();

    Path jobXml = new Path(getFsTestCaseDir(), "job.xml");
    XConfiguration conf = getMapReduceConfig(inputDir.toString(), outputDir.toString());
    conf.set(MapperReducerForTest.JOB_XML_OUTPUT_LOCATION, jobXml.toUri().toString());
    conf.set("B", "b");
    String actionXml = "<map-reduce>" + "<job-tracker>" + getJobTrackerUri() + "</job-tracker>" + "<name-node>"
            + getNameNodeUri() + "</name-node>" + conf.toXmlString(false) + "<config-class>"
            + OozieActionConfiguratorForTest.class.getName() + "</config-class>" + "</map-reduce>";

    _testSubmit("map-reduce", actionXml);
    Configuration conf2 = new Configuration(false);
    conf2.addResource(fs.open(jobXml));
    assertEquals("a", conf2.get("A"));
    assertEquals("c", conf2.get("B"));
}

From source file:org.apache.oozie.test.XTestCase.java

License:Apache License

/**
 * Like {@link #setUp()} but allows skipping cleaning up the database tables.  Most tests should use the other method, unless
 * they specifically don't want to (or can't) clean up the database tables.
 *
 * @param cleanUpDBTables true if should cleanup the database tables, false if not
 * @throws Exception if the test workflow working directory could not be created or there was a problem cleaning the database
 */
protected void setUp(boolean cleanUpDBTables) throws Exception {
    RUNNING_TESTCASES.incrementAndGet();
    super.setUp();
    String baseDir = System.getProperty(OOZIE_TEST_DIR, new File("target/test-data").getAbsolutePath());
    String msg = null;
    File f = new File(baseDir);
    if (!f.isAbsolute()) {
        msg = XLog.format("System property [{0}]=[{1}] must be set to an absolute path", OOZIE_TEST_DIR,
                baseDir);
    } else {
        if (baseDir.length() < 4) {
            msg = XLog.format("System property [{0}]=[{1}] path must be at least 4 chars", OOZIE_TEST_DIR,
                    baseDir);
        }
    }
    if (msg != null) {
        System.err.println();
        System.err.println(msg);
        System.exit(-1);
    }
    f.mkdirs();
    if (!f.exists() || !f.isDirectory()) {
        System.err.println();
        System.err.println(XLog.format("Could not create test dir [{0}]", baseDir));
        System.exit(-1);
    }
    hadoopVersion = System.getProperty(HADOOP_VERSION, "0.20.0");
    testCaseDir = createTestCaseDir(this, true);

    //setting up Oozie HOME and Oozie conf directory
    setSystemProperty(Services.OOZIE_HOME_DIR, testCaseDir);
    Services.setOozieHome();
    testCaseConfDir = createTestCaseSubDir("conf");

    // load test Oozie site
    String oozieTestDB = System.getProperty("oozie.test.db", "hsqldb");
    String defaultOozieSize = new File(OOZIE_SRC_DIR,
            "core/src/test/resources/" + oozieTestDB + "-oozie-site.xml").getAbsolutePath();
    String customOozieSite = System.getProperty("oozie.test.config.file", defaultOozieSize);
    File source = new File(customOozieSite);
    if (!source.isAbsolute()) {
        source = new File(OOZIE_SRC_DIR, customOozieSite);
    }
    source = source.getAbsoluteFile();
    InputStream oozieSiteSourceStream = null;
    if (source.exists()) {
        oozieSiteSourceStream = new FileInputStream(source);
    } else {
        // If we can't find it, try using the class loader (useful if we're using XTestCase from outside core)
        URL sourceURL = getClass().getClassLoader().getResource(oozieTestDB + "-oozie-site.xml");
        if (sourceURL != null) {
            oozieSiteSourceStream = sourceURL.openStream();
        } else {
            // If we still can't find it, then exit
            System.err.println();
            System.err.println(XLog.format("Custom configuration file for testing does no exist [{0}]",
                    source.getAbsolutePath()));
            System.err.println();
            System.exit(-1);
        }
    }
    // Copy the specified oozie-site file from oozieSiteSourceStream to the test case dir as oozie-site.xml
    Configuration oozieSiteConf = new Configuration(false);
    oozieSiteConf.addResource(oozieSiteSourceStream);
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    InputStream inputStream = classLoader.getResourceAsStream(ConfigurationService.DEFAULT_CONFIG_FILE);
    XConfiguration configuration = new XConfiguration(inputStream);
    String classes = configuration.get(Services.CONF_SERVICE_CLASSES);
    // Disable sharelib service as it cannot find the sharelib jars
    // as maven has target/classes in classpath and not the jar because test phase is before package phase
    oozieSiteConf.set(Services.CONF_SERVICE_CLASSES,
            classes.replaceAll("org.apache.oozie.service.ShareLibService,", ""));
    // Make sure to create the Oozie DB during unit tests
    oozieSiteConf.set(JPAService.CONF_CREATE_DB_SCHEMA, "true");
    File target = new File(testCaseConfDir, "oozie-site.xml");
    oozieSiteConf.writeXml(new FileOutputStream(target));

    File hadoopConfDir = new File(testCaseConfDir, "hadoop-conf");
    hadoopConfDir.mkdir();
    File actionConfDir = new File(testCaseConfDir, "action-conf");
    actionConfDir.mkdir();
    source = new File(OOZIE_SRC_DIR, "core/src/test/resources/hadoop-config.xml");
    target = new File(hadoopConfDir, "hadoop-site.xml");
    IOUtils.copyStream(new FileInputStream(source), new FileOutputStream(target));

    if (System.getProperty("oozielocal.log") == null) {
        setSystemProperty("oozielocal.log", "/tmp/oozielocal.log");
    }
    if (System.getProperty("oozie.test.hadoop.security", "simple").equals("kerberos")) {
        System.setProperty("oozie.service.HadoopAccessorService.kerberos.enabled", "true");
    }
    if (System.getProperty("oozie.test.hadoop.minicluster", "true").equals("true")) {
        setUpEmbeddedHadoop(getTestCaseDir());
        // Second cluster is not necessary without the first one
        if (System.getProperty("oozie.test.hadoop.minicluster2", "false").equals("true")) {
            setUpEmbeddedHadoop2();
        }
    }

    if (System.getProperty("oozie.test.db.host") == null) {
        System.setProperty("oozie.test.db.host", "localhost");
    }
    setSystemProperty(ConfigurationService.OOZIE_DATA_DIR, testCaseDir);

    setSystemProperty(HadoopAccessorService.SUPPORTED_FILESYSTEMS, "*");

    if (mrCluster != null) {
        OutputStream os = new FileOutputStream(new File(hadoopConfDir, "core-site.xml"));
        Configuration conf = createJobConfFromMRCluster();
        conf.writeXml(os);
        os.close();
    }

    if (System.getProperty("oozie.test.metastore.server", "true").equals("true")) {
        setupHCatalogServer();
    }

    // Cleanup any leftover database data to make sure we start each test with an empty database
    if (cleanUpDBTables) {
        cleanUpDBTables();
    }
}

From source file:org.apache.phoenix.end2end.BasePermissionsIT.java

License:Apache License

void startNewMiniCluster(Configuration overrideConf) throws Exception {
    if (null != testUtil) {
        testUtil.shutdownMiniCluster();
        testUtil = null;
    }

    testUtil = new HBaseTestingUtility();

    Configuration config = testUtil.getConfiguration();
    enablePhoenixHBaseAuthorization(config);
    configureNamespacesOnServer(config);
    configureRandomHMasterPort(config);
    if (overrideConf != null) {
        config.addResource(overrideConf);
    }

    testUtil.startMiniCluster(1);
    initializeUsers(testUtil.getConfiguration());
}

From source file:org.apache.phoenix.end2end.HttpParamImpersonationQueryServerIT.java

License:Apache License

/**
 * Setup and start kerberos, hbase
 */
@BeforeClass
public static void setUp() throws Exception {
    final Configuration conf = UTIL.getConfiguration();
    // Ensure the dirs we need are created/empty
    ensureIsEmptyDirectory(TEMP_DIR);
    ensureIsEmptyDirectory(KEYTAB_DIR);
    KEYTAB = new File(KEYTAB_DIR, "test.keytab");
    // Start a MiniKDC
    KDC = UTIL.setupMiniKdc(KEYTAB);
    // Create a service principal and spnego principal in one keytab
    // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
    //     use separate identies for HBase and HDFS results in a GSS initiate error. The quick
    //     solution is to just use a single "service" principal instead of "hbase" and "hdfs"
    //     (or "dn" and "nn") per usual.
    KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, SERVICE_PRINCIPAL);
    // Start ZK by hand
    UTIL.startMiniZKCluster();

    // Create a number of unprivileged users
    createUsers(2);

    // Set configuration for HBase
    HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
    HBaseKerberosUtils.setSecuredConfiguration(conf);
    setHdfsSecuredConfiguration(conf);
    UserGroupInformation.setConfiguration(conf);
    conf.setInt(HConstants.MASTER_PORT, 0);
    conf.setInt(HConstants.MASTER_INFO_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
    conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName(),
            TokenProvider.class.getName());

    // Secure Phoenix setup
    conf.set("phoenix.queryserver.kerberos.principal", SPNEGO_PRINCIPAL);
    conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
    conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
    conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
    // Required so that PQS can impersonate the end-users to HBase
    conf.set("hadoop.proxyuser.HTTP.groups", "*");
    conf.set("hadoop.proxyuser.HTTP.hosts", "*");
    // user1 is allowed to impersonate others, user2 is not
    conf.set("hadoop.proxyuser.user1.groups", "*");
    conf.set("hadoop.proxyuser.user1.hosts", "*");
    conf.setBoolean(QueryServices.QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB, true);

    // Clear the cached singletons so we can inject our own.
    InstanceResolver.clearSingletons();
    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
        @Override
        public Configuration getConfiguration() {
            return conf;
        }

        @Override
        public Configuration getConfiguration(Configuration confToClone) {
            Configuration copy = new Configuration(conf);
            copy.addResource(confToClone);
            return copy;
        }
    });
    updateDefaultRealm();

    // Start HDFS
    UTIL.startMiniDFSCluster(1);
    // Use LocalHBaseCluster to avoid HBaseTestingUtility from doing something wrong
    // NB. I'm not actually sure what HTU does incorrect, but this was pulled from some test
    //     classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
    Path rootdir = UTIL.getDataTestDirOnTestFS(HttpParamImpersonationQueryServerIT.class.getSimpleName());
    FSUtils.setRootDir(conf, rootdir);
    HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
    HBASE_CLUSTER.startup();

    // Then fork a thread with PQS in it.
    startQueryServer();
}

From source file:org.apache.phoenix.end2end.SecureQueryServerIT.java

License:Apache License

/**
 * Setup and start kerberos, hbase
 */
@BeforeClass
public static void setUp() throws Exception {
    final Configuration conf = UTIL.getConfiguration();
    // Ensure the dirs we need are created/empty
    ensureIsEmptyDirectory(TEMP_DIR);
    ensureIsEmptyDirectory(KEYTAB_DIR);
    KEYTAB = new File(KEYTAB_DIR, "test.keytab");
    // Start a MiniKDC
    KDC = UTIL.setupMiniKdc(KEYTAB);
    // Create a service principal and spnego principal in one keytab
    // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
    //     use separate identies for HBase and HDFS results in a GSS initiate error. The quick
    //     solution is to just use a single "service" principal instead of "hbase" and "hdfs"
    //     (or "dn" and "nn") per usual.
    KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, SERVICE_PRINCIPAL);
    // Start ZK by hand
    UTIL.startMiniZKCluster();

    // Create a number of unprivileged users
    createUsers(3);

    // Set configuration for HBase
    HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
    HBaseKerberosUtils.setSecuredConfiguration(conf);
    setHdfsSecuredConfiguration(conf);
    UserGroupInformation.setConfiguration(conf);
    conf.setInt(HConstants.MASTER_PORT, 0);
    conf.setInt(HConstants.MASTER_INFO_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, TokenProvider.class.getName());

    // Secure Phoenix setup
    conf.set("phoenix.queryserver.kerberos.principal", SPNEGO_PRINCIPAL);
    conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
    conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
    conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
    // Required so that PQS can impersonate the end-users to HBase
    conf.set("hadoop.proxyuser.HTTP.groups", "*");
    conf.set("hadoop.proxyuser.HTTP.hosts", "*");

    // Clear the cached singletons so we can inject our own.
    InstanceResolver.clearSingletons();
    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
        @Override
        public Configuration getConfiguration() {
            return conf;
        }

        @Override
        public Configuration getConfiguration(Configuration confToClone) {
            Configuration copy = new Configuration(conf);
            copy.addResource(confToClone);
            return copy;
        }
    });
    updateDefaultRealm();

    // Start HDFS
    UTIL.startMiniDFSCluster(1);
    // Use LocalHBaseCluster to avoid HBaseTestingUtility from doing something wrong
    // NB. I'm not actually sure what HTU does incorrect, but this was pulled from some test
    //     classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
    Path rootdir = UTIL.getDataTestDirOnTestFS(SecureQueryServerIT.class.getSimpleName());
    FSUtils.setRootDir(conf, rootdir);
    HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
    HBASE_CLUSTER.startup();

    // Then fork a thread with PQS in it.
    startQueryServer();
}

From source file:org.apache.phoenix.jdbc.SecureUserConnectionsIT.java

License:Apache License

@BeforeClass
public static void setupKdc() throws Exception {
    ensureIsEmptyDirectory(KDC_DIR);
    ensureIsEmptyDirectory(KEYTAB_DIR);
    // Create and start the KDC. MiniKDC appears to have a race condition in how it does
    // port allocation (with apache-ds). See PHOENIX-3287.
    boolean started = false;
    for (int i = 0; !started && i < KDC_START_ATTEMPTS; i++) {
        Properties kdcConf = MiniKdc.createConf();
        kdcConf.put(MiniKdc.DEBUG, true);
        KDC = new MiniKdc(kdcConf, KDC_DIR);
        try {
            KDC.start();
            started = true;
        } catch (Exception e) {
            LOG.warn("PHOENIX-3287: Failed to start KDC, retrying..", e);
        }
    }
    assertTrue("The embedded KDC failed to start successfully after " + KDC_START_ATTEMPTS + " attempts.",
            started);

    createUsers(NUM_USERS);
    createServiceUsers(NUM_USERS);

    final Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos");
    conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, true);
    UserGroupInformation.setConfiguration(conf);

    // Clear the cached singletons so we can inject our own.
    InstanceResolver.clearSingletons();
    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
        @Override
        public Configuration getConfiguration() {
            return conf;
        }

        @Override
        public Configuration getConfiguration(Configuration confToClone) {
            Configuration copy = new Configuration(conf);
            copy.addResource(confToClone);
            return copy;
        }
    });
    updateDefaultRealm();
}

From source file:org.apache.phoenix.mapreduce.CsvBulkImportUtilTest.java

License:Apache License

@Test
public void testInitCsvImportJob() throws IOException {
    Configuration conf = new Configuration();

    char delimiter = '\001';
    char quote = '\002';
    char escape = '!';

    CsvBulkImportUtil.initCsvImportJob(conf, delimiter, quote, escape, null, null);

    // Serialize and deserialize the config to ensure that there aren't any issues
    // with non-printable characters as delimiters
    File tempFile = File.createTempFile("test-config", ".xml");
    FileOutputStream fileOutputStream = new FileOutputStream(tempFile);
    conf.writeXml(fileOutputStream);
    fileOutputStream.close();
    Configuration deserialized = new Configuration();
    deserialized.addResource(new FileInputStream(tempFile));

    assertEquals(Character.valueOf('\001'),
            CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.FIELD_DELIMITER_CONFKEY));
    assertEquals(Character.valueOf('\002'),
            CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.QUOTE_CHAR_CONFKEY));
    assertEquals(Character.valueOf('!'),
            CsvBulkImportUtil.getCharacter(deserialized, CsvToKeyValueMapper.ESCAPE_CHAR_CONFKEY));
    assertNull(deserialized.get(CsvToKeyValueMapper.ARRAY_DELIMITER_CONFKEY));

    tempFile.delete();
}