Example usage for org.apache.hadoop.conf Configuration writeXml

Introduction

This page collects usage examples for org.apache.hadoop.conf.Configuration.writeXml.

Prototype

public void writeXml(Writer out) throws IOException 
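
Hadoop's Configuration also provides an OutputStream overload, public void writeXml(OutputStream out) throws IOException, which most of the examples below use. As a minimal, self-contained sketch of the Writer variant (the key and value here are illustrative, not taken from any example below):

import java.io.IOException;
import java.io.StringWriter;

import org.apache.hadoop.conf.Configuration;

public class WriteXmlExample {
    public static void main(String[] args) throws IOException {
        // Pass false so no default resources (core-default.xml, etc.) are loaded.
        Configuration conf = new Configuration(false);
        conf.set("example.key", "example-value"); // illustrative key/value
        StringWriter writer = new StringWriter();
        conf.writeXml(writer);
        // Prints an XML <configuration> document containing example.key.
        System.out.println(writer);
    }
}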

Usage

From source file: com.sequenceiq.ambari.shell.commands.ConfigCommands.java

License: Apache License

/**
 * Downloads the desired configuration and saves it as an XML file.
 */
@CliCommand(value = "configuration download", help = "Downloads the desired configuration")
public String downloadConfig(
        @CliOption(key = "type", mandatory = true, help = "Type of the configuration") ConfigType configType)
        throws IOException {
    String configTypeName = configType.getName();
    Map<String, String> config = client.getServiceConfigMap(configTypeName).get(configTypeName);
    Configuration configuration = new Configuration(false);
    for (Map.Entry<String, String> entry : config.entrySet()) {
        configuration.set(entry.getKey(), entry.getValue());
    }
    File file = new File(configTypeName);
    // Use try-with-resources so the writer is closed and the XML is flushed to disk.
    try (FileWriter writer = new FileWriter(file)) {
        configuration.writeXml(writer);
    }
    return "Configuration saved to: " + file.getAbsolutePath();
}

From source file: com.splicemachine.test.SpliceTestYarnPlatform.java

License: Apache License

public void start(int nodeCount) throws Exception {
    if (yarnCluster == null) {
        LOG.info("Starting up YARN cluster with " + nodeCount + " nodes. Server yarn-site.xml is: "
                + yarnSiteConfigURL);
        conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:0");
        yarnCluster = new MiniYARNClusterSplice(SpliceTestYarnPlatform.class.getSimpleName(), nodeCount, 1, 1);
        yarnCluster.init(conf);
        yarnCluster.start();

        NodeManager nm = getNodeManager();
        waitForNMToRegister(nm);

        // save the server config to classpath so yarn clients can read it
        Configuration yarnClusterConfig = yarnCluster.getConfig();
        yarnClusterConfig.set("yarn.application.classpath", new File(yarnSiteConfigURL.getPath()).getParent());
        // Write the document to a buffer first (not directly to the file, as that
        // could cause the half-written file to be read, which would then fail).
        ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
        yarnClusterConfig.writeXml(bytesOut);
        bytesOut.close();
        //write the bytes to the file in the classpath
        OutputStream os = new FileOutputStream(new File(yarnSiteConfigURL.getPath()));
        os.write(bytesOut.toByteArray());
        os.close();
    }
    LOG.info("YARN cluster started.");
}

From source file: com.streamsets.pipeline.stage.BaseHiveIT.java

License: Apache License

/**
 * Write given Hadoop configuration to given file.
 */
private static void writeConfiguration(Configuration conf, String path) throws Exception {
    File outputFile = new File(path);
    FileOutputStream outputStream = new FileOutputStream(outputFile);
    conf.writeXml(outputStream);
    outputStream.close();
}

From source file: com.streamsets.pipeline.stage.destination.BaseMapReduceIT.java

License: Apache License

private static void writeConfiguration(Configuration conf, File outputFile) throws Exception {
    FileOutputStream outputStream = new FileOutputStream(outputFile);
    conf.writeXml(outputStream);
    outputStream.close();
}

From source file: com.streamsets.pipeline.stage.destination.hdfs.TestHdfsTarget.java

License: Apache License

@Test
public void testOnlyConfDirectory() throws Exception {
    // Create custom core-site.xml
    Configuration configuration = new Configuration();
    configuration.clear();
    configuration.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "file:///");
    FileOutputStream configOut = FileUtils.openOutputStream(new File(getTestDir() + "/conf-dir/core-site.xml"));
    configuration.writeXml(configOut);
    configOut.close();

    HdfsTarget hdfsTarget = HdfsTargetUtil.newBuilder().hdfsUri("").hdfsConfDir(getTestDir() + "/conf-dir/")
            .build();

    TargetRunner runner = new TargetRunner.Builder(HdfsDTarget.class, hdfsTarget)
            .setOnRecordError(OnRecordError.STOP_PIPELINE).build();

    runner.runInit();

    // The configuration object should have the FS config from core-site.xml
    Assert.assertEquals("file:///",
            hdfsTarget.getHdfsConfiguration().get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));

    runner.runDestroy();
}

From source file: com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHDFSSourceIT.java

License: Apache License

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
    minidfsDir.mkdirs();
    Assert.assertTrue(minidfsDir.exists());
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    dir = new Path(miniDFS.getURI() + "/dir");
    FileSystem fs = miniDFS.getFileSystem();
    fs.mkdirs(dir);
    writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
    dummyEtc = new File(minidfsDir, "dummy-etc");
    dummyEtc.mkdirs();
    Assert.assertTrue(dummyEtc.exists());
    Configuration dummyConf = new Configuration(false);
    for (String file : new String[] { "core", "hdfs", "mapred", "yarn" }) {
        File siteXml = new File(dummyEtc, file + "-site.xml");
        FileOutputStream out = new FileOutputStream(siteXml);
        dummyConf.writeXml(out);
        out.close();
    }
    resourcesDir = minidfsDir.getAbsolutePath();
    hadoopConfDir = dummyEtc.getName();
    System.setProperty("sdc.resources.dir", resourcesDir);
}

From source file: com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHDFSSourceIT.java

License: Apache License

private void writeConfig(File configDir, String configFileNamePrefix) throws IOException {
    Configuration dummyConf = new Configuration(false);
    File siteXml = new File(configDir, configFileNamePrefix + "-site.xml");
    FileOutputStream out = new FileOutputStream(siteXml);
    dummyConf.writeXml(out);
    out.close();
}

From source file: com.streamsets.pipeline.stage.origin.hdfs.cluster.TestClusterHDFSSource.java

License: Apache License

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    File minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
    minidfsDir.mkdirs();
    Assert.assertTrue(minidfsDir.exists());
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    dir = new Path(miniDFS.getURI() + "/dir");
    FileSystem fs = miniDFS.getFileSystem();
    fs.mkdirs(dir);
    writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
    dummyEtc = new File(minidfsDir, "dummy-etc");
    dummyEtc.mkdirs();
    Assert.assertTrue(dummyEtc.exists());
    Configuration dummyConf = new Configuration(false);
    for (String file : new String[] { "core", "hdfs", "mapred", "yarn" }) {
        File siteXml = new File(dummyEtc, file + "-site.xml");
        FileOutputStream out = new FileOutputStream(siteXml);
        dummyConf.writeXml(out);
        out.close();
    }
    resourcesDir = minidfsDir.getAbsolutePath();
    hadoopConfDir = dummyEtc.getName();
    System.setProperty("sdc.resources.dir", resourcesDir);
}

From source file: com.wibidata.maven.plugins.hbase.StartMojo.java

License: Apache License

/**
 * Writes the contents of the specified configuration to the HBase site file.
 *
 * @param conf The configuration to write.
 * @throws MojoExecutionException If there is an error writing the file.
 */
private void writeHBaseSiteFile(Configuration conf) throws MojoExecutionException {
    // Create the parent directory for the hbase conf file if it does not already exist.
    createFileParentDir(mHBaseSiteFile);

    // Write the file.
    FileOutputStream fileOutputStream = null;
    try {
        fileOutputStream = new FileOutputStream(mHBaseSiteFile);
        conf.writeXml(fileOutputStream);
    } catch (IOException e) {
        throw new MojoExecutionException("Unable to write to hbase conf file: " + mHBaseSiteFile.getPath(), e);
    } finally {
        closeFileOutputStream(fileOutputStream);
    }
    getLog().info("Wrote " + mHBaseSiteFile.getPath() + ".");
}

From source file: com.yahoo.storm.yarn.TestConfig.java

License: Open Source License

synchronized File createYarnSiteConfig(Configuration yarn_conf) throws IOException {
    yarn_site_xml = new File("./target/conf/yarn-site.xml");
    yarn_site_xml.getParentFile().mkdirs();
    FileWriter writer = new FileWriter(yarn_site_xml);
    yarn_conf.writeXml(writer);
    writer.flush();
    writer.close();
    return yarn_site_xml;
}