Example usage for org.apache.hadoop.conf Configuration addResource

List of usage examples for org.apache.hadoop.conf Configuration addResource

Introduction

On this page you can find example usages of the org.apache.hadoop.conf Configuration method addResource.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource.
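
The examples below exercise several overloads of addResource: a classpath resource name, a Path, a URL, an InputStream, and (per the prototype above) another Configuration. A minimal sketch combining these overloads is shown here; the resource names and paths are placeholders chosen for illustration, not taken from the examples.

import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddResourceSketch {
    public static void main(String[] args) throws Exception {
        // Start from an empty configuration (skip loading core-default.xml / core-site.xml).
        Configuration conf = new Configuration(false);

        // By classpath resource name (hypothetical file name).
        conf.addResource("my-site.xml");

        // By filesystem path (hypothetical path).
        conf.addResource(new Path("/etc/myapp/conf/my-site.xml"));

        // By input stream, e.g. a bundled resource on the classpath.
        InputStream in = AddResourceSketch.class.getResourceAsStream("/META-INF/properties.xml");
        if (in != null) {
            conf.addResource(in);
        }

        // By another Configuration, matching the prototype above.
        Configuration other = new Configuration(false);
        other.addResource("other-site.xml");
        conf.addResource(other);

        System.out.println(conf.get("fs.defaultFS", "not set"));
    }
}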

Usage

From source file:com.cloudera.llama.am.MiniLlama.java

License:Apache License

public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false);
    conf.addResource("llama-site.xml");

    CLIParser parser = createParser();
    try {
        CLIParser.Command command = parser.parse(args);
        if (command.getName().equals(HELP_CMD)) {
            parser.showHelp(command.getCommandLine());
        } else {
            final MiniLlama llama;
            if (command.getName().equals(MINICLUSTER_CMD)) {
                CommandLine cl = command.getCommandLine();
                int nodes = Integer.parseInt(cl.getOptionValue(NODES, "1"));
                conf = createMiniLlamaConf(conf, nodes);
                llama = new MiniLlama(conf);
                llama.skipDfsFormat(cl.hasOption(HDFS_NO_FORMAT));
                if (cl.hasOption(HDFS_WRITE_CONF)) {
                    llama.setWriteHadoopConfig(cl.getOptionValue(HDFS_WRITE_CONF));
                }
            } else {
                conf.setBoolean(MINI_USE_EXTERNAL_HADOOP_KEY, true);
                conf = createMiniLlamaConf(conf, 1); //nodes is ignored
                llama = new MiniLlama(conf);
            }
            llama.start();
            String clusterType = (command.getName().equals(MINICLUSTER_CMD)) ? "embedded HDFS/Yarn mini-cluster"
                    : "external HDFS/Yarn cluster";
            LOG.info("**************************************************************"
                    + "*******************************************************");
            LOG.info("Mini Llama running with {} with {} nodes, " + "HDFS URI: {} Llama URI: {}", clusterType,
                    llama.getNodes(), llama.getHadoopConf().get("fs.defaultFS"),
                    llama.getAddressHost() + ":" + llama.getAddressPort());
            LOG.info("*************************************************************"
                    + "********************************************************");
            Runtime.getRuntime().addShutdownHook(new Thread("minillama-shutdownhoock") {
                @Override
                public void run() {
                    llama.stop();
                }
            });
            synchronized (MiniLlama.class) {
                MiniLlama.class.wait();
            }
        }
    } catch (ParseException ex) {
        System.err.println("Invalid sub-command: " + ex.getMessage());
        System.err.println();
        System.err.println(parser.shortHelp());
        System.exit(1);
    } catch (Throwable ex) {
        System.err.println("Error: " + ex.getMessage());
        ex.printStackTrace(System.err);
        System.exit(2);
    }

}

From source file:com.cloudera.llama.nm.LlamaNMAuxiliaryService.java

License:Apache License

@Override
protected synchronized void serviceStart() throws Exception {
    AbstractMain.logServerInfo();

    Configuration llamaConf = new Configuration(getConfig());
    llamaConf.addResource("llama-site.xml");
    LOG.info("Server: {}", LlamaNMServer.class.getName());
    LOG.info("-----------------------------------------------------------------");
    nmServer = new LlamaNMServer();
    nmServer.setConf(llamaConf);
    nmServer.start();
}

From source file:com.cloudera.llama.server.AbstractMain.java

License:Apache License

private static Configuration loadConfiguration(String confDir) {
    Configuration llamaConf = new Configuration(false);
    confDir = (confDir != null) ? confDir : "";
    File file = new File(confDir, SITE_XML);
    if (!file.exists()) {
        LOG.warn("Llama configuration file '{}' not found in '{}'", SITE_XML, confDir);
    } else {
        llamaConf.addResource(new Path(file.getAbsolutePath()));
    }
    llamaConf.set(CONF_DIR_SYS_PROP, confDir);
    return llamaConf;
}

From source file:com.cloudera.oryx.common.servcomp.OryxConfiguration.java

License:Open Source License

private static void addResource(File hadoopConfDir, String fileName, Configuration conf) {
    File file = new File(hadoopConfDir, fileName);
    if (!file.exists()) {
        log.info("Hadoop config file not found: {}", file);
        return;
    }
    try {
        conf.addResource(file.toURI().toURL());
    } catch (MalformedURLException e) {
        throw new IllegalStateException(e);
    }
}

From source file:com.cloudwick.training.ApplicationTest.java

@Test
public void testApplication() throws Exception {
    try {
        LocalMode lma = LocalMode.newInstance();
        Configuration conf = new Configuration(false);
        conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
        lma.prepareDAG(new Application(), conf);
        LocalMode.Controller lc = lma.getController();
        lc.run(10000); // runs for 10 seconds and quits
    } catch (ConstraintViolationException e) {
        Assert.fail("constraint violations: " + e.getConstraintViolations());
    }
}

From source file:com.codefutures.tutorial.mesos.docker.ExampleFramework.java

License:Apache License

/**
 * Command-line entry point.
 * <br/>
 * Example usage: java ExampleFramework 127.0.0.1:5050 fedora/apache 2
 */

public static void main(String[] args) throws Exception {

    final String frameworkNameString = args[0];

    Configuration conf = new Configuration();
    conf.addResource(new Path("mesos-docker-framework.xml"));

    // If the framework stops running, mesos will terminate all of the tasks that
    // were initiated by the framework but only once the fail-over timeout period
    // has expired. Using a timeout of zero here means that the tasks will
    // terminate immediately when the framework is terminated. For production
    // deployments this probably isn't the desired behavior, so a timeout can be
    // specified here, allowing another instance of the framework to take over.
    final int frameworkFailoverTimeout = 0;

    FrameworkInfo.Builder frameworkBuilder = FrameworkInfo.newBuilder()
            .setName(frameworkNameString + "-on-Docker").setUser("") // Have Mesos fill in the current user.
            .setFailoverTimeout(frameworkFailoverTimeout); // timeout in seconds

    if (System.getenv("MESOS_CHECKPOINT") != null) {
        System.out.println("Enabling checkpoint for the framework");
        frameworkBuilder.setCheckpoint(true);
    }

    // parse mesos-docker-framework.xml parameters
    final String mesos_master = conf.get("mesos.master");
    final String mesos_slaves_ip = conf.get("will.start.mesos.slaves.host.name");
    final String imageName = conf.get("hadoop.image.name");
    final String resourceManagerHostName = conf.get("resource.manager.host.name");
    final int set_refuse_seconds = Integer.parseInt(conf.get("set.refuse.seconds"));
    final String[] hadoop_master_publish_ports = conf.get("hadoop.master.publish.ports").split(",");
    final double each_docker_container_cpus = Double.parseDouble(conf.get("each.docker.container.cpus"));
    final double each_docker_container_mem = Double.parseDouble(conf.get("each.docker.container.mem"));
    final String start_mesos_docker_hadoop_shell_script = conf.get("start-mesos-docker-hadoop.shell.script");
    final String mount_directory_on_host = conf.get("mount.directory.on.host");
    final String mount_directory_on_container = conf.get("mount.directory.on.container");

    // create the scheduler
    final Scheduler scheduler = new ExampleScheduler(frameworkNameString, imageName, mesos_slaves_ip,
            hadoop_master_publish_ports, each_docker_container_cpus, each_docker_container_mem,
            start_mesos_docker_hadoop_shell_script, mount_directory_on_host, mount_directory_on_container,
            set_refuse_seconds, resourceManagerHostName);

    // create the driver
    MesosSchedulerDriver driver;
    if (System.getenv("MESOS_AUTHENTICATE") != null) {
        System.out.println("Enabling authentication for the framework");

        if (System.getenv("DEFAULT_PRINCIPAL") == null) {
            System.err.println("Expecting authentication principal in the environment");
            System.exit(1);
        }

        if (System.getenv("DEFAULT_SECRET") == null) {
            System.err.println("Expecting authentication secret in the environment");
            System.exit(1);
        }

        Credential credential = Credential.newBuilder().setPrincipal(System.getenv("DEFAULT_PRINCIPAL"))
                .setSecret(ByteString.copyFrom(System.getenv("DEFAULT_SECRET").getBytes())).build();

        frameworkBuilder.setPrincipal(System.getenv("DEFAULT_PRINCIPAL"));

        driver = new MesosSchedulerDriver(scheduler, frameworkBuilder.build(), mesos_master, credential);
    } else {
        frameworkBuilder.setPrincipal("test-framework-java");

        driver = new MesosSchedulerDriver(scheduler, frameworkBuilder.build(), mesos_master);
    }

    int status = driver.run() == Status.DRIVER_STOPPED ? 0 : 1;

    // Ensure that the driver process terminates.
    driver.stop();

    System.exit(status);
}

From source file:com.collective.celos.ci.config.deploy.CelosCiContext.java

License:Apache License

private Configuration setupConfiguration(String username, CelosCiTarget target) throws Exception {
    JScpWorker jscpWorker = new JScpWorker(username);
    Configuration conf = new Configuration();

    conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
    conf.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());

    conf.addResource(jscpWorker.getFileObjectByUri(target.getPathToHdfsSite()).getContent().getInputStream());
    conf.addResource(jscpWorker.getFileObjectByUri(target.getPathToCoreSite()).getContent().getInputStream());

    UserGroupInformation.setConfiguration(conf);

    return conf;
}

From source file:com.collective.celos.trigger.HDFSCheckTrigger.java

License:Apache License

private void addFileToConfiguration(Configuration conf, String fileName) throws FileNotFoundException {
    InputStream fileStream = getClass().getClassLoader().getResourceAsStream(fileName);
    if (fileStream != null) {
        conf.addResource(fileStream);
    }
}

From source file:com.datatorrent.ApplicationTest.java

@Test
public void testApplication1() throws Exception {
    try {
        LocalMode lma = LocalMode.newInstance();
        Configuration conf = new Configuration(false);
        conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
        lma.prepareDAG(new Application1(), conf);
        LocalMode.Controller lc = lma.getController();
        lc.run(10000); // runs for 10 seconds and quits
    } catch (ConstraintViolationException e) {
        Assert.fail("constraint violations: " + e.getConstraintViolations());
    }
}

From source file:com.datatorrent.ApplicationTest.java

@Test
public void testApplication2() throws Exception {
    try {
        LocalMode lma = LocalMode.newInstance();
        Configuration conf = new Configuration(false);
        conf.addResource(this.getClass().getResourceAsStream("/META-INF/properties.xml"));
        lma.prepareDAG(new Application2(), conf);
        LocalMode.Controller lc = lma.getController();
        lc.run(10000); // runs for 10 seconds and quits
    } catch (ConstraintViolationException e) {
        Assert.fail("constraint violations: " + e.getConstraintViolations());
    }
}