Example usage for org.apache.hadoop.conf Configuration addResource

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration#addResource.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource.
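
The prototype above shows only the Configuration overload; the examples below also use the String, Path, URL, and InputStream overloads. As a minimal, hedged sketch (file names and paths are illustrative, not taken from the examples), each variant can be exercised like this:

import java.io.FileInputStream;
import java.io.InputStream;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddResourceSketch {
    public static void main(String[] args) throws Exception {
        // Start from an empty Configuration so only the added resources contribute properties.
        Configuration conf = new Configuration(false);

        // Classpath resource: the name is resolved against the classloader.
        conf.addResource("my-site.xml");

        // Local or HDFS location wrapped in a Hadoop Path.
        conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));

        // Any URL, for example one derived from a local file.
        conf.addResource(new URL("file:///etc/hadoop/conf/hdfs-site.xml"));

        // Raw input stream; resources are parsed lazily, so force the read before closing.
        try (InputStream in = new FileInputStream("/tmp/extra-site.xml")) {
            conf.addResource(in);
            conf.size();
        }

        System.out.println(conf.get("fs.defaultFS"));
    }
}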

Usage

From source file:com.scaleoutsoftware.soss.hserver.hadoop.SubmittedJob.java

License:Apache License

SubmittedJob(JobID jobID, String jobSubmitDirectory, Credentials credentials, Configuration configuration)
        throws IOException, InterruptedException {
    this.jobID = jobID;
    this.configuration = configuration;
    this.jobSubmitDirectoryPath = new Path(jobSubmitDirectory);
    this.fileSystem = FileSystem.get(configuration);

    JobSplit.TaskSplitMetaInfo[] splitInfo = SplitMetaInfoReader.readSplitMetaInfo(jobID, fileSystem,
            configuration, jobSubmitDirectoryPath);

    Path jobSplitFile = JobSubmissionFiles.getJobSplitFile(jobSubmitDirectoryPath);
    FSDataInputStream stream = fileSystem.open(jobSplitFile);

    for (JobSplit.TaskSplitMetaInfo info : splitInfo) {
        Object split = getSplitDetails(stream, info.getStartOffset(), configuration);
        inputSplits.add(split);
        splitLocations.put(split, info.getLocations());
        LOG.info("Adding split for execution. Split = " + split + " Locations: "
                + Arrays.toString(splitLocations.get(split)));
    }

    stream.close();

    jobConfPath = JobSubmissionFiles.getJobConfPath(jobSubmitDirectoryPath);

    if (!fileSystem.exists(jobConfPath)) {
        throw new IOException("Cannot find job.xml. Path = " + jobConfPath);
    }

    //We cannot just use JobConf(Path) constructor,
    //because it does not work for HDFS locations.
    //The comment in Configuration#loadResource() states,
    //for the case when the Path to the resource is provided:
    //"Can't use FileSystem API or we get an infinite loop
    //since FileSystem uses Configuration API.  Use java.io.File instead."
    //
    //Workaround: construct empty Configuration, provide it with
    //input stream and give it to JobConf constructor.
    FSDataInputStream jobConfStream = fileSystem.open(jobConfPath);
    Configuration jobXML = new Configuration(false);
    jobXML.addResource(jobConfStream);

    //The configuration is not actually read until we attempt to access a
    //property. Calling #size() forces the Configuration to read the input stream.
    jobXML.size();

    //We are done with the input stream, so we can close it now.
    jobConfStream.close();

    jobConf = new JobConf(jobXML);

    newApi = jobConf.getUseNewMapper();

    jobStatus = new JobStatus(jobID, 0f, 0f, 0f, 0f, JobStatus.State.RUNNING, JobPriority.NORMAL,
            UserGroupInformation.getCurrentUser().getUserName(), jobID.toString(), jobConfPath.toString(), "");
}
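
Isolated from the rest of the constructor, the workaround is small. A hedged sketch (the path and surrounding names are illustrative, assuming the same imports as the example above):

FileSystem fs = FileSystem.get(new Configuration());
Path jobConfPath = new Path("/tmp/staging/job.xml"); // illustrative HDFS location

try (FSDataInputStream in = fs.open(jobConfPath)) {
    Configuration jobXml = new Configuration(false); // empty, no default resources
    jobXml.addResource(in);
    jobXml.size(); // resources are parsed lazily; touch the config while the stream is still open
    JobConf jobConf = new JobConf(jobXml);
    System.out.println(jobConf.getUseNewMapper());
}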

From source file:com.sequenceiq.ambari.shell.commands.ConfigCommands.java

License:Apache License

/**
 * Sets the desired configuration.
 */
@CliCommand(value = "configuration set", help = "Sets the desired configuration")
public String setConfig(
        @CliOption(key = "type", mandatory = true, help = "Type of the configuration") ConfigType configType,
        @CliOption(key = "url", help = "URL of the config") String url,
        @CliOption(key = "file", help = "File of the config") File file) throws IOException {
    Configuration configuration = new Configuration(false);
    if (file == null) {
        configuration.addResource(new URL(url));
    } else {
        configuration.addResource(new FileInputStream(file));
    }
    Map<String, String> config = new HashMap<String, String>();
    Iterator<Map.Entry<String, String>> iterator = configuration.iterator();
    while (iterator.hasNext()) {
        Map.Entry<String, String> entry = iterator.next();
        config.put(entry.getKey(), entry.getValue());
    }
    client.modifyConfiguration(configType.getName(), config);
    return "Restart is required!\n" + renderSingleMap(config, "KEY", "VALUE");
}

From source file:com.shopzilla.hadoop.repl.HadoopREPL.java

License:Apache License

public static void main(final String[] args) {
    int exitCode = 0;

    try {
        final Configuration configuration = new Configuration(true);
        if (args.length == 1) {
            configuration.addResource(new File(args[0]).toURI().toURL());
        } else if (args.length > 1) {
            throw new ExitSignal(1, "Usage: ./hadoop-repl <path-to-hadoop-core-site-file>");
        }
        new HadoopREPL(configuration).loop("hadoop> ");
    } catch (final ExitSignal ex) {
        System.err.println(ex.getMessage());
        exitCode = ex.getExitCode();
    } catch (final Exception ex) {
        System.err.println(ex);
        exitCode = 1;
    }

    System.exit(exitCode);
}

From source file:com.sina.data.bigmonitor.web.HttpServer.java

License:Apache License

public static void main(String[] args) {
    System.out.println("from here");
    HttpServer infoServer = null;
    int tmpInfoPort = 8080;
    //String infoHost = "szwg-hadoop-con1.szwg01";
    String infoHost = "0.0.0.0";
    Configuration conf = new Configuration();
    conf.addResource("/Users/shiboyan/sinam/data/DGM/BigMonitor/GmondCollector_hbase1/conf/hadoop-site.xml");
    try {
        infoServer = new HttpServer("tentacles", infoHost, tmpInfoPort, tmpInfoPort == 0, conf);
        infoServer.start();
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:com.splicemachine.constants.SpliceConfiguration.java

License:Apache License

private static void addSpliceResources(Configuration c) {
    c.addResource("splice-site.xml");
}
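
The String overload used here resolves the name against the classpath. A hedged usage sketch (the caller and the property key are illustrative; only splice-site.xml comes from the example):

// Assumes splice-site.xml is on the classpath of the calling JVM.
Configuration conf = new Configuration();
conf.addResource("splice-site.xml"); // same call as addSpliceResources(conf)
String value = conf.get("some.splice.property"); // hypothetical key, shown only to trigger the lazy load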

From source file:com.splicemachine.mrio.api.core.SMInputFormat.java

License:Apache License

public SMRecordReaderImpl getRecordReader(InputSplit split, Configuration config)
        throws IOException, InterruptedException {
    config.addResource(conf);
    if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "getRecordReader with table=%s, inputTable=%s," + "conglomerate=%s", table,
                config.get(TableInputFormat.INPUT_TABLE), config.get(MRConstants.SPLICE_INPUT_CONGLOMERATE));
    rr = new SMRecordReaderImpl(conf);
    if (table == null) {
        TableName tableInfo = TableName.valueOf(config.get(TableInputFormat.INPUT_TABLE));
        PartitionFactory tableFactory = SIDriver.driver().getTableFactory();
        table = ((ClientPartition) tableFactory.getTable(tableInfo)).unwrapDelegate();
    }
    rr.setHTable(table);
    if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "returning record reader");
    return rr;
}

From source file:com.streamsets.pipeline.hbase.api.impl.AbstractHBaseConnectionHelper.java

License:Apache License

private Configuration initialHBaseConfiguration(List<Stage.ConfigIssue> issues, Stage.Context context,
        String hbaseName, String hbaseConfDir, String tableName, Map<String, String> hbaseConfigs) {
    Configuration hbaseConf = HBaseConfiguration.create();
    if (hbaseConfDir != null && !hbaseConfDir.isEmpty()) {
        File hbaseConfigDir = new File(hbaseConfDir);

        if ((context.getExecutionMode() == ExecutionMode.CLUSTER_BATCH
                || context.getExecutionMode() == ExecutionMode.CLUSTER_YARN_STREAMING
                || context.getExecutionMode() == ExecutionMode.CLUSTER_MESOS_STREAMING)
                && hbaseConfigDir.isAbsolute()) {
            //Do not allow absolute hdfs config directory in cluster mode
            issues.add(
                    context.createConfigIssue(hbaseName, HBASE_CONF_DIR_CONFIG, Errors.HBASE_24, hbaseConfDir));
        } else {
            if (!hbaseConfigDir.isAbsolute()) {
                hbaseConfigDir = new File(context.getResourcesDirectory(), hbaseConfDir).getAbsoluteFile();
            }
            if (!hbaseConfigDir.exists()) {
                issues.add(context.createConfigIssue(hbaseName, HBASE_CONF_DIR_CONFIG, Errors.HBASE_19,
                        hbaseConfDir));
            } else if (!hbaseConfigDir.isDirectory()) {
                issues.add(context.createConfigIssue(hbaseName, HBASE_CONF_DIR_CONFIG, Errors.HBASE_20,
                        hbaseConfDir));
            } else {
                File hbaseSiteXml = new File(hbaseConfigDir, "hbase-site.xml");
                if (hbaseSiteXml.exists()) {
                    if (!hbaseSiteXml.isFile()) {
                        issues.add(context.createConfigIssue(hbaseName, HBASE_CONF_DIR_CONFIG, Errors.HBASE_21,
                                hbaseConfDir, "hbase-site.xml"));
                    }
                    hbaseConf.addResource(new Path(hbaseSiteXml.getAbsolutePath()));
                }
            }
        }
    }
    for (Map.Entry<String, String> config : hbaseConfigs.entrySet()) {
        hbaseConf.set(config.getKey(), config.getValue());
    }

    if (context.isPreview()) {
        // by default the retry number is set to 35 which is too much for preview mode
        LOG.debug("Setting HBase client retries to 3 for preview");
        hbaseConf.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "3");
    }

    if (tableName == null || tableName.isEmpty()) {
        issues.add(context.createConfigIssue(hbaseName, TABLE_NAME, Errors.HBASE_05));
    }

    return hbaseConf;
}
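
Stripped of the config-issue validation, the resource-loading step above reduces to a few lines. A hedged sketch (the directory is illustrative):

// Illustrative: load hbase-site.xml from a known configuration directory.
Configuration hbaseConf = HBaseConfiguration.create();
File hbaseSiteXml = new File("/etc/hbase/conf", "hbase-site.xml");
if (hbaseSiteXml.isFile()) {
    hbaseConf.addResource(new Path(hbaseSiteXml.getAbsolutePath()));
}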

From source file:com.streamsets.pipeline.lib.hbase.common.HBaseUtil.java

License:Apache License

public static Configuration getHBaseConfiguration(List<Stage.ConfigIssue> issues, Stage.Context context,
        String HBaseName, String hbaseConfDir, String zookeeperQuorum, String zookeeperParentZnode,
        int clientPort, String tableName, boolean kerberosAuth, Map<String, String> hbaseConfigs) {
    Configuration hbaseConf = HBaseConfiguration.create();
    if (hbaseConfDir != null && !hbaseConfDir.isEmpty()) {
        File hbaseConfigDir = new File(hbaseConfDir);

        if ((context.getExecutionMode() == ExecutionMode.CLUSTER_BATCH
                || context.getExecutionMode() == ExecutionMode.CLUSTER_YARN_STREAMING
                || context.getExecutionMode() == ExecutionMode.CLUSTER_MESOS_STREAMING)
                && hbaseConfigDir.isAbsolute()) {
            //Do not allow absolute hdfs config directory in cluster mode
            issues.add(
                    context.createConfigIssue(HBaseName, HBASE_CONF_DIR_CONFIG, Errors.HBASE_24, hbaseConfDir));
        } else {
            if (!hbaseConfigDir.isAbsolute()) {
                hbaseConfigDir = new File(context.getResourcesDirectory(), hbaseConfDir).getAbsoluteFile();
            }
            if (!hbaseConfigDir.exists()) {
                issues.add(context.createConfigIssue(HBaseName, HBASE_CONF_DIR_CONFIG, Errors.HBASE_19,
                        hbaseConfDir));
            } else if (!hbaseConfigDir.isDirectory()) {
                issues.add(context.createConfigIssue(HBaseName, HBASE_CONF_DIR_CONFIG, Errors.HBASE_20,
                        hbaseConfDir));
            } else {
                File hbaseSiteXml = new File(hbaseConfigDir, "hbase-site.xml");
                if (hbaseSiteXml.exists()) {
                    if (!hbaseSiteXml.isFile()) {
                        issues.add(context.createConfigIssue(HBaseName, HBASE_CONF_DIR_CONFIG, Errors.HBASE_21,
                                hbaseConfDir, "hbase-site.xml"));
                    }
                    hbaseConf.addResource(new Path(hbaseSiteXml.getAbsolutePath()));
                }
            }
        }
    }
    for (Map.Entry<String, String> config : hbaseConfigs.entrySet()) {
        hbaseConf.set(config.getKey(), config.getValue());
    }

    if (context.isPreview()) {
        // by default the retry number is set to 35 which is too much for preview mode
        LOG.debug("Setting HBase client retries to 3 for preview");
        hbaseConf.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "3");
    }

    if (tableName == null || tableName.isEmpty()) {
        issues.add(context.createConfigIssue(HBaseName, "tableName", Errors.HBASE_05));
    }

    return hbaseConf;
}

From source file:com.streamsets.pipeline.lib.hdfs.common.HdfsBaseConfigBean.java

License:Apache License

protected Configuration getHadoopConfiguration(Stage.Context context, List<Stage.ConfigIssue> issues) {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
    //We handle the file system close ourselves in destroy
    //If enabled, this also causes issues (it does not allow us to rename the files on the destroy call)
    // when we run a shutdown hook on app kill
    //See https://issues.streamsets.com/browse/SDC-4057
    conf.setBoolean("fs.automatic.close", false);

    // See SDC-5451, we set hadoop.treat.subject.external automatically to take advantage of HADOOP-13805
    HadoopConfigurationUtils.configureHadoopTreatSubjectExternal(conf);

    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
                    "hdfs/_HOST@" + HadoopSecurityUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.stream().anyMatch(i -> DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY.equals(i.key))) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_28,
                        ex.toString()));
            }
        }
    }
    if (hdfsConfDir != null && !hdfsConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hdfsConfDir);
        if ((context.getExecutionMode() == ExecutionMode.CLUSTER_BATCH
                || context.getExecutionMode() == ExecutionMode.CLUSTER_YARN_STREAMING
                || context.getExecutionMode() == ExecutionMode.CLUSTER_MESOS_STREAMING)
                && hadoopConfigDir.isAbsolute()) {
            //Do not allow absolute hadoop config directory in cluster mode
            issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), getConfigBeanPrefix() + "hdfsConfDir",
                    Errors.HADOOPFS_45, hdfsConfDir));
        } else {
            if (!hadoopConfigDir.isAbsolute()) {
                hadoopConfigDir = new File(context.getResourcesDirectory(), hdfsConfDir).getAbsoluteFile();
            }
            if (!hadoopConfigDir.exists()) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                        getConfigBeanPrefix() + "hdfsConfDir", Errors.HADOOPFS_25, hadoopConfigDir.getPath()));
            } else if (!hadoopConfigDir.isDirectory()) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                        getConfigBeanPrefix() + "hdfsConfDir", Errors.HADOOPFS_26, hadoopConfigDir.getPath()));
            } else {
                File coreSite = new File(hadoopConfigDir, "core-site.xml");
                if (coreSite.exists()) {
                    if (!coreSite.isFile()) {
                        issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                                getConfigBeanPrefix() + "hdfsConfDir", Errors.HADOOPFS_27, coreSite.getPath()));
                    }
                    conf.addResource(new Path(coreSite.getAbsolutePath()));
                }
                File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
                if (hdfsSite.exists()) {
                    if (!hdfsSite.isFile()) {
                        issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                                getConfigBeanPrefix() + "hdfsConfDir", Errors.HADOOPFS_27, hdfsSite.getPath()));
                    }
                    conf.addResource(new Path(hdfsSite.getAbsolutePath()));
                }
            }
        }
    } else {
        Optional<HadoopConfigBean> fsDefaultFS = hdfsConfigs.stream()
                .filter(item -> CommonConfigurationKeys.FS_DEFAULT_NAME_KEY.equals(item.key)).findFirst();
        if (StringUtils.isEmpty(hdfsUri) && !fsDefaultFS.isPresent()) {
            // No URI, no config dir, and no fs.defaultFS config param
            // Avoid defaulting to writing to file:/// (SDC-5143)
            issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), getConfigBeanPrefix() + "hdfsUri",
                    Errors.HADOOPFS_61));
        }
    }

    for (HadoopConfigBean configBean : hdfsConfigs) {
        try {
            conf.set(configBean.key, configBean.value.get());
        } catch (StageException e) {
            issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), getConfigBeanPrefix() + "hdfsConfigs",
                    Errors.HADOOPFS_62, e.toString()));
        }
    }

    return conf;
}
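
One property of addResource worth noting here: resources are applied in the order they are added, so a key defined in a later resource overrides the same key from an earlier one unless the earlier definition is marked final. A hedged sketch with illustrative paths:

Configuration conf = new Configuration();
conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
// Later resources win for duplicate keys, unless the earlier value is declared <final>true</final>.
String defaultFs = conf.get("fs.defaultFS");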

From source file:com.streamsets.pipeline.spark.SparkStreamingBinding.java

License:Apache License

private Configuration getHadoopConf(Configuration conf) {
    String hdfsS3ConfProp = properties.getProperty("hdfsS3ConfDir");
    if (hdfsS3ConfProp != null && !hdfsS3ConfProp.isEmpty()) {
        File hdfsS3ConfDir = new File(System.getProperty("sdc.resources.dir"), hdfsS3ConfProp)
                .getAbsoluteFile();
        if (!hdfsS3ConfDir.exists()) {
            throw new IllegalArgumentException("The config dir for hdfs/S3 doesn't exist");
        } else {
            File coreSite = new File(hdfsS3ConfDir, "core-site.xml");
            if (coreSite.exists()) {
                conf.addResource(new Path(coreSite.getAbsolutePath()));
            } else {
                throw new IllegalStateException(
                        "Core-site xml for configuring Hadoop/S3 filesystem is required for checkpoint related metadata while running Spark Streaming");
            }
            File hdfsSite = new File(hdfsS3ConfDir, "hdfs-site.xml");
            if (hdfsSite.exists()) {
                conf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }
        }
    }
    if ((hdfsS3ConfProp == null || hdfsS3ConfProp.isEmpty())) {
        throw new IllegalArgumentException("Cannot find hdfs/S3 config; hdfsS3ConfDir cannot be null");
    }
    return conf;
}