Example usage for org.apache.hadoop.conf Configuration addResource

List of usage examples for org.apache.hadoop.conf Configuration addResource

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration.addResource.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource.
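
addResource is overloaded: besides the Configuration overload shown in the prototype above, it also accepts a classpath resource name, a URL, an org.apache.hadoop.fs.Path, or an InputStream containing Hadoop-style configuration XML. The snippet below is a minimal sketch of the most common variants and is not taken from the examples that follow; the file path and the demo.key property are hypothetical.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddResourceSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // From a Path on a Hadoop file system (hypothetical local file)
        conf.addResource(new Path("file:///tmp/my-site.xml"));

        // From an InputStream holding Hadoop configuration XML
        String xml = "<configuration><property>"
                + "<name>demo.key</name><value>demo</value>"
                + "</property></configuration>";
        conf.addResource(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));

        // From another Configuration (the overload shown in the prototype)
        Configuration overrides = new Configuration(false);
        overrides.set("demo.key", "overridden");
        conf.addResource(overrides);

        // Later resources override earlier ones for non-final properties,
        // so this should print "overridden"
        System.out.println(conf.get("demo.key"));
    }
}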

Usage

From source file:org.apache.falcon.latedata.LateDataHandler.java

License:Apache License

public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path confPath = new Path("file:///" + System.getProperty("oozie.action.conf.xml"));

    LOG.info("{} found ? {}", confPath, confPath.getFileSystem(conf).exists(confPath));
    conf.addResource(confPath);
    ToolRunner.run(conf, new LateDataHandler(), args);
}
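
Note that the "file:///" prefix forces the local file system: the Oozie launcher materializes the action configuration referenced by the oozie.action.conf.xml system property on the task's local disk, and addResource then merges it into the Configuration that ToolRunner.run hands to the tool.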

From source file:org.apache.falcon.regression.core.util.OozieUtil.java

License:Apache License

/**
 * Returns configuration object of a given bundleID for a given instanceTime.
 *
 * @param oozieClient  oozie client of cluster job is running on
 * @param bundleID     bundleID of given cluster
 * @param time         instanceTime
 * @throws org.apache.oozie.client.OozieClientException
 * @throws org.json.JSONException
 */
public static Configuration getProcessConf(OozieClient oozieClient, String bundleID, String time)
        throws OozieClientException, JSONException {
    waitForCoordinatorJobCreation(oozieClient, bundleID);
    List<CoordinatorJob> coordJobs = oozieClient.getBundleJobInfo(bundleID).getCoordinators();
    CoordinatorJob coordJobInfo = oozieClient.getCoordJobInfo(coordJobs.get(0).getId());

    Configuration conf = new Configuration();
    for (CoordinatorAction action : coordJobInfo.getActions()) {
        String dateStr = (new DateTime(action.getNominalTime(), DateTimeZone.UTC)).toString();
        if (!dateStr.isEmpty() && dateStr.contains(time.replace("Z", ""))) {
            conf.addResource(new ByteArrayInputStream(
                    oozieClient.getJobInfo(action.getExternalId()).getConf().getBytes()));
        }
    }
    return conf;
}
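
A hedged usage sketch for the method above, written as a fragment in the same style; the Oozie URL, bundle ID, instance time, and property name are hypothetical.

public static String getWorkflowUser() throws OozieClientException, JSONException {
    OozieClient oozieClient = new OozieClient("http://oozie.example.com:11000/oozie");
    Configuration processConf = OozieUtil.getProcessConf(oozieClient,
            "0000042-150101000000000-oozie-B", "2015-01-01T00:00Z");
    // Properties of the matching action's job configuration are now readable through get()
    return processConf.get("user.name");
}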

From source file:org.apache.falcon.regression.core.util.OozieUtil.java

License:Apache License

/**
 * Returns configuration object of a given bundleID for a given retentionFeed.
 *
 * @param oozieClient  oozie client of cluster job is running on
 * @param bundleID     bundleID of given cluster
 * @throws OozieClientException
 */
public static Configuration getRetentionConfiguration(OozieClient oozieClient, String bundleID)
        throws OozieClientException {
    waitForCoordinatorJobCreation(oozieClient, bundleID);
    CoordinatorJob coord = null;
    List<CoordinatorJob> coordJobs = oozieClient.getBundleJobInfo(bundleID).getCoordinators();
    for (CoordinatorJob coordinatorJob : coordJobs) {
        if (coordinatorJob.getAppName().startsWith("FALCON_FEED_RETENTION")) {
            coord = oozieClient.getCoordJobInfo(coordinatorJob.getId());
        }
    }

    Configuration configuration = new Configuration();
    if (coord != null) {
        WorkflowJob wid = oozieClient.getJobInfo(coord.getActions().get(0).getExternalId());
        configuration.addResource(new ByteArrayInputStream(wid.getConf().getBytes()));
    } else {
        configuration = null;
    }

    return configuration;
}
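
getRetentionConfiguration returns null when the bundle has no coordinator whose name starts with FALCON_FEED_RETENTION, so callers should null-check the result before reading properties from it.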

From source file:org.apache.falcon.replication.FeedReplicator.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    CommandLine cmd = getCommand(args);

    Configuration conf = this.getConf();
    // inject wf configs
    Path confPath = new Path("file:///" + System.getProperty("oozie.action.conf.xml"));

    LOG.info("{} found conf ? {}", confPath, confPath.getFileSystem(conf).exists(confPath));
    conf.addResource(confPath);

    String includePathConf = conf.get("falcon.include.path");
    final boolean includePathSet = (includePathConf != null) && !IGNORE.equalsIgnoreCase(includePathConf);

    DistCpOptions options = getDistCpOptions(cmd, includePathSet);

    String availabilityFlagOpt = cmd.getOptionValue("availabilityFlag");
    if (StringUtils.isEmpty(availabilityFlagOpt)) {
        availabilityFlagOpt = "NA";
    }
    String availabilityFlag = EntityUtil.SUCCEEDED_FILE_NAME;
    if (cmd.getOptionValue("falconFeedStorageType").equals(Storage.TYPE.FILESYSTEM.name())) {
        availabilityFlag = "NA".equals(availabilityFlagOpt) ? availabilityFlag : availabilityFlagOpt;
    }

    conf.set("falcon.feed.availability.flag", availabilityFlag);
    DistCp distCp = (includePathSet) ? new CustomReplicator(conf, options) : new DistCp(conf, options);
    LOG.info("Started DistCp with options :" + options);
    Job job = distCp.execute();

    if (cmd.hasOption("counterLogDir") && job.getStatus().getState() == JobStatus.State.SUCCEEDED) {
        LOG.info("Gathering counters for the the Feed Replication job");
        Path counterFile = new Path(cmd.getOptionValue("counterLogDir"), "counter.txt");
        JobCounters fsReplicationCounters = JobCountersHandler.getCountersType(JobType.FSREPLICATION.name());
        if (fsReplicationCounters != null) {
            fsReplicationCounters.obtainJobCounters(conf, job, true);
            fsReplicationCounters.storeJobCounters(conf, counterFile);
        }
    }

    if (includePathSet) {
        executePostProcessing(conf, options); // this only applies for FileSystem Storage.
    }

    LOG.info("Completed DistCp");
    return 0;
}
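
Here the injected workflow configuration supplies falcon.include.path; once addResource has been called, that value is visible to the subsequent conf.get() and decides whether a CustomReplicator or a plain DistCp is used.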

From source file:org.apache.falcon.workflow.engine.OozieDAGEngine.java

License:Apache License

private InstancesResult.KeyValuePair[] getWFParams(WorkflowJob jobInfo) {
    Configuration conf = new Configuration(false);
    conf.addResource(new ByteArrayInputStream(jobInfo.getConf().getBytes()));
    InstancesResult.KeyValuePair[] wfParams = new InstancesResult.KeyValuePair[conf.size()];
    int i = 0;
    for (Map.Entry<String, String> entry : conf) {
        wfParams[i++] = new InstancesResult.KeyValuePair(entry.getKey(), entry.getValue());
    }
    return wfParams;
}
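
Passing false to the Configuration constructor skips the default resources (core-default.xml and core-site.xml), so iterating the object yields only the properties of the workflow configuration that was added via addResource.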

From source file:org.apache.falcon.workflow.engine.OozieDAGEngine.java

License:Apache License

@Override
public Properties getConfiguration(String externalID) throws DAGEngineException {
    Properties props = new Properties();
    try {
        switchUser();
        WorkflowJob jobInfo = client.getJobInfo(externalID);
        Configuration conf = new Configuration(false);
        conf.addResource(new ByteArrayInputStream(jobInfo.getConf().getBytes()));

        for (Map.Entry<String, String> entry : conf) {
            props.put(entry.getKey(), entry.getValue());
        }
    } catch (OozieClientException e) {
        throw new DAGEngineException(e);
    }

    return props;
}

From source file:org.apache.falcon.workflow.util.OozieActionConfigurationHelper.java

License:Apache License

public static Configuration createActionConf() throws IOException {
    Configuration conf = new Configuration();
    Path confPath = new Path("file:///" + System.getProperty("oozie.action.conf.xml"));

    final boolean actionConfExists = confPath.getFileSystem(conf).exists(confPath);
    LOG.info("Oozie Action conf {} found ? {}", confPath, actionConfExists);
    if (actionConfExists) {
        LOG.info("Oozie Action conf found, adding path={}, conf={}", confPath, conf.toString());
        conf.addResource(confPath);
        dumpConf(conf, "oozie action conf ");
    }

    String tokenFile = System.getenv("HADOOP_TOKEN_FILE_LOCATION");
    if (tokenFile != null) {
        if (Shell.WINDOWS) {
            if (tokenFile.charAt(0) == '"') {
                tokenFile = tokenFile.substring(1);
            }
            if (tokenFile.charAt(tokenFile.length() - 1) == '"') {
                tokenFile = tokenFile.substring(0, tokenFile.length() - 1);
            }
        }

        conf.set("mapreduce.job.credentials.binary", tokenFile);
        System.setProperty("mapreduce.job.credentials.binary", tokenFile);
        conf.set("tez.credentials.path", tokenFile);
        System.setProperty("tez.credentials.path", tokenFile);
    }

    conf.set("datanucleus.plugin.pluginRegistryBundleCheck", "LOG");
    conf.setBoolean("hive.exec.mode.local.auto", false);

    return conf;
}
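
The HADOOP_TOKEN_FILE_LOCATION handling is worth noting: the launcher exports the delegation-token file through that environment variable, and copying its (quote-stripped, on Windows) path into mapreduce.job.credentials.binary and tez.credentials.path lets jobs started from this action reuse those credentials.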

From source file:org.apache.flink.api.java.hadoop.mapred.utils.HadoopUtils.java

License:Apache License

/**
 * Returns a new Hadoop Configuration object using the path to the hadoop conf configured
 * in the main configuration (flink-conf.yaml).
 * This method is public because it is being used in the HadoopDataSource.
 */
public static org.apache.hadoop.conf.Configuration getHadoopConfiguration() {
    Configuration retConf = new org.apache.hadoop.conf.Configuration();

    // We need to load both core-site.xml and hdfs-site.xml to determine the default fs path and
    // the hdfs configuration
    // Try to load HDFS configuration from Hadoop's own configuration files
    // 1. approach: Flink configuration
    final String hdfsDefaultPath = GlobalConfiguration.getString(ConfigConstants.HDFS_DEFAULT_CONFIG, null);
    if (hdfsDefaultPath != null) {
        retConf.addResource(new org.apache.hadoop.fs.Path(hdfsDefaultPath));
    } else {
        LOG.debug("Cannot find hdfs-default configuration file");
    }

    final String hdfsSitePath = GlobalConfiguration.getString(ConfigConstants.HDFS_SITE_CONFIG, null);
    if (hdfsSitePath != null) {
        retConf.addResource(new org.apache.hadoop.fs.Path(hdfsSitePath));
    } else {
        LOG.debug("Cannot find hdfs-site configuration file");
    }

    // 2. Approach environment variables
    String[] possibleHadoopConfPaths = new String[4];
    possibleHadoopConfPaths[0] = GlobalConfiguration.getString(ConfigConstants.PATH_HADOOP_CONFIG, null);
    possibleHadoopConfPaths[1] = System.getenv("HADOOP_CONF_DIR");

    if (System.getenv("HADOOP_HOME") != null) {
        possibleHadoopConfPaths[2] = System.getenv("HADOOP_HOME") + "/conf";
        possibleHadoopConfPaths[3] = System.getenv("HADOOP_HOME") + "/etc/hadoop"; // hadoop 2.2
    }

    for (String possibleHadoopConfPath : possibleHadoopConfPaths) {
        if (possibleHadoopConfPath != null) {
            if (new File(possibleHadoopConfPath).exists()) {
                if (new File(possibleHadoopConfPath + "/core-site.xml").exists()) {
                    retConf.addResource(
                            new org.apache.hadoop.fs.Path(possibleHadoopConfPath + "/core-site.xml"));

                    if (LOG.isDebugEnabled()) {
                        LOG.debug(
                                "Adding " + possibleHadoopConfPath + "/core-site.xml to hadoop configuration");
                    }
                }
                if (new File(possibleHadoopConfPath + "/hdfs-site.xml").exists()) {
                    retConf.addResource(
                            new org.apache.hadoop.fs.Path(possibleHadoopConfPath + "/hdfs-site.xml"));

                    if (LOG.isDebugEnabled()) {
                        LOG.debug(
                                "Adding " + possibleHadoopConfPath + "/hdfs-site.xml to hadoop configuration");
                    }
                }
            }
        }
    }
    return retConf;
}

From source file:org.apache.flink.runtime.fs.hdfs.DistributedFileSystem.java

License:Apache License

/**
 * Returns a new Hadoop Configuration object using the path to the hadoop conf configured 
 * in the main configuration (flink-conf.yaml).
 * This method is public because it is being used in the HadoopDataSource.
 */
public static org.apache.hadoop.conf.Configuration getHadoopConfiguration() {
    Configuration retConf = new org.apache.hadoop.conf.Configuration();

    // We need to load both core-site.xml and hdfs-site.xml to determine the default fs path and
    // the hdfs configuration
    // Try to load HDFS configuration from Hadoop's own configuration files
    // 1. approach: Flink configuration
    final String hdfsDefaultPath = GlobalConfiguration.getString(ConfigConstants.HDFS_DEFAULT_CONFIG, null);
    if (hdfsDefaultPath != null) {
        retConf.addResource(new org.apache.hadoop.fs.Path(hdfsDefaultPath));
    } else {
        LOG.debug("Cannot find hdfs-default configuration file");
    }

    final String hdfsSitePath = GlobalConfiguration.getString(ConfigConstants.HDFS_SITE_CONFIG, null);
    if (hdfsSitePath != null) {
        retConf.addResource(new org.apache.hadoop.fs.Path(hdfsSitePath));
    } else {
        LOG.debug("Cannot find hdfs-site configuration file");
    }

    // 2. Approach environment variables
    String[] possibleHadoopConfPaths = new String[4];
    possibleHadoopConfPaths[0] = GlobalConfiguration.getString(ConfigConstants.PATH_HADOOP_CONFIG, null);
    possibleHadoopConfPaths[1] = System.getenv("HADOOP_CONF_DIR");

    if (System.getenv("HADOOP_HOME") != null) {
        possibleHadoopConfPaths[2] = System.getenv("HADOOP_HOME") + "/conf";
        possibleHadoopConfPaths[3] = System.getenv("HADOOP_HOME") + "/etc/hadoop"; // hadoop 2.2
    }

    for (int i = 0; i < possibleHadoopConfPaths.length; i++) {
        if (possibleHadoopConfPaths[i] == null) {
            continue;
        }

        if (new File(possibleHadoopConfPaths[i]).exists()) {
            if (new File(possibleHadoopConfPaths[i] + "/core-site.xml").exists()) {
                retConf.addResource(
                        new org.apache.hadoop.fs.Path(possibleHadoopConfPaths[i] + "/core-site.xml"));

                if (LOG.isDebugEnabled()) {
                    LOG.debug(
                            "Adding " + possibleHadoopConfPaths[i] + "/core-site.xml to hadoop configuration");
                }
            }
            if (new File(possibleHadoopConfPaths[i] + "/hdfs-site.xml").exists()) {
                retConf.addResource(
                        new org.apache.hadoop.fs.Path(possibleHadoopConfPaths[i] + "/hdfs-site.xml"));

                if (LOG.isDebugEnabled()) {
                    LOG.debug(
                            "Adding " + possibleHadoopConfPaths[i] + "/hdfs-site.xml to hadoop configuration");
                }
            }
        }
    }
    return retConf;
}

From source file:org.apache.flink.runtime.util.HadoopUtils.java

License:Apache License

@SuppressWarnings("deprecation")
public static Configuration getHadoopConfiguration(
        org.apache.flink.configuration.Configuration flinkConfiguration) {

    // Instantiate a HdfsConfiguration to load the hdfs-site.xml and hdfs-default.xml
    // from the classpath
    Configuration result = new HdfsConfiguration();
    boolean foundHadoopConfiguration = false;

    // We need to load both core-site.xml and hdfs-site.xml to determine the default fs path and
    // the hdfs configuration
    // Try to load HDFS configuration from Hadoop's own configuration files
    // 1. approach: Flink configuration
    final String hdfsDefaultPath = flinkConfiguration.getString(ConfigConstants.HDFS_DEFAULT_CONFIG, null);

    if (hdfsDefaultPath != null) {
        result.addResource(new org.apache.hadoop.fs.Path(hdfsDefaultPath));
        LOG.debug("Using hdfs-default configuration-file path form Flink config: {}", hdfsDefaultPath);
        foundHadoopConfiguration = true;
    } else {
        LOG.debug("Cannot find hdfs-default configuration-file path in Flink config.");
    }

    final String hdfsSitePath = flinkConfiguration.getString(ConfigConstants.HDFS_SITE_CONFIG, null);
    if (hdfsSitePath != null) {
        result.addResource(new org.apache.hadoop.fs.Path(hdfsSitePath));
        LOG.debug("Using hdfs-site configuration-file path form Flink config: {}", hdfsSitePath);
        foundHadoopConfiguration = true;
    } else {
        LOG.debug("Cannot find hdfs-site configuration-file path in Flink config.");
    }

    // 2. Approach environment variables
    String[] possibleHadoopConfPaths = new String[4];
    possibleHadoopConfPaths[0] = flinkConfiguration.getString(ConfigConstants.PATH_HADOOP_CONFIG, null);
    possibleHadoopConfPaths[1] = System.getenv("HADOOP_CONF_DIR");

    final String hadoopHome = System.getenv("HADOOP_HOME");
    if (hadoopHome != null) {
        possibleHadoopConfPaths[2] = hadoopHome + "/conf";
        possibleHadoopConfPaths[3] = hadoopHome + "/etc/hadoop"; // hadoop 2.2
    }

    for (String possibleHadoopConfPath : possibleHadoopConfPaths) {
        if (possibleHadoopConfPath != null) {
            if (new File(possibleHadoopConfPath).exists()) {
                if (new File(possibleHadoopConfPath + "/core-site.xml").exists()) {
                    result.addResource(
                            new org.apache.hadoop.fs.Path(possibleHadoopConfPath + "/core-site.xml"));
                    LOG.debug("Adding " + possibleHadoopConfPath + "/core-site.xml to hadoop configuration");
                    foundHadoopConfiguration = true;
                }
                if (new File(possibleHadoopConfPath + "/hdfs-site.xml").exists()) {
                    result.addResource(
                            new org.apache.hadoop.fs.Path(possibleHadoopConfPath + "/hdfs-site.xml"));
                    LOG.debug("Adding " + possibleHadoopConfPath + "/hdfs-site.xml to hadoop configuration");
                    foundHadoopConfiguration = true;
                }
            }
        }
    }

    if (!foundHadoopConfiguration) {
        LOG.debug("Could not find Hadoop configuration via any of the supported methods "
                + "(Flink configuration, environment variables).");
    }

    return result;
}
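
Unlike the plain Configuration used in the two previous Flink variants, HdfsConfiguration registers hdfs-default.xml and hdfs-site.xml as default resources, so HDFS settings found on the classpath are loaded even before any explicit addResource call; foundHadoopConfiguration only tracks whether an explicit resource was added on top of that.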