Example usage for org.apache.hadoop.conf Configuration addResource

Introduction

This page collects example usages of the org.apache.hadoop.conf.Configuration method addResource, drawn from open-source projects.

Prototype

public void addResource(Configuration conf) 

Document

Add a configuration resource. The properties of the added resource override properties of previously added resources, unless they were marked final.
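
The prototype above is the overload that takes another Configuration; most of the examples below use the Path and String overloads instead. Here is a minimal sketch of the three variants (the resource names and paths are illustrative, not taken from any of the examples):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class AddResourceSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false); // false = skip the default resources

        // String overload: the name is resolved against the classpath
        conf.addResource("my-site.xml"); // hypothetical classpath resource

        // Path overload: an explicit file on the local filesystem
        conf.addResource(new Path("/etc/hadoop/conf/core-site.xml")); // illustrative path

        // Configuration overload (the prototype above): folds the other
        // configuration's properties in as an additional resource
        Configuration other = new Configuration(false);
        other.set("fs.defaultFS", "hdfs://namenode:8020"); // illustrative value
        conf.addResource(other);

        System.out.println(conf.get("fs.defaultFS"));
    }
}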

Usage

From source file:com.ikanow.aleph2.harvest.logstash.utils.LogstashUtils.java

License:Apache License

protected static Configuration getConfiguration(final GlobalPropertiesBean globals, final int attempt) {
    synchronized (Configuration.class) {
        Configuration config = new Configuration(false);

        if (new File(globals.local_yarn_config_dir()).exists()) {
            config.addResource(new Path(globals.local_yarn_config_dir() + "/yarn-site.xml"));
            config.addResource(new Path(globals.local_yarn_config_dir() + "/core-site.xml"));
            config.addResource(new Path(globals.local_yarn_config_dir() + "/hdfs-site.xml"));
        } else {
            final String alternative = System.getenv("HADOOP_CONF_DIR");

            _logger.warn("Aleph2 yarn-config dir not found, try alternative: " + alternative);
            // (another alternative would be HADOOP_HOME + "/conf")

            if ((null != alternative) && new File(alternative).exists()) {
                config.addResource(new Path(alternative + "/yarn-site.xml"));
                config.addResource(new Path(alternative + "/core-site.xml"));
                config.addResource(new Path(alternative + "/hdfs-site.xml"));
            } else // last ditch - will work for local testing but never from anything remote
                config.addResource("default_fs.xml");
        }
        if (attempt > 10) { // (try sleeping here)
            final long to_sleep = 500L + (new Date().getTime() % 100L); // (add random component)
            try {
                Thread.sleep(to_sleep);
            } catch (Exception e) {
                // (interrupted while backing off - safe to ignore)
            }
        }

        // These are not added by Hortonworks, so add them manually
        config.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        config.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
        config.set("fs.AbstractFileSystem.hdfs.impl", "org.apache.hadoop.fs.Hdfs");
        config.set("fs.AbstractFileSystem.file.impl", "org.apache.hadoop.fs.local.LocalFs");
        return config;
    }
}

From source file:com.ikanow.aleph2.search_service.elasticsearch.utils.ElasticsearchHiveUtils.java

License:Apache License

/** Support for a sporadic ConcurrentModificationException (hence the synchronized block and the back-off on retries)
 * @param attempt the retry attempt number
 * @param globals the global properties bean locating the local YARN/Hive config dir
 * @return the Hive configuration
 */
protected static Configuration getHiveConfiguration(int attempt, final GlobalPropertiesBean globals) {
    synchronized (Configuration.class) {
        Configuration config = new Configuration(false);

        final String hive_config_file = globals.local_yarn_config_dir() + "/hive-site.xml";
        if (new File(hive_config_file).exists()) {
            config.addResource(new Path(hive_config_file));
        } else {
            throw new RuntimeException(ERROR_HIVE_NOT_CONFIGURED);
        }
        if (attempt > 10) { // (try sleeping here)
            final long to_sleep = 500L + (new Date().getTime() % 100L); // (add random component)
            try {
                Thread.sleep(to_sleep);
            } catch (Exception e) {
                // (interrupted while backing off - safe to ignore)
            }
        }

        return config;
    }
}

From source file:com.ikanow.aleph2.storage_service_hdfs.services.HdfsStorageService.java

License:Apache License

/** 
 * Override this function with system-specific configuration
 * @return the base Hadoop configuration
 */
protected Configuration getConfiguration() {
    Configuration config = new Configuration(false);

    if (new File(_globals.local_yarn_config_dir()).exists()) {
        config.addResource(new Path(_globals.local_yarn_config_dir() + "/yarn-site.xml"));
        config.addResource(new Path(_globals.local_yarn_config_dir() + "/core-site.xml"));
        config.addResource(new Path(_globals.local_yarn_config_dir() + "/hdfs-site.xml"));
    } else {
        config.addResource("default_fs.xml");
    }
    // These are not added by Hortonworks, so add them manually
    config.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
    config.set("fs.AbstractFileSystem.hdfs.impl", "org.apache.hadoop.fs.Hdfs");

    return config;

}

From source file:com.ikanow.infinit.e.processing.custom.launcher.CustomHadoopTaskLauncher.java

License:Open Source License

@SuppressWarnings({ "unchecked", "rawtypes" })
public String runHadoopJob(CustomMapReduceJobPojo job, String tempJarLocation)
        throws IOException, SAXException, ParserConfigurationException {
    StringWriter xml = new StringWriter();
    String outputCollection = job.outputCollectionTemp;// (non-append mode) 
    if ((null != job.appendResults) && job.appendResults)
        outputCollection = job.outputCollection; // (append mode, write directly in....)
    else if (null != job.incrementalMode)
        job.incrementalMode = false; // (not allowed to be in incremental mode and not update mode)

    createConfigXML(xml, job.jobtitle, job.inputCollection,
            InfiniteHadoopUtils.getQueryOrProcessing(job.query, InfiniteHadoopUtils.QuerySpec.INPUTFIELDS),
            job.isCustomTable, job.getOutputDatabase(), job._id.toString(), outputCollection, job.mapper,
            job.reducer, job.combiner,
            InfiniteHadoopUtils.getQueryOrProcessing(job.query, InfiniteHadoopUtils.QuerySpec.QUERY),
            job.communityIds, job.outputKey, job.outputValue, job.arguments, job.incrementalMode,
            job.submitterID, job.selfMerge, job.outputCollection, job.appendResults);

    ClassLoader savedClassLoader = Thread.currentThread().getContextClassLoader();

    URLClassLoader child = new URLClassLoader(new URL[] { new File(tempJarLocation).toURI().toURL() },
            savedClassLoader);
    Thread.currentThread().setContextClassLoader(child);

    // Check version: for now, any infinit.e.data_model with a VersionTest class is acceptable
    boolean dataModelLoaded = true;
    try {
        URLClassLoader versionTest = new URLClassLoader(new URL[] { new File(tempJarLocation).toURI().toURL() },
                null);
        try {
            Class.forName("com.ikanow.infinit.e.data_model.custom.InfiniteMongoInputFormat", true, versionTest);
        } catch (ClassNotFoundException e2) {
            //(this is fine, will use the cached version)
            dataModelLoaded = false;
        }
        if (dataModelLoaded)
            Class.forName("com.ikanow.infinit.e.data_model.custom.InfiniteMongoVersionTest", true, versionTest);
    } catch (ClassNotFoundException e1) {
        throw new RuntimeException(
                "This JAR is compiled with too old a version of the data-model, please recompile with Jan 2014 (rc2) onwards");
    }

    // Now load the XML into a configuration object: 
    Configuration config = new Configuration();
    // Add the client configuration overrides:
    if (!bLocalMode) {
        String hadoopConfigPath = props_custom.getHadoopConfigPath() + "/hadoop/";
        config.addResource(new Path(hadoopConfigPath + "core-site.xml"));
        config.addResource(new Path(hadoopConfigPath + "mapred-site.xml"));
        config.addResource(new Path(hadoopConfigPath + "hadoop-site.xml"));
    } //TESTED

    try {
        DocumentBuilderFactory dbFactory = DocumentBuilderFactory.newInstance();
        DocumentBuilder dBuilder = dbFactory.newDocumentBuilder();
        Document doc = dBuilder.parse(new ByteArrayInputStream(xml.toString().getBytes()));
        NodeList nList = doc.getElementsByTagName("property");

        for (int temp = 0; temp < nList.getLength(); temp++) {
            Node nNode = nList.item(temp);
            if (nNode.getNodeType() == Node.ELEMENT_NODE) {
                Element eElement = (Element) nNode;
                String name = getTagValue("name", eElement);
                String value = getTagValue("value", eElement);
                if ((null != name) && (null != value)) {
                    config.set(name, value);
                }
            }
        }
    } catch (Exception e) {
        throw new IOException(e.getMessage());
    }

    // Some other config defaults:
    // (not sure if these are actually applied, or derived from the defaults - for some reason they don't appear in CDH's client config)
    config.set("mapred.map.tasks.speculative.execution", "false");
    config.set("mapred.reduce.tasks.speculative.execution", "false");
    // (default security is ignored here, have it set via HADOOP_TASKTRACKER_CONF in cloudera)

    // Now run the JAR file
    try {
        BasicDBObject advancedConfigurationDbo = null;
        try {
            advancedConfigurationDbo = (null != job.query)
                    ? ((BasicDBObject) com.mongodb.util.JSON.parse(job.query))
                    : (new BasicDBObject());
        } catch (Exception e) {
            advancedConfigurationDbo = new BasicDBObject();
        }
        boolean esMode = advancedConfigurationDbo.containsField("qt") && !job.isCustomTable;
        if (esMode && !job.inputCollection.equals("doc_metadata.metadata")) {
            throw new RuntimeException(
                    "Infinit.e Queries are only supported on doc_metadata - use MongoDB queries instead.");
        }

        config.setBoolean("mapred.used.genericoptionsparser", true); // (just stops an annoying warning from appearing)
        if (bLocalMode) { // local job tracker and FS mode
            config.set("mapred.job.tracker", "local");
            config.set("fs.default.name", "local");
        } else {
            if (bTestMode) { // run job tracker locally but FS mode remotely
                config.set("mapred.job.tracker", "local");
            } else { // normal job tracker
                String trackerUrl = HadoopUtils.getXMLProperty(
                        props_custom.getHadoopConfigPath() + "/hadoop/mapred-site.xml", "mapred.job.tracker");
                config.set("mapred.job.tracker", trackerUrl);
            }
            String fsUrl = HadoopUtils.getXMLProperty(
                    props_custom.getHadoopConfigPath() + "/hadoop/core-site.xml", "fs.default.name");
            config.set("fs.default.name", fsUrl);
        }
        if (!dataModelLoaded && !(bTestMode || bLocalMode)) { // If running distributed and no data model loaded then add ourselves
            Path jarToCache = InfiniteHadoopUtils.cacheLocalFile("/opt/infinite-home/lib/",
                    "infinit.e.data_model.jar", config);
            DistributedCache.addFileToClassPath(jarToCache, config);
            jarToCache = InfiniteHadoopUtils.cacheLocalFile("/opt/infinite-home/lib/",
                    "infinit.e.processing.custom.library.jar", config);
            DistributedCache.addFileToClassPath(jarToCache, config);
        } //TESTED

        // Debug scripts (only if they exist), and only in non local/test mode
        if (!bLocalMode && !bTestMode) {

            try {
                Path scriptToCache = InfiniteHadoopUtils.cacheLocalFile("/opt/infinite-home/scripts/",
                        "custom_map_error_handler.sh", config);
                config.set("mapred.map.task.debug.script", "custom_map_error_handler.sh " + job.jobtitle);
                config.set("mapreduce.map.debug.script", "custom_map_error_handler.sh " + job.jobtitle);
                DistributedCache.createSymlink(config);
                DistributedCache.addCacheFile(scriptToCache.toUri(), config);
            } catch (Exception e) {
            } // just carry on

            try {
                Path scriptToCache = InfiniteHadoopUtils.cacheLocalFile("/opt/infinite-home/scripts/",
                        "custom_reduce_error_handler.sh", config);
                config.set("mapred.reduce.task.debug.script", "custom_reduce_error_handler.sh " + job.jobtitle);
                config.set("mapreduce.reduce.debug.script", "custom_reduce_error_handler.sh " + job.jobtitle);
                DistributedCache.createSymlink(config);
                DistributedCache.addCacheFile(scriptToCache.toUri(), config);
            } catch (Exception e) {
            } // just carry on

        } //TODO (???): TOTEST

        // (need to do these 2 things here before the job is created, at which point the config class has been copied across)
        //1)
        Class<?> mapperClazz = Class.forName(job.mapper, true, child);
        if (ICustomInfiniteInternalEngine.class.isAssignableFrom(mapperClazz)) { // Special case: internal custom engine, so gets an additional integration hook
            ICustomInfiniteInternalEngine preActivities = (ICustomInfiniteInternalEngine) mapperClazz
                    .newInstance();
            preActivities.preTaskActivities(job._id, job.communityIds, config, !(bTestMode || bLocalMode));
        } //TESTED
          //2)
        if (job.inputCollection.equalsIgnoreCase("file.binary_shares")) {
            // Need to download the GridFSZip file
            try {
                Path jarToCache = InfiniteHadoopUtils.cacheLocalFile("/opt/infinite-home/lib/unbundled/",
                        "GridFSZipFile.jar", config);
                DistributedCache.addFileToClassPath(jarToCache, config);
            } catch (Throwable t) {
            } // (this is fine, will already be on the classpath .. otherwise lots of other stuff will be failing all over the place!)            
        }

        if (job.inputCollection.equals("records")) {

            InfiniteElasticsearchHadoopUtils.handleElasticsearchInput(job, config, advancedConfigurationDbo);

            //(won't run under 0.19 so running with "records" should cause all sorts of exceptions)

        } //TESTED (by hand)         

        if (bTestMode || bLocalMode) { // If running locally, turn "snappy" off - tomcat isn't pointing its native library path in the right place
            config.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.DefaultCodec");
        }

        // Manually specified caches
        List<URL> localJarCaches = InfiniteHadoopUtils.handleCacheList(advancedConfigurationDbo.get("$caches"),
                job, config, props_custom);

        Job hj = new Job(config); // (NOTE: from here, changes to config are ignored)
        try {

            if (null != localJarCaches) {
                if (bLocalMode || bTestMode) {
                    Method method = URLClassLoader.class.getDeclaredMethod("addURL", new Class[] { URL.class });
                    method.setAccessible(true);
                    method.invoke(child, localJarCaches.toArray());

                } //TOTEST (tested logically)
            }
            Class<?> classToLoad = Class.forName(job.mapper, true, child);
            hj.setJarByClass(classToLoad);

            if (job.inputCollection.equalsIgnoreCase("filesystem")) {
                String inputPath = null;
                try {
                    inputPath = MongoDbUtil.getProperty(advancedConfigurationDbo, "file.url");
                    if (!inputPath.endsWith("/")) {
                        inputPath = inputPath + "/";
                    }
                } catch (Exception e) {
                    // (no file.url set - the null check below reports the error)
                }
                if (null == inputPath) {
                    throw new RuntimeException("Must specify 'file.url' if reading from filesystem.");
                }
                inputPath = InfiniteHadoopUtils.authenticateInputDirectory(job, inputPath);

                InfiniteFileInputFormat.addInputPath(hj, new Path(inputPath + "*/*")); // (that extra bit makes it recursive)
                InfiniteFileInputFormat.setMaxInputSplitSize(hj, 33554432); // (32MB)
                InfiniteFileInputFormat.setInfiniteInputPathFilter(hj, config);
                hj.setInputFormatClass((Class<? extends InputFormat>) Class.forName(
                        "com.ikanow.infinit.e.data_model.custom.InfiniteFileInputFormat", true, child));
            } else if (job.inputCollection.equalsIgnoreCase("file.binary_shares")) {

                String[] oidStrs = null;
                try {
                    String inputPath = MongoDbUtil.getProperty(advancedConfigurationDbo, "file.url");
                    Pattern oidExtractor = Pattern.compile("inf://share/([^/]+)");
                    Matcher m = oidExtractor.matcher(inputPath);
                    if (m.find()) {
                        oidStrs = m.group(1).split("\\s*,\\s*");

                    } else {
                        throw new RuntimeException(
                                "file.url must be in format inf://share/<oid-list>/<string>: " + inputPath);
                    }
                    InfiniteHadoopUtils.authenticateShareList(job, oidStrs);
                } catch (Exception e) {
                    throw new RuntimeException(
                            "Authentication error: " + e.getMessage() + ": " + advancedConfigurationDbo, e);
                }

                hj.getConfiguration().setStrings("mapred.input.dir", oidStrs);
                hj.setInputFormatClass((Class<? extends InputFormat>) Class.forName(
                        "com.ikanow.infinit.e.data_model.custom.InfiniteShareInputFormat", true, child));
            } else if (job.inputCollection.equals("records")) {
                hj.setInputFormatClass((Class<? extends InputFormat>) Class
                        .forName("com.ikanow.infinit.e.data_model.custom.InfiniteEsInputFormat", true, child));
            } else {
                if (esMode) {
                    hj.setInputFormatClass((Class<? extends InputFormat>) Class.forName(
                            "com.ikanow.infinit.e.processing.custom.utils.InfiniteElasticsearchMongoInputFormat",
                            true, child));
                } else {
                    hj.setInputFormatClass((Class<? extends InputFormat>) Class.forName(
                            "com.ikanow.infinit.e.data_model.custom.InfiniteMongoInputFormat", true, child));
                }
            }
            if ((null != job.exportToHdfs) && job.exportToHdfs) {

                //TODO (INF-2469): Also, if the output key is BSON then also run as text (but output as JSON?)

                Path outPath = InfiniteHadoopUtils.ensureOutputDirectory(job, props_custom);

                if ((null != job.outputKey) && (null != job.outputValue)
                        && job.outputKey.equalsIgnoreCase("org.apache.hadoop.io.text")
                        && job.outputValue.equalsIgnoreCase("org.apache.hadoop.io.text")) {
                    // (slight hack before I sort out the horrendous job class - if key/val both text and exporting to HDFS then output as Text)
                    hj.setOutputFormatClass((Class<? extends OutputFormat>) Class
                            .forName("org.apache.hadoop.mapreduce.lib.output.TextOutputFormat", true, child));
                    TextOutputFormat.setOutputPath(hj, outPath);
                } //TESTED
                else {
                    hj.setOutputFormatClass((Class<? extends OutputFormat>) Class.forName(
                            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat", true, child));
                    SequenceFileOutputFormat.setOutputPath(hj, outPath);
                } //TESTED
            } else { // normal case, stays in MongoDB
                hj.setOutputFormatClass((Class<? extends OutputFormat>) Class.forName(
                        "com.ikanow.infinit.e.data_model.custom.InfiniteMongoOutputFormat", true, child));
            }
            hj.setMapperClass((Class<? extends Mapper>) mapperClazz);
            String mapperOutputKeyOverride = advancedConfigurationDbo.getString("$mapper_key_class", null);
            if (null != mapperOutputKeyOverride) {
                hj.setMapOutputKeyClass(Class.forName(mapperOutputKeyOverride));
            } //TESTED 

            String mapperOutputValueOverride = advancedConfigurationDbo.getString("$mapper_value_class", null);
            if (null != mapperOutputValueOverride) {
                hj.setMapOutputValueClass(Class.forName(mapperOutputValueOverride));
            } //TESTED 

            if ((null != job.reducer) && !job.reducer.startsWith("#") && !job.reducer.equalsIgnoreCase("null")
                    && !job.reducer.equalsIgnoreCase("none")) {
                hj.setReducerClass((Class<? extends Reducer>) Class.forName(job.reducer, true, child));
                // Variable reducers:
                if (null != job.query) {
                    try {
                        hj.setNumReduceTasks(advancedConfigurationDbo.getInt("$reducers", 1));
                    } catch (Exception e) {
                        try {
                            // (just check it's not a string that is a valid int)
                            hj.setNumReduceTasks(
                                    Integer.parseInt(advancedConfigurationDbo.getString("$reducers", "1")));
                        } catch (Exception e2) {
                        }
                    }
                } //TESTED
            } else {
                hj.setNumReduceTasks(0);
            }
            if ((null != job.combiner) && !job.combiner.startsWith("#")
                    && !job.combiner.equalsIgnoreCase("null") && !job.combiner.equalsIgnoreCase("none")) {
                hj.setCombinerClass((Class<? extends Reducer>) Class.forName(job.combiner, true, child));
            }
            hj.setOutputKeyClass(Class.forName(job.outputKey, true, child));
            hj.setOutputValueClass(Class.forName(job.outputValue, true, child));

            hj.setJobName(job.jobtitle);
            currJobName = job.jobtitle;
        } catch (Error e) { // (messing about with class loaders = lots of chances for errors!)
            throw new RuntimeException(e.getMessage(), e);
        }
        if (bTestMode || bLocalMode) {
            hj.submit();
            currThreadId = null;
            Logger.getRootLogger().addAppender(this);
            currLocalJobId = hj.getJobID().toString();
            currLocalJobErrs.setLength(0);
            while (!hj.isComplete()) {
                Thread.sleep(1000);
            }
            Logger.getRootLogger().removeAppender(this);
            if (hj.isSuccessful()) {
                if (this.currLocalJobErrs.length() > 0) {
                    return "local_done: " + this.currLocalJobErrs.toString();
                } else {
                    return "local_done";
                }
            } else {
                return "Error: " + this.currLocalJobErrs.toString();
            }
        } else {
            hj.submit();
            String jobId = hj.getJobID().toString();
            return jobId;
        }
    } catch (Exception e) {
        e.printStackTrace();
        Thread.currentThread().setContextClassLoader(savedClassLoader);
        return "Error: " + InfiniteHadoopUtils.createExceptionMessage(e);
    } finally {
        Thread.currentThread().setContextClassLoader(savedClassLoader);
    }
}

From source file:com.inforefiner.hdata.SubmitClient.java

License:Apache License

/**
 * @param args Command line arguments
 */
public static void main(String[] args) {
    Configuration conf = new Configuration();
    String hadoop_conf_dir = System.getenv("HADOOP_CONF_DIR");
    if (StringUtils.isNotBlank(hadoop_conf_dir)) {
        LOG.info("HADOOP_CONF_DIR using " + hadoop_conf_dir);
        conf.addResource(new Path(hadoop_conf_dir, "core-site.xml"));
        conf.addResource(new Path(hadoop_conf_dir, "hdfs-site.xml"));
        conf.addResource(new Path(hadoop_conf_dir, "yarn-site.xml"));
    } else {
        LOG.info("HADOOP_HOME not config, using DEFAULT");
    }
    boolean result = false;
    try {
        SubmitClient client = new SubmitClient(conf);
        LOG.info("Initializing Client");
        try {
            boolean doRun = client.init(args);
            if (!doRun) {
                System.exit(0);
            }
        } catch (IllegalArgumentException e) {
            System.err.println(e.getLocalizedMessage());
            client.printUsage();
            System.exit(-1);
        }
        result = client.run();
    } catch (Throwable t) {
        LOG.fatal("Error running Client", t);
        System.exit(1);
    }
    if (result) {
        LOG.info("Application completed successfully");
        System.exit(0);
    }
    LOG.error("Application failed to complete successfully");
    System.exit(2);
}

From source file:com.inmobi.conduit.distcp.tools.DistCp.java

License:Apache License

/**
 * Public constructor. Creates a DistCp object with the specified input parameters
 * (e.g. source paths, target location).
 * @param configuration The Hadoop configuration against which the Copy-mapper must run
 * @param inputOptions Options indicating source paths and target location
 * @throws Exception on failure
 */
public DistCp(Configuration configuration, DistCpOptions inputOptions) throws Exception {
    Configuration config = (configuration instanceof JobConf) ? new JobConf(configuration)
            : new Configuration(configuration);
    Configuration defaultConf = new Configuration(false);
    defaultConf.addResource(DISTCP_DEFAULT_XML);
    for (Map.Entry<String, String> entry : defaultConf)
        if (config.get(entry.getKey()) == null)
            config.set(entry.getKey(), entry.getValue());
    setConf(config);
    this.inputOptions = inputOptions;
    this.metaFolder = createMetaFolderPath();
}

From source file:com.inmobi.conduit.distcp.tools.DistCp.java

License:Apache License

/**
 * Add SSL files to distributed cache. Trust store, key store and ssl config xml
 *
 * @param configuration - Job configuration
 * @param sslConfigPath - ssl Configuration file specified through options
 * @throws IOException - If any
 */
private void addSSLFilesToDistCache(Configuration configuration, Path sslConfigPath) throws IOException {
    FileSystem localFS = FileSystem.getLocal(configuration);

    Configuration sslConf = new Configuration(false);
    sslConf.addResource(sslConfigPath);

    Path localStorePath = getLocalStorePath(sslConf, "ssl.client.truststore.location");
    DistributedCache.addCacheFile(localStorePath.makeQualified(localFS).toUri(), configuration);
    configuration.set("ssl.client.truststore.location", localStorePath.getName());

    localStorePath = getLocalStorePath(sslConf, "ssl.client.keystore.location");
    DistributedCache.addCacheFile(localStorePath.makeQualified(localFS).toUri(), configuration);
    configuration.set("ssl.client.keystore.location", localStorePath.getName());

    DistributedCache.addCacheFile(sslConfigPath.makeQualified(localFS).toUri(), configuration);
}

From source file:com.inmobi.conduit.distcp.tools.DistCp.java

License:Apache License

/**
 * Loads properties from distcp-default.xml into the configuration object
 * @return Configuration which includes properties from distcp-default.xml
 */
private static Configuration getDefaultConf() {
    Configuration config = new Configuration();

    // Propagate properties related to delegation tokens.
    String tokenFile = System.getenv("HADOOP_TOKEN_FILE_LOCATION");
    if (tokenFile != null) {
        config.set("mapreduce.job.credentials.binary", tokenFile);
    }

    config.addResource(DISTCP_DEFAULT_XML);
    return config;
}

From source file:com.inmobi.conduit.distcp.tools.mapred.CopyMapper.java

License:Apache License

/**
 * Initialize SSL Config if same is set in conf
 *
 * @throws IOException - If any
 */
private void initializeSSLConf() throws IOException {
    LOG.info("Initializing SSL configuration");

    String workDir = conf.get("mapred.local.dir") + "/work";
    Path[] cacheFiles = DistributedCache.getLocalCacheFiles(conf);

    Configuration sslConfig = new Configuration(false);
    String sslConfFileName = conf.get(DistCpConstants.CONF_LABEL_SSL_CONF);
    Path sslClient = findCacheFile(cacheFiles, sslConfFileName);
    if (sslClient == null) {
        LOG.warn("SSL Client config file not found. Was looking for " + sslConfFileName + " in "
                + Arrays.toString(cacheFiles));
        return;
    }
    sslConfig.addResource(sslClient);

    String trustStoreFile = conf.get("ssl.client.truststore.location");
    Path trustStorePath = findCacheFile(cacheFiles, trustStoreFile);
    sslConfig.set("ssl.client.truststore.location", trustStorePath.toString());

    String keyStoreFile = conf.get("ssl.client.keystore.location");
    Path keyStorePath = findCacheFile(cacheFiles, keyStoreFile);
    sslConfig.set("ssl.client.keystore.location", keyStorePath.toString());

    try {
        OutputStream out = new FileOutputStream(workDir + "/" + sslConfFileName);
        try {
            sslConfig.writeXml(out);
        } finally {
            out.close();
        }
        conf.set(DistCpConstants.CONF_LABEL_SSL_KEYSTORE, sslConfFileName);
    } catch (IOException e) {
        LOG.warn("Unable to write out the ssl configuration. "
                + "Will fall back to default ssl-client.xml in class path, if there is one", e);
    }
}

From source file:com.intropro.prairie.unit.hadoop.HadoopUnit.java

License:Apache License

protected Configuration gatherConfigs() {
    Configuration conf = new Configuration();
    conf.addResource("core-site.prairie.xml");
    conf.set("hadoop.tmp.dir", getTmpDir().toString());
    conf.addResource("prairie-site.xml");
    return conf;
}
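
A note on ordering that applies to several of the examples above (including the last one, which interleaves set and addResource calls): in mainstream Hadoop 2.x/3.x releases, values applied with set() are kept in an internal overlay that is reapplied whenever the resource list is reloaded, so they take precedence over any resource regardless of call order; among resources themselves, the later-added resource wins unless a property was marked final. A minimal sketch of this behavior (the resource name is hypothetical):

import org.apache.hadoop.conf.Configuration;

public class AddResourceOrderingSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // set() stores the value in an overlay that survives resource reloads
        conf.set("hadoop.tmp.dir", "/tmp/prairie");
        // Hypothetical classpath resource; even if it defines hadoop.tmp.dir,
        // the overlay value from set() above still wins
        conf.addResource("prairie-site.xml");
        System.out.println(conf.get("hadoop.tmp.dir")); // prints /tmp/prairie
    }
}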