Example usage for org.apache.hadoop.mapreduce Job getCredentials

List of usage examples for org.apache.hadoop.mapreduce Job getCredentials

Introduction

On this page you can find example usage for org.apache.hadoop.mapreduce Job getCredentials.

Prototype

public Credentials getCredentials() 
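
The method returns the job's Credentials object, to which secret keys and delegation tokens can be added before the job is submitted; the examples below all use it to carry serialized configuration outside the plain-text Configuration. A minimal self-contained sketch (the alias "my.secret" and the payload are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.security.Credentials;

public class GetCredentialsExample {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        // Credentials travel with the submitted job rather than in the
        // plain-text Configuration, which makes them a common place to
        // stash secrets and serialized objects.
        Credentials credentials = job.getCredentials();
        credentials.addSecretKey(new Text("my.secret"), "s3cr3t".getBytes());
        // The same alias recovers the bytes, e.g. later inside a task.
        byte[] secret = credentials.getSecretKey(new Text("my.secret"));
        System.out.println(new String(secret));
    }
}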

Usage

From source file: org.apache.sqoop.job.mr.MRConfigurationUtils.java

License: Apache License

/**
 * Persist driver configuration object for job.
 *
 * @param job MapReduce job object
 * @param obj Configuration object
 */
public static void setDriverConfig(Job job, Object obj) {
    job.getConfiguration().set(MR_JOB_CONFIG_DRIVER_CONFIG_CLASS, obj.getClass().getName());
    job.getCredentials().addSecretKey(MR_JOB_CONFIG_DRIVER_CONFIG_KEY, ConfigUtils.toJson(obj).getBytes());
}
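
The JSON form stored above travels with the job's Credentials rather than the Configuration, so it stays out of plain-text job files. A hypothetical read-side counterpart (the helper name is an assumption; MR_JOB_CONFIG_DRIVER_CONFIG_KEY is the same Text alias used by the setter):

/**
 * Hypothetical counterpart to setDriverConfig: recovers the JSON form of
 * the driver configuration from the job's Credentials inside a task.
 */
public static String getDriverConfigJson(JobContext context) {
    byte[] raw = context.getCredentials().getSecretKey(MR_JOB_CONFIG_DRIVER_CONFIG_KEY);
    return raw == null ? null : new String(raw);
}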

From source file: org.apache.sqoop.job.mr.MRConfigurationUtils.java

License: Apache License

/**
 * Persist Connector-generated schema.
 *
 * @param type  Direction of schema we are persisting
 * @param job MapReduce Job object
 * @param schema Schema
 */
public static void setConnectorSchema(Direction type, Job job, Schema schema) {
    if (schema != null) {
        String jsonSchema = SchemaSerialization.extractSchema(schema).toJSONString();
        switch (type) {
        case FROM:
            job.getCredentials().addSecretKey(SCHEMA_FROM_KEY, jsonSchema.getBytes());
            return;
        case TO:
            job.getCredentials().addSecretKey(SCHEMA_TO_KEY, jsonSchema.getBytes());
            return;
        }
    }
}
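
A hypothetical read-side helper mirroring the switch above (the name is an assumption; SCHEMA_FROM_KEY and SCHEMA_TO_KEY are the same Text aliases used by the setter):

/**
 * Hypothetical counterpart to setConnectorSchema: picks the alias by
 * direction and returns the serialized schema, or null if none was stored.
 */
public static String getConnectorSchemaJson(Direction type, JobContext context) {
    Text key = (type == Direction.FROM) ? SCHEMA_FROM_KEY : SCHEMA_TO_KEY;
    byte[] raw = context.getCredentials().getSecretKey(key);
    return raw == null ? null : new String(raw);
}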

From source file: org.apache.sqoop.mapredsparkcommon.MRConfigurationUtils.java

License: Apache License

/**
 * Persist Connector-generated schema.
 *
 * @param type  Direction of schema we are persisting
 * @param job MapReduce Job object
 * @param schema Schema
 */
public static void setConnectorSchema(Direction type, Job job, Schema schema) {
    String jsonSchema = SchemaSerialization.extractSchema(schema).toJSONString();
    switch (type) {
    case FROM:
        job.getCredentials().addSecretKey(SCHEMA_FROM_KEY, jsonSchema.getBytes());
        return;
    case TO:
        job.getCredentials().addSecretKey(SCHEMA_TO_KEY, jsonSchema.getBytes());
        return;
    }
}
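
Note that this variant, unlike the one above, does not guard against a null schema, so a caller would be expected to check first; a minimal caller sketch (the job and schema variables are assumed to exist):

if (schema != null) {
    MRConfigurationUtils.setConnectorSchema(Direction.FROM, job, schema);
}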

From source file: org.apache.tez.mapreduce.hadoop.MRInputHelpers.java

License: Apache License

/**
 * Generates Input splits and stores them in a {@link org.apache.tez.mapreduce.protos.MRRuntimeProtos.MRSplitsProto} instance.
 *
 * Returns an instance of {@link InputSplitInfoMem}
 *
 * With grouping enabled, the eventual configuration used by the tasks will have
 * the user-specified InputFormat replaced by either {@link org.apache.hadoop.mapred.split.TezGroupedSplitsInputFormat}
 * or {@link org.apache.hadoop.mapreduce.split.TezGroupedSplitsInputFormat}
 *
 * @param conf
 *          an instance of Configuration which is used to determine whether
 *          the mapred or mapreduce API is being used. This Configuration
 *          instance should also contain adequate information to be able to
 *          generate splits - like the InputFormat being used and related
 *          configuration.
 * @param groupSplits whether to group the splits or not
 * @param targetTasks the number of target tasks if grouping is enabled. Specify as 0 otherwise.
 * @return an instance of {@link InputSplitInfoMem} which supports a subset of
 *         the APIs defined on {@link InputSplitInfo}
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
@InterfaceStability.Unstable
public static InputSplitInfoMem generateInputSplitsToMem(Configuration conf, boolean groupSplits,
        int targetTasks) throws IOException, ClassNotFoundException, InterruptedException {

    InputSplitInfoMem splitInfoMem = null;
    JobConf jobConf = new JobConf(conf);
    if (jobConf.getUseNewMapper()) {
        LOG.info("Generating mapreduce api input splits");
        Job job = Job.getInstance(conf);
        org.apache.hadoop.mapreduce.InputSplit[] splits = generateNewSplits(job, groupSplits, targetTasks);
        splitInfoMem = new InputSplitInfoMem(splits, createTaskLocationHintsFromSplits(splits), splits.length,
                job.getCredentials(), job.getConfiguration());
    } else {
        LOG.info("Generating mapred api input splits");
        org.apache.hadoop.mapred.InputSplit[] splits = generateOldSplits(jobConf, groupSplits, targetTasks);
        splitInfoMem = new InputSplitInfoMem(splits, createTaskLocationHintsFromSplits(splits), splits.length,
                jobConf.getCredentials(), jobConf);
    }
    LOG.info("NumSplits: " + splitInfoMem.getNumTasks() + ", SerializedSize: "
            + splitInfoMem.getSplitsProto().getSerializedSize());
    return splitInfoMem;
}
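
A minimal caller sketch for the method above, assuming a Configuration already populated with an InputFormat and input paths (the property key "mapred.mapper.new-api" is the one JobConf.getUseNewMapper() checks):

Configuration conf = new Configuration();
conf.setBoolean("mapred.mapper.new-api", true); // take the mapreduce-API branch
// ... set the InputFormat class and input directories on conf here ...
InputSplitInfoMem splitInfo =
        MRInputHelpers.generateInputSplitsToMem(conf, true /* groupSplits */, 10 /* targetTasks */);
int numTasks = splitInfo.getNumTasks(); // one task per (grouped) split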