Example usage for org.apache.hadoop.conf Configuration setBoolean

Introduction

This page lists usage examples for org.apache.hadoop.conf.Configuration.setBoolean.

Prototype

public void setBoolean(String name, boolean value) 

Document

Set the value of the name property to a boolean.
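
Before the examples from real projects, here is a minimal, self-contained round trip (the property name "myapp.feature.enabled" is made up for illustration): setBoolean stores the flag, and getBoolean reads it back with a default for when the key is absent.

import org.apache.hadoop.conf.Configuration;

public class SetBooleanDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // store a boolean under a (hypothetical) property name
        conf.setBoolean("myapp.feature.enabled", true);

        // read it back; the second argument is the default when the key is unset
        boolean enabled = conf.getBoolean("myapp.feature.enabled", false);
        System.out.println("feature enabled: " + enabled); // prints: feature enabled: true
    }
}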

Usage

From source file:org.apache.accumulo.core.client.mapreduce.lib.impl.OutputConfigurator.java

License:Apache License

/**
 * Sets the directive to use simulation mode for this job. In simulation mode, no output is produced. This is useful for testing.
 *
 * <p>
 * By default, this feature is <b>disabled</b>.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @param enableFeature
 *          the feature is enabled if true, disabled otherwise
 * @since 1.6.0
 */
public static void setSimulationMode(Class<?> implementingClass, Configuration conf, boolean enableFeature) {
    conf.setBoolean(enumToConfKey(implementingClass, Features.SIMULATION_MODE), enableFeature);
}
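
The enumToConfKey helper is not shown on this page; it builds a property key from the implementing class and an enum constant. The stand-in below is only a guess at its shape for illustration, not Accumulo's actual implementation:

import org.apache.hadoop.conf.Configuration;

public class SimulationModeDemo {
    // illustrative stand-in for Accumulo's Features.SIMULATION_MODE
    enum Features { SIMULATION_MODE }

    // hypothetical stand-in for the enumToConfKey helper: prefix the enum
    // constant with the implementing class's simple name
    static String enumToConfKey(Class<?> implementingClass, Enum<?> e) {
        return implementingClass.getSimpleName() + "." + e.name().toLowerCase();
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(enumToConfKey(SimulationModeDemo.class, Features.SIMULATION_MODE), true);

        // the framework later reads the flag back, defaulting to disabled
        boolean simulate = conf.getBoolean(
                enumToConfKey(SimulationModeDemo.class, Features.SIMULATION_MODE), false);
        System.out.println("simulation mode: " + simulate);
    }
}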

From source file:org.apache.accumulo.core.clientImpl.mapreduce.lib.ConfiguratorBase.java

License:Apache License

/**
 * Sets the connector information needed to communicate with Accumulo in this job.
 *
 * <p>
 * <b>WARNING:</b> The serialized token is stored in the configuration and shared with all
 * MapReduce tasks. It is BASE64 encoded to provide a charset safe conversion to a string, and is
 * not intended to be secure.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @param principal
 *          a valid Accumulo user name
 * @param token
 *          the user's password
 * @since 1.6.0
 */
public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal,
        AuthenticationToken token) {
    if (isConnectorInfoSet(implementingClass, conf))
        throw new IllegalStateException(
                "Connector info for " + implementingClass.getSimpleName() + " can only be set once per job");
    checkArgument(principal != null, "principal is null");
    checkArgument(token != null, "token is null");
    conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true);
    conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal);
    if (token instanceof DelegationTokenImpl) {
        // Avoid serializing the DelegationToken secret in the configuration -- the Job will do that
        // work for us securely
        DelegationTokenImpl delToken = (DelegationTokenImpl) token;
        conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN), TokenSource.JOB.prefix()
                + token.getClass().getName() + ":" + delToken.getServiceName().toString());
    } else {
        conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN),
                TokenSource.INLINE.prefix() + token.getClass().getName() + ":"
                        + Base64.getEncoder().encodeToString(AuthenticationTokenSerializer.serialize(token)));
    }
}
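
A hedged sketch of a caller, assuming the clientImpl ConfiguratorBase shown above is on the classpath (it is Accumulo-internal, so this is for illustration only). The credentials are placeholders, and the demo class is used only as the key-prefix class; PasswordToken is Accumulo's standard password-based AuthenticationToken:

import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.clientImpl.mapreduce.lib.ConfiguratorBase;
import org.apache.hadoop.conf.Configuration;

public class ConnectorInfoDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "root"/"secret" are placeholder credentials, not real ones
        ConfiguratorBase.setConnectorInfo(ConnectorInfoDemo.class, conf, "root",
                new PasswordToken("secret"));
        // the token is serialized, BASE64 encoded, and stored inline in conf
    }
}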

From source file:org.apache.accumulo.core.clientImpl.mapreduce.lib.ConfiguratorBase.java

License:Apache License

/**
 * Sets the connector information needed to communicate with Accumulo in this job.
 *
 * <p>
 * Pulls a token file into the Distributed Cache that contains the authentication token in an
 * attempt to be more secure than storing the password in the Configuration. Token file created
 * with "bin/accumulo create-token".
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @param principal
 *          a valid Accumulo user name
 * @param tokenFile
 *          the path to the token file in DFS
 * @since 1.6.0
 */
public static void setConnectorInfo(Class<?> implementingClass, Configuration conf, String principal,
        String tokenFile) {
    if (isConnectorInfoSet(implementingClass, conf))
        throw new IllegalStateException(
                "Connector info for " + implementingClass.getSimpleName() + " can only be set once per job");

    checkArgument(principal != null, "principal is null");
    checkArgument(tokenFile != null, "tokenFile is null");

    try {
        DistributedCacheHelper.addCacheFile(new URI(tokenFile), conf);
    } catch (URISyntaxException e) {
        throw new IllegalStateException("Unable to add tokenFile \"" + tokenFile + "\" to distributed cache.");
    }

    conf.setBoolean(enumToConfKey(implementingClass, ConnectorInfo.IS_CONFIGURED), true);
    conf.set(enumToConfKey(implementingClass, ConnectorInfo.PRINCIPAL), principal);
    conf.set(enumToConfKey(implementingClass, ConnectorInfo.TOKEN), TokenSource.FILE.prefix() + tokenFile);
}

From source file:org.apache.accumulo.hadoopImpl.mapreduce.lib.ConfiguratorBase.java

License:Apache License

public static void setClientProperties(Class<?> implementingClass, Configuration conf, Properties props,
        String clientPropsPath) {
    if (clientPropsPath != null) {
        try {
            DistributedCacheHelper.addCacheFile(new URI(clientPropsPath), conf);
        } catch (URISyntaxException e) {
            throw new IllegalStateException(
                    "Unable to add client properties file \"" + clientPropsPath + "\" to distributed cache.", e);
        }
        conf.set(enumToConfKey(implementingClass, ClientOpts.CLIENT_PROPS_FILE), clientPropsPath);
    } else {
        StringWriter writer = new StringWriter();
        try {
            props.store(writer, "client properties");
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
        conf.set(enumToConfKey(implementingClass, ClientOpts.CLIENT_PROPS), writer.toString());
    }
    conf.setBoolean(enumToConfKey(implementingClass, ClientOpts.IS_CONFIGURED), true);
}
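
The else branch above serializes the Properties into a single configuration value. A minimal, Accumulo-free sketch of that round trip (the key "client.props" is made up):

import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.Properties;

import org.apache.hadoop.conf.Configuration;

public class ClientPropsDemo {
    public static void main(String[] args) throws IOException {
        Properties props = new Properties();
        props.setProperty("instance.name", "test");

        // serialize the properties into one configuration value
        StringWriter writer = new StringWriter();
        props.store(writer, "client properties");
        Configuration conf = new Configuration();
        conf.set("client.props", writer.toString()); // hypothetical key

        // deserialize them on the other side (e.g., inside a task)
        Properties restored = new Properties();
        restored.load(new StringReader(conf.get("client.props")));
        System.out.println(restored.getProperty("instance.name")); // prints: test
    }
}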

From source file:org.apache.accumulo.hadoopImpl.mapreduce.lib.ConfiguratorBase.java

License:Apache License

/**
 * Records that the store method has been called.
 *
 * @since 2.0.0
 */
public static void setJobStored(Class<?> implementingClass, Configuration conf) {
    conf.setBoolean(enumToConfKey(implementingClass, ClientOpts.STORE_JOB_CALLED), true);
}

From source file:org.apache.accumulo.server.test.functional.RunTests.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    job = new Job(getConf(), JOB_NAME);
    job.setJarByClass(this.getClass());

    // this is like 1-2 tests per mapper
    Configuration conf = job.getConfiguration();
    conf.setInt("mapred.max.split.size", 40);
    conf.set("accumulo.home", System.getenv("ACCUMULO_HOME"));
    conf.setInt("mapred.task.timeout", 8 * 60 * 1000);
    conf.setBoolean("mapred.map.tasks.speculative.execution", false);

    // set input
    job.setInputFormatClass(TextInputFormat.class);
    TextInputFormat.setInputPaths(job, new Path(args[0]));

    // set output
    job.setOutputFormatClass(TextOutputFormat.class);
    FileSystem fs = FileSystem.get(conf);
    Path destination = new Path(args[1]);
    if (fs.exists(destination)) {
        log.info("Deleting existing output directory " + args[1]);
        fs.delete(destination, true);
    }
    TextOutputFormat.setOutputPath(job, destination);

    // configure default reducer: put the results into one file
    job.setNumReduceTasks(1);

    // set mapper
    job.setMapperClass(TestMapper.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    // don't do anything with the results (yet); a summary would be nice
    job.setReducerClass(IdentityReducer.class);

    // the job itself is submitted by the caller ('job' is an instance field)
    log.info("Starting tests");
    return 0;
}

From source file:org.apache.accumulo.test.functional.RunTests.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    job = new Job(getConf(), JOB_NAME);
    job.setJarByClass(this.getClass());
    Opts opts = new Opts();
    opts.parseArgs(RunTests.class.getName(), args);

    // this is like 1-2 tests per mapper
    Configuration conf = job.getConfiguration();
    conf.setInt("mapred.max.split.size", 40);
    conf.set("accumulo.home", System.getenv("ACCUMULO_HOME"));
    conf.setInt("mapred.task.timeout", 8 * 60 * 1000);
    conf.setBoolean("mapred.map.tasks.speculative.execution", false);

    // set input
    job.setInputFormatClass(TextInputFormat.class);
    TextInputFormat.setInputPaths(job, new Path(opts.testFile));

    // set output
    job.setOutputFormatClass(TextOutputFormat.class);
    FileSystem fs = FileSystem.get(conf);
    Path destination = new Path(opts.outputPath);
    if (fs.exists(destination)) {
        log.info("Deleting existing output directory " + opts.outputPath);
        fs.delete(destination, true);
    }
    TextOutputFormat.setOutputPath(job, destination);

    // configure default reducer: put the results into one file
    job.setNumReduceTasks(1);

    // set mapper
    job.setMapperClass(TestMapper.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    // don't do anything with the results (yet); a summary would be nice.
    // note: this overrides the earlier setNumReduceTasks(1), making the job map-only
    job.setNumReduceTasks(0);

    // the job itself is submitted by the caller ('job' is an instance field)
    log.info("Starting tests");
    return 0;
}

From source file:org.apache.accumulo.test.mrit.IntegrationTestMapReduce.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    // read a list of tests from the input, and print out the results
    if (args.length != 2) {
        System.err.println("Wrong number of args: <input> <output>");
        return 1;
    }
    Configuration conf = getConf();
    Job job = Job.getInstance(conf, "accumulo integration test runner");
    conf = job.getConfiguration();

    // some tests take more than 10 minutes
    conf.setLong(MRJobConfig.TASK_TIMEOUT, 20 * 60 * 1000);

    // minicluster uses a lot of ram
    conf.setInt(MRJobConfig.MAP_MEMORY_MB, 4000);

    // hadoop puts an ancient version of jline on the classpath
    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);

    // no need to run a test multiple times
    job.setSpeculativeExecution(false);

    // read one line at a time
    job.setInputFormatClass(NLineInputFormat.class);
    NLineInputFormat.setNumLinesPerSplit(job, 1);

    // run the test
    job.setJarByClass(IntegrationTestMapReduce.class);
    job.setMapperClass(TestMapper.class);

    // group test by result code
    job.setReducerClass(TestReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    return job.waitForCompletion(true) ? 0 : 1;
}
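
For reference, the MRJobConfig constants above resolve to the following raw property names in Hadoop 2.x/3.x (verify against your Hadoop version before relying on them); these lines would be drop-in replacements for the three conf calls above:

// assumed Hadoop 2.x/3.x key names for the MRJobConfig constants
conf.setLong("mapreduce.task.timeout", 20 * 60 * 1000);      // MRJobConfig.TASK_TIMEOUT
conf.setInt("mapreduce.map.memory.mb", 4000);                // MRJobConfig.MAP_MEMORY_MB
conf.setBoolean("mapreduce.job.user.classpath.first", true); // MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST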

From source file:org.apache.ambari.servicemonitor.utils.DFSUtils.java

License:Apache License

/**
 * Create a DFS instance that is not cached
 *
 * @param conf the configuration to work with
 * @return the DFS instance
 * @throws IOException on any IO problem
 * @throws ExitMainException if the default FS isn't HDFS
 */
public static DistributedFileSystem createUncachedDFS(Configuration conf) throws IOException {
    conf.setBoolean(HadoopKeys.FS_HDFS_IMPL_DISABLE_CACHE, true);
    FileSystem filesys = FileSystem.get(conf);
    URI fsURI = filesys.getUri();
    if (!(filesys instanceof DistributedFileSystem)) {
        throw new ExitMainException(-1, "Filesystem is not HDFS " + fsURI);
    }
    return (DistributedFileSystem) filesys;
}
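
Setting the cache-disable flag mutates the configuration for every later FileSystem.get call. An alternative worth knowing is FileSystem.newInstance, which always returns a fresh, uncached instance without flipping the flag; a minimal sketch:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class UncachedFsDemo {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // newInstance bypasses the FileSystem cache entirely
        FileSystem fs = FileSystem.newInstance(conf);
        System.out.println("fs: " + fs.getUri());
        fs.close(); // safe: no cached instance shared with other callers is closed
    }
}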

From source file:org.apache.ambari.servicemonitor.utils.DFSUtils.java

License:Apache License

/**
 * Take a configuration and add the parameters that make DFS client calls
 * block and retry rather than fail fast
 *
 * @param conf the configuration to patch
 */
public static void makeDfsCallsBlocking(Configuration conf) {
    conf.setBoolean(DFS_CLIENT_RETRY_POLICY_ENABLED, true);
}
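
In stock Hadoop the corresponding property is dfs.client.retry.policy.enabled (an assumption that DFS_CLIENT_RETRY_POLICY_ENABLED aliases that key); a standalone equivalent:

import org.apache.hadoop.conf.Configuration;

public class BlockingDfsDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // assumed stock HDFS key: makes the DFS client retry (block) instead of failing fast
        conf.setBoolean("dfs.client.retry.policy.enabled", true);
        System.out.println(conf.getBoolean("dfs.client.retry.policy.enabled", false));
    }
}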