Example usage for org.apache.hadoop.conf Configuration setBoolean

List of usage examples for org.apache.hadoop.conf Configuration setBoolean

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.setBoolean.

Prototype

public void setBoolean(String name, boolean value) 

Document

Set the value of the name property to a boolean.
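
A minimal, self-contained sketch of the call; the property name "example.flag" and the class name are made up for illustration:

import org.apache.hadoop.conf.Configuration;

public class SetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Store a boolean under an illustrative property name.
        conf.setBoolean("example.flag", true);
        // Read it back; the second argument is the default used when the property is unset.
        System.out.println(conf.getBoolean("example.flag", false));
    }
}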

Usage

From source file:com.yahoo.glimmer.indexing.RDFDocumentFactory.java

License:Open Source License

protected static void setupConf(Configuration conf, IndexType type, boolean withContexts, String resourcesHash,
        String resourceIdPrefix, String... fields) {
    conf.setEnum(CONF_INDEX_TYPE_KEY, type);
    conf.setBoolean(CONF_WITH_CONTEXTS_KEY, withContexts);
    if (resourcesHash != null) {
        conf.set(CONF_RESOURCES_HASH_KEY, resourcesHash);
    }
    conf.set(CONF_RESOURCE_ID_PREFIX_KEY, resourceIdPrefix);
    conf.setStrings(CONF_FIELDNAMES_KEY, fields);
}

From source file:com.zjy.mongo.util.MongoConfigUtil.java

License:Apache License

/**
 * Enable using {@code $lt} and {@code $gt} to define InputSplits rather
 * than {@code $min} and {@code $max}. This allows the database's query
 * optimizer to choose the best index instead of using the one in the
 * $max/$min keys. This will only work if the key used for splitting is
 * *not* a compound key. Make sure that all values under the splitting key
 * are of the same type, or this will cause incomplete results.
 *
 * @param conf the Configuration
 * @param value enables using {@code $lt} and {@code $gt}
 */
public static void setRangeQueryEnabled(final Configuration conf, final boolean value) {
    conf.setBoolean(SPLITS_USE_RANGEQUERY, value);
}
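
A hedged usage sketch of the setter above, assuming SPLITS_USE_RANGEQUERY is a public constant on MongoConfigUtil and that false is the flag's default when unset:

Configuration conf = new Configuration();
// Enable $lt/$gt-based InputSplits; per the Javadoc, the splitting key must not be a compound key.
MongoConfigUtil.setRangeQueryEnabled(conf, true);
// Read the flag back (the 'false' default here is an assumption, not taken from the library).
boolean rangeQueryEnabled = conf.getBoolean(MongoConfigUtil.SPLITS_USE_RANGEQUERY, false);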

From source file:com.zjy.mongo.util.MongoConfigUtil.java

License:Apache License

/**
 * Set whether the reading directly from shards is enabled.
 *
 * When {@code true}, splits are read directly from shards. By default,
 * splits are read through a mongos router when connected to a sharded
 * cluster. Note that reading directly from shards can lead to bizarre
 * results when there are orphaned documents or if the balancer is running.
 *
 * @param conf the Configuration
 * @param value enables reading from shards directly
 *
 * @see <a href="http://docs.mongodb.org/manual/core/sharding-balancing/">Sharding Balancing</a>
 * @see <a href="http://docs.mongodb.org/manual/reference/command/cleanupOrphaned/#dbcmd.cleanupOrphaned">cleanupOrphaned command</a>
 */
public static void setReadSplitsFromShards(final Configuration conf, final boolean value) {
    conf.setBoolean(SPLITS_USE_SHARDS, value);
}

From source file:com.zjy.mongo.util.MongoConfigUtil.java

License:Apache License

/**
 * Set whether using shard chunk splits as InputSplits is enabled.
 * @param conf the Configuration
 * @param value enables using shard chunk splits as InputSplits.
 */
public static void setShardChunkSplittingEnabled(final Configuration conf, final boolean value) {
    conf.setBoolean(SPLITS_USE_CHUNKS, value);
}

From source file:com.zjy.mongo.util.MongoConfigUtil.java

License:Apache License

public static Configuration buildConfiguration(final Map<String, Object> data) {
    Configuration newConf = new Configuration();
    for (Entry<String, Object> entry : data.entrySet()) {
        String key = entry.getKey();
        Object val = entry.getValue();
        if (val instanceof String) {
            newConf.set(key, (String) val);
        } else if (val instanceof Boolean) {
            newConf.setBoolean(key, (Boolean) val);
        } else if (val instanceof Integer) {
            newConf.setInt(key, (Integer) val);
        } else if (val instanceof Float) {
            newConf.setFloat(key, (Float) val);
        } else if (val instanceof DBObject) {
            setDBObject(newConf, key, (DBObject) val);
        } else {
            throw new RuntimeException("can't convert " + val.getClass() + " into any type for Configuration");
        }
    }
    return newConf;
}
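
A sketch of how buildConfiguration might be called, based only on the signature and branches above; the map keys are illustrative placeholders:

Map<String, Object> settings = new HashMap<>();
settings.put("example.flag", true);    // Boolean value, stored via setBoolean
settings.put("example.limit", 100);    // Integer value, stored via setInt
settings.put("example.name", "test");  // String value, stored via set
Configuration conf = MongoConfigUtil.buildConfiguration(settings);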

From source file:crunch.MaxTemperature.java

License:Apache License

public static void main(String[] args) throws Exception {
        if (args.length != 2) {
            System.err.println("Usage: MaxTemperatureWithMapOutputCompression " + "<input path> <output path>");
            System.exit(-1);
        }

        // vv MaxTemperatureWithMapOutputCompression
        Configuration conf = new Configuration();
        conf.setBoolean("mapred.compress.map.output", true);
        conf.setClass("mapred.map.output.compression.codec", GzipCodec.class, CompressionCodec.class);
        Job job = new Job(conf);
        // ^^ MaxTemperatureWithMapOutputCompression
        job.setJarByClass(MaxTemperature.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setMapperClass(MaxTemperatureMapper.class);
        job.setCombinerClass(MaxTemperatureReducer.class);
        job.setReducerClass(MaxTemperatureReducer.class);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
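
The two property names set above are the older MapReduce 1 keys. A sketch of the equivalent configuration under the newer names, assuming a Hadoop 2.x or later release where the old keys remain as deprecated aliases:

Configuration conf = new Configuration();
// Newer key names for map-output compression (the mapred.* names above are the deprecated aliases).
conf.setBoolean("mapreduce.map.output.compress", true);
conf.setClass("mapreduce.map.output.compress.codec", GzipCodec.class, CompressionCodec.class);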

From source file:crunch.MaxTemperature.java

License:Apache License

@Override
    public int run(String[] args) throws Exception {
        if (args.length != 2) {
            System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getSimpleName());
            ToolRunner.printGenericCommandUsage(System.err);
            return -1;
        }

        //vv MaxTemperatureDriverV6
        Configuration conf = getConf();
        conf.setBoolean("mapred.task.profile", true);
        conf.set("mapred.task.profile.params",
                "-agentlib:hprof=cpu=samples," + "heap=sites,depth=6,force=n,thread=y,verbose=n,file=%s");
        conf.set("mapred.task.profile.maps", "0-2");
        conf.set("mapred.task.profile.reduces", ""); // no reduces
        Job job = new Job(conf, "Max temperature");
        //^^ MaxTemperatureDriverV6

        // Following alternative is only available in 0.21 onwards
        //    conf.setBoolean(JobContext.TASK_PROFILE, true);
        //    conf.set(JobContext.TASK_PROFILE_PARAMS, "-agentlib:hprof=cpu=samples," +
        //        "heap=sites,depth=6,force=n,thread=y,verbose=n,file=%s");
        //    conf.set(JobContext.NUM_MAP_PROFILES, "0-2");
        //    conf.set(JobContext.NUM_REDUCE_PROFILES, "");

        job.setJarByClass(getClass());

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.setMapperClass(MaxTemperatureMapper.class);
        job.setCombinerClass(MaxTemperatureReducer.class);
        job.setReducerClass(MaxTemperatureReducer.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        return job.waitForCompletion(true) ? 0 : 1;
    }
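
As with the compression example, these are the older property names. A sketch using the Hadoop 2.x equivalents, assuming the renamed keys are available in the release being used:

Configuration conf = getConf();
// Newer key names for task profiling (the mapred.* names above are the deprecated aliases).
conf.setBoolean("mapreduce.task.profile", true);
conf.set("mapreduce.task.profile.params",
        "-agentlib:hprof=cpu=samples,heap=sites,depth=6,force=n,thread=y,verbose=n,file=%s");
conf.set("mapreduce.task.profile.maps", "0-2");
conf.set("mapreduce.task.profile.reduces", "");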

From source file:de.gesundkrank.wikipedia.hadoop.util.RepackToMapFile.java

License:Open Source License

public int run(String basePath, String outputPath, boolean checkNew, boolean skipRedirect) throws Exception {
    Configuration configuration = getConf();
    configuration.setBoolean("skipRedirect", skipRedirect);

    LOGGER.info("Tool name: " + getClass().getSimpleName());

    Job job = Job.getInstance(configuration, getClass().getSimpleName());
    job.setJarByClass(getClass());

    job.setMapperClass(WikiMapper.class);
    job.setInputFormatClass(WikiInputFormat.class);
    job.setOutputFormatClass(MapFileOutputFormat.class);
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(WikiRevisionWritable.class);

    WikiDumpLoader wikiDumpLoader = new WikiDumpLoader(checkNew);
    wikiDumpLoader.addWikiDump(job, basePath);

    MapFileOutputFormat.setOutputPath(job, new Path(outputPath));

    job.setNumReduceTasks(1);

    return job.waitForCompletion(true) ? 0 : 1;
}
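
On the task side, the "skipRedirect" flag set above would typically be read back from the job Configuration inside the mapper. A hedged sketch; the setup() override below is illustrative and not necessarily how WikiMapper is actually implemented:

// Illustrative only: reading the flag back inside a Mapper subclass.
@Override
protected void setup(Context context) {
    boolean skipRedirect = context.getConfiguration().getBoolean("skipRedirect", false);
    // ... use the flag to decide whether redirect revisions are skipped
}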

From source file:de.tiqsolutions.hdfs.HadoopTestBase.java

License:Apache License

@BeforeClass
public static void setUp() throws Exception {
    String key = "java.protocol.handler.pkgs";
    String newValue = "de.tiqsolutions";
    if (System.getProperty(key) != null) {
        String previousValue = System.getProperty(key);
        newValue = newValue + "|" + previousValue;
    }
    System.setProperty(key, newValue);
    File baseDir = new File("./target/hdfs/" + HadoopTestBase.class.getName()).getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    Configuration conf = new Configuration();
    conf.set("hdfs.minidfs.basedir", baseDir.getAbsolutePath());
    conf.setBoolean("dfs.webhdfs.enabled", true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    hdfsCluster.waitActive();
    HDFS_BASE_URI = hdfsCluster.getURI();
    WEBHDFS_BASE_URI = new URI("webhdfs://" + conf.get("dfs.namenode.http-address"));

    try (FileSystem fs = FileSystems.newFileSystem(HDFS_BASE_URI, System.getenv())) {

        Files.copy(Paths.get(HadoopTestBase.class.getResource("/test.csv").toURI()),
                Paths.get(HDFS_BASE_URI.resolve("/test.csv")), StandardCopyOption.REPLACE_EXISTING);

    }

}

From source file:de.tuberlin.dima.aim3.HadoopJob.java

License:Open Source License

protected Job prepareJob(Path inputPath, Path outputPath, Class<? extends InputFormat> inputFormat,
        Class<? extends Mapper> mapper, Class<? extends Writable> mapperKey,
        Class<? extends Writable> mapperValue, Class<? extends OutputFormat> outputFormat) throws IOException {

    Job job = new Job(new Configuration(getConf()));
    Configuration jobConf = job.getConfiguration();

    if (mapper.equals(Mapper.class)) {
        throw new IllegalStateException("Can't figure out the user class jar file from mapper/reducer");
    } else {
        job.setJarByClass(mapper);
    }

    job.setInputFormatClass(inputFormat);
    jobConf.set("mapred.input.dir", inputPath.toString());

    job.setMapperClass(mapper);
    job.setMapOutputKeyClass(mapperKey);
    job.setMapOutputValueClass(mapperValue);
    job.setOutputKeyClass(mapperKey);
    job.setOutputValueClass(mapperValue);

    jobConf.setBoolean("mapred.compress.map.output", true);

    job.setNumReduceTasks(0);

    job.setJobName(getCustomJobName(job, mapper));

    job.setOutputFormatClass(outputFormat);
    jobConf.set("mapred.output.dir", outputPath.toString());

    return job;
}
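
A sketch of calling prepareJob, based only on the signature above; the mapper class, key/value types, formats, and paths are placeholders:

// MyMapper is a hypothetical Mapper subclass; the input/output formats and paths are placeholders too.
Job job = prepareJob(new Path("/input"), new Path("/output"),
        TextInputFormat.class, MyMapper.class, Text.class, IntWritable.class,
        SequenceFileOutputFormat.class);
boolean succeeded = job.waitForCompletion(true);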