Example usage for org.apache.hadoop.conf Configuration setBoolean

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration#setBoolean, drawn from open source projects.

Prototype

public void setBoolean(String name, boolean value) 

Document

Set the value of the name property to a boolean.
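
A minimal, self-contained sketch of the call pattern; the property name example.feature.enabled is made up for illustration:

import org.apache.hadoop.conf.Configuration;

public class SetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Stores the value as the string "true" under the given property name.
        conf.setBoolean("example.feature.enabled", true);
        // getBoolean reads it back; the second argument is the default used when unset.
        boolean enabled = conf.getBoolean("example.feature.enabled", false);
        System.out.println(enabled); // prints: true
    }
}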

Usage

From source file: org.apache.avro.mapreduce.TestAvroKeyOutputFormat.java

License: Apache License

@Test
public void testWithSnappyCodeWithHadoopConfig() throws IOException {
    Configuration conf = new Configuration();
    conf.setBoolean("mapred.output.compress", true);
    conf.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.SnappyCodec");
    testGetRecordWriter(conf, CodecFactory.snappyCodec());
}

From source file: org.apache.avro.mapreduce.TestAvroKeyOutputFormat.java

License: Apache License

@Test
public void testWithBZip2CodeWithHadoopConfig() throws IOException {
    Configuration conf = new Configuration();
    conf.setBoolean("mapred.output.compress", true);
    conf.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.BZip2Codec");
    testGetRecordWriter(conf, CodecFactory.bzip2Codec());
}
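
Both tests above use the classic mapred.* property names. On Hadoop 2 and later the same switches are also exposed under newer names, and the old ones keep working through Hadoop's deprecation mapping. A sketch of the newer spelling:

Configuration conf = new Configuration();
// Hadoop 2+ names for the same output-compression settings.
conf.setBoolean("mapreduce.output.fileoutputformat.compress", true);
conf.set("mapreduce.output.fileoutputformat.compress.codec",
        "org.apache.hadoop.io.compress.SnappyCodec");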

From source file: org.apache.blur.hive.BlurHiveOutputFormat.java

License: Apache License

public static void setBlurUserAsProxy(Configuration configuration, boolean blurUserProxy) {
    configuration.setBoolean(BLUR_USER_PROXY, blurUserProxy);
}
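
The matching read side would use getBoolean with a default. A sketch assuming the same BLUR_USER_PROXY key; the false default here is an assumption, not taken from Blur's source:

public static boolean isBlurUserAsProxy(Configuration configuration) {
    // The false fallback is assumed; check BlurHiveOutputFormat for the real default.
    return configuration.getBoolean(BLUR_USER_PROXY, false);
}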

From source file: org.apache.blur.mapreduce.lib.BlurOutputFormat.java

License: Apache License

/**
 * Enabled by default, this will enable local indexing on the machine where
 * the task is running. Then, when the {@link RecordWriter} closes, the index
 * is copied to the remote destination in HDFS.
 *
 * @param configuration
 *          the configuration to setup.
 * @param b
 *          true to enable, false to disable.
 */
public static void setIndexLocally(Configuration configuration, boolean b) {
    configuration.setBoolean(BLUR_OUTPUT_INDEXLOCALLY, b);
}
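
Because the javadoc says the feature is enabled by default, a matching reader would plausibly use a true default. A sketch only; the default value is inferred from the comment, not from Blur's source:

public static boolean isIndexLocally(Configuration configuration) {
    // true mirrors the "enabled by default" wording above; assumed, not verified.
    return configuration.getBoolean(BLUR_OUTPUT_INDEXLOCALLY, true);
}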

From source file: org.apache.blur.mapreduce.lib.BlurOutputFormat.java

License: Apache License

/**
 * Enabled by default, this will optimize the index while copying from the
 * local index to the remote destination in HDFS. Used in conjunction with
 * setIndexLocally.
 *
 * @param configuration
 *          the configuration to setup.
 * @param b
 *          true to enable, false to disable.
 */
public static void setOptimizeInFlight(Configuration configuration, boolean b) {
    configuration.setBoolean(BLUR_OUTPUT_OPTIMIZEINFLIGHT, b);
}

From source file: org.apache.blur.mapreduce.lib.CsvBlurMapper.java

License: Apache License

/**
 * If set to true the record id will be automatically generated as a hash of
 * the data that the record contains.
 *
 * @param configuration
 *          the configuration to setup.
 * @param autoGenerateRecordIdAsHashOfData
 *          true to enable, false to disable.
 */
public static void setAutoGenerateRecordIdAsHashOfData(Configuration configuration,
        boolean autoGenerateRecordIdAsHashOfData) {
    configuration.setBoolean(BLUR_CSV_AUTO_GENERATE_RECORD_ID_AS_HASH_OF_DATA,
            autoGenerateRecordIdAsHashOfData);
}

From source file: org.apache.blur.mapreduce.lib.CsvBlurMapper.java

License: Apache License

/**
 * If set to true the row id will be automatically generated as a hash of
 * the data that the row contains.
 *
 * @param configuration
 *          the configuration to setup.
 * @param autoGenerateRowIdAsHashOfData
 *          true to enable, false to disable.
 */
public static void setAutoGenerateRowIdAsHashOfData(Configuration configuration,
        boolean autoGenerateRowIdAsHashOfData) {
    configuration.setBoolean(BLUR_CSV_AUTO_GENERATE_ROW_ID_AS_HASH_OF_DATA, autoGenerateRowIdAsHashOfData);
}
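
In a job driver the two setters are typically combined on one Configuration before submission. A hypothetical setup sketch:

Configuration conf = new Configuration();
// Hypothetical driver code: derive both row and record ids from the record data.
CsvBlurMapper.setAutoGenerateRowIdAsHashOfData(conf, true);
CsvBlurMapper.setAutoGenerateRecordIdAsHashOfData(conf, true);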

From source file: org.apache.carbondata.hadoop.csv.CSVInputFormatTest.java

License: Apache License

private void prepareConf(Configuration conf) {
    conf.setBoolean(CSVInputFormat.HEADER_PRESENT, true);
}
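
On the read side the flag comes back through getBoolean. A sketch assuming a false default; the real default lives in CSVInputFormat:

private static boolean isHeaderPresent(Configuration conf) {
    // The false fallback is an assumption; CSVInputFormat defines the actual default.
    return conf.getBoolean(CSVInputFormat.HEADER_PRESENT, false);
}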

From source file: org.apache.carbondata.processing.loading.csvinput.CSVInputFormatTest.java

License: Apache License

/**
 * Tests reading CSV files (plain, .bz2, and .gz inputs).
 * @throws Exception
 */
@Test
public void testReadCSVFiles() throws Exception {
    Configuration conf = new Configuration();
    prepareConf(conf);
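    // Redundant: prepareConf already set HEADER_PRESENT to true; this call simply rewrites it.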
    conf.setBoolean(CSVInputFormat.HEADER_PRESENT, true);
    File output = new File("target/output_CSVInputFormatTest");
    conf.set("mapreduce.cluster.local.dir", output.getCanonicalPath());
    Job job = Job.getInstance(conf, "CSVInputFormat_normal");
    job.setJarByClass(CSVInputFormatTest.class);
    job.setMapperClass(CSVCheckMapper.class);
    job.setNumReduceTasks(0);
    job.setInputFormatClass(CSVInputFormat.class);

    String inputFolder = new File("src/test/resources/csv").getCanonicalPath();
    FileInputFormat.addInputPath(job, new Path(inputFolder + File.separator + "data.csv"));
    FileInputFormat.addInputPath(job, new Path(inputFolder + File.separator + "data.csv.bz2"));
    FileInputFormat.addInputPath(job, new Path(inputFolder + File.separator + "data.csv.gz"));
    // FileInputFormat.addInputPath(job, new Path(inputFolder + File.separator + "data.csv.lz4"));
    // FileInputFormat.addInputPath(job, new Path(inputFolder + File.separator + "data.csv.snappy"));

    deleteOutput(output);
    FileOutputFormat.setOutputPath(job, new Path(output.getCanonicalPath()));

    Assert.assertTrue(job.waitForCompletion(true));
}

From source file:org.apache.carbondata.processing.loading.csvinput.CSVInputFormatTest.java

License:Apache License

/**
 * Tests reading CSV files encoded as UTF-8 with a BOM.
 * @throws Exception
 */
@Test
public void testReadCSVFilesWithBOM() throws Exception {

    Configuration conf = new Configuration();
    prepareConf(conf);
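    // Overrides the true value set in prepareConf; a later setBoolean replaces the earlier one.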
    conf.setBoolean(CSVInputFormat.HEADER_PRESENT, false);
    File output = new File("target/output_CSVInputFormatTest_bom");
    conf.set("mapreduce.cluster.local.dir", output.getCanonicalPath());
    Job job = Job.getInstance(conf, "CSVInputFormat_normal_bom");
    job.setJarByClass(CSVInputFormatTest.class);
    job.setMapperClass(CSVCheckMapper.class);
    job.setNumReduceTasks(0);
    job.setInputFormatClass(CSVInputFormat.class);

    String inputFolder = new File("src/test/resources/csv").getCanonicalPath();
    FileInputFormat.addInputPath(job, new Path(inputFolder + File.separator + "csv_with_bom.csv"));
    FileInputFormat.addInputPath(job, new Path(inputFolder + File.separator + "csv_with_bom.csv.bz2"));
    FileInputFormat.addInputPath(job, new Path(inputFolder + File.separator + "csv_with_bom.csv.gz"));

    deleteOutput(output);
    FileOutputFormat.setOutputPath(job, new Path(output.getCanonicalPath()));

    Assert.assertTrue(job.waitForCompletion(true));
    deleteOutput(output);
}