Example usage for org.apache.hadoop.conf.Configuration.getBoolean

Introduction

On this page you can find usage examples for org.apache.hadoop.conf.Configuration.getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property is specified, or if the specified value is not a valid boolean, then defaultValue is returned.
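
As a quick, self-contained sketch (not taken from the sources below; the property name "my.feature.enabled" is invented for illustration), a boolean property can be set with setBoolean and read back with getBoolean:

import org.apache.hadoop.conf.Configuration;

public class GetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // The property is not set yet, so the supplied default is returned.
        boolean before = conf.getBoolean("my.feature.enabled", false); // false

        // Once the property is set, the stored value wins over the default.
        conf.setBoolean("my.feature.enabled", true);
        boolean after = conf.getBoolean("my.feature.enabled", false); // true

        System.out.println(before + " -> " + after);
    }
}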

Usage

From source file:com.inmobi.conduit.distcp.tools.mapred.CopyCommitter.java

License:Apache License
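This commit hook uses getBoolean with a false default to decide whether files missing from the source should be deleted from the target and whether an atomic copy still needs its data committed.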

/** {@inheritDoc} */
@Override
public void commitJob(JobContext jobContext) throws IOException {
    Configuration conf = HadoopCompat.getConfiguration(jobContext);
    super.commitJob(jobContext);

    cleanupTempFiles(jobContext);

    String attributes = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
    if (attributes != null && !attributes.isEmpty()) {
        preserveFileAttributes(conf);
    }

    if (conf.getBoolean(DistCpConstants.CONF_LABEL_DELETE_MISSING, false)) {
        deleteMissing(conf);
    } else if (conf.getBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false)) {
        commitData(conf);
    }
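    // taskAttemptContext is a field of the surrounding CopyCommitter class (not shown in this excerpt).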
    HadoopCompat.setStatus(taskAttemptContext, "Commit Successful");
    cleanup(conf);
}

From source file:com.inmobi.conduit.distcp.tools.TestOptionsParser.java

License:Apache License
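This test reads the atomic-commit label back with getBoolean and a false default to verify that addToConf actually set it.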

@Test
public void testOptionsSwitchAddToConf() {
    Configuration conf = new Configuration();
    Assert.assertNull(conf.get(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel()));
    DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.ATOMIC_COMMIT);
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
}

From source file:com.inmobi.conduit.distcp.tools.TestOptionsParser.java

License:Apache License
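Here getBoolean verifies that boolean option labels are unset on a fresh Configuration and become true once the parsed options are appended to it.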

@Test
public void testOptionsAppendToConf() {
    Configuration conf = new Configuration();
    Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
    Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
    DistCpOptions options = OptionsParser.parse(new String[] { "-atomic", "-i",
            "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    options.appendToConf(conf);
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
    Assert.assertEquals(conf.getInt(DistCpOptionSwitch.BANDWIDTH_KB.getConfigLabel(), -1),
            DistCpConstants.DEFAULT_BANDWIDTH_KB);

    conf = new Configuration();
    Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
    Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
    Assert.assertEquals(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()), null);
    options = OptionsParser.parse(new String[] { "-update", "-delete", "-pu", "-bandwidth", "11",
            "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    options.appendToConf(conf);
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
    Assert.assertEquals(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()), "U");
    Assert.assertEquals(conf.getInt(DistCpOptionSwitch.BANDWIDTH_KB.getConfigLabel(), -1), 11 * 1024);
}

From source file:com.inmobi.grill.driver.cube.CubeGrillDriver.java

License:Apache License
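getBoolean, read against a configured default constant, decides whether explain should also prepare the query before delegating to a driver.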

@Override
public DriverQueryPlan explain(String query, Configuration conf) throws GrillException {
    if (conf.getBoolean(GrillConfConstants.PREPARE_ON_EXPLAIN, GrillConfConstants.DEFAULT_PREPARE_ON_EXPLAIN)) {
        PreparedQueryContext ctx = new PreparedQueryContext(query, null, conf);
        return explainAndPrepare(ctx);
    }
    Map<GrillDriver, String> driverQueries = RewriteUtil.rewriteQuery(query, drivers, conf);
    GrillDriver driver = selectDriver(driverQueries, conf);
    return driver.explain(driverQueries.get(driver), conf);
}

From source file:com.inmobi.grill.server.api.query.QueryContext.java

License:Apache License
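The constructor reads the persistent-result-set flag with getBoolean, defaulting to true.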

private QueryContext(String userQuery, String user, GrillConf qconf, Configuration conf, String driverQuery,
        GrillDriver selectedDriver) {
    this.submissionTime = new Date().getTime();
    this.queryHandle = new QueryHandle(UUID.randomUUID());
    this.status = new QueryStatus(0.0f, Status.NEW, "Query just got created", false, null, null);
    this.priority = Priority.NORMAL;
    this.conf = conf;
    this.isPersistent = conf.getBoolean(GrillConfConstants.GRILL_PERSISTENT_RESULT_SET, true);
    this.userQuery = userQuery;
    this.submittedUser = user;
    this.driverQuery = driverQuery;
    this.selectedDriver = selectedDriver;
    this.qconf = qconf;
    this.driverStatus = new DriverQueryStatus();
}

From source file:com.intel.hadoop.hbase.dot.DotUtil.java

License:Apache License
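getBoolean checks whether the table is a DOT table before the corresponding values are copied into the HTableDescriptor.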

/**
 * Prepares a DOT table by setting its required configuration items in the
 * HTableDescriptor.
 *
 * @param conf the configuration to read the DOT table settings from
 * @param htd the HTableDescriptor to update
 * @return the (possibly updated) HTableDescriptor
 */
public static HTableDescriptor prepareDotTable(Configuration conf, HTableDescriptor htd) {
    boolean isDot = conf.getBoolean(DotConstants.HBASE_TABLE_IS_A_DOT_TABLE, false);
    if (isDot) {
        htd.setValue(DotConstants.HBASE_TABLE_IS_A_DOT_TABLE, String.valueOf(true));
        htd.setValue(DotConstants.HBASE_DOT_TABLE_TYPE,
                conf.get(DotConstants.HBASE_DOT_TABLE_TYPE, DotConstants.HBASE_DOT_TABLE_TYPE_DEFAULT));
    }
    return htd;
}

From source file:com.kasabi.labs.freebase.mr.Freebase2RDFDriver.java

License:Apache License
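The driver reads two boolean options with getBoolean: whether to enable compression for the job, and whether to delete an existing output path before the job is submitted.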

@Override
public int run(String[] args) throws Exception {
    if (log.isDebugEnabled()) {
        log.debug("run({})", Utils.toString(args));
    }

    if (args.length != 2) {
        System.err.printf("Usage: %s [generic options] <input> <output>\n", getClass().getName());
        ToolRunner.printGenericCommandUsage(System.err);
        return -1;
    }

    Configuration configuration = getConf();
    boolean useCompression = configuration.getBoolean(Constants.OPTION_USE_COMPRESSION,
            Constants.OPTION_USE_COMPRESSION_DEFAULT);

    if (useCompression) {
        configuration.setBoolean("mapred.compress.map.output", true);
        configuration.set("mapred.output.compression.type", "BLOCK");
        configuration.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.GzipCodec");
    }

    boolean overrideOutput = configuration.getBoolean(Constants.OPTION_OVERRIDE_OUTPUT,
            Constants.OPTION_OVERRIDE_OUTPUT_DEFAULT);
    FileSystem fs = FileSystem.get(new Path(args[1]).toUri(), configuration);
    if (overrideOutput) {
        fs.delete(new Path(args[1]), true);
    }

    Job job = new Job(configuration);
    job.setJobName("Freebase2RDFDriver");
    job.setJarByClass(getClass());

    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    job.setInputFormatClass(TextInputFormat.class);

    job.setMapperClass(Freebase2RDFMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    job.setReducerClass(Freebase2RDFReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    Utils.setReducers(job, configuration, log);

    job.setOutputFormatClass(TextOutputFormat.class);

    if (log.isDebugEnabled())
        Utils.log(job, log);

    return job.waitForCompletion(true) ? 0 : 1;
}

From source file:com.kasabi.labs.freebase.mr.Utils.java

License:Apache License
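getBoolean selects between running locally with a single reducer and using the configured number of reducers.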

public static void setReducers(Job job, Configuration configuration, Logger log) {
    boolean runLocal = configuration.getBoolean(Constants.OPTION_RUN_LOCAL, Constants.OPTION_RUN_LOCAL_DEFAULT);
    int num_reducers = configuration.getInt(Constants.OPTION_NUM_REDUCERS,
            Constants.OPTION_NUM_REDUCERS_DEFAULT);

    if (runLocal) {
        if (log != null)
            log.debug("Setting number of reducers to {}", 1);
        job.setNumReduceTasks(1);
    } else {
        job.setNumReduceTasks(num_reducers);
        if (log != null)
            log.debug("Setting number of reducers to {}", num_reducers);
    }
}

From source file:com.knewton.mapreduce.SSTableRecordReader.java

License:Apache License
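A compression flag read once with getBoolean determines whether each copied SSTable component is decompressed after the copy.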

/**
 * Moves the minimum set of tables required by the table reader to local disk.
 *
 * @param remoteFS the remote filesystem holding the table
 * @param localFS the local filesystem to copy to
 * @param dataTablePath the path of the data table to work on
 * @param context the task attempt context
 */
@VisibleForTesting
void copyTablesToLocal(FileSystem remoteFS, FileSystem localFS, Path dataTablePath, TaskAttemptContext context)
        throws IOException {
    Configuration conf = context.getConfiguration();
    String hdfsDataTablePathStr = dataTablePath.toUri().getPath();
    String localDataTablePathStr = dataTablePath.toUri().getHost() + File.separator
            + dataTablePath.toUri().getPath();
    // Make path relative due to EMR permissions
    if (localDataTablePathStr.startsWith("/")) {
        String mapTaskId = conf.get("mapreduce.task.attempt.id");
        String mapTempDir = conf.get("mapreduce.cluster.temp.dir");
        String taskWorkDir = mapTempDir + File.separator + mapTaskId;
        LOG.info("Appending {} to {}", taskWorkDir, localDataTablePathStr);
        localDataTablePathStr = taskWorkDir + localDataTablePathStr;
    }
    Path localDataTablePath = new Path(localDataTablePathStr);
    LOG.info("Copying hdfs file from {} to local disk at {}.", dataTablePath.toUri(),
            localDataTablePath.toUri());
    copyToLocalFile(remoteFS, localFS, dataTablePath, localDataTablePath);
    boolean isCompressed = conf.getBoolean(PropertyConstants.COMPRESSION_ENABLED.txt, false);
    if (isCompressed) {
        decompress(localDataTablePath, context);
    }
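    // components and desc are fields of the surrounding SSTableRecordReader class (not shown in this excerpt).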
    components.add(Component.DATA);
    desc = Descriptor.fromFilename(localDataTablePathStr);
    Descriptor hdfsDesc = Descriptor.fromFilename(hdfsDataTablePathStr);
    String indexPathStr = hdfsDesc.filenameFor(Component.PRIMARY_INDEX);
    components.add(Component.PRIMARY_INDEX);
    Path localIdxPath = new Path(desc.filenameFor(Component.PRIMARY_INDEX));
    LOG.info("Copying hdfs file from {} to local disk at {}.", indexPathStr, localIdxPath);
    copyToLocalFile(remoteFS, localFS, new Path(indexPathStr), localIdxPath);
    if (isCompressed) {
        decompress(localIdxPath, context);
    }
    String compressionTablePathStr = hdfsDesc.filenameFor(Component.COMPRESSION_INFO.name());
    Path compressionTablePath = new Path(compressionTablePathStr);
    if (remoteFS.exists(compressionTablePath)) {
        Path localCompressionPath = new Path(desc.filenameFor(Component.COMPRESSION_INFO.name()));
        LOG.info("Copying hdfs file from {} to local disk at {}.", compressionTablePath.toUri(),
                localCompressionPath);
        copyToLocalFile(remoteFS, localFS, compressionTablePath, localCompressionPath);
        if (isCompressed) {
            decompress(localCompressionPath, context);
        }
        components.add(Component.COMPRESSION_INFO);
    }
}

From source file:com.knewton.mapreduce.SSTableRecordReader.java

License:Apache License
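A one-line wrapper around getBoolean that defaults to sparse columns when the property is unset.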

/**
 * @return True if the columns are sparse, false if they're dense
 */
private boolean getConfIsSparse(Configuration conf) {
    return conf.getBoolean(PropertyConstants.SPARSE_COLUMN.txt, true);
}