Example usage for org.apache.hadoop.conf Configuration set

Introduction

This page collects example usages of the set(String name, String value) method of org.apache.hadoop.conf.Configuration.

Prototype

public void set(String name, String value) 

Document

Set the value of the name property.
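
For orientation, here is a minimal, self-contained sketch of the call before the real-world examples below. The key fs.defaultFS is a standard Hadoop property, but the URI value is purely illustrative; any pair of strings works the same way.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationSetExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Store a string-valued property under the given name.
        // The URI below is an illustrative value, not a required one.
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        // Read it back with get(); programmatically set values take
        // precedence over values loaded from configuration resources.
        System.out.println(conf.get("fs.defaultFS"));
    }
}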

Usage

From source file: backup.integration.MiniClusterTestBase.java

License: Apache License

private Configuration setupConfig(File hdfsDir) throws Exception {
    Configuration conf = new Configuration();
    File backup = new File(tmpHdfs, "backup");
    backup.mkdirs();
    conf.set(DFS_BACKUP_NAMENODE_LOCAL_DIR_KEY, backup.getAbsolutePath());
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.getAbsolutePath());
    conf.set(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY, BackupFsDatasetSpiFactory.class.getName());
    conf.set(DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY, DataNodeBackupServicePlugin.class.getName());
    conf.set(DFSConfigKeys.DFS_NAMENODE_PLUGINS_KEY, NameNodeBackupServicePlugin.class.getName());

    conf.setInt(BackupConstants.DFS_BACKUP_DATANODE_RPC_PORT_KEY, 0);
    conf.setInt(BackupConstants.DFS_BACKUP_NAMENODE_HTTP_PORT_KEY, 0);

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 2);// 3
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_KEY, 2);// 3
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, 6000);// 30000
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 6000);// 5*60*1000

    org.apache.commons.configuration.Configuration configuration = BackupUtil.convert(conf);
    setupBackupStore(configuration);
    @SuppressWarnings("unchecked")
    Iterator<String> keys = configuration.getKeys();
    while (keys.hasNext()) {
        String key = keys.next();
        conf.set(key, configuration.getString(key));
    }

    return conf;
}

From source file: be.uantwerpen.adrem.bigfim.BigFIMDriver.java

License: Apache License

private static void setConfigurationValues(Configuration conf, FIMOptions opt) {
    conf.set(DELIMITER_KEY, opt.delimiter);
    conf.setInt(MIN_SUP_KEY, opt.minSup);
    conf.setInt(NUMBER_OF_MAPPERS_KEY, opt.nrMappers);
    conf.setInt(NUMBER_OF_CHUNKS, opt.nrMappers);
    conf.setInt(PREFIX_LENGTH_KEY, opt.prefixLength);
    conf.setStrings(OUTPUT_DIR_KEY, opt.outputDir);
}

From source file: be.uantwerpen.adrem.disteclat.DistEclatDriver.java

License: Apache License

/**
 * Passes all configuration flags to the Hadoop Configuration framework.
 *
 * @param conf
 *          the Hadoop configuration
 * @param opt
 *          the FIMOptions object that holds the user-defined flags
 */
private static void setConfigurationValues(Configuration conf, FIMOptions opt) {
    conf.set(DELIMITER_KEY, opt.delimiter);
    conf.setInt(MIN_SUP_KEY, opt.minSup);
    conf.setInt(NUMBER_OF_MAPPERS_KEY, opt.nrMappers);
    conf.setInt(NUMBER_OF_CHUNKS, opt.nrMappers);
    conf.setInt(PREFIX_LENGTH_KEY, opt.prefixLength);
    conf.setStrings(OUTPUT_DIR_KEY, opt.outputDir);
}

From source file: be.uantwerpen.adrem.eclat.util.SplitByKTextInputFormatTest.java

License: Apache License

private Configuration createConfiguration(int... numberOfLines) {
    Configuration conf = new Configuration();
    if (numberOfLines.length > 0) {
        conf.setLong(NUMBER_OF_LINES_KEY, numberOfLines[0]);
    }
    conf.set("fs.default.name", "file:///");
    conf.setBoolean("fs.file.impl.disable.cache", false);
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
    return conf;
}

From source file: be.uantwerpen.adrem.hadoop.util.Tools.java

License: Apache License

@SuppressWarnings("rawtypes")
public static Job prepareJob(Path inputPath, Path outputPath, Class<? extends InputFormat> inputFormat,
        Class<? extends Mapper> mapper, Class<? extends Writable> mapperKey,
        Class<? extends Writable> mapperValue, Class<? extends Reducer> reducer,
        Class<? extends Writable> reducerKey, Class<? extends Writable> reducerValue,
        Class<? extends OutputFormat> outputFormat) throws IOException {
    Job job = new Job(new Configuration());

    Configuration jobConf = job.getConfiguration();

    if (reducer.equals(Reducer.class)) {
        if (mapper.equals(Mapper.class)) {
            throw new IllegalStateException("Can't figure out the user class jar file from mapper/reducer");
        }
        job.setJarByClass(mapper);
    } else {
        job.setJarByClass(reducer);
    }

    job.setInputFormatClass(inputFormat);
    jobConf.set("mapred.input.dir", inputPath.toString());

    job.setMapperClass(mapper);
    if (mapperKey != null) {
        job.setMapOutputKeyClass(mapperKey);
    }
    if (mapperValue != null) {
        job.setMapOutputValueClass(mapperValue);
    }

    jobConf.setBoolean("mapred.compress.map.output", true);

    job.setReducerClass(reducer);
    job.setOutputKeyClass(reducerKey);
    job.setOutputValueClass(reducerValue);

    job.setOutputFormatClass(outputFormat);
    jobConf.set("mapred.output.dir", outputPath.toString());

    return job;
}

From source file: be.ugent.intec.halvade.HalvadeResourceManager.java

License: Open Source License

public static void setJobResources(HalvadeOptions opt, Configuration conf, int type, boolean subtractAM,
        boolean BAMinput) throws InterruptedException {
    int tmpmem = (int) (opt.mem * 1024);
    int tmpvcores = opt.vcores;

    BAMinput = BAMinput && type < 3;
    int mmem = RESOURCE_REQ[BAMinput ? 3 : type][0];
    int rmem = RESOURCE_REQ[type][1] == ALL ? tmpmem - MEM_AM : RESOURCE_REQ[type][1];
    if (opt.overrideMem > 0 && type != COMBINE) {
        if (!BAMinput)
            mmem = opt.overrideMem;
        rmem = opt.overrideMem;
    }
    if (mmem > opt.mem * 1024 || rmem > opt.mem * 1024)
        throw new InterruptedException("Not enough memory available on system; memory requirements: "
                + opt.mem * 1024 + "/" + Math.max(rmem, mmem));
    if (opt.setMapContainers)
        opt.mapContainersPerNode = Math.min(tmpvcores, Math.max(tmpmem / mmem, 1));
    if (opt.setReduceContainers)
        opt.reducerContainersPerNode = Math.min(tmpvcores, Math.max(tmpmem / rmem, 1));

    opt.maps = Math.max(1, opt.nodes * opt.mapContainersPerNode);
    Logger.DEBUG("set # map containers: " + opt.maps);
    HalvadeConf.setMapContainerCount(conf, opt.maps);
    HalvadeConf.setVcores(conf, opt.vcores);
    opt.mthreads = Math.max(1, tmpvcores / opt.mapContainersPerNode);
    opt.rthreads = Math.max(1, tmpvcores / opt.reducerContainersPerNode);
    if (opt.smtEnabled) {
        opt.mthreads *= 2;
        opt.rthreads *= 2;
    }

    Logger.DEBUG("resources set to " + opt.mapContainersPerNode + " maps [" + opt.mthreads + " cpu , " + mmem
            + " mb] per node and " + opt.reducerContainersPerNode + " reducers [" + opt.rthreads + " cpu, "
            + rmem + " mb] per node");

    conf.set("mapreduce.map.cpu.vcores", "" + opt.mthreads);
    conf.set("mapreduce.map.memory.mb", "" + mmem);
    if (subtractAM)
        conf.set("mapreduce.reduce.cpu.vcores", "" + (opt.rthreads - VCORES_AM));
    else
        conf.set("mapreduce.reduce.cpu.vcores", "" + opt.rthreads);
    conf.set("mapreduce.reduce.memory.mb", "" + rmem);
    conf.set("mapreduce.reduce.java.opts", "-Xmx" + (int) (0.7 * rmem) + "m");
    conf.set("mapreduce.map.java.opts", "-Xmx" + (int) (0.8 * mmem) + "m");
    //                + " -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:/tmp/halvade/@taskid@.gc"); 
    conf.set("mapreduce.job.reduce.slowstart.completedmaps", "0.99");

    HalvadeConf.setMapThreads(conf, opt.mthreads);
    HalvadeConf.setReducerThreads(conf, opt.rthreads);
}

From source file: be.ugent.intec.halvade.utils.HalvadeConf.java

License: Open Source License

public static void setJava(Configuration conf, String val) {
    conf.set(java, val);
}

From source file: be.ugent.intec.halvade.utils.HalvadeConf.java

License: Open Source License

public static void setFilterDBSnp(Configuration conf, boolean filter) {
    if (filter)
        conf.set(filterDBSnp, "true");
    else
        conf.set(filterDBSnp, "false");
}

From source file: be.ugent.intec.halvade.utils.HalvadeConf.java

License: Open Source License

public static void setUseUnifiedGenotyper(Configuration conf, boolean use) {
    if (use)
        conf.set(useGenotyper, "true");
    else
        conf.set(useGenotyper, "false");
}

From source file: be.ugent.intec.halvade.utils.HalvadeConf.java

License: Open Source License

public static void setKeepFiles(Configuration conf, boolean use) {
    if (use)
        conf.set(keepFiles, "true");
    else
        conf.set(keepFiles, "false");
}