Example usage for org.apache.hadoop.conf Configuration setFloat

List of usage examples for org.apache.hadoop.conf Configuration setFloat

Introduction

This page shows example usages of org.apache.hadoop.conf.Configuration.setFloat.

Prototype

public void setFloat(String name, float value) 

Document

Set the value of the name property to a float.
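
A minimal sketch of the round trip with its getFloat counterpart; the property name "example.sampling.rate" and the values below are placeholders, not keys defined by Hadoop itself:

Configuration conf = new Configuration();
conf.setFloat("example.sampling.rate", 0.25f);
// getFloat returns the stored value, or the supplied default if the key is absent
float rate = conf.getFloat("example.sampling.rate", 1.0f); // 0.25f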

Usage

From source file:com.kakao.hbase.manager.command.BalanceFactor.java

License:Apache License

public void setConf(Configuration conf) {
    for (BalanceFactor balanceFactor : BalanceFactor.values()) {
        if (balanceFactor != EMPTY) {
            conf.setFloat(balanceFactor.getConfKey(), balanceFactor == this ? WEIGHT_HIGH : WEIGHT_LOW);
        }
    }
}

From source file:com.kakao.hbase.specific.CommandAdapter.java

License:Apache License

public static List<RegionPlan> makePlan(HBaseAdmin admin, List<RegionPlan> newRegionPlan) throws IOException {
    // snapshot current region assignment
    Map<HRegionInfo, ServerName> regionAssignmentMap = createRegionAssignmentMap(admin);

    // update with new plan
    for (RegionPlan regionPlan : newRegionPlan) {
        regionAssignmentMap.put(regionPlan.getRegionInfo(), regionPlan.getDestination());
    }

    Map<ServerName, List<HRegionInfo>> clusterState = initializeRegionMap(admin);
    for (Map.Entry<HRegionInfo, ServerName> entry : regionAssignmentMap.entrySet())
        clusterState.get(entry.getValue()).add(entry.getKey());

    StochasticLoadBalancer balancer = new StochasticLoadBalancer();
    Configuration conf = admin.getConfiguration();
    conf.setFloat("hbase.regions.slop", 0.2f);
    balancer.setConf(conf);
    return balancer.balanceCluster(clusterState);
}

From source file:com.ML_Hadoop.MultipleLinearRegression.MultipleLinearRegressionMapReduce.java

public static void main(String[] args) throws Exception {
    String[] theta;
    int iteration = 0, num_of_iteration = 1;
    int feature_size = 0, input_data_size = 0;
    FileSystem fs;
    Float alpha = 0.1f;

    do {
        Configuration conf = new Configuration();
        fs = FileSystem.get(conf);

        Job job = new Job(conf, "LinearRegressionMapReduce");
        job.setJarByClass(MultipleLinearRegressionMapReduce.class);

        // re-fetch the Job's own Configuration so the values set below (including "theta") propagate to the tasks
        conf = job.getConfiguration();

        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(FloatWritable.class);

        job.setMapperClass(MultipleLinearRegressionMap.class);
        job.setReducerClass(MultipleLinearRegressionReduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        job.setNumReduceTasks(1); // set mapred.reduce.tasks = 1 (only one reducer)

        FileInputFormat.addInputPath(job, new Path(args[0]));
        Path out = new Path(args[1]);
        if (fs.exists(out))
            fs.delete(out, true);

        FileOutputFormat.setOutputPath(job, out);
        alpha = Float.parseFloat(args[2]);
        num_of_iteration = Integer.parseInt(args[3]);
        feature_size = Integer.parseInt(args[4]);
        input_data_size = Integer.parseInt(args[5]);
        conf.setFloat("alpha", alpha);
        conf.setInt("feature_size", feature_size);
        conf.setInt("input_data_size", input_data_size);
        conf.setInt("iteration", iteration);

        theta = new String[feature_size];

        if (iteration == 0) { // first iteration
            for (int i = 0; i < theta.length; i++)
                theta[i] = "0.0";
            conf.setStrings("theta", theta);
        } else {
            try {
                String uri = "/user/hduser/theta.txt";
                fs = FileSystem.get(conf);
                //FSDataInputStream in = fs.open(new Path(uri));
                BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(new Path(uri))));
                theta = br.readLine().split(",");
            } catch (Exception e) {
                // if theta.txt cannot be read, the theta entries remain null
            }
            conf.setStrings("theta", theta);
        }

        for (int i = 0; i < theta.length; i++)
            System.out.println("In MapRedce main function: theta[ " + i + " ]" + theta[i]);

        try {
            job.waitForCompletion(true);
            iteration++;
        } catch (IOException e) {
            e.printStackTrace();
        }
    } while (iteration < num_of_iteration);

}

From source file:com.mongodb.hadoop.util.MapredMongoConfigUtil.java

License:Apache License

public static Configuration buildConfiguration(final Map<String, Object> data) {
    Configuration newConf = new Configuration();
    for (Map.Entry<String, Object> entry : data.entrySet()) {
        String key = entry.getKey();
        Object val = entry.getValue();
        if (val instanceof String) {
            newConf.set(key, (String) val);
        } else if (val instanceof Boolean) {
            newConf.setBoolean(key, (Boolean) val);
        } else if (val instanceof Integer) {
            newConf.setInt(key, (Integer) val);
        } else if (val instanceof Float) {
            newConf.setFloat(key, (Float) val);
        } else if (val instanceof DBObject) {
            setDBObject(newConf, key, (DBObject) val);
        } else {
            throw new RuntimeException("can't convert " + val.getClass() + " into any type for Configuration");
        }
    }
    return newConf;
}

From source file:com.moz.fiji.hadoop.configurator.TestHadoopConfigurator.java

License:Apache License

@Test
public void testConfigure() {
    Configuration conf = new Configuration();
    conf.setBoolean("my.boolean.value", true);
    conf.setFloat("my.float.value", 3.1f);
    conf.setFloat("my.double.value", 1.9f);
    conf.setInt("my.int.value", 12);
    conf.set("my.string.value", "bar");
    conf.setStrings("my.string.collection", "apple", "banana");
    conf.setStrings("my.string.array", "red", "green", "blue");
    conf.setBoolean("your.boolean.value", true);
    conf.setFloat("your.float.value", 1.0f);
    conf.setFloat("your.double.value", 2.0f);
    conf.setInt("your.int.value", 1);
    conf.setLong("your.long.value", 2L);
    conf.set("your.string.value", "asdf");

    MyConfiguredClass instance = ReflectionUtils.newInstance(MyConfiguredClass.class, conf);
    assertEquals(true, instance.getBooleanValue());
    assertEquals(3.1f, instance.getFloatValue(), 1e-6f);
    assertEquals(1.9, instance.getDoubleValue(), 1e-6);
    assertEquals(12, instance.getIntValue());
    assertEquals(456L, instance.getLongValue());
    assertEquals("bar", instance.getStringValue());
    assertEquals(true, instance.getYourBoolean());
    assertEquals(1.0f, instance.getYourFloat(), 1e-6f);
    assertEquals(2.0, instance.getYourDouble(), 1e-6);
    assertEquals(1, instance.getYourInt());
    assertEquals(2L, instance.getYourLong());
}

From source file:com.neusoft.hbase.test.hadoop.dataload.HFileOutputFormat2.java

License:Apache License

static <V extends Cell> RecordWriter<ImmutableBytesWritable, V> createRecordWriter(
        final TaskAttemptContext context) throws IOException, InterruptedException {
    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    final Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config.  Add to hbase-*.xml if other than default compression.
    final String defaultCompression = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());
    final boolean compactionExclude = conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
            false);

    // create a map from column family to the compression algorithm
    final Map<byte[], String> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], String> bloomTypeMap = createFamilyBloomMap(conf);
    final Map<byte[], String> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_CONF_KEY);
    final HFileDataBlockEncoder encoder;
    if (dataBlockEncodingStr == null) {
        encoder = NoOpDataBlockEncoder.INSTANCE;
    } else {
        try {
            encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.valueOf(dataBlockEncodingStr));
        } catch (IllegalArgumentException ex) {
            throw new RuntimeException("Invalid data block encoding type configured for the param "
                    + DATABLOCK_ENCODING_CONF_KEY + " : " + dataBlockEncodingStr);
        }
    }

    return new RecordWriter<ImmutableBytesWritable, V>() {
        // Map of families to writers and how much has been output on the writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<byte[], WriterLength>(
                Bytes.BYTES_COMPARATOR);
        private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY;
        private final byte[] now = Bytes.toBytes(System.currentTimeMillis());
        private boolean rollRequested = false;

        public void write(ImmutableBytesWritable row, V cell) throws IOException {
            KeyValue kv = KeyValueUtil.ensureKeyValue(cell);

            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters();
                return;
            }

            byte[] rowKey = kv.getRow();
            long length = kv.getLength();
            byte[] family = kv.getFamily();
            WriterLength wl = this.writers.get(family);

            // If this is a new column family, verify that the directory exists
            if (wl == null) {
                fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
            }

            // If any of the HFiles for the column families has reached
            // maxsize, we need to roll all the writers
            if (wl != null && wl.written + length >= maxsize) {
                this.rollRequested = true;
            }

            // This can only happen once a row is finished though
            if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
                rollWriters();
            }

            // create a new HLog writer, if necessary
            if (wl == null || wl.writer == null) {
                wl = getNewWriter(family, conf);
            }

            // we now have the proper HLog writer. full steam ahead
            kv.updateLatestStamp(this.now);
            wl.writer.append(kv);
            wl.written += length;

            // Copy the row so we know when a row transitions.
            this.previousRow = rowKey;
        }

        private void rollWriters() throws IOException {
            for (WriterLength wl : this.writers.values()) {
                if (wl.writer != null) {
                    LOG.info("Writer=" + wl.writer.getPath()
                            + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                    close(wl.writer);
                }
                wl.writer = null;
                wl.written = 0;
            }
            this.rollRequested = false;
        }

        /* Create a new StoreFile.Writer.
         * @param family
         * @return A WriterLength, containing a new StoreFile.Writer.
         * @throws IOException
         */
        private WriterLength getNewWriter(byte[] family, Configuration conf) throws IOException {
            WriterLength wl = new WriterLength();
            Path familydir = new Path(outputdir, Bytes.toString(family));
            String compression = compressionMap.get(family);
            compression = compression == null ? defaultCompression : compression;
            String bloomTypeStr = bloomTypeMap.get(family);
            BloomType bloomType = BloomType.NONE;
            if (bloomTypeStr != null) {
                bloomType = BloomType.valueOf(bloomTypeStr);
            }
            String blockSizeString = blockSizeMap.get(family);
            int blockSize = blockSizeString == null ? HConstants.DEFAULT_BLOCKSIZE
                    : Integer.parseInt(blockSizeString);
            Configuration tempConf = new Configuration(conf);
            tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
            wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
                    .withOutputDir(familydir).withBloomType(bloomType).withComparator(KeyValue.COMPARATOR)
                    .build();

            this.writers.put(family, wl);
            return wl;
        }

        private void close(final StoreFile.Writer w) throws IOException {
            if (w != null) {
                w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
                w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
                        Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
                w.appendTrackedTimestampsToMetadata();
                w.close();
            }
        }

        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }
    };
}

From source file:com.neusoft.hbase.test.hadoop.dataload.HFileOutputFormatBase.java

License:Apache License

public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter( // V/Cell specialized to KeyValue
        // adapted from: static <V extends Cell> RecordWriter<ImmutableBytesWritable, V> createRecordWriter()
        // renamed to getRecordWriter()
        final TaskAttemptContext context) throws IOException, InterruptedException {

    // Get the path of the temporary output file
    final Path outputPath = FileOutputFormat.getOutputPath(context);
    final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
    final Path ignoreOutputPath = getDeleteRowKeyFile(outputPath);

    final Configuration conf = context.getConfiguration();
    final FileSystem fs = outputdir.getFileSystem(conf);
    // These configs. are from hbase-*.xml
    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config. Add to hbase-*.xml if other than default
    // compression.
    final String defaultCompression = conf.get("hfile.compression", Compression.Algorithm.NONE.getName());
    final boolean compactionExclude = conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
            false);

    // create a map from column family to the compression algorithm
    final Map<byte[], String> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], String> bloomTypeMap = createFamilyBloomMap(conf);
    final Map<byte[], String> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_CONF_KEY);
    final HFileDataBlockEncoder encoder;
    if (dataBlockEncodingStr == null) {
        encoder = NoOpDataBlockEncoder.INSTANCE;
    } else {
        try {
            encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.valueOf(dataBlockEncodingStr));
        } catch (IllegalArgumentException ex) {
            throw new RuntimeException("Invalid data block encoding type configured for the param "
                    + DATABLOCK_ENCODING_CONF_KEY + " : " + dataBlockEncodingStr);
        }
    }

    return new RecordWriter<ImmutableBytesWritable, KeyValue>() { // V -> KeyValue

        // Map of families to writers and how much has been output on the
        // writer.
        private final Map<byte[], WriterLength> writers = new TreeMap<byte[], WriterLength>(
                Bytes.BYTES_COMPARATOR);
        private final FSDataOutputStream dos = fs.create(ignoreOutputPath);
        private byte[] previousRow = HConstants.EMPTY_BYTE_ARRAY;
        private final byte[] now = Bytes.toBytes(System.currentTimeMillis());
        private boolean rollRequested = false;

        public void write(ImmutableBytesWritable row, KeyValue kv) // parameter was "V cell"; now a KeyValue
                throws IOException {
            // KeyValue kv = KeyValueUtil.ensureKeyValue(cell);

            // null input == user explicitly wants to flush
            if (row == null && kv == null) {
                rollWriters();
                return;
            }

            byte[] rowKey = kv.getRow();
            long length = kv.getLength();
            byte[] family = kv.getFamily();

            if (ignore(kv)) { // row keys flagged by ignore() are written to the side file and skipped
                byte[] readBuf = rowKey;
                dos.write(readBuf, 0, readBuf.length);
                dos.write(Bytes.toBytes("\n"));
                return;
            }

            WriterLength wl = this.writers.get(family);

            // If this is a new column family, verify that the directory
            // exists
            if (wl == null) {
                fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
            }

            // If any of the HFiles for the column families has reached
            // maxsize, we need to roll all the writers
            if (wl != null && wl.written + length >= maxsize) {
                this.rollRequested = true;
            }

            // This can only happen once a row is finished though
            if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
                rollWriters();
            }

            // create a new HLog writer, if necessary
            if (wl == null || wl.writer == null) {
                wl = getNewWriter(family, conf);
            }

            // we now have the proper HLog writer. full steam ahead
            kv.updateLatestStamp(this.now);
            wl.writer.append(kv);
            wl.written += length;

            // Copy the row so we know when a row transitions.
            this.previousRow = rowKey;
        }

        private void rollWriters() throws IOException {
            for (WriterLength wl : this.writers.values()) {
                if (wl.writer != null) {
                    LOG.info("Writer=" + wl.writer.getPath()
                            + ((wl.written == 0) ? "" : ", wrote=" + wl.written));
                    close(wl.writer);
                }
                wl.writer = null;
                wl.written = 0;
            }
            this.rollRequested = false;
        }

        /*
         * Create a new StoreFile.Writer.
         * 
         * @param family
         * 
         * @return A WriterLength, containing a new StoreFile.Writer.
         * 
         * @throws IOException
         */
        private WriterLength getNewWriter(byte[] family, Configuration conf) throws IOException {
            WriterLength wl = new WriterLength();
            Path familydir = new Path(outputdir, Bytes.toString(family));
            String compression = compressionMap.get(family);
            compression = compression == null ? defaultCompression : compression;
            String bloomTypeStr = bloomTypeMap.get(family);
            BloomType bloomType = BloomType.NONE;
            if (bloomTypeStr != null) {
                bloomType = BloomType.valueOf(bloomTypeStr);
            }
            String blockSizeString = blockSizeMap.get(family);
            int blockSize = blockSizeString == null ? HConstants.DEFAULT_BLOCKSIZE
                    : Integer.parseInt(blockSizeString);
            Configuration tempConf = new Configuration(conf);
            tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
            wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
                    .withOutputDir(familydir).withBloomType(bloomType).withComparator(KeyValue.COMPARATOR)
                    .build();

            this.writers.put(family, wl);
            return wl;
        }

        private void close(final StoreFile.Writer w) throws IOException {
            if (w != null) {
                w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
                w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
                        Bytes.toBytes(context.getTaskAttemptID().toString()));
                w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, Bytes.toBytes(true));
                w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude));
                w.appendTrackedTimestampsToMetadata();
                w.close();
            }
        }

        public void close(TaskAttemptContext c) throws IOException, InterruptedException {
            dos.flush();
            dos.close();
            for (WriterLength wl : this.writers.values()) {
                close(wl.writer);
            }
        }

    };
}

From source file:com.splicemachine.derby.stream.function.HFileGenerationFunction.java

License:Open Source License

private StoreFile.Writer getNewWriter(Configuration conf, Path familyPath) throws IOException {
    Compression.Algorithm compression = Compression.getCompressionAlgorithmByName(compressionAlgorithm);
    BloomType bloomType = BloomType.ROW;
    Integer blockSize = HConstants.DEFAULT_BLOCKSIZE;
    DataBlockEncoding encoding = DataBlockEncoding.NONE;
    Configuration tempConf = new Configuration(conf);
    tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
    HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression)
            .withChecksumType(HStore.getChecksumType(conf))
            .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blockSize);

    if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
        contextBuilder.withIncludesTags(true);
    }

    contextBuilder.withDataBlockEncoding(encoding);
    HFileContext hFileContext = contextBuilder.build();
    try {
        return new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs).withOutputDir(familyPath)
                .withBloomType(bloomType).withComparator(KeyValue.COMPARATOR).withFileContext(hFileContext)
                .build();
    } catch (Exception e) {
        throw new IOException(e);
    }
}

From source file:com.splicemachine.orc.OrcConf.java

License:Open Source License

public static void setFloatVar(Configuration conf, OrcConf.ConfVars var, float val) {
    conf.setFloat(var.varname, val);
}

From source file:com.splicemachine.test.SpliceTestPlatformConfig.java

License:Apache License

public static Configuration create(String hbaseRootDirUri, Integer masterPort, Integer masterInfoPort,
        Integer regionServerPort, Integer regionServerInfoPort, Integer derbyPort, boolean failTasksRandomly) {

    Configuration config = HConfiguration.unwrapDelegate();

    config.set(SQLConfiguration.STORAGE_FACTORY_HOME, hbaseRootDirUri);

    //
    // Coprocessors
    //
    config.set("hbase.coprocessor.regionserver.classes", getRegionServerCoprocessorsAsString());
    config.set("hbase.coprocessor.region.classes", getRegionCoprocessorsAsString());
    config.set("hbase.coprocessor.master.classes", getMasterCoprocessorsAsString());

    //
    // Networking
    //
    config.set("hbase.zookeeper.quorum", "127.0.0.1:2181");
    config.setInt("hbase.master.port", masterPort);
    config.setInt("hbase.master.info.port", masterInfoPort);
    config.setInt("hbase.regionserver.port", regionServerPort);
    config.setInt("hbase.regionserver.info.port", regionServerInfoPort);
    config.setInt("hbase.master.jmx.port", HConfiguration.DEFAULT_JMX_BIND_PORT); // this is set because the HBase master and regionserver are running on the same machine and in the same JVM
    config.setInt(SQLConfiguration.NETWORK_BIND_PORT, derbyPort);
    config.setClass(DefaultStoreEngine.DEFAULT_COMPACTOR_CLASS_KEY, SpliceDefaultCompactor.class,
            Compactor.class);
    // config.setClass(ConsistencyControlUtils.MVCC_IMPL, SIMultiVersionConsistencyControl.class, ConsistencyControl.class);
    config.setClass(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY, SpliceDefaultCompactionPolicy.class,
            CompactionPolicy.class);

    //
    // Networking -- interfaces
    //
    // force use of loop back interface on MacOSX, else don't set it
    //        if (System.getProperty("os.name").contains("Mac") ) {
    //            String interfaceName = "lo0";
    //            config.set("hbase.zookeeper.dns.interface", interfaceName);
    //            config.set("hbase.master.dns.interface", interfaceName);
    //            config.set("hbase.regionserver.dns.interface", interfaceName);
    //        }

    //
    // File System
    //
    config.set("fs.defaultFS", "file:///"); // MapR Hack, tells it local filesystem // fs.default.name is deprecated
    config.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    config.setDouble("yarn.nodemanager.resource.io-spindles", 2.0);
    config.set("fs.default.name", "file:///");
    config.set("yarn.nodemanager.container-executor.class",
            "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor");

    // Must allow Cygwin instance to config its own rootURI
    if (!"CYGWIN".equals(hbaseRootDirUri)) {
        config.set("hbase.rootdir", hbaseRootDirUri);
    }

    //
    // Threads, timeouts
    //
    config.setLong("hbase.rpc.timeout", MINUTES.toMillis(2));
    config.setLong("hbase.client.scanner.timeout.period", MINUTES.toMillis(2)); // hbase.regionserver.lease.period is deprecated
    config.setLong("hbase.client.operation.timeout", MINUTES.toMillis(2));
    config.setLong("hbase.regionserver.handler.count", 200);
    config.setLong("hbase.regionserver.msginterval", 1000);
    config.setLong("hbase.master.event.waiting.time", 20);
    config.setLong("hbase.master.lease.thread.wakefrequency", SECONDS.toMillis(3));
    //        config.setBoolean("hbase.master.loadbalance.bytable",true);
    config.setInt("hbase.balancer.period", 5000);

    config.setLong("hbase.server.thread.wakefrequency", SECONDS.toMillis(1));
    config.setLong("hbase.client.pause", 100);

    //
    // Compaction Controls
    //
    config.setLong("hbase.hstore.compaction.min", 5); // min number of eligible files before we compact
    config.setLong("hbase.hstore.compaction.max", 10); // max files to be selected for a single minor compaction
    config.setLong("hbase.hstore.compaction.min.size", 16 * MiB); // store files smaller than this will always be eligible for minor compaction.  HFiles this size or larger are evaluated by hbase.hstore.compaction.ratio to determine if they are eligible
    config.setLong("hbase.hstore.compaction.max.size", 248 * MiB); // store files larger than this will be excluded from compaction
    config.setFloat("hbase.hstore.compaction.ratio", 1.25f); // default is 1.2f, at one point we had this set to 0.25f and 25f (which was likely a typo)

    //
    // Memstore, store files, splits
    //
    config.setLong(HConstants.HREGION_MAX_FILESIZE, 32 * MiB); // hbase.hregion.max.filesize
    config.setLong("hbase.hregion.memstore.flush.size", 128 * MiB); // was 512 MiB
    config.setLong("hbase.hregion.memstore.block.multiplier", 4);
    config.setFloat("hbase.regionserver.global.memstore.size", 0.25f); // set mem store to 25% of heap
    config.setLong("hbase.hstore.blockingStoreFiles", 20);
    //        config.set("hbase.regionserver.region.split.policy", "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy"); // change default split policy.  this makes more sense for a standalone/single regionserver

    // Support SI
    //config.setClass(HConstants.MVCC_IMPL, SIMultiVersionConsistencyControl.class, ConsistencyControl.class);

    //
    // HFile
    //
    config.setInt("hfile.index.block.max.size", 16 * 1024); // 16KiB
    config.setFloat("hfile.block.cache.size", 0.25f); // set block cache to 25% of heap
    config.setFloat("io.hfile.bloom.error.rate", (float) 0.005);
    config.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, true); // hfile.block.bloom.cacheonwrite
    //config.set("hbase.master.hfilecleaner.plugins", getHFileCleanerAsString());
    config.set("hbase.master.hfilecleaner.plugins", getHFileCleanerAsString());
    //
    // Misc
    //
    config.set("hbase.cluster.distributed", "true"); // don't start zookeeper for us
    config.set("hbase.master.distributed.log.splitting", "false"); // TODO: explain why we are setting this

    // AWS Credentials for test...
    //

    config.set(ACCESS_KEY, "AKIAJ6HBMCK5ALHVBFPQ");
    config.set(SECRET_KEY, "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");

    //
    // Splice
    //

    config.setLong("splice.ddl.drainingWait.maximum", SECONDS.toMillis(15)); // wait 15 seconds before bailing on bad ddl statements
    config.setLong("splice.ddl.maxWaitSeconds", 120000);
    //
    // Snapshots
    //
    config.setBoolean("hbase.snapshot.enabled", true);

    HConfiguration.reloadConfiguration(config);
    return HConfiguration.unwrapDelegate();
}