List of usage examples for org.apache.hadoop.conf Configuration getFloat
public float getFloat(String name, float defaultValue)
Gets the value of the name property as a float. If no such property exists, defaultValue is returned; if the property is set but cannot be parsed as a float, a NumberFormatException is thrown.
Parameter: name - the property name.
Parameter: defaultValue - the value to return when the property is unset.
Returns: the property value as a float, or defaultValue.
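Before the project examples below, a minimal sketch of the call itself (the key names and values here are hypothetical, chosen only to show the default-fallback behavior):

    import org.apache.hadoop.conf.Configuration;

    public class GetFloatDemo {
        public static void main(String[] args) {
            Configuration conf = new Configuration(false); // skip loading default resources
            conf.setFloat("demo.sample.ratio", 0.25f);     // hypothetical key

            // Key is set: the stored value is returned.
            float ratio = conf.getFloat("demo.sample.ratio", 0.1f);   // 0.25f

            // Key is unset: the supplied default is returned instead.
            float missing = conf.getFloat("demo.other.ratio", 0.1f);  // 0.1f

            System.out.println(ratio + " " + missing);
        }
    }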
From source file:com.koda.integ.hbase.util.ConfigHelper.java
License:Open Source License
    /**
     * Load disk store cfg.
     *
     * @param cfg the cfg
     * @param value the value
     * @return the disk store configuration
     */
    private static DiskStoreConfiguration loadDiskStoreCfg(Configuration cfg, String value) {
        DiskStoreConfiguration diskCfg = null;
        PersistenceMode mode = PersistenceMode.valueOf(value);
        switch (mode) {
        case ONDEMAND:
        case SNAPSHOT:
            diskCfg = new RawFSConfiguration();
            diskCfg.setDiskStoreImplementation(RawFSStore.class);
            diskCfg.setStoreClassName(RawFSStore.class.getName());
            break;
        // case WRITE_AROUND:
        // case WRITE_BEHIND:
        // case WRITE_THROUGH:
        //     diskCfg = new LevelDBConfiguration();
        //     diskCfg.setDiskStoreImplementation(LevelDBStore.class);
        //     diskCfg.setStoreClassName(LevelDBStore.class.getName());
        //     break;
        }
        diskCfg.setPersistenceMode(mode);
        String val = cfg.get(DiskStoreConfiguration.DATA_DIRS);
        if (val == null) {
            LOG.fatal("'" + DiskStoreConfiguration.DATA_DIRS + "' is not specified. Aborted.");
            throw new RuntimeException("'" + DiskStoreConfiguration.DATA_DIRS + "' is not specified. Aborted.");
        }
        diskCfg.setDbDataStoreRoots(val.split(","));
        // DB name / subfolder in a root
        diskCfg.setStoreName(cfg.get(DiskStoreConfiguration.NAME, "default"));
        diskCfg.setDbSnapshotInterval(cfg.getLong(DiskStoreConfiguration.SNAPSHOT_INTERVAL, 3600000));
        diskCfg.setDbCompressionType(
                CodecType.valueOf(cfg.get(DiskStoreConfiguration.COMPRESSION, "none").toUpperCase()));
        diskCfg.setDiskStoreMaxSize(cfg.getLong(DiskStoreConfiguration.STORE_SIZE_LIMIT, 0));
        diskCfg.setDiskStoreEvictionPolicy(
                EvictionPolicy.valueOf(cfg.get(DiskStoreConfiguration.EVICTION_POLICY, "none")));
        diskCfg.setDiskStoreEvictionHighWatermark(
                cfg.getFloat(DiskStoreConfiguration.EVICTION_HIGHWATERMARK, 0.98f));
        diskCfg.setDiskStoreEvictionLowWatermak(cfg.getFloat(DiskStoreConfiguration.EVICTION_LOWWATERMARK, 0.95f));
        diskCfg = loadSpecific(cfg, diskCfg);
        return diskCfg;
    }
From source file:com.mortardata.pig.storage.DynamoDBStorage.java
License:Apache License
    /**
     * FRONTEND and BACKEND
     **/
    @Override
    public void setStoreLocation(String location, Job job) throws IOException {
        this.hadoopJobInfo = loadHadoopJobInfo(job);
        Configuration conf = this.hadoopJobInfo.getJobConfiguration();
        this.maxRetryWaitMilliseconds = conf.getLong(MAX_RETRY_WAIT_MILLISECONDS_PROPERTY,
                MAX_RETRY_WAIT_MILLISECONDS_DEFAULT);
        this.maxNumRetriesPerBatchWrite = conf.getInt(MAX_NUM_RETRIES_PER_BATCH_WRITE_PROPERTY,
                MAX_NUM_RETRIES_PER_BATCH_WRITE);
        this.throughputWritePercent = new Float(
                conf.getFloat(THROUGHPUT_WRITE_PERCENT_PROPERTY, THROUGHPUT_WRITE_PERCENT_DEFAULT)).doubleValue();
        if (this.throughputWritePercent < 0.1 || this.throughputWritePercent > 1.5) {
            throw new IOException(THROUGHPUT_WRITE_PERCENT_PROPERTY + " must be between 0.1 and 1.5. Got: "
                    + this.throughputWritePercent);
        }
        this.minBatchSize = conf.getInt(MINIMUM_BATCH_SIZE_PROPERTY, MINIMUM_BATCH_SIZE_DEFAULT);
        if (this.minBatchSize < 1 || this.minBatchSize > DYNAMO_MAX_ITEMS_IN_BATCH_WRITE_REQUEST) {
            throw new IOException(MINIMUM_BATCH_SIZE_PROPERTY + " must be between 1 and "
                    + DYNAMO_MAX_ITEMS_IN_BATCH_WRITE_REQUEST + ". Got: " + this.minBatchSize);
        }
    }
From source file:com.moz.fiji.hadoop.configurator.ConfigurationMethod.java
License:Apache License
    /**
     * Calls an object's method with the value read from a Configuration instance.
     *
     * @param instance The object to populate.
     * @param conf The configuration to read from.
     * @throws IllegalAccessException If the method cannot be called on the object.
     * @throws HadoopConfigurationException If there is a problem with the annotation definition.
     */
    public void call(Object instance, Configuration conf) throws IllegalAccessException {
        final String key = getKey();
        if (null == key) {
            throw new HadoopConfigurationException("Missing 'key' attribute of @HadoopConf on "
                    + instance.getClass().getName() + "." + mMethod.getName());
        }
        if (!mMethod.isAccessible()) {
            mMethod.setAccessible(true);
        }
        final Class<?>[] parameterTypes = mMethod.getParameterTypes();
        if (1 != parameterTypes.length) {
            throw new HadoopConfigurationException(
                    "Methods annotated with @HadoopConf must have exactly one parameter: "
                            + instance.getClass().getName() + "." + mMethod.getName());
        }
        final Class<?> parameterType = parameterTypes[0];
        try {
            try {
                if (boolean.class == parameterType) {
                    mMethod.invoke(instance, conf.getBoolean(key, Boolean.parseBoolean(getDefault())));
                } else if (float.class == parameterType) {
                    mMethod.invoke(instance, conf.getFloat(key, Float.parseFloat(getDefault())));
                } else if (double.class == parameterType) {
                    // double parameters are also read via getFloat(); Method.invoke
                    // widens the float result to double, at the cost of float precision.
                    mMethod.invoke(instance, conf.getFloat(key, Float.parseFloat(getDefault())));
                } else if (int.class == parameterType) {
                    mMethod.invoke(instance, conf.getInt(key, Integer.parseInt(getDefault())));
                } else if (long.class == parameterType) {
                    mMethod.invoke(instance, conf.getLong(key, Long.parseLong(getDefault())));
                } else if (parameterType.isAssignableFrom(String.class)) {
                    mMethod.invoke(instance, conf.get(key, getDefault()));
                } else if (parameterType.isAssignableFrom(Collection.class)) {
                    mMethod.invoke(instance, conf.getStringCollection(key));
                } else if (String[].class == parameterType) {
                    mMethod.invoke(instance, new Object[] { conf.getStrings(key) });
                } else {
                    throw new HadoopConfigurationException(
                            "Unsupported method parameter type annotated by @HadoopConf: "
                                    + instance.getClass().getName() + "." + mMethod.getName());
                }
            } catch (NumberFormatException e) {
                mMethod.invoke(instance, getDefault());
            }
        } catch (InvocationTargetException e) {
            throw new HadoopConfigurationException(e);
        }
    }
From source file:com.moz.fiji.hadoop.configurator.ConfigurationVariable.java
License:Apache License
    /**
     * Populates an object's field with the value read from a Configuration instance.
     *
     * @param instance The object to populate.
     * @param conf The configuration to read from.
     * @throws IllegalAccessException If the field cannot be set on the object.
     * @throws HadoopConfigurationException If there is a problem with the annotation definition.
     */
    public void setValue(Object instance, Configuration conf) throws IllegalAccessException {
        final String key = getKey();
        if (null == key) {
            throw new HadoopConfigurationException("Missing 'key' attribute of @HadoopConf on "
                    + instance.getClass().getName() + "." + mField.getName());
        }
        if (null == conf.get(key) && mAnnotation.defaultValue().isEmpty()) {
            // Nothing set in the configuration, and no default value
            // specified. Just leave the field alone.
            return;
        }
        if (!mField.isAccessible()) {
            mField.setAccessible(true);
        }
        try {
            if (boolean.class == mField.getType()) {
                mField.setBoolean(instance, conf.getBoolean(key, getDefaultBoolean(instance)));
            } else if (float.class == mField.getType()) {
                mField.setFloat(instance, conf.getFloat(key, getDefaultFloat(instance)));
            } else if (double.class == mField.getType()) {
                // double fields are also read via getFloat(); the default is narrowed
                // to float for the lookup and the result widened back to double.
                mField.setDouble(instance, conf.getFloat(key, (float) getDefaultDouble(instance)));
            } else if (int.class == mField.getType()) {
                mField.setInt(instance, conf.getInt(key, getDefaultInt(instance)));
            } else if (long.class == mField.getType()) {
                mField.setLong(instance, conf.getLong(key, getDefaultLong(instance)));
            } else if (mField.getType().isAssignableFrom(String.class)) {
                mField.set(instance, conf.get(key, getDefaultString(instance)));
            } else if (mField.getType().isAssignableFrom(Collection.class)) {
                mField.set(instance, conf.getStringCollection(key));
            } else if (String[].class == mField.getType()) {
                mField.set(instance, conf.getStrings(key));
            } else {
                throw new HadoopConfigurationException("Unsupported field type annotated by @HadoopConf: "
                        + instance.getClass().getName() + "." + mField.getName());
            }
        } catch (NumberFormatException e) {
            // That's okay. The default value for the field will be kept.
        }
    }
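The two configurator classes above populate annotated setters and fields by reflection, dispatching to getFloat, getInt, and the other typed getters based on the declared Java type. A minimal sketch of what a consuming class might look like, inferred from the annotation attributes (key, defaultValue) visible in the code; the class and key names are hypothetical:

    public class ScannerSettings {
        @HadoopConf(key = "scanner.cache.fraction", defaultValue = "0.75")
        private float cacheFraction;   // would be populated via conf.getFloat()

        @HadoopConf(key = "scanner.batch.size", defaultValue = "100")
        private int batchSize;         // would be populated via conf.getInt()
    }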
From source file:com.moz.fiji.mapreduce.kvstore.TestKeyValueStoreConfiguration.java
License:Apache License
    @Test
    public void testStoreFloat() {
        Configuration parent = new Configuration(false);
        KeyValueStoreConfiguration isolated = KeyValueStoreConfiguration.createInConfiguration(parent, 0);
        isolated.setFloat("foo-key", 3.14F);
        assertEquals(3.14F, isolated.getFloat("foo-key", 0.0F), 0.0F);
        // Check that this value is stored in the namespace on the parent:
        Configuration delegate = isolated.getDelegate();
        assertEquals(3.14F, delegate.getFloat(KeyValueStoreConfiguration.confKeyAtIndex("foo-key", 0), 0.0F),
                0.0F);
    }
From source file:com.netease.news.classifier.naivebayes.ThetaMapper.java
License:Apache License
    @Override
    protected void setup(Context ctx) throws IOException, InterruptedException {
        super.setup(ctx);
        Configuration conf = ctx.getConfiguration();
        float alphaI = conf.getFloat(ALPHA_I, 1.0f);
        Map<String, Vector> scores = BayesUtils.readScoresFromCache(conf);
        if (conf.getBoolean(TRAIN_COMPLEMENTARY, false)) {
            trainer = new ComplementaryThetaTrainer(scores.get(TrainNaiveBayesJob.WEIGHTS_PER_FEATURE),
                    scores.get(TrainNaiveBayesJob.WEIGHTS_PER_LABEL), alphaI);
        } else {
            trainer = new StandardThetaTrainer(scores.get(TrainNaiveBayesJob.WEIGHTS_PER_FEATURE),
                    scores.get(TrainNaiveBayesJob.WEIGHTS_PER_LABEL), alphaI);
        }
    }
From source file:com.netflix.bdp.s3mper.listing.ConsistentListingAspect.java
License:Apache License
    private void updateConfig(Configuration conf) {
        disabled = conf.getBoolean("s3mper.disable", disabled);
        if (disabled) {
            log.warn("S3mper Consistency explicitly disabled.");
            return;
        }
        darkload = conf.getBoolean("s3mper.darkload", darkload);
        failOnError = conf.getBoolean("s3mper.failOnError", failOnError);
        taskFailOnError = conf.getBoolean("s3mper.task.failOnError", taskFailOnError);
        checkTaskListings = conf.getBoolean("s3mper.listing.task.check", checkTaskListings);
        failOnTimeout = conf.getBoolean("s3mper.failOnTimeout", failOnTimeout);
        delistDeleteMarkedFiles = conf.getBoolean("s3mper.listing.delist.deleted", delistDeleteMarkedFiles);
        trackDirectories = conf.getBoolean("s3mper.listing.directory.tracking", trackDirectories);
        fileThreshold = conf.getFloat("s3mper.listing.threshold", fileThreshold);
        recheckCount = conf.getLong("s3mper.listing.recheck.count", recheckCount);
        recheckPeriod = conf.getLong("s3mper.listing.recheck.period", recheckPeriod);
        taskRecheckCount = conf.getLong("s3mper.listing.task.recheck.count", taskRecheckCount);
        taskRecheckPeriod = conf.getLong("s3mper.listing.task.recheck.period", taskRecheckPeriod);
        statOnMissingFile = conf.getBoolean("s3mper.listing.statOnMissingFile", false);
    }
From source file:com.ricemap.spateDB.core.RTreeGridRecordWriter.java
License:Apache License
    /**
     * Initializes a new RTreeGridRecordWriter.
     * @param fileSystem - of output file
     * @param outDir - output file path
     * @param cells - the cells used to partition the input
     * @param overwrite - whether to overwrite existing files or not
     * @throws IOException
     */
    public RTreeGridRecordWriter(Path outDir, JobConf job, String prefix, CellInfo[] cells, boolean pack,
            boolean expand) throws IOException {
        super(outDir, job, prefix, cells, pack, expand);
        LOG.info("Writing to RTrees");

        // Initialize the counters for each cell
        cellCount = new int[this.cells.length];
        intermediateFileSize = new int[this.cells.length];

        // Determine the size of each RTree to decide when to flush a cell
        Configuration conf = fileSystem.getConf();
        this.fastRTree = conf.get(SpatialSite.RTREE_BUILD_MODE, "fast").equals("fast");
        this.columnarStorage = conf.get(SpatialSite.STORAGE_MODE, "columnar").equals("columnar");
        this.maximumStorageOverhead = (int) (conf.getFloat(SpatialSite.INDEXING_OVERHEAD, 0.1f) * blockSize);
    }
From source file:com.ricemap.spateDB.operations.Repartition.java
License:Apache License
    /**
     * Calculates number of partitions required to index the given file
     * @param inFs
     * @param inFile
     * @param rtree
     * @return
     * @throws IOException
     */
    public static int calculateNumberOfPartitions(Configuration conf, long inFileSize, FileSystem outFs,
            Path outFile, long blockSize) throws IOException {
        final float IndexingOverhead = conf.getFloat(SpatialSite.INDEXING_OVERHEAD, 0.1f);
        long indexedFileSize = (long) (inFileSize * (1 + IndexingOverhead));
        if (blockSize == 0)
            blockSize = outFs.getDefaultBlockSize(outFile);
        return (int) Math.ceil((float) indexedFileSize / blockSize);
    }
From source file:com.splicemachine.orc.OrcConf.java
License:Open Source License
    public static float getFloatVar(Configuration conf, OrcConf.ConfVars var) {
        return conf.getFloat(var.varname, var.defaultFloatVal);
    }
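This helper centralizes float lookups behind an enum of known variables, so each key string and its default live in one place. A minimal sketch of the enum shape such a wrapper depends on; the enum constant, key, and default below are illustrative, not the actual Splice Machine definitions:

    // Illustrative shape of the ConfVars enum the helper reads from.
    enum ConfVars {
        DICTIONARY_KEY_SIZE_THRESHOLD("orc.dictionary.key.threshold", 0.8f);

        final String varname;
        final float defaultFloatVal;

        ConfVars(String varname, float defaultFloatVal) {
            this.varname = varname;
            this.defaultFloatVal = defaultFloatVal;
        }
    }

    // Usage: one line per lookup, with the default kept next to the key definition.
    // float threshold = OrcConf.getFloatVar(conf, OrcConf.ConfVars.DICTIONARY_KEY_SIZE_THRESHOLD);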