List of usage examples for org.apache.hadoop.conf.Configuration#getInt

public int getInt(String name, int defaultValue)

name - the property name
Returns the value of the name property as an int, or defaultValue if the property is not set. A set value that cannot be parsed as an int throws a NumberFormatException rather than falling back to the default.
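Before the per-project examples below, a minimal self-contained sketch of the call. The property name "my.batch.size" is a hypothetical placeholder, not a real Hadoop key; Hadoop's implementation also accepts hexadecimal values prefixed with 0x.

import org.apache.hadoop.conf.Configuration;

public class GetIntExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // "my.batch.size" is a made-up key for illustration.
        // Property unset: getInt returns the supplied default.
        System.out.println(conf.getInt("my.batch.size", 100)); // prints 100

        // Property set: the stored value is parsed and wins over the default.
        conf.setInt("my.batch.size", 250);
        System.out.println(conf.getInt("my.batch.size", 100)); // prints 250

        // Hex strings prefixed with 0x are parsed base-16.
        conf.set("my.batch.size", "0x10");
        System.out.println(conf.getInt("my.batch.size", 100)); // prints 16
    }
}

Every example below follows the same pattern: a configuration key plus a sensible fallback, so the code still works when the key is absent.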
From source file:com.ikanow.aleph2.analytics.hadoop.assets.BatchEnrichmentJob.java
License:Apache License
/** Pulls out batch enrichment parameters from the Hadoop configuration file
 * (Common to mapper and reducer)
 * @param beJobConfigurable
 * @param configuration
 * @throws Exception
 */
public static void extractBeJobParameters(IBeJobConfigurable beJobConfigurable, Configuration configuration)
        throws Exception {
    final String contextSignature = configuration.get(HadoopBatchEnrichmentUtils.BE_CONTEXT_SIGNATURE);
    final BatchEnrichmentContext enrichmentContext = (BatchEnrichmentContext) ContextUtils
            .getEnrichmentContext(contextSignature);
    beJobConfigurable.setEnrichmentContext(enrichmentContext);
    final DataBucketBean dataBucket = enrichmentContext.getBucket().get();
    beJobConfigurable.setDataBucket(dataBucket);
    final List<EnrichmentControlMetadataBean> config = Optional
            .ofNullable(dataBucket.batch_enrichment_configs()).orElse(Collections.emptyList());
    beJobConfigurable.setEcMetadata(config.isEmpty()
            ? Arrays.asList(BeanTemplateUtils.build(EnrichmentControlMetadataBean.class).done().get())
            : config);
    beJobConfigurable.setBatchSize(configuration.getInt(HadoopBatchEnrichmentUtils.BATCH_SIZE_PARAM, 100));
}
From source file:com.ikanow.aleph2.analytics.r.assets.BatchEnrichmentJob.java
License:Apache License
/** Pulls out batch enrichment parameters from the Hadoop configuration file
 * (Common to mapper and reducer)
 * @param beJobConfigurable
 * @param configuration
 * @throws Exception
 */
public static void extractBeJobParameters(IBeJobConfigurable beJobConfigurable, Configuration configuration)
        throws Exception {
    final String contextSignature = configuration.get(BE_CONTEXT_SIGNATURE);
    final BatchEnrichmentContext enrichmentContext = (BatchEnrichmentContext) ContextUtils
            .getEnrichmentContext(contextSignature);
    beJobConfigurable.setEnrichmentContext(enrichmentContext);
    final DataBucketBean dataBucket = enrichmentContext.getBucket().get();
    beJobConfigurable.setDataBucket(dataBucket);
    final List<EnrichmentControlMetadataBean> config = Optional
            .ofNullable(dataBucket.batch_enrichment_configs()).orElse(Collections.emptyList());
    beJobConfigurable.setEcMetadata(config.isEmpty()
            ? Arrays.asList(BeanTemplateUtils.build(EnrichmentControlMetadataBean.class).done().get())
            : config);
    beJobConfigurable.setBatchSize(configuration.getInt(BATCH_SIZE_PARAM, 100));
}
From source file:com.ikanow.aleph2.search_service.elasticsearch.utils.ElasticsearchHiveUtils.java
License:Apache License
/** Pull out parameters from configuration
 * @param config
 * @return
 */
public static Tuple3<String, String, String> getParamsFromHiveConfig(final Configuration config) {
    final String username = config.get("javax.jdo.option.ConnectionUserName", "");
    final String password = "";
    final Matcher m = hive_extractor.matcher(config.get("javax.jdo.option.ConnectionURL", ""));
    final int port = config.getInt("hive.server2.thrift.port", 10000);
    final String connection = Lambdas.get(() -> {
        if (m.matches()) {
            //(table name is not needed when connecting this way)
            return ErrorUtils.get("jdbc:hive2://{0}:{2,number,#}", m.group(1), m.group(2), port);
        } else
            return "";
    });
    return Tuples._3T(connection, username, password);
}
From source file:com.ikanow.infinit.e.data_model.custom.InfiniteMongoConfigUtil.java
License:Apache License
public static int getMaxSplits(Configuration conf) { return conf.getInt(MAX_SPLITS, 0); }
From source file:com.ikanow.infinit.e.data_model.custom.InfiniteMongoConfigUtil.java
License:Apache License
public static int getMaxDocsPerSplit(Configuration conf) { return conf.getInt(MAX_DOCS_PER_SPLIT, 0); }
From source file:com.inclouds.hbase.utils.ConfigHelper.java
License:Open Source License
/**
 * Gets the cache configuration.
 *
 * @param cfg the cfg
 * @return the cache configuration
 */
public static CacheConfiguration getCacheConfiguration(Configuration cfg) {
    CacheConfiguration ccfg = new CacheConfiguration();
    String value = cfg.get(CacheConfiguration.COMPRESSION, "none");
    //TODO not safe
    ccfg.setCodecType(CodecType.valueOf(value.toUpperCase()));
    ccfg.setCompressionThreshold(cfg.getInt(CacheConfiguration.COMPRESSION_THRESHOLD, 100));
    ccfg.setDefaultExpireTimeout(cfg.getInt(CacheConfiguration.DEFAULT_EXPIRE_TIMEOUT, 0));
    ccfg.setEvictOnExpireFirst(cfg.getBoolean(CacheConfiguration.EVICT_EXPIRED_FIRST, true));
    ccfg.setCandidateListSize((cfg.getInt(CacheConfiguration.EVICTION_LIST_SIZE, 30)));
    ccfg.setEvictionPolicy((cfg.get(CacheConfiguration.EVICTION_POLICY, "lru")));
    ccfg.setHighWatermark(cfg.getFloat(CacheConfiguration.HIGH_WATERMARK, 0.95f));
    ccfg.setLowWatermark(cfg.getFloat(CacheConfiguration.LOW_WATERMARK, 0.90f));
    value = cfg.get(CacheConfiguration.KEY_CLASSNAME);
    if (value != null) {
        ccfg.setKeyClassName(value);
    }
    value = cfg.get(CacheConfiguration.VALUE_CLASSNAME);
    if (value != null) {
        ccfg.setValueClassName(value);
    }
    ccfg.setMaxConcurrentReaders(cfg.getInt(CacheConfiguration.MAX_CONCURRENT_READERS, 0));
    ccfg.setMaxQueryProcessors(cfg.getInt(CacheConfiguration.MAX_QUERY_PROCESSORS, 0));
    ccfg.setMaxEntries(cfg.getLong(CacheConfiguration.MAX_ENTRIES, 0));
    value = cfg.get(CacheConfiguration.MAX_GLOBAL_MEMORY);
    if (value != null) {
        ccfg.setMaxGlobalMemory(Long.parseLong(value));
    } else {
        LOG.warn("[row-cache] Max global memory is not specified.");
    }
    value = cfg.get(CacheConfiguration.MAX_MEMORY);
    if (value != null) {
        ccfg.setMaxMemory(Long.parseLong(value));
    } else {
        LOG.info("[row-cache] Max memory is not specified.");
    }
    ccfg.setCacheName(cfg.get(CacheConfiguration.NAME, "row-cache"));
    ccfg.setCacheNamespace(cfg.get(CacheConfiguration.NAMESPACE, "default"));
    ccfg.setSerDeBufferSize(cfg.getInt(RowCache.ROWCACHE_BUFFER_SIZE, RowCache.DEFAULT_BUFFER_SIZE));
    // TODO bucket number must be calculated
    ccfg.setBucketNumber(cfg.getInt(CacheConfiguration.TOTAL_BUCKETS, 1000000));
    // Done with common cache configurations
    value = cfg.get(DiskStoreConfiguration.PERSISTENCE, "none");
    if (value.equals("none")) {
        // We are done
        return ccfg;
    }
    DiskStoreConfiguration dcfg = loadDiskStoreCfg(cfg, value);
    ccfg.setDataStoreConfiguration(dcfg);
    return ccfg;
}
From source file:com.inclouds.hbase.utils.ConfigHelper.java
License:Open Source License
/**
 * Load raw fs specific.
 *
 * @param cfg the cfg
 * @param dcfg the dcfg
 * @return the disk store configuration
 */
private static DiskStoreConfiguration loadRawFSSpecific(Configuration cfg, RawFSConfiguration dcfg) {
    String value = cfg.get(RawFSConfiguration.IO_THREADS);
    if (value != null) {
        dcfg.setTotalIOThreads(Integer.parseInt(value));
    } else {
        dcfg.setTotalIOThreads(dcfg.getDbDataStoreRoots().length);
    }
    value = cfg.get(RawFSConfiguration.WORKER_THREADS);
    if (value != null) {
        dcfg.setTotalWorkerThreads(Integer.parseInt(value));
    } else {
        // Default the worker-thread count to one per available core
        // (the original called setTotalIOThreads here, which would have
        // clobbered the IO-thread count chosen above).
        dcfg.setTotalWorkerThreads(Runtime.getRuntime().availableProcessors());
    }
    dcfg.setRWBufferSize(cfg.getInt(RawFSConfiguration.RW_BUFFER_SIZE, 4 * 1024 * 1024));
    return dcfg;
}
From source file:com.inmobi.conduit.distcp.tools.mapred.lib.DynamicInputFormat.java
License:Apache License
private static int getListingSplitRatio(Configuration configuration, int numMaps, int numPaths) {
    return configuration.getInt(CONF_LABEL_LISTING_SPLIT_RATIO, getSplitRatio(numMaps, numPaths));
}
From source file:com.inmobi.conduit.distcp.tools.mapred.RetriableFileCopyCommand.java
License:Apache License
private static long getAllowedBandwidth(Configuration conf) {
    return (long) conf.getInt(DistCpConstants.CONF_LABEL_BANDWIDTH_KB, DistCpConstants.DEFAULT_BANDWIDTH_KB);
}
From source file:com.inmobi.conduit.distcp.tools.TestOptionsParser.java
License:Apache License
@Test
public void testOptionsAppendToConf() {
    Configuration conf = new Configuration();
    Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
    Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
    DistCpOptions options = OptionsParser.parse(new String[] { "-atomic", "-i",
            "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    options.appendToConf(conf);
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
    Assert.assertEquals(conf.getInt(DistCpOptionSwitch.BANDWIDTH_KB.getConfigLabel(), -1),
            DistCpConstants.DEFAULT_BANDWIDTH_KB);

    conf = new Configuration();
    Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
    Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
    Assert.assertEquals(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()), null);
    options = OptionsParser.parse(new String[] { "-update", "-delete", "-pu", "-bandwidth", "11",
            "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/" });
    options.appendToConf(conf);
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
    Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
    Assert.assertEquals(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()), "U");
    Assert.assertEquals(conf.getInt(DistCpOptionSwitch.BANDWIDTH_KB.getConfigLabel(), -1), 11 * 1024);
}