List of usage examples for org.apache.hadoop.conf Configuration getFloat
public float getFloat(String name, float defaultValue)
Gets the value of the name property as a float; if no such property exists, defaultValue is returned.
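A minimal sketch of typical usage (not taken from any of the sources below); the property name my.app.sample.rate, its value, and the default are hypothetical and used only for illustration.

import org.apache.hadoop.conf.Configuration;

public class GetFloatExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical property, set here only so the lookup below finds a value.
        conf.setFloat("my.app.sample.rate", 0.25f);

        // Returns 0.25f; if the property were unset, the default 0.1f would be returned instead.
        float rate = conf.getFloat("my.app.sample.rate", 0.1f);
        System.out.println("sample rate = " + rate);
    }
}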
From source file:com.iflytek.spider.crawl.AdaptiveFetchSchedule.java
License:Apache License
public void setConf(Configuration conf) {
    super.setConf(conf);
    if (conf == null)
        return;
    INC_RATE = conf.getFloat("db.fetch.schedule.adaptive.inc_rate", 0.2f);
    DEC_RATE = conf.getFloat("db.fetch.schedule.adaptive.dec_rate", 0.2f);
    MIN_INTERVAL = conf.getInt("db.fetch.schedule.adaptive.min_interval", 60);
    MAX_INTERVAL = conf.getInt("db.fetch.schedule.adaptive.max_interval", SECONDS_PER_DAY * 365); // 1 year
    SYNC_DELTA = conf.getBoolean("db.fetch.schedule.adaptive.sync_delta", true);
    SYNC_DELTA_RATE = conf.getFloat("db.fetch.schedule.adaptive.sync_delta_rate", 0.2f);
}
From source file:com.iflytek.spider.protocol.http.HttpBase.java
License:Apache License
public void setConf(Configuration conf) {
    this.conf = conf;
    this.proxyHost = conf.get("http.proxy.host");
    this.proxyPort = conf.getInt("http.proxy.port", 8080);
    this.useProxy = (proxyHost != null && proxyHost.length() > 0);
    this.timeout = conf.getInt("http.timeout", 10000);
    this.maxContent = conf.getInt("http.content.limit", 64 * 1024);
    this.maxDelays = conf.getInt("http.max.delays", 3);
    this.maxThreadsPerHost = conf.getInt("fetcher.threads.per.host", 1);
    this.userAgent = getAgentString(conf.get("http.agent.name"), conf.get("http.agent.version"),
            conf.get("http.agent.description"), conf.get("http.agent.url"), conf.get("http.agent.email"));
    this.acceptLanguage = conf.get("http.accept.language", acceptLanguage);
    this.serverDelay = (long) (conf.getFloat("fetcher.server.delay", 1.0f) * 1000);
    this.maxCrawlDelay = (long) (conf.getInt("fetcher.max.crawl.delay", -1) * 1000);
    // backward-compatible default setting
    this.byIP = conf.getBoolean("fetcher.threads.per.host.by.ip", true);
    this.useHttp11 = conf.getBoolean("http.useHttp11", false);
    //logConf();
}
From source file:com.inclouds.hbase.utils.ConfigHelper.java
License:Open Source License
/**
 * Gets the cache configuration.
 *
 * @param cfg the cfg
 * @return the cache configuration
 */
public static CacheConfiguration getCacheConfiguration(Configuration cfg) {
    CacheConfiguration ccfg = new CacheConfiguration();
    String value = cfg.get(CacheConfiguration.COMPRESSION, "none");
    // TODO not safe
    ccfg.setCodecType(CodecType.valueOf(value.toUpperCase()));
    ccfg.setCompressionThreshold(cfg.getInt(CacheConfiguration.COMPRESSION_THRESHOLD, 100));
    ccfg.setDefaultExpireTimeout(cfg.getInt(CacheConfiguration.DEFAULT_EXPIRE_TIMEOUT, 0));
    ccfg.setEvictOnExpireFirst(cfg.getBoolean(CacheConfiguration.EVICT_EXPIRED_FIRST, true));
    ccfg.setCandidateListSize((cfg.getInt(CacheConfiguration.EVICTION_LIST_SIZE, 30)));
    ccfg.setEvictionPolicy((cfg.get(CacheConfiguration.EVICTION_POLICY, "lru")));
    ccfg.setHighWatermark(cfg.getFloat(CacheConfiguration.HIGH_WATERMARK, 0.95f));
    ccfg.setLowWatermark(cfg.getFloat(CacheConfiguration.LOW_WATERMARK, 0.90f));
    value = cfg.get(CacheConfiguration.KEY_CLASSNAME);
    if (value != null) {
        ccfg.setKeyClassName(value);
    }
    value = cfg.get(CacheConfiguration.VALUE_CLASSNAME);
    if (value != null) {
        ccfg.setValueClassName(value);
    }
    ccfg.setMaxConcurrentReaders(cfg.getInt(CacheConfiguration.MAX_CONCURRENT_READERS, 0));
    ccfg.setMaxQueryProcessors(cfg.getInt(CacheConfiguration.MAX_QUERY_PROCESSORS, 0));
    ccfg.setMaxEntries(cfg.getLong(CacheConfiguration.MAX_ENTRIES, 0));
    value = cfg.get(CacheConfiguration.MAX_GLOBAL_MEMORY);
    if (value != null) {
        ccfg.setMaxGlobalMemory(Long.parseLong(value));
    } else {
        LOG.warn("[row-cache] Max global memory is not specified.");
    }
    value = cfg.get(CacheConfiguration.MAX_MEMORY);
    if (value != null) {
        ccfg.setMaxMemory(Long.parseLong(value));
    } else {
        LOG.info("[row-cache] Max memory is not specified.");
    }
    ccfg.setCacheName(cfg.get(CacheConfiguration.NAME, "row-cache"));
    ccfg.setCacheNamespace(cfg.get(CacheConfiguration.NAMESPACE, "default"));
    ccfg.setSerDeBufferSize(cfg.getInt(RowCache.ROWCACHE_BUFFER_SIZE, RowCache.DEFAULT_BUFFER_SIZE));
    // TODO bucket number must be calculated
    ccfg.setBucketNumber(cfg.getInt(CacheConfiguration.TOTAL_BUCKETS, 1000000));
    // Done with common cache configurations
    value = cfg.get(DiskStoreConfiguration.PERSISTENCE, "none");
    if (value.equals("none")) {
        // We are done
        return ccfg;
    }
    DiskStoreConfiguration dcfg = loadDiskStoreCfg(cfg, value);
    ccfg.setDataStoreConfiguration(dcfg);
    return ccfg;
}
From source file:com.inclouds.hbase.utils.ConfigHelper.java
License:Open Source License
/**
 * Load disk store cfg.
 *
 * @param cfg the cfg
 * @param value the value
 * @return the disk store configuration
 */
private static DiskStoreConfiguration loadDiskStoreCfg(Configuration cfg, String value) {
    DiskStoreConfiguration diskCfg = null;
    PersistenceMode mode = PersistenceMode.valueOf(value);
    switch (mode) {
    case ONDEMAND:
    case SNAPSHOT:
        diskCfg = new RawFSConfiguration();
        diskCfg.setDiskStoreImplementation(RawFSStore.class);
        diskCfg.setStoreClassName(RawFSStore.class.getName());
        break;
    }
    diskCfg.setPersistenceMode(mode);
    String val = cfg.get(DiskStoreConfiguration.DATA_DIRS);
    if (val == null) {
        LOG.fatal("\'" + DiskStoreConfiguration.DATA_DIRS + "\' is not specified. Aborted.");
        throw new RuntimeException("\'" + DiskStoreConfiguration.DATA_DIRS + "\' is not specified. Aborted.");
    }
    diskCfg.setDbDataStoreRoots(val.split(","));
    diskCfg.setStoreName(cfg.get(DiskStoreConfiguration.NAME, "default")); // DB name / subfolder in a root
    diskCfg.setDbSnapshotInterval(cfg.getLong(DiskStoreConfiguration.SNAPSHOT_INTERVAL, 3600000));
    diskCfg.setDbCompressionType(
            CodecType.valueOf(cfg.get(DiskStoreConfiguration.COMPRESSION, "none").toUpperCase()));
    diskCfg.setDiskStoreMaxSize(cfg.getLong(DiskStoreConfiguration.STORE_SIZE_LIMIT, 0));
    diskCfg.setDiskStoreEvictionPolicy(
            EvictionPolicy.valueOf(cfg.get(DiskStoreConfiguration.EVICTION_POLICY, "none")));
    diskCfg.setDiskStoreEvictionHighWatermark(
            cfg.getFloat(DiskStoreConfiguration.EVICTION_HIGHWATERMARK, 0.98f));
    diskCfg.setDiskStoreEvictionLowWatermak(cfg.getFloat(DiskStoreConfiguration.EVICTION_LOWWATERMARK, 0.95f));
    diskCfg = loadSpecific(cfg, diskCfg);
    return diskCfg;
}
From source file:com.kakao.hbase.manager.command.BalanceFactorTest.java
License:Apache License
@Test
public void testSetConf() throws Exception {
    Configuration conf = HBaseConfiguration.create(new Configuration(true));
    BalanceFactor balanceFactor;

    balanceFactor = BalanceFactor.MINIMIZE_STOREFILE_SIZE_SKEW;
    balanceFactor.setConf(conf);
    Assert.assertEquals(BalanceFactor.WEIGHT_HIGH, conf.getFloat(balanceFactor.getConfKey(), Float.MIN_VALUE),
            0.0f);
    Assert.assertEquals(BalanceFactor.WEIGHT_LOW,
            conf.getFloat(BalanceFactor.MINIMIZE_MOVE_COUNT.getConfKey(), Float.MIN_VALUE), 0.0f);
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java
License:Open Source License
/**
 * Instantiates a new off heap block cache.
 *
 * @param conf the conf
 */
public OffHeapBlockCache(Configuration conf) {
    this.blockSize = conf.getInt("hbase.offheapcache.minblocksize", HColumnDescriptor.DEFAULT_BLOCKSIZE);
    blockCacheMaxSize = conf.getLong(BLOCK_CACHE_MEMORY_SIZE, 0L);
    if (blockCacheMaxSize == 0L) {
        throw new RuntimeException("off heap block cache size is not defined");
    }
    nativeBufferSize = conf.getInt(BLOCK_CACHE_BUFFER_SIZE, DEFAULT_BLOCK_CACH_BUFFER_SIZE);
    extCacheMaxSize = conf.getLong(BLOCK_CACHE_EXT_STORAGE_MEMORY_SIZE, (long) (0.1 * blockCacheMaxSize));
    youngGenFactor = conf.getFloat(BLOCK_CACHE_YOUNG_GEN_FACTOR, DEFAULT_YOUNG_FACTOR);
    overflowExtEnabled = conf.getBoolean(BLOCK_CACHE_OVERFLOW_TO_EXT_STORAGE_ENABLED, false);
    isPersistent = conf.getBoolean(BLOCK_CACHE_PERSISTENT, false);
    if (isPersistent) {
        // Check if we have already set CacheableDeserializer.
        // We need to set deserializer before starting cache
        // because we can have already cached blocks on cache start up
        // and first get before put will fail.
        if (CacheableSerializer.getDeserializer() == null) {
            CacheableSerializer.setHFileDeserializer();
        } else {
            LOG.info("CacheableSerializer is already set.");
        }
    }
    isSnapshotsEnabled = conf.getBoolean(BLOCK_CACHE_SNAPSHOTS, false);
    snapshotsInterval = conf.getInt(BLOCK_CACHE_SNAPSHOT_INTERVAL, 600) * 1000;

    String[] dataRoots = getDataRoots(conf.get(BLOCK_CACHE_DATA_ROOTS));
    if (isPersistent && dataRoots == null) {
        dataRoots = getHDFSRoots(conf);
        if (dataRoots == null) {
            LOG.warn("Data roots are not defined. Set persistent mode to false.");
            isPersistent = false;
        }
    }
    adjustMaxMemory();

    /** Possible values: none, snappy, gzip, lz4, lz4hc */
    // TODO: LZ4 is not supported on all platforms
    // TODO: default compression is LZ4?
    CodecType codec = CodecType.LZ4;
    String value = conf.get(BLOCK_CACHE_COMPRESSION);
    if (value != null) {
        codec = CodecType.valueOf(value.toUpperCase());
    }
    try {
        CacheConfiguration cacheCfg = new CacheConfiguration();
        cacheCfg.setCacheName("block-cache");
        cacheCfg.setSerDeBufferSize(nativeBufferSize);
        cacheCfg.setMaxMemory(blockCacheMaxSize);
        cacheCfg.setCodecType(codec);
        String evictionPolicy = conf.get(BLOCK_CACHE_EVICTION, "LRU").toUpperCase();
        cacheCfg.setEvictionPolicy(evictionPolicy);
        // Set this only for LRU2Q
        cacheCfg.setLRU2QInsertPoint(youngGenFactor);
        setBucketNumber(cacheCfg);

        CacheManager manager = CacheManager.getInstance();
        if (overflowExtEnabled == true) {
            LOG.info("Overflow to external storage is enabled.");
            // External storage handle cache
            CacheConfiguration extStorageCfg = new CacheConfiguration();
            extStorageCfg.setCacheName("extStorageCache");
            extStorageCfg.setMaxMemory(extCacheMaxSize);
            extStorageCfg.setEvictionPolicy(EvictionPolicy.FIFO.toString());
            extStorageCfg.setSerDeBufferSize(4096); // small
            extStorageCfg.setPreevictionListSize(40);
            extStorageCfg.setKeyClassName(byte[].class.getName());
            extStorageCfg.setValueClassName(byte[].class.getName());
            // calculate bucket number
            // 50 is estimate of a record size
            int buckets = (extCacheMaxSize / EXT_STORAGE_REF_SIZE) > Integer.MAX_VALUE ? Integer.MAX_VALUE - 1
                    : (int) (extCacheMaxSize / EXT_STORAGE_REF_SIZE);
            extStorageCfg.setBucketNumber(buckets);
            if (isPersistent) {
                // TODO - this in memory cache has same data dirs as a major cache.
                RawFSConfiguration storeConfig = new RawFSConfiguration();
                storeConfig.setStoreName(extStorageCfg.getCacheName());
                storeConfig.setDiskStoreImplementation(RawFSStore.class);
                storeConfig.setDbDataStoreRoots(dataRoots);
                storeConfig.setPersistenceMode(PersistenceMode.ONDEMAND);
                storeConfig.setDbCompressionType(CodecType.LZ4);
                storeConfig.setDbSnapshotInterval(15);
                //storeConfig.setTotalWorkerThreads(Runtime.getRuntime().availableProcessors() / 2);
                //storeConfig.setTotalIOThreads(1);
                extStorageCfg.setDataStoreConfiguration(storeConfig);
            }
            // This will initiate the load of stored cache data
            // if persistence is enabled
            extStorageCache = manager.getCache(extStorageCfg, null);
            // Initialize external storage
            storage = ExtStorageManager.getInstance().getStorage(conf, extStorageCache);
        } else {
            LOG.info("Overflow to external storage is disabled.");
            if (isPersistent) {
                RawFSConfiguration storeConfig = new RawFSConfiguration();
                storeConfig.setStoreName(cacheCfg.getCacheName());
                storeConfig.setDiskStoreImplementation(RawFSStore.class);
                storeConfig.setDbDataStoreRoots(dataRoots);
                storeConfig.setPersistenceMode(PersistenceMode.ONDEMAND);
                storeConfig.setDbSnapshotInterval(15);
                cacheCfg.setDataStoreConfiguration(storeConfig);
                // Load cache data
                offHeapCache = manager.getCache(cacheCfg, null);
            }
        }
        if (offHeapCache == null) {
            offHeapCache = manager.getCache(cacheCfg, null);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    boolean onHeapEnabled = conf.getBoolean(BLOCK_CACHE_ONHEAP_ENABLED, true);
    if (onHeapEnabled) {
        long onHeapCacheSize = calculateOnHeapCacheSize(conf);
        if (onHeapCacheSize > 0) {
            onHeapCache = new OnHeapBlockCache(onHeapCacheSize, blockSize, conf);
            LOG.info("Created fast on-heap cache. Size=" + onHeapCacheSize);
        } else {
            LOG.warn("Conflicting configuration options. On-heap cache is disabled.");
        }
    }

    this.stats = new CacheStats();
    this.onHeapStats = new CacheStats();
    this.offHeapStats = new CacheStats();
    this.extStats = new CacheStats();
    this.extRefStats = new CacheStats();

    EvictionListener listener = new EvictionListener() {
        @Override
        public void evicted(long ptr, Reason reason, long nanoTime) {
            stats.evict();
            stats.evicted();
        }
    };
    offHeapCache.setEvictionListener(listener);
    // Cacheable serializer registration
    CacheableSerializer serde = new CacheableSerializer();
    offHeapCache.getSerDe().registerSerializer(serde);
    // if (extStorageCache != null) {
    //     //StorageHandleSerializer serde2 = new StorageHandleSerializer();
    //     //SmallByteArraySerializer serde2 = new SmallByteArraySerializer();
    //     //extStorageCache.getSerDe().registerSerializer(serde2);
    // }
    // Start statistics thread
    statThread = new StatisticsThread(this);
    statThread.start();
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java
License:Open Source License
/**
 * Calculate on heap cache size.
 *
 * @param conf the conf
 * @return the long
 */
private long calculateOnHeapCacheSize(Configuration conf) {
    float cachePercentage = conf.getFloat(HEAP_BLOCK_CACHE_MEMORY_RATIO, DEFAULT_HEAP_BLOCK_CACHE_MEMORY_RATIO);
    if (cachePercentage == 0L) {
        // block cache disabled on heap
        return 0L;
    }
    if (cachePercentage > 1.0) {
        throw new IllegalArgumentException(
                HEAP_BLOCK_CACHE_MEMORY_RATIO + " must be between 0.0 and 1.0, and not > 1.0");
    }
    // Calculate the amount of heap to give the heap.
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long cacheSize = (long) (mu.getMax() * cachePercentage);
    return cacheSize;
}
From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCacheOld.java
License:Open Source License
/**
 * Instantiates a new off heap block cache.
 *
 * @param conf the conf
 */
public OffHeapBlockCacheOld(Configuration conf) {
    this.blockSize = conf.getInt("hbase.offheapcache.minblocksize", HColumnDescriptor.DEFAULT_BLOCKSIZE);
    CacheConfiguration cacheCfg = ConfigHelper.getCacheConfiguration(conf);
    maxSize = cacheCfg.getMaxGlobalMemory();
    if (maxSize == 0) {
        // USE max memory
        maxSize = cacheCfg.getMaxMemory();
        LOG.warn("[OffHeapBlockCache] Gloabal max memory is not specified, using max memory instead.");
    }
    if (maxSize == 0) {
        LOG.fatal(CacheConfiguration.MAX_GLOBAL_MEMORY + " is not specified.");
        throw new RuntimeException(
                "[OffHeapBlockCache]" + CacheConfiguration.MAX_GLOBAL_MEMORY + " is not specified.");
    }

    // TODO make sure sum == 1
    youngGenFactor = conf.getFloat(YOUNG_GEN_FACTOR, DEFAULT_YOUNG_FACTOR);
    tenGenFactor = conf.getFloat(TENURED_GEN_FACTOR, DEFAULT_PERM_FACTOR);
    permGenFactor = conf.getFloat(PERM_GEN_FACTOR, DEFAULT_PERM_FACTOR);
    extStorageFactor = conf.getFloat(EXT_STORAGE_FACTOR, DEFAULT_EXT_STORAGE_FACTOR);
    overflowExtEnabled = conf.getBoolean(OVERFLOW_TO_EXT_STORAGE_ENABLED, false);

    long youngSize = (long) (youngGenFactor * maxSize);
    long tenSize = (long) (tenGenFactor * maxSize);
    long permSize = (long) (permGenFactor * maxSize);
    long extStorageSize = (long) (extStorageFactor * maxSize);

    /** Possible values: none, snappy, gzip, lz4 */
    // TODO: LZ4 is not supported on all platforms
    CodecType youngGenCodec = CodecType.LZ4;
    CodecType tenGenCodec = CodecType.LZ4;
    CodecType permGenCodec = CodecType.LZ4;
    CodecType extStorageCodec = CodecType.LZ4;

    String value = conf.get(YOUNG_GEN_COMPRESSION);
    if (value != null) {
        youngGenCodec = CodecType.valueOf(value.toUpperCase());
    }
    value = conf.get(TENURED_GEN_COMPRESSION);
    if (value != null) {
        tenGenCodec = CodecType.valueOf(value.toUpperCase());
    }
    value = conf.get(PERM_GEN_COMPRESSION);
    if (value != null) {
        permGenCodec = CodecType.valueOf(value.toUpperCase());
    }
    value = conf.get(EXT_STORAGE_COMPRESSION);
    if (value != null) {
        extStorageCodec = CodecType.valueOf(value.toUpperCase());
    }

    try {
        // TODO - Verify we have deep enough copy
        CacheConfiguration youngCfg = cacheCfg.copy();
        youngCfg.setMaxMemory(youngSize);
        // Disable disk persistence for young gen
        // TODO - Do we really need disabling
        //youngCfg.setDataStoreConfiguration(null);
        // TODO - enable exceed over limit mode
        //youngCfg.setCompressionEnabled(youngGenCodec != CodecType.NONE);
        youngCfg.setCodecType(youngGenCodec);
        String name = youngCfg.getCacheName();
        youngCfg.setCacheName(name + "_young");
        setBucketNumber(youngCfg);

        CacheConfiguration tenCfg = cacheCfg.copy();
        tenCfg.setMaxMemory(tenSize);
        // TODO - enable exceed over limit mode
        //tenCfg.setCompressionEnabled(tenGenCodec != CodecType.NONE);
        tenCfg.setCodecType(tenGenCodec);
        name = tenCfg.getCacheName();
        tenCfg.setCacheName(name + "_tenured");
        setBucketNumber(tenCfg);

        CacheConfiguration permCfg = cacheCfg.copy();
        permCfg.setMaxMemory(permSize);
        // TODO - enable exceed over limit mode
        //permCfg.setCompressionEnabled(permGenCodec != CodecType.NONE);
        permCfg.setCodecType(permGenCodec);
        name = permCfg.getCacheName();
        permCfg.setCacheName(name + "_perm");
        setBucketNumber(permCfg);

        CacheManager manager = CacheManager.getInstance();
        // TODO add ProgressListener
        youngGenCache = manager.getCache(youngCfg, null);
        // TODO - do we need this?
        //youngGenCache.setEvictionAuto(false);
        tenGenCache = manager.getCache(tenCfg, null);
        // TODO - do we need this?
        //tenGenCache.setEvictionAuto(false);
        permGenCache = manager.getCache(permCfg, null);
        // TODO - do we need this?
        //permGenCache.setEvictionAuto(false);

        if (overflowExtEnabled == true) {
            LOG.info("Overflow to external storage is enabled.");
            // External storage handle cache
            CacheConfiguration extStorageCfg = cacheCfg.copy();
            permCfg.setMaxMemory(extStorageSize);
            permCfg.setCodecType(extStorageCodec);
            name = permCfg.getCacheName();
            permCfg.setCacheName(name + "_ext");
            // calculate bucket number
            // 50 is estimate of a record size
            int buckets = (extStorageSize / 50) > Integer.MAX_VALUE ? Integer.MAX_VALUE - 1
                    : (int) (extStorageSize / 50);
            extStorageCfg.setBucketNumber(buckets);
            extStorageCache = manager.getCache(extStorageCfg, null);
            // Initialize external storage
            storage = ExtStorageManager.getInstance().getStorage(conf, extStorageCache);
        } else {
            LOG.info("Overflow to external storage is disabled.");
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    this.stats = new CacheStats();
    EvictionListener listener = new EvictionListener() {
        @Override
        public void evicted(long ptr, Reason reason, long nanoTime) {
            stats.evict();
            stats.evicted();
        }
    };
    youngGenCache.setEvictionListener(listener);
    // TODO separate eviction listener
    tenGenCache.setEvictionListener(listener);
    permGenCache.setEvictionListener(listener);
    // Cacheable serializer registration
    CacheableSerializer serde = new CacheableSerializer();
    youngGenCache.getSerDe().registerSerializer(serde);
    tenGenCache.getSerDe().registerSerializer(serde);
    permGenCache.getSerDe().registerSerializer(serde);
    if (extStorageCache != null) {
        StorageHandleSerializer serde2 = new StorageHandleSerializer();
        extStorageCache.getSerDe().registerSerializer(serde2);
    }
}
From source file:com.koda.integ.hbase.blockcache.OnHeapBlockCache.java
License:Open Source License
/**
 * Constructor used for testing. Allows disabling of the eviction thread.
 *
 * @param maxSize the max size
 * @param blockSize the block size
 * @param evictionThread the eviction thread
 * @param conf the conf
 */
public OnHeapBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) {
    this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize), DEFAULT_LOAD_FACTOR,
            DEFAULT_CONCURRENCY_LEVEL, conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR),
            conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR), DEFAULT_SINGLE_FACTOR,
            DEFAULT_MULTI_FACTOR, DEFAULT_MEMORY_FACTOR);
}
From source file:com.koda.integ.hbase.util.ConfigHelper.java
License:Open Source License
/**
 * Gets the cache configuration.
 *
 * @param cfg the cfg
 * @return the cache configuration
 */
public static CacheConfiguration getCacheConfiguration(Configuration cfg) {
    CacheConfiguration ccfg = new CacheConfiguration();
    String value = cfg.get(CacheConfiguration.COMPRESSION, "none");
    // TODO not safe
    ccfg.setCodecType(CodecType.valueOf(value.toUpperCase()));
    ccfg.setCompressionThreshold(cfg.getInt(CacheConfiguration.COMPRESSION_THRESHOLD, 100));
    ccfg.setDefaultExpireTimeout(cfg.getInt(CacheConfiguration.DEFAULT_EXPIRE_TIMEOUT, 0));
    ccfg.setEvictOnExpireFirst(cfg.getBoolean(CacheConfiguration.EVICT_EXPIRED_FIRST, true));
    ccfg.setCandidateListSize((cfg.getInt(CacheConfiguration.EVICTION_LIST_SIZE, 30)));
    ccfg.setEvictionPolicy((cfg.get(CacheConfiguration.EVICTION_POLICY, "lru")));
    ccfg.setHighWatermark(cfg.getFloat(CacheConfiguration.HIGH_WATERMARK, 0.98f));
    ccfg.setLowWatermark(cfg.getFloat(CacheConfiguration.LOW_WATERMARK, 0.95f));
    value = cfg.get(CacheConfiguration.KEY_CLASSNAME);
    if (value != null) {
        ccfg.setKeyClassName(value);
    }
    value = cfg.get(CacheConfiguration.VALUE_CLASSNAME);
    if (value != null) {
        ccfg.setValueClassName(value);
    }
    ccfg.setMaxConcurrentReaders(cfg.getInt(CacheConfiguration.MAX_CONCURRENT_READERS, 0));
    ccfg.setMaxQueryProcessors(cfg.getInt(CacheConfiguration.MAX_QUERY_PROCESSORS, 0));
    ccfg.setMaxEntries(cfg.getLong(CacheConfiguration.MAX_ENTRIES, 0));
    value = cfg.get(CacheConfiguration.MAX_GLOBAL_MEMORY);
    if (value != null) {
        ccfg.setMaxGlobalMemory(Long.parseLong(value));
    } else {
        LOG.info(" Max global memory is not specified.");
    }
    value = cfg.get(CacheConfiguration.MAX_MEMORY);
    if (value != null) {
        ccfg.setMaxMemory(Long.parseLong(value));
    } else {
        LOG.info(" Max memory is not specified.");
    }
    ccfg.setCacheName(cfg.get(CacheConfiguration.NAME, "default"));
    ccfg.setCacheNamespace(cfg.get(CacheConfiguration.NAMESPACE, "default"));
    ccfg.setSerDeBufferSize(cfg.getInt(CacheConfiguration.SERDE_BUFSIZE, 4 * 1024 * 1024));
    // TODO bucket number must be calculated
    ccfg.setBucketNumber(cfg.getInt(CacheConfiguration.TOTAL_BUCKETS, 1000000));
    // Done with common cache configurations
    value = cfg.get(DiskStoreConfiguration.PERSISTENCE, "none");
    if (value.equals("none")) {
        // We are done
        return ccfg;
    }
    DiskStoreConfiguration dcfg = loadDiskStoreCfg(cfg, value);
    ccfg.setDataStoreConfiguration(dcfg);
    return ccfg;
}