Example usage for org.apache.hadoop.conf Configuration getBoolean

List of usage examples for org.apache.hadoop.conf Configuration getBoolean

Introduction

On this page you can find example usages of org.apache.hadoop.conf Configuration getBoolean, collected from open source projects.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property is specified, or if the specified value is not a valid boolean, then defaultValue is returned.
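
Before the project examples below, here is a minimal, self-contained sketch of the call. The property name my.feature.enabled is made up for illustration:

import org.apache.hadoop.conf.Configuration;

public class GetBooleanDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // The property is absent, so the supplied default (false) is returned.
        boolean before = conf.getBoolean("my.feature.enabled", false);

        // Set the property, then read it back.
        conf.setBoolean("my.feature.enabled", true);
        boolean after = conf.getBoolean("my.feature.enabled", true);

        System.out.println(before + " " + after); // prints: false true
    }
}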

Usage

From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java

License:Open Source License

/**
 * Instantiates a new off heap block cache.
 *
 * @param conf the configuration
 */
public OffHeapBlockCache(Configuration conf) {
    this.blockSize = conf.getInt("hbase.offheapcache.minblocksize", HColumnDescriptor.DEFAULT_BLOCKSIZE);

    blockCacheMaxSize = conf.getLong(BLOCK_CACHE_MEMORY_SIZE, 0L);
    if (blockCacheMaxSize == 0L) {
        throw new RuntimeException("off heap block cache size is not defined");
    }
    nativeBufferSize = conf.getInt(BLOCK_CACHE_BUFFER_SIZE, DEFAULT_BLOCK_CACH_BUFFER_SIZE);
    extCacheMaxSize = conf.getLong(BLOCK_CACHE_EXT_STORAGE_MEMORY_SIZE, (long) (0.1 * blockCacheMaxSize));
    youngGenFactor = conf.getFloat(BLOCK_CACHE_YOUNG_GEN_FACTOR, DEFAULT_YOUNG_FACTOR);
    overflowExtEnabled = conf.getBoolean(BLOCK_CACHE_OVERFLOW_TO_EXT_STORAGE_ENABLED, false);
    isPersistent = conf.getBoolean(BLOCK_CACHE_PERSISTENT, false);
    if (isPersistent) {
        // Check whether a CacheableDeserializer has already been set.
        // The deserializer must be set before the cache starts, because
        // blocks may already be cached at startup and a get issued
        // before the first put would otherwise fail.
        if (CacheableSerializer.getDeserializer() == null) {
            CacheableSerializer.setHFileDeserializer();
        } else {
            LOG.info("CacheableSerializer is already set.");
        }
    }
    isSnapshotsEnabled = conf.getBoolean(BLOCK_CACHE_SNAPSHOTS, false);
    snapshotsInterval = conf.getInt(BLOCK_CACHE_SNAPSHOT_INTERVAL, 600) * 1000;

    String[] dataRoots = getDataRoots(conf.get(BLOCK_CACHE_DATA_ROOTS));

    if (isPersistent && dataRoots == null) {
        dataRoots = getHDFSRoots(conf);

        if (dataRoots == null) {
            LOG.warn("Data roots are not defined. Set persistent mode to false.");
            isPersistent = false;
        }
    }

    adjustMaxMemory();

    /** Possible values: none, snappy, gzip, lz4, lz4hc */
    // TODO: LZ4 is not supported on all platforms
    // TODO: default compression is LZ4?
    CodecType codec = CodecType.LZ4;

    String value = conf.get(BLOCK_CACHE_COMPRESSION);
    if (value != null) {
        codec = CodecType.valueOf(value.toUpperCase());
    }

    try {

        CacheConfiguration cacheCfg = new CacheConfiguration();
        cacheCfg.setCacheName("block-cache");

        cacheCfg.setSerDeBufferSize(nativeBufferSize);

        cacheCfg.setMaxMemory(blockCacheMaxSize);
        cacheCfg.setCodecType(codec);
        String evictionPolicy = conf.get(BLOCK_CACHE_EVICTION, "LRU").toUpperCase();
        cacheCfg.setEvictionPolicy(evictionPolicy);
        // The insert point is honored only by the LRU2Q eviction policy
        cacheCfg.setLRU2QInsertPoint(youngGenFactor);
        setBucketNumber(cacheCfg);

        CacheManager manager = CacheManager.getInstance();

        if (overflowExtEnabled) {
            LOG.info("Overflow to external storage is enabled.");
            // External storage handle cache
            CacheConfiguration extStorageCfg = new CacheConfiguration();
            extStorageCfg.setCacheName("extStorageCache");
            extStorageCfg.setMaxMemory(extCacheMaxSize);
            extStorageCfg.setEvictionPolicy(EvictionPolicy.FIFO.toString());
            extStorageCfg.setSerDeBufferSize(4096);// small
            extStorageCfg.setPreevictionListSize(40);
            extStorageCfg.setKeyClassName(byte[].class.getName());
            extStorageCfg.setValueClassName(byte[].class.getName());
            // calculate bucket number;
            // EXT_STORAGE_REF_SIZE is the estimated size of one stored reference
            int buckets = (extCacheMaxSize / EXT_STORAGE_REF_SIZE) > Integer.MAX_VALUE ? Integer.MAX_VALUE - 1
                    : (int) (extCacheMaxSize / EXT_STORAGE_REF_SIZE);
            extStorageCfg.setBucketNumber(buckets);
            if (isPersistent) {
                // TODO: this in-memory cache uses the same data roots as the main cache.
                RawFSConfiguration storeConfig = new RawFSConfiguration();

                storeConfig.setStoreName(extStorageCfg.getCacheName());

                storeConfig.setDiskStoreImplementation(RawFSStore.class);

                storeConfig.setDbDataStoreRoots(dataRoots);
                storeConfig.setPersistenceMode(PersistenceMode.ONDEMAND);
                storeConfig.setDbCompressionType(CodecType.LZ4);
                storeConfig.setDbSnapshotInterval(15);
                //storeConfig.setTotalWorkerThreads(Runtime.getRuntime().availableProcessors() /2);
                //storeConfig.setTotalIOThreads(1);          
                extStorageCfg.setDataStoreConfiguration(storeConfig);
            }

            // This will initiate the load of stored cache data
            // if persistence is enabled
            extStorageCache = manager.getCache(extStorageCfg, null);
            // Initialize external storage
            storage = ExtStorageManager.getInstance().getStorage(conf, extStorageCache);
        } else {
            LOG.info("Overflow to external storage is disabled.");
            if (isPersistent) {
                RawFSConfiguration storeConfig = new RawFSConfiguration();

                storeConfig.setStoreName(cacheCfg.getCacheName());

                storeConfig.setDiskStoreImplementation(RawFSStore.class);

                storeConfig.setDbDataStoreRoots(dataRoots);
                storeConfig.setPersistenceMode(PersistenceMode.ONDEMAND);
                storeConfig.setDbSnapshotInterval(15);
                cacheCfg.setDataStoreConfiguration(storeConfig);
                // Load cache data
                offHeapCache = manager.getCache(cacheCfg, null);
            }
        }

        if (offHeapCache == null) {
            offHeapCache = manager.getCache(cacheCfg, null);
        }

    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    boolean onHeapEnabled = conf.getBoolean(BLOCK_CACHE_ONHEAP_ENABLED, true);
    if (onHeapEnabled) {
        long onHeapCacheSize = calculateOnHeapCacheSize(conf);
        if (onHeapCacheSize > 0) {
            onHeapCache = new OnHeapBlockCache(onHeapCacheSize, blockSize, conf);
            LOG.info("Created fast on-heap cache. Size=" + onHeapCacheSize);
        } else {
            LOG.warn("Conflicting configuration options. On-heap cache is disabled.");
        }
    }

    this.stats = new CacheStats();
    this.onHeapStats = new CacheStats();
    this.offHeapStats = new CacheStats();
    this.extStats = new CacheStats();
    this.extRefStats = new CacheStats();

    EvictionListener listener = new EvictionListener() {

        @Override
        public void evicted(long ptr, Reason reason, long nanoTime) {
            stats.evict();
            stats.evicted();

        }

    };

    offHeapCache.setEvictionListener(listener);
    // Cacheable serializer registration

    CacheableSerializer serde = new CacheableSerializer();
    offHeapCache.getSerDe().registerSerializer(serde);

    //    if( extStorageCache != null){
    //      //StorageHandleSerializer serde2 = new StorageHandleSerializer();
    //      //  SmallByteArraySerializer serde2 = new SmallByteArraySerializer();
    //      //   extStorageCache.getSerDe().registerSerializer(serde2);
    //    }
    // Start statistics thread
    statThread = new StatisticsThread(this);
    statThread.start();

}
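
The constructor above uses getBoolean purely for feature flags: overflow to external storage, persistence, snapshots, and the on-heap cache, with every flag except the on-heap cache defaulting to false. A minimal setup sketch, assuming the BLOCK_CACHE_* constants are public String keys on OffHeapBlockCache:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

import com.koda.integ.hbase.blockcache.OffHeapBlockCache;

public class OffHeapBlockCacheSetup {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Required: the constructor throws if the cache size is not set.
        conf.setLong(OffHeapBlockCache.BLOCK_CACHE_MEMORY_SIZE, 1024L * 1024 * 1024);

        // Boolean flags read via getBoolean(name, false) in the constructor.
        conf.setBoolean(OffHeapBlockCache.BLOCK_CACHE_PERSISTENT, true);
        conf.setBoolean(OffHeapBlockCache.BLOCK_CACHE_OVERFLOW_TO_EXT_STORAGE_ENABLED, false);

        OffHeapBlockCache cache = new OffHeapBlockCache(conf);
    }
}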

From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCacheOld.java

License:Open Source License

/**
 * Instantiates a new off heap block cache.
 *
 * @param conf the configuration
 */
public OffHeapBlockCacheOld(Configuration conf) {
    this.blockSize = conf.getInt("hbase.offheapcache.minblocksize", HColumnDescriptor.DEFAULT_BLOCKSIZE);

    CacheConfiguration cacheCfg = ConfigHelper.getCacheConfiguration(conf);
    maxSize = cacheCfg.getMaxGlobalMemory();
    if (maxSize == 0) {
        // USE max memory
        maxSize = cacheCfg.getMaxMemory();
        LOG.warn("[OffHeapBlockCache] Gloabal max memory is not specified, using max memory instead.");
    }
    if (maxSize == 0) {
        LOG.fatal(CacheConfiguration.MAX_GLOBAL_MEMORY + " is not specified.");
        throw new RuntimeException(
                "[OffHeapBlockCache]" + CacheConfiguration.MAX_GLOBAL_MEMORY + " is not specified.");
    }

    //TODO make sure sum == 1
    youngGenFactor = conf.getFloat(YOUNG_GEN_FACTOR, DEFAULT_YOUNG_FACTOR);
    tenGenFactor = conf.getFloat(TENURED_GEN_FACTOR, DEFAULT_PERM_FACTOR);
    permGenFactor = conf.getFloat(PERM_GEN_FACTOR, DEFAULT_PERM_FACTOR);
    extStorageFactor = conf.getFloat(EXT_STORAGE_FACTOR, DEFAULT_EXT_STORAGE_FACTOR);
    overflowExtEnabled = conf.getBoolean(OVERFLOW_TO_EXT_STORAGE_ENABLED, false);

    long youngSize = (long) (youngGenFactor * maxSize);
    long tenSize = (long) (tenGenFactor * maxSize);
    long permSize = (long) (permGenFactor * maxSize);
    long extStorageSize = (long) (extStorageFactor * maxSize);

    /** Possible values: none, snappy, gzip, lz4 */
    // TODO: LZ4 is not supported on all platforms
    CodecType youngGenCodec = CodecType.LZ4;
    CodecType tenGenCodec = CodecType.LZ4;
    CodecType permGenCodec = CodecType.LZ4;
    CodecType extStorageCodec = CodecType.LZ4;

    String value = conf.get(YOUNG_GEN_COMPRESSION);
    if (value != null) {
        youngGenCodec = CodecType.valueOf(value.toUpperCase());
    }

    value = conf.get(TENURED_GEN_COMPRESSION);
    if (value != null) {
        tenGenCodec = CodecType.valueOf(value.toUpperCase());
    }

    value = conf.get(PERM_GEN_COMPRESSION);
    if (value != null) {
        permGenCodec = CodecType.valueOf(value.toUpperCase());
    }

    value = conf.get(EXT_STORAGE_COMPRESSION);
    if (value != null) {
        extStorageCodec = CodecType.valueOf(value.toUpperCase());
    }

    try {
        //TODO - Verify we have deep enough copy
        CacheConfiguration youngCfg = cacheCfg.copy();
        youngCfg.setMaxMemory(youngSize);
        // Disable disk persistence for young gen
        //TODO - Do we really need disabling
        //youngCfg.setDataStoreConfiguration(null);

        // TODO - enable exceed over limit mode

        //youngCfg.setCompressionEnabled(youngGenCodec !=CodecType.NONE);
        youngCfg.setCodecType(youngGenCodec);
        String name = youngCfg.getCacheName();
        youngCfg.setCacheName(name + "_young");

        setBucketNumber(youngCfg);

        CacheConfiguration tenCfg = cacheCfg.copy();
        tenCfg.setMaxMemory(tenSize);
        // TODO - enable exceed over limit mode
        //tenCfg.setCompressionEnabled(tenGenCodec != CodecType.NONE);
        tenCfg.setCodecType(tenGenCodec);
        name = tenCfg.getCacheName();
        tenCfg.setCacheName(name + "_tenured");

        setBucketNumber(tenCfg);

        CacheConfiguration permCfg = cacheCfg.copy();

        permCfg.setMaxMemory(permSize);
        // TODO - enable exceed over limit mode
        //permCfg.setCompressionEnabled(permGenCodec != CodecType.NONE);
        permCfg.setCodecType(permGenCodec);
        name = permCfg.getCacheName();
        permCfg.setCacheName(name + "_perm");

        setBucketNumber(permCfg);

        CacheManager manager = CacheManager.getInstance();
        //TODO add ProgressListener
        youngGenCache = manager.getCache(youngCfg, null);
        // TODO - do we need this?
        //youngGenCache.setEvictionAuto(false);
        tenGenCache = manager.getCache(tenCfg, null);
        // TODO - do we need this?
        //tenGenCache.setEvictionAuto(false);
        permGenCache = manager.getCache(permCfg, null);
        // TODO - do we need this?         
        //permGenCache.setEvictionAuto(false);
        if (overflowExtEnabled) {
            LOG.info("Overflow to external storage is enabled.");
            // External storage handle cache
            CacheConfiguration extStorageCfg = cacheCfg.copy();

            extStorageCfg.setMaxMemory(extStorageSize);
            extStorageCfg.setCodecType(extStorageCodec);
            name = extStorageCfg.getCacheName();
            extStorageCfg.setCacheName(name + "_ext");
            // calculate bucket number;
            // 50 is an estimate of the record size
            int buckets = (extStorageSize / 50) > Integer.MAX_VALUE ? Integer.MAX_VALUE - 1
                    : (int) (extStorageSize / 50);
            extStorageCfg.setBucketNumber(buckets);
            extStorageCache = manager.getCache(extStorageCfg, null);
            // Initialize external storage
            storage = ExtStorageManager.getInstance().getStorage(conf, extStorageCache);
        } else {
            LOG.info("Overflow to external storage is disabled.");
        }

    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    this.stats = new CacheStats();

    EvictionListener listener = new EvictionListener() {

        @Override
        public void evicted(long ptr, Reason reason, long nanoTime) {
            stats.evict();
            stats.evicted();

        }

    };

    youngGenCache.setEvictionListener(listener);
    // TODO separate eviction listener      
    tenGenCache.setEvictionListener(listener);
    permGenCache.setEvictionListener(listener);

    // Cacheable serializer registration

    CacheableSerializer serde = new CacheableSerializer();

    youngGenCache.getSerDe().registerSerializer(serde);
    tenGenCache.getSerDe().registerSerializer(serde);
    permGenCache.getSerDe().registerSerializer(serde);

    if (extStorageCache != null) {
        StorageHandleSerializer serde2 = new StorageHandleSerializer();
        extStorageCache.getSerDe().registerSerializer(serde2);
    }

}
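
The TODO near the top of this constructor notes that the four generation factors should sum to 1, but nothing enforces it. A hedged validation sketch that could run right after the factors are read:

float sum = youngGenFactor + tenGenFactor + permGenFactor + extStorageFactor;
if (Math.abs(sum - 1.0f) > 0.001f) {
    throw new IllegalArgumentException("Generation factors must sum to 1, got " + sum);
}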

From source file:com.koda.integ.hbase.storage.FileExtStorage.java

License:Open Source License

/**
 * Initializes the configuration.
 *
 * @param cfg the cfg
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void initConfig(Configuration cfg) throws IOException {

    LOG.info("[FileExtStorage] init config ...");
    this.config = cfg;
    String value = cfg.get(FILE_STORAGE_BASE_DIR);
    if (value == null) {
        throw new IOException("[FileExtStorage] Base directory not specified.");
    }
    fileStorageBaseDir = value.trim();

    baseDir = new File(fileStorageBaseDir);

    initPartition();
    if (partition != null) {
        LOG.info("Partition size [" + partition.getAbsolutePath() + "]=" + getTotalPartitionSize() + " Usable="
                + getUsablePartitionSpace());
    } else {
        LOG.warn("Could not detect partition for cache directory: " + fileStorageBaseDir);
    }

    value = cfg.get(FILE_STORAGE_MAX_SIZE);
    if (value == null) {
        throw new IOException("[FileExtStorage] Maximum storage size not specified.");
    } else {
        maxStorageSize = Long.parseLong(value);
    }
    value = cfg.get(FILE_STORAGE_BUFFER_SIZE, DEFAULT_BUFFER_SIZE_STR);
    bufferSize = Integer.parseInt(value);
    value = cfg.get(FILE_STORAGE_NUM_BUFFERS, DEFAULT_NUM_BUFFERS_STR);
    numBuffers = Integer.parseInt(value);
    value = cfg.get(FILE_STORAGE_FLUSH_INTERVAL, DEFAULT_FLUSH_INTERVAL_STR);
    flushInterval = Long.parseLong(value);
    value = cfg.get(FILE_STORAGE_SC_RATIO, DEFAULT_SC_RATIO_STR);
    secondChanceFIFORatio = Float.parseFloat(value);
    value = cfg.get(FILE_STORAGE_FILE_SIZE_LIMIT, DEFAULT_FILE_SIZE_LIMIT_STR);
    fileSizeLimit = Long.parseLong(value);
    noPageCache = !cfg.getBoolean(FILE_STORAGE_PAGE_CACHE, Boolean.parseBoolean(DEFAULT_PAGE_CACHE));

    // init locks
    for (int i = 0; i < locks.length; i++) {
        locks[i] = new ReentrantReadWriteLock();
    }

}
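
Most values in initConfig are read with get(...) and parsed by hand; Configuration's typed getters express the same reads more directly. A sketch of the equivalents, using the same constants (note that getLong would silently fall back to a default where the original throws an IOException):

long maxSize = cfg.getLong(FILE_STORAGE_MAX_SIZE, 0L); // original throws if absent
int bufSize = cfg.getInt(FILE_STORAGE_BUFFER_SIZE, Integer.parseInt(DEFAULT_BUFFER_SIZE_STR));
boolean pageCache = cfg.getBoolean(FILE_STORAGE_PAGE_CACHE, Boolean.parseBoolean(DEFAULT_PAGE_CACHE));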

From source file:com.koda.integ.hbase.util.ConfigHelper.java

License:Open Source License

/**
 * Gets the cache configuration.
 *
 * @param cfg the cfg
 * @return the cache configuration
 */
public static CacheConfiguration getCacheConfiguration(Configuration cfg) {

    CacheConfiguration ccfg = new CacheConfiguration();
    String value = cfg.get(CacheConfiguration.COMPRESSION, "none");
    // TODO: not safe; CodecType.valueOf throws IllegalArgumentException for unknown codec names
    ccfg.setCodecType(CodecType.valueOf(value.toUpperCase()));
    ccfg.setCompressionThreshold(cfg.getInt(CacheConfiguration.COMPRESSION_THRESHOLD, 100));
    ccfg.setDefaultExpireTimeout(cfg.getInt(CacheConfiguration.DEFAULT_EXPIRE_TIMEOUT, 0));
    ccfg.setEvictOnExpireFirst(cfg.getBoolean(CacheConfiguration.EVICT_EXPIRED_FIRST, true));
    ccfg.setCandidateListSize((cfg.getInt(CacheConfiguration.EVICTION_LIST_SIZE, 30)));
    ccfg.setEvictionPolicy((cfg.get(CacheConfiguration.EVICTION_POLICY, "lru")));
    ccfg.setHighWatermark(cfg.getFloat(CacheConfiguration.HIGH_WATERMARK, 0.98f));
    ccfg.setLowWatermark(cfg.getFloat(CacheConfiguration.LOW_WATERMARK, 0.95f));

    value = cfg.get(CacheConfiguration.KEY_CLASSNAME);
    if (value != null) {
        ccfg.setKeyClassName(value);
    }

    value = cfg.get(CacheConfiguration.VALUE_CLASSNAME);
    if (value != null) {
        ccfg.setValueClassName(value);
    }

    ccfg.setMaxConcurrentReaders(cfg.getInt(CacheConfiguration.MAX_CONCURRENT_READERS, 0));
    ccfg.setMaxQueryProcessors(cfg.getInt(CacheConfiguration.MAX_QUERY_PROCESSORS, 0));

    ccfg.setMaxEntries(cfg.getLong(CacheConfiguration.MAX_ENTRIES, 0));
    value = cfg.get(CacheConfiguration.MAX_GLOBAL_MEMORY);
    if (value != null) {
        ccfg.setMaxGlobalMemory(Long.parseLong(value));
    } else {
        LOG.info(" Max global memory is not specified.");
    }

    value = cfg.get(CacheConfiguration.MAX_MEMORY);
    if (value != null) {
        ccfg.setMaxMemory(Long.parseLong(value));
    } else {
        LOG.info(" Max memory is not specified.");
    }

    ccfg.setCacheName(cfg.get(CacheConfiguration.NAME, "default"));

    ccfg.setCacheNamespace(cfg.get(CacheConfiguration.NAMESPACE, "default"));

    ccfg.setSerDeBufferSize(cfg.getInt(CacheConfiguration.SERDE_BUFSIZE, 4 * 1024 * 1024));

    // TODO bucket number must be calculated
    ccfg.setBucketNumber(cfg.getInt(CacheConfiguration.TOTAL_BUCKETS, 1000000));

    // Done with common cache configurations
    value = cfg.get(DiskStoreConfiguration.PERSISTENCE, "none");
    if (value.equals("none")) {
        // We are done
        return ccfg;
    }
    DiskStoreConfiguration dcfg = loadDiskStoreCfg(cfg, value);

    ccfg.setDataStoreConfiguration(dcfg);

    return ccfg;

}

From source file:com.kylinolap.common.util.HBaseRegionSizeCalculator.java

License:Apache License

boolean enabled(Configuration configuration) {
    return configuration.getBoolean(ENABLE_REGIONSIZECALCULATOR, true);
}
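
Because the default is true, the calculator runs unless a deployment explicitly opts out. A sketch of the opt-out, assuming ENABLE_REGIONSIZECALCULATOR holds the key "hbase.regionsizecalculator.enable" as in HBase's own RegionSizeCalculator:

Configuration conf = HBaseConfiguration.create();
conf.setBoolean("hbase.regionsizecalculator.enable", false); // key name assumed
// enabled(conf) now returns false, so region sizes are not fetched.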

From source file:com.linkedin.cubert.io.avro.PigAvroInputFormatAdaptor.java

License:Open Source License

@Override
protected boolean isSplitable(JobContext context, Path filename) {
    Configuration conf = context.getConfiguration();
    return !conf.getBoolean("cubert.avro.input.unsplittable", false);
}
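
Note the negation: input files are splittable by default, and setting cubert.avro.input.unsplittable to true forces one mapper per file. A sketch of a job setup that does so (the job wiring is illustrative):

Job job = Job.getInstance(new Configuration());
// Disable splitting so each Avro input file is read by a single mapper.
job.getConfiguration().setBoolean("cubert.avro.input.unsplittable", true);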

From source file:com.linkedin.cubert.io.CubertInputFormat.java

License:Open Source License

@Override
public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    ConfigurationDiff confDiff = new ConfigurationDiff(conf);

    int numMultiMappers = confDiff.getNumDiffs();

    List<InputSplit> splits = new ArrayList<InputSplit>();

    for (int mapperIndex = 0; mapperIndex < numMultiMappers; mapperIndex++) {
        // reset conf to multimapper i
        confDiff.applyDiff(mapperIndex);

        // get the actual input format
        InputFormat<K, V> actualInputFormat = getActualInputFormat(context);

        List<InputSplit> actualSplits = null;

        // check if combined input split is requested
        boolean combineSplit = conf.getBoolean(CubertStrings.COMBINED_INPUT, false);

        if (combineSplit) {
            // Create CombinedFileInputFormat
            CombineFileInputFormat<K, V> cfif = new CombineFileInputFormat<K, V>() {
                @Override
                public RecordReader<K, V> createRecordReader(InputSplit split, TaskAttemptContext context)
                        throws IOException {
                    throw new IllegalStateException("Should not be called");
                }
            };

            // get the splits
            actualSplits = cfif.getSplits(context);
        } else {
            actualSplits = actualInputFormat.getSplits(context);
        }

        // embed each split in MultiMapperSplit and add to list
        for (InputSplit actualSplit : actualSplits)
            splits.add(new MultiMapperSplit(actualSplit, mapperIndex));

        // undo the diff
        confDiff.undoDiff(mapperIndex);
    }
    return splits;
}

From source file:com.linkedin.cubert.io.rubix.RubixRecordWriter.java

License:Open Source License

@SuppressWarnings({ "unchecked", "rawtypes" })
public RubixRecordWriter(Configuration conf, FSDataOutputStream out, Class keyClass, Class valueClass,
        CompressionCodec codec) throws IOException {
    this.out = out;

    final SerializationFactory serializationFactory = new SerializationFactory(conf);
    keySerializer = serializationFactory.getSerializer(keyClass);

    ObjectMapper mapper = new ObjectMapper();
    metadataJson = mapper.readValue(conf.get(CubertStrings.JSON_METADATA), JsonNode.class);
    ((ObjectNode) metadataJson).put("keyClass", keyClass.getCanonicalName());
    ((ObjectNode) metadataJson).put("valueClass", valueClass.getCanonicalName());
    BlockSchema schema = new BlockSchema(metadataJson.get("schema"));

    if (conf.getBoolean(CubertStrings.USE_COMPACT_SERIALIZATION, false) && schema.isFlatSchema()) {
        valueSerializer = new CompactSerializer<V>(schema);
        ((ObjectNode) metadataJson).put("serializationType", BlockSerializationType.COMPACT.toString());
    } else {
        valueSerializer = serializationFactory.getSerializer(valueClass);
        ((ObjectNode) metadataJson).put("serializationType", BlockSerializationType.DEFAULT.toString());
    }

    keySerializer.open(keySectionStream);

    if (codec == null) {
        valueSerializer.open(out);
        compressedStream = null;
    } else {
        compressedStream = codec.createOutputStream(out);
        valueSerializer.open(compressedStream);
    }

}
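
Here getBoolean selects the value serializer: compact serialization is used only when the flag is set and the schema is flat. Enabling it is a one-line change to the job configuration, assuming CubertStrings.USE_COMPACT_SERIALIZATION is a public String key:

Configuration conf = new Configuration();
conf.setBoolean(CubertStrings.USE_COMPACT_SERIALIZATION, true);
// The writer still falls back to the default serializer when the block schema is not flat.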

From source file:com.linkedin.drelephant.DrElephant.java

License:Apache License

public DrElephant() throws IOException {
    HDFSContext.load();
    Configuration configuration = ElephantContext.instance().getAutoTuningConf();
    autoTuningEnabled = configuration.getBoolean(AUTO_TUNING_ENABLED, false);
    logger.debug("Auto Tuning Configuration: " + configuration.toString());
    _elephant = new ElephantRunner();
    if (autoTuningEnabled) {
        _autoTuner = new AutoTuner();
        _autoTunerThread = new Thread(_autoTuner, "Auto Tuner Thread");
    }
}

From source file:com.linkedin.drelephant.tuning.PSOParamGeneratorTest.java

License:Apache License

@Before
public void setup() {
    Map<String, String> dbConn = new HashMap<String, String>();
    dbConn.put(DB_DEFAULT_DRIVER_KEY, DB_DEFAULT_DRIVER_VALUE);
    dbConn.put(DB_DEFAULT_URL_KEY, DB_DEFAULT_URL_VALUE);
    dbConn.put(EVOLUTION_PLUGIN_KEY, EVOLUTION_PLUGIN_VALUE);
    dbConn.put(APPLY_EVOLUTIONS_DEFAULT_KEY, APPLY_EVOLUTIONS_DEFAULT_VALUE);

    GlobalSettings gs = new GlobalSettings() {
        @Override
        public void onStart(Application app) {
            LOGGER.info("Starting FakeApplication");
        }
    };

    fakeApp = fakeApplication(dbConn, gs);
    Configuration configuration = ElephantContext.instance().getAutoTuningConf();
    boolean autoTuningEnabled = configuration.getBoolean(DrElephant.AUTO_TUNING_ENABLED, false);
    org.junit.Assume.assumeTrue(autoTuningEnabled);
}