Example usage for org.apache.hadoop.conf Configuration getBoolean

List of usage examples for org.apache.hadoop.conf Configuration getBoolean

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration.getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property is specified, or if the specified value is not a valid boolean, then defaultValue is returned.
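
Before the project excerpts below, here is a minimal, self-contained sketch (not taken from any of those sources; the property names are hypothetical) showing both behaviors: a key that is set explicitly and a key that falls back to the supplied default.

import org.apache.hadoop.conf.Configuration;

public class GetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Key set explicitly; getBoolean parses the stored "true"/"false" string.
        conf.set("example.feature.enabled", "true"); // hypothetical property name
        boolean enabled = conf.getBoolean("example.feature.enabled", false);

        // Key never set; the defaultValue argument (false) is returned.
        boolean verbose = conf.getBoolean("example.rpc.verbose", false); // hypothetical property name

        System.out.println("enabled=" + enabled + ", verbose=" + verbose);
    }
}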

Usage

From source file:com.hortonworks.hbase.replication.bridge.ReplicationBridgeServer.java

License:Apache License

/**
 * Starts a HRegionServer at the default location
 *
 * @param conf
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException 
 * @throws ZkConnectException 
 */
public ReplicationBridgeServer(Configuration conf) throws IOException, InterruptedException, KeeperException {
    this.conf = conf;

    // Set how many times to retry talking to another server over HConnection.
    HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);

    // Server to handle client requests.
    String hostname = conf.get("hbase.regionserver.ipc.address",
            Strings.domainNamePointerToHostName(
                    DNS.getDefaultHost(conf.get("hbase.regionserver.dns.interface", "default"),
                            conf.get("hbase.regionserver.dns.nameserver", "default"))));
    port = conf.getInt("hbase.bridge.server.port", BRIDGE_SERVER_PORT);
    // Creation of a HSA will force a resolve.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }

    this.rpcServer = HBaseRPC.getServer(this, new Class<?>[] { HRegionInterface.class },
            initialIsa.getHostName(), // BindAddress is IP we got for this server.
            initialIsa.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
            conf.getInt("hbase.regionserver.metahandler.count", 10),
            conf.getBoolean("hbase.rpc.verbose", false), conf, HConstants.QOS_THRESHOLD);
}

From source file:com.ibm.stocator.fs.ObjectStoreFileSystem.java

License:Open Source License

@Override
public void initialize(URI fsuri, Configuration conf) throws IOException {
    super.initialize(fsuri, conf);
    if (!conf.getBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", true)) {
        throw new IOException("mapreduce.fileoutputcommitter.marksuccessfuljobs should be enabled");
    }
    setConf(conf);
    String nameSpace = fsuri.toString().substring(0, fsuri.toString().indexOf("://"));
    if (storageClient == null) {
        storageClient = ObjectStoreVisitor.getStoreClient(nameSpace, fsuri, conf);
        hostNameScheme = storageClient.getScheme() + "://" + Utils.getHost(fsuri) + "/";
    }
}

From source file:com.idvp.platform.hdfs.HDFSDataStream.java

License:Apache License

protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {

    boolean appending = false;
    if (conf.getBoolean("hdfs.append.support", false) == true && hdfs.isFile(dstPath)) {
        outStream = hdfs.append(dstPath);
        appending = true;
    } else {
        outStream = hdfs.create(dstPath);
    }

    serializer = new BodyTextEventSerializer.Builder().build(outStream);
    if (appending && !serializer.supportsReopen()) {
        outStream.close();
        serializer = null;
        throw new IOException("serializer (" + "TEXT" + ") does not support append");
    }

    // must call superclass to check for replication issues
    registerCurrentStream(outStream, hdfs, dstPath);

    if (appending) {
        serializer.afterReopen();
    } else {
        serializer.afterCreate();
    }
}

From source file:com.iflytek.spider.crawl.AdaptiveFetchSchedule.java

License:Apache License

public void setConf(Configuration conf) {
    super.setConf(conf);
    if (conf == null)
        return;
    INC_RATE = conf.getFloat("db.fetch.schedule.adaptive.inc_rate", 0.2f);
    DEC_RATE = conf.getFloat("db.fetch.schedule.adaptive.dec_rate", 0.2f);
    MIN_INTERVAL = conf.getInt("db.fetch.schedule.adaptive.min_interval", 60);
    MAX_INTERVAL = conf.getInt("db.fetch.schedule.adaptive.max_interval", SECONDS_PER_DAY * 365); // 1 year
    SYNC_DELTA = conf.getBoolean("db.fetch.schedule.adaptive.sync_delta", true);
    SYNC_DELTA_RATE = conf.getFloat("db.fetch.schedule.adaptive.sync_delta_rate", 0.2f);
}

From source file:com.iflytek.spider.protocol.http.HttpBase.java

License:Apache License

public void setConf(Configuration conf) {
    this.conf = conf;
    this.proxyHost = conf.get("http.proxy.host");
    this.proxyPort = conf.getInt("http.proxy.port", 8080);
    this.useProxy = (proxyHost != null && proxyHost.length() > 0);
    this.timeout = conf.getInt("http.timeout", 10000);
    this.maxContent = conf.getInt("http.content.limit", 64 * 1024);
    this.maxDelays = conf.getInt("http.max.delays", 3);
    this.maxThreadsPerHost = conf.getInt("fetcher.threads.per.host", 1);
    this.userAgent = getAgentString(conf.get("http.agent.name"), conf.get("http.agent.version"),
            conf.get("http.agent.description"), conf.get("http.agent.url"), conf.get("http.agent.email"));
    this.acceptLanguage = conf.get("http.accept.language", acceptLanguage);
    this.serverDelay = (long) (conf.getFloat("fetcher.server.delay", 1.0f) * 1000);
    this.maxCrawlDelay = (long) (conf.getInt("fetcher.max.crawl.delay", -1) * 1000);
    // backward-compatible default setting
    this.byIP = conf.getBoolean("fetcher.threads.per.host.by.ip", true);
    this.useHttp11 = conf.getBoolean("http.useHttp11", false);
    //logConf();
}

From source file:com.iflytek.spider.protocol.http.HttpBaseSimply.java

License:Apache License

public void setConf(Configuration conf) {
    this.conf = conf;
    this.proxyHost = conf.get("http.proxy.host");
    this.proxyPort = conf.getInt("http.proxy.port", 8080);
    this.useProxy = (proxyHost != null && proxyHost.length() > 0);
    this.timeout = conf.getInt("http.timeout", 10000);
    this.maxContent = conf.getInt("http.content.limit", 64 * 1024);
    this.userAgent = getAgentString(conf.get("http.agent.name"), conf.get("http.agent.version"),
            conf.get("http.agent.description"), conf.get("http.agent.url"), conf.get("http.agent.email"));
    this.acceptLanguage = conf.get("http.accept.language", acceptLanguage);
    this.useHttp11 = conf.getBoolean("http.useHttp11", false);
    //logConf();
}

From source file:com.ikanow.infinit.e.data_model.custom.InfiniteMongoConfigUtil.java

License:Apache License

public static boolean getUpdateModeIncremental(Configuration conf) {
    return conf.getBoolean(UPDATE_MODE, false);
}

From source file:com.ikanow.infinit.e.data_model.custom.InfiniteMongoConfigUtil.java

License:Apache License

public static boolean getIsAdmin(Configuration conf) {
    return conf.getBoolean(IS_ADMIN, false);
}

From source file:com.inclouds.hbase.utils.ConfigHelper.java

License:Open Source License

/**
 * Gets the cache configuration.
 *
 * @param cfg the cfg
 * @return the cache configuration
 */
public static CacheConfiguration getCacheConfiguration(Configuration cfg) {

    CacheConfiguration ccfg = new CacheConfiguration();
    String value = cfg.get(CacheConfiguration.COMPRESSION, "none");
    //TODO not safe
    ccfg.setCodecType(CodecType.valueOf(value.toUpperCase()));
    ccfg.setCompressionThreshold(cfg.getInt(CacheConfiguration.COMPRESSION_THRESHOLD, 100));
    ccfg.setDefaultExpireTimeout(cfg.getInt(CacheConfiguration.DEFAULT_EXPIRE_TIMEOUT, 0));
    ccfg.setEvictOnExpireFirst(cfg.getBoolean(CacheConfiguration.EVICT_EXPIRED_FIRST, true));
    ccfg.setCandidateListSize((cfg.getInt(CacheConfiguration.EVICTION_LIST_SIZE, 30)));
    ccfg.setEvictionPolicy((cfg.get(CacheConfiguration.EVICTION_POLICY, "lru")));
    ccfg.setHighWatermark(cfg.getFloat(CacheConfiguration.HIGH_WATERMARK, 0.95f));
    ccfg.setLowWatermark(cfg.getFloat(CacheConfiguration.LOW_WATERMARK, 0.90f));

    value = cfg.get(CacheConfiguration.KEY_CLASSNAME);
    if (value != null) {
        ccfg.setKeyClassName(value);
    }

    value = cfg.get(CacheConfiguration.VALUE_CLASSNAME);
    if (value != null) {
        ccfg.setValueClassName(value);
    }

    ccfg.setMaxConcurrentReaders(cfg.getInt(CacheConfiguration.MAX_CONCURRENT_READERS, 0));
    ccfg.setMaxQueryProcessors(cfg.getInt(CacheConfiguration.MAX_QUERY_PROCESSORS, 0));

    ccfg.setMaxEntries(cfg.getLong(CacheConfiguration.MAX_ENTRIES, 0));
    value = cfg.get(CacheConfiguration.MAX_GLOBAL_MEMORY);
    if (value != null) {
        ccfg.setMaxGlobalMemory(Long.parseLong(value));
    } else {
        LOG.warn("[row-cache] Max global memory is not specified.");
    }

    value = cfg.get(CacheConfiguration.MAX_MEMORY);
    if (value != null) {
        ccfg.setMaxMemory(Long.parseLong(value));
    } else {
        LOG.info("[row-cache] Max memory is not specified.");
    }

    ccfg.setCacheName(cfg.get(CacheConfiguration.NAME, "row-cache"));

    ccfg.setCacheNamespace(cfg.get(CacheConfiguration.NAMESPACE, "default"));

    ccfg.setSerDeBufferSize(cfg.getInt(RowCache.ROWCACHE_BUFFER_SIZE, RowCache.DEFAULT_BUFFER_SIZE));

    // TODO bucket number must be calculated
    ccfg.setBucketNumber(cfg.getInt(CacheConfiguration.TOTAL_BUCKETS, 1000000));

    // Done with common cache configurations
    value = cfg.get(DiskStoreConfiguration.PERSISTENCE, "none");
    if (value.equals("none")) {
        // We are done
        return ccfg;
    }
    DiskStoreConfiguration dcfg = loadDiskStoreCfg(cfg, value);

    ccfg.setDataStoreConfiguration(dcfg);

    return ccfg;

}

From source file:com.inforefiner.hdata.ApplicationMaster.java

License:Apache License

@VisibleForTesting
void startTimelineClient(final Configuration conf) throws YarnException, IOException, InterruptedException {
    try {
        appSubmitterUgi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
                        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
                    // Creating the Timeline Client
                    timelineClient = TimelineClient.createTimelineClient();
                    timelineClient.init(conf);
                    timelineClient.start();
                } else {
                    timelineClient = null;
                    LOG.warn("Timeline service is not enabled");
                }
                return null;
            }
        });
    } catch (UndeclaredThrowableException e) {
        throw new YarnException(e.getCause());
    }
}