Example usage for org.apache.hadoop.conf Configuration getBoolean

List of usage examples for org.apache.hadoop.conf Configuration getBoolean

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.getBoolean.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property is specified, or if the specified value is not a valid boolean, then defaultValue is returned.
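
For orientation, here is a minimal, self-contained sketch of that lookup behavior; the property name example.feature.enabled is hypothetical, and the comments note the expected output.

import org.apache.hadoop.conf.Configuration;

public class GetBooleanSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Property not set yet: the supplied default is returned.
        boolean enabled = conf.getBoolean("example.feature.enabled", false);
        System.out.println("before set: " + enabled); // false

        // Once the property is set, its stored value wins over the default.
        conf.setBoolean("example.feature.enabled", true);
        System.out.println("after set: " + conf.getBoolean("example.feature.enabled", false)); // true
    }
}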

Usage

From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.java

License:Open Source License

/**
 * Configures GHFS using the supplied configuration.
 *
 * @param config Hadoop configuration object.
 */
private synchronized void configure(Configuration config) throws IOException {
    LOG.debug("GHFS.configure");
    LOG.debug("GHFS_ID = {}", GHFS_ID);

    if (gcsfs == null) {

        copyDeprecatedConfigurationOptions(config);

        Credential credential;
        try {
            credential = HadoopCredentialConfiguration.newBuilder().withConfiguration(config)
                    .withOverridePrefix(AUTHENTICATION_PREFIX).build()
                    .getCredential(CredentialFactory.GCS_SCOPES);
        } catch (GeneralSecurityException gse) {
            throw new IOException(gse);
        }

        GoogleCloudStorageFileSystemOptions.Builder optionsBuilder = createOptionsBuilderFromConfig(config);

        PathCodec pathCodec;
        String specifiedPathCodec = config.get(PATH_CODEC_KEY, PATH_CODEC_DEFAULT).toLowerCase();
        LOG.debug("{} = {}", PATH_CODEC_KEY, specifiedPathCodec);
        if (specifiedPathCodec.equals(PATH_CODEC_USE_LEGACY_ENCODING)) {
            pathCodec = GoogleCloudStorageFileSystem.LEGACY_PATH_CODEC;
        } else if (specifiedPathCodec.equals(PATH_CODEC_USE_URI_ENCODING)) {
            pathCodec = GoogleCloudStorageFileSystem.URI_ENCODED_PATH_CODEC;
        } else {
            pathCodec = GoogleCloudStorageFileSystem.LEGACY_PATH_CODEC;
            LOG.warn("Unknwon path codec specified {}. Using default / legacy.", specifiedPathCodec);
        }
        optionsBuilder.setPathCodec(pathCodec);
        gcsfs = new GoogleCloudStorageFileSystem(credential, optionsBuilder.build());
    }

    bufferSizeOverride = config.getInt(BUFFERSIZE_KEY, BUFFERSIZE_DEFAULT);
    LOG.debug("{} = {}", BUFFERSIZE_KEY, bufferSizeOverride);

    defaultBlockSize = config.getLong(BLOCK_SIZE_KEY, BLOCK_SIZE_DEFAULT);
    LOG.debug("{} = {}", BLOCK_SIZE_KEY, defaultBlockSize);

    String systemBucketName = config.get(GCS_SYSTEM_BUCKET_KEY, null);
    LOG.debug("{} = {}", GCS_SYSTEM_BUCKET_KEY, systemBucketName);

    boolean createSystemBucket = config.getBoolean(GCS_CREATE_SYSTEM_BUCKET_KEY,
            GCS_CREATE_SYSTEM_BUCKET_DEFAULT);
    LOG.debug("{} = {}", GCS_CREATE_SYSTEM_BUCKET_KEY, createSystemBucket);

    reportedPermissions = new FsPermission(
            config.get(PERMISSIONS_TO_REPORT_KEY, PERMISSIONS_TO_REPORT_DEFAULT));
    LOG.debug("{} = {}", PERMISSIONS_TO_REPORT_KEY, reportedPermissions);

    configureBuckets(systemBucketName, createSystemBucket);

    // Set initial working directory to root so that any configured value gets resolved
    // against file system root.
    workingDirectory = getFileSystemRoot();

    Path newWorkingDirectory;
    String configWorkingDirectory = config.get(GCS_WORKING_DIRECTORY_KEY);
    if (Strings.isNullOrEmpty(configWorkingDirectory)) {
        newWorkingDirectory = getDefaultWorkingDirectory();
        LOG.warn("No working directory configured, using default: '{}'", newWorkingDirectory);
    } else {
        newWorkingDirectory = new Path(configWorkingDirectory);
    }

    // Use the public method to ensure proper behavior of normalizing and resolving the new
    // working directory relative to the initial filesystem-root directory.
    setWorkingDirectory(newWorkingDirectory);
    LOG.debug("{} = {}", GCS_WORKING_DIRECTORY_KEY, getWorkingDirectory());

    // Set this configuration as the default config for this instance.
    setConf(config);

    LOG.debug("GHFS.configure: done");
}
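
The method above repeats one pattern: read each flag with getBoolean, log the resolved value, then act on it. A minimal sketch of the same pattern for a custom component follows; the property name example.cache.enable and the slf4j-style LOG field are assumptions, not GCS connector names.

private boolean cacheEnabled;

private void configureCache(Configuration config) {
    // Resolve the flag against a conservative default and log it, mirroring
    // the configure() method above.
    cacheEnabled = config.getBoolean("example.cache.enable", false);
    LOG.debug("{} = {}", "example.cache.enable", cacheEnabled);
    if (cacheEnabled) {
        // initialize the cache here
    }
}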

From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemBase.java

License:Open Source License

@VisibleForTesting
GoogleCloudStorageFileSystemOptions.Builder createOptionsBuilderFromConfig(Configuration config)
        throws IOException {
    GoogleCloudStorageFileSystemOptions.Builder optionsBuilder = GoogleCloudStorageFileSystemOptions
            .newBuilder();

    boolean enableMetadataCache = config.getBoolean(GCS_ENABLE_METADATA_CACHE_KEY,
            GCS_ENABLE_METADATA_CACHE_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_METADATA_CACHE_KEY, enableMetadataCache);
    optionsBuilder.setIsMetadataCacheEnabled(enableMetadataCache);

    boolean enableBucketDelete = config.getBoolean(GCE_BUCKET_DELETE_ENABLE_KEY,
            GCE_BUCKET_DELETE_ENABLE_DEFAULT);
    LOG.debug("{} = {}", GCE_BUCKET_DELETE_ENABLE_KEY, enableBucketDelete);
    optionsBuilder.setEnableBucketDelete(enableBucketDelete);

    DirectoryListCache.Type cacheType = DirectoryListCache.Type
            .valueOf(config.get(GCS_METADATA_CACHE_TYPE_KEY, GCS_METADATA_CACHE_TYPE_DEFAULT));
    LOG.debug("{} = {}", GCS_METADATA_CACHE_TYPE_KEY, cacheType);
    optionsBuilder.setCacheType(cacheType);

    String cacheBasePath = config.get(GCS_METADATA_CACHE_DIRECTORY_KEY, GCS_METADATA_CACHE_DIRECTORY_DEFAULT);
    LOG.debug("{} = {}", GCS_METADATA_CACHE_DIRECTORY_KEY, cacheBasePath);
    optionsBuilder.setCacheBasePath(cacheBasePath);

    long cacheMaxEntryAgeMillis = config.getLong(GCS_METADATA_CACHE_MAX_ENTRY_AGE_KEY,
            GCS_METADATA_CACHE_MAX_ENTRY_AGE_DEFAULT);
    LOG.debug("{} = {}", GCS_METADATA_CACHE_MAX_ENTRY_AGE_KEY, cacheMaxEntryAgeMillis);
    optionsBuilder.setCacheMaxEntryAgeMillis(cacheMaxEntryAgeMillis);

    long cacheMaxInfoAgeMillis = config.getLong(GCS_METADATA_CACHE_MAX_INFO_AGE_KEY,
            GCS_METADATA_CACHE_MAX_INFO_AGE_DEFAULT);
    LOG.debug("{} = {}", GCS_METADATA_CACHE_MAX_INFO_AGE_KEY, cacheMaxInfoAgeMillis);
    optionsBuilder.setCacheMaxInfoAgeMillis(cacheMaxInfoAgeMillis);

    GoogleCloudStorageFileSystemOptions.TimestampUpdatePredicate updatePredicate = ParentTimestampUpdateIncludePredicate
            .create(config);
    optionsBuilder.setShouldIncludeInTimestampUpdatesPredicate(updatePredicate);

    enableAutoRepairImplicitDirectories = config.getBoolean(GCS_ENABLE_REPAIR_IMPLICIT_DIRECTORIES_KEY,
            GCS_ENABLE_REPAIR_IMPLICIT_DIRECTORIES_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_REPAIR_IMPLICIT_DIRECTORIES_KEY, enableAutoRepairImplicitDirectories);

    enableInferImplicitDirectories = config.getBoolean(GCS_ENABLE_INFER_IMPLICIT_DIRECTORIES_KEY,
            GCS_ENABLE_INFER_IMPLICIT_DIRECTORIES_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_INFER_IMPLICIT_DIRECTORIES_KEY, enableInferImplicitDirectories);

    enableFlatGlob = config.getBoolean(GCS_ENABLE_FLAT_GLOB_KEY, GCS_ENABLE_FLAT_GLOB_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_FLAT_GLOB_KEY, enableFlatGlob);

    optionsBuilder.getCloudStorageOptionsBuilder()
            .setAutoRepairImplicitDirectoriesEnabled(enableAutoRepairImplicitDirectories)
            .setInferImplicitDirectoriesEnabled(enableInferImplicitDirectories);

    boolean enableMarkerFileCreation = config.getBoolean(GCS_ENABLE_MARKER_FILE_CREATION_KEY,
            GCS_ENABLE_MARKER_FILE_CREATION_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_MARKER_FILE_CREATION_KEY, enableMarkerFileCreation);

    optionsBuilder.getCloudStorageOptionsBuilder().setCreateMarkerObjects(enableMarkerFileCreation);

    String transportTypeString = config.get(GCS_HTTP_TRANSPORT_KEY, GCS_HTTP_TRANSPORT_DEFAULT);
    String proxyAddress = config.get(GCS_PROXY_ADDRESS_KEY, GCS_PROXY_ADDRESS_DEFAULT);
    HttpTransportFactory.HttpTransportType transportType = HttpTransportFactory
            .getTransportTypeOf(transportTypeString);

    optionsBuilder.getCloudStorageOptionsBuilder().setTransportType(transportType)
            .setProxyAddress(proxyAddress);

    String projectId = ConfigurationUtil.getMandatoryConfig(config, GCS_PROJECT_ID_KEY);

    optionsBuilder.getCloudStorageOptionsBuilder().setProjectId(projectId);

    long maxListItemsPerCall = config.getLong(GCS_MAX_LIST_ITEMS_PER_CALL, GCS_MAX_LIST_ITEMS_PER_CALL_DEFAULT);

    optionsBuilder.getCloudStorageOptionsBuilder().setMaxListItemsPerCall(maxListItemsPerCall);

    // Configuration for setting 250GB upper limit on file size to gain higher write throughput.
    boolean limitFileSizeTo250Gb = config.getBoolean(GCS_FILE_SIZE_LIMIT_250GB,
            GCS_FILE_SIZE_LIMIT_250GB_DEFAULT);

    optionsBuilder.getCloudStorageOptionsBuilder().getWriteChannelOptionsBuilder()
            .setFileSizeLimitedTo250Gb(limitFileSizeTo250Gb);

    // Configuration for setting GoogleCloudStorageWriteChannel upload buffer size.
    int uploadBufferSize = config.getInt(WRITE_BUFFERSIZE_KEY, WRITE_BUFFERSIZE_DEFAULT);
    LOG.debug("{} = {}", WRITE_BUFFERSIZE_KEY, uploadBufferSize);

    optionsBuilder.getCloudStorageOptionsBuilder().getWriteChannelOptionsBuilder()
            .setUploadBufferSize(uploadBufferSize);

    String applicationNameSuffix = config.get(GCS_APPLICATION_NAME_SUFFIX_KEY,
            GCS_APPLICATION_NAME_SUFFIX_DEFAULT);
    LOG.debug("{} = {}", GCS_APPLICATION_NAME_SUFFIX_KEY, applicationNameSuffix);

    String applicationName = GHFS_ID;
    if (!Strings.isNullOrEmpty(applicationNameSuffix)) {
        applicationName = applicationName + applicationNameSuffix;
    }

    LOG.debug("Setting GCS application name to {}", applicationName);
    optionsBuilder.getCloudStorageOptionsBuilder().setAppName(applicationName);

    boolean enablePerformanceCache = config.getBoolean(GCS_ENABLE_PERFORMANCE_CACHE_KEY,
            GCS_ENABLE_PERFORMANCE_CACHE_DEFAULT);
    LOG.debug("{} = {}", GCS_ENABLE_PERFORMANCE_CACHE_KEY, enablePerformanceCache);
    optionsBuilder.setIsPerformanceCacheEnabled(enablePerformanceCache);

    long performanceCacheMaxEntryAgeMillis = config.getLong(GCS_PERFORMANCE_CACHE_MAX_ENTRY_AGE_MILLIS_KEY,
            GCS_PERFORMANCE_CACHE_MAX_ENTRY_AGE_MILLIS_DEFAULT);
    LOG.debug("{} = {}", GCS_PERFORMANCE_CACHE_MAX_ENTRY_AGE_MILLIS_KEY, performanceCacheMaxEntryAgeMillis);

    boolean listCachingEnabled = config.getBoolean(GCS_PERFORMANCE_CACHE_LIST_CACHING_ENABLE_KEY,
            GCS_PERFORMANCE_CACHE_LIST_CACHING_ENABLE_DEFAULT);
    LOG.debug("{} = {}", GCS_PERFORMANCE_CACHE_LIST_CACHING_ENABLE_KEY, listCachingEnabled);
    optionsBuilder.getPerformanceCachingOptionsBuilder().setMaxEntryAgeMillis(performanceCacheMaxEntryAgeMillis)
            .setInferImplicitDirectoriesEnabled(enableInferImplicitDirectories)
            .setListCachingEnabled(listCachingEnabled);

    return optionsBuilder;
}

From source file:com.hadoop.compression.fourmc.FourMcInputFormatUtil.java

License:BSD License

public static boolean getIgnoreNon4mcProperty(Configuration conf) {
    return conf.getBoolean(IGNORE_NONFOURMC_KEY, DEFAULT_IGNORE_NONFOURMC);
}

From source file:com.hadoop.compression.fourmc.ZstCodec.java

License:BSD License

public static boolean isNativeLoaded(Configuration conf) {
    assert conf != null : "Configuration cannot be null!";
    return nativeLoaded && conf.getBoolean("hadoop.native.lib", true);
}
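
One plausible way to use this check when wiring up output compression; the mapreduce.output.fileoutputformat.compress* keys are standard Hadoop 2 names, and it is assumed here that ZstCodec implements CompressionCodec, as the 4mc codecs do.

Configuration conf = new Configuration();
if (ZstCodec.isNativeLoaded(conf)) {
    // Native support is present: compress job output with the 4mc ZSTD codec.
    conf.setBoolean("mapreduce.output.fileoutputformat.compress", true);
    conf.setClass("mapreduce.output.fileoutputformat.compress.codec",
            ZstCodec.class, org.apache.hadoop.io.compress.CompressionCodec.class);
} else {
    // No native library: fall back to uncompressed output.
    conf.setBoolean("mapreduce.output.fileoutputformat.compress", false);
}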

From source file:com.hadoop.compression.lzo.LzoCodec.java

License:Open Source License

/**
 * Check if native-lzo library is loaded & initialized.
 *
 * @param conf configuration
 * @return <code>true</code> if native-lzo library is loaded & initialized;
 *         else <code>false</code>
 */
public static synchronized boolean isNativeLzoLoaded(Configuration conf) {
    if (!nativeLzoChecked) {
        if (GPLNativeCodeLoader.isNativeCodeLoaded()) {
            nativeLzoLoaded = LzoCompressor.isNativeLzoLoaded() && LzoDecompressor.isNativeLzoLoaded();

            if (nativeLzoLoaded) {
                LOG.info("Successfully loaded & initialized native-lzo library");
            } else {
                LOG.error("Failed to load/initialize native-lzo library");
            }
        } else {
            LOG.error("Cannot load native-lzo without native-hadoop");
        }
        nativeLzoChecked = true;
    }

    return nativeLzoLoaded && conf.getBoolean("hadoop.native.lib", true);
}

From source file:com.hadoop.compression.lzo.LzoInputFormatCommon.java

License:Open Source License

/**
 * @param conf the Configuration object
 * @return the value of the <code>lzo.text.input.format.ignore.nonlzo</code>
 *         property in <code>conf</code>, or <code>DEFAULT_IGNORE_NONLZO</code>
 *         if the property is not set.
 */
public static boolean getIgnoreNonLzoProperty(Configuration conf) {
    return conf.getBoolean(IGNORE_NONLZO_KEY, DEFAULT_IGNORE_NONLZO);
}
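
A short usage sketch for this flag, using the literal property name quoted in the javadoc above:

Configuration conf = new Configuration();
// Skip files that do not carry the .lzo suffix instead of reading them
// as plain text.
conf.setBoolean("lzo.text.input.format.ignore.nonlzo", true);
boolean ignoreNonLzo = LzoInputFormatCommon.getIgnoreNonLzoProperty(conf);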

From source file:com.hadoop.mapreduce.FourMcInputFormat.java

License:BSD License

@Override
protected List<FileStatus> listStatus(JobContext job) throws IOException {
    List<FileStatus> files = super.listStatus(job);
    List<FileStatus> results = new ArrayList<FileStatus>();
    Configuration conf = HadoopUtils.getConfiguration(job);
    boolean recursive = conf.getBoolean("mapred.input.dir.recursive", false);
    Iterator<FileStatus> it = files.iterator();
    while (it.hasNext()) {
        FileStatus fileStatus = it.next();
        FileSystem fs = fileStatus.getPath().getFileSystem(conf);
        addInputPath(results, fs, fileStatus, recursive);
    }

    LOG.debug("Total 4mc input paths to process: " + results.size());
    return results;
}
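
A sketch of job setup that turns on the recursive listing consulted above; the job name and input path are placeholders.

Configuration conf = new Configuration();
// Legacy key read by listStatus() above; newer Hadoop versions also honor
// mapreduce.input.fileinputformat.input.dir.recursive.
conf.setBoolean("mapred.input.dir.recursive", true);
Job job = Job.getInstance(conf, "4mc-example");
job.setInputFormatClass(FourMcInputFormat.class);
FileInputFormat.addInputPath(job, new Path("/data/in"));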

From source file:com.hadoop.mapreduce.FourMzInputFormat.java

License:BSD License

@Override
protected List<FileStatus> listStatus(JobContext job) throws IOException {
    List<FileStatus> files = super.listStatus(job);
    List<FileStatus> results = new ArrayList<FileStatus>();
    Configuration conf = HadoopUtils.getConfiguration(job);
    boolean recursive = conf.getBoolean("mapred.input.dir.recursive", false);
    Iterator<FileStatus> it = files.iterator();
    while (it.hasNext()) {
        FileStatus fileStatus = it.next();
        FileSystem fs = fileStatus.getPath().getFileSystem(conf);
        addInputPath(results, fs, fileStatus, recursive);
    }

    LOG.debug("Total 4mz input paths to process: " + results.size());
    return results;
}

From source file:com.hortonworks.hbase.replication.bridge.HBaseClient.java

License:Apache License

/**
 * Construct an IPC client whose values are of the given {@link Writable}
 * class.
 * @param valueClass value class
 * @param conf configuration
 * @param factory socket factory
 */
public HBaseClient(Class<? extends Writable> valueClass, Configuration conf, SocketFactory factory) {
    this.valueClass = valueClass;
    this.maxIdleTime = conf.getInt("hbase.ipc.client.connection.maxidletime", 10000); //10s
    this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0);
    this.failureSleep = conf.getInt("hbase.client.pause", 1000);
    this.tcpNoDelay = conf.getBoolean("hbase.ipc.client.tcpnodelay", false);
    this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true);
    this.pingInterval = getPingInterval(conf);
    if (LOG.isDebugEnabled()) {
        LOG.debug("The ping interval is" + this.pingInterval + "ms.");
    }
    this.conf = conf;
    this.socketFactory = factory;
    this.clusterId = conf.get(HConstants.CLUSTER_ID, "default");
    this.connections = new PoolMap<ConnectionId, Connection>(getPoolType(conf), getPoolSize(conf));
    this.failedServers = new FailedServers(conf);
}
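
A sketch of how a caller could override the two boolean socket options read by this constructor; the keys are taken from the code above, and HBaseConfiguration.create() is assumed as the usual way to obtain an HBase-aware Configuration.

Configuration conf = HBaseConfiguration.create();
// Favor low-latency RPCs and keep idle connections alive, overriding the
// defaults read in the constructor above.
conf.setBoolean("hbase.ipc.client.tcpnodelay", true);
conf.setBoolean("hbase.ipc.client.tcpkeepalive", true);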

From source file:com.hortonworks.hbase.replication.bridge.HBaseServer.java

License:Apache License

protected HBaseServer(String bindAddress, int port, Class<? extends Writable> paramClass, int handlerCount,
        int priorityHandlerCount, Configuration conf, String serverName, int highPriorityLevel)
        throws IOException {
    this.bindAddress = bindAddress;
    this.conf = conf;
    this.port = port;
    this.paramClass = paramClass;
    this.handlerCount = handlerCount;
    this.priorityHandlerCount = priorityHandlerCount;
    this.socketSendBufferSize = 0;

    // temporary backward compatibility
    String oldMaxQueueSize = this.conf.get("ipc.server.max.queue.size");
    if (oldMaxQueueSize == null) {
        this.maxQueueLength = this.conf.getInt("ipc.server.max.callqueue.length",
                handlerCount * DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
    } else {
        LOG.warn("ipc.server.max.queue.size was renamed " + "ipc.server.max.callqueue.length, "
                + "please update your configuration");
        // Parse the deprecated value itself (Integer.getInteger would instead
        // read a system property named by this string).
        this.maxQueueLength = Integer.parseInt(oldMaxQueueSize);
    }

    this.maxQueueSize = this.conf.getInt("ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
    this.readThreads = conf.getInt("ipc.server.read.threadpool.size", 10);
    this.callQueue = new LinkedBlockingQueue<Call>(maxQueueLength);
    if (priorityHandlerCount > 0) {
        this.priorityCallQueue = new LinkedBlockingQueue<Call>(maxQueueLength); // TODO hack on size
    } else {
        this.priorityCallQueue = null;
    }
    this.highPriorityLevel = highPriorityLevel;
    this.maxIdleTime = 2 * conf.getInt("ipc.client.connection.maxidletime", 1000);
    this.maxConnectionsToNuke = conf.getInt("ipc.client.kill.max", 10);
    this.thresholdIdleConnections = conf.getInt("ipc.client.idlethreshold", 4000);
    this.purgeTimeout = conf.getLong("ipc.client.call.purge.timeout", 2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
    this.numOfReplicationHandlers = conf.getInt("hbase.regionserver.replication.handler.count", 3);
    if (numOfReplicationHandlers > 0) {
        this.replicationQueue = new LinkedBlockingQueue<Call>(maxQueueSize);
    }
    // Start the listener here and let it bind to the port
    listener = new Listener();
    this.port = listener.getAddress().getPort();
    this.rpcMetrics = new HBaseRpcMetrics(serverName, Integer.toString(this.port));
    this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false);
    this.tcpKeepAlive = conf.getBoolean("ipc.server.tcpkeepalive", true);

    this.warnDelayedCalls = conf.getInt(WARN_DELAYED_CALLS, DEFAULT_WARN_DELAYED_CALLS);
    this.delayedCalls = new AtomicInteger(0);

    this.responseQueuesSizeThrottler = new SizeBasedThrottler(
            conf.getLong(RESPONSE_QUEUES_MAX_SIZE, DEFAULT_RESPONSE_QUEUES_MAX_SIZE));

    // Create the responder here
    responder = new Responder();
}