Example usage for org.apache.hadoop.conf.Configuration.setBoolean

List of usage examples for org.apache.hadoop.conf.Configuration.setBoolean

Introduction

On this page you can find example usages of org.apache.hadoop.conf.Configuration.setBoolean.

Prototype

public void setBoolean(String name, boolean value) 

Document

Set the value of the name property to a boolean.
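
As a quick orientation before the project examples below, here is a minimal, self-contained sketch of the call. The property name "example.feature.enabled" is invented for illustration: setBoolean writes the flag, and getBoolean reads it back, with the second argument supplying a default when the property is unset.

import org.apache.hadoop.conf.Configuration;

public class SetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Hypothetical property name, used only for this sketch
        conf.setBoolean("example.feature.enabled", true);

        // Read the flag back; the second argument is the default used when the property is unset
        boolean enabled = conf.getBoolean("example.feature.enabled", false);
        System.out.println("example.feature.enabled = " + enabled);
    }
}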

Usage

From source file: com.facebook.presto.hive.s3.PrestoS3ConfigurationUpdater.java

License: Apache License

@Override
public void updateConfiguration(Configuration config) {
    // re-map filesystem schemes to match Amazon Elastic MapReduce
    config.set("fs.s3.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3a.impl", PrestoS3FileSystem.class.getName());
    config.set("fs.s3n.impl", PrestoS3FileSystem.class.getName());

    if (awsAccessKey != null) {
        config.set(PrestoS3FileSystem.S3_ACCESS_KEY, awsAccessKey);
    }
    if (awsSecretKey != null) {
        config.set(PrestoS3FileSystem.S3_SECRET_KEY, awsSecretKey);
    }
    if (endpoint != null) {
        config.set(PrestoS3FileSystem.S3_ENDPOINT, endpoint);
    }
    if (signerType != null) {
        config.set(PrestoS3FileSystem.S3_SIGNER_TYPE, signerType.name());
    }
    config.setBoolean(PrestoS3FileSystem.S3_PATH_STYLE_ACCESS, pathStyleAccess);
    config.setBoolean(PrestoS3FileSystem.S3_USE_INSTANCE_CREDENTIALS, useInstanceCredentials);
    config.setBoolean(PrestoS3FileSystem.S3_SSL_ENABLED, sslEnabled);
    config.setBoolean(PrestoS3FileSystem.S3_SSE_ENABLED, sseEnabled);
    config.set(PrestoS3FileSystem.S3_SSE_TYPE, sseType.name());
    if (encryptionMaterialsProvider != null) {
        config.set(PrestoS3FileSystem.S3_ENCRYPTION_MATERIALS_PROVIDER, encryptionMaterialsProvider);
    }
    if (kmsKeyId != null) {
        config.set(PrestoS3FileSystem.S3_KMS_KEY_ID, kmsKeyId);
    }
    if (sseKmsKeyId != null) {
        config.set(PrestoS3FileSystem.S3_SSE_KMS_KEY_ID, sseKmsKeyId);
    }
    config.setInt(PrestoS3FileSystem.S3_MAX_CLIENT_RETRIES, maxClientRetries);
    config.setInt(PrestoS3FileSystem.S3_MAX_ERROR_RETRIES, maxErrorRetries);
    config.set(PrestoS3FileSystem.S3_MAX_BACKOFF_TIME, maxBackoffTime.toString());
    config.set(PrestoS3FileSystem.S3_MAX_RETRY_TIME, maxRetryTime.toString());
    config.set(PrestoS3FileSystem.S3_CONNECT_TIMEOUT, connectTimeout.toString());
    config.set(PrestoS3FileSystem.S3_SOCKET_TIMEOUT, socketTimeout.toString());
    config.set(PrestoS3FileSystem.S3_STAGING_DIRECTORY, stagingDirectory.toString());
    config.setInt(PrestoS3FileSystem.S3_MAX_CONNECTIONS, maxConnections);
    config.setLong(PrestoS3FileSystem.S3_MULTIPART_MIN_FILE_SIZE, multipartMinFileSize.toBytes());
    config.setLong(PrestoS3FileSystem.S3_MULTIPART_MIN_PART_SIZE, multipartMinPartSize.toBytes());
    config.setBoolean(PrestoS3FileSystem.S3_PIN_CLIENT_TO_CURRENT_REGION, pinClientToCurrentRegion);
    config.set(PrestoS3FileSystem.S3_USER_AGENT_PREFIX, userAgentPrefix);
}

From source file: com.facebook.presto.hive.s3.TestPrestoS3FileSystem.java

License: Apache License

@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "S3 credentials not configured")
public void testInstanceCredentialsDisabled() throws Exception {
    Configuration config = new Configuration();
    config.setBoolean(S3_USE_INSTANCE_CREDENTIALS, false);

    try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
        fs.initialize(new URI("s3n://test-bucket/"), config);
    }
}

From source file: com.facebook.presto.hive.s3.TestPrestoS3FileSystem.java

License: Apache License

@Test
public void testPathStyleAccess() throws Exception {
    Configuration config = new Configuration();
    config.setBoolean(S3_PATH_STYLE_ACCESS, true);

    try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
        fs.initialize(new URI("s3n://test-bucket/"), config);
        S3ClientOptions clientOptions = getFieldValue(fs.getS3Client(), AmazonS3Client.class, "clientOptions",
                S3ClientOptions.class);
        assertTrue(clientOptions.isPathStyleAccess());
    }
}

From source file: com.facebook.presto.hive.s3.TestPrestoS3FileSystem.java

License: Apache License

@Test
public void testUnderscoreBucket() throws Exception {
    Configuration config = new Configuration();
    config.setBoolean(S3_PATH_STYLE_ACCESS, true);

    try (PrestoS3FileSystem fs = new PrestoS3FileSystem()) {
        MockAmazonS3 s3 = new MockAmazonS3();
        String expectedBucketName = "test-bucket_underscore";
        fs.initialize(new URI("s3n://" + expectedBucketName + "/"), config);
        fs.setS3Client(s3);
        fs.getS3ObjectMetadata(new Path("/test/path"));
        assertEquals(expectedBucketName, s3.getGetObjectMetadataRequest().getBucketName());
    }
}

From source file: com.flipkart.aesop.runtime.producer.hbase.HBaseEventProducer.java

License: Apache License

/**
 * Interface method implementation. Starts up the SEP consumer
 * @see com.linkedin.databus2.producers.EventProducer#start(long)
 */
public void start(long sinceSCN) {
    shutdownRequested.set(false);
    this.sinceSCN.set(sinceSCN);
    LOGGER.info("Starting SEP subscription : " + this.getName());
    LOGGER.info("ZK quorum hosts : " + this.zkQuorum);
    LOGGER.info("ZK client port : " + this.zkClientPort);
    LOGGER.info("ZK session timeout : " + this.zkSessionTimeout);
    LOGGER.info("RPC timeout : " + this.rpcTimeout);
    LOGGER.info("Using hostname to bind to : " + this.localHost);
    LOGGER.info("Using worker threads : " + this.workerThreads);
    LOGGER.info("Listening to WAL edits from : " + this.sinceSCN);
    try {
        Configuration hbaseConf = HBaseConfiguration.create();
        // enable replication to get WAL edits
        hbaseConf.setBoolean(HBASE_REPLICATION_CONFIG, true);
        // need to explicitly set the ZK host and port details - hosts separated from port - see SepModelImpl constructor source code
        hbaseConf.set(ZK_QUORUM_CONFIG, this.zkQuorum);
        hbaseConf.setInt(ZK_CLIENT_PORT_CONFIG, this.zkClientPort);
        hbaseConf.setInt(RPC_TIMEOUT_CONFIG, this.rpcTimeout);

        StringBuilder zkQuorumWithPort = new StringBuilder();
        String[] zkHostsList = this.zkQuorum.split(",");
        for (String zkHost : zkHostsList) {
            zkQuorumWithPort.append(zkHost);
            zkQuorumWithPort.append(":");
            zkQuorumWithPort.append(this.zkClientPort);
            zkQuorumWithPort.append(",");
        }

        LOGGER.info("ZK util connect string (host:port) : " + zkQuorumWithPort.toString());
        ZooKeeperItf zk = ZkUtil.connect(zkQuorumWithPort.toString(), this.zkSessionTimeout);

        StringBuilder hbaseConfBuilder = new StringBuilder();
        Iterator<Entry<String, String>> it = hbaseConf.iterator();
        while (it.hasNext()) {
            Entry<String, String> entry = it.next();
            if (entry.getKey().equalsIgnoreCase(HBASE_REPLICATION_CONFIG)
                    || entry.getKey().equalsIgnoreCase(ZK_QUORUM_CONFIG)
                    || entry.getKey().equalsIgnoreCase(ZK_CLIENT_PORT_CONFIG)) {
                hbaseConfBuilder.append(entry.getKey());
                hbaseConfBuilder.append(":");
                hbaseConfBuilder.append(entry.getValue());
                hbaseConfBuilder.append(",");
            }
        }
        LOGGER.info("SEP Model Hbase configuration = " + hbaseConfBuilder.toString());
        SepModel sepModel = new SepModelImpl(zk, hbaseConf);

        final String subscriptionName = this.getName();

        if (!sepModel.hasSubscription(subscriptionName)) {
            sepModel.addSubscriptionSilent(subscriptionName);
        }
        this.sepConsumer = new SepConsumer(subscriptionName, generateSEPSCN(this.sinceSCN.get()),
                new RelayAppender(), this.workerThreads, this.localHost, zk, hbaseConf);
        this.sepConsumer.start();
    } catch (Exception e) {
        LOGGER.error(
                "Error starting WAL edits consumer. Producer not started! Error message: " + e.getMessage(),
                e);
    }
}

From source file: com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl.java

License: Apache License

private FileSystem createFileSystem(Configuration hconf, String configFile, boolean forceNew)
        throws IOException {
    FileSystem filesystem = null;

    // load hdfs client config file if specified. The path is on local file
    // system
    if (configFile != null) {
        if (logger.isDebugEnabled()) {
            logger.debug("{}Adding resource config file to hdfs configuration:" + configFile, logPrefix);
        }
        hconf.addResource(new Path(configFile));

        if (!new File(configFile).exists()) {
            logger.warn(LocalizedMessage.create(LocalizedStrings.HOPLOG_HDFS_CLIENT_CONFIG_FILE_ABSENT,
                    configFile));
        }
    }

    // This setting disables shutdown hook for file system object. Shutdown
    // hook may cause FS object to close before the cache or store and
    // unpredictable behavior. This setting is provided for GFXD like server
    // use cases where FS close is managed by a server. This setting is not
    // supported by old versions of hadoop, HADOOP-4829
    hconf.setBoolean("fs.automatic.close", false);

    // Hadoop has a configuration parameter io.serializations that is a list of serialization 
    // classes which can be used for obtaining serializers and deserializers. This parameter 
    // by default contains avro classes. When a sequence file is created, it calls 
    // SerializationFactory.getSerializer(keyclass). This internally creates objects using 
    // reflection of all the classes that were part of io.serializations. But since, there is 
    // no avro class available it throws an exception. 
    // Before creating a sequenceFile, override the io.serializations parameter and pass only the classes 
    // that are important to us. 
    hconf.setStrings("io.serializations",
            new String[] { "org.apache.hadoop.io.serializer.WritableSerialization" });
    // create writer

    SchemaMetrics.configureGlobally(hconf);

    String nameNodeURL = null;
    if ((nameNodeURL = getNameNodeURL()) == null) {
        nameNodeURL = hconf.get("fs.default.name");
    }

    URI namenodeURI = URI.create(nameNodeURL);

    //if (! GemFireCacheImpl.getExisting().isHadoopGfxdLonerMode()) {
    String authType = hconf.get("hadoop.security.authentication");

    //The following code handles Gemfire XD with secure HDFS
    //A static set is used to cache all known secure HDFS NameNode urls.
    UserGroupInformation.setConfiguration(hconf);

    //Compare authentication method ignoring case to make GFXD future version compliant
    //At least version 2.0.2 starts complaining if the string "kerberos" is not in all small case.
    //However it seems current version of hadoop accept the authType in any case
    if (authType.equalsIgnoreCase("kerberos")) {

        String principal = hconf.get(HoplogConfig.KERBEROS_PRINCIPAL);
        String keyTab = hconf.get(HoplogConfig.KERBEROS_KEYTAB_FILE);

        if (!PERFORM_SECURE_HDFS_CHECK) {
            if (logger.isDebugEnabled())
                logger.debug("{}Ignore secure hdfs check", logPrefix);
        } else {
            if (!secureNameNodes.contains(nameNodeURL)) {
                if (logger.isDebugEnabled())
                    logger.debug("{}Executing secure hdfs check", logPrefix);
                try {
                    filesystem = FileSystem.newInstance(namenodeURI, hconf);
                    //Make sure no IOExceptions are generated when accessing insecure HDFS. 
                    filesystem.listFiles(new Path("/"), false);
                    throw new HDFSIOException(
                            "Gemfire XD HDFS client and HDFS cluster security levels do not match. The configured HDFS Namenode is not secured.");
                } catch (IOException ex) {
                    secureNameNodes.add(nameNodeURL);
                } finally {
                    //Close filesystem to avoid resource leak
                    if (filesystem != null) {
                        closeFileSystemIgnoreError(filesystem);
                    }
                }
            }
        }

        // check to ensure the namenode principal is defined
        String nameNodePrincipal = hconf.get("dfs.namenode.kerberos.principal");
        if (nameNodePrincipal == null) {
            throw new IOException(LocalizedStrings.GF_KERBEROS_NAMENODE_PRINCIPAL_UNDEF.toLocalizedString());
        }

        // ok, the user specified a gfxd principal so we will try to login
        if (principal != null) {
            //If NameNode principal is the same as Gemfire XD principal, there is a 
            //potential security hole
            String regex = "[/@]";
            if (nameNodePrincipal != null) {
                String HDFSUser = nameNodePrincipal.split(regex)[0];
                String GFXDUser = principal.split(regex)[0];
                if (HDFSUser.equals(GFXDUser)) {
                    logger.warn(
                            LocalizedMessage.create(LocalizedStrings.HDFS_USER_IS_SAME_AS_GF_USER, GFXDUser));
                }
            }

            // a keytab must exist if the user specifies a principal
            if (keyTab == null) {
                throw new IOException(LocalizedStrings.GF_KERBEROS_KEYTAB_UNDEF.toLocalizedString());
            }

            // the keytab must exist as well
            File f = new File(keyTab);
            if (!f.exists()) {
                throw new FileNotFoundException(
                        LocalizedStrings.GF_KERBEROS_KEYTAB_FILE_ABSENT.toLocalizedString(f.getAbsolutePath()));
            }

            //Authenticate Gemfire XD principal to Kerberos KDC using Gemfire XD keytab file
            String principalWithValidHost = SecurityUtil.getServerPrincipal(principal, "");
            UserGroupInformation.loginUserFromKeytab(principalWithValidHost, keyTab);
        } else {
            logger.warn(LocalizedMessage.create(LocalizedStrings.GF_KERBEROS_PRINCIPAL_UNDEF));
        }
    }
    //}

    filesystem = getFileSystemFactory().create(namenodeURI, hconf, forceNew);

    if (logger.isDebugEnabled()) {
        logger.debug("{}Initialized FileSystem linked to " + filesystem.getUri() + " " + filesystem.hashCode(),
                logPrefix);
    }
    return filesystem;
}

From source file: com.github.charithe.hbase.HBaseMiniClusterBooter.java

License: Apache License

private Configuration updateConfiguration(String zookeeperQuorum, int zkPort) throws IOException {
    LOGGER.debug("Updating configuration to use random ports and disable UIs");

    Configuration myConf = new Configuration(conf);
    myConf.setInt(HConstants.MASTER_PORT, getFreePort());
    myConf.setInt(HConstants.REGIONSERVER_PORT, getFreePort());
    myConf.setInt("hbase.master.info.port", -1);
    myConf.setInt("hbase.regionserver.info.port", -1);
    myConf.setBoolean("hbase.replication", false);

    myConf.setInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, 80);
    myConf.set(HConstants.ZOOKEEPER_QUORUM, zookeeperQuorum);
    myConf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkPort);

    return myConf;
}

From source file: com.github.libsml.commons.util.HadoopUtils.java

License: Apache License

/**
 * Create a map-only Hadoop Job out of the passed in parameters.  Does not set the
 * Job name.
 *
 * @see #getCustomJobName(String, JobContext, Class, Class)
 */
public static Job prepareJob(Path inputPath, Path outputPath, Class<? extends InputFormat> inputFormat,
        Class<? extends Mapper> mapper, Class<? extends Writable> mapperKey,
        Class<? extends Writable> mapperValue, Class<? extends OutputFormat> outputFormat, Configuration conf)
        throws IOException {

    //    Job job = new Job(new Configuration(conf));
    Job job = Job.getInstance(conf);
    Configuration jobConf = job.getConfiguration();

    if (mapper.equals(Mapper.class)) {
        throw new IllegalStateException("Can't figure out the user class jar file from mapper/reducer");
    }
    job.setJarByClass(mapper);

    job.setInputFormatClass(inputFormat);
    jobConf.set("mapred.input.dir", inputPath.toString());

    job.setMapperClass(mapper);
    job.setMapOutputKeyClass(mapperKey);
    job.setMapOutputValueClass(mapperValue);
    job.setOutputKeyClass(mapperKey);
    job.setOutputValueClass(mapperValue);
    jobConf.setBoolean("mapred.compress.map.output", true);
    job.setNumReduceTasks(0);

    job.setOutputFormatClass(outputFormat);
    jobConf.set("mapred.output.dir", outputPath.toString());

    return job;
}

From source file: com.github.libsml.commons.util.HadoopUtils.java

License: Apache License

public static Job prepareJob(String inputPath, String outputPath, Class<? extends InputFormat> inputFormat,
        Class<? extends Mapper> mapper, Class<? extends Writable> mapperKey,
        Class<? extends Writable> mapperValue, Class<? extends Reducer> reducer,
        Class<? extends Writable> reducerKey, Class<? extends Writable> reducerValue,
        Class<? extends OutputFormat> outputFormat, Configuration conf) throws IOException {

    //    Job job = new Job(new Configuration(conf));
    Job job = Job.getInstance(conf);
    Configuration jobConf = job.getConfiguration();

    if (reducer.equals(Reducer.class)) {
        if (mapper.equals(Mapper.class)) {
            throw new IllegalStateException("Can't figure out the user class jar file from mapper/reducer");
        }
        job.setJarByClass(mapper);
    } else {
        job.setJarByClass(reducer);
    }

    job.setInputFormatClass(inputFormat);
    jobConf.set("mapred.input.dir", inputPath);

    job.setMapperClass(mapper);
    if (mapperKey != null) {
        job.setMapOutputKeyClass(mapperKey);
    }
    if (mapperValue != null) {
        job.setMapOutputValueClass(mapperValue);
    }

    jobConf.setBoolean("mapred.compress.map.output", true);

    job.setReducerClass(reducer);
    job.setOutputKeyClass(reducerKey);
    job.setOutputValueClass(reducerValue);

    job.setOutputFormatClass(outputFormat);
    jobConf.set("mapred.output.dir", outputPath);

    return job;
}

From source file: com.google.cloud.bigtable.mapreduce.Export.java

License: Apache License

/**
 * Sets up the actual job.
 *
 * @param conf  The current configuration.
 * @param args  The command line parameters.
 * @return The newly created job.
 * @throws java.io.IOException When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
    conf.setIfUnset("hbase.client.connection.impl", BigtableConfiguration.getConnectionClass().getName());
    conf.setIfUnset(BigtableOptionsFactory.BIGTABLE_RPC_TIMEOUT_MS_KEY, "60000");
    conf.setBoolean(TableInputFormat.SHUFFLE_MAPS, true);

    String tableName = args[0];
    Path outputDir = new Path(args[1]);
    Job job = Job.getInstance(conf, NAME + "_" + tableName);
    job.setJobName(NAME + "_" + tableName);
    job.setJarByClass(Export.class);
    // Set optional scan parameters
    Scan s = getConfiguredScanForJob(conf, args);
    TableMapReduceUtil.initTableMapperJob(tableName, s, IdentityTableMapper.class, ImmutableBytesWritable.class,
            Result.class, job, false);
    // No reducers.  Just write straight to output files.
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setOutputKeyClass(ImmutableBytesWritable.class);
    job.setOutputValueClass(Result.class);
    FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't have a default fs.
    return job;
}