Example usage for org.apache.hadoop.conf.Configuration.getInt

Introduction

On this page you can find usage examples for org.apache.hadoop.conf.Configuration.getInt.

Prototype

public int getInt(String name, int defaultValue) 

Document

Get the value of the name property as an int. If no such property exists, defaultValue is returned; if the stored value cannot be parsed as an int, a NumberFormatException is thrown.
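
For illustration, a minimal self-contained sketch (the property name my.example.count is hypothetical):

import org.apache.hadoop.conf.Configuration;

public class GetIntDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Property not set yet: the supplied default is returned.
        System.out.println(conf.getInt("my.example.count", 42)); // prints 42

        // Once set, the stored value is parsed and returned.
        conf.setInt("my.example.count", 7);
        System.out.println(conf.getInt("my.example.count", 42)); // prints 7
    }
}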

Usage

From source file: com.facebook.hive.orc.WriterImpl.java

License: Open Source License

WriterImpl(FileSystem fs, Path path, Configuration conf, ObjectInspector inspector, long stripeSize,
        CompressionKind compress, int bufferSize, int rowIndexStride, MemoryManager memoryManager)
        throws IOException {
    this.fs = fs;
    this.path = path;
    this.conf = conf;
    this.stripeSize = stripeSize;
    this.compress = compress;
    this.bufferSize = bufferSize;
    this.rowIndexStride = rowIndexStride;
    this.memoryManager = memoryManager;
    buildIndex = rowIndexStride > 0;
    codec = createCodec(compress, conf);
    useVInts = OrcConf.getBoolVar(conf, OrcConf.ConfVars.HIVE_ORC_USE_VINTS);
    treeWriter = createTreeWriter(inspector, streamFactory, false, conf, useVInts,
            memoryManager.isLowMemoryMode(), memoryEstimate);
    dfsBytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
    if (buildIndex && rowIndexStride < MIN_ROW_INDEX_STRIDE) {
        throw new IllegalArgumentException("Row stride must be at least " + MIN_ROW_INDEX_STRIDE);
    }
    maxDictSize = OrcConf.getLongVar(conf, OrcConf.ConfVars.HIVE_ORC_MAX_DICTIONARY_SIZE);
    // ensure that we are able to handle callbacks before we register ourselves
    memoryManager.addWriter(path, stripeSize, this, memoryEstimate.getTotalMemory());
}

From source file: com.facebook.hiveio.conf.IntConfOption.java

License: Apache License

/**
 * Looks up the value of this option in the given configuration.
 * @param conf Configuration
 * @return value for the key, or the default value if not set
 */
public int get(Configuration conf) {
    return conf.getInt(getKey(), defaultValue);
}
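
This wrapper pattern keeps the key string and its default in one place so call sites never repeat either. A minimal self-contained version of the same idea (a sketch, not the actual com.facebook.hiveio API):

import org.apache.hadoop.conf.Configuration;

/** Sketch: a typed int option that owns its key and default value. */
public class SimpleIntOption {
    private final String key;
    private final int defaultValue;

    public SimpleIntOption(String key, int defaultValue) {
        this.key = key;
        this.defaultValue = defaultValue;
    }

    /** @return the configured value, or the default if the key is unset */
    public int get(Configuration conf) {
        return conf.getInt(key, defaultValue);
    }

    public void set(Configuration conf, int value) {
        conf.setInt(key, value);
    }
}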

From source file: com.facebook.presto.hive.PrestoS3FileSystem.java

License: Apache License

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    checkNotNull(uri, "uri is null");
    checkNotNull(conf, "conf is null");

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path("/").makeQualified(this.uri, new Path("/"));

    HiveClientConfig defaults = new HiveClientConfig();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxClientRetries = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries());
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));

    ClientConfiguration configuration = new ClientConfiguration();
    configuration.setMaxErrorRetry(maxErrorRetries);
    configuration.setProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP);
    configuration.setConnectionTimeout(Ints.checkedCast(connectTimeout.toMillis()));

    this.s3 = new AmazonS3Client(getAwsCredentials(uri, conf), configuration);
}
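
Note the pattern here: hard defaults come from the HiveClientConfig object, so the Hadoop Configuration only needs to carry overrides. A caller overriding the retry count might do the following (the key string below is hypothetical; the real one is the S3_MAX_CLIENT_RETRIES constant):

Configuration conf = new Configuration();
// Hypothetical key string standing in for S3_MAX_CLIENT_RETRIES.
conf.setInt("presto.s3.max-client-retries", 5);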

From source file: com.facebook.presto.hive.s3.PrestoS3ClientFactory.java

License: Apache License

synchronized AmazonS3 getS3Client(Configuration config, HiveClientConfig clientConfig) {
    if (s3Client != null) {
        return s3Client;
    }

    HiveS3Config defaults = new HiveS3Config();
    String userAgentPrefix = config.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());
    int maxErrorRetries = config.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = config.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(config.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(config.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = config.getInt(S3_SELECT_PUSHDOWN_MAX_CONNECTIONS,
            clientConfig.getS3SelectPushdownMaxConnections());

    if (clientConfig.isS3SelectPushdownEnabled()) {
        s3UserAgentSuffix = "presto-select";
    }

    ClientConfiguration clientConfiguration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(s3UserAgentSuffix);

    PrestoS3FileSystemStats stats = new PrestoS3FileSystemStats();
    RequestMetricCollector metricCollector = new PrestoS3FileSystemMetricCollector(stats);
    AWSCredentialsProvider awsCredentialsProvider = getAwsCredentialsProvider(config, defaults);
    AmazonS3Builder<? extends AmazonS3Builder, ? extends AmazonS3> clientBuilder = AmazonS3Client.builder()
            .withCredentials(awsCredentialsProvider).withClientConfiguration(clientConfiguration)
            .withMetricsCollector(metricCollector).enablePathStyleAccess();

    boolean regionOrEndpointSet = false;

    String endpoint = config.get(S3_ENDPOINT);
    boolean pinS3ClientToCurrentRegion = config.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    verify(!pinS3ClientToCurrentRegion || endpoint == null,
            "Invalid configuration: either endpoint can be set or S3 client can be pinned to the current region");

    // use local region when running inside of EC2
    if (pinS3ClientToCurrentRegion) {
        Region region = Regions.getCurrentRegion();
        if (region != null) {
            clientBuilder.withRegion(region.getName());
            regionOrEndpointSet = true;
        }
    }

    if (!isNullOrEmpty(endpoint)) {
        clientBuilder.withEndpointConfiguration(new EndpointConfiguration(endpoint, null));
        regionOrEndpointSet = true;
    }

    if (!regionOrEndpointSet) {
        clientBuilder.withRegion(US_EAST_1);
        clientBuilder.setForceGlobalBucketAccessEnabled(true);
    }

    s3Client = clientBuilder.build();
    return s3Client;
}

From source file: com.facebook.presto.hive.s3.PrestoS3FileSystem.java

License: Apache License

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");
    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));

    HiveS3Config defaults = new HiveS3Config();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration
            .valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    long minFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes());
    long minPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes());
    this.isPathStyleAccess = conf.getBoolean(S3_PATH_STYLE_ACCESS, defaults.isS3PathStyleAccess());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS,
            defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
    this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
    this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
    String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());

    ClientConfiguration configuration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(S3_USER_AGENT_SUFFIX);

    this.s3 = createAmazonS3Client(uri, conf, configuration);

    transferConfig.setMultipartUploadThreshold(minFileSize);
    transferConfig.setMinimumUploadPartSize(minPartSize);
}

From source file: com.facebook.presto.hive.S3SelectLineRecordReader.java

License: Apache License

S3SelectLineRecordReader(Configuration configuration, HiveClientConfig clientConfig, Path path, long start,
        long length, Properties schema, String ionSqlQuery, PrestoS3ClientFactory s3ClientFactory) {
    requireNonNull(configuration, "configuration is null");
    requireNonNull(clientConfig, "clientConfig is null");
    requireNonNull(schema, "schema is null");
    requireNonNull(path, "path is null");
    requireNonNull(ionSqlQuery, "ionSqlQuery is null");
    requireNonNull(s3ClientFactory, "s3ClientFactory is null");
    this.lineDelimiter = schema.getProperty(LINE_DELIM, "\n");
    this.processedRecords = 0;
    this.recordsFromS3 = 0;
    this.start = start;
    this.position = this.start;
    this.end = this.start + length;
    this.isFirstLine = true;

    this.compressionCodecFactory = new CompressionCodecFactory(configuration);
    this.selectObjectContentRequest = buildSelectObjectRequest(schema, ionSqlQuery, path);

    HiveS3Config defaults = new HiveS3Config();
    this.maxAttempts = configuration.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration
            .valueOf(configuration.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration
            .valueOf(configuration.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));

    this.selectClient = new PrestoS3SelectClient(configuration, clientConfig, s3ClientFactory);
    closer.register(selectClient);
}

From source file: com.firewallid.util.HTMLContent.java

public HTMLContent(Configuration firewallConf) {
    partitions = firewallConf.getInt(PARTITIONS, 48);
    retries = firewallConf.getInt(RETRIES, 10);
    timeout = firewallConf.getLong(TIMEOUT, 5);
}

From source file: com.flipkart.fdp.migration.distcp.core.MirrorDistCPDriver.java

License: Apache License

private Job createJob(Configuration configuration) throws Exception {

    System.out.println("Initializing BlueShift v 2.0...");
    System.out.println("Configuration: " + dcmConfig.toString());

    Job job = Job.getInstance(configuration, "BlueShift v 2.0 - " + dcmConfig.getBatchName());

    job.setJarByClass(MirrorDistCPDriver.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);

    job.setMapperClass(MirrorMapper.class);
    job.setReducerClass(MirrorReducer.class);

    job.setInputFormatClass(MirrorFileInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    FileOutputFormat.setOutputPath(job, stateManager.getReportPath());

    job.setNumReduceTasks(configuration.getInt("mapreduce.reduce.tasks", 1));

    System.out.println("Job Initialization Complete, The status of the Mirror job will be written to: "
            + stateManager.getReportPath());
    return job;
}
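
Because the driver falls back to a single reducer, callers can scale the reduce phase by setting the same key it reads before building the job:

Configuration configuration = new Configuration();
// Ask the driver for eight reducers instead of the default of one.
configuration.setInt("mapreduce.reduce.tasks", 8);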

From source file: com.foursquare.twofishes.io.MapFileConcurrentReader.java

License: Apache License

public MapFileConcurrentReader(Path dir, Configuration conf, SequenceFile.Reader.Option... opts)
        throws IOException {

    INDEX_SKIP = conf.getInt("io.map.index.skip", 0);
    open(dir, comparator, conf, opts);
}
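
The io.map.index.skip property tells the reader to skip that many index entries between each one it keeps in memory, which makes opening very large MapFiles cheaper at the cost of slightly longer seeks. For example:

Configuration conf = new Configuration();
// Keep only every 8th index entry, shrinking the in-memory index roughly 8x.
conf.setInt("io.map.index.skip", 7);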

From source file: com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.GFOutputFormat.java

License: Apache License

/**
 * Creates an instance of {@link ClientCache} by connecting to the GF cluster
 * through a locator.
 */
public ClientCache createGFWriterUsingLocator(Configuration conf) {
    // if locator host is not provided assume localhost
    String locator = conf.get(LOCATOR_HOST, ConnectionEndpointConverter.DEFAULT_LOCATOR_HOST);
    // if locator port is not provided assume default locator port 10334
    int port = conf.getInt(LOCATOR_PORT, ConnectionEndpointConverter.DEFAULT_LOCATOR_PORT);

    // create gemfire client cache instance
    ClientCacheFactory ccf = new ClientCacheFactory();
    ccf.addPoolLocator(locator, port);
    ClientCache cache = ccf.create();
    return cache;
}
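
The job driver is expected to place the locator endpoint into the configuration before this method runs. A minimal sketch, with hypothetical key strings standing in for the LOCATOR_HOST and LOCATOR_PORT constants:

Configuration conf = new Configuration();
// Hypothetical keys; the real ones are the LOCATOR_HOST/LOCATOR_PORT constants.
conf.set("gemfire.locator.host", "locator1.example.com");
conf.setInt("gemfire.locator.port", 10334);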