Example usage for org.apache.hadoop.conf Configuration getBoolean

Introduction

This page collects example usages of org.apache.hadoop.conf.Configuration.getBoolean from open-source projects.

Prototype

public boolean getBoolean(String name, boolean defaultValue) 

Document

Get the value of the name property as a boolean. If no such property exists, or if the stored value is not a valid boolean ("true" or "false", case-insensitive), then defaultValue is returned.
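
A minimal, self-contained sketch of the call (the property name is invented for illustration):

import org.apache.hadoop.conf.Configuration;

public class GetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Property unset: the supplied default is returned.
        System.out.println(conf.getBoolean("example.ssl.enabled", false)); // false

        // Once the property is set, the stored value wins over the default.
        conf.setBoolean("example.ssl.enabled", true);
        System.out.println(conf.getBoolean("example.ssl.enabled", false)); // true
    }
}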

Usage

From source file:com.facebook.presto.hive.PrestoS3FileSystem.java

License:Apache License

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    checkNotNull(uri, "uri is null");
    checkNotNull(conf, "conf is null");

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path("/").makeQualified(this.uri, new Path("/"));

    HiveClientConfig defaults = new HiveClientConfig();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxClientRetries = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries());
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));

    ClientConfiguration configuration = new ClientConfiguration();
    configuration.setMaxErrorRetry(maxErrorRetries);
    configuration.setProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP);
    configuration.setConnectionTimeout(Ints.checkedCast(connectTimeout.toMillis()));

    this.s3 = new AmazonS3Client(getAwsCredentials(uri, conf), configuration);
}
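
The defaults above come from a typed HiveClientConfig object, so the Hadoop Configuration is only consulted when a key is explicitly set. A sketch of overriding the SSL flag before initialization, assuming the same S3_SSL_ENABLED constant and a no-argument PrestoS3FileSystem constructor:

Configuration conf = new Configuration();
conf.setBoolean(S3_SSL_ENABLED, false); // force plain HTTP, e.g. for a local S3 emulator

PrestoS3FileSystem fs = new PrestoS3FileSystem();
fs.initialize(URI.create("s3://example-bucket/"), conf);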

From source file:com.facebook.presto.hive.s3.PrestoS3ClientFactory.java

License:Apache License

synchronized AmazonS3 getS3Client(Configuration config, HiveClientConfig clientConfig) {
    if (s3Client != null) {
        return s3Client;
    }

    HiveS3Config defaults = new HiveS3Config();
    String userAgentPrefix = config.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());
    int maxErrorRetries = config.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = config.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(config.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(config.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = config.getInt(S3_SELECT_PUSHDOWN_MAX_CONNECTIONS,
            clientConfig.getS3SelectPushdownMaxConnections());

    if (clientConfig.isS3SelectPushdownEnabled()) {
        s3UserAgentSuffix = "presto-select";
    }

    ClientConfiguration clientConfiguration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(s3UserAgentSuffix);

    PrestoS3FileSystemStats stats = new PrestoS3FileSystemStats();
    RequestMetricCollector metricCollector = new PrestoS3FileSystemMetricCollector(stats);
    AWSCredentialsProvider awsCredentialsProvider = getAwsCredentialsProvider(config, defaults);
    AmazonS3Builder<? extends AmazonS3Builder, ? extends AmazonS3> clientBuilder = AmazonS3Client.builder()
            .withCredentials(awsCredentialsProvider).withClientConfiguration(clientConfiguration)
            .withMetricsCollector(metricCollector).enablePathStyleAccess();

    boolean regionOrEndpointSet = false;

    String endpoint = config.get(S3_ENDPOINT);
    boolean pinS3ClientToCurrentRegion = config.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    verify(!pinS3ClientToCurrentRegion || endpoint == null,
            "Invalid configuration: either endpoint can be set or S3 client can be pinned to the current region");

    // use local region when running inside of EC2
    if (pinS3ClientToCurrentRegion) {
        Region region = Regions.getCurrentRegion();
        if (region != null) {
            clientBuilder.withRegion(region.getName());
            regionOrEndpointSet = true;
        }
    }

    if (!isNullOrEmpty(endpoint)) {
        clientBuilder.withEndpointConfiguration(new EndpointConfiguration(endpoint, null));
        regionOrEndpointSet = true;
    }

    if (!regionOrEndpointSet) {
        clientBuilder.withRegion(US_EAST_1);
        clientBuilder.setForceGlobalBucketAccessEnabled(true);
    }

    s3Client = clientBuilder.build();
    return s3Client;
}
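
The verify call above makes S3_ENDPOINT and S3_PIN_CLIENT_TO_CURRENT_REGION mutually exclusive. A sketch of the pinned variant (the region is then resolved from EC2 metadata at runtime):

Configuration config = new Configuration();
config.setBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION, true);
// Leave S3_ENDPOINT unset: configuring both would fail the verify above.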

From source file:com.facebook.presto.hive.s3.PrestoS3ClientFactory.java

License:Apache License

private AWSCredentialsProvider getAwsCredentialsProvider(Configuration conf, HiveS3Config defaults) {
    Optional<AWSCredentials> credentials = getAwsCredentials(conf);
    if (credentials.isPresent()) {
        return new AWSStaticCredentialsProvider(credentials.get());
    }

    boolean useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS,
            defaults.isS3UseInstanceCredentials());
    if (useInstanceCredentials) {
        return InstanceProfileCredentialsProvider.getInstance();
    }

    String providerClass = conf.get(S3_CREDENTIALS_PROVIDER);
    if (!isNullOrEmpty(providerClass)) {
        return getCustomAWSCredentialsProvider(conf, providerClass);
    }

    throw new RuntimeException("S3 credentials not configured");
}
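
Credentials are resolved in order: explicit keys, then instance credentials, then a custom provider class. A sketch of opting into EC2 instance-profile credentials via the boolean flag (constants as used above):

Configuration conf = new Configuration();
conf.setBoolean(S3_USE_INSTANCE_CREDENTIALS, true);
// With no explicit keys configured, getAwsCredentialsProvider(conf, new HiveS3Config())
// now returns InstanceProfileCredentialsProvider.getInstance().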

From source file:com.facebook.presto.hive.s3.PrestoS3FileSystem.java

License:Apache License

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");
    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));

    HiveS3Config defaults = new HiveS3Config();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration
            .valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    long minFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes());
    long minPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes());
    this.isPathStyleAccess = conf.getBoolean(S3_PATH_STYLE_ACCESS, defaults.isS3PathStyleAccess());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS,
            defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
    this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
    this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
    String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());

    ClientConfiguration configuration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(S3_USER_AGENT_SUFFIX);

    this.s3 = createAmazonS3Client(uri, conf, configuration);

    transferConfig.setMultipartUploadThreshold(minFileSize);
    transferConfig.setMinimumUploadPartSize(minPartSize);
}

From source file:com.github.hdl.tensorflow.yarn.app.TFAmContainer.java

License:Apache License

public Map<String, String> setJavaEnv(Configuration conf) {
    Map<String, String> env = new HashMap<String, String>();

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());
    return env;
}
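
Here the default is a literal false rather than a value read from a defaults object, and the flag is normally set only by test harnesses. A sketch of flipping it (YarnConfiguration.IS_MINI_YARN_CLUSTER is the same constant used above):

Configuration conf = new Configuration();
conf.setBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, true);
// setJavaEnv(conf) will now also append java.class.path to the CLASSPATH entry.
boolean isMiniCluster = conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false); // true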

From source file:com.github.hdl.tensorflow.yarn.app.TFContainer.java

License:Apache License

public Map<String, String> setJavaEnv(Configuration conf, String tfServerJar) {
    // Set the java environment
    Map<String, String> env = new HashMap<String, String>();

    // Add TFServerLauncher.jar location to classpath
    StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");

    // Add hadoop's jar location to classpath
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    if (tfServerJar != null) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(tfServerJar);
    }
    env.put("CLASSPATH", classPathEnv.toString());
    return env;
}

From source file:com.github.sakserv.minicluster.impl.HbaseRestLocalCluster.java

License:Apache License

@Override
public void start() throws Exception {
    VersionInfo.logVersion();
    Configuration conf = builder.getHbaseConfiguration();

    conf.set("hbase.rest.port", hbaseRestPort.toString());
    conf.set("hbase.rest.readonly", (hbaseRestReadOnly == null) ? "true" : hbaseRestReadOnly.toString());
    conf.set("hbase.rest.info.port", (hbaseRestInfoPort == null) ? "8085" : hbaseRestInfoPort.toString());
    String hbaseRestHost = (this.hbaseRestHost == null) ? "0.0.0.0" : this.hbaseRestHost;

    Integer hbaseRestThreadMax = (this.hbaseRestThreadMax == null) ? 100 : this.hbaseRestThreadMax;
    Integer hbaseRestThreadMin = (this.hbaseRestThreadMin == null) ? 2 : this.hbaseRestThreadMin;

    UserProvider userProvider = UserProvider.instantiate(conf);
    Pair<FilterHolder, Class<? extends ServletContainer>> pair = loginServerPrincipal(userProvider, conf);
    FilterHolder authFilter = pair.getFirst();
    Class<? extends ServletContainer> containerClass = pair.getSecond();
    RESTServlet.getInstance(conf, userProvider);

    // set up the Jersey servlet container for Jetty
    ServletHolder sh = new ServletHolder(containerClass);
    sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
            ResourceConfig.class.getCanonicalName());
    sh.setInitParameter("com.sun.jersey.config.property.packages", "jetty");
    ServletHolder shPojoMap = new ServletHolder(containerClass);
    Map<String, String> shInitMap = sh.getInitParameters();
    for (Map.Entry<String, String> e : shInitMap.entrySet()) {
        shPojoMap.setInitParameter(e.getKey(), e.getValue());
    }
    shPojoMap.setInitParameter(JSONConfiguration.FEATURE_POJO_MAPPING, "true");

    // set up Jetty and run the embedded server

    server = new Server();

    Connector connector = new SelectChannelConnector();
    if (conf.getBoolean(RESTServer.REST_SSL_ENABLED, false)) {
        SslSelectChannelConnector sslConnector = new SslSelectChannelConnector();
        String keystore = conf.get(RESTServer.REST_SSL_KEYSTORE_STORE);
        String password = HBaseConfiguration.getPassword(conf, RESTServer.REST_SSL_KEYSTORE_PASSWORD, null);
        String keyPassword = HBaseConfiguration.getPassword(conf, RESTServer.REST_SSL_KEYSTORE_KEYPASSWORD,
                password);
        sslConnector.setKeystore(keystore);
        sslConnector.setPassword(password);
        sslConnector.setKeyPassword(keyPassword);
        connector = sslConnector;
    }
    connector.setPort(hbaseRestPort);
    connector.setHost(hbaseRestHost);
    connector.setHeaderBufferSize(8192);

    server.addConnector(connector);

    QueuedThreadPool threadPool = new QueuedThreadPool(hbaseRestThreadMax);
    threadPool.setMinThreads(hbaseRestThreadMin);
    server.setThreadPool(threadPool);

    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);
    // set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    context.addServlet(shPojoMap, "/status/cluster");
    context.addServlet(sh, "/*");
    if (authFilter != null) {
        context.addFilter(authFilter, "/*", 1);
    }

    HttpServerUtil.constrainHttpMethods(context);

    // Put up info server.
    int port = (hbaseRestInfoPort == null) ? 8085 : hbaseRestInfoPort;
    if (port >= 0) {
        conf.setLong("startcode", System.currentTimeMillis());
        String a = hbaseRestHost;
        infoServer = new InfoServer("rest", a, port, false, conf);
        infoServer.setAttribute("hbase.conf", conf);
        infoServer.start();
    }
    // start server
    server.start();
}
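
RESTServer.REST_SSL_ENABLED defaults to false above; enabling it also requires keystore settings. A sketch (the keystore path is invented for illustration):

conf.setBoolean(RESTServer.REST_SSL_ENABLED, true);
conf.set(RESTServer.REST_SSL_KEYSTORE_STORE, "/path/to/rest.jks");
// The passwords are read through HBaseConfiguration.getPassword, so they may
// come from a credential provider rather than the plain configuration.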

From source file:com.google.cloud.bigtable.hbase.BigtableOptionsFactory.java

License:Open Source License

public static BigtableOptions fromConfiguration(final Configuration configuration) throws IOException {

    BigtableOptions.Builder bigtableOptionsBuilder = new BigtableOptions.Builder();

    bigtableOptionsBuilder.setProjectId(getValue(configuration, PROJECT_ID_KEY, "Project ID"));
    bigtableOptionsBuilder.setZoneId(getValue(configuration, ZONE_KEY, "Zone"));
    bigtableOptionsBuilder.setClusterId(getValue(configuration, CLUSTER_KEY, "Cluster"));

    bigtableOptionsBuilder
            .setDataHost(getHost(configuration, BIGTABLE_HOST_KEY, BIGTABLE_DATA_HOST_DEFAULT, "API Data"));

    bigtableOptionsBuilder.setTableAdminHost(getHost(configuration, BIGTABLE_TABLE_ADMIN_HOST_KEY,
            BIGTABLE_TABLE_ADMIN_HOST_DEFAULT, "Table Admin"));

    bigtableOptionsBuilder.setClusterAdminHost(getHost(configuration, BIGTABLE_CLUSTER_ADMIN_HOST_KEY,
            BIGTABLE_CLUSTER_ADMIN_HOST_DEFAULT, "Cluster Admin"));

    int port = configuration.getInt(BIGTABLE_PORT_KEY, BIGTABLE_PORT_DEFAULT);
    bigtableOptionsBuilder.setPort(port);
    setChannelOptions(bigtableOptionsBuilder, configuration);

    int asyncMutatorCount = configuration.getInt(BIGTABLE_ASYNC_MUTATOR_COUNT_KEY,
            BIGTABLE_ASYNC_MUTATOR_COUNT_DEFAULT);
    bigtableOptionsBuilder.setAsyncMutatorWorkerCount(asyncMutatorCount);

    bigtableOptionsBuilder.setUseBulkApi(configuration.getBoolean(BIGTABLE_USE_BULK_API, true));
    bigtableOptionsBuilder.setBulkMaxRowKeyCount(configuration.getInt(BIGTABLE_BULK_MAX_ROW_KEY_COUNT,
            BigtableOptions.BIGTABLE_BULK_MAX_ROW_KEY_COUNT_DEFAULT));
    bigtableOptionsBuilder.setBulkMaxRequestSize(configuration.getLong(BIGTABLE_BULK_MAX_REQUEST_SIZE_BYTES,
            BigtableOptions.BIGTABLE_BULK_MAX_REQUEST_SIZE_BYTES_DEFAULT));
    bigtableOptionsBuilder
            .setUsePlaintextNegotiation(configuration.getBoolean(BIGTABLE_USE_PLAINTEXT_NEGOTIATION, false));

    return bigtableOptionsBuilder.build();
}
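
Both boolean options here use literal defaults: the bulk API on, plaintext negotiation off. A sketch of flipping them before building the options (the project, zone, and cluster identifiers are invented for illustration):

Configuration configuration = HBaseConfiguration.create();
configuration.set(PROJECT_ID_KEY, "my-project");
configuration.set(ZONE_KEY, "us-central1-b");
configuration.set(CLUSTER_KEY, "my-cluster");
configuration.setBoolean(BIGTABLE_USE_BULK_API, false); // disable client-side batching
configuration.setBoolean(BIGTABLE_USE_PLAINTEXT_NEGOTIATION, true); // e.g. against a local emulator
BigtableOptions options = BigtableOptionsFactory.fromConfiguration(configuration);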

From source file:com.google.cloud.bigtable.hbase.BigtableOptionsFactory.java

License:Open Source License

private static void setCredentialOptions(BigtableOptions.Builder builder, Configuration configuration)
        throws FileNotFoundException {
    if (configuration.getBoolean(BIGTABE_USE_SERVICE_ACCOUNTS_KEY, BIGTABLE_USE_SERVICE_ACCOUNTS_DEFAULT)) {
        LOG.debug("Using service accounts");

        if (configuration.get(BIGTABLE_SERVICE_ACCOUNT_JSON_KEYFILE_LOCATION_KEY) != null) {
            String keyfileLocation = configuration.get(BIGTABLE_SERVICE_ACCOUNT_JSON_KEYFILE_LOCATION_KEY);
            LOG.debug("Using json keyfile: %s", keyfileLocation);
            builder.setCredentialOptions(
                    CredentialOptions.jsonCredentials(new FileInputStream(keyfileLocation)));
        } else if (configuration.get(BIGTABLE_SERVICE_ACCOUNT_EMAIL_KEY) != null) {
            String serviceAccount = configuration.get(BIGTABLE_SERVICE_ACCOUNT_EMAIL_KEY);
            LOG.debug("Service account %s specified.", serviceAccount);
            String keyfileLocation = configuration.get(BIGTABLE_SERVICE_ACCOUNT_P12_KEYFILE_LOCATION_KEY);
            Preconditions.checkState(!isNullOrEmpty(keyfileLocation),
                    "Key file location must be specified when setting service account email");
            LOG.debug("Using p12 keyfile: %s", keyfileLocation);
            builder.setCredentialOptions(CredentialOptions.p12Credential(serviceAccount, keyfileLocation));
        } else {
            LOG.debug("Using default credentials.");
            builder.setCredentialOptions(CredentialOptions.defaultCredentials());
        }
    } else if (configuration.getBoolean(BIGTABLE_NULL_CREDENTIAL_ENABLE_KEY,
            BIGTABLE_NULL_CREDENTIAL_ENABLE_DEFAULT)) {
        builder.setCredentialOptions(CredentialOptions.nullCredential());
        LOG.info("Enabling the use of null credentials. This should not be used in production.");
    } else {
        throw new IllegalStateException("Either service account or null credentials must be enabled");
    }
}
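
The two getBoolean calls form a three-way switch: service-account credentials, explicit null credentials, or an error. A sketch of selecting null credentials, e.g. against a local emulator (constants as used above):

Configuration configuration = HBaseConfiguration.create();
configuration.setBoolean(BIGTABE_USE_SERVICE_ACCOUNTS_KEY, false);
configuration.setBoolean(BIGTABLE_NULL_CREDENTIAL_ENABLE_KEY, true);
// setCredentialOptions(builder, configuration) now takes the null-credential branch,
// which the method itself warns against using in production.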

From source file:com.google.cloud.bigtable.hbase.BigtableOptionsFactory.java

License:Open Source License

private static RetryOptions createRetryOptions(Configuration configuration) {
    RetryOptions.Builder retryOptionsBuilder = new RetryOptions.Builder();
    boolean enableRetries = configuration.getBoolean(ENABLE_GRPC_RETRIES_KEY,
            RetryOptions.DEFAULT_ENABLE_GRPC_RETRIES);
    LOG.debug("gRPC retries enabled: %s", enableRetries);
    retryOptionsBuilder.setEnableRetries(enableRetries);

    String retryCodes = configuration.get(ADDITIONAL_RETRY_CODES, "");
    String[] codes = retryCodes.split(",");
    for (String stringCode : codes) {
        String trimmed = stringCode.trim();
        if (trimmed.isEmpty()) {
            continue;
        }
        Status.Code code = Status.Code.valueOf(trimmed);
        Preconditions.checkArgument(code != null, "Code " + stringCode + " not found.");
        LOG.debug("gRPC retry on: %s", stringCode);
        retryOptionsBuilder.addStatusToRetryOn(code);
    }

    boolean retryOnDeadlineExceeded = configuration.getBoolean(ENABLE_GRPC_RETRY_DEADLINEEXCEEDED_KEY, true);
    LOG.debug("gRPC retry on deadline exceeded enabled: %s", retryOnDeadlineExceeded);
    retryOptionsBuilder.setRetryOnDeadlineExceeded(retryOnDeadlineExceeded);

    int maxElapsedBackoffMillis = configuration.getInt(MAX_ELAPSED_BACKOFF_MILLIS_KEY,
            RetryOptions.DEFAULT_MAX_ELAPSED_BACKOFF_MILLIS);
    LOG.debug("gRPC retry maxElapsedBackoffMillis: %d", maxElapsedBackoffMillis);
    retryOptionsBuilder.setMaxElapsedBackoffMillis(maxElapsedBackoffMillis);

    int readPartialRowTimeoutMillis = configuration.getInt(READ_PARTIAL_ROW_TIMEOUT_MS,
            RetryOptions.DEFAULT_READ_PARTIAL_ROW_TIMEOUT_MS);
    LOG.debug("gRPC read partial row timeout (millis): %d", readPartialRowTimeoutMillis);
    retryOptionsBuilder.setReadPartialRowTimeoutMillis(readPartialRowTimeoutMillis);

    int streamingBufferSize = configuration.getInt(READ_BUFFER_SIZE,
            RetryOptions.DEFAULT_STREAMING_BUFFER_SIZE);
    LOG.debug("gRPC read buffer size (count): %d", streamingBufferSize);
    retryOptionsBuilder.setStreamingBufferSize(streamingBufferSize);

    int streamingBatchSize = configuration.getInt(READ_BATCH_SIZE, RetryOptions.DEFAULT_STREAMING_BATCH_SIZE);
    LOG.debug("gRPC read batch size (count): %d", streamingBatchSize);
    retryOptionsBuilder.setStreamingBatchSize(streamingBatchSize);

    int maxScanTimeoutRetries = configuration.getInt(MAX_SCAN_TIMEOUT_RETRIES,
            RetryOptions.DEFAULT_MAX_SCAN_TIMEOUT_RETRIES);
    LOG.debug("gRPC max scan timeout retries (count): %d", maxScanTimeoutRetries);
    retryOptionsBuilder.setMaxScanTimeoutRetries(maxScanTimeoutRetries);

    return retryOptionsBuilder.build();
}
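
A sketch of driving this method from configuration: retries on, one extra retryable status, deadline-exceeded retries off (extra codes must match io.grpc Status.Code names; invalid names fail in Status.Code.valueOf):

Configuration configuration = new Configuration();
configuration.setBoolean(ENABLE_GRPC_RETRIES_KEY, true);
configuration.set(ADDITIONAL_RETRY_CODES, "UNAVAILABLE, ABORTED");
configuration.setBoolean(ENABLE_GRPC_RETRY_DEADLINEEXCEEDED_KEY, false);

RetryOptions retryOptions = createRetryOptions(configuration);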