Example usage for com.amazonaws ClientConfiguration ClientConfiguration

Introduction

This page collects example usages of the no-argument com.amazonaws.ClientConfiguration constructor, drawn from open-source projects.

Prototype

public ClientConfiguration() 
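
Before the project examples below, here is a minimal, self-contained sketch of the constructor in isolation. A new ClientConfiguration starts out with the SDK defaults; the timeout and retry values shown are illustrative placeholders, not recommendations.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;

public class ClientConfigurationSketch {
    public static ClientConfiguration tunedDefaults() {
        // The no-argument constructor applies the SDK's built-in defaults.
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);   // transport protocol
        config.setConnectionTimeout(10000);   // connect timeout, milliseconds
        config.setSocketTimeout(50000);       // read timeout, milliseconds
        config.setMaxErrorRetry(3);           // retry budget for retryable failures
        return config;
    }
}

Every setter also has a fluent with* twin that mutates and returns the same instance, a pattern several of the examples below rely on.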

Usage

From source file: com.pearson.eidetic.driver.threads.MonitorSnapshotVolumeTime.java

public AmazonEC2Client connect(Region region, String awsAccessKey, String awsSecretKey) {
    AmazonEC2Client ec2Client;
    String endpoint = "ec2." + region.getName() + ".amazonaws.com";

    AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTPS);

    ec2Client = new AmazonEC2Client(credentials, clientConfig);
    ec2Client.setRegion(region);
    ec2Client.setEndpoint(endpoint);
    return ec2Client;
}
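
A hypothetical call site for the connect(...) method above. The key strings are placeholders, and the assumption that MonitorSnapshotVolumeTime has a usable no-argument constructor is mine, not the source's.

import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.ec2.AmazonEC2Client;

public class ConnectExample {
    public static void main(String[] args) {
        // Placeholder credentials; real keys would come from configuration.
        Region region = Region.getRegion(Regions.US_EAST_1);
        AmazonEC2Client ec2Client = new MonitorSnapshotVolumeTime()
                .connect(region, "ACCESS_KEY_ID", "SECRET_ACCESS_KEY");
        System.out.println(ec2Client.describeRegions().getRegions());
    }
}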

From source file: com.peel.kinesisStorm.AnalyticsTopology.java

License: Open Source License

public static void main(String[] args) throws IllegalArgumentException, KeeperException, InterruptedException,
        AlreadyAliveException, InvalidTopologyException, IOException {

    String propertiesFile = null;
    IKinesisRecordScheme scheme = new DefaultKinesisRecordScheme();
    String mode = null;

    // Cache successful DNS lookups for only 30 seconds.
    java.security.Security.setProperty("networkaddress.cache.ttl", "30");

    if (args.length != 2) {
        printUsageAndExit();
    } else {
        propertiesFile = args[0];
        mode = args[1];
    }

    StormConfigurator config = new AnalyticsTopology(propertiesFile);
    new ConfigurationParameters(config);

    final KinesisSpoutConfig spoutConfig = new KinesisSpoutConfig(config.STREAM_NAME, config.MAX_RECORDS,
            config.INTIAL_POSITION_IN_STREAM, config.ZOOKEEPER_PREFIX, config.ZOOKEEPER_ENDPOINT,
            config.ZOOKEEPER_SESSIONTIMEOUT, config.CHECKPOINT_INTERVAL, scheme);

    final KinesisSpout spout = new KinesisSpout(spoutConfig, new CustomCredentialsProviderChain(),
            new ClientConfiguration());

    TopologyBuilder builder = new TopologyBuilder();

    // Using number of shards as the parallelism hint for the spout.
    builder.setSpout("kinesis_spout", spout, 2);
    builder.setBolt("print_bolt", new ParsingBolt(), 2).fieldsGrouping("kinesis_spout",
            new Fields(DefaultKinesisRecordScheme.FIELD_PARTITION_KEY));
    builder.setBolt("topchannels", new ParameterProcessorBolt(), 2).fieldsGrouping("print_bolt", "processing",
            new Fields(EventRecordScheme.FIELD_NOGROUP));
    builder.setBolt("calculateTopChannels", new ShowProcessorBolt(), 1).fieldsGrouping("topchannels",
            "channelslist", new Fields(ShowProcessorSchema.FIELD_NOGROUPING));
    builder.setBolt("slidingWindow", new SlidingWindow(), 1).fieldsGrouping("calculateTopChannels", "mapFrames",
            new Fields(SlidingWindowSchema.FIELD_NOGROUP));

    Config topoConf = new Config();
    topoConf.setFallBackOnJavaSerialization(true);
    topoConf.setDebug(false);

    if (mode.equals("LocalMode")) {
        LOG.info("Starting sample storm topology in LocalMode ...");
        new LocalCluster().submitTopology("test_spout", topoConf, builder.createTopology());
    } else if (mode.equals("RemoteMode")) {
        topoConf.setNumWorkers(1);
        topoConf.setMaxSpoutPending(5000);
        LOG.info("Submitting sample topology " + config.TOPOLOGY_NAME + " to remote cluster.");
        StormSubmitter.submitTopology(config.TOPOLOGY_NAME, topoConf, builder.createTopology());
    } else {
        printUsageAndExit();
    }

}
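
The topology hands KinesisSpout a bare new ClientConfiguration(), i.e. pure SDK defaults. If the workers sat behind a proxy or needed different timeouts, a tuned instance could be passed in its place; a sketch with illustrative numbers:

import com.amazonaws.ClientConfiguration;

final class SpoutClientConfig {
    // Possible replacement for the bare new ClientConfiguration() above.
    // The with* setters mutate and return the same instance.
    static ClientConfiguration tuned() {
        return new ClientConfiguration()
                .withMaxErrorRetry(10)        // Kinesis throttles under load
                .withConnectionTimeout(5000)  // milliseconds
                .withSocketTimeout(25000);    // milliseconds
    }
}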

From source file: com.pinterest.secor.uploader.S3UploadManager.java

License: Apache License

public S3UploadManager(SecorConfig config) {
    super(config);

    final String accessKey = mConfig.getAwsAccessKey();
    final String secretKey = mConfig.getAwsSecretKey();
    final String endpoint = mConfig.getAwsEndpoint();
    final String region = mConfig.getAwsRegion();
    final String awsRole = mConfig.getAwsRole();

    s3Path = mConfig.getS3Path();

    AmazonS3 client;
    AWSCredentialsProvider provider;

    ClientConfiguration clientConfiguration = new ClientConfiguration();
    boolean isHttpProxyEnabled = mConfig.getAwsProxyEnabled();

    // Apply HTTP proxy settings when enabled.
    if (isHttpProxyEnabled) {
        LOG.info("Http Proxy Enabled for S3UploadManager");
        String httpProxyHost = mConfig.getAwsProxyHttpHost();
        int httpProxyPort = mConfig.getAwsProxyHttpPort();
        clientConfiguration.setProxyHost(httpProxyHost);
        clientConfiguration.setProxyPort(httpProxyPort);
    }

    if (accessKey.isEmpty() || secretKey.isEmpty()) {
        provider = new DefaultAWSCredentialsProviderChain();
    } else {
        provider = new AWSCredentialsProvider() {
            public AWSCredentials getCredentials() {
                return new BasicAWSCredentials(accessKey, secretKey);
            }

            public void refresh() {
            }
        };
    }

    if (!awsRole.isEmpty()) {
        provider = new STSAssumeRoleSessionCredentialsProvider(provider, awsRole, "secor");
    }

    client = new AmazonS3Client(provider, clientConfiguration);

    if (!endpoint.isEmpty()) {
        client.setEndpoint(endpoint);
    } else if (!region.isEmpty()) {
        client.setRegion(Region.getRegion(Regions.fromName(region)));
    }

    mManager = new TransferManager(client);
}
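
On SDK releases that ship AWSStaticCredentialsProvider (1.11.x and later), the hand-rolled anonymous provider above has a built-in equivalent; a sketch:

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;

final class StaticProviderSketch {
    // Same behavior as the anonymous provider in the constructor above:
    // fixed credentials and a no-op refresh.
    static AWSCredentialsProvider of(String accessKey, String secretKey) {
        return new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey));
    }
}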

From source file: com.plumbee.flume.source.sqs.SQSSource.java

License: Apache License

@Override
public void configure(Context context) {

    // Mandatory configuration parameters.
    queueURL = context.getString(ConfigurationConstants.CONFIG_QUEUE_URL);
    Preconditions.checkArgument(StringUtils.isNotBlank(queueURL), ErrorMessages.MISSING_MANDATORY_PARAMETER,
            ConfigurationConstants.CONFIG_QUEUE_URL);

    // Optional configuration parameters.
    queueRecvBatchSize = context.getInteger(ConfigurationConstants.CONFIG_RECV_BATCH_SIZE,
            ConfigurationConstants.DEFAULT_RECV_BATCH_SIZE);
    Preconditions.checkArgument(queueRecvBatchSize > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_RECV_BATCH_SIZE);

    queueDeleteBatchSize = context.getInteger(ConfigurationConstants.CONFIG_DELETE_BATCH_SIZE,
            ConfigurationConstants.DEFAULT_DELETE_BATCH_SIZE);
    Preconditions.checkArgument(queueDeleteBatchSize > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_DELETE_BATCH_SIZE);

    queueRecvPollingTimeout = context.getInteger(ConfigurationConstants.CONFIG_RECV_TIMEOUT,
            ConfigurationConstants.DEFAULT_RECV_TIMEOUT);
    Preconditions.checkArgument(queueRecvPollingTimeout > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_RECV_TIMEOUT);

    queueRecvVisabilityTimeout = context.getInteger(ConfigurationConstants.CONFIG_RECV_VISTIMEOUT,
            ConfigurationConstants.DEFAULT_RECV_VISTIMEOUT);
    Preconditions.checkArgument(queueRecvVisabilityTimeout > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_RECV_VISTIMEOUT);

    batchSize = context.getInteger(ConfigurationConstants.CONFIG_BATCH_SIZE,
            ConfigurationConstants.DEFAULT_BATCH_SIZE);
    Preconditions.checkArgument(batchSize > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_BATCH_SIZE);

    nbThreads = context.getInteger(ConfigurationConstants.CONFIG_NB_CONSUMER_THREADS,
            ConfigurationConstants.DEFAULT_NB_CONSUMER_THREADS);
    Preconditions.checkArgument(nbThreads > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_NB_CONSUMER_THREADS);
    Preconditions.checkArgument(nbThreads <= ClientConfiguration.DEFAULT_MAX_CONNECTIONS,
            "%s cannot exceed %s (default Amazon client connection pool size)",
            ConfigurationConstants.CONFIG_NB_CONSUMER_THREADS, ClientConfiguration.DEFAULT_MAX_CONNECTIONS);

    // Don't let the number of messages polled from SQS in a single call
    // exceed the transaction batchSize of the downstream channel.
    Preconditions.checkArgument(queueRecvBatchSize <= batchSize, "%s must be less than or equal to %s",
            ConfigurationConstants.CONFIG_RECV_BATCH_SIZE, ConfigurationConstants.CONFIG_BATCH_SIZE);

    flushInterval = context.getLong(ConfigurationConstants.CONFIG_FLUSH_INTERVAL,
            ConfigurationConstants.DEFAULT_FLUSH_INTERVAL);
    Preconditions.checkArgument(flushInterval > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_FLUSH_INTERVAL);
    flushInterval = TimeUnit.SECONDS.toMillis(flushInterval);

    // Runner backoff configuration.
    maxBackOffSleep = context.getLong(ConfigurationConstants.CONFIG_MAX_BACKOFF_SLEEP,
            ConfigurationConstants.DEFAULT_MAX_BACKOFF_SLEEP);
    Preconditions.checkArgument(maxBackOffSleep > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_MAX_BACKOFF_SLEEP);

    backOffSleepIncrement = context.getLong(ConfigurationConstants.CONFIG_BACKOFF_SLEEP_INCREMENT,
            ConfigurationConstants.DEFAULT_BACKOFF_SLEEP_INCREMENT);
    Preconditions.checkArgument(backOffSleepIncrement > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_BACKOFF_SLEEP_INCREMENT);

    Preconditions.checkArgument(flushInterval > maxBackOffSleep, "%s too high, %s cannot be respected",
            ConfigurationConstants.CONFIG_MAX_BACKOFF_SLEEP, ConfigurationConstants.CONFIG_FLUSH_INTERVAL);

    // Log a warning if flushInterval plus maxBackOffSleep exceeds the
    // queueRecvVisabilityTimeout of messages. On queues with low
    // throughput this can cause message duplication!
    if ((flushInterval + maxBackOffSleep) > TimeUnit.SECONDS.toMillis(queueRecvVisabilityTimeout)) {
        LOGGER.warn("{} too low, potential for message duplication",
                ConfigurationConstants.CONFIG_FLUSH_INTERVAL);
    }

    // The following configuration options allow credentials to be
    // provided via the configuration context.
    String awsAccessKeyId = context.getString(ConfigurationConstants.CONFIG_AWS_ACCESS_KEY_ID);
    String awsSecretKey = context.getString(ConfigurationConstants.CONFIG_AWS_SECRET_KEY);

    if (StringUtils.isNotBlank(awsAccessKeyId) && StringUtils.isNotBlank(awsSecretKey)) {
        if (client == null) {
            // Create the AmazonSQSClient using BasicAWSCredentials
            client = new AmazonSQSClient(new BasicAWSCredentials(awsAccessKeyId, awsSecretKey),
                    new ClientConfiguration().withMaxConnections(nbThreads));
        } else {
            LOGGER.warn("Cannot set AWS credentials for AmazonSQSClient, " + "client already initialized");
        }
    }

    // Default to the DefaultAWSCredentialsProviderChain.
    if (client == null) {
        client = new AmazonSQSClient(new ClientConfiguration().withMaxConnections(nbThreads));
    }
}
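
The sizing rule enforced above, one pooled HTTP connection per consumer thread and never more than ClientConfiguration.DEFAULT_MAX_CONNECTIONS, can be distilled into a small factory; a sketch:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.services.sqs.AmazonSQSClient;

final class SqsClientFactory {
    // Mirrors the precondition checks in configure() above.
    static AmazonSQSClient forThreads(int nbThreads) {
        if (nbThreads < 1 || nbThreads > ClientConfiguration.DEFAULT_MAX_CONNECTIONS) {
            throw new IllegalArgumentException("nbThreads out of range: " + nbThreads);
        }
        return new AmazonSQSClient(new ClientConfiguration().withMaxConnections(nbThreads));
    }
}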

From source file: com.rapid7.diskstorage.dynamodb.Client.java

License: Open Source License

public Client(com.thinkaurelius.titan.diskstorage.configuration.Configuration config) {
    String credentialsClassName = config.get(Constants.DYNAMODB_CREDENTIALS_CLASS_NAME);
    Class<?> clazz;
    try {
        clazz = Class.forName(credentialsClassName);
    } catch (ClassNotFoundException e) {
        throw new IllegalArgumentException(VALIDATE_CREDENTIALS_CLASS_NAME, e);
    }

    String[] credentialsConstructorArgsValues = config.get(Constants.DYNAMODB_CREDENTIALS_CONSTRUCTOR_ARGS);
    final List<String> filteredArgList = new ArrayList<String>();
    for (Object obj : credentialsConstructorArgsValues) {
        final String str = obj.toString();
        if (!str.isEmpty()) {
            filteredArgList.add(str);
        }
    }

    AWSCredentialsProvider credentialsProvider;
    if (AWSCredentials.class.isAssignableFrom(clazz)) {
        AWSCredentials credentials = createCredentials(clazz,
                filteredArgList.toArray(new String[filteredArgList.size()]));
        credentialsProvider = new StaticCredentialsProvider(credentials);
    } else if (AWSCredentialsProvider.class.isAssignableFrom(clazz)) {
        credentialsProvider = createCredentialsProvider(clazz, credentialsConstructorArgsValues);
    } else {
        throw new IllegalArgumentException(VALIDATE_CREDENTIALS_CLASS_NAME);
    }
    //begin adaptation of constructor at
    //https://github.com/buka/titan/blob/master/src/main/java/com/thinkaurelius/titan/diskstorage/dynamodb/DynamoDBClient.java#L77
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.withConnectionTimeout(config.get(Constants.DYNAMODB_CLIENT_CONN_TIMEOUT)) //
            .withConnectionTTL(config.get(Constants.DYNAMODB_CLIENT_CONN_TTL)) //
            .withMaxConnections(config.get(Constants.DYNAMODB_CLIENT_MAX_CONN)) //
            .withMaxErrorRetry(config.get(Constants.DYNAMODB_CLIENT_MAX_ERROR_RETRY)) //
            .withGzip(config.get(Constants.DYNAMODB_CLIENT_USE_GZIP)) //
            .withReaper(config.get(Constants.DYNAMODB_CLIENT_USE_REAPER)) //
            .withUserAgent(config.get(Constants.DYNAMODB_CLIENT_USER_AGENT)) //
            .withSocketTimeout(config.get(Constants.DYNAMODB_CLIENT_SOCKET_TIMEOUT)) //
            .withSocketBufferSizeHints( //
                    config.get(Constants.DYNAMODB_CLIENT_SOCKET_BUFFER_SEND_HINT), //
                    config.get(Constants.DYNAMODB_CLIENT_SOCKET_BUFFER_RECV_HINT)) //
            .withProxyDomain(config.get(Constants.DYNAMODB_CLIENT_PROXY_DOMAIN)) //
            .withProxyWorkstation(config.get(Constants.DYNAMODB_CLIENT_PROXY_WORKSTATION)) //
            .withProxyHost(config.get(Constants.DYNAMODB_CLIENT_PROXY_HOST)) //
            .withProxyPort(config.get(Constants.DYNAMODB_CLIENT_PROXY_PORT)) //
            .withProxyUsername(config.get(Constants.DYNAMODB_CLIENT_PROXY_USERNAME)) //
            .withProxyPassword(config.get(Constants.DYNAMODB_CLIENT_PROXY_PASSWORD)); //

    forceConsistentRead = config.get(Constants.DYNAMODB_FORCE_CONSISTENT_READ);
    //end adaptation of constructor at
    //https://github.com/buka/titan/blob/master/src/main/java/com/thinkaurelius/titan/diskstorage/dynamodb/DynamoDBClient.java#L77
    enableParallelScan = config.get(Constants.DYNAMODB_ENABLE_PARALLEL_SCAN);
    prefix = config.get(Constants.DYNAMODB_TABLE_PREFIX);
    final String metricsPrefix = config.get(Constants.DYNAMODB_METRICS_PREFIX);

    final long maxRetries = config.get(Constants.DYNAMODB_MAX_SELF_THROTTLED_RETRIES);
    if (maxRetries < 0) {
        throw new IllegalArgumentException(
                Constants.DYNAMODB_MAX_SELF_THROTTLED_RETRIES.getName() + " must be at least 0");
    }
    final long retryMillis = config.get(Constants.DYNAMODB_INITIAL_RETRY_MILLIS);
    if (retryMillis <= 0) {
        throw new IllegalArgumentException(
                Constants.DYNAMODB_INITIAL_RETRY_MILLIS.getName() + " must be at least 1");
    }
    final double controlPlaneRate = config.get(Constants.DYNAMODB_CONTROL_PLANE_RATE);
    if (controlPlaneRate <= 0) {
        throw new IllegalArgumentException("must have a positive control plane rate");
    }
    final RateLimiter controlPlaneRateLimiter = RateLimiter.create(controlPlaneRate);

    final Map<String, RateLimiter> readRateLimit = new HashMap<>();
    final Map<String, RateLimiter> writeRateLimit = new HashMap<>();

    Set<String> storeNames = new HashSet<String>(Constants.REQUIRED_BACKEND_STORES);
    storeNames.addAll(config.getContainedNamespaces(Constants.DYNAMODB_STORES_NAMESPACE));
    for (String storeName : storeNames) {
        setupStore(config, prefix, readRateLimit, writeRateLimit, storeName);
    }

    endpoint = TitanConfigUtil.getNullableConfigValue(config, Constants.DYNAMODB_CLIENT_ENDPOINT);
    delegate = new DynamoDBDelegate(endpoint, credentialsProvider, clientConfig, config, readRateLimit,
            writeRateLimit, maxRetries, retryMillis, prefix, metricsPrefix, controlPlaneRateLimiter);
}
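
Note the pattern in the constructor above: ClientConfiguration's with* methods mutate the receiver and return it, so the long chain configures clientConfig in place even though the chain's result is discarded. A minimal demonstration:

import com.amazonaws.ClientConfiguration;

public class FluentConfigDemo {
    public static void main(String[] args) {
        ClientConfiguration cc = new ClientConfiguration();
        // The chain's return value can be ignored; cc itself is modified.
        cc.withMaxConnections(10).withGzip(true);
        System.out.println(cc.getMaxConnections()); // 10
        System.out.println(cc.useGzip());           // true
    }
}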

From source file: com.sitewhere.aws.SqsOutboundEventProcessor.java

License: Open Source License

@Override
public void start() throws SiteWhereException {
    super.start();

    ClientConfiguration config = new ClientConfiguration();
    config.setMaxConnections(250);
    config.setMaxErrorRetry(5);

    if (getAccessKey() == null) {
        throw new SiteWhereException("Amazon access key not provided.");
    }

    if (getSecretKey() == null) {
        throw new SiteWhereException("Amazon secret key not provided.");
    }

    sqs = new AmazonSQSClient(new BasicAWSCredentials(getAccessKey(), getSecretKey()), config);
    Region usEast1 = Region.getRegion(Regions.US_EAST_1);
    sqs.setRegion(usEast1);
}

From source file: com.sitewhere.connectors.aws.sqs.SqsOutboundEventProcessor.java

License: Open Source License

@Override
public void start(ILifecycleProgressMonitor monitor) throws SiteWhereException {
    super.start(monitor);

    ClientConfiguration config = new ClientConfiguration();
    config.setMaxConnections(250);
    config.setMaxErrorRetry(5);

    if (getAccessKey() == null) {
        throw new SiteWhereException("Amazon access key not provided.");
    }

    if (getSecretKey() == null) {
        throw new SiteWhereException("Amazon secret key not provided.");
    }

    sqs = new AmazonSQSClient(new BasicAWSCredentials(getAccessKey(), getSecretKey()), config);
    Region usEast1 = Region.getRegion(Regions.US_EAST_1);
    sqs.setRegion(usEast1);
}
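
setMaxErrorRetry(5) above raises the retry ceiling while keeping the SDK's default backoff. A heavier-handed alternative is to install a complete retry policy via the v1 SDK's predefined policies; a sketch, with an illustrative count:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.retry.PredefinedRetryPolicies;

final class RetryConfigSketch {
    static ClientConfiguration withRetries(int maxRetries) {
        ClientConfiguration config = new ClientConfiguration();
        // Default backoff strategy, custom retry ceiling.
        config.setRetryPolicy(
                PredefinedRetryPolicies.getDefaultRetryPolicyWithCustomMaxRetries(maxRetries));
        return config;
    }
}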

From source file: com.smoketurner.pipeline.application.config.AwsConfiguration.java

License: Apache License

@JsonIgnore
public ClientConfiguration getClientConfiguration() {
    final ClientConfiguration clientConfig = new ClientConfiguration();
    if (proxy.isPresent()) {
        clientConfig.setProxyHost(proxy.get().getHostText());
        clientConfig.setProxyPort(proxy.get().getPort());
    }
    clientConfig.setUseTcpKeepAlive(true);
    // needs to be false to support streaming gunzipping
    clientConfig.setUseGzip(false);
    return clientConfig;
}
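
A hypothetical consumer of getClientConfiguration() above; the withClientConfiguration hook on the client builders exists on AWS SDK for Java 1.11 and later.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

final class S3ClientFactory {
    static AmazonS3 build(AwsConfiguration awsConfig) {
        // Plug the Dropwizard-managed configuration into the S3 builder.
        return AmazonS3ClientBuilder.standard()
                .withClientConfiguration(awsConfig.getClientConfiguration())
                .build();
    }
}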

From source file: com.srotya.flume.kinesis.sink.KinesisSink.java

License: Apache License

@Override
public void configure(Context ctx) {
    ImmutableMap<String, String> props = ctx.getSubProperties(Constants.SETTINGS);
    if (!props.containsKey(Constants.ACCESS_KEY) || !props.containsKey(Constants.ACCESS_SECRET)) {
        Throwables.propagate(
                new InvalidArgumentException("Must provide AWS credentials i.e. accessKey and accessSecret"));
    }
    awsCredentials = new BasicAWSCredentials(props.get(Constants.ACCESS_KEY),
            props.get(Constants.ACCESS_SECRET));
    clientConfig = new ClientConfiguration();
    if (props.containsKey(Constants.PROXY_HOST)) {
        clientConfig.setProxyHost(props.get(Constants.PROXY_HOST));
        clientConfig.setProxyPort(Integer.parseInt(props.getOrDefault(Constants.PROXY_PORT, "80")));
        clientConfig.setProtocol(Protocol.valueOf(props.getOrDefault(Constants.PROTOCOL, "HTTPS")));
    }
    if (!props.containsKey(Constants.STREAM_NAME)) {
        Throwables.propagate(new InvalidArgumentException("Must provide Kinesis stream name"));
    }
    streamName = props.get(Constants.STREAM_NAME);
    putSize = Integer.parseInt(props.getOrDefault(Constants.PUT_SIZE, "100"));
    if (putSize > 500) {
        Throwables.propagate(
                new InvalidArgumentException("AWS Kinesis doesn't allow more than 500 records per put request"));
    }
    endpoint = props.getOrDefault(Constants.ENDPOINT, Constants.DEFAULT_ENDPOINT);
    String serializerClass = props.getOrDefault(Constants.SERIALIZER, GsonSerializer.class.getName());
    try {
        serializer = (KinesisSerializer) Class.forName(serializerClass).newInstance();
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
        Throwables.propagate(e);
    }
    serializer.configure(props);
}
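
How the sink presumably wires awsCredentials, clientConfig, and endpoint together once it starts; a sketch, not the actual KinesisSink code:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.services.kinesis.AmazonKinesisClient;

final class KinesisClientSketch {
    static AmazonKinesisClient build(AWSCredentials credentials,
            ClientConfiguration clientConfig, String endpoint) {
        AmazonKinesisClient client = new AmazonKinesisClient(credentials, clientConfig);
        client.setEndpoint(endpoint); // e.g. kinesis.us-east-1.amazonaws.com
        return client;
    }
}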

From source file: com.srotya.flume.kinesis.source.KinesisSource.java

License: Apache License

@Override
protected void doConfigure(Context ctx) throws FlumeException {
    ImmutableMap<String, String> props = ctx.getSubProperties(Constants.SETTINGS);
    if (!props.containsKey(Constants.ACCESS_KEY) || !props.containsKey(Constants.ACCESS_SECRET)) {
        Throwables.propagate(
                new InvalidArgumentException("Must provide AWS credentials i.e. accessKey and accessSecret"));
    }
    awsCredentials = new BasicAWSCredentials(props.get(Constants.ACCESS_KEY),
            props.get(Constants.ACCESS_SECRET));
    clientConfig = new ClientConfiguration();
    if (props.containsKey(Constants.PROXY_HOST)) {
        clientConfig.setProxyHost(props.get(Constants.PROXY_HOST));
        clientConfig.setProxyPort(Integer.parseInt(props.getOrDefault(Constants.PROXY_PORT, "80")));
        clientConfig.setProtocol(Protocol.valueOf(props.getOrDefault(Constants.PROTOCOL, "HTTPS")));
    }
    if (!props.containsKey(Constants.STREAM_NAME)) {
        Throwables.propagate(new InvalidArgumentException("Must provide Kinesis stream name"));
    }
    streamName = props.get(Constants.STREAM_NAME);
    putSize = Integer.parseInt(props.getOrDefault(Constants.PUT_SIZE, "100"));
    if (putSize > 500) {
        Throwables.propagate(
                new InvalidArgumentException("AWS Kinesis doesn't allow more than 500 records per put request"));
    }
    endpoint = props.getOrDefault(Constants.ENDPOINT, Constants.DEFAULT_ENDPOINT);
    String serializerClass = props.getOrDefault(Constants.SERIALIZER, GsonSerializer.class.getName());
    try {
        serializer = (KinesisSerializer) Class.forName(serializerClass).newInstance();
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
        Throwables.propagate(e);
    }
    serializer.configure(props);
    shardId = Integer.parseInt(props.getOrDefault(SHARD_INDEX, "0"));
    shardIteratorType = props.getOrDefault(ITERATOR_TYPE, DEFAULT_ITERATOR_TYPE);
}
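
A sketch of how the configured stream name, shard, and iterator type might be consumed once polling begins (the source's shardId is a numeric index that would first be resolved to a real shard ID); not the actual KinesisSource code:

import com.amazonaws.services.kinesis.AmazonKinesisClient;
import com.amazonaws.services.kinesis.model.GetShardIteratorRequest;

final class ShardIteratorSketch {
    static String openIterator(AmazonKinesisClient client, String streamName,
            String shardId, String shardIteratorType) {
        GetShardIteratorRequest request = new GetShardIteratorRequest()
                .withStreamName(streamName)
                .withShardId(shardId)                      // e.g. "shardId-000000000000"
                .withShardIteratorType(shardIteratorType); // e.g. "TRIM_HORIZON"
        return client.getShardIterator(request).getShardIterator();
    }
}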