Example usage for com.amazonaws ClientConfiguration ClientConfiguration

Introduction

On this page you can find example usage for the no-argument com.amazonaws ClientConfiguration constructor.

Prototype

public ClientConfiguration() 
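
A minimal sketch of the pattern the examples below share: start from the default configuration, tune it through the fluent with*/set* methods, and hand it to a client builder. The timeout, retry, and region values here are illustrative assumptions, not taken from any of the sources.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class ClientConfigurationSketch {
    public static void main(String[] args) {
        // Start from SDK defaults, then override selectively
        ClientConfiguration config = new ClientConfiguration()
                .withConnectionTimeout(10000) // milliseconds
                .withSocketTimeout(50000)
                .withMaxErrorRetry(3);

        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(config)
                .withRegion(Regions.US_EAST_1) // illustrative region
                .build();
        System.out.println("Client bound to region: " + s3.getRegionName());
    }
}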

Usage

From source file:com.github.sjones4.youcan.youtwo.YouTwoClient.java

License:Open Source License

public YouTwoClient(final AWSCredentialsProvider awsCredentialsProvider) {
    this(awsCredentialsProvider, new ClientConfiguration());
}
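
Because this one-argument constructor simply delegates with SDK defaults, a caller needing custom settings can presumably invoke the two-argument form directly. A hedged sketch; the provider chain and timeout are chosen for illustration only:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;

YouTwoClient client = new YouTwoClient(
        new DefaultAWSCredentialsProviderChain(),
        new ClientConfiguration().withConnectionTimeout(10000));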

From source file:com.gu.logback.appender.kinesis.BaseKinesisAppender.java

License:Open Source License

/**
 * Configures this appender instance and makes it ready for use by
 * consumers. It validates mandatory parameters and confirms that the
 * configured stream is ready for publishing data.
 *
 * Error details are made available through the fallback handler for this
 * appender.
 *
 * @throws IllegalStateException if we encounter issues configuring this
 *           appender instance
 */
@Override
public void start() {
    if (layout == null) {
        initializationFailed = true;
        addError("Invalid configuration - No layout for appender: " + name);
        return;
    }

    if (streamName == null) {
        initializationFailed = true;
        addError("Invalid configuration - streamName cannot be null for appender: " + name);
        return;
    }

    ClientConfiguration clientConfiguration = new ClientConfiguration();
    clientConfiguration.setMaxErrorRetry(maxRetries);
    clientConfiguration.setRetryPolicy(new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
            PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, maxRetries, true));
    clientConfiguration.setUserAgent(AppenderConstants.USER_AGENT_STRING);

    BlockingQueue<Runnable> taskBuffer = new LinkedBlockingDeque<Runnable>(bufferSize);
    threadPoolExecutor = new ThreadPoolExecutor(threadCount, threadCount,
            AppenderConstants.DEFAULT_THREAD_KEEP_ALIVE_SEC, TimeUnit.SECONDS, taskBuffer,
            new BlockFastProducerPolicy());
    threadPoolExecutor.prestartAllCoreThreads();

    this.client = createClient(credentials, clientConfiguration, threadPoolExecutor);

    client.setRegion(findRegion());
    if (!Validator.isBlank(endpoint)) {
        if (!Validator.isBlank(region)) {
            addError("Received configuration for both region as well as Amazon Kinesis endpoint. (" + endpoint
                    + ") will be used as endpoint instead of default endpoint for region (" + region + ")");
        }
        client.setEndpoint(endpoint);
    }

    validateStreamName(client, streamName);

    super.start();
}
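
The RetryPolicy above is assembled by hand from the predefined condition and backoff strategy. As an aside, the SDK also ships a convenience factory that yields a similar policy in one call; a minimal sketch, with the retry count of 3 as an illustrative value:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.retry.PredefinedRetryPolicies;

public class RetryConfigSketch {
    static ClientConfiguration withRetries(int maxRetries) {
        // The factory bakes the retry count into the policy itself,
        // so a separate setMaxErrorRetry(...) call is not required.
        return new ClientConfiguration().withRetryPolicy(
                PredefinedRetryPolicies.getDefaultRetryPolicyWithCustomMaxRetries(maxRetries));
    }
}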

From source file:com.gu.logback.appender.kinesis.KinesisAppender.java

License:Open Source License

/**
 * Configures this appender instance and makes it ready for use by
 * consumers. It validates mandatory parameters and confirms that the
 * configured stream is ready for publishing data.
 *
 * Error details are made available through the fallback handler for this
 * appender.
 *
 * @throws IllegalStateException
 *           if we encounter issues configuring this appender instance
 */
@Override
public void start() {
    if (layout == null) {
        initializationFailed = true;
        addError("Invalid configuration - No layout for appender: " + name);
        return;
    }

    if (streamName == null) {
        initializationFailed = true;
        addError("Invalid configuration - streamName cannot be null for appender: " + name);
        return;
    }

    ClientConfiguration clientConfiguration = new ClientConfiguration();
    clientConfiguration.setMaxErrorRetry(maxRetries);
    clientConfiguration.setRetryPolicy(new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
            PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, maxRetries, true));
    clientConfiguration.setUserAgent(AppenderConstants.USER_AGENT_STRING);

    BlockingQueue<Runnable> taskBuffer = new LinkedBlockingDeque<Runnable>(bufferSize);
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(threadCount, threadCount,
            AppenderConstants.DEFAULT_THREAD_KEEP_ALIVE_SEC, TimeUnit.SECONDS, taskBuffer,
            new BlockFastProducerPolicy());
    threadPoolExecutor.prestartAllCoreThreads();
    kinesisClient = new AmazonKinesisAsyncClient(credentials, clientConfiguration, threadPoolExecutor);

    boolean regionProvided = !Validator.isBlank(region);
    if (!regionProvided) {
        region = AppenderConstants.DEFAULT_REGION;
    }
    kinesisClient.setRegion(Region.getRegion(Regions.fromName(region)));
    if (!Validator.isBlank(endpoint)) {
        if (regionProvided) {
            addError("Received configuration for both region as well as Amazon Kinesis endpoint. (" + endpoint
                    + ") will be used as endpoint instead of default endpoint for region (" + region + ")");
        }
        kinesisClient.setEndpoint(endpoint);
    }

    DescribeStreamResult describeResult = null;
    try {
        describeResult = kinesisClient.describeStream(streamName);
        String streamStatus = describeResult.getStreamDescription().getStreamStatus();
        if (!StreamStatus.ACTIVE.name().equals(streamStatus)
                && !StreamStatus.UPDATING.name().equals(streamStatus)) {
            initializationFailed = true;
            addError(
                    "Stream " + streamName + " is not ready (in active/updating status) for appender: " + name);
        }
    } catch (ResourceNotFoundException rnfe) {
        initializationFailed = true;
        addError("Stream " + streamName + " doesn't exist for appender: " + name, rnfe);
    }

    asyncCallHander = new AsyncPutCallStatsReporter(this); // field name as in the original source

    super.start();
}

From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License:MIT License

/**
 * Creates an Amazon S3 client and verifies that the given bucket exists,
 * creating it when necessary.
 *
 * @param bucketName name of the bucket to verify or create
 * @return s3client
 * @throws StoreException if the bucket cannot be accessed or created
 */
private AmazonS3 getS3Client(String bucketName) throws StoreException {
    logger.debug("get S3 Client start.");
    // Credentials from environment variables
    AWSCredentialsProvider provider = new EnvironmentVariableCredentialsProvider();

    // Client configuration
    ClientConfiguration clientConfig = new ClientConfiguration()

            // Proxy settings, enable if needed:
            // .withProtocol(Protocol.HTTPS)
            // .withProxyHost("proxyHost")
            // .withProxyPort(80)
            // .withProxyUsername("proxyUsername")
            // .withProxyPassword("proxyPassword")

            .withConnectionTimeout(10000);

    // Build the S3 client
    AmazonS3 s3client = AmazonS3ClientBuilder.standard().withCredentials(provider)
            .withClientConfiguration(clientConfig).withRegion(DEFAULT_REGION)
            .withForceGlobalBucketAccessEnabled(true).build();

    logger.debug("Region={}", s3client.getRegion());

    try {
        // Create the bucket if it doesn't already exist
        if (!CommonUtils.isNullOrEmpty(bucketName) && !(s3client.doesBucketExistV2(bucketName))) {
            s3client.createBucket(new CreateBucketRequest(bucketName, DEFAULT_REGION.getName()));
        }
        // Get location.
        String bucketLocation = s3client.getBucketLocation(new GetBucketLocationRequest(bucketName));
        logger.info("bucket location={}", bucketLocation);
    } catch (AmazonClientException ace) {
        throw new StoreException(HttpStatus.SC_CONFLICT, ErrorClassification.BS0003, ace, "Failed to access or create the S3 bucket");
    }

    logger.info("get S3 Client end.");
    return s3client;
}
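
The commented-out lines in the configuration above hint at proxy support. Spelled out, the fluent proxy setup might look like the sketch below; the host, port, and credentials are hypothetical placeholders, not values from the original source.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;

public class ProxyConfigSketch {
    static ClientConfiguration proxied() {
        return new ClientConfiguration()
                .withProtocol(Protocol.HTTPS)
                .withProxyHost("proxy.example.com") // hypothetical proxy host
                .withProxyPort(8080)                // hypothetical port
                .withProxyUsername("proxyUsername") // hypothetical credentials
                .withProxyPassword("proxyPassword")
                .withConnectionTimeout(10000);      // 10 s, matching the example above
    }
}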

From source file:com.hpe.caf.worker.datastore.s3.S3DataStore.java

License:Apache License

public S3DataStore(final S3DataStoreConfiguration s3DataStoreConfiguration) {
    if (s3DataStoreConfiguration == null) {
        throw new ArgumentException("s3DataStoreConfiguration was null.");
    }

    ClientConfiguration clientCfg = new ClientConfiguration();

    if (!StringUtils.isNullOrEmpty(s3DataStoreConfiguration.getProxyHost())) {
        clientCfg.setProtocol(Protocol.valueOf(s3DataStoreConfiguration.getProxyProtocol()));
        clientCfg.setProxyHost(s3DataStoreConfiguration.getProxyHost());
        clientCfg.setProxyPort(s3DataStoreConfiguration.getProxyPort());
    }
    AWSCredentials credentials = new BasicAWSCredentials(s3DataStoreConfiguration.getAccessKey(),
            s3DataStoreConfiguration.getSecretKey());
    bucketName = s3DataStoreConfiguration.getBucketName();
    amazonS3Client = new AmazonS3Client(credentials, clientCfg);
    amazonS3Client.setBucketAccelerateConfiguration(new SetBucketAccelerateConfigurationRequest(bucketName,
            new BucketAccelerateConfiguration(BucketAccelerateStatus.Enabled)));
}
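
Note that the constructor enables Transfer Acceleration on the bucket itself; for requests to actually travel through the accelerate endpoint, the client typically needs accelerate mode switched on as well. A hedged sketch of that client-side flag, which is not present in the original source:

import com.amazonaws.services.s3.S3ClientOptions;

// Assuming the amazonS3Client field created above:
amazonS3Client.setS3ClientOptions(
        S3ClientOptions.builder().setAccelerateModeEnabled(true).build());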

From source file:com.ibm.stocator.fs.cos.COSAPIClient.java

License:Apache License

@Override
public void initiate(String scheme) throws IOException, ConfigurationParseException {
    mCachedSparkOriginated = new HashMap<String, Boolean>();
    mCachedSparkJobsStatus = new HashMap<String, Boolean>();
    schemaProvided = scheme;
    Properties props = ConfigurationHandler.initialize(filesystemURI, conf, scheme);
    // Set bucket name property
    int cacheSize = conf.getInt(CACHE_SIZE, GUAVA_CACHE_SIZE_DEFAULT);
    memoryCache = MemoryCache.getInstance(cacheSize);
    mBucket = props.getProperty(COS_BUCKET_PROPERTY);
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(filesystemURI,
            getWorkingDirectory());

    fModeAutomaticDelete = "true".equals(props.getProperty(FMODE_AUTOMATIC_DELETE_COS_PROPERTY, "false"));
    mIsV2Signer = "true".equals(props.getProperty(V2_SIGNER_TYPE_COS_PROPERTY, "false"));
    // Define COS client
    String accessKey = props.getProperty(ACCESS_KEY_COS_PROPERTY);
    String secretKey = props.getProperty(SECRET_KEY_COS_PROPERTY);

    if (accessKey == null) {
        throw new ConfigurationParseException("Access KEY is empty. Please provide valid access key");
    }
    if (secretKey == null) {
        throw new ConfigurationParseException("Secret KEY is empty. Please provide valid secret key");
    }

    BasicAWSCredentials creds = new BasicAWSCredentials(accessKey, secretKey);
    ClientConfiguration clientConf = new ClientConfiguration();

    int maxThreads = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_THREADS, DEFAULT_MAX_THREADS);
    if (maxThreads < 2) {
        LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2.");
        maxThreads = 2;
    }
    int totalTasks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS);
    long keepAliveTime = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
    threadPoolExecutor = BlockingThreadPoolExecutorService.newInstance(maxThreads, maxThreads + totalTasks,
            keepAliveTime, TimeUnit.SECONDS, "s3a-transfer-shared");

    unboundedThreadPool = new ThreadPoolExecutor(maxThreads, Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            BlockingThreadPoolExecutorService.newDaemonThreadFactory("s3a-transfer-unbounded"));

    boolean secureConnections = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, SECURE_CONNECTIONS,
            DEFAULT_SECURE_CONNECTIONS);
    clientConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);

    String proxyHost = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_HOST, "");
    int proxyPort = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, PROXY_PORT, -1);
    if (!proxyHost.isEmpty()) {
        clientConf.setProxyHost(proxyHost);
        if (proxyPort >= 0) {
            clientConf.setProxyPort(proxyPort);
        } else {
            if (secureConnections) {
                LOG.warn("Proxy host set without port. Using HTTPS default 443");
                clientConf.setProxyPort(443);
            } else {
                LOG.warn("Proxy host set without port. Using HTTP default 80");
                clientConf.setProxyPort(80);
            }
        }
        String proxyUsername = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_USERNAME);
        String proxyPassword = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_PASSWORD);
        if ((proxyUsername == null) != (proxyPassword == null)) {
            String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD + " set without the other.";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        clientConf.setProxyUsername(proxyUsername);
        clientConf.setProxyPassword(proxyPassword);
        clientConf.setProxyDomain(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_DOMAIN));
        clientConf.setProxyWorkstation(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_WORKSTATION));
        if (LOG.isDebugEnabled()) {
            LOG.debug(
                    "Using proxy server {}:{} as user {} with password {} on " + "domain {} as workstation {}",
                    clientConf.getProxyHost(), clientConf.getProxyPort(),
                    String.valueOf(clientConf.getProxyUsername()), clientConf.getProxyPassword(),
                    clientConf.getProxyDomain(), clientConf.getProxyWorkstation());
        }
    } else if (proxyPort >= 0) {
        String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
        LOG.error(msg);
        throw new IllegalArgumentException(msg);
    }

    initConnectionSettings(conf, clientConf);
    if (mIsV2Signer) {
        clientConf.withSignerOverride("S3SignerType");
    }
    mClient = new AmazonS3Client(creds, clientConf);

    final String serviceUrl = props.getProperty(ENDPOINT_URL_COS_PROPERTY);
    if (serviceUrl != null && !serviceUrl.equals(amazonDefaultEndpoint)) {
        mClient.setEndpoint(serviceUrl);
    }
    mClient.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());

    // Set block size property
    String mBlockSizeString = props.getProperty(BLOCK_SIZE_COS_PROPERTY, "128");
    mBlockSize = Long.valueOf(mBlockSizeString).longValue() * 1024 * 1024L;

    boolean autoCreateBucket = "true"
            .equalsIgnoreCase((props.getProperty(AUTO_BUCKET_CREATE_COS_PROPERTY, "false")));

    partSize = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
    multiPartThreshold = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MIN_MULTIPART_THRESHOLD,
            DEFAULT_MIN_MULTIPART_THRESHOLD);
    readAhead = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, READAHEAD_RANGE, DEFAULT_READAHEAD_RANGE);
    LOG.debug(READAHEAD_RANGE + ":" + readAhead);
    inputPolicy = COSInputPolicy
            .getPolicy(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, INPUT_FADVISE, INPUT_FADV_NORMAL));

    initTransferManager();
    maxKeys = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
    flatListingFlag = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FLAT_LISTING, DEFAULT_FLAT_LISTING);

    if (autoCreateBucket) {
        try {
            boolean bucketExist = mClient.doesBucketExist(mBucket);
            if (bucketExist) {
                LOG.trace("Bucket {} exists", mBucket);
            } else {
                LOG.trace("Bucket {} doesn`t exists and autocreate", mBucket);
                String mRegion = props.getProperty(REGION_COS_PROPERTY);
                if (mRegion == null) {
                    mClient.createBucket(mBucket);
                } else {
                    LOG.trace("Creating bucket {} in region {}", mBucket, mRegion);
                    mClient.createBucket(mBucket, mRegion);
                }
            }
        } catch (AmazonServiceException ase) {
            /*
             * We ignore the BucketAlreadyExists exception since multiple processes or
             * threads might try to create the bucket in parallel; it is therefore
             * expected that some of them will fail.
             */
            if (!ase.getErrorCode().equals("BucketAlreadyExists")) {
                LOG.error(ase.getMessage());
                throw (ase);
            }
        } catch (Exception e) {
            LOG.error(e.getMessage());
            throw (e);
        }
    }

    initMultipartUploads(conf);
    enableMultiObjectsDelete = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, ENABLE_MULTI_DELETE, true);

    blockUploadEnabled = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD, DEFAULT_FAST_UPLOAD);

    if (blockUploadEnabled) {
        blockOutputBuffer = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD_BUFFER,
                DEFAULT_FAST_UPLOAD_BUFFER);
        partSize = COSUtils.ensureOutputParameterInRange(MULTIPART_SIZE, partSize);
        blockFactory = COSDataBlocks.createFactory(this, blockOutputBuffer);
        blockOutputActiveBlocks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD_ACTIVE_BLOCKS,
                DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS);
        LOG.debug("Using COSBlockOutputStream with buffer = {}; block={};" + " queue limit={}",
                blockOutputBuffer, partSize, blockOutputActiveBlocks);
    } else {
        LOG.debug("Using COSOutputStream");
    }
}

From source file:com.infinitechaos.vpcviewer.service.impl.VpcServiceImpl.java

License:Open Source License

private synchronized AmazonEC2 getClientForRegion(final String regionName) {
    return clients.computeIfAbsent(Regions.fromName(regionName), region -> {
        LOG.info("Creating client for region {}", region);
        return Region.getRegion(region).createClient(AmazonEC2Client.class,
                new DefaultAWSCredentialsProviderChain(), new ClientConfiguration());
    });
}

From source file:com.innoq.hagmans.bachelor.TemperatureConsumer.java

License:Open Source License

public static void main(String[] args) throws InterruptedException {
    if (args.length == 2) {
        streamName = args[0];
        db_name = args[1];
    }

    // Initialize Utils
    KinesisClientLibConfiguration config = new KinesisClientLibConfiguration(db_name, streamName,
            new DefaultAWSCredentialsProviderChain(), "KinesisProducerLibSampleConsumer")
                    .withRegionName(TemperatureProducer.REGION)
                    .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    Region region = RegionUtils.getRegion(TemperatureProducer.REGION);
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    AmazonDynamoDB amazonDynamoDB = new AmazonDynamoDBClient(credentialsProvider, new ClientConfiguration());
    AmazonDynamoDBClient client = new AmazonDynamoDBClient(credentialsProvider);
    client.setRegion(region);
    DynamoDB dynamoDB = new DynamoDB(client);
    amazonDynamoDB.setRegion(region);
    DynamoDBUtils dbUtils = new DynamoDBUtils(dynamoDB, amazonDynamoDB, client);
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, new ClientConfiguration());
    kinesis.setRegion(region);
    StreamUtils streamUtils = new StreamUtils(kinesis);
    try {
        if (!streamUtils.isActive(kinesis.describeStream(streamName))) {
            log.info("Stream is not active. Waiting for Stream to become active....");
            streamUtils.waitForStreamToBecomeActive(streamName);
        }
    } catch (ResourceNotFoundException e) {
        log.info("Stream is not created right now. Waiting for stream to get created and become active....");
        streamUtils.waitForStreamToBecomeActive(streamName);
    }
    dbUtils.deleteTable(db_name);
    dbUtils.createTemperatureTableIfNotExists(tableName);

    Thread.sleep(1000);

    final TemperatureConsumer consumer = new TemperatureConsumer();

    new Worker.Builder().recordProcessorFactory(consumer).config(config).build().run();
}
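
The clients above are built through constructors that later v1 SDK releases deprecate in favor of builders. A hedged modernization sketch of the Kinesis client construction, keeping the same ClientConfiguration:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.kinesis.AmazonKinesis;
import com.amazonaws.services.kinesis.AmazonKinesisClientBuilder;

public class KinesisClientSketch {
    static AmazonKinesis build(String regionName) {
        return AmazonKinesisClientBuilder.standard()
                .withCredentials(new DefaultAWSCredentialsProviderChain())
                .withClientConfiguration(new ClientConfiguration())
                .withRegion(regionName) // e.g. TemperatureProducer.REGION
                .build();
    }
}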

From source file:com.innoq.hagmans.bachelor.TemperatureProducer.java

License:Open Source License

public static void main(String[] args) throws Exception {

    if (args.length == 4) {
        streamName = args[0];
        sensorName = args[1];
        secondsToRun = Integer.parseInt(args[2]);
        recordsPerSecond = Integer.parseInt(args[3]);
    }

    // Create a new stream if it doesn't already exist
    Region region = RegionUtils.getRegion(REGION);
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, new ClientConfiguration());
    kinesis.setRegion(region);
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStream(streamName, NUMBER_OF_SHARDS);

    final KinesisProducer producer = getKinesisProducer();

    // The monotonically increasing sequence number we will put in the data
    // of each record
    final AtomicLong sequenceNumber = new AtomicLong(0);

    // The number of records that have finished (either successfully put, or
    // failed)
    final AtomicLong completed = new AtomicLong(0);

    // KinesisProducer.addUserRecord is asynchronous. A callback can be used
    // to receive the results.
    final FutureCallback<UserRecordResult> callback = new FutureCallback<UserRecordResult>() {
        @Override
        public void onFailure(Throwable t) {
            // We don't expect any failures during this sample. If it
            // happens, we will log the first one and exit.
            if (t instanceof UserRecordFailedException) {
                Attempt last = Iterables.getLast(((UserRecordFailedException) t).getResult().getAttempts());
                log.error(String.format("Record failed to put - %s : %s", last.getErrorCode(),
                        last.getErrorMessage()));
            }
            log.error("Exception during put", t);
            System.exit(1);
        }

        @Override
        public void onSuccess(UserRecordResult result) {
            temperature = Utils.getNextTemperature(temperature);
            completed.getAndIncrement();
        }
    };

    // The lines within run() are the essence of the KPL API.
    final Runnable putOneRecord = new Runnable() {
        @Override
        public void run() {
            ByteBuffer data = Utils.generateData(temperature, sensorName, DATA_SIZE);
            // TIMESTAMP is our partition key
            ListenableFuture<UserRecordResult> f = producer.addUserRecord(streamName, TIMESTAMP,
                    Utils.randomExplicitHashKey(), data);
            Futures.addCallback(f, callback);
        }
    };

    // This gives us progress updates
    EXECUTOR.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            long put = sequenceNumber.get();
            long total = recordsPerSecond * secondsToRun;
            double putPercent = 100.0 * put / total;
            long done = completed.get();
            double donePercent = 100.0 * done / total;
            log.info(String.format("Put %d of %d so far (%.2f %%), %d have completed (%.2f %%)", put, total,
                    putPercent, done, donePercent));
        }
    }, 1, 1, TimeUnit.SECONDS);

    // Kick off the puts
    log.info(String.format("Starting puts... will run for %d seconds at %d records per second", secondsToRun,
            recordsPerSecond));
    executeAtTargetRate(EXECUTOR, putOneRecord, sequenceNumber, secondsToRun, recordsPerSecond);

    // Wait for puts to finish. After this statement returns, we have
    // finished all calls to putRecord, but the records may still be
    // in-flight. We will additionally wait for all records to actually
    // finish later.
    EXECUTOR.awaitTermination(secondsToRun + 1, TimeUnit.SECONDS);

    // If you need to shutdown your application, call flushSync() first to
    // send any buffered records. This method will block until all records
    // have finished (either success or fail). There are also asynchronous
    // flush methods available.
    //
    // Records are also automatically flushed by the KPL after a while based
    // on the time limit set with Configuration.setRecordMaxBufferedTime()
    log.info("Waiting for remaining puts to finish...");
    producer.flushSync();
    log.info("All records complete.");

    // This kills the child process and shuts down the threads managing it.
    producer.destroy();
    log.info("Finished.");
}

From source file:com.intuit.tank.persistence.databases.AmazonDynamoDatabaseDocApi.java

License:Open Source License

/**
 * Creates the DynamoDB client, using explicit Amazon credentials when they
 * are configured and the default credential chain otherwise.
 */
public AmazonDynamoDatabaseDocApi() {
    CloudCredentials creds = new TankConfig().getVmManagerConfig().getCloudCredentials(CloudProvider.amazon);
    if (creds != null && StringUtils.isNotBlank(creds.getKeyId())) {
        AWSCredentials credentials = new BasicAWSCredentials(creds.getKeyId(), creds.getKey());
        this.dynamoDb = new AmazonDynamoDBClient(credentials, new ClientConfiguration());
    } else {
        this.dynamoDb = new AmazonDynamoDBClient(new ClientConfiguration());
    }

}
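
For completeness, the same credential-fallback logic written against the builder API that newer v1 SDKs recommend; a hedged sketch, with AWSStaticCredentialsProvider wrapping the explicit keys:

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;

public class DynamoClientSketch {
    static AmazonDynamoDB build(AWSCredentials credentials) {
        AmazonDynamoDBClientBuilder builder = AmazonDynamoDBClientBuilder.standard()
                .withClientConfiguration(new ClientConfiguration());
        if (credentials != null) {
            // Wrap the explicit key pair; otherwise the default chain applies
            builder.withCredentials(new AWSStaticCredentialsProvider(credentials));
        }
        return builder.build();
    }
}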