Example usage for com.amazonaws.ClientConfiguration#ClientConfiguration()

Introduction

This page collects example usages of the no-argument com.amazonaws.ClientConfiguration constructor, ClientConfiguration().

Prototype

public ClientConfiguration() 
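
The no-argument constructor produces a configuration populated with the SDK defaults (HTTPS, default connection pool size, timeouts, and retry policy), which is then tuned through setters or the fluent with* methods before being handed to a client. Below is a minimal sketch of that pattern; the timeout and retry values are illustrative assumptions, not values taken from the examples on this page.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;

// Start from the SDK defaults and override only what is needed.
// The concrete values here are illustrative.
ClientConfiguration config = new ClientConfiguration();
config.setProtocol(Protocol.HTTPS);      // the default, shown for clarity
config.setConnectionTimeout(10000);      // connect timeout in milliseconds
config.setSocketTimeout(50000);          // socket read timeout in milliseconds
config.setMaxErrorRetry(3);              // retries for transient failures

// The configuration is then passed to a client constructor or builder,
// as the usage examples below show.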

Usage

From source file: com.streamsets.datacollector.bundles.SupportBundleManager.java

License: Apache License

/**
 * Instead of providing support bundle directly to user, upload it to StreamSets backend services.
 */
public void uploadNewBundleFromInstances(List<BundleContentGenerator> generators, BundleType bundleType)
        throws IOException {
    // Generate bundle
    SupportBundle bundle = generateNewBundleFromInstances(generators, bundleType);

    boolean enabled = configuration.get(Constants.UPLOAD_ENABLED, Constants.DEFAULT_UPLOAD_ENABLED);
    String accessKey = configuration.get(Constants.UPLOAD_ACCESS, Constants.DEFAULT_UPLOAD_ACCESS);
    String secretKey = configuration.get(Constants.UPLOAD_SECRET, Constants.DEFAULT_UPLOAD_SECRET);
    String bucket = configuration.get(Constants.UPLOAD_BUCKET, Constants.DEFAULT_UPLOAD_BUCKET);
    int bufferSize = configuration.get(Constants.UPLOAD_BUFFER_SIZE, Constants.DEFAULT_UPLOAD_BUFFER_SIZE);

    if (!enabled) {
        throw new IOException("Uploading support bundles was disabled by administrator.");
    }

    AWSCredentialsProvider credentialsProvider = new StaticCredentialsProvider(
            new BasicAWSCredentials(accessKey, secretKey));
    AmazonS3Client s3Client = new AmazonS3Client(credentialsProvider, new ClientConfiguration());
    s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
    s3Client.setRegion(Region.getRegion(Regions.US_WEST_2));

    // Object Metadata
    ObjectMetadata s3Metadata = new ObjectMetadata();
    for (Map.Entry<Object, Object> entry : getMetadata(bundleType).entrySet()) {
        s3Metadata.addUserMetadata((String) entry.getKey(), (String) entry.getValue());
    }

    List<PartETag> partETags;
    InitiateMultipartUploadResult initResponse = null;
    try {
        // Uploading part by part
        LOG.info("Initiating multi-part support bundle upload");
        partETags = new ArrayList<>();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket,
                bundle.getBundleKey());
        initRequest.setObjectMetadata(s3Metadata);
        initResponse = s3Client.initiateMultipartUpload(initRequest);
    } catch (AmazonClientException e) {
        LOG.error("Support bundle upload failed: ", e);
        throw new IOException("Support bundle upload failed", e);
    }

    try {
        byte[] buffer = new byte[bufferSize];
        int partId = 1;
        int size = -1;
        while ((size = readFully(bundle.getInputStream(), buffer)) != -1) {
            LOG.debug("Uploading part {} of size {}", partId, size);
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(bundle.getBundleKey()).withUploadId(initResponse.getUploadId())
                    .withPartNumber(partId++).withInputStream(new ByteArrayInputStream(buffer))
                    .withPartSize(size);

            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
        }

        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucket,
                bundle.getBundleKey(), initResponse.getUploadId(), partETags);

        s3Client.completeMultipartUpload(compRequest);
        LOG.info("Support bundle upload finished");
    } catch (Exception e) {
        LOG.error("Support bundle upload failed", e);
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, bundle.getBundleKey(), initResponse.getUploadId()));

        throw new IOException("Can't upload support bundle", e);
    } finally {
        // Close the client
        s3Client.shutdown();
    }
}

From source file: com.streamsets.pipeline.lib.aws.s3.S3Accessor.java

License: Apache License

ClientConfiguration createClientConfiguration() throws StageException {
    ClientConfiguration clientConfig = new ClientConfiguration();

    clientConfig.setConnectionTimeout(connectionConfigs.getConnectionTimeoutMillis());
    clientConfig.setSocketTimeout(connectionConfigs.getSocketTimeoutMillis());
    clientConfig.withMaxErrorRetry(connectionConfigs.getMaxErrorRetry());

    if (connectionConfigs.isProxyEnabled()) {
        clientConfig.setProxyHost(connectionConfigs.getProxyHost());
        clientConfig.setProxyPort(connectionConfigs.getProxyPort());
        if (connectionConfigs.isProxyAuthenticationEnabled()) {
            clientConfig.setProxyUsername(connectionConfigs.getProxyUser().get());
            clientConfig.setProxyPassword(connectionConfigs.getProxyPassword().get());
        }
    }
    return clientConfig;
}
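
The configuration built above is typically handed to the S3 client builder. A minimal sketch of such a consumer, assuming an AmazonS3ClientBuilder-based client (this consuming code is an illustration, not part of S3Accessor as shown here):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

// Hypothetical consumer of createClientConfiguration(); the region is an assumption.
AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withClientConfiguration(createClientConfiguration())
        .withRegion("us-west-2")
        .build();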

From source file: com.streamsets.pipeline.stage.destination.kinesis.KinesisTarget.java

License: Apache License

@Override
protected List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();

    checkStreamExists(issues);

    if (issues.isEmpty()) {
        kinesisConfiguration = new ClientConfiguration();
        //TODO Set additional configuration options here.
        createKinesisClient();

        generatorFactory = createDataGeneratorFactory();
    }
    return issues;
}
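
The TODO above leaves kinesisConfiguration at the SDK defaults. A hedged sketch of the kind of options that could be set at that point; the specific values are assumptions for illustration, not StreamSets defaults:

// Illustrative tuning of the Kinesis ClientConfiguration at the TODO above.
// The values are assumptions, not taken from KinesisTarget.
kinesisConfiguration.setConnectionTimeout(10000);  // fail faster on connect problems
kinesisConfiguration.setSocketTimeout(50000);      // tolerate slow responses
kinesisConfiguration.setMaxErrorRetry(3);          // retry throttled requests a few times
kinesisConfiguration.setMaxConnections(50);        // the SDK default pool size, made explicit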

From source file: com.streamsets.pipeline.stage.destination.kinesis.KinesisTarget.java

License: Apache License

private void checkStreamExists(List<ConfigIssue> issues) {
    ClientConfiguration kinesisConfiguration = new ClientConfiguration();
    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(kinesisConfiguration);
    kinesisClient.setRegion(Region.getRegion(region));

    try {
        DescribeStreamResult result = kinesisClient.describeStream(streamName);
        LOG.info("Connected successfully to stream: {} with description: {}", streamName,
                result.getStreamDescription().toString());
    } catch (Exception e) {
        issues.add(getContext().createConfigIssue(
                com.streamsets.pipeline.stage.origin.kinesis.Groups.KINESIS.name(), "streamName",
                Errors.KINESIS_01, e.toString()));
    } finally {
        kinesisClient.shutdown();
    }
}

From source file: com.streamsets.pipeline.stage.lib.aws.AWSUtil.java

License: Apache License

public static ClientConfiguration getClientConfiguration(ProxyConfig config) {
    ClientConfiguration clientConfig = new ClientConfiguration();

    // Optional proxy settings
    if (config.useProxy) {
        if (config.proxyHost != null && !config.proxyHost.isEmpty()) {
            clientConfig.setProxyHost(config.proxyHost);
            clientConfig.setProxyPort(config.proxyPort);

            if (config.proxyUser != null && !config.proxyUser.isEmpty()) {
                clientConfig.setProxyUsername(config.proxyUser);
            }

            if (config.proxyPassword != null) {
                clientConfig.setProxyPassword(config.proxyPassword);
            }
        }
    }
    return clientConfig;
}

From source file: com.streamsets.pipeline.stage.lib.kinesis.KinesisUtil.java

License: Apache License

public static long getShardCount(Regions region, AWSConfig awsConfig, String streamName)
        throws AmazonClientException {
    ClientConfiguration kinesisConfiguration = new ClientConfiguration();
    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(AWSUtil.getCredentialsProvider(awsConfig),
            kinesisConfiguration);
    kinesisClient.setRegion(Region.getRegion(region));

    try {
        long numShards = 0;
        String lastShardId = null;
        StreamDescription description;
        do {
            if (lastShardId == null) {
                description = kinesisClient.describeStream(streamName).getStreamDescription();
            } else {
                description = kinesisClient.describeStream(streamName, lastShardId).getStreamDescription();
            }

            for (Shard shard : description.getShards()) {
                if (shard.getSequenceNumberRange().getEndingSequenceNumber() == null) {
                    // Then this shard is open, so we should count it. Shards with an ending sequence number
                    // are closed and cannot be written to, so we skip counting them.
                    ++numShards;
                }
            }

            int pageSize = description.getShards().size();
            lastShardId = description.getShards().get(pageSize - 1).getShardId();

        } while (description.getHasMoreShards());

        LOG.debug("Connected successfully to stream: '{}' with '{}' shards.", streamName, numShards);

        return numShards;
    } finally {
        kinesisClient.shutdown();
    }
}

From source file: com.streamsets.pipeline.stage.origin.kinesis.KinesisSource.java

License: Apache License

private void checkStreamExists(List<ConfigIssue> issues) {
    ClientConfiguration kinesisConfiguration = new ClientConfiguration();
    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(kinesisConfiguration);
    kinesisClient.setRegion(Region.getRegion(region));

    try {
        DescribeStreamResult result = kinesisClient.describeStream(streamName);
        LOG.info("Connected successfully to stream: {} with description: {}", streamName,
                result.getStreamDescription().toString());
    } catch (Exception e) {
        issues.add(getContext().createConfigIssue(Groups.KINESIS.name(), "streamName", Errors.KINESIS_01,
                e.toString()));
    } finally {
        kinesisClient.shutdown();
    }
}

From source file: com.streamsets.pipeline.stage.origin.s3.S3Config.java

License: Apache License

private void validateConnection(Stage.Context context, List<Stage.ConfigIssue> issues) {
    //Access Key ID - username [unique in aws]
    //secret access key - password
    AWSCredentials credentials = new BasicAWSCredentials(accessKeyId, secretAccessKey);
    s3Client = new AmazonS3Client(credentials, new ClientConfiguration());
    s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
    if (endPoint != null && !endPoint.isEmpty()) {
        s3Client.setEndpoint(endPoint);
    } else {
        s3Client.setRegion(Region.getRegion(region));
    }
    try {
        //check if the credentials are right by trying to list buckets
        s3Client.listBuckets();
    } catch (AmazonS3Exception e) {
        issues.add(context.createConfigIssue(Groups.S3.name(), "accessKeyId", Errors.S3_SPOOLDIR_20,
                e.toString()));
    }
}

From source file: com.swap.aws.elb.client.AWSHelper.java

License: Apache License

public AWSHelper(String awsAccessKey, String awsSecretKey, String availabilityZone, String region) {
    this.awsAccessKey = awsAccessKey;
    this.awsSecretKey = awsSecretKey;

    this.availabilityZone = availabilityZone;
    this.region = region;

    awsCredentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    clientConfiguration = new ClientConfiguration();

    cloudWatchClient = new AmazonCloudWatchClient(awsCredentials, clientConfiguration);
}

From source file: com.tango.BucketSyncer.StorageClients.S3Client.java

License: Apache License

public void createClient(MirrorOptions options) {
    ClientConfiguration clientConfiguration = new ClientConfiguration().withProtocol(Protocol.HTTP)
            .withMaxConnections(options.getMaxConnections());
    if (options.getHasProxy()) {
        clientConfiguration = clientConfiguration.withProxyHost(options.getProxyHost())
                .withProxyPort(options.getProxyPort());
    }
    this.s3Client = new AmazonS3Client(options, clientConfiguration);
    if (options.hasEndpoint()) {
        s3Client.setEndpoint(options.getEndpoint());
    }
}