Example usage for com.amazonaws ClientConfiguration ClientConfiguration

Introduction

This page collects example usages of the no-argument com.amazonaws ClientConfiguration constructor.

Prototype

public ClientConfiguration() 
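
The examples below all start from this no-argument constructor and then adjust individual settings through the fluent "with" methods. As a rough orientation, here is a minimal sketch of that pattern; the method name and the timeout/retry values are illustrative placeholders, not taken from any of the examples.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;

// Sketch only: the concrete values are illustrative, not recommendations from the examples on this page.
static ClientConfiguration defaultHttpsConfiguration() {
    return new ClientConfiguration()
            .withProtocol(Protocol.HTTPS)   // use HTTPS for all requests
            .withMaxErrorRetry(3)           // retry transient failures a few times
            .withConnectionTimeout(10000)   // connect timeout, in milliseconds
            .withSocketTimeout(30000);      // socket read timeout, in milliseconds
}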

Usage

From source file: net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java

License: Apache License

@Override
public StorageResponse store(StorageRequest request) throws IOException {

    final IFile requestFile = request.getFile();

    AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS));

    Map<String, String> fileMetadata = new HashMap<String, String>();

    fileMetadata.put("accountUrn", request.getUser().getAccount().getUrn());
    fileMetadata.put("userUrn", request.getUser().getUrn());
    fileMetadata.put("fileUrn", requestFile.getUrn());
    fileMetadata.put("entityReferenceType", requestFile.getEntityReferenceType().name());
    fileMetadata.put("referenceUrn", requestFile.getReferenceUrn());
    fileMetadata.put("recordedTimestamp", Long.toString(request.getFile().getTimestamp()));
    //            fileMetadata.put("mimeType", request.getVfsObject().getMimeType());

    ObjectMetadata metadata = new ObjectMetadata();
    if (request.getContentLength() > 0) {
        LOG.debug("Including content length : " + request.getContentLength());
        metadata.setContentLength(request.getContentLength());
    }
    //            metadata.setContentMD5(streamMD5);
    metadata.setUserMetadata(fileMetadata);

    try {
        LOG.trace("Bucket name: " + getBucketName());
        LOG.trace("File name: " + request.getFileName());
        LOG.trace("inputStream == null?  " + (request.getInputStream() == null));

        HashingInputStream his = new HashingInputStream(request.getInputStream(), "SHA-256");

        PutObjectResult putResult = s3
                .putObject(new PutObjectRequest(getBucketName(), request.getFileName(), his, metadata));

        String finalUrl = getUrl(request.getFileName());
        LOG.trace("File URL: " + finalUrl);

        requestFile.setUrl(getUrl(request.getFileName()));

        byte[] signature = his.getSignature();

        JSONObject jsonObject = HashUtil.signFile(requestFile, signature);
        LOG.info("File Signature\n\n{}\n\n", jsonObject.toString(3));

        return new StorageResponse(requestFile, finalUrl, jsonObject.toString(3));
    } catch (AmazonS3Exception e) {
        e.printStackTrace();
        throw e;
    } catch (JSONException | NoSuchAlgorithmException e) {
        e.printStackTrace();
        throw new IOException(e);
    }

}

From source file: net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java

License: Apache License

@Override
public boolean isHealthy() {
    AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS));
    return s3.doesBucketExist(getBucketName());
}

From source file: org.adroitlogic.build.aws.maven.S3Utils.java

License: Apache License

static ClientConfiguration getClientConfiguration(ProxyInfoProvider proxyInfoProvider) {
    ClientConfiguration clientConfiguration = new ClientConfiguration();

    if (proxyInfoProvider != null) {
        ProxyInfo proxyInfo = proxyInfoProvider.getProxyInfo("s3");

        if (proxyInfo != null) {
            //System.out.println("Proxy Info : NTLM host: " + proxyInfo.getNtlmHost() + " domain: " + proxyInfo.getNtlmDomain() + " username: " + proxyInfo.getUserName() + " password: " + proxyInfo.getPassword() + " host: " + proxyInfo.getHost() + " port: " + proxyInfo.getPort());
            clientConfiguration.withProxyHost(proxyInfo.getHost()).withProxyPort(proxyInfo.getPort());
            if (proxyInfo.getUserName() != null) {
                clientConfiguration.setProxyUsername(proxyInfo.getUserName());
                clientConfiguration.setProxyPassword(proxyInfo.getPassword());
            }
            if (proxyInfo.getNtlmDomain() != null) {
                clientConfiguration.setProxyDomain(proxyInfo.getNtlmDomain());
            }
            if (proxyInfo.getNtlmHost() != null) {
                clientConfiguration.setProxyWorkstation(proxyInfo.getNtlmHost());
            }
        }
    }
    //clientConfiguration.setProtocol(com.amazonaws.Protocol.HTTP);

    return clientConfiguration;
}
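
The helper above only builds the configuration; a caller would normally hand the result to an S3 client constructor together with credentials, mirroring the earlier examples. A one-line sketch, assuming a credentials object and a proxyInfoProvider exist in the calling code (both hypothetical here):

AmazonS3 s3 = new AmazonS3Client(credentials, S3Utils.getClientConfiguration(proxyInfoProvider)); // credentials and proxyInfoProvider are assumed caller-side variables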

From source file: org.apache.camel.component.aws.swf.SWFEndpoint.java

License: Apache License

private AmazonSimpleWorkflowClient createSWClient() throws Exception {
    AWSCredentials credentials = new BasicAWSCredentials(configuration.getAccessKey(),
            configuration.getSecretKey());

    ClientConfiguration clientConfiguration = new ClientConfiguration();
    if (!configuration.getClientConfigurationParameters().isEmpty()) {
        setProperties(clientConfiguration, configuration.getClientConfigurationParameters());
    }

    AmazonSimpleWorkflowClient client = new AmazonSimpleWorkflowClient(credentials, clientConfiguration);
    if (!configuration.getsWClientParameters().isEmpty()) {
        setProperties(client, configuration.getsWClientParameters());
    }
    return client;
}

From source file: org.apache.druid.indexing.kinesis.KinesisRecordSupplier.java

License: Apache License

public static AmazonKinesis getAmazonKinesisClient(String endpoint, AWSCredentialsConfig awsCredentialsConfig,
        String awsAssumedRoleArn, String awsExternalId) {
    AWSCredentialsProvider awsCredentialsProvider = AWSCredentialsUtils
            .defaultAWSCredentialsProviderChain(awsCredentialsConfig);

    if (awsAssumedRoleArn != null) {
        log.info("Assuming role [%s] with externalId [%s]", awsAssumedRoleArn, awsExternalId);

        STSAssumeRoleSessionCredentialsProvider.Builder builder = new STSAssumeRoleSessionCredentialsProvider.Builder(
                awsAssumedRoleArn, StringUtils.format("druid-kinesis-%s", UUID.randomUUID().toString()))
                        .withStsClient(AWSSecurityTokenServiceClientBuilder.standard()
                                .withCredentials(awsCredentialsProvider).build());

        if (awsExternalId != null) {
            builder.withExternalId(awsExternalId);
        }

        awsCredentialsProvider = builder.build();
    }

    return AmazonKinesisClientBuilder.standard().withCredentials(awsCredentialsProvider)
            .withClientConfiguration(new ClientConfiguration())
            .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint,
                    AwsHostNameUtils.parseRegion(endpoint, null)))
            .build();
}

From source file: org.apache.hadoop.dynamodb.DynamoDBClient.java

License: Open Source License

private AmazonDynamoDBClient getDynamoDBClient(Configuration conf) {
    ClientConfiguration clientConfig = new ClientConfiguration().withMaxErrorRetry(1);
    applyProxyConfiguration(clientConfig, conf);
    return new AmazonDynamoDBClient(getAWSCredentialsProvider(conf), clientConfig);
}

From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java

License: Apache License

/** Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc.
 *   for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);

    uri = URI.create(name.getScheme() + "://" + name.getAuthority());
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this.uri,
            this.getWorkingDirectory());

    // Try to get our credentials or just connect anonymously
    String accessKey = conf.get(NEW_ACCESS_KEY, conf.get(OLD_ACCESS_KEY, null));
    String secretKey = conf.get(NEW_SECRET_KEY, conf.get(OLD_SECRET_KEY, null));

    String userInfo = name.getUserInfo();
    if (userInfo != null) {
        int index = userInfo.indexOf(':');
        if (index != -1) {
            accessKey = userInfo.substring(0, index);
            secretKey = userInfo.substring(index + 1);
        } else {
            accessKey = userInfo;
        }
    }

    AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
            new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(),
            new AnonymousAWSCredentialsProvider());

    bucket = name.getHost();

    ClientConfiguration awsConf = new ClientConfiguration();
    awsConf.setMaxConnections(conf.getInt(NEW_MAXIMUM_CONNECTIONS,
            conf.getInt(OLD_MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS)));
    awsConf.setProtocol(conf.getBoolean(NEW_SECURE_CONNECTIONS,
            conf.getBoolean(OLD_SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS)) ? Protocol.HTTPS
                    : Protocol.HTTP);
    awsConf.setMaxErrorRetry(
            conf.getInt(NEW_MAX_ERROR_RETRIES, conf.getInt(OLD_MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES)));
    awsConf.setSocketTimeout(
            conf.getInt(NEW_SOCKET_TIMEOUT, conf.getInt(OLD_SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT)));

    s3 = new AmazonS3Client(credentials, awsConf);

    maxKeys = conf.getInt(NEW_MAX_PAGING_KEYS, conf.getInt(OLD_MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS));
    partSize = conf.getLong(NEW_MULTIPART_SIZE, conf.getLong(OLD_MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE));
    partSizeThreshold = conf.getLong(NEW_MIN_MULTIPART_THRESHOLD,
            conf.getLong(OLD_MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD));

    if (partSize < 5 * 1024 * 1024) {
        LOG.error(NEW_MULTIPART_SIZE + " must be at least 5 MB");
        partSize = 5 * 1024 * 1024;
    }

    if (partSizeThreshold < 5 * 1024 * 1024) {
        LOG.error(NEW_MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
        partSizeThreshold = 5 * 1024 * 1024;
    }

    String cannedACLName = conf.get(NEW_CANNED_ACL, conf.get(OLD_CANNED_ACL, DEFAULT_CANNED_ACL));
    if (!cannedACLName.isEmpty()) {
        cannedACL = CannedAccessControlList.valueOf(cannedACLName);
    } else {
        cannedACL = null;
    }

    if (!s3.doesBucketExist(bucket)) {
        throw new IOException("Bucket " + bucket + " does not exist");
    }

    boolean purgeExistingMultipart = conf.getBoolean(NEW_PURGE_EXISTING_MULTIPART,
            conf.getBoolean(OLD_PURGE_EXISTING_MULTIPART, DEFAULT_PURGE_EXISTING_MULTIPART));
    long purgeExistingMultipartAge = conf.getLong(NEW_PURGE_EXISTING_MULTIPART_AGE,
            conf.getLong(OLD_PURGE_EXISTING_MULTIPART_AGE, DEFAULT_PURGE_EXISTING_MULTIPART_AGE));

    if (purgeExistingMultipart) {
        TransferManager transferManager = new TransferManager(s3);
        Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);

        transferManager.abortMultipartUploads(bucket, purgeBefore);
        transferManager.shutdownNow(false);
    }

    serverSideEncryptionAlgorithm = conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM, null);

    setConf(conf);
}

From source file: org.apache.hadoop.fs.s3r.S3RFileSystem.java

License: Apache License

/** Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc.
 *   for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);

    uri = URI.create(name.getScheme() + "://" + name.getAuthority());
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this.uri,
            this.getWorkingDirectory());

    // Try to get our credentials or just connect anonymously
    String accessKey = conf.get(ACCESS_KEY, null);
    String secretKey = conf.get(SECRET_KEY, null);

    String userInfo = name.getUserInfo();
    if (userInfo != null) {
        int index = userInfo.indexOf(':');
        if (index != -1) {
            accessKey = userInfo.substring(0, index);
            secretKey = userInfo.substring(index + 1);
        } else {
            accessKey = userInfo;
        }
    }

    AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
            new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(),
            new AnonymousAWSCredentialsProvider());

    bucket = name.getHost();

    ClientConfiguration awsConf = new ClientConfiguration();
    awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS));
    boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
    awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
    awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES));
    awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT, DEFAULT_ESTABLISH_TIMEOUT));
    awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT));

    String proxyHost = conf.getTrimmed(PROXY_HOST, "");
    int proxyPort = conf.getInt(PROXY_PORT, -1);
    if (!proxyHost.isEmpty()) {
        awsConf.setProxyHost(proxyHost);
        if (proxyPort >= 0) {
            awsConf.setProxyPort(proxyPort);
        } else {
            if (secureConnections) {
                LOG.warn("Proxy host set without port. Using HTTPS default 443");
                awsConf.setProxyPort(443);
            } else {
                LOG.warn("Proxy host set without port. Using HTTP default 80");
                awsConf.setProxyPort(80);
            }
        }
        String proxyUsername = conf.getTrimmed(PROXY_USERNAME);
        String proxyPassword = conf.getTrimmed(PROXY_PASSWORD);
        if ((proxyUsername == null) != (proxyPassword == null)) {
            String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD + " set without the other.";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        awsConf.setProxyUsername(proxyUsername);
        awsConf.setProxyPassword(proxyPassword);
        awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
        awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
        if (LOG.isDebugEnabled()) {
            LOG.debug(
                    "Using proxy server {}:{} as user {} with password {} on " + "domain {} as workstation {}",
                    awsConf.getProxyHost(), awsConf.getProxyPort(), String.valueOf(awsConf.getProxyUsername()),
                    awsConf.getProxyPassword(), awsConf.getProxyDomain(), awsConf.getProxyWorkstation());
        }
    } else if (proxyPort >= 0) {
        String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
        LOG.error(msg);
        throw new IllegalArgumentException(msg);
    }

    s3 = new AmazonS3Client(credentials, awsConf);
    String endPoint = conf.getTrimmed(ENDPOINT, "");
    if (!endPoint.isEmpty()) {
        try {
            s3.setEndpoint(endPoint);
        } catch (IllegalArgumentException e) {
            String msg = "Incorrect endpoint: " + e.getMessage();
            LOG.error(msg);
            throw new IllegalArgumentException(msg, e);
        }
    }

    maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
    partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
    multiPartThreshold = conf.getInt(MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);

    if (partSize < 5 * 1024 * 1024) {
        LOG.error(MULTIPART_SIZE + " must be at least 5 MB");
        partSize = 5 * 1024 * 1024;
    }

    if (multiPartThreshold < 5 * 1024 * 1024) {
        LOG.error(MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
        multiPartThreshold = 5 * 1024 * 1024;
    }

    int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
    int coreThreads = conf.getInt(CORE_THREADS, DEFAULT_CORE_THREADS);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong(KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(
            maxThreads * conf.getInt(MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS));
    threadPoolExecutor = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            workQueue, newDaemonThreadFactory("s3a-transfer-shared-"));
    threadPoolExecutor.allowCoreThreadTimeOut(true);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);

    transfers = new TransferManager(s3, threadPoolExecutor);
    transfers.setConfiguration(transferConfiguration);

    String cannedACLName = conf.get(CANNED_ACL, DEFAULT_CANNED_ACL);
    if (!cannedACLName.isEmpty()) {
        cannedACL = CannedAccessControlList.valueOf(cannedACLName);
    } else {
        cannedACL = null;
    }

    if (!s3.doesBucketExist(bucket)) {
        throw new IOException("Bucket " + bucket + " does not exist");
    }

    boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART,
            DEFAULT_PURGE_EXISTING_MULTIPART);
    long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
            DEFAULT_PURGE_EXISTING_MULTIPART_AGE);

    if (purgeExistingMultipart) {
        Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);

        transfers.abortMultipartUploads(bucket, purgeBefore);
    }

    serverSideEncryptionAlgorithm = conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM);

    setConf(conf);
}

From source file: org.apache.heron.uploader.s3.S3Uploader.java

License: Apache License

@Override
public void initialize(Config config) {
    bucket = S3Context.bucket(config);
    String accessKey = S3Context.accessKey(config);
    String accessSecret = S3Context.secretKey(config);
    String awsProfile = S3Context.awsProfile(config);
    String proxy = S3Context.proxyUri(config);
    String endpoint = S3Context.uri(config);
    String customRegion = S3Context.region(config);
    AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard();

    if (Strings.isNullOrEmpty(bucket)) {
        throw new RuntimeException("Missing heron.uploader.s3.bucket config value");
    }

    // If an accessKey is specified, use it. Otherwise check if an aws profile
    // is specified. If neither was set just use the DefaultAWSCredentialsProviderChain
    // by not specifying a CredentialsProvider.
    if (!Strings.isNullOrEmpty(accessKey) || !Strings.isNullOrEmpty(accessSecret)) {

        if (!Strings.isNullOrEmpty(awsProfile)) {
            throw new RuntimeException("Please provide access_key/secret_key " + "or aws_profile, not both.");
        }

        if (Strings.isNullOrEmpty(accessKey)) {
            throw new RuntimeException("Missing heron.uploader.s3.access_key config value");
        }

        if (Strings.isNullOrEmpty(accessSecret)) {
            throw new RuntimeException("Missing heron.uploader.s3.secret_key config value");
        }
        builder.setCredentials(
                new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, accessSecret)));
    } else if (!Strings.isNullOrEmpty(awsProfile)) {
        builder.setCredentials(new ProfileCredentialsProvider(awsProfile));
    }

    if (!Strings.isNullOrEmpty(proxy)) {
        URI proxyUri;

        try {
            proxyUri = new URI(proxy);
        } catch (URISyntaxException e) {
            throw new RuntimeException("Invalid heron.uploader.s3.proxy_uri config value: " + proxy, e);
        }

        ClientConfiguration clientCfg = new ClientConfiguration();
        clientCfg.withProtocol(Protocol.HTTPS).withProxyHost(proxyUri.getHost())
                .withProxyPort(proxyUri.getPort());

        if (!Strings.isNullOrEmpty(proxyUri.getUserInfo())) {
            String[] info = proxyUri.getUserInfo().split(":", 2);
            clientCfg.setProxyUsername(info[0]);
            if (info.length > 1) {
                clientCfg.setProxyPassword(info[1]);
            }
        }

        builder.setClientConfiguration(clientCfg);
    }

    s3Client = builder.withRegion(customRegion).withPathStyleAccessEnabled(true)
            .withChunkedEncodingDisabled(true).withPayloadSigningEnabled(true).build();

    if (!Strings.isNullOrEmpty(endpoint)) {
        s3Client.setEndpoint(endpoint);
    }

    final String topologyName = Context.topologyName(config);
    final String topologyPackageLocation = Context.topologyPackageFile(config);

    pathPrefix = S3Context.pathPrefix(config);
    packageFileHandler = new File(topologyPackageLocation);

    // The path the packaged topology will be uploaded to
    remoteFilePath = generateS3Path(pathPrefix, topologyName, packageFileHandler.getName());

    // Generate the location of the backup file in case we need to revert the deploy
    previousVersionFilePath = generateS3Path(pathPrefix, topologyName,
            "previous_" + packageFileHandler.getName());
}

From source file: org.apache.jackrabbit.aws.ext.Utils.java

License: Apache License

/**
 * Create AmazonS3Client from properties.
 *
 * @param prop properties to configure @link {@link AmazonS3Client}
 * @return {@link AmazonS3Client}
 */
public static AmazonS3Client openService(final Properties prop) {
    AWSCredentials credentials = new BasicAWSCredentials(prop.getProperty(S3Constants.ACCESS_KEY),
            prop.getProperty(S3Constants.SECRET_KEY));
    int connectionTimeOut = Integer.parseInt(prop.getProperty(S3Constants.S3_CONN_TIMEOUT));
    int socketTimeOut = Integer.parseInt(prop.getProperty(S3Constants.S3_SOCK_TIMEOUT));
    int maxConnections = Integer.parseInt(prop.getProperty(S3Constants.S3_MAX_CONNS));
    int maxErrorRetry = Integer.parseInt(prop.getProperty(S3Constants.S3_MAX_ERR_RETRY));
    ClientConfiguration cc = new ClientConfiguration();
    cc.setConnectionTimeout(connectionTimeOut);
    cc.setSocketTimeout(socketTimeOut);
    cc.setMaxConnections(maxConnections);
    cc.setMaxErrorRetry(maxErrorRetry);
    return new AmazonS3Client(credentials, cc);
}
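
A possible way to call this helper, assuming the S3Constants keys referenced above; the property values are placeholders, and every key must be present because the method parses them unconditionally:

// Hypothetical usage of Utils.openService(); keys come from S3Constants, values are illustrative.
Properties prop = new Properties();
prop.setProperty(S3Constants.ACCESS_KEY, "my-access-key");
prop.setProperty(S3Constants.SECRET_KEY, "my-secret-key");
prop.setProperty(S3Constants.S3_CONN_TIMEOUT, "120000");
prop.setProperty(S3Constants.S3_SOCK_TIMEOUT, "120000");
prop.setProperty(S3Constants.S3_MAX_CONNS, "10");
prop.setProperty(S3Constants.S3_MAX_ERR_RETRY, "3");
AmazonS3Client s3 = Utils.openService(prop);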