Example usage for com.amazonaws.services.s3 AmazonS3ClientBuilder standard

List of usage examples for com.amazonaws.services.s3 AmazonS3ClientBuilder standard

Introduction

This page collects real-world usage examples of the AmazonS3ClientBuilder.standard() method from the com.amazonaws.services.s3 package of the AWS SDK for Java (v1).

Prototype

public static AmazonS3ClientBuilder standard() 
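
A minimal sketch of typical usage (the region value and the reliance on the default credentials provider chain are illustrative assumptions, not taken from the examples below):

import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class S3ClientExample {
    public static void main(String[] args) {
        // standard() returns a builder preloaded with default settings;
        // credentials are resolved through the default provider chain
        // (environment variables, ~/.aws/credentials, or an instance profile).
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withRegion(Regions.US_EAST_1) // illustrative region
                .build();
        s3.listBuckets().forEach(bucket -> System.out.println(bucket.getName()));
    }
}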

Usage

From source file:com.zero_x_baadf00d.play.module.aws.s3.AmazonS3ModuleInitializer.java

License:Open Source License
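
This Play Framework module initializer reads the access key, secret, endpoint, and signing region from the application configuration, builds the client with an inline AWSCredentialsProvider, and creates the configured bucket at startup, tolerating BucketAlreadyOwnedByYou and AccessDenied errors.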

/**
 * Create a simple instance of {@code S3Module}.
 *
 * @param lifecycle     The application life cycle
 * @param configuration The application configuration
 * @since 16.03.13
 */
@Inject
public AmazonS3ModuleInitializer(final ApplicationLifecycle lifecycle, final Config configuration) {
    final String accessKey;
    final String secretKey;

    if (configuration.hasPath("aws.s3.authKey")) {
        accessKey = configuration.getString("aws.s3.authKey");
    } else {
        accessKey = configuration.getString("aws.authKey");
    }
    if (configuration.hasPath("aws.s3.authSecret")) {
        secretKey = configuration.getString("aws.s3.authSecret");
    } else {
        secretKey = configuration.getString("aws.authSecret");
    }
    final String endPoint = configuration.getString("aws.s3.endPoint");
    final String signingRegion = configuration.getString("aws.s3.signingRegion");

    final boolean withPathStyle = configuration.hasPath("aws.s3.withPathStyle")
            && configuration.getBoolean("aws.s3.withPathStyle");
    final boolean withChunkedEncodingDisabled = configuration.hasPath("aws.s3.disableChunkedEncoding")
            && configuration.getBoolean("aws.s3.disableChunkedEncoding");

    PlayS3.bucketName = configuration.getString("aws.s3.bucketName");
    PlayS3.publicUrl = configuration.hasPath("aws.s3.publicUrl") ? configuration.getString("aws.s3.publicUrl")
            : "/";
    if (!PlayS3.publicUrl.endsWith("/")) {
        PlayS3.publicUrl += "/";
    }

    if (accessKey == null || secretKey == null || PlayS3.bucketName == null) {
        throw new RuntimeException("S3Module is not properly configured");
    }

    PlayS3.amazonS3 = AmazonS3ClientBuilder.standard().withCredentials(new AWSCredentialsProvider() {
        @Override
        public AWSCredentials getCredentials() {
            return new BasicAWSCredentials(accessKey, secretKey);
        }

        @Override
        public void refresh() {
            // Not used with basic AWS credentials
        }
    }).withPathStyleAccessEnabled(withPathStyle).withChunkedEncodingDisabled(withChunkedEncodingDisabled)
            .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endPoint, signingRegion))
            .build();
    try {
        PlayS3.amazonS3.createBucket(PlayS3.bucketName);
    } catch (final AmazonS3Exception ex) {
        if (!"BucketAlreadyOwnedByYou".equals(ex.getErrorCode())
                && !"AccessDenied".equals(ex.getErrorCode())) {
            throw ex;
        }
    } finally {
        Logger.info("Using PlayS3 Bucket: " + PlayS3.bucketName);
    }

    lifecycle.addStopHook(() -> CompletableFuture.completedFuture(null));
}

From source file:dbs.Getfroms3.java
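
This example downloads an object, prints its Content-Type, and then re-fetches only the first bytes of the same object via a ranged GetObjectRequest. The bucketName, key, and credentials identifiers are fields defined elsewhere in the class.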

public static void main(String[] args) throws IOException {

    AmazonS3 s3Client = AmazonS3ClientBuilder.standard().withRegion(Regions.AP_SOUTH_1)
            .withCredentials(new AWSStaticCredentialsProvider(credentials)).build();
    //AmazonS3 s3Client = new AmazonS3Client(new ProfileCredentialsProvider("inhack2hire28"));
    try {
        System.out.println("Downloading an object");
        S3Object s3object = s3Client.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + s3object.getObjectMetadata().getContentType());
        displayTextInputStream(s3object.getObjectContent());

        // Get a range of bytes from an object.

        GetObjectRequest rangeObjectRequest = new GetObjectRequest(bucketName, key);
        rangeObjectRequest.setRange(0, 10);
        S3Object objectPortion = s3Client.getObject(rangeObjectRequest);

        System.out.println("Printing bytes retrieved.");
        displayTextInputStream(objectPortion.getObjectContent());

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which" + " means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means" + " the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:dk.dma.nogoservice.service.S3DataLoader.java

License:Apache License
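
This Spring component resolves a local cache directory, verifies up front that credentials can be obtained from the DefaultAWSCredentialsProviderChain, and fails fast if the expected data bucket does not exist.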

@Autowired
public S3DataLoader(@Value("${s3dataloader.tempdir:#{null}}") String tempDirLocation) {
    tempDirLocation = tempDirLocation != null ? tempDirLocation : System.getProperty("java.io.tmpdir");
    tempDir = new File(tempDirLocation);
    if (!tempDir.exists()) {
        cacheLocally = tempDir.mkdirs();
    } else {
        cacheLocally = true;
    }

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    try {
        credentialsProvider.getCredentials();
    } catch (SdkClientException e) {
        throw new IllegalStateException(
                "You must define AWS credentials in USER_HOME/.aws/credentials file on the machine. When running on AWS container service, "
                        + "the TaskRole should have permissions to read from S3");
    }

    AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard();
    builder.setCredentials(credentialsProvider);
    builder.setRegion("eu-west-1");
    amazonS3 = builder.build();
    Preconditions.checkArgument(amazonS3.doesBucketExist(DATA_BUCKET),
            "No AWS S3 bucket named " + DATA_BUCKET + "; this bucket must exist");
}

From source file:grails.plugins.crm.content.aws.AmazonS3ClientFactory.java

License:Apache License
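
This factory targets an S3-compatible endpoint: it overrides the signer type, enables path-style access, and authenticates with static credentials.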

@Override
public AmazonS3 getObject() {
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
    ClientConfiguration clientConfiguration = new ClientConfiguration();
    clientConfiguration.setSignerOverride("AWSS3V4SignerType");

    return AmazonS3ClientBuilder.standard()
            .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, region))
            .withPathStyleAccessEnabled(true).withClientConfiguration(clientConfiguration)
            .withCredentials(new AWSStaticCredentialsProvider(credentials)).build();
}

From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License:Apache License
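
This utility uploads a local file, a local directory, or data staged out of HDFS, with configurable retry behavior. The client is built either from an inline access key and secret or from a credentials properties file.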

public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;

    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is:" + runFileTransferEntity.getBucketName());
    log.info("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());

    String amazonFileUploadLocationOriginal = null;
    FileInputStream stream = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            Log.error("Invalid local path.Please provide valid path");
            throw new AWSUtilException("Invalid local path");
        }

    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }

            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name" + filepath);
            s3folderName = filepath;

            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }

            File f = new File(runFileTransferEntity.getLocalPath());

            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path ");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;

                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);

                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);

                        boolean success = dirs.mkdirs();
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }

                        for (File files : dirs.listFiles()) {

                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }

                        }
                    }

                    catch (IOException e) {
                        Log.error("IOException occured while transfering the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {

                            FileUtils.deleteDirectory(dirs);
                        }

                    }

                } else {
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    stream = new FileInputStream("/tmp/" + f.getName());
                    File S3file = new File("/tmp/" + f.getName());

                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, S3file); // use the file copied from HDFS, not the /tmp directory handle
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {

                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());

                if (Files.isDirectory(inputfile)) {

                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {

                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);

                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }

                    }

                } else {
                    PutObjectRequest putObjectRequest = null;
                    File file = new File(runFileTransferEntity.getLocalPath());
                    stream = new FileInputStream(runFileTransferEntity.getLocalPath());
                    putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal, keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }

        }

        catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                Log.error("Incorrect details provided. Please provide valid details", e);
                if (runFileTransferEntity.getFailOnError())
                    throw new AWSUtilException("Incorrect details provided");
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;

        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occured while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occured while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }
    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}

From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License:Apache License
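
The companion download method builds the client the same way as upload, splits the configured folder name into an S3 prefix and key, honors an "Overwrite If Exists" setting, and can copy the downloaded file into HDFS.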

public void download(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util download");

    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.exists() && filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            throw new AWSUtilException("Invalid local path");
        }
    boolean fail_if_exist = false;
    int retryAttempt = 0;
    int i;
    String amazonFileUploadLocationOriginal = null;
    String keyName = null;
    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();
    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {

            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {

                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());

                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }
            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            if (filepath.lastIndexOf("/") != -1) {
                s3folderName = filepath.substring(0, filepath.lastIndexOf("/"));
                keyName = filepath.substring(filepath.lastIndexOf("/") + 1);

            } else {

                keyName = filepath;

            }
            log.debug("keyName is: " + keyName);
            log.debug("bucket name is:" + runFileTransferEntity.getBucketName());
            log.debug("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());
            if (s3folderName != null) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }
            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                String outputPath = runFileTransferEntity.getLocalPath();
                String s1 = outputPath.substring(7, outputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File f = new File("/tmp");
                if (!f.exists())
                    f.mkdir();

                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream("/tmp/" + keyName));
                } else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }

                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);

                String s = outputPath.substring(7, outputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());

                Path local = new Path("/tmp/" + keyName);
                Path hdfs = new Path(hdfspath);
                hdfsFileSystem.copyFromLocalFile(local, hdfs);

            } else {

                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream(
                            runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                }

                else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }

            }
        }

        catch (AmazonServiceException e) {
            log.error("Amazon Service Exception", e);
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError()) {
                    Log.error("Incorrect details provided. Please provide correct details", e);
                    throw new AWSUtilException("Incorrect details provided");
                } else {
                    Log.error("Unknown Amazon exception occurred", e);
                }
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;

        } catch (Error e) {
            Log.error("Error occurred while downloading the file", e);
            throw new AWSUtilException(e);
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            } catch (Error err) {
                Log.error("Error occurred while downloading");
                throw new AWSUtilException(err);
            }
            continue;
        }
        done = true;
        break;
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError()) {
            throw new AWSUtilException("File transfer failed");
        }
    }
    log.debug("Finished AWSS3Util download");
}

From source file:io.confluent.connect.s3.S3SinkConnectorTestBase.java

License:Open Source License
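
This Kafka Connect test helper enables path-style access and optional transfer acceleration (WAN mode), and switches between a configured region and an explicit test endpoint depending on whether a URL is set.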

public AmazonS3 newS3Client(S3SinkConnectorConfig config) {
    AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard()
            .withAccelerateModeEnabled(config.getBoolean(S3SinkConnectorConfig.WAN_MODE_CONFIG))
            .withPathStyleAccessEnabled(true).withCredentials(new DefaultAWSCredentialsProviderChain());

    builder = url == null ? builder.withRegion(config.getString(S3SinkConnectorConfig.REGION_CONFIG))
            : builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(url, ""));

    return builder.build();
}

From source file:io.confluent.connect.s3.storage.S3Storage.java

License:Open Source License
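
The production counterpart of the test helper above: everything, including the credentials provider and client configuration, comes from the connector configuration, and a blank URL selects region-based addressing.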

/**
 * Creates and configures S3 client.
 * Visible for testing.
 *
 * @param config the S3 configuration.
 * @return S3 client
 */
public AmazonS3 newS3Client(S3SinkConnectorConfig config) {
    ClientConfiguration clientConfiguration = newClientConfiguration(config);
    AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard()
            .withAccelerateModeEnabled(config.getBoolean(WAN_MODE_CONFIG)).withPathStyleAccessEnabled(true)
            .withCredentials(config.getCredentialsProvider()).withClientConfiguration(clientConfiguration);

    String region = config.getString(REGION_CONFIG);
    if (StringUtils.isBlank(url)) {
        builder = "us-east-1".equals(region) ? builder.withRegion(Regions.US_EAST_1)
                : builder.withRegion(region);
    } else {
        builder = builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(url, region));
    }

    return builder.build();
}

From source file:io.dockstore.webservice.core.tooltester.ToolTesterS3Client.java

License:Apache License
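
The simplest case: a client built entirely from defaults, relying on the SDK's default credentials and region lookup.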

public ToolTesterS3Client(String bucketName) {
    this.bucketName = bucketName;
    this.s3 = AmazonS3ClientBuilder.standard().build();
}

From source file:io.konig.camel.aws.s3.DeleteObjectComponentVerifierExtension.java

License:Apache License
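
This Camel component verifier checks connectivity by building a client from the configured credentials and region and issuing a listBuckets call, mapping SDK client failures to authentication verification errors.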

@Override
protected Result verifyConnectivity(Map<String, Object> parameters) {
    ResultBuilder builder = ResultBuilder.withStatusAndScope(Result.Status.OK, Scope.CONNECTIVITY);

    try {
        S3Configuration configuration = setProperties(new S3Configuration(), parameters);
        AWSCredentials credentials = new BasicAWSCredentials(configuration.getAccessKey(),
                configuration.getSecretKey());
        AWSCredentialsProvider credentialsProvider = new AWSStaticCredentialsProvider(credentials);
        AmazonS3 client = AmazonS3ClientBuilder.standard().withCredentials(credentialsProvider)
                .withRegion(Regions.valueOf(configuration.getRegion())).build();
        client.listBuckets();
    } catch (SdkClientException e) {
        ResultErrorBuilder errorBuilder = ResultErrorBuilder
                .withCodeAndDescription(VerificationError.StandardCode.AUTHENTICATION, e.getMessage())
                .detail("aws_s3_exception_message", e.getMessage())
                .detail(VerificationError.ExceptionAttribute.EXCEPTION_CLASS, e.getClass().getName())
                .detail(VerificationError.ExceptionAttribute.EXCEPTION_INSTANCE, e);

        builder.error(errorBuilder.build());
    } catch (Exception e) {
        builder.error(ResultErrorBuilder.withException(e).build());
    }
    return builder.build();
}