Example usage for com.amazonaws ClientConfiguration ClientConfiguration

List of usage examples for com.amazonaws ClientConfiguration ClientConfiguration

Introduction

On this page you can find example usage of the com.amazonaws ClientConfiguration no-argument constructor, ClientConfiguration().

Prototype

public ClientConfiguration() 
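
A minimal sketch of typical use, shown here before the project examples: construct the configuration with its defaults, override a few common settings, and hand it to a client builder. The timeout and retry values below are illustrative assumptions, not recommendations.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

ClientConfiguration config = new ClientConfiguration(); // starts with SDK defaults
config.setProtocol(Protocol.HTTPS);      // HTTPS is already the default protocol
config.setConnectionTimeout(10_000);     // ms to establish a connection (assumed value)
config.setSocketTimeout(50_000);         // ms to wait for data on an open connection (assumed value)
config.setMaxErrorRetry(3);              // retry count for retryable service errors (assumed value)

AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withClientConfiguration(config)
        .build();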

Usage

From source file:grails.plugins.crm.content.aws.AmazonS3ClientFactory.java

License:Apache License

@Override
public AmazonS3 getObject() {
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
    ClientConfiguration clientConfiguration = new ClientConfiguration();
    clientConfiguration.setSignerOverride("AWSS3V4SignerType");

    return AmazonS3ClientBuilder.standard()
            .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, region))
            .withPathStyleAccessEnabled(true).withClientConfiguration(clientConfiguration)
            .withCredentials(new AWSStaticCredentialsProvider(credentials)).build();
}
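
Design note: the "AWSS3V4SignerType" override forces Signature Version 4 signing. S3-compatible stores reached through a custom endpoint often require this, because the SDK may otherwise fall back to an older signer when it cannot map the endpoint to a known region.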

From source file:hu.mta.sztaki.lpds.cloud.entice.imageoptimizer.iaashandler.amazontarget.EC2VirtualMachine.java

License:Apache License

@Override
protected void parseVMCreatorParameters(Map<String, Vector<String>> parameters) {
    super.datacollectorDelay = 2000; // 2 seconds delay between polls
    disableUnparseableDateWarning();
    if (parameters == null)
        throw new IllegalArgumentException("Missing parameters");
    if (!parameters.containsKey(ACCESS_KEY) || parameters.get(ACCESS_KEY) == null
            || parameters.get(ACCESS_KEY).size() == 0 || parameters.get(ACCESS_KEY).get(0) == null)
        throw new IllegalArgumentException("Missing parameter: " + ACCESS_KEY);
    if (!parameters.containsKey(SECRET_KEY) || parameters.get(SECRET_KEY) == null
            || parameters.get(SECRET_KEY).size() == 0 || parameters.get(SECRET_KEY).get(0) == null)
        throw new IllegalArgumentException("Missing parameter: " + SECRET_KEY);
    this.accessKey = parameters.get(ACCESS_KEY).get(0);
    this.secretKey = parameters.get(SECRET_KEY).get(0);
    if (parameters.containsKey(ENDPOINT) && parameters.get(ENDPOINT) != null
            && parameters.get(ENDPOINT).size() > 0)
        this.endpoint = parameters.get(ENDPOINT).get(0);
    if (parameters.containsKey(INSTANCE_TYPE) && parameters.get(INSTANCE_TYPE) != null
            && parameters.get(INSTANCE_TYPE).size() > 0)
        this.instanceType = parameters.get(INSTANCE_TYPE).get(0);
    if (parameters.containsKey(LOGIN_NAME) && parameters.get(LOGIN_NAME) != null
            && parameters.get(LOGIN_NAME).size() > 0)
        super.loginName = parameters.get(LOGIN_NAME).get(0);
    AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
    ClientConfiguration clientConfiguration = new ClientConfiguration();
    amazonEC2Client = new AmazonEC2Client(awsCredentials, clientConfiguration);
    amazonEC2Client.setEndpoint(endpoint);
}
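
Design note: this example leaves the ClientConfiguration at its defaults; constructing AmazonEC2Client directly from (credentials, configuration) is the pre-builder SDK v1 pattern, and the subsequent setEndpoint call points the client at a non-default EC2 API endpoint.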

From source file:hu.mta.sztaki.lpds.cloud.entice.imageoptimizer.iaashandler.amazontarget.Storage.java

License:Apache License

/**
 * @param endpoint S3 endpoint URL
 * @param accessKey Access key
 * @param secretKey Secret key
 * @param bucket Bucket name 
 * @param path Key name of the object to download (path + file name)
 * @param file Local file to download to 
 * @throws Exception On any error
 */
public static void download(String endpoint, String accessKey, String secretKey, String bucket, String path,
        File file) throws Exception {
    AmazonS3Client amazonS3Client = null;
    InputStream in = null;
    OutputStream out = null;
    try {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(MAX_CONNECTIONS);
        clientConfiguration.setMaxErrorRetry(PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY);
        clientConfiguration.setConnectionTimeout(ClientConfiguration.DEFAULT_CONNECTION_TIMEOUT);
        amazonS3Client = new AmazonS3Client(awsCredentials, clientConfiguration);
        S3ClientOptions clientOptions = new S3ClientOptions().withPathStyleAccess(true);
        amazonS3Client.setS3ClientOptions(clientOptions);
        amazonS3Client.setEndpoint(endpoint);
        S3Object object = amazonS3Client.getObject(new GetObjectRequest(bucket, path));
        in = object.getObjectContent();
        byte[] buf = new byte[BUFFER_SIZE];
        out = new FileOutputStream(file);
        int count;
        while ((count = in.read(buf)) != -1)
            out.write(buf, 0, count);
        out.close();
        in.close();
    } catch (AmazonServiceException x) {
        Shrinker.myLogger.info("download error: " + x.getMessage());
        throw new Exception("download exception", x);
    } catch (AmazonClientException x) {
        Shrinker.myLogger.info("download error: " + x.getMessage());
        throw new Exception("download exception", x);
    } catch (IOException x) {
        Shrinker.myLogger.info("download error: " + x.getMessage());
        throw new Exception("download exception", x);
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (Exception e) {
            }
        }
        if (out != null) {
            try {
                out.close();
            } catch (Exception e) {
            }
        }
        if (amazonS3Client != null) {
            try {
                amazonS3Client.shutdown();
            } catch (Exception e) {
            }
        }
    }
}
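
On a Java 7+ runtime, the close/finally bookkeeping above can be condensed with try-with-resources. A minimal sketch of the same download, reusing the method's bucket, path, file, and BUFFER_SIZE, and assuming an SDK version in which S3Object is Closeable:

try (S3Object object = amazonS3Client.getObject(new GetObjectRequest(bucket, path));
        InputStream in = object.getObjectContent();
        OutputStream out = new FileOutputStream(file)) {
    byte[] buf = new byte[BUFFER_SIZE];
    int count;
    while ((count = in.read(buf)) != -1)
        out.write(buf, 0, count); // stream the object body to the local file
} finally {
    amazonS3Client.shutdown(); // release the client's connection pool
}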

From source file:hu.mta.sztaki.lpds.cloud.entice.imageoptimizer.iaashandler.amazontarget.Storage.java

License:Apache License

/**
 * @param file Local file to upload
 * @param endpoint S3 endpoint URL
 * @param accessKey Access key
 * @param secretKey Secret key
 * @param bucket Bucket name 
 * @param path Key name (path + file name)
 * @throws Exception On any error
 */
public static void upload(File file, String endpoint, String accessKey, String secretKey, String bucket,
        String path) throws Exception {
    AmazonS3Client amazonS3Client = null;
    try {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(MAX_CONNECTIONS);
        clientConfiguration.setMaxErrorRetry(PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY);
        clientConfiguration.setConnectionTimeout(ClientConfiguration.DEFAULT_CONNECTION_TIMEOUT);
        amazonS3Client = new AmazonS3Client(awsCredentials, clientConfiguration);
        S3ClientOptions clientOptions = new S3ClientOptions().withPathStyleAccess(true);
        amazonS3Client.setS3ClientOptions(clientOptions);
        amazonS3Client.setEndpoint(endpoint);
        //         amazonS3Client.putObject(new PutObjectRequest(bucket, path, file)); // up to 5GB
        TransferManager tm = new TransferManager(amazonS3Client); // up to 5TB
        Upload upload = tm.upload(bucket, path, file);
        // while (!upload.isDone()) { upload.getProgress().getBytesTransferred(); Thread.sleep(1000); } // to get progress
        upload.waitForCompletion();
        tm.shutdownNow();
    } catch (AmazonServiceException x) {
        Shrinker.myLogger.info("upload error: " + x.getMessage());
        throw new Exception("upload exception", x);
    } catch (AmazonClientException x) {
        Shrinker.myLogger.info("upload error: " + x.getMessage());
        throw new Exception("upload exception", x);
    } finally {
        if (amazonS3Client != null) {
            try {
                amazonS3Client.shutdown();
            } catch (Exception e) {
            }
        }
    }
}
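
Design note: TransferManager is used here instead of the commented-out putObject call because it transparently switches to parallel multipart uploads for large files (a single PUT is limited to 5 GB, multipart to 5 TB). Note also that tm.shutdownNow() shuts down the wrapped client by default, making the shutdown in the finally block redundant but harmless.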

From source file:hudson.plugins.ec2.EC2Cloud.java

License:Open Source License

/**
 * Connect to an EC2 instance.
 * @return {@link AmazonEC2} client
 */
public synchronized static AmazonEC2 connect(AWSCredentialsProvider credentialsProvider, URL endpoint) {
    awsCredentialsProvider = credentialsProvider;
    ClientConfiguration config = new ClientConfiguration();
    ProxyConfiguration proxyConfig = Jenkins.getInstance().proxy;
    Proxy proxy = proxyConfig == null ? Proxy.NO_PROXY : proxyConfig.createProxy(endpoint.getHost());
    if (!proxy.equals(Proxy.NO_PROXY) && proxy.address() instanceof InetSocketAddress) {
        InetSocketAddress address = (InetSocketAddress) proxy.address();
        config.setProxyHost(address.getHostName());
        config.setProxyPort(address.getPort());
        if (null != proxyConfig.getUserName()) {
            config.setProxyUsername(proxyConfig.getUserName());
            config.setProxyPassword(proxyConfig.getPassword());
        }
    }
    AmazonEC2 client = new AmazonEC2Client(credentialsProvider.getCredentials(), config);
    client.setEndpoint(endpoint.toString());
    return client;
}
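
Design note: routing the Jenkins-wide proxy settings through ClientConfiguration (host, port, and optional credentials) lets the SDK tunnel its HTTPS calls through an authenticating proxy without touching JVM-wide system properties.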

From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License:Apache License

public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;

    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is:" + runFileTransferEntity.getBucketName());
    log.info("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());

    String amazonFileUploadLocationOriginal = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            Log.error("Invalid local path.Please provide valid path");
            throw new AWSUtilException("Invalid local path");
        }

    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }

            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name" + filepath);
            s3folderName = filepath;

            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }

            File f = new File(runFileTransferEntity.getLocalPath());

            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path ");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;

                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);

                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);

                        boolean success = dirs.mkdirs();
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }

                        for (File files : dirs.listFiles()) {

                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }

                        }
                    }

                    catch (IOException e) {
                        Log.error("IOException occured while transfering the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {

                            FileUtils.deleteDirectory(dirs);
                        }

                    }

                } else {
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    File s3File = new File("/tmp/" + f.getName()); // the file just copied out of HDFS

                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, s3File);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {

                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());

                if (Files.isDirectory(inputfile)) {

                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {

                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);

                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }

                    }

                } else {
                    File file = new File(runFileTransferEntity.getLocalPath());
                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }

        }

        catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError()) {
                    Log.error("Incorrect details provided. Please provide valid details", e);
                    throw new AWSUtilException("Incorrect details provided");
                }
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;

        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occured while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occured while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }
    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}

From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License:Apache License

public void download(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util download");

    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.exists() && filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            throw new AWSUtilException("Invalid local path");
        }
    boolean fail_if_exist = false;
    int retryAttempt = 0;
    int i;
    String amazonFileUploadLocationOriginal = null;
    String keyName = null;
    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();
    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {

            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {

                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());

                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }
            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            if (filepath.lastIndexOf("/") != -1) {
                s3folderName = filepath.substring(0, filepath.lastIndexOf("/"));
                keyName = filepath.substring(filepath.lastIndexOf("/") + 1);

            } else {

                keyName = filepath;

            }
            log.debug("keyName is: " + keyName);
            log.debug("bucket name is:" + runFileTransferEntity.getBucketName());
            log.debug("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());
            if (s3folderName != null) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }
            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                String outputPath = runFileTransferEntity.getLocalPath();
                String s1 = outputPath.substring(7, outputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File f = new File("/tmp");
                if (!f.exists())
                    f.mkdir();

                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream("/tmp/" + keyName));
                } else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }

                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);

                String s = outputPath.substring(7, outputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());

                Path local = new Path("/tmp/" + keyName);
                Path hdfs = new Path(hdfspath);
                hdfsFileSystem.copyFromLocalFile(local, hdfs);

            } else {

                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream(
                            runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                }

                else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }

            }
        }

        catch (AmazonServiceException e) {
            log.error("Amazon Service Exception", e);
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError()) {
                    Log.error("Incorrect details provided. Please provide correct details", e);
                    throw new AWSUtilException("Incorrect details provided");
                } else {
                    Log.error("Unknown Amazon exception occurred", e);
                }
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;

        } catch (Error e) {
            Log.error("Error occurred while downloading the file");
            throw new AWSUtilException(e);
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            } catch (Error err) {
                Log.error("Error occurred while downloading");
                throw new AWSUtilException(err);
            }
            continue;
        }
        done = true;
        break;
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError()) {
            throw new AWSUtilException("File transfer failed");
        }
    }
    log.debug("Finished AWSS3Util download");
}

From source file:io.dockstore.common.FileProvisioning.java

License:Apache License

private static AmazonS3 getAmazonS3Client(HierarchicalINIConfiguration config) {
    AmazonS3 s3Client = new AmazonS3Client(new ClientConfiguration().withSignerOverride("S3Signer"));
    if (config.containsKey(S3_ENDPOINT)) {
        final String endpoint = config.getString(S3_ENDPOINT);
        LOG.info("found custom S3 endpoint, setting to {}", endpoint);
        s3Client.setEndpoint(endpoint);
        s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
    }
    return s3Client;
}
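
Design note: the fluent withSignerOverride(...) returns the same ClientConfiguration instance, so it can be passed inline to the client constructor. The "S3Signer" override selects the legacy S3 signer, which some non-AWS, S3-compatible endpoints expect in place of Signature Version 4.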

From source file:io.fineo.client.auth.AWSAbstractCognitoIdentityProvider.java

License:Open Source License

/**
 * Sets up an AWSAbstractCognitoIdentityProvider, which will serve as the
 * baseline for both Cognito and developer trusted identity providers.
 * Custom providers should not extend this class, but should extend
 * AWSAbstractCognitoDeveloperIdentityProvider
 *
 * @deprecated please use AWSAbstractCognitoIdentityProvider(String
 *             accountId, String identityPoolId, Regions region) instead.
 * @param accountId the accountId of the developer
 * @param identityPoolId the identityPoolId to be used
 */
@Deprecated
public AWSAbstractCognitoIdentityProvider(String accountId, String identityPoolId) {
    this(accountId, identityPoolId, new ClientConfiguration());
}

From source file:io.fineo.client.auth.AWSAbstractCognitoIdentityProvider.java

License:Open Source License

/**
 * Sets up an AWSAbstractCognitoIdentityProvider, which will serve as the
 * baseline for both Cognito and developer trusted identity providers.
 * Custom providers should not extend this class, but should extend
 * AWSAbstractCognitoDeveloperIdentityProvider
 *
 * @param accountId the accountId of the developer
 * @param identityPoolId the identityPoolId to be used
 * @param region the region cib will use
 */
public AWSAbstractCognitoIdentityProvider(String accountId, String identityPoolId, Regions region) {
    this(accountId, identityPoolId, new ClientConfiguration(), region);
}
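
Design note: both overloads simply delegate to a sibling constructor, supplying a default-configured ClientConfiguration; callers that need custom timeouts, retries, or proxy settings can use the overloads that accept an explicit ClientConfiguration.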