Example usage for com.amazonaws Protocol HTTPS

List of usage examples for com.amazonaws Protocol HTTPS

Introduction

On this page you can find example usage for com.amazonaws Protocol HTTPS.

Prototype

Protocol HTTPS

Document

HTTPS Protocol - Using the HTTPS protocol is more secure than using the HTTP protocol, but may use slightly more system resources.
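
Before the project-specific examples that follow, here is a minimal, self-contained sketch of the pattern they all share: create a ClientConfiguration, set Protocol.HTTPS explicitly, and hand the configuration to a client builder. This is standard AWS SDK for Java 1.x usage, but the class name and region value below are placeholders, not code from any of the quoted projects.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class HttpsClientSketch {

    public static void main(String[] args) {
        // HTTPS is already the SDK default; setting it explicitly documents the intent
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);

        // "us-east-1" is a placeholder region; credentials come from the default provider chain
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(config)
                .withRegion("us-east-1")
                .build();

        System.out.println("S3 client created with protocol: " + config.getProtocol());
    }
}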

Usage

From source file:com.netflix.iep.aws.AwsClientFactory.java

License:Apache License
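
This factory method builds a ClientConfiguration from a Typesafe Config block, pinning the protocol to HTTPS up front and then copying over only the optional settings (timeouts, connection limits, proxy and user-agent values) that are present in the config.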

ClientConfiguration createClientConfig(String name) {
    final Config cfg = getConfig(name, "client");
    final ClientConfiguration settings = new ClientConfiguration();

    // Should be the default, but just to make it explicit
    settings.setProtocol(Protocol.HTTPS);

    // Helpers
    Function<String, Long> getMillis = k -> cfg.getDuration(k, TimeUnit.MILLISECONDS);
    Function<String, Integer> getTimeout = k -> getMillis.apply(k).intValue();

    // Typically use the defaults
    setIfPresent(cfg, "use-gzip", cfg::getBoolean, settings::setUseGzip);
    setIfPresent(cfg, "use-reaper", cfg::getBoolean, settings::setUseReaper);
    setIfPresent(cfg, "use-tcp-keep-alive", cfg::getBoolean, settings::setUseTcpKeepAlive);
    setIfPresent(cfg, "use-throttle-retries", cfg::getBoolean, settings::setUseThrottleRetries);
    setIfPresent(cfg, "max-connections", cfg::getInt, settings::setMaxConnections);
    setIfPresent(cfg, "max-error-retry", cfg::getInt, settings::setMaxErrorRetry);
    setIfPresent(cfg, "connection-ttl", getMillis, settings::setConnectionTTL);
    setIfPresent(cfg, "connection-max-idle", getMillis, settings::setConnectionMaxIdleMillis);
    setIfPresent(cfg, "connection-timeout", getTimeout, settings::setConnectionTimeout);
    setIfPresent(cfg, "socket-timeout", getTimeout, settings::setSocketTimeout);
    setIfPresent(cfg, "client-execution-timeout", getTimeout, settings::setClientExecutionTimeout);
    setIfPresent(cfg, "user-agent-prefix", cfg::getString, settings::setUserAgentPrefix);
    setIfPresent(cfg, "user-agent-suffix", cfg::getString, settings::setUserAgentSuffix);
    setIfPresent(cfg, "proxy-port", cfg::getInt, settings::setProxyPort);
    setIfPresent(cfg, "proxy-host", cfg::getString, settings::setProxyHost);
    setIfPresent(cfg, "proxy-domain", cfg::getString, settings::setProxyDomain);
    setIfPresent(cfg, "proxy-workstation", cfg::getString, settings::setProxyWorkstation);
    setIfPresent(cfg, "proxy-username", cfg::getString, settings::setProxyUsername);
    setIfPresent(cfg, "proxy-password", cfg::getString, settings::setProxyPassword);
    return settings;
}

From source file:com.netflix.spinnaker.clouddriver.aws.security.AWSProxy.java

License:Apache License
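
Here an AWSProxy is applied to an existing ClientConfiguration. The protocol defaults to Protocol.HTTP and is upgraded to Protocol.HTTPS only when the configured string matches "HTTPS" case-insensitively; NTLM proxies additionally get a domain and workstation.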

public void apply(ClientConfiguration clientConfiguration) {

    clientConfiguration.setProxyHost(proxyHost);
    clientConfiguration.setProxyPort(Integer.parseInt(proxyPort));
    clientConfiguration.setProxyUsername(proxyUsername);
    clientConfiguration.setProxyPassword(proxyPassword);

    Protocol awsProtocol = Protocol.HTTP;

    if ("HTTPS".equalsIgnoreCase(protocol)) {
        awsProtocol = Protocol.HTTPS;
    }

    clientConfiguration.setProtocol(awsProtocol);

    if (isNTLMProxy()) {
        clientConfiguration.setProxyDomain(proxyDomain);
        clientConfiguration.setProxyWorkstation(proxyWorkstation);
    }
}

From source file:com.noctarius.hazelcast.aws.HazelcastAwsDiscoveryStrategy.java

License:Open Source License
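
The Hazelcast discovery strategy unconditionally forces HTTPS before wiring in the proxy settings, the credentials provider, and the EC2 endpoint.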

private AmazonEC2Client buildAmazonEC2Client() {
    ClientConfiguration configuration = new ClientConfiguration();

    // Always set HTTPS as protocol, security first
    configuration.setProtocol(Protocol.HTTPS);

    // Configure proxy configuration
    configureProxy(configuration);

    // Configure authentication
    AWSCredentialsProvider credentialsProvider = buildCredentialsProvider();

    // Create WS client
    AmazonEC2Client client = new AmazonEC2Client(credentialsProvider, configuration);

    // Configure Amazon EC2 WS endpoint
    configureEndpoint(client);

    return client;
}

From source file:com.pearson.eidetic.driver.Common.java
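
A small helper that applies fixed defaults to a ClientConfiguration: HTTPS, a 10-second connection timeout, a 60-second socket timeout, and up to 10,000 connections.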

public static ClientConfiguration setClientConfigurationSettings(ClientConfiguration clientConfiguration) {
    clientConfiguration.setConnectionTimeout(10000);
    clientConfiguration.setMaxConnections(10000);
    clientConfiguration.setProtocol(Protocol.HTTPS);
    clientConfiguration.setSocketTimeout(60000);

    return clientConfiguration;
}

From source file:com.pearson.eidetic.driver.threads.MonitorSnapshotVolumeTime.java
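
This connect method builds a region-specific EC2 client over HTTPS from static credentials, deriving the endpoint host name from the region.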

public AmazonEC2Client connect(Region region, String awsAccessKey, String awsSecretKey) {
    AmazonEC2Client ec2Client;
    String endpoint = "ec2." + region.getName() + ".amazonaws.com";

    AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTPS);

    ec2Client = new AmazonEC2Client(credentials, clientConfig);
    ec2Client.setRegion(region);
    ec2Client.setEndpoint(endpoint);
    return ec2Client;
}

From source file:com.yahoo.ycsb.db.S3Client.java

License:Open Source License
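
The YCSB S3 binding reads its settings from s3.properties (falling back to command-line properties), defaults the protocol to HTTPS, and builds a new AmazonS3Client only when one is not already shared across threads.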

/**
* Initialize any state for the storage.
* Called once per S3 instance; if the client is not null it is re-used.
*/
@Override
public void init() throws DBException {
    final int count = INIT_COUNT.incrementAndGet();
    synchronized (S3Client.class) {
        Properties propsCL = getProperties();
        int recordcount = Integer.parseInt(propsCL.getProperty("recordcount"));
        int operationcount = Integer.parseInt(propsCL.getProperty("operationcount"));
        int numberOfOperations = 0;
        if (recordcount > 0) {
            if (recordcount > operationcount) {
                numberOfOperations = recordcount;
            } else {
                numberOfOperations = operationcount;
            }
        } else {
            numberOfOperations = operationcount;
        }
        if (count <= numberOfOperations) {
            String accessKeyId = null;
            String secretKey = null;
            String endPoint = null;
            String region = null;
            String maxErrorRetry = null;
            String maxConnections = null;
            String protocol = null;
            BasicAWSCredentials s3Credentials;
            ClientConfiguration clientConfig;
            if (s3Client != null) {
                System.out.println("Reusing the same client");
                return;
            }
            try {
                InputStream propFile = S3Client.class.getClassLoader().getResourceAsStream("s3.properties");
                Properties props = new Properties(System.getProperties());
                props.load(propFile);
                accessKeyId = props.getProperty("s3.accessKeyId");
                if (accessKeyId == null) {
                    accessKeyId = propsCL.getProperty("s3.accessKeyId");
                }
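                // Note: echoing credentials to stdout is only sensible for local benchmarking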
                System.out.println(accessKeyId);
                secretKey = props.getProperty("s3.secretKey");
                if (secretKey == null) {
                    secretKey = propsCL.getProperty("s3.secretKey");
                }
                System.out.println(secretKey);
                endPoint = props.getProperty("s3.endPoint");
                if (endPoint == null) {
                    endPoint = propsCL.getProperty("s3.endPoint", "s3.amazonaws.com");
                }
                System.out.println(endPoint);
                region = props.getProperty("s3.region");
                if (region == null) {
                    region = propsCL.getProperty("s3.region", "us-east-1");
                }
                System.out.println(region);
                maxErrorRetry = props.getProperty("s3.maxErrorRetry");
                if (maxErrorRetry == null) {
                    maxErrorRetry = propsCL.getProperty("s3.maxErrorRetry", "15");
                }
                maxConnections = props.getProperty("s3.maxConnections");
                if (maxConnections == null) {
                    maxConnections = propsCL.getProperty("s3.maxConnections");
                }
                protocol = props.getProperty("s3.protocol");
                if (protocol == null) {
                    protocol = propsCL.getProperty("s3.protocol", "HTTPS");
                }
                sse = props.getProperty("s3.sse");
                if (sse == null) {
                    sse = propsCL.getProperty("s3.sse", "false");
                }
                String ssec = props.getProperty("s3.ssec");
                if (ssec == null) {
                    ssec = propsCL.getProperty("s3.ssec", null);
                } else {
                    ssecKey = new SSECustomerKey(ssec);
                }
            } catch (Exception e) {
                System.err.println("The file properties doesn't exist " + e.toString());
                e.printStackTrace();
            }
            try {
                System.out.println("Inizializing the S3 connection");
                s3Credentials = new BasicAWSCredentials(accessKeyId, secretKey);
                clientConfig = new ClientConfiguration();
                clientConfig.setMaxErrorRetry(Integer.parseInt(maxErrorRetry));
                if (protocol.equals("HTTP")) {
                    clientConfig.setProtocol(Protocol.HTTP);
                } else {
                    clientConfig.setProtocol(Protocol.HTTPS);
                }
                if (maxConnections != null) {
                    clientConfig.setMaxConnections(Integer.parseInt(maxConnections));
                }
                s3Client = new AmazonS3Client(s3Credentials, clientConfig);
                s3Client.setRegion(Region.getRegion(Regions.fromName(region)));
                s3Client.setEndpoint(endPoint);
                System.out.println("Connection successfully initialized");
            } catch (Exception e) {
                System.err.println("Could not connect to S3 storage: " + e.toString());
                e.printStackTrace();
                throw new DBException(e);
            }
        } else {
            System.err.println("The number of threads must be less or equal than the operations");
            throw new DBException(new Error("The number of threads must be less or equal than the operations"));
        }
    }
}

From source file:com.yahoo.ycsb.utils.connection.S3Connection.java

License:Open Source License
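
A variant of the YCSB initialization above, wrapped in a connection class: the same HTTP/HTTPS switch on the configured protocol string, with the retry and connection limits applied before the region and endpoint are set.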

public S3Connection(String bucket, String region, String endPoint) throws ClientException {
    super(bucket, region, endPoint);
    logger.debug("S3Client.establishConnection(" + region + "," + endPoint + ") bucket: " + bucket);
    org.apache.log4j.Logger.getLogger("com.amazonaws").setLevel(Level.OFF);

    /*if (S3Connection.init == true) {
    init();
    S3Connection.init = false;
    }*/

    this.bucket = bucket;
    this.region = region;

    try {
        BasicAWSCredentials s3Credentials = new BasicAWSCredentials(accessKeyId, secretKey);
        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setMaxErrorRetry(Integer.parseInt(maxErrorRetry));
        if (protocol.equals("HTTP")) {
            clientConfig.setProtocol(Protocol.HTTP);
        } else {
            clientConfig.setProtocol(Protocol.HTTPS);
        }
        if (maxConnections != null) {
            clientConfig.setMaxConnections(Integer.parseInt(maxConnections));
        }

        logger.debug("Inizializing the S3 connection...");
        awsClient = new AmazonS3Client(s3Credentials, clientConfig);
        awsClient.setRegion(Region.getRegion(Regions.fromName(region)));
        awsClient.setEndpoint(endPoint);
        logger.debug("Connection successfully initialized");
    } catch (Exception e) {
        logger.error("Could not connect to S3 storage: " + e.toString());
        e.printStackTrace();
        throw new ClientException(e);
    }
}

From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License:Apache License
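
This upload routine always sets HTTPS on the ClientConfiguration before building the S3 client, then handles three source layouts: an HDFS directory staged through /tmp, a local directory uploaded file by file, and a single local file.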

public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;

    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is:" + runFileTransferEntity.getBucketName());
    log.info("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());

    String amazonFileUploadLocationOriginal = null;
    FileInputStream stream = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            Log.error("Invalid local path.Please provide valid path");
            throw new AWSUtilException("Invalid local path");
        }

    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }

            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name" + filepath);
            s3folderName = filepath;

            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }

            File f = new File(runFileTransferEntity.getLocalPath());

            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path ");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;

                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);

                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);

                        boolean success = dirs.mkdirs();
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }

                        for (File files : dirs.listFiles()) {

                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }

                        }
                    }

                    catch (IOException e) {
                        Log.error("IOException occured while transfering the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {

                            FileUtils.deleteDirectory(dirs);
                        }

                    }

                } else {
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    stream = new FileInputStream("/tmp/" + f.getName());
                    File S3file = new File("/tmp/" + f.getName());

                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, S3file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {

                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());

                if (Files.isDirectory(inputfile)) {

                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {

                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);

                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }

                    }

                } else {
                    PutObjectRequest putObjectRequest = null;
                    File file = new File(runFileTransferEntity.getLocalPath());
                    stream = new FileInputStream(runFileTransferEntity.getLocalPath());
                    putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal, keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }

        }

        catch (AmazonServiceException e) {
            // Treat 403/404 as fatal only when fail-on-error is set; otherwise fall
            // through to the retry sleep, matching the download() variant below
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError()) {
                    Log.error("Incorrect details provided. Please provide valid details", e);
                    throw new AWSUtilException("Incorrect details provided");
                }
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occurred while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }
    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}

From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License:Apache License
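
The download counterpart uses the same HTTPS client setup, splits the bucket-relative path into a folder and a key, and honors an "Overwrite If Exists" policy when writing locally or staging through /tmp for HDFS targets.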

public void download(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util download");

    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.exists() && filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            throw new AWSUtilException("Invalid local path");
        }
    boolean fail_if_exist = false;
    int retryAttempt = 0;
    int i;
    String amazonFileUploadLocationOriginal = null;
    String keyName = null;
    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();
    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {

            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {

                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());

                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }
            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            if (filepath.lastIndexOf("/") != -1) {
                s3folderName = filepath.substring(0, filepath.lastIndexOf("/"));
                keyName = filepath.substring(filepath.lastIndexOf("/") + 1);

            } else {

                keyName = filepath;

            }
            log.debug("keyName is: " + keyName);
            log.debug("bucket name is:" + runFileTransferEntity.getBucketName());
            log.debug("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());
            if (s3folderName != null) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }
            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                String outputPath = runFileTransferEntity.getLocalPath();
                String s1 = outputPath.substring(7, outputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File f = new File("/tmp");
                if (!f.exists())
                    f.mkdir();

                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream("/tmp/" + keyName));
                } else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }

                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);

                String s = outputPath.substring(7, outputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());

                Path local = new Path("/tmp/" + keyName);
                Path hdfs = new Path(hdfspath);
                hdfsFileSystem.copyFromLocalFile(local, hdfs);

            } else {

                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream(
                            runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                }

                else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }

            }
        }

        catch (AmazonServiceException e) {
            log.error("Amazon Service Exception", e);
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError()) {
                    Log.error("Incorrect details provided. Please provide correct details", e);
                    throw new AWSUtilException("Incorrect details provided");
                } else {
                    Log.error("Unknown Amazon exception occurred", e);
                }
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Error e) {
            Log.error("Error occurred while downloading", e);
            throw new AWSUtilException(e);
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        }
        done = true;
        break;
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError()) {
            throw new AWSUtilException("File transfer failed");
        }
    }
    log.debug("Finished AWSS3Util download");
}

From source file:io.confluent.connect.s3.util.S3ProxyConfig.java

License:Open Source License
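
A small parser that treats HTTPS as the secure default: only an explicit "http" (trimmed, compared case-insensitively) yields Protocol.HTTP.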

public static Protocol extractProtocol(String protocol) {
    if (StringUtils.isBlank(protocol)) {
        return Protocol.HTTPS;
    }
    return "http".equals(protocol.trim().toLowerCase(Locale.ROOT)) ? Protocol.HTTP : Protocol.HTTPS;
}