Example usage for com.amazonaws ClientConfiguration setProtocol

List of usage examples for com.amazonaws ClientConfiguration setProtocol

Introduction

This page collects example usages of com.amazonaws ClientConfiguration setProtocol.

Prototype

public void setProtocol(Protocol protocol) 

Document

Sets the protocol (i.e. HTTP or HTTPS) to use when connecting to Amazon Web Services.
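
A minimal sketch of the call, using only SDK types that appear in the examples below (per the SDK documentation, HTTPS is the default, so setting it explicitly mainly matters when switching to HTTP):

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;

ClientConfiguration config = new ClientConfiguration();
// Protocol is an enum with exactly two values: HTTP and HTTPS
config.setProtocol(Protocol.HTTP);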

Usage

From source file: com.treasure_data.td_import.source.S3Source.java

License: Apache License

static AmazonS3Client createAmazonS3Client(SourceDesc desc) {
    String accessKey = desc.getUser();
    if (accessKey == null || accessKey.isEmpty()) {
        throw new IllegalArgumentException("S3 AccessKey is null or empty.");
    }
    String secretAccessKey = desc.getPassword();
    if (secretAccessKey == null || secretAccessKey.isEmpty()) {
        throw new IllegalArgumentException("S3 SecretAccessKey is null or empty.");
    }
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretAccessKey);

    ClientConfiguration conf = new ClientConfiguration();
    conf.setProtocol(Configuration.BI_PREPARE_S3_PROTOCOL);
    conf.setMaxConnections(Configuration.BI_PREPARE_S3_MAX_CONNECTIONS);
    conf.setMaxErrorRetry(Configuration.BI_PREPARE_S3_MAX_ERRORRETRY);
    conf.setSocketTimeout(Configuration.BI_PREPARE_S3_SOCKET_TIMEOUT);

    return new AmazonS3Client(credentials, conf);
}
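
Note: the Configuration.BI_PREPARE_* values are td-import's own constants; since setProtocol accepts only a com.amazonaws.Protocol, BI_PREPARE_S3_PROTOCOL is presumably Protocol.HTTP or Protocol.HTTPS defined there.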

From source file: com.upplication.s3fs.S3FileSystemProvider.java

License: Open Source License

protected ClientConfiguration createClientConfig(Properties props) {
    ClientConfiguration config = new ClientConfiguration();

    if (props == null)
        return config;

    if (props.containsKey("connection_timeout")) {
        log.trace("AWS client config - connection_timeout: {}", props.getProperty("connection_timeout"));
        config.setConnectionTimeout(Integer.parseInt(props.getProperty("connection_timeout")));
    }

    if (props.containsKey("max_connections")) {
        log.trace("AWS client config - max_connections: {}", props.getProperty("max_connections"));
        config.setMaxConnections(Integer.parseInt(props.getProperty("max_connections")));
    }

    if (props.containsKey("max_error_retry")) {
        log.trace("AWS client config - max_error_retry: {}", props.getProperty("max_error_retry"));
        config.setMaxErrorRetry(Integer.parseInt(props.getProperty("max_error_retry")));
    }

    if (props.containsKey("protocol")) {
        log.trace("AWS client config - protocol: {}", props.getProperty("protocol"));
        config.setProtocol(Protocol.valueOf(props.getProperty("protocol").toUpperCase()));
    }

    if (props.containsKey("proxy_domain")) {
        log.trace("AWS client config - proxy_domain: {}", props.getProperty("proxy_domain"));
        config.setProxyDomain(props.getProperty("proxy_domain"));
    }

    if (props.containsKey("proxy_host")) {
        log.trace("AWS client config - proxy_host: {}", props.getProperty("proxy_host"));
        config.setProxyHost(props.getProperty("proxy_host"));
    }

    if (props.containsKey("proxy_port")) {
        log.trace("AWS client config - proxy_port: {}", props.getProperty("proxy_port"));
        config.setProxyPort(Integer.parseInt(props.getProperty("proxy_port")));
    }

    if (props.containsKey("proxy_username")) {
        log.trace("AWS client config - proxy_username: {}", props.getProperty("proxy_username"));
        config.setProxyUsername(props.getProperty("proxy_username"));
    }

    if (props.containsKey("proxy_password")) {
        log.trace("AWS client config - proxy_password: {}", props.getProperty("proxy_password"));
        config.setProxyPassword(props.getProperty("proxy_password"));
    }

    if (props.containsKey("proxy_workstation")) {
        log.trace("AWS client config - proxy_workstation: {}", props.getProperty("proxy_workstation"));
        config.setProxyWorkstation(props.getProperty("proxy_workstation"));
    }

    if (props.containsKey("signer_override")) {
        log.debug("AWS client config - signerOverride: {}", props.getProperty("signer_override"));
        config.setSignerOverride(props.getProperty("signer_override"));
    }

    if (props.containsKey("socket_send_buffer_size_hints")
            || props.containsKey("socket_recv_buffer_size_hints")) {
        log.trace("AWS client config - socket_send_buffer_size_hints: {}, socket_recv_buffer_size_hints: {}",
                props.getProperty("socket_send_buffer_size_hints", "0"),
                props.getProperty("socket_recv_buffer_size_hints", "0"));
        int send = Integer.parseInt(props.getProperty("socket_send_buffer_size_hints", "0"));
        int recv = Integer.parseInt(props.getProperty("socket_recv_buffer_size_hints", "0"));
        config.setSocketBufferSizeHints(send, recv);
    }

    if (props.containsKey("socket_timeout")) {
        log.trace("AWS client config - socket_timeout: {}", props.getProperty("socket_timeout"));
        config.setSocketTimeout(Integer.parseInt(props.getProperty("socket_timeout")));
    }

    if (props.containsKey("user_agent")) {
        log.trace("AWS client config - user_agent: {}", props.getProperty("user_agent"));
        config.setUserAgent(props.getProperty("user_agent"));
    }

    return config;
}
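
A hedged usage sketch for the factory above; the keys match the ones the method reads, and the call is assumed to be made from inside S3FileSystemProvider (or a subclass), since createClientConfig is protected:

Properties props = new Properties();
props.setProperty("protocol", "http");      // mapped via Protocol.valueOf("HTTP")
props.setProperty("max_connections", "50");
props.setProperty("socket_timeout", "30000");
ClientConfiguration config = createClientConfig(props);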

From source file: com.yahoo.ycsb.db.S3Client.java

License: Open Source License

/**
* Initialize any state for the storage.
* Called once per S3 instance; if the client is not null it is re-used.
*/
@Override
public void init() throws DBException {
    final int count = INIT_COUNT.incrementAndGet();
    synchronized (S3Client.class) {
        Properties propsCL = getProperties();
        int recordcount = Integer.parseInt(propsCL.getProperty("recordcount"));
        int operationcount = Integer.parseInt(propsCL.getProperty("operationcount"));
        int numberOfOperations = 0;
        if (recordcount > 0) {
            if (recordcount > operationcount) {
                numberOfOperations = recordcount;
            } else {
                numberOfOperations = operationcount;
            }
        } else {
            numberOfOperations = operationcount;
        }
        if (count <= numberOfOperations) {
            String accessKeyId = null;
            String secretKey = null;
            String endPoint = null;
            String region = null;
            String maxErrorRetry = null;
            String maxConnections = null;
            String protocol = null;
            BasicAWSCredentials s3Credentials;
            ClientConfiguration clientConfig;
            if (s3Client != null) {
                System.out.println("Reusing the same client");
                return;
            }
            try {
                InputStream propFile = S3Client.class.getClassLoader().getResourceAsStream("s3.properties");
                Properties props = new Properties(System.getProperties());
                props.load(propFile);
                accessKeyId = props.getProperty("s3.accessKeyId");
                if (accessKeyId == null) {
                    accessKeyId = propsCL.getProperty("s3.accessKeyId");
                }
                System.out.println(accessKeyId);
                secretKey = props.getProperty("s3.secretKey");
                if (secretKey == null) {
                    secretKey = propsCL.getProperty("s3.secretKey");
                }
                System.out.println(secretKey);
                endPoint = props.getProperty("s3.endPoint");
                if (endPoint == null) {
                    endPoint = propsCL.getProperty("s3.endPoint", "s3.amazonaws.com");
                }
                System.out.println(endPoint);
                region = props.getProperty("s3.region");
                if (region == null) {
                    region = propsCL.getProperty("s3.region", "us-east-1");
                }
                System.out.println(region);
                maxErrorRetry = props.getProperty("s3.maxErrorRetry");
                if (maxErrorRetry == null) {
                    maxErrorRetry = propsCL.getProperty("s3.maxErrorRetry", "15");
                }
                maxConnections = props.getProperty("s3.maxConnections");
                if (maxConnections == null) {
                    maxConnections = propsCL.getProperty("s3.maxConnections");
                }
                protocol = props.getProperty("s3.protocol");
                if (protocol == null) {
                    protocol = propsCL.getProperty("s3.protocol", "HTTPS");
                }
                sse = props.getProperty("s3.sse");
                if (sse == null) {
                    sse = propsCL.getProperty("s3.sse", "false");
                }
                String ssec = props.getProperty("s3.ssec");
                if (ssec == null) {
                    ssec = propsCL.getProperty("s3.ssec", null);
                }
                if (ssec != null) {
                    ssecKey = new SSECustomerKey(ssec); // build the key whichever source supplied ssec
                }
            } catch (Exception e) {
                System.err.println("The file properties doesn't exist " + e.toString());
                e.printStackTrace();
            }
            try {
                System.out.println("Inizializing the S3 connection");
                s3Credentials = new BasicAWSCredentials(accessKeyId, secretKey);
                clientConfig = new ClientConfiguration();
                clientConfig.setMaxErrorRetry(Integer.parseInt(maxErrorRetry));
                if (protocol.equals("HTTP")) {
                    clientConfig.setProtocol(Protocol.HTTP);
                } else {
                    clientConfig.setProtocol(Protocol.HTTPS);
                }
                if (maxConnections != null) {
                    clientConfig.setMaxConnections(Integer.parseInt(maxConnections));
                }
                s3Client = new AmazonS3Client(s3Credentials, clientConfig);
                s3Client.setRegion(Region.getRegion(Regions.fromName(region)));
                s3Client.setEndpoint(endPoint);
                System.out.println("Connection successfully initialized");
            } catch (Exception e) {
                System.err.println("Could not connect to S3 storage: " + e.toString());
                e.printStackTrace();
                throw new DBException(e);
            }
        } else {
            System.err.println("The number of threads must be less or equal than the operations");
            throw new DBException(new Error("The number of threads must be less or equal than the operations"));
        }
    }
}
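
Note: init() maps the s3.protocol property to the enum by hand, treating any value other than "HTTP" as HTTPS; Protocol.valueOf(protocol) would be a stricter equivalent that rejects unrecognized values.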

From source file: com.yahoo.ycsb.utils.connection.S3Connection.java

License: Open Source License

public S3Connection(String bucket, String region, String endPoint) throws ClientException {
    super(bucket, region, endPoint);
    logger.debug("S3Client.establishConnection(" + region + "," + endPoint + ") bucket: " + bucket);
    org.apache.log4j.Logger.getLogger("com.amazonaws").setLevel(Level.OFF);

    /*if (S3Connection.init == true) {
        init();
        S3Connection.init = false;
    }*/

    this.bucket = bucket;
    this.region = region;

    try {
        BasicAWSCredentials s3Credentials = new BasicAWSCredentials(accessKeyId, secretKey);
        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setMaxErrorRetry(Integer.parseInt(maxErrorRetry));
        if (protocol.equals("HTTP")) {
            clientConfig.setProtocol(Protocol.HTTP);
        } else {
            clientConfig.setProtocol(Protocol.HTTPS);
        }
        if (maxConnections != null) {
            clientConfig.setMaxConnections(Integer.parseInt(maxConnections));
        }

        logger.debug("Inizializing the S3 connection...");
        awsClient = new AmazonS3Client(s3Credentials, clientConfig);
        awsClient.setRegion(Region.getRegion(Regions.fromName(region)));
        awsClient.setEndpoint(endPoint);
        logger.debug("Connection successfully initialized");
    } catch (Exception e) {
        logger.error("Could not connect to S3 storage: " + e.toString());
        e.printStackTrace();
        throw new ClientException(e);
    }
}

From source file: edu.upenn.library.fcrepo.connector.annex.S3AnnexResolverFactory.java

License: Apache License

@Override
public void initialize(Properties props) {
    this.accessKey = props.getProperty(ACCESS_KEY_PROPNAME);
    this.secretKey = props.getProperty(SECRET_KEY_PROPNAME);
    this.bucket = props.getProperty(BUCKET_PROPNAME);
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);

    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTP);
    S3ClientOptions clientOptions = new S3ClientOptions();
    clientOptions.setPathStyleAccess(true);

    conn = new AmazonS3Client(credentials, clientConfig);
    conn.setS3ClientOptions(clientOptions);
    conn.setEndpoint(props.getProperty(ENDPOINT_PROPNAME));
}
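
Note: the combination of Protocol.HTTP and path-style access here is typical when the client targets a non-AWS, S3-compatible endpoint (read from ENDPOINT_PROPNAME), where virtual-host-style bucket addressing and TLS may not be available.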

From source file: hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License: Apache License

public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;

    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is:" + runFileTransferEntity.getBucketName());
    log.info("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());

    String amazonFileUploadLocationOriginal = null;
    FileInputStream stream = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            Log.error("Invalid local path.Please provide valid path");
            throw new AWSUtilException("Invalid local path");
        }

    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }

            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name" + filepath);
            s3folderName = filepath;

            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }

            File f = new File(runFileTransferEntity.getLocalPath());

            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path ");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;

                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);

                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);

                        boolean success = dirs.mkdirs();
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }

                        for (File files : dirs.listFiles()) {

                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }

                        }
                    }

                    catch (IOException e) {
                        Log.error("IOException occured while transfering the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {

                            FileUtils.deleteDirectory(dirs);
                        }

                    }

                } else {
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    stream = new FileInputStream("/tmp/" + f.getName());
                    File S3file = new File("/tmp/" + f.getName());

                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, S3file); // upload the file copied out of HDFS, not the /tmp handle in 'file'
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {

                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());

                if (Files.isDirectory(inputfile)) {

                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {

                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);

                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }

                    }

                } else {
                    PutObjectRequest putObjectRequest = null;
                    File file = new File(runFileTransferEntity.getLocalPath());
                    stream = new FileInputStream(runFileTransferEntity.getLocalPath());
                    putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal, keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }

        }

        catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError()) {
                    Log.error("Incorrect details provided. Please provide valid details", e);
                    throw new AWSUtilException("Incorrect details provided");
                }
            }

            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;

        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occured while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occured while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }
    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}

From source file: hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License: Apache License

public void download(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util download");

    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.exists() && filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            throw new AWSUtilException("Invalid local path");
        }
    boolean fail_if_exist = false;
    int retryAttempt = 0;
    int i;
    String amazonFileUploadLocationOriginal = null;
    String keyName = null;
    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();
    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {

            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {

                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());

                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }
            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            if (filepath.lastIndexOf("/") != -1) {
                s3folderName = filepath.substring(0, filepath.lastIndexOf("/"));
                keyName = filepath.substring(filepath.lastIndexOf("/") + 1);

            } else {

                keyName = filepath;

            }
            log.debug("keyName is: " + keyName);
            log.debug("bucket name is:" + runFileTransferEntity.getBucketName());
            log.debug("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());
            if (s3folderName != null) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }
            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                String outputPath = runFileTransferEntity.getLocalPath();
                String s1 = outputPath.substring(7, outputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File f = new File("/tmp");
                if (!f.exists())
                    f.mkdir();

                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream("/tmp/" + keyName));
                } else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }

                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);

                String s = outputPath.substring(7, outputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());

                Path local = new Path("/tmp/" + keyName);
                Path hdfs = new Path(hdfspath);
                hdfsFileSystem.copyFromLocalFile(local, hdfs);

            } else {

                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream(
                            runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                }

                else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }

            }
        }

        catch (AmazonServiceException e) {
            log.error("Amazon Service Exception", e);
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError()) {
                    Log.error("Incorrect details provided.Please provide correct details", e);
                    throw new AWSUtilException("Incorrect details provided");
                } else {
                    Log.error("Unknown amezon exception occured", e);
                }

            }

            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;

        } catch (Error e) {
            Log.error("Error occured while sleeping the thread");
            throw new AWSUtilException(e);
        } catch (Exception e) {
            log.error("error while transfering file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {

            } catch (Error err) {
                Log.error("Error occured while downloading");
                throw new AWSUtilException(err);
            }
            continue;
        }
        done = true;
        break;
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError()) {
            throw new AWSUtilException("File transfer failed");
        }
    }
    log.debug("Finished AWSS3Util download");
}

From source file: maebackup.MaeBackup.java

License: Open Source License

public static void upload(String lrzname) {
    try {
        System.out.println("Uploading to Glacier...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);

        File file = new File(lrzname);
        String archiveid = "";
        if (file.length() < 5 * 1024 * 1024) {
            System.out.println("File is small, uploading as single chunk");
            String treehash = TreeHashGenerator.calculateTreeHash(file);

            InputStream is = new FileInputStream(file);
            byte[] buffer = new byte[(int) file.length()];
            int bytes = is.read(buffer);
            if (bytes != file.length())
                throw new RuntimeException("Only read " + bytes + " of " + file.length()
                        + " byte file when preparing for upload.");
            InputStream bais = new ByteArrayInputStream(buffer);

            UploadArchiveRequest request = new UploadArchiveRequest(vaultname, lrzname, treehash, bais);
            UploadArchiveResult result = client.uploadArchive(request);
            archiveid = result.getArchiveId();
        } else {
            long chunks = file.length() / chunksize;
            while (chunks > 10000) {
                chunksize <<= 1;
                chunks = file.length() / chunksize;
            }
            String chunksizestr = Integer.toString(chunksize);
            System.out.println(
                    "Starting multipart upload: " + chunks + " full chunks of " + chunksizestr + " bytes");

            InitiateMultipartUploadResult imures = client.initiateMultipartUpload(
                    new InitiateMultipartUploadRequest(vaultname, lrzname, chunksizestr));

            String uploadid = imures.getUploadId();
            RandomAccessFile raf = new RandomAccessFile(file, "r");

            byte[] buffer = new byte[chunksize];

            for (long x = 0; x < chunks; x++) {
                try {
                    System.out.println("Uploading chunk " + x + "/" + chunks);

                    raf.seek(x * chunksize);
                    raf.read(buffer);

                    String parthash = TreeHashGenerator.calculateTreeHash(new ByteArrayInputStream(buffer));
                    String range = "bytes " + (x * chunksize) + "-" + ((x + 1) * chunksize - 1) + "/*";

                    client.uploadMultipartPart(new UploadMultipartPartRequest(vaultname, uploadid, parthash,
                            range, new ByteArrayInputStream(buffer)));
                } catch (Exception e) {
                    e.printStackTrace();
                    System.err.println("Error uploading chunk " + x + ", retrying...");
                    x--;
                }
            }

            if (file.length() > chunks * chunksize) {
                while (true) { // retry until the final chunk uploads
                    try {
                        System.out.println("Uploading final partial chunk");
                        raf.seek(chunks * chunksize);
                        int bytes = raf.read(buffer);

                        String parthash = TreeHashGenerator
                                .calculateTreeHash(new ByteArrayInputStream(buffer, 0, bytes));
                        String range = "bytes " + (chunks * chunksize) + "-" + (file.length() - 1) + "/*";

                        client.uploadMultipartPart(new UploadMultipartPartRequest(vaultname, uploadid, parthash,
                                range, new ByteArrayInputStream(buffer, 0, bytes)));
                        break; // success; exit the retry loop
                    } catch (Exception e) {
                        e.printStackTrace();
                        System.err.println("Error uploading final chunk, retrying...");
                        continue;
                    }
                }
            }

            System.out.println("Completing upload");
            String treehash = TreeHashGenerator.calculateTreeHash(file);
            CompleteMultipartUploadResult result = client
                    .completeMultipartUpload(new CompleteMultipartUploadRequest(vaultname, uploadid,
                            Long.toString(file.length()), treehash));
            archiveid = result.getArchiveId();
        }

        System.out.println("Uploaded " + lrzname + " to Glacier as ID " + archiveid);

        File listfile = new File(cachedir, "archives.lst");
        FileWriter fw = new FileWriter(listfile, true);
        fw.write(archiveid + " " + lrzname + "\n");
        fw.close();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file: maebackup.MaeBackup.java

License: Open Source License

public static void download(String filename, String jobid) {
    try {
        System.out.println("Starting download...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);

        if (jobid == null || jobid.isEmpty()) { // == compares references, not string contents
            String archiveid;
            // Yes, this will screw up on actual 138-character file names, but... yeah.
            if (filename.length() == 138) {
                archiveid = filename;
            } else {
                File listfile = new File(cachedir, "archives.lst");
                Map<File, String> filemap = loadHashes(listfile);
                archiveid = filemap.get(new File(filename)); // the map is keyed by File, not String
                if (archiveid == null) {
                    System.err.println("Error: Could not find archive ID for file " + filename);
                    System.exit(1);
                    return;
                }
            }

            InitiateJobResult result = client.initiateJob(new InitiateJobRequest(vaultname,
                    new JobParameters().withType("archive-retrieval").withArchiveId(archiveid)));
            jobid = result.getJobId();
            System.out.println("Started download job as ID " + jobid);
        } else {
            DescribeJobResult djres = client.describeJob(new DescribeJobRequest(vaultname, jobid));
            if (!djres.getStatusCode().equals("Succeeded")) {
                System.out.println("Job is not listed as Succeeded. It is: " + djres.getStatusCode());
                System.out.println(djres.getStatusMessage());
                System.exit(2);
            }
            long size = djres.getArchiveSizeInBytes();
            long chunks = size / chunksize;
            while (chunks > 10000) {
                chunksize <<= 1;
                chunks = size / chunksize;
            }
            RandomAccessFile raf = new RandomAccessFile(filename, "rw"); // "w" alone is not a valid mode
            raf.setLength(size);
            byte[] buffer = new byte[chunksize];

            for (int x = 0; x < chunks; x++) {
                try {
                    System.out.println("Downloading chunk " + x + " of " + chunks);
                    String range = "bytes " + (x * chunksize) + "-" + ((x + 1) * chunksize - 1) + "/*";

                    GetJobOutputResult gjores = client
                            .getJobOutput(new GetJobOutputRequest(vaultname, jobid, range));

                    // read() may return fewer bytes than requested; loop until the chunk buffer is full
                    InputStream body = gjores.getBody();
                    int off = 0;
                    int r;
                    while (off < chunksize && (r = body.read(buffer, off, chunksize - off)) > 0) {
                        off += r;
                    }

                    MessageDigest md = MessageDigest.getInstance("SHA-256");
                    md.update(buffer, 0, chunksize);

                    byte[] hash = md.digest();

                    StringBuffer sb = new StringBuffer();
                    for (byte b : hash) {
                        sb.append(String.format("%02x", b));
                    }
                    if (!sb.toString().equalsIgnoreCase(gjores.getChecksum())) {
                        System.err.println("Error: Chunk " + x + " does not match SHA-256. Retrying.");
                        x--;
                        continue;
                    }

                    raf.seek(x * chunksize);
                    raf.write(buffer);
                } catch (Exception e) {
                    System.err.println("Error: Exception while downloading chunk " + x + ". Retrying.");
                    x--;
                }
            }

            if (size > chunks * chunksize) {
                while (true) { // retry until the final chunk downloads
                    try {
                        System.out.println("Downloading final partial chunk");
                        String range = "bytes " + (chunks * chunksize) + "-" + (size - 1) + "/*";

                        GetJobOutputResult gjores = client
                                .getJobOutput(new GetJobOutputRequest(vaultname, jobid, range));

                        // read() may stop short of the full partial chunk; loop to EOF
                        InputStream body = gjores.getBody();
                        int bytes = 0;
                        int r;
                        while (bytes < buffer.length && (r = body.read(buffer, bytes, buffer.length - bytes)) > 0) {
                            bytes += r;
                        }

                        MessageDigest md = MessageDigest.getInstance("SHA-256");
                        md.update(buffer, 0, bytes);

                        byte[] hash = md.digest();

                        StringBuffer sb = new StringBuffer();
                        for (byte b : hash) {
                            sb.append(String.format("%02x", b));
                        }
                        if (!sb.toString().equalsIgnoreCase(gjores.getChecksum())) {
                            System.err.println("Error: Final chunk does not match SHA-256. Retrying.");
                            continue;
                        }

                        raf.seek(chunks * chunksize);
                        raf.write(buffer, 0, bytes);
                        break; // success; exit the retry loop
                    } catch (Exception e) {
                        System.err.println("Error: Exception while downloading final chunk. Retrying.");
                        continue;
                    }
                }
            }
            raf.close();

            String treehash = TreeHashGenerator.calculateTreeHash(new File(filename));
            if (!treehash.equalsIgnoreCase(djres.getSHA256TreeHash())) {
                System.err.println("Error: File failed final tree hash check.");
                System.exit(3);
            }

            System.out.println("Download complete.");
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file: maebackup.MaeBackup.java

License: Open Source License

public static void delete(String archive) {
    try {
        System.out.println("Deleting from Glacier...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);
        client.deleteArchive(new DeleteArchiveRequest(vaultname, archive));
        System.out.println("Archive deleted.");
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}