Example usage for com.amazonaws Protocol HTTPS

List of usage examples for com.amazonaws Protocol HTTPS

Introduction

On this page you can find example usages of com.amazonaws Protocol HTTPS.

Prototype

Protocol HTTPS

To view the source code for com.amazonaws Protocol HTTPS, click the Source Link.

Document

HTTPS Protocol - Using the HTTPS protocol is more secure than using the HTTP protocol, but may use slightly more system resources.
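
Before the examples, here is a minimal, self-contained sketch of how Protocol.HTTPS is typically wired into a ClientConfiguration; the class name ProtocolHttpsExample and the use of DefaultAWSCredentialsProviderChain are illustrative assumptions, not taken from any example below.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class ProtocolHttpsExample {
    public static void main(String[] args) {
        // Select HTTPS for every request made through this configuration.
        ClientConfiguration config = new ClientConfiguration().withProtocol(Protocol.HTTPS);

        // The setter form, config.setProtocol(Protocol.HTTPS), is equivalent
        // and appears in several of the examples below.

        // Placeholder credentials provider; substitute whatever your environment uses.
        AmazonS3 s3 = new AmazonS3Client(new DefaultAWSCredentialsProviderChain(), config);

        System.out.println("Client configured with protocol: " + config.getProtocol());
    }
}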

Usage

From source file: io.prestosql.plugin.hive.s3.PrestoS3FileSystem.java

License: Apache License

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");
    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));

    HiveS3Config defaults = new HiveS3Config();
    this.stagingDirectory = new File(
            conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration
            .valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration
            .valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration
            .valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    this.multiPartUploadMinFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE,
            defaults.getS3MultipartMinFileSize().toBytes());
    this.multiPartUploadMinPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE,
            defaults.getS3MultipartMinPartSize().toBytes());
    this.isPathStyleAccess = conf.getBoolean(S3_PATH_STYLE_ACCESS, defaults.isS3PathStyleAccess());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS,
            defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION,
            defaults.isPinS3ClientToCurrentRegion());
    verify((pinS3ClientToCurrentRegion && conf.get(S3_ENDPOINT) == null) || !pinS3ClientToCurrentRegion,
            "Invalid configuration: either endpoint can be set or S3 client can be pinned to the current region");
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
    this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
    this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
    this.s3AclType = PrestoS3AclType.valueOf(conf.get(S3_ACL_TYPE, defaults.getS3AclType().name()));
    String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());

    ClientConfiguration configuration = new ClientConfiguration().withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis())).withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix).withUserAgentSuffix(S3_USER_AGENT_SUFFIX);

    this.credentialsProvider = createAwsCredentialsProvider(uri, conf);
    this.s3 = createAmazonS3Client(conf, configuration);
}

From source file: maebackup.MaeBackup.java

License: Open Source License

public static void upload(String lrzname) {
    try {
        System.out.println("Uploading to Glacier...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);

        File file = new File(lrzname);
        String archiveid = "";
        if (file.length() < 5 * 1024 * 1024) {
            System.out.println("File is small, uploading as single chunk");
            String treehash = TreeHashGenerator.calculateTreeHash(file);

            InputStream is = new FileInputStream(file);
            byte[] buffer = new byte[(int) file.length()];
            int bytes = is.read(buffer);
            is.close();
            if (bytes != file.length())
                throw new RuntimeException("Only read " + bytes + " of " + file.length()
                        + " byte file when preparing for upload.");
            InputStream bais = new ByteArrayInputStream(buffer);

            UploadArchiveRequest request = new UploadArchiveRequest(vaultname, lrzname, treehash, bais);
            UploadArchiveResult result = client.uploadArchive(request);
            archiveid = result.getArchiveId();
        } else {
            long chunks = file.length() / chunksize;
            while (chunks > 10000) {
                chunksize <<= 1;
                chunks = file.length() / chunksize;
            }
            String chunksizestr = Integer.toString(chunksize);
            System.out.println(
                    "Starting multipart upload: " + chunks + " full chunks of " + chunksizestr + " bytes");

            InitiateMultipartUploadResult imures = client.initiateMultipartUpload(
                    new InitiateMultipartUploadRequest(vaultname, lrzname, chunksizestr));

            String uploadid = imures.getUploadId();
            RandomAccessFile raf = new RandomAccessFile(file, "r");

            byte[] buffer = new byte[chunksize];

            for (long x = 0; x < chunks; x++) {
                try {
                    System.out.println("Uploading chunk " + x + "/" + chunks);

                    raf.seek(x * chunksize);
                    raf.readFully(buffer); // readFully guarantees the whole chunk is read

                    String parthash = TreeHashGenerator.calculateTreeHash(new ByteArrayInputStream(buffer));
                    String range = "bytes " + (x * chunksize) + "-" + ((x + 1) * chunksize - 1) + "/*";

                    client.uploadMultipartPart(new UploadMultipartPartRequest(vaultname, uploadid, parthash,
                            range, new ByteArrayInputStream(buffer)));
                } catch (Exception e) {
                    e.printStackTrace();
                    System.err.println("Error uploading chunk " + x + ", retrying...");
                    x--;
                }
            }

            if (file.length() > chunks * chunksize) {
                do {
                    try {
                        System.out.println("Uploading final partial chunk");
                        raf.seek(chunks * chunksize);
                        int bytes = raf.read(buffer);

                        String parthash = TreeHashGenerator
                                .calculateTreeHash(new ByteArrayInputStream(buffer, 0, bytes));
                        String range = "bytes " + (chunks * chunksize) + "-" + (file.length() - 1) + "/*";

                        client.uploadMultipartPart(new UploadMultipartPartRequest(vaultname, uploadid, parthash,
                                range, new ByteArrayInputStream(buffer, 0, bytes)));
                    } catch (Exception e) {
                        e.printStackTrace();
                        System.err.println("Error uploading final chunk, retrying...");
                        continue;
                    }
                } while (false);
            }

            System.out.println("Completing upload");
            String treehash = TreeHashGenerator.calculateTreeHash(file);
            CompleteMultipartUploadResult result = client
                    .completeMultipartUpload(new CompleteMultipartUploadRequest(vaultname, uploadid,
                            Long.toString(file.length()), treehash));
            archiveid = result.getArchiveId();
        }

        System.out.println("Uploaded " + lrzname + " to Glacier as ID " + archiveid);

        File listfile = new File(cachedir, "archives.lst");
        FileWriter fw = new FileWriter(listfile, true);
        fw.write(archiveid + " " + lrzname + "\n");
        fw.close();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file: maebackup.MaeBackup.java

License: Open Source License

public static void download(String filename, String jobid) {
    try {
        System.out.println("Starting download...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);

        if (jobid == null || jobid.isEmpty()) {
            String archiveid;
            // Yes, this will screw up on actual 138-character file names, but... yeah.
            if (filename.length() == 138) {
                archiveid = filename;
            } else {
                File listfile = new File(cachedir, "archives.lst");
                Map<File, String> filemap = loadHashes(listfile);
                archiveid = filemap.get(filename);
                if (archiveid == null) {
                    System.err.println("Error: Could not find archive ID for file " + filename);
                    System.exit(1);
                    return;
                }
            }

            InitiateJobResult result = client.initiateJob(new InitiateJobRequest(vaultname,
                    new JobParameters().withType("archive-retrieval").withArchiveId(archiveid)));
            jobid = result.getJobId();
            System.out.println("Started download job as ID " + jobid);
        } else {
            DescribeJobResult djres = client.describeJob(new DescribeJobRequest(vaultname, jobid));
            if (!djres.getStatusCode().equals("Succeeded")) {
                System.out.println("Job is not listed as Succeeded. It is: " + djres.getStatusCode());
                System.out.println(djres.getStatusMessage());
                System.exit(2);
            }
            long size = djres.getArchiveSizeInBytes();
            long chunks = size / chunksize;
            while (chunks > 10000) {
                chunksize <<= 1;
                chunks = size / chunksize;
            }
            RandomAccessFile raf = new RandomAccessFile(filename, "rw"); // "w" is not a valid mode
            raf.setLength(size);
            byte[] buffer = new byte[chunksize];

            for (int x = 0; x < chunks; x++) {
                try {
                    System.out.println("Downloading chunk " + x + " of " + chunks);
                    String range = "bytes " + (x * chunksize) + "-" + ((x + 1) * chunksize - 1) + "/*";

                    GetJobOutputResult gjores = client
                            .getJobOutput(new GetJobOutputRequest(vaultname, jobid, range));

                    // a single read() may return fewer bytes than requested, so loop until the chunk is full
                    InputStream body = gjores.getBody();
                    int off = 0;
                    while (off < chunksize) {
                        int read = body.read(buffer, off, chunksize - off);
                        if (read < 0)
                            break;
                        off += read;
                    }

                    MessageDigest md = MessageDigest.getInstance("SHA-256");
                    md.update(buffer, 0, chunksize);

                    byte[] hash = md.digest();

                    StringBuilder sb = new StringBuilder();
                    for (byte b : hash) {
                        sb.append(String.format("%02x", b));
                    }
                    if (!sb.toString().equalsIgnoreCase(gjores.getChecksum())) {
                        System.err.println("Error: Chunk " + x + " does not match SHA-256. Retrying.");
                        x--;
                        continue;
                    }

                    raf.seek(x * chunksize);
                    raf.write(buffer);
                } catch (Exception e) {
                    System.err.println("Error: Exception while downloading chunk " + x + ". Retrying.");
                    x--;
                }
            }

            if (size > chunks * chunksize) {
                do {
                    try {
                        System.out.println("Downloading final partial chunk");
                        String range = "bytes " + (chunks * chunksize) + "-" + (size - 1) + "/*";

                        GetJobOutputResult gjores = client
                                .getJobOutput(new GetJobOutputRequest(vaultname, jobid, range));

                        // read the whole (partial) final chunk; a single read() may return early
                        InputStream body = gjores.getBody();
                        int bytes = 0;
                        int read;
                        while ((read = body.read(buffer, bytes, buffer.length - bytes)) > 0) {
                            bytes += read;
                        }

                        MessageDigest md = MessageDigest.getInstance("SHA-256");
                        md.update(buffer, 0, bytes);

                        byte[] hash = md.digest();

                        StringBuilder sb = new StringBuilder();
                        for (byte b : hash) {
                            sb.append(String.format("%02x", b));
                        }
                        if (!sb.toString().equalsIgnoreCase(gjores.getChecksum())) {
                            System.err.println("Error: Final chunk does not match SHA-256. Retrying.");
                            continue;
                        }

                        raf.seek(chunks * chunksize);
                        raf.write(buffer, 0, bytes);
                    } catch (Exception e) {
                        System.err.println("Error: Exception while downloading final chunk. Retrying.");
                        continue;
                    }
                } while (false);
            }
            raf.close();

            String treehash = TreeHashGenerator.calculateTreeHash(new File(filename));
            if (!treehash.equalsIgnoreCase(djres.getSHA256TreeHash())) {
                System.err.println("Error: File failed final tree hash check.");
                System.exit(3);
            }

            System.out.println("Download complete.");
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file: maebackup.MaeBackup.java

License: Open Source License

public static void delete(String archive) {
    try {
        System.out.println("Deleting from Glacier...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);
        client.deleteArchive(new DeleteArchiveRequest(vaultname, archive));
        System.out.println("Archive deleted.");
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file: maebackup.MaeBackup.java

License: Open Source License

public static void list(String arg) {
    try {
        System.out.println("Listing Glacier vault...");
        ClientConfiguration config = new ClientConfiguration();
        config.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient client = new AmazonGlacierClient(credentials, config);
        client.setEndpoint(endpoint);

        if (arg == null || arg.isEmpty()) {
            InitiateJobResult result = client.initiateJob(
                    new InitiateJobRequest(vaultname, new JobParameters().withType("inventory-retrieval")));
            String jobid = result.getJobId();
            System.out.println("Started inventory retrival job as ID " + jobid);
        } else {
            DescribeJobResult djres = client.describeJob(new DescribeJobRequest(vaultname, arg));
            if (!djres.getStatusCode().equals("Succeeded")) {
                System.out.println("Job is not listed as Succeeded. It is: " + djres.getStatusCode());
                System.out.println(djres.getStatusMessage());
                System.exit(2);
            }

            GetJobOutputResult gjores = client
                    .getJobOutput(new GetJobOutputRequest().withVaultName(vaultname).withJobId(arg));
            byte[] buffer = new byte[1024];
            int bytes;
            while ((bytes = gjores.getBody().read(buffer)) > 0) {
                System.out.write(buffer, 0, bytes);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file: net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java

License: Apache License

@Override
public InputStream retrieve(IFile file) throws IOException {
    Preconditions.checkArgument((file != null), "file must not be null");

    AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS));

    GetObjectRequest getObjectRequest = new GetObjectRequest(getBucketName(), file.getFileName());
    S3Object storedObject = s3.getObject(getObjectRequest);

    return storedObject.getObjectContent();
}

From source file: net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java

License: Apache License

@Override
public void delete(IFile file) throws IOException {
    AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS));

    DeleteObjectRequest deleteObjectRequest = new DeleteObjectRequest(getBucketName(), file.getFileName());
    s3.deleteObject(deleteObjectRequest);
}

From source file: net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java

License: Apache License

@Override
public StorageResponse store(StorageRequest request) throws IOException {

    final IFile requestFile = request.getFile();

    AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS));

    Map<String, String> fileMetadata = new HashMap<String, String>();

    fileMetadata.put("accountUrn", request.getUser().getAccount().getUrn());
    fileMetadata.put("userUrn", request.getUser().getUrn());
    fileMetadata.put("fileUrn", requestFile.getUrn());
    fileMetadata.put("entityReferenceType", requestFile.getEntityReferenceType().name());
    fileMetadata.put("referenceUrn", requestFile.getReferenceUrn());
    fileMetadata.put("recordedTimestamp", Long.toString(request.getFile().getTimestamp()));
    //            fileMetadata.put("mimeType", request.getVfsObject().getMimeType());

    ObjectMetadata metadata = new ObjectMetadata();
    if (request.getContentLength() > 0) {
        LOG.debug("Including content length : " + request.getContentLength());
        metadata.setContentLength(request.getContentLength());
    }
    //            metadata.setContentMD5(streamMD5);
    metadata.setUserMetadata(fileMetadata);

    try {
        LOG.trace("Bucket name: " + getBucketName());
        LOG.trace("File name: " + request.getFileName());
        LOG.trace("inputStream == null?  " + (request.getInputStream() == null));

        HashingInputStream his = new HashingInputStream(request.getInputStream(), "SHA-256");

        PutObjectResult putResult = s3
                .putObject(new PutObjectRequest(getBucketName(), request.getFileName(), his, metadata));

        String finalUrl = getUrl(request.getFileName());
        LOG.trace("File URL: " + finalUrl);

        requestFile.setUrl(getUrl(request.getFileName()));

        byte[] signature = his.getSignature();

        JSONObject jsonObject = HashUtil.signFile(requestFile, signature);
        LOG.info("File Signature\n\n{}\n\n", jsonObject.toString(3));

        return new StorageResponse(requestFile, finalUrl, jsonObject.toString(3));
    } catch (AmazonS3Exception e) {
        e.printStackTrace();
        throw e;
    } catch (JSONException | NoSuchAlgorithmException e) {
        e.printStackTrace();
        throw new IOException(e);
    }

}

From source file: net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java

License: Apache License

@Override
public boolean isHealthy() {
    AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS));
    return s3.doesBucketExist(getBucketName());
}

From source file: org.apache.druid.storage.s3.S3StorageDruidModule.java

License: Apache License

@Nullable
private static Protocol parseProtocol(@Nullable String protocol) {
    if (protocol == null) {
        return null;
    }

    if (protocol.equalsIgnoreCase("http")) {
        return Protocol.HTTP;
    } else if (protocol.equalsIgnoreCase("https")) {
        return Protocol.HTTPS;
    } else {
        throw new IAE("Unknown protocol[%s]", protocol);
    }
}