Example usage for com.amazonaws.util BinaryUtils toHex

Introduction

On this page you can find example usage for com.amazonaws.util BinaryUtils toHex.

Prototype

public static String toHex(byte[] data) 

Document

Converts byte data to a Hex-encoded string in lower case.
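
Before the full examples below, a minimal, self-contained sketch of the call itself (the class name and byte values are just for illustration):

import com.amazonaws.util.BinaryUtils;

public class ToHexDemo {
    public static void main(String[] args) {
        // toHex renders each byte as two lowercase hex digits
        byte[] data = { (byte) 0xDE, (byte) 0xAD, (byte) 0xBE, (byte) 0xEF };
        System.out.println(BinaryUtils.toHex(data)); // prints "deadbeef"
    }
}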

Usage

From source file: com.ibm.og.s3.v4.AWSS3V4Signer.java

License: Open Source License

/**
 * Returns the pre-defined header value and sets other necessary headers if the request needs to be
 * chunk-encoded. Otherwise calls the superclass method, which calculates the hash of the whole
 * content for signing.
 */
@Override
protected String calculateContentHash(final SignableRequest<?> request) {
    // To be consistent with other service clients using sig-v4,
    // we just set the header as "required", and AWS4Signer.sign() will be
    // notified to pick up the header value returned by this method.
    request.addHeader(X_AMZ_CONTENT_SHA256, "required");
    final String contentLength = request.getHeaders().get(Headers.CONTENT_LENGTH);
    if (useChunkEncoding(request)) {
        final long originalContentLength;
        if (contentLength != null) {
            originalContentLength = Long.parseLong(contentLength);
        } else {
            /**
             * "Content-Length" header could be missing if the caller is uploading a stream without
             * setting Content-Length in ObjectMetadata. Before using sigv4, we rely on HttpClient to
             * add this header by using BufferedHttpEntity when creating the HttpRequest object. But
             * now, we need this information immediately for the signing process, so we have to cache
             * the stream here.
             */
            try {
                originalContentLength = getContentLength(request);
            } catch (final IOException e) {
                throw new AmazonClientException("Cannot get the content-length of the request content.", e);
            }
        }
        request.addHeader("x-amz-decoded-content-length", Long.toString(originalContentLength));
        // Make sure "Content-Length" header is not empty so that HttpClient
        // won't cache the stream again to recover Content-Length
        request.addHeader(Headers.CONTENT_LENGTH, Long
                .toString(AwsChunkedEncodingInputStream.calculateStreamContentLength(originalContentLength)));
        return CONTENT_SHA_256;
    }

    if (this.digestCache != null) {
        try {
            final long length = contentLength != null ? Long.parseLong(contentLength) : 0;
            return BinaryUtils.toHex(this.digestCache.get(length));
        } catch (final ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
    return super.calculateContentHash(request);
}

From source file: glacierpipe.GlacierPipe.java

License: Apache License

public String pipe(AmazonGlacierClient client, String vaultName, String archiveDesc, InputStream in)
        throws IOException {

    long currentPosition = 0;
    int partId = 0;

    try {
        byte[] buffer = new byte[4096];

        TreeHashMessageDigest completeHash = new TreeHashMessageDigest(MessageDigest.getInstance("SHA-256"));
        in = new DigestInputStream(in, completeHash);

        /**** Create an upload ID for the current upload ****/
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest().withVaultName(vaultName)
                .withArchiveDescription(archiveDesc).withPartSize(Long.toString(partSize));

        InitiateMultipartUploadResult result = client.initiateMultipartUpload(request);
        String uploadId = result.getUploadId();

        this.observer.gotUploadId(uploadId);

        /**** While there are still chunks to process ****/
        do {
            TreeHashMessageDigest partHash = new TreeHashMessageDigest(MessageDigest.getInstance("SHA-256"));

            // Fill up the buffer
            try (OutputStream bufferOut = this.buffer.getOutputStream();
                    OutputStream observedOut = new ObservedOutputStream(bufferOut,
                            new BufferingObserver(this.observer, partId));
                    DigestOutputStream out = new DigestOutputStream(observedOut, partHash);) {
                int read = 0;
                while (this.buffer.getRemaining() > 0 && (read = in.read(buffer, 0,
                        (int) Math.min(this.buffer.getRemaining(), buffer.length))) >= 0) {
                    out.write(buffer, 0, read);
                }
            }

            currentPosition += this.buffer.getLength();

            // If nothing was written to the buffer, we reached the end of the stream.  Break.
            if (this.buffer.getLength() == 0) {
                break;
            }

            // Report the Tree Hash of this chunk
            byte[] byteChecksum = partHash.digest();
            String checksum = BinaryUtils.toHex(byteChecksum);
            this.observer.computedTreeHash(partId, byteChecksum);

            // Try to upload this chunk
            int attempts = 0;
            do {
                try (InputStream bufferIn = this.buffer.getInputStream();

                        // KLUDGE: Throttling really belongs closer to EntitySerializer.serialize(), but there
                        // wasn't an easy hook for it.  Throttling on input would work well enough, but
                        // client.uploadMultipartPart() calculates a SHA-256 checksum on the request before it
                        // sends it, then calls reset() on the stream.  Because we know this, don't throttle until
                        // reset() has been called at least once.
                        InputStream throttledIn = this.throttlingStrategy == null ? bufferIn
                                : new ThrottledInputStream(bufferIn, this.throttlingStrategy) {
                                    private long resets = 0;

                                    @Override
                                    public void setBytesPerSecond() {
                                        if (this.resets > 0) {
                                            super.setBytesPerSecond();
                                        }
                                    }

                                    @Override
                                    protected long getMaxRead(long currentTime) {
                                        return this.resets > 0 ? super.getMaxRead(currentTime) : Long.MAX_VALUE;
                                    }

                                    @Override
                                    public synchronized void reset() throws IOException {
                                        super.reset();
                                        this.resets++;
                                    }
                                };

                        InputStream observedIn = new ObservedInputStream(throttledIn,
                                new UploadObserver(this.observer, partId));) {

                    UploadMultipartPartRequest partRequest = new UploadMultipartPartRequest()
                            .withVaultName(vaultName).withBody(observedIn).withChecksum(checksum)
                            .withRange(String.format("bytes %d-%d/*", currentPosition - this.buffer.getLength(),
                                    currentPosition - 1))
                            .withUploadId(uploadId).withAccountId("-");

                    UploadMultipartPartResult partResult = client.uploadMultipartPart(partRequest);

                    if (!Arrays.equals(BinaryUtils.fromHex(partResult.getChecksum()), byteChecksum)) {
                        throw new AmazonClientException("Checksum mismatch");
                    }

                    break;
                } catch (AmazonClientException e) {
                    attempts++;
                    observer.exceptionUploadingPart(partId, e, attempts, attempts < this.maxRetries);

                    if (attempts >= this.maxRetries) {
                        throw new IOException("Failed to upload after " + attempts + " attempts", e);
                    }
                } catch (IOException e) {
                    attempts++;
                    observer.exceptionUploadingPart(partId, e, attempts, attempts < this.maxRetries);

                    if (attempts >= this.maxRetries) {
                        throw new IOException("Failed to upload after " + attempts + " attempts", e);
                    }
                }

                try {
                    long sleepingFor = 1000 * (attempts < 15 ? (long) Math.pow(1.5, attempts) : 300);
                    this.observer.sleepingBeforeRetry(sleepingFor);
                    Thread.sleep(sleepingFor);
                } catch (InterruptedException e) {
                    throw new IOException("Upload interrupted", e);
                }
            } while (true);

            partId++;
        } while (this.buffer.getRemaining() == 0);

        byte[] completeHashBytes = completeHash.digest();

        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
                .withVaultName(vaultName).withUploadId(uploadId).withChecksum(BinaryUtils.toHex(completeHashBytes))
                .withArchiveSize(Long.toString(currentPosition));

        CompleteMultipartUploadResult compResult = client.completeMultipartUpload(compRequest);
        String location = compResult.getLocation();

        this.observer.done(completeHashBytes, location);
        return location;

    } catch (IOException e) {
        this.observer.fatalException(e);
        throw e;
    } catch (AmazonClientException e) {
        this.observer.fatalException(e);
        throw e;
    } catch (NoSuchAlgorithmException e) {
        throw new RuntimeException("SHA-256 not available", e);
    }
}
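
Both examples pair toHex with a SHA-256 digest, and the Glacier example also decodes the checksum echoed by the service with BinaryUtils.fromHex before comparing it to the locally computed hash. A minimal sketch of that round trip (the class name and input string are arbitrary):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Arrays;

import com.amazonaws.util.BinaryUtils;

public class ChecksumRoundTrip {
    public static void main(String[] args) throws Exception {
        // Hash some arbitrary content, as both examples do with their payloads
        byte[] digest = MessageDigest.getInstance("SHA-256")
                .digest("example".getBytes(StandardCharsets.UTF_8));

        String hex = BinaryUtils.toHex(digest);    // lowercase hex, e.g. for a checksum header
        byte[] decoded = BinaryUtils.fromHex(hex); // decode a hex checksum echoed by a service

        System.out.println(Arrays.equals(digest, decoded)); // true
    }
}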