Example usage for com.amazonaws AmazonClientException AmazonClientException

List of usage examples for com.amazonaws AmazonClientException AmazonClientException

Introduction

On this page you can find example usages of the com.amazonaws AmazonClientException(Throwable) constructor.

Prototype

public AmazonClientException(Throwable t) 

Source Link

Usage

From source file:com.optimalbi.Controller.AmazonAccount.java

License:Apache License

/**
 * Initialises the service list and fails fast when no credentials are available.
 *
 * @throws AmazonClientException if no credentials have been provided
 */
private void configure() throws AmazonClientException {
    services = new ArrayList<>();
    if (getCredentials() != null) {
        return;
    }
    throw new AmazonClientException("No credentials provided");
}

From source file:com.optimalbi.Controller.AmazonAccount.java

License:Apache License

/**
 * Discovers all EC2 instances in every configured region and wraps each in a
 * {@code LocalEc2Service} added to {@code services}. Pricing data, when
 * available, is attached to each service. Increments {@code completed} once
 * per region for progress reporting.
 *
 * @throws AmazonClientException if a region's describe call fails; the region
 *         name is prefixed to the message and the original exception is kept
 *         as the cause
 */
private void populateEc2() throws AmazonClientException {
    Map<String, Double> pricing = readEc2Pricing();
    for (Region region : getRegions()) {
        try {
            AmazonEC2Client ec2 = new AmazonEC2Client(getCredentials().getCredentials());
            ec2.setRegion(region);

            // Flatten all reservations into a single set of instances.
            DescribeInstancesResult describeInstancesRequest = ec2.describeInstances();
            List<Reservation> reservations = describeInstancesRequest.getReservations();
            Set<Instance> inst = new HashSet<>();
            for (Reservation reservation : reservations) {
                inst.addAll(reservation.getInstances());
            }

            getLogger().info("EC2, Adding " + inst.size() + " instances from " + region.getName());

            for (Instance i : inst) {
                Service temp = new LocalEc2Service(i.getInstanceId(), getCredentials(), region, ec2,
                        getLogger());
                if (pricing != null) {
                    temp.attachPricing(pricing);
                }
                services.add(temp);
            }

        } catch (AmazonClientException e) {
            // Prefix the region for context and preserve the cause chain
            // (the original code discarded `e` and kept only its message).
            throw new AmazonClientException(region.getName() + " " + e.getMessage(), e);
        }
        completed.set(completed.get() + 1);
    }

}

From source file:com.optimalbi.Controller.AmazonAccount.java

License:Apache License

/**
 * Discovers all Redshift clusters in every configured region that supports
 * the service and wraps each in a {@code LocalRedshiftService} added to
 * {@code services}. Increments {@code completed} once per region for
 * progress reporting.
 *
 * @throws AmazonClientException if listing clusters fails; the region name
 *         is prefixed to the message and the original exception is kept as
 *         the cause
 */
private void populateRedshift() throws AmazonClientException {
    for (Region region : getRegions()) {
        try {
            if (region.isServiceSupported(ServiceAbbreviations.RedShift)) {
                AmazonRedshiftClient redshift = new AmazonRedshiftClient(getCredentials().getCredentials());
                redshift.setRegion(region);

                DescribeClustersResult clusterResult;
                List<Cluster> clusters;
                try {
                    clusterResult = redshift.describeClusters();
                    clusters = clusterResult.getClusters();
                } catch (Exception e) {
                    // Wrap with context and keep the cause (originally dropped).
                    throw new AmazonClientException("Failed to get clusters " + e.getMessage(), e);
                }

                getLogger().info("Redshift, Adding " + clusters.size() + " clusters from " + region.getName());
                for (Cluster cluster : clusters) {
                    getLogger().info("Cluster: " + cluster.getClusterIdentifier());
                    services.add(new LocalRedshiftService(cluster.getClusterIdentifier(), getCredentials(),
                            region, cluster, getLogger()));
                }
            } else {
                getLogger().info("Redshift, NOPE from " + region.getName());

            }
        } catch (AmazonClientException e) {
            // Prefix the region for context and preserve the cause chain.
            throw new AmazonClientException(region.getName() + " " + e.getMessage(), e);
        }
        completed.set(completed.get() + 1);
    }
}

From source file:com.optimalbi.Controller.AmazonAccount.java

License:Apache License

/**
 * Discovers all RDS database instances in every configured region that
 * supports the service and wraps each in a {@code LocalRDSService} added to
 * {@code services}. Increments {@code completed} once per region for
 * progress reporting.
 *
 * @throws AmazonClientException if a region's describe call fails; the
 *         region name is prefixed to the message and the original exception
 *         is kept as the cause
 */
private void populateRDS() throws AmazonClientException {
    for (Region region : getRegions()) {
        try {
            if (region.isServiceSupported(ServiceAbbreviations.RDS)) {
                AmazonRDSClient rds = new AmazonRDSClient(getCredentials().getCredentials());
                rds.setRegion(region);

                DescribeDBInstancesResult result = rds.describeDBInstances();
                List<DBInstance> instances = result.getDBInstances();

                getLogger().info("RDS, Adding " + instances.size() + " instances from " + region.getName());
                services.addAll(
                        instances
                                .stream().map(i -> new LocalRDSService(i.getDBInstanceIdentifier(),
                                        getCredentials(), region, i, getLogger()))
                                .collect(Collectors.toList()));
            } else {
                getLogger().info("RDS, NOPE from " + region.getName());
            }
        } catch (AmazonClientException e) {
            // Prefix the region for context and preserve the cause chain
            // (the original code discarded `e` and kept only its message).
            throw new AmazonClientException(region.getName() + " " + e.getMessage(), e);
        }
        completed.set(completed.get() + 1);

    }
}

From source file:com.optimalbi.Services.LocalEc2Service.java

License:Apache License

/**
 * Re-queries EC2 for this service's instance and caches the result in
 * {@code thisInstance}.
 *
 * @throws AmazonClientException if the lookup does not return exactly one
 *         instance for this service's ID
 */
public void refreshInstance() {
    DescribeInstancesResult describeInstancesResult = amazonEC2
            .describeInstances(new DescribeInstancesRequest().withInstanceIds(this.serviceID()));
    List<Reservation> reservations = describeInstancesResult.getReservations();
    List<Instance> inst = new ArrayList<>();

    for (Reservation reservation : reservations) {
        inst.addAll(reservation.getInstances());
    }
    // Exactly one instance must match. The original only rejected size() > 1,
    // so an empty result fell through to inst.get(0) and raised an unchecked
    // IndexOutOfBoundsException instead of a meaningful error.
    if (inst.size() != 1) {
        getLogger().error("Error in drawing instance " + this.serviceID());
        throw new AmazonClientException(this.serviceID() + " failed to draw");
    }
    thisInstance = inst.get(0);
}

From source file:com.rmn.qa.aws.MockAmazonEc2Client.java

License:Open Source License

/**
 * Test double: returns the canned {@code describeInstancesResult}, or throws
 * once when the error flag has been armed.
 *
 * @throws AmazonClientException exactly once after the flag is set; the flag
 *         is cleared before throwing so the next call succeeds
 */
@Override
public DescribeInstancesResult describeInstances(DescribeInstancesRequest describeInstancesRequest)
        throws AmazonClientException {
    if (!throwDescribeInstancesError) {
        return describeInstancesResult;
    }
    // Fail-once semantics: clear the flag before raising the simulated error.
    throwDescribeInstancesError = false;
    throw new AmazonClientException("testError");
}

From source file:eu.openg.aws.s3.internal.AmazonS3Fake.java

License:Apache License

@Override
public PutObjectResult putObject(String bucketName, String key, File file) {
    try {/*from   w  w w  .  jav a  2s . c  om*/
        return putObject(bucketName, key, new FileInputStream(file));
    } catch (IOException e) {
        throw new AmazonClientException(e);
    }
}

From source file:eu.openg.aws.s3.internal.FakeS3Object.java

License:Apache License

/**
 * Reads the supplied stream fully into memory, stores the bytes as this
 * object's content, and refreshes the content metadata accordingly.
 *
 * @throws AmazonClientException if reading the stream fails
 */
private void setContent(InputStream content) {
    try {
        final byte[] bytes = toByteArray(content);
        this.content = bytes;
        updateContentMetadata(bytes);
    } catch (IOException readFailure) {
        throw new AmazonClientException(readFailure);
    }
}

From source file:glacierpipe.GlacierPipe.java

License:Apache License

/**
 * Streams {@code in} to a Glacier vault as a multipart upload, computing the
 * SHA-256 tree hash of each part and of the whole archive, retrying failed
 * parts with exponential backoff, and reporting progress to the observer.
 *
 * @param client      Glacier client used for all service calls
 * @param vaultName   target vault
 * @param archiveDesc description recorded with the archive
 * @param in          source data; read to end-of-stream
 * @return the archive location returned by the complete-upload call
 * @throws IOException if the stream fails, a part exhausts its retries, or
 *         the upload is interrupted
 */
public String pipe(AmazonGlacierClient client, String vaultName, String archiveDesc, InputStream in)
        throws IOException {

    long currentPosition = 0;
    int partId = 0;

    try {
        byte[] buffer = new byte[4096];

        // Tree hash of the full archive, accumulated as bytes pass through.
        TreeHashMessageDigest completeHash = new TreeHashMessageDigest(MessageDigest.getInstance("SHA-256"));
        in = new DigestInputStream(in, completeHash);

        /**** Create an upload ID for the current upload ****/
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest().withVaultName(vaultName)
                .withArchiveDescription(archiveDesc).withPartSize(Long.toString(partSize));

        InitiateMultipartUploadResult result = client.initiateMultipartUpload(request);
        String uploadId = result.getUploadId();

        this.observer.gotUploadId(uploadId);

        /**** While there are still chunks to process ****/
        do {
            TreeHashMessageDigest partHash = new TreeHashMessageDigest(MessageDigest.getInstance("SHA-256"));

            // Fill up the buffer
            try (OutputStream bufferOut = this.buffer.getOutputStream();
                    OutputStream observedOut = new ObservedOutputStream(bufferOut,
                            new BufferingObserver(this.observer, partId));
                    DigestOutputStream out = new DigestOutputStream(observedOut, partHash);) {
                int read = 0;
                while (this.buffer.getRemaining() > 0 && (read = in.read(buffer, 0,
                        (int) Math.min(this.buffer.getRemaining(), buffer.length))) >= 0) {
                    out.write(buffer, 0, read);
                }
            }

            currentPosition += this.buffer.getLength();

            // If we read zero bytes, we reached the end of the stream.  Break.
            if (this.buffer.getLength() == 0) {
                break;
            }

            // Report the Tree Hash of this chunk
            byte[] byteChecksum = partHash.digest();
            String checksum = BinaryUtils.toHex(byteChecksum);
            this.observer.computedTreeHash(partId, byteChecksum);

            // Try to upload this chunk
            int attempts = 0;
            do {
                try (InputStream bufferIn = this.buffer.getInputStream();

                        // KLUDGE: Throttling really belongs closer to EntitySerializer.serialize(), but there
                        // wasn't an easy hook for it.  Throttling on input would work well enough, but
                        // client.uploadMultipartPart() calculates a SHA-256 checksum on the request before it
                        // sends it, then calls reset() on the stream.  Because we know this, don't throttle until
                        // reset() has been called at least once.
                        InputStream throttledIn = this.throttlingStrategy == null ? bufferIn
                                : new ThrottledInputStream(bufferIn, this.throttlingStrategy) {
                                    private long resets = 0;

                                    @Override
                                    public void setBytesPerSecond() {
                                        if (this.resets > 0) {
                                            super.setBytesPerSecond();
                                        }
                                    }

                                    @Override
                                    protected long getMaxRead(long currentTime) {
                                        return this.resets > 0 ? super.getMaxRead(currentTime) : Long.MAX_VALUE;
                                    }

                                    @Override
                                    public synchronized void reset() throws IOException {
                                        super.reset();
                                        this.resets++;
                                    }
                                };

                        InputStream observedIn = new ObservedInputStream(throttledIn,
                                new UploadObserver(this.observer, partId));) {

                    UploadMultipartPartRequest partRequest = new UploadMultipartPartRequest()
                            .withVaultName(vaultName).withBody(observedIn).withChecksum(checksum)
                            .withRange(String.format("bytes %d-%d/*", currentPosition - this.buffer.getLength(),
                                    currentPosition - 1))
                            .withUploadId(uploadId).withAccountId("-");

                    UploadMultipartPartResult partResult = client.uploadMultipartPart(partRequest);

                    // Verify the service computed the same tree hash we did.
                    if (!Arrays.equals(BinaryUtils.fromHex(partResult.getChecksum()), byteChecksum)) {
                        throw new AmazonClientException("Checksum mismatch");
                    }

                    break;
                } catch (AmazonClientException | IOException e) {
                    // Service and stream failures are handled identically, so the
                    // two previously duplicated catch blocks are unified here.
                    attempts++;
                    observer.exceptionUploadingPart(partId, e, attempts, attempts < this.maxRetries);

                    if (attempts >= this.maxRetries) {
                        throw new IOException("Failed to upload after " + attempts + " attempts", e);
                    }
                }

                try {
                    // Exponential backoff (1.5^attempts seconds), capped at 300 s.
                    long sleepingFor = 1000 * (attempts < 15 ? (long) Math.pow(1.5, attempts) : 300);
                    this.observer.sleepingBeforeRetry(sleepingFor);
                    Thread.sleep(sleepingFor);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag for callers before translating.
                    Thread.currentThread().interrupt();
                    throw new IOException("Upload interrupted", e);
                }
            } while (true);

            partId++;
        } while (this.buffer.getRemaining() == 0);

        byte[] completeChecksum = completeHash.digest();

        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
                .withVaultName(vaultName).withUploadId(uploadId)
                .withChecksum(BinaryUtils.toHex(completeChecksum))
                .withArchiveSize(Long.toString(currentPosition));

        CompleteMultipartUploadResult compResult = client.completeMultipartUpload(compRequest);
        String location = compResult.getLocation();

        this.observer.done(completeChecksum, location);
        return location;

    } catch (IOException | AmazonClientException e) {
        // Report fatal failures before propagating; precise rethrow keeps the
        // declared `throws IOException` valid (AmazonClientException is unchecked).
        this.observer.fatalException(e);
        throw e;
    } catch (NoSuchAlgorithmException e) {
        // Every conforming JRE ships SHA-256; absence is an environment error.
        throw new RuntimeException("SHA-256 not available", e);
    }
}

From source file:hudson.plugins.ec2.SlaveTemplate.java

License:Open Source License

private List<BlockDeviceMapping> getAmiBlockDeviceMappings() {

    /*/*from w  ww.  j av a  2s.com*/
     * AmazonEC2#describeImageAttribute does not work due to a bug
     * https://forums.aws.amazon.com/message.jspa?messageID=231972
     */
    for (final Image image : getParent().connect().describeImages().getImages()) {

        if (ami.equals(image.getImageId())) {

            return image.getBlockDeviceMappings();
        }
    }

    throw new AmazonClientException("Unable to get AMI device mapping for " + ami);
}