Example usage for com.amazonaws AmazonClientException AmazonClientException

Introduction

On this page you can find example usage for the com.amazonaws AmazonClientException(String message, Throwable t) constructor.

Prototype

public AmazonClientException(String message, Throwable t) 

Document

Creates a new AmazonClientException with the specified message and root cause.
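
Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of the typical pattern: catch a lower-level checked exception and rethrow it as an AmazonClientException so that the root cause is preserved. The class and helper names are hypothetical.

import java.net.URI;
import java.net.URISyntaxException;

import com.amazonaws.AmazonClientException;

public class WrapCauseExample {

    // Hypothetical helper: converts a raw string to a URI, rethrowing the
    // checked URISyntaxException as an unchecked AmazonClientException
    // with the original exception attached as the root cause.
    static URI toUri(String rawUrl) {
        try {
            return new URI(rawUrl);
        } catch (URISyntaxException e) {
            throw new AmazonClientException("Received a malformed URL: " + rawUrl, e);
        }
    }

    public static void main(String[] args) {
        try {
            toUri("ht tp://not a valid uri");
        } catch (AmazonClientException e) {
            // The wrapped URISyntaxException is still available via getCause().
            System.out.println(e.getMessage() + " (cause: " + e.getCause() + ")");
        }
    }
}

Passing the original exception as the second constructor argument keeps its stack trace reachable through getCause(), which is the pattern most of the examples below follow.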

Usage

From source file:org.mule.module.s3.simpleapi.SimpleAmazonS3AmazonDevKitImpl.java

License:Open Source License

public URI createObjectPresignedUri(@NotNull S3ObjectId objectId, Date expiration, HttpMethod method) {
    Validate.notNull(objectId);
    try {
        return s3.generatePresignedUrl(objectId.getBucketName(), objectId.getKey(), expiration, method).toURI();
    } catch (URISyntaxException e) {
        throw new AmazonClientException("S3 returned a malformed URI", e);
    }
}

From source file:org.mule.modules.sqs.SQSConnector.java

License:Open Source License

/**
 * Attempts to receive a message from the queue. Every attribute of the incoming
 * message will be added as an inbound property. The following properties
 * will also be added:
 * <p/>
 * sqs.message.id = containing the message identifier
 * sqs.message.receipt.handle = containing the message receipt handle
 * <p/>
 * {@sample.xml ../../../doc/mule-module-sqs.xml.sample sqs:receive-messages}
 *
 * @param callback          Callback to call when a new message is available.
 * @param visibilityTimeout the duration (in seconds) the retrieved message is hidden from
 *                          subsequent calls to retrieve.
 * @param preserveMessages    Flag that indicates if you want to preserve the messages
 *                            in the queue. False by default, so the messages are
 *                            going to be deleted.
 * @param pollPeriod        time in milliseconds to wait between polls (when no messages were retrieved). 
 *                          Default period is 1000 ms.
 * @param numberOfMessages  the number of messages to be retrieved on each call (10 messages max). 
 *                      By default, 1 message will be retrieved.                                 
 * @throws AmazonClientException
 *             If any internal errors are encountered inside the client while
 *             attempting to make the request or handle the response.  For example
 *             if a network connection is not available.
 * @throws AmazonServiceException
 *             If an error response is returned by AmazonSQS indicating
 *             either a problem with the data in the request, or a server side issue.
 */
@Source
@InvalidateConnectionOn(exception = AmazonClientException.class)
public void receiveMessages(SourceCallback callback, @Optional @Default("30") Integer visibilityTimeout,
        @Optional @Default("false") Boolean preserveMessages, @Optional @Default("1000") Long pollPeriod,
        @Optional @Default("1") Integer numberOfMessages) throws AmazonServiceException {

    List<Message> messages;
    ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest();
    receiveMessageRequest.setQueueUrl(getQueueUrl());

    if (visibilityTimeout != null) {
        receiveMessageRequest.setVisibilityTimeout(visibilityTimeout);
    }
    receiveMessageRequest.setMaxNumberOfMessages(numberOfMessages);

    while (!Thread.interrupted()) {
        messages = msgQueue.receiveMessage(receiveMessageRequest).getMessages();
        try {
            if (messages.size() == 0) {
                Thread.sleep(pollPeriod);
                continue;
            }
            for (Message msg : messages) {
                callback.process(msg.getBody(), createProperties(msg));
                if (!preserveMessages) {
                    msgQueue.deleteMessage(new DeleteMessageRequest(getQueueUrl(), msg.getReceiptHandle()));
                }
            }
        } catch (InterruptedException e) {
            logger.error(e.getMessage(), e);
        } catch (Exception e) {
            throw new AmazonClientException("Error while processing message.", e);
        }
    }
}

From source file:org.transitime.maintenance.AwsGlacierInventoryRetriever.java

License:Open Source License

/**
 * For retrieving vault inventory.
 * 
 * @param vaultName
 * @param jobId
 * @param outputFileName
 * @throws IOException
 */
private void downloadJobOutput(String vaultName, String jobId, String outputFileName) throws IOException {
    logger.info("Downloading job output from vaultName={} into " + "outputFileName={} for jobId={}", vaultName,
            outputFileName, jobId);

    GetJobOutputRequest getJobOutputRequest = new GetJobOutputRequest().withVaultName(vaultName)
            .withJobId(jobId);
    GetJobOutputResult getJobOutputResult = glacierClient.getJobOutput(getJobOutputRequest);

    FileWriter fstream = new FileWriter(outputFileName);
    BufferedWriter out = new BufferedWriter(fstream);
    BufferedReader in = new BufferedReader(new InputStreamReader(getJobOutputResult.getBody()));
    String inputLine;
    try {
        while ((inputLine = in.readLine()) != null) {
            out.write(inputLine);
        }
    } catch (IOException e) {
        throw new AmazonClientException("Unable to save archive for " + "vaultName=" + vaultName, e);
    } finally {
        try {
            in.close();
        } catch (Exception e) {
        }
        try {
            out.close();
        } catch (Exception e) {
        }
    }

    logger.info("Retrieved inventory for vaultName={} to file={}", vaultName, outputFileName);
}

From source file:oulib.aws.Main.java

public static void main(String[] args) {

    try {
        AWSCredentials credentials = null;
        AmazonS3 s3Client = null;
        //            args = new String[4];
        //            args[0] = "ul-bagit";
        //            args[1] = "ul-ir-workspace";
        //            args[2] = "Borelli_1680-1681";
        //            args[3] = "6";
        try {
            credentials = new ProfileCredentialsProvider("default").getCredentials();
        } catch (Exception e) {
            String access_key_id = null;
            String secret_key_id = null;
            String credentialInfo = AwsUtil.getAwsCredentials();
            ObjectMapper mapper = new ObjectMapper();
            Map<String, String> credentialInfoMap = new HashMap<>();
            credentialInfoMap = mapper.readValue(credentialInfo, HashMap.class);
            for (String key : credentialInfoMap.keySet()) {

                if ("AccessKeyId".equals(key)) {
                    access_key_id = credentialInfoMap.get(key);
                } else if ("SecretAccessKey".equals(key)) {
                    secret_key_id = credentialInfoMap.get(key);
                }
            }
            //                System.out.println("access_key_id = "+access_key_id+" access_key_id = "+access_key_id);
            if (null != access_key_id && null != secret_key_id) {
                credentials = new BasicAWSCredentials(access_key_id, secret_key_id);
                //                    s3Client = AmazonS3ClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(awsCreds)).build();
            } else {
                throw new AmazonClientException("Cannot load the credentials from the credential information. "
                        + "Please make sure that your credentials file is at the correct, and is in valid format.",
                        e);
            }
        }

        ClientConfiguration config = new ClientConfiguration();
        config.setConnectionTimeout(250000);
        config.setSocketTimeout(50000);

        s3Client = new AmazonS3Client(credentials, config);
        Region usEast = Region.getRegion(Regions.US_EAST_1);
        s3Client.setRegion(usEast);

        String bookName = args[2];

        S3BookInfo bookInfo = new S3BookInfo();
        bookInfo.setBookName(bookName);
        bookInfo.setBucketSourceName(args[0]);
        bookInfo.setBucketTargetName(args[1]);
        bookInfo.setCompressionSize(15000000);

        // *** Generate metadata *****

        //                S3Util.copyS3ObjectTiffMetadata(s3client, "ul-bagit", "ul-ir-workspace", "Zuniga_1591/data/004.tif", "Zuniga_1591/data/004.tif");
        //                S3Util.copyS3ObjectTiffMetadata(s3client, "ul-bagit", "ul-ir-workspace", "Zuniga_1591/data/004.tif", "Zuniga_1591/data/004-20.tif");
        //            S3Util.copyS3ObjectTiffMetadata(s3client, "ul-bagit", "ul-ir-workspace", "Zuniga_1591/data/004.tif", "Zuniga_1591/data/004-50.tif");

        // *** Generate small tiffs *****
        Integer threadMaxCount = 0;
        try {
            threadMaxCount = Integer.valueOf(args[3]);
        } catch (Exception ex) {
            ex.printStackTrace();//logger.error("Cannot parse the thread count! "+ex.getMessage());
            return;
        }
        System.out.println(
                "arg0 = " + args[0] + " arg1 = " + args[1] + " arg2 = " + args[2] + " arg3 = " + args[3]);
        ExecutorService executor = Executors.newFixedThreadPool(threadMaxCount);
        List<String> tiffDiff = S3Util.getBucketObjectKeyList(bookInfo.getBucketSourceName(), args[2],
                s3Client);//.getS3BucketFolderObjDiff(s3Client, args[0], bookName+"/data", args[1], bookName+"/data");
        int diff = tiffDiff.size();
        if (diff > 0) {
            System.out.println("There are totally " + String.valueOf(diff)
                    + " tiff images to process.\nStart processing at " + (new java.util.Date()).toString());
            AwsDataProcessorThreadFactory threadFactory = new AwsDataProcessorThreadFactory();
            for (int i = 0; i <= 10; i++) {
                //                    S3TiffProcessorThread s3TiffProcessorThread = new S3TiffProcessorThread(s3Client, bookInfo, String.valueOf(i)+".tif", tiffDiff);
                //                    threadFactory.setIndex(i);
                //                    threadFactory.setJobType("small-tiff-" + bookName);
                //                    executor.execute(threadFactory.newThread(s3TiffProcessorThread));
                //                    System.out.println("obj has path = "+bookInfo.getBucketSourceName() + tiffDiff.get(i));
                S3TiffMetadataProcessorThread thread = new S3TiffMetadataProcessorThread(s3Client, bookInfo,
                        String.valueOf(i) + ".tif", tiffDiff);
                threadFactory.setIndex(i);
                threadFactory.setJobType("tiff-metadata-" + bookName);
                executor.execute(threadFactory.newThread(thread));
            }
        } else {
            System.out.println("There are no tiff images to process");
        }
        executor.shutdown();
        while (!executor.isTerminated()) {
        }
        System.out.println("All the derivatives were generated at " + (new java.util.Date()).toString() + "!");

    } catch (Exception ex) {
        ex.printStackTrace();//logger.error("Cannot finish generating the small tiff images" + ex.getMessage());
    }
}

From source file:oulib.aws.s3.S3Util.java

/**
 * Use the default approach to get an AWS S3 client with the default region of US East.
 *
 * @return AmazonS3 : s3 client
 */
public static AmazonS3 getS3AwsClient() {

    AWSCredentials credentials = null;
    try {
        ProfileCredentialsProvider provider = new ProfileCredentialsProvider("default");
        credentials = provider.getCredentials();
        if (null == credentials) {
            throw new InvalidS3CredentialsException("Invalid credentials with default approach!");
        }
    } catch (InvalidS3CredentialsException | AmazonClientException e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/Users/zhao0677/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3client = new AmazonS3Client(credentials);
    Region usEast = Region.getRegion(Regions.US_EAST_1);
    s3client.setRegion(usEast);
    return s3client;
}

From source file:oulib.aws.s3.S3Util.java

public static AmazonS3 getS3Client() {

    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/Users/zhao0677/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3client = new AmazonS3Client(credentials);
    Region usEast = Region.getRegion(Regions.US_EAST_1);
    s3client.setRegion(usEast);
    return s3client;
}

From source file:pa3.RemoteClientSQS.java

License:Open Source License

/**
 * The only information needed to create a client is a set of security
 * credentials consisting of the AWS Access Key ID and Secret Access Key.
 * All other configuration, such as the service endpoints, is performed
 * automatically. Client parameters, such as proxies, can be specified in an
 * optional ClientConfiguration object when constructing a client.
 *
 * @see com.amazonaws.auth.BasicAWSCredentials
 * @see com.amazonaws.auth.ProfilesConfigFile
 * @see com.amazonaws.ClientConfiguration
 */
private static void initSQSandDynamoDB() throws Exception {
    /*
     * The ProfileCredentialsProvider will return your [default] credential
     * profile by reading from the credentials file located at
     * (C:\\Users\\HP\\.aws\\credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (C:\\Users\\HP\\.aws\\credentials), and is in valid format.", e);
    }
    dynamoDB = new AmazonDynamoDBClient(credentials);
    sqs = new AmazonSQSClient(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    dynamoDB.setRegion(usWest2);
    sqs.setRegion(usWest2);
}

From source file:pa3.RemoteWorkerSQS.java

License:Open Source License

/**
 * The only information needed to create a client is a set of security
 * credentials consisting of the AWS Access Key ID and Secret Access Key.
 * All other configuration, such as the service endpoints, is performed
 * automatically. Client parameters, such as proxies, can be specified in an
 * optional ClientConfiguration object when constructing a client.
 *
 * @see com.amazonaws.auth.BasicAWSCredentials
 * @see com.amazonaws.auth.ProfilesConfigFile
 * @see com.amazonaws.ClientConfiguration
 */

private static void initSQSandDynamoDB() throws Exception {
    /*
     * The ProfileCredentialsProvider will return your [default] credential
     * profile by reading from the credentials file located at
     * (C:\\Users\\HP\\.aws\\credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (C:\\Users\\HP\\.aws\\credentials), and is in valid format.", e);
    }
    dynamoDB = new AmazonDynamoDBClient(credentials);
    sqs = new AmazonSQSClient(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    dynamoDB.setRegion(usWest2);
    sqs.setRegion(usWest2);
}

From source file:pagerank.S3Wrapper.java

License:Open Source License

private void init() {
    /*
       * The ProfileCredentialsProvider will return your [default]
       * credential profile by reading from the credentials file located at
       * (/home/yupenglu/.aws/credentials).
       */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/home/yupenglu/.aws/credentials), and is in valid format.", e);
    }
    s3 = new AmazonS3Client(credentials);
    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
    isTruncated = objectListing.isTruncated();
}

From source file:pagerank.S3Wrapper.java

License:Open Source License

public static void main(String[] args) throws IOException {

    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (/home/yupenglu/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/home/yupenglu/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    //        Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    //        s3.setRegion(usWest2);

    //        String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String bucketName = "pages4.27";
    String key = "NewKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        //            System.out.println("Creating bucket " + bucketName + "\n");
        //            s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        //            System.out.println("Uploading a new object to S3 from a file\n");
        //            s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        //            System.out.println("Downloading an object");
        //            S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        //            System.out.println("Content-Type: "  + object.getObjectMetadata().getContentType());
        //            displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        //            ObjectListing objectListing = s3.listObjects(new ListObjectsRequest()
        //                    .withBucketName(bucketName)
        //                    .withPrefix("My"));
        ObjectListing objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + URLDecoder.decode(objectSummary.getKey(), "UTF-8") + "  " + "(size = "
                    + objectSummary.getSize() + ")");
        }
        S3Object testObj = s3.getObject(bucketName,
                URLEncoder.encode("http://finance.yahoo.com/investing-news/", "UTF-8"));
        S3ObjectInputStream inputStream = testObj.getObjectContent();

        //            System.out.println(streamToString(inputStream));
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //            System.out.println("Deleting an object\n");
        //            s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //            System.out.println("Deleting bucket " + bucketName + "\n");
        //            s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}