Example usage for com.amazonaws.services.s3 AmazonS3Client AmazonS3Client

List of usage examples for com.amazonaws.services.s3 AmazonS3Client AmazonS3Client

Introduction

In this page you can find the example usage for com.amazonaws.services.s3 AmazonS3Client AmazonS3Client.

Prototype

@Deprecated
public AmazonS3Client() 

Source Link

Document

Constructs a new client to invoke service methods on Amazon S3.

Usage

From source file:com.ub.ml.S3Sample.java

License:Open Source License

public static void main(String[] args) throws IOException {
    /*/*from  w  w w .  j ava2s.  c  om*/
     * Create your credentials file at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users) 
     * and save the following lines after replacing the underlined values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */

    AmazonS3 s3 = new AmazonS3Client();
    Region usWest2 = Region.getRegion(Regions.US_EAST_1);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //System.out.println("Deleting an object\n");
        //s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //System.out.println("Deleting bucket " + bucketName + "\n");
        //s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:cz.pichlik.goodsentiment.server.ioc.ObjectFactory.java

License:Apache License

public ObjectFactory() {
    this.s3Client = new AmazonS3Client();
    this.s3RepositoryBase = new S3RepositoryBase(this.s3Client);
    this.s3VoteRepository = new S3VoteRepository(this.s3RepositoryBase, eventDataBucket);
    this.eventDataReader = new EventDataReader(s3RepositoryBase, eventDataBucket);
    this.eventAggregator = new EventAggregator(aggregatedDataBucket, eventDataReader, s3RepositoryBase);
    this.dataJarAggregator = new DataJarAggregator(integrationDataBucket, s3RepositoryBase, eventDataReader);
}

From source file:dataprocessing.amazonwebservices.S3Client.java

License:GNU General Public License

/** *************************************************************
 * Constructor. Reads the target bucket name from the "aws"
 * resource bundle and creates an S3 client that uses the default
 * credential provider chain.
 */
public S3Client() {

    ResourceBundle awsConfig = ResourceBundle.getBundle("aws");
    bucket = awsConfig.getObject("bucket").toString();
    client = new AmazonS3Client();
}

From source file:doug.iotdemo.mojo.deployer.Deployer.java

License:Open Source License

/**
 * Uploads {@code source} (a file or a directory) to
 * {@code s3://bucketName/bucketKey}, blocking until the transfer completes.
 *
 * @throws MojoExecutionException if the bucket is missing, the source type is
 *         unknown, or the upload fails or is interrupted
 */
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    // Credentials/region come from the default AWS provider chain.
    AmazonS3 s3 = new AmazonS3Client();

    if (!s3.doesBucketExist(bucketName)) {
        throw new MojoExecutionException("bucket " + bucketName + " does not exist");
    }

    getLog().info("Uploading " + source.getName() + " to s3://" + bucketName + "/" + bucketKey);

    TransferManager manager = new TransferManager(s3);
    try {
        Transfer transfer;
        if (source.isFile()) {
            transfer = manager.upload(bucketName, bucketKey, source);
        } else if (source.isDirectory()) {
            // final arg: true -> recurse into subdirectories
            transfer = manager.uploadDirectory(bucketName, bucketKey, source, true);
        } else {
            throw new MojoExecutionException("Unknown file type " + source.getAbsolutePath());
        }

        transfer.waitForCompletion();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe it
        // (it was silently swallowed before).
        Thread.currentThread().interrupt();
        throw new MojoExecutionException("Upload to S3 failed", e);
    } finally {
        // Release TransferManager's internal thread pool; previously leaked.
        manager.shutdownNow();
    }
}

From source file:eu.openg.aws.s3.S3Service.java

License:Apache License

/**
 * Creates an S3Service backed by a default {@code AmazonS3Client}, which
 * resolves credentials and region from the runtime environment
 * (default AWS provider chain).
 */
public S3Service() {
    this(new AmazonS3Client());
}

From source file:gateway.controller.util.GatewayUtil.java

License:Apache License

/**
 * Initializes the Kafka producer and the S3 client on controller startup.
 * Explicit S3 credentials are applied only when both keys are configured;
 * otherwise the default AWS provider chain is used.
 */
@PostConstruct
public void init() {
    // Kafka Producer. Split the "host:port" address once instead of twice.
    String[] kafkaHostAndPort = KAFKA_ADDRESS.split(":");
    producer = KafkaClientFactory.getProducer(kafkaHostAndPort[0], kafkaHostAndPort[1]);
    // Connect to S3 Bucket. Only apply credentials if they are present.
    if ((AMAZONS3_ACCESS_KEY.isEmpty()) && (AMAZONS3_PRIVATE_KEY.isEmpty())) {
        s3Client = new AmazonS3Client();
    } else {
        BasicAWSCredentials credentials = new BasicAWSCredentials(AMAZONS3_ACCESS_KEY, AMAZONS3_PRIVATE_KEY);
        s3Client = new AmazonS3Client(credentials);
    }
}

From source file:generator.components.S3FileUtility.java

License:Apache License

/**
 * Upload file to s3 bucket/*w w w .  j a v a 2  s  .  com*/
 * 
 * @param file
 *            the object
 */
public String writeFileToS3(File file, FileLocation fileLocation) throws FileNotFoundException {

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(file.length());
    String fileKey = String.format("%s-%s", uuidFactory.getUUID(), file.getName());

    // BasicAWSCredentials credentials = new BasicAWSCredentials(AMAZONS3_ACCESS_KEY, AMAZONS3_PRIVATE_KEY);
    s3Client = new AmazonS3Client();

    // Making the object public
    PutObjectRequest putObj = new PutObjectRequest(S3_OUTPUT_BUCKET, fileKey, file);
    putObj.setCannedAcl(CannedAccessControlList.PublicRead);
    s3Client.putObject(putObj);

    return fileKey;
}

From source file:ingest.utility.IngestUtilities.java

License:Apache License

/**
 * Gets an instance of an S3 client to use.
 *
 * When no access/private key pair is configured, the default AWS credential
 * provider chain is used. With explicit credentials, either a plain client or
 * a KMS client-side-encryption client is returned, depending on the flag.
 * 
 * @param useEncryption
 *            True if encryption should be used (only for Piazza Bucket). For all external Buckets, encryption is
 *            not used.
 * 
 * @return The S3 client
 */
public AmazonS3 getAwsClient(boolean useEncryption) {
    AmazonS3 s3Client;
    if ((AMAZONS3_ACCESS_KEY.isEmpty()) && (AMAZONS3_PRIVATE_KEY.isEmpty())) {
        s3Client = new AmazonS3Client();
    } else {
        BasicAWSCredentials credentials = new BasicAWSCredentials(AMAZONS3_ACCESS_KEY, AMAZONS3_PRIVATE_KEY);
        // Set up encryption using the KMS CMK Key
        if (useEncryption) {
            // Client-side encryption via a KMS customer master key, pinned to US_EAST_1.
            KMSEncryptionMaterialsProvider materialProvider = new KMSEncryptionMaterialsProvider(S3_KMS_CMK_ID);
            s3Client = new AmazonS3EncryptionClient(credentials, materialProvider,
                    new CryptoConfiguration().withKmsRegion(Regions.US_EAST_1))
                            .withRegion(Region.getRegion(Regions.US_EAST_1));
        } else {
            s3Client = new AmazonS3Client(credentials);
        }
    }
    return s3Client;
}

From source file:io.ingenieux.lambda.shell.LambdaShell.java

License:Apache License

/**
 * Copies a local file to the S3 location named by an s3:// URI.
 */
private void copyFile(String sourceFile, String targetPath) {
    // Parse the destination first so a malformed URI fails before any S3 call.
    AmazonS3URI targetUri = new AmazonS3URI(targetPath);
    String destBucket = targetUri.getBucket();
    String destKey = targetUri.getKey();

    AmazonS3 s3Client = new AmazonS3Client();
    s3Client.putObject(new PutObjectRequest(destBucket, destKey, new File(sourceFile)));
}

From source file:io.klerch.alexa.state.handler.AWSS3StateHandler.java

License:Open Source License

/**
 * Takes the Alexa session. An AWS client for accessing the S3 bucket will make use
 * of all the defaults in your runtime environment in regards to AWS region and credentials. The
 * credentials of this client need permission for getting and putting objects to this bucket.
 * @param session The Alexa session of your current skill invocation.
 * @param bucketName The bucket where all saved states will go into.
 */
public AWSS3StateHandler(final Session session, final String bucketName) {
    this(session, new AmazonS3Client(), bucketName);
}