Example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

List of usage examples for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest.

Prototype

public PutObjectRequest(String bucketName, String key, InputStream input, ObjectMetadata metadata) 

Document

Constructs a new PutObjectRequest object to upload a stream of data to the specified bucket and key.
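Before the examples from real projects, here is a minimal, self-contained sketch of the constructor (the bucket name, key, and payload are hypothetical; setting the content length up front lets the client stream the upload instead of buffering the whole stream in memory to compute the length):

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutObjectRequestExample {
    public static void main(String[] args) {
        AmazonS3 s3client = AmazonS3ClientBuilder.defaultClient();

        byte[] payload = "hello, s3".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata metadata = new ObjectMetadata();
        // Without an explicit length, the SDK buffers the stream in memory and warns
        metadata.setContentLength(payload.length);
        metadata.setContentType("text/plain");

        s3client.putObject(new PutObjectRequest("my-bucket", "examples/hello.txt",
                new ByteArrayInputStream(payload), metadata));
    }
}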

Usage

From source file:oulib.aws.s3.S3Util.java

/**
 * Generate a small tiff file from a large Tiff S3 bucket object <br>
 * Note: the small tiff file will have the same key path as the original one
 * @param s3client : S3 client
 * @param s3 : S3 object that contains the source Tiff image
 * @param targetBucketName : the bucket that stores the small tiff file
 * @param targetKey : key of the object in the target bucket
 * @param compressionRate : compression rate
 * @return : PutObjectResult
 */
public static PutObjectResult generateSmallTiff(AmazonS3 s3client, S3Object s3, String targetBucketName,
        String targetKey, double compressionRate) {

    PutObjectResult result = null;
    ByteArrayOutputStream bos = null;
    ByteArrayOutputStream os = null;
    ByteArrayInputStream is = null;
    S3ObjectInputStream s = null;
    ByteArrayInputStream byteInputStream = null;

    try {
        System.setProperty("com.sun.media.jai.disableMediaLib", "true");

        bos = new ByteArrayOutputStream();
        s = s3.getObjectContent();
        byte[] bytes = IOUtils.toByteArray(s);
        byteInputStream = new ByteArrayInputStream(bytes);

        TIFFDecodeParam param = new TIFFDecodeParam();
        ImageDecoder dec = ImageCodec.createImageDecoder("TIFF", byteInputStream, param);

        RenderedImage image = dec.decodeAsRenderedImage();

        RenderingHints qualityHints = new RenderingHints(RenderingHints.KEY_RENDERING,
                RenderingHints.VALUE_RENDER_QUALITY);

        RenderedOp resizedImage = JAI.create("SubsampleAverage", image, compressionRate, compressionRate,
                qualityHints);

        TIFFEncodeParam params = new com.sun.media.jai.codec.TIFFEncodeParam();

        resizedImage = JAI.create("encode", resizedImage, bos, "TIFF", params);

        BufferedImage imagenew = resizedImage.getSourceImage(0).getAsBufferedImage();

        os = new ByteArrayOutputStream();
        ImageIO.write(imagenew, "tif", os);
        is = new ByteArrayInputStream(os.toByteArray());

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(os.toByteArray().length);
        metadata.setContentType("image/tiff");
        metadata.setLastModified(new Date());

        os.close();

        imagenew.flush();

        result = s3client.putObject(new PutObjectRequest(targetBucketName, targetKey, is, metadata));
    } catch (IOException | AmazonClientException ex) {
        Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        try {
            if (bos != null) {
                bos.close();
            }
            if (os != null) {
                os.close();
            }
            if (is != null) {
                is.close();
            }
            if (s != null) {
                s.close();
            }
            if (byteInputStream != null) {
                byteInputStream.close();
            }
        } catch (IOException ex) {
            Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    return result;
}
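Since all of the streams above implement Closeable, the long finally block can be collapsed with try-with-resources. A condensed sketch of just the upload step, assuming the encoded tiff bytes are already in hand (the putTiffBytes helper and its imageBytes parameter are ours, not the project's):

public static PutObjectResult putTiffBytes(AmazonS3 s3client, String targetBucketName, String targetKey,
        byte[] imageBytes) throws IOException {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(imageBytes.length);
    metadata.setContentType("image/tiff");
    metadata.setLastModified(new Date());
    // try-with-resources closes the stream even if putObject throws
    try (ByteArrayInputStream is = new ByteArrayInputStream(imageBytes)) {
        return s3client.putObject(new PutObjectRequest(targetBucketName, targetKey, is, metadata));
    }
}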

From source file:oulib.aws.s3.S3Util.java

/**
 * Pull the Tiff metadata out of the input S3 object and inject it into the
 * content of the target S3 object;<br>
 * Generate the new output S3 object that has the metadata from the input object.
 * @param s3client : S3 client
 * @param obj1 : input object that provides metadata
 * @param obj2 : target object that receives metadata
 * 
 * @return PutObjectResult
 */
public static PutObjectResult copyS3ObjectTiffMetadata(AmazonS3 s3client, S3Object obj1, S3Object obj2) {

    PutObjectResult result = null;

    BufferedInputStream bufferedInputStream = null;
    ByteArrayOutputStream byteArrayOutputStream = null;
    ByteArrayInputStream byteArrayInputStream = null;
    ByteArrayInputStream bis = null;
    S3ObjectInputStream content1 = null;
    S3ObjectInputStream content2 = null;
    String targetBucketName = obj2.getBucketName();
    String outputKey = obj2.getKey().split("\\.tif")[0] + "-copied.tif"; // split() takes a regex, so escape the dot

    ImageMetadata metadata1, metadata2;
    TiffImageMetadata tiffMetadata1, tiffMetadata2;
    TiffOutputSet output1, output2;

    try {
        content1 = obj1.getObjectContent();
        content2 = obj2.getObjectContent();

        byte[] bytes1 = IOUtils.toByteArray(content1);
        byte[] bytes2 = IOUtils.toByteArray(content2);

        metadata1 = Imaging.getMetadata(bytes1);
        metadata2 = Imaging.getMetadata(bytes2);

        tiffMetadata1 = (TiffImageMetadata) metadata1;
        tiffMetadata2 = (TiffImageMetadata) metadata2;

        output1 = tiffMetadata1.getOutputSet();
        output2 = tiffMetadata2.getOutputSet();

        TiffOutputDirectory rootDir = output2.getOrCreateRootDirectory();
        TiffOutputDirectory exifDir = output2.getOrCreateExifDirectory();
        TiffOutputDirectory gpsDir = output2.getOrCreateGPSDirectory();

        if (null != output1.getRootDirectory()) {
            List<TiffOutputField> fs = output1.getRootDirectory().getFields();
            for (TiffOutputField f1 : fs) {
                if (null == rootDir.findField(f1.tag)
                        // CANNOT create the output image with this tag included!
                        && !"PlanarConfiguration".equals(f1.tagInfo.name)) {
                    rootDir.add(f1);
                }
            }
        }

        if (null != output1.getExifDirectory()) {
            for (TiffOutputField f2 : output1.getExifDirectory().getFields()) {
                exifDir.removeField(f2.tagInfo);
                exifDir.add(f2);
            }
        }

        if (null != output1.getGPSDirectory()) {
            for (TiffOutputField f3 : output1.getGPSDirectory().getFields()) {
                gpsDir.removeField(f3.tagInfo);
                gpsDir.add(f3);
            }
        }

        byteArrayOutputStream = new ByteArrayOutputStream();
        TiffImageWriterLossy writerLossy = new TiffImageWriterLossy(output2.byteOrder);
        writerLossy.write(byteArrayOutputStream, output2);

        byteArrayInputStream = new ByteArrayInputStream(byteArrayOutputStream.toByteArray());

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(byteArrayOutputStream.toByteArray().length);
        metadata.setContentType("image/tiff");
        metadata.setLastModified(new Date());

        result = s3client
                .putObject(new PutObjectRequest(targetBucketName, outputKey, byteArrayInputStream, metadata));

    } catch (ImageReadException | IOException | ImageWriteException ex) {
        Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        try {
            if (null != content1) {
                content1.close();
            }
            if (null != content2) {
                content2.close();
            }
            if (null != bufferedInputStream) {
                bufferedInputStream.close();
            }
            if (null != byteArrayInputStream) {
                byteArrayInputStream.close();
            }
            if (null != byteArrayOutputStream) {
                byteArrayOutputStream.close();
            }
            if (null != bis) {
                bis.close();
            }
        } catch (IOException ex) {
            Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    return result;
}
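A hypothetical call site for this helper, fetching both objects first (the bucket and key names are made up for illustration):

AmazonS3 s3client = AmazonS3ClientBuilder.defaultClient();
S3Object source = s3client.getObject("my-bucket", "scans/page-001.tif");
S3Object target = s3client.getObject("my-bucket", "derivatives/page-001.tif");
// writes a new object derivatives/page-001-copied.tif carrying the source's Tiff metadata
PutObjectResult result = S3Util.copyS3ObjectTiffMetadata(s3client, source, target);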

From source file:pl.pawlik.cymes.controllers.FormController.java

@RequestMapping(value = "/upload", method = RequestMethod.POST)
public String handleFileUpload(@RequestParam("plik") MultipartFile file) {
    if (!file.isEmpty()) {
        try {

            //myObject.getClass().getProtectionDomain().getCodeSource()
            System.out.println("------------" + ObjectMapper.class.getProtectionDomain().getCodeSource());
            Logger.getLogger("FormController").log(Level.SEVERE,
                    "------------" + ObjectMapper.class.getProtectionDomain().getCodeSource());
            UUID uuid = UUID.randomUUID();
            String filename = "/uploads/upload_" + uuid.toString();
            String bucketName = "pawliktest";
            String accessKey = "xx";
            String secretKey = "xx";
            byte[] bytes = file.getBytes();
            InputStream inputStream = new ByteArrayInputStream(bytes);
            AmazonS3 s3client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
            s3client.putObject(new PutObjectRequest(bucketName, filename, inputStream, new ObjectMetadata()));

            System.out.println("File {} has been successfully uploaded as ");
        } catch (Exception e) {
            e.printStackTrace();
        }
    } else {
        System.out.println("Uploaded file is empty");
    }
    return "redirect:/cymes/upload";
}
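One caveat with this example: it passes an empty ObjectMetadata, so the SDK must buffer the stream in memory to compute the Content-Length before uploading (and logs a warning to that effect). Since the bytes are already in hand here, a sketch that sets the length and type explicitly:

byte[] bytes = file.getBytes();
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(bytes.length);
metadata.setContentType(file.getContentType()); // as reported by the multipart upload
s3client.putObject(new PutObjectRequest(bucketName, filename, new ByteArrayInputStream(bytes), metadata));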

From source file:raymond.mockftpserver.S3BucketFileSystem.java

License:Apache License

@Override
public void add(FileSystemEntry entry) {
    ObjectMetadata metaData = new ObjectMetadata();
    PutObjectRequest request;
    if (isDirectory(entry)) {
        metaData.setContentLength(0);
        InputStream is = new ByteArrayInputStream(new byte[0]);
        request = new PutObjectRequest(bucket, entry.getPath() + FOLDER_SUFFIX, is, metaData);
    } else {
        metaData.setContentLength(entry.getSize());
        request = new PutObjectRequest(bucket, entry.getPath(), ((FileEntry) entry).createInputStream(),
                metaData);
    }
    request.setStorageClass(StorageClass.ReducedRedundancy);
    s3.putObject(request);
}
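The zero-byte stream used in the directory branch is the usual way to create a standalone "folder" marker in S3; as a minimal sketch (the bucket and prefix are hypothetical):

ObjectMetadata metaData = new ObjectMetadata();
metaData.setContentLength(0);
// a key ending in "/" with empty content is rendered as a folder by most S3 browsers
s3.putObject(new PutObjectRequest("my-bucket", "some/prefix/", new ByteArrayInputStream(new byte[0]), metaData));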

From source file:squash.booking.lambdas.core.BackupManager.java

License:Apache License

@Override
public final void backupSingleBooking(Booking booking, Boolean isCreation)
        throws InterruptedException, JsonProcessingException {
    // Backup to the S3 bucket. This method will typically be called every time
    // a booking is mutated. We upload the booking to the same key, so the
    // versions of this key should provide a timeline of all individual bookings
    // in the sequence (or close to it) that they were made.

    if (!initialised) {
        throw new IllegalStateException("The backup manager has not been initialised");
    }

    // Encode booking as JSON
    String backupString = (isCreation ? "Booking created: " : "Booking deleted: ")
            + System.getProperty("line.separator") + mapper.writeValueAsString(booking);

    logger.log("Backing up single booking mutation to S3 bucket");
    IS3TransferManager transferManager = getS3TransferManager();
    byte[] bookingAsBytes = backupString.getBytes(StandardCharsets.UTF_8);
    ByteArrayInputStream bookingAsStream = new ByteArrayInputStream(bookingAsBytes);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bookingAsBytes.length);
    PutObjectRequest putObjectRequest = new PutObjectRequest(databaseBackupBucketName, "LatestBooking",
            bookingAsStream, metadata);
    TransferUtils.waitForS3Transfer(transferManager.upload(putObjectRequest), logger);
    logger.log("Backed up single booking mutation to S3 bucket: " + backupString);

    // Backup to the SNS topic
    logger.log("Backing up single booking mutation to SNS topic: " + adminSnsTopicArn);
    getSNSClient().publish(adminSnsTopicArn, backupString, "Sqawsh single booking backup");
}
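The bytes-to-stream-to-request sequence here recurs in each of the backup methods below; it could be factored into a small helper along these lines (a sketch; the helper name is ours, not the project's):

private static PutObjectRequest stringPutRequest(String bucketName, String key, String body) {
    byte[] bytes = body.getBytes(StandardCharsets.UTF_8);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bytes.length);
    return new PutObjectRequest(bucketName, key, new ByteArrayInputStream(bytes), metadata);
}

// usage, matching the call above:
// TransferUtils.waitForS3Transfer(
//         transferManager.upload(stringPutRequest(databaseBackupBucketName, "LatestBooking", backupString)),
//         logger);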

From source file:squash.booking.lambdas.core.BackupManager.java

License:Apache License

@Override
public final void backupSingleBookingRule(BookingRule bookingRule, Boolean isNotDeletion)
        throws InterruptedException, JsonProcessingException {
    // Backup to the S3 bucket. This method will typically be called every time
    // a booking rule is mutated. We upload the booking rule to the same key, so
    // the versions of this key should provide a timeline of all individual
    // booking rules in the sequence (or close to it) that they were made.

    if (!initialised) {
        throw new IllegalStateException("The backup manager has not been initialised");
    }

    // Encode booking rule as JSON
    String backupString = (isNotDeletion ? "Booking rule updated: " : "Booking rule deleted: ")
            + System.getProperty("line.separator") + mapper.writeValueAsString(bookingRule);

    logger.log("Backing up single booking rule mutation to S3 bucket");
    IS3TransferManager transferManager = getS3TransferManager();
    byte[] bookingRuleAsBytes = backupString.getBytes(StandardCharsets.UTF_8);
    ByteArrayInputStream bookingRuleAsStream = new ByteArrayInputStream(bookingRuleAsBytes);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bookingRuleAsBytes.length);
    PutObjectRequest putObjectRequest = new PutObjectRequest(databaseBackupBucketName, "LatestBookingRule",
            bookingRuleAsStream, metadata);
    TransferUtils.waitForS3Transfer(transferManager.upload(putObjectRequest), logger);
    logger.log("Backed up single booking rule mutation to S3 bucket: " + backupString);

    // Backup to the SNS topic
    logger.log("Backing up single booking rule mutation to SNS topic: " + adminSnsTopicArn);
    getSNSClient().publish(adminSnsTopicArn, backupString, "Sqawsh single booking rule backup");
}

From source file:squash.booking.lambdas.core.BackupManager.java

License:Apache License

@Override
public final ImmutablePair<List<Booking>, List<BookingRule>> backupAllBookingsAndBookingRules()
        throws Exception {

    if (!initialised) {
        throw new IllegalStateException("The backup manager has not been initialised");
    }

    // Encode bookings and booking rules as JSON
    JsonNodeFactory factory = new JsonNodeFactory(false);
    // Create a json factory to write the treenode as json.
    JsonFactory jsonFactory = new JsonFactory();
    ObjectNode rootNode = factory.objectNode();

    ArrayNode bookingsNode = rootNode.putArray("bookings");
    List<Booking> bookings = bookingManager.getAllBookings(false);
    for (Booking booking : bookings) {
        bookingsNode.add((JsonNode) (mapper.valueToTree(booking)));
    }

    ArrayNode bookingRulesNode = rootNode.putArray("bookingRules");
    List<BookingRule> bookingRules = ruleManager.getRules(false);
    for (BookingRule bookingRule : bookingRules) {
        bookingRulesNode.add((JsonNode) (mapper.valueToTree(bookingRule)));
    }

    // Add this, as will be needed for restore in most common case.
    rootNode.put("clearBeforeRestore", true);

    ByteArrayOutputStream backupDataStream = new ByteArrayOutputStream();
    PrintStream printStream = new PrintStream(backupDataStream);
    try (JsonGenerator generator = jsonFactory.createGenerator(printStream)) {
        mapper.writeTree(generator, rootNode);
    }
    String backupString = backupDataStream.toString(StandardCharsets.UTF_8.name());

    logger.log("Backing up all bookings and booking rules to S3 bucket");
    IS3TransferManager transferManager = getS3TransferManager();
    byte[] backupAsBytes = backupString.getBytes(StandardCharsets.UTF_8);
    ByteArrayInputStream backupAsStream = new ByteArrayInputStream(backupAsBytes);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(backupAsBytes.length);
    PutObjectRequest putObjectRequest = new PutObjectRequest(databaseBackupBucketName,
            "AllBookingsAndBookingRules", backupAsStream, metadata);
    TransferUtils.waitForS3Transfer(transferManager.upload(putObjectRequest), logger);
    logger.log("Backed up all bookings and booking rules to S3 bucket: " + backupString);

    // Backup to the SNS topic
    logger.log("Backing up all bookings and booking rules to SNS topic: " + adminSnsTopicArn);
    getSNSClient().publish(adminSnsTopicArn, backupString, "Sqawsh all-bookings and booking rules backup");

    return new ImmutablePair<>(bookings, bookingRules);
}

From source file:squash.booking.lambdas.core.PageManager.java

License:Apache License

private void copyJsonDataToS3(String keyName, String jsonToCopy) throws Exception {

    logger.log("About to copy cached json data to S3");

    try {
        logger.log("Uploading json data to S3 bucket: " + websiteBucketName + " and key: " + keyName + ".json");
        byte[] jsonAsBytes = jsonToCopy.getBytes(StandardCharsets.UTF_8);
        ByteArrayInputStream jsonAsStream = new ByteArrayInputStream(jsonAsBytes);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(jsonAsBytes.length);
        metadata.setContentType("application/json");
        // Direct caches not to satisfy future requests with this data without
        // revalidation.
        if (keyName.contains("famousplayers")) {
            // Famousplayers list is good for a year
            metadata.setCacheControl("max-age=31536000");
        } else {
            metadata.setCacheControl("no-cache, must-revalidate");
        }
        PutObjectRequest putObjectRequest = new PutObjectRequest(websiteBucketName, keyName + ".json",
                jsonAsStream, metadata);
        // Data must be public so it can be served from the website
        putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
        IS3TransferManager transferManager = getS3TransferManager();
        TransferUtils.waitForS3Transfer(transferManager.upload(putObjectRequest), logger);
        logger.log("Uploaded cached json data to S3 bucket");
    } catch (AmazonServiceException ase) {
        ExceptionUtils.logAmazonServiceException(ase, logger);
        throw new Exception("Exception caught while copying json data to S3");
    } catch (AmazonClientException ace) {
        ExceptionUtils.logAmazonClientException(ace, logger);
        throw new Exception("Exception caught while copying json data to S3");
    } catch (InterruptedException e) {
        logger.log("Caught interrupted exception: ");
        logger.log("Error Message: " + e.getMessage());
        throw new Exception("Exception caught while copying json data to S3");
    }
}
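The Cache-Control branching could equally be pulled into a small lookup; a sketch (the helper name is ours, the key pattern and header values are the ones above):

private static String cacheControlFor(String keyName) {
    // the famousplayers list is good for a year; everything else must revalidate
    return keyName.contains("famousplayers") ? "max-age=31536000" : "no-cache, must-revalidate";
}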

From source file:squash.booking.lambdas.core.PageManager.java

License:Apache License

private void copyUpdatedBookingPageToS3(String pageBaseName, String page, String uidSuffix, boolean usePrefix)
        throws Exception {

    logger.log("About to copy booking page to S3");

    String pageBaseNameWithPrefix = usePrefix ? "NoScript/" + pageBaseName : pageBaseName;
    try {
        logger.log("Uploading booking page to S3 bucket: " + websiteBucketName + "s3websitebucketname"
                + " and key: " + pageBaseNameWithPrefix + uidSuffix + ".html");
        byte[] pageAsGzippedBytes = FileUtils.gzip(page.getBytes(StandardCharsets.UTF_8), logger);

        ByteArrayInputStream pageAsStream = new ByteArrayInputStream(pageAsGzippedBytes);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(pageAsGzippedBytes.length);
        metadata.setContentEncoding("gzip");
        metadata.setContentType("text/html");
        // Direct caches not to satisfy future requests with this data without
        // revalidation.
        metadata.setCacheControl("no-cache, must-revalidate");
        PutObjectRequest putObjectRequest = new PutObjectRequest(websiteBucketName,
                pageBaseNameWithPrefix + uidSuffix + ".html", pageAsStream, metadata);
        // Page must be public so it can be served from the website
        putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
        IS3TransferManager transferManager = getS3TransferManager();
        TransferUtils.waitForS3Transfer(transferManager.upload(putObjectRequest), logger);
        logger.log("Uploaded booking page to S3 bucket");

        if (uidSuffix.equals("")) {
            // Nothing to copy - so return
            logger.log("UidSuffix is empty - so not creating duplicate page");
            return;
        }

        // N.B. We copy from hashed key to non-hashed (and not vice versa)
        // to ensure consistency
        logger.log("Copying booking page in S3 bucket: " + websiteBucketName + " and key: "
                + pageBaseNameWithPrefix + ".html");
        CopyObjectRequest copyObjectRequest = new CopyObjectRequest(websiteBucketName,
                pageBaseNameWithPrefix + uidSuffix + ".html", websiteBucketName,
                pageBaseNameWithPrefix + ".html");
        copyObjectRequest.setCannedAccessControlList(CannedAccessControlList.PublicRead);
        // N.B. Copied object will get same metadata as the source (e.g. the
        // cache-control header etc.)
        TransferUtils.waitForS3Transfer(transferManager.copy(copyObjectRequest), logger);
        logger.log("Copied booking page successfully in S3");
    } catch (AmazonServiceException ase) {
        ExceptionUtils.logAmazonServiceException(ase, logger);
        throw new Exception("Exception caught while copying booking page to S3");
    } catch (AmazonClientException ace) {
        ExceptionUtils.logAmazonClientException(ace, logger);
        throw new Exception("Exception caught while copying booking page to S3");
    } catch (InterruptedException e) {
        logger.log("Caught interrupted exception: ");
        logger.log("Error Message: " + e.getMessage());
        throw new Exception("Exception caught while copying booking page to S3");
    }
}

From source file:squash.deployment.lambdas.AngularjsAppCustomResourceLambda.java

License:Apache License

/**
 * Implementation for the AWS Lambda function backing the AngularjsApp
 * resource.
 * 
 * <p>
 * This lambda requires the following environment variables:
 * <ul>
 * <li>WebsiteBucket - name of S3 bucket serving the booking website.</li>
 * <li>AngularjsZipBucket - S3 bucket holding the Angularjs app zip file.</li>
 * <li>CognitoIdentityPoolId - id of the Cognito Identity Pool.</li>
 * <li>CognitoUserPoolId - id of the Cognito User Pool.</li>
 * <li>CognitoUserPoolIdentityProviderName - Name of user pool identity provider.</li>
 * <li>JavascriptClientAppId - id of the Cognito User Pool app to use from javascript.</li>
 * <li>ApiGatewayBaseUrl - base Url of the ApiGateway Api.</li>
 * <li>Region - the AWS region in which the Cloudformation stack is created.</li>
 * <li>Revision - integer incremented to force stack updates to update this resource.</li>
 * </ul>
 *
 * <p>On success, it returns the following output to Cloudformation:
 * <ul>
 *    <li>WebsiteURL - Url of the Angularjs website.</li>
 * </ul>
 *
 * <p>Updates will delete the previous deployment and replace it with the new one.
 *
 * @param request
 *            request parameters as provided by the CloudFormation service
 * @param context
 *            context as provided by the CloudFormation service
 */
@Override
public Object handleRequest(Map<String, Object> request, Context context) {

    LambdaLogger logger = context.getLogger();
    logger.log("Starting AngularjsApp custom resource handleRequest");

    // Handle standard request parameters
    Map<String, String> standardRequestParameters = LambdaInputLogger.logStandardRequestParameters(request,
            logger);
    String requestType = standardRequestParameters.get("RequestType");

    // Handle required environment variables
    logger.log("Logging required environment variables for custom resource request");
    String websiteBucket = System.getenv("WebsiteBucket");
    String angularjsZipBucket = System.getenv("AngularjsZipBucket");
    String cognitoIdentityPoolId = System.getenv("CognitoIdentityPoolId");
    String cognitoUserPoolId = System.getenv("CognitoUserPoolId");
    String cognitoUserPoolIdentityProviderName = System.getenv("CognitoUserPoolIdentityProviderName");
    String javascriptClientAppId = System.getenv("JavascriptClientAppId");
    String apiGatewayBaseUrl = System.getenv("ApiGatewayBaseUrl");
    String region = System.getenv("AWS_REGION");
    String revision = System.getenv("Revision");

    // Log out our required environment variables
    logger.log("WebsiteBucket: " + websiteBucket);
    logger.log("AngularjsZipBucket: " + angularjsZipBucket);
    logger.log("CognitoIdentityPoolId: " + cognitoIdentityPoolId);
    logger.log("CognitoUserPoolId: " + cognitoUserPoolId);
    logger.log("CognitoUserPoolIdentityProviderName: " + cognitoUserPoolIdentityProviderName);
    logger.log("JavascriptClientAppId: " + javascriptClientAppId);
    logger.log("ApiGatewayBaseUrl: " + apiGatewayBaseUrl);
    logger.log("Region: " + region);
    logger.log("Revision: " + revision);

    // API calls below can sometimes give access denied errors during stack
    // creation, which I think is because required new roles have not yet
    // propagated across AWS. We sleep here to allow time for this propagation.
    try {
        Thread.sleep(10000);
    } catch (InterruptedException e) {
        logger.log("Sleep to allow new roles to propagate has been interrupted.");
    }

    // Prepare our response to be sent in the finally block
    CloudFormationResponder cloudFormationResponder = new CloudFormationResponder(standardRequestParameters,
            "DummyPhysicalResourceId");
    // Initialise failure response, which will be changed on success
    String responseStatus = "FAILED";

    String websiteURL = null;
    try {
        cloudFormationResponder.initialise();

        if (requestType.equals("Create") || requestType.equals("Update")) {

            // On updates we clear out the app first
            if (requestType.equals("Update")) {
                deleteAngularjsApp(websiteBucket, logger);
            }

            // Get the Angularjs app's zip file
            try {
                logger.log("Downloading Angularjs zip from S3");
                IS3TransferManager transferManager = getS3TransferManager();
                String zipDownloadPath = "/tmp/AngularjsApp.zip";
                File downloadedFile = new File(zipDownloadPath);
                TransferUtils.waitForS3Transfer(
                        transferManager.download(angularjsZipBucket, "AngularjsApp.zip", downloadedFile),
                        logger);
                logger.log("Downloaded Angularjs zip successfully from S3");

                // Modify the Bookings and Identity Service files to point to the
                // correct Cognito data, ApiGateway base url, and region.
                logger.log("Extracting Angularjs zip");
                String extractPath = "/tmp";
                try {
                    ZipFile zipFile = new ZipFile(zipDownloadPath);
                    // Will produce /tmp/app/app.js etc
                    zipFile.extractAll(extractPath);
                } catch (ZipException e) {
                    logger.log("Caught a ZipException Exception: " + e.getMessage());
                    throw e;
                }
                logger.log("Extracted Angularjs zip");

                logger.log(
                        "Modifying the Bookings and Identity Services to point to the correct ApiGatewayBaseUrl, Cognito data, and region");
                String fileContent;
                String filePath = extractPath + "/app/sqawsh.min.js";
                try (FileInputStream inputStream = new FileInputStream(filePath)) {
                    fileContent = IOUtils.toString(inputStream);
                }
                fileContent = fileContent.replace("bookingregiontobereplaced", region)
                        .replace("bookingurltobereplaced", apiGatewayBaseUrl)
                        .replace("bookingbuckettobereplaced", websiteBucket)
                        .replace("identityregiontobereplaced", region)
                        .replace("identitypoolidtobereplaced", cognitoIdentityPoolId)
                        .replace("identityuserpoolidtobereplaced", cognitoUserPoolId)
                        .replace("identityprovidernametobereplaced", cognitoUserPoolIdentityProviderName)
                        .replace("identityappidtobereplaced", javascriptClientAppId);

                FileUtils.writeStringToFile(new File(filePath), fileContent);
                logger.log(
                        "Modified the Bookings and Identity Services to point to the correct ApiGatewayBaseUrl, Cognito data, and region");

                // We will later modify the gzip-ed filenames to add a revving suffix.
                // But before we gzip, we need to modify the revved file links in
                // index.html
                String revvingSuffix = System.getenv("RevvingSuffix");
                File appPath = new File("/tmp/app");
                logger.log("Modifying links to revved files in index.html");
                Path indexPath = new File(appPath, "index.html").toPath();
                Charset charset = StandardCharsets.UTF_8;
                List<String> newLines = new ArrayList<>();
                for (String line : Files.readAllLines(indexPath, charset)) {
                    if (line.contains("googleapis") || line.contains("cloudflare") || line.contains("maxcdn")) {
                        // Don't alter lines linking to cdn-s. They are already revved.
                        newLines.add(line);
                    } else {
                        newLines.add(line.replace(".js", "_" + revvingSuffix + ".js").replace(".css",
                                "_" + revvingSuffix + ".css"));
                    }
                }
                Files.write(indexPath, newLines, charset);
                logger.log("Modified links to revved files in index.html");

                // GZIP all js, css, and html files within app folder
                logger.log("GZip-ing files in app folder to enable serving gzip-ed from S3");
                squash.deployment.lambdas.utils.FileUtils.gzip(Arrays.asList(appPath), Collections.emptyList(),
                        logger);
                logger.log("GZip-ed files in app folder to enable serving gzip-ed from S3");

                // Rev the js and css files by appending revving-suffix to names - for
                // cache-ing
                logger.log("Appending revving suffix to js and css files in app folder");
                squash.deployment.lambdas.utils.FileUtils.appendRevvingSuffix(revvingSuffix, appPath.toPath(),
                        logger);
                logger.log("Appended revving suffix to js and css files in app folder");

                // Upload the modified app to the S3 website bucket
                logger.log("Uploading modified Angularjs app to S3 website bucket");
                // Will produce <S3BucketRoot>/app/sqawsh.min.js etc
                TransferUtils.waitForS3Transfer(transferManager.uploadDirectory(websiteBucket, "app",
                        new File(extractPath + "/app"), true), logger);
                logger.log("Uploaded modified Angularjs app to S3 website bucket");

                // Add gzip content-encoding metadata to zip-ed files
                logger.log("Updating metadata on modified Angularjs app in S3 bucket");
                TransferUtils.addGzipContentEncodingMetadata(websiteBucket, Optional.of("app"), logger);
                logger.log("Updated metadata on modified Angularjs app in S3 bucket");

                // Upload Cognito SDKs and their dependencies - these should all be
                // zipped first. N.B. We also append filenames with the revving
                // suffix.
                logger.log("About to upload Cognito libraries");
                List<ImmutableTriple<String, String, byte[]>> cognitoLibraries = new ArrayList<>();
                cognitoLibraries.add(new ImmutableTriple<>("Cognito SDK",
                        "aws-cognito-sdk.min_" + revvingSuffix + ".js", IOUtils.toByteArray(new URL(
                                "https://raw.githubusercontent.com/aws/amazon-cognito-identity-js/master/dist/aws-cognito-sdk.min.js"))));
                cognitoLibraries.add(new ImmutableTriple<>("Cognito Identity SDK",
                        "amazon-cognito-identity.min_" + revvingSuffix + ".js", IOUtils.toByteArray(new URL(
                                "https://raw.githubusercontent.com/aws/amazon-cognito-identity-js/master/dist/amazon-cognito-identity.min.js"))));
                cognitoLibraries.add(new ImmutableTriple<>("Big Integer Library",
                        "jsbn_" + revvingSuffix + ".js",
                        IOUtils.toByteArray(new URL("http://www-cs-students.stanford.edu/~tjw/jsbn/jsbn.js"))));
                cognitoLibraries.add(new ImmutableTriple<>("Big Integer Library 2",
                        "jsbn2_" + revvingSuffix + ".js", IOUtils.toByteArray(
                                new URL("http://www-cs-students.stanford.edu/~tjw/jsbn/jsbn2.js"))));

                // The SJCL still seems to need configuring to include the bytes
                // codec, despite 1.0 of Cognito Idp saying it had removed this
                // dependency. So for now we get this bytes-codec-configured version
                // from our resources.
                String sjcl_library;
                try {
                    sjcl_library = IOUtils.toString(AngularjsAppCustomResourceLambda.class
                            .getResourceAsStream("/squash/deployment/lambdas/sjcl.js"));
                } catch (IOException e) {
                    logger.log("Exception caught reading sjcl.js file: " + e.getMessage());
                    throw new Exception("Exception caught reading sjcl.js file");
                }
                logger.log("Read modified SJCL library from resources");
                cognitoLibraries.add(new ImmutableTriple<>("Stanford Javascript Crypto Library",
                        "sjcl_" + revvingSuffix + ".js", sjcl_library.getBytes(Charset.forName("UTF-8"))));

                for (ImmutableTriple<String, String, byte[]> cognitoLibrary : cognitoLibraries) {
                    logger.log("Uploading a Cognito library to S3 website bucket. Library name: "
                            + cognitoLibrary.left);

                    byte[] zippedLibrary = squash.deployment.lambdas.utils.FileUtils.gzip(cognitoLibrary.right,
                            logger);
                    ByteArrayInputStream libraryAsGzippedStream = new ByteArrayInputStream(zippedLibrary);
                    ObjectMetadata metadata = new ObjectMetadata();
                    metadata.setContentLength(zippedLibrary.length);
                    metadata.setContentEncoding("gzip");
                    String keyName = "app/components/identity/cognito/" + cognitoLibrary.middle;
                    logger.log("Uploading to key: " + keyName);
                    PutObjectRequest putObjectRequest = new PutObjectRequest(websiteBucket, keyName,
                            libraryAsGzippedStream, metadata);
                    TransferUtils.waitForS3Transfer(transferManager.upload(putObjectRequest), logger);
                    logger.log("Uploaded a Cognito library to S3 website bucket: " + cognitoLibrary.left);
                }

                // Add cache-control metadata to files. Css and js files will have
                // 1-year cache validity, since they are rev-ved.
                logger.log("Updating cache-control metadata on angular app in S3 bucket");
                TransferUtils.addCacheControlHeader("max-age=31536000", websiteBucket, Optional.of("app"),
                        ".js", logger);
                TransferUtils.addCacheControlHeader("max-age=31536000", websiteBucket, Optional.of("app"),
                        ".css", logger);
                // All html must revalidate every time
                TransferUtils.addCacheControlHeader("no-cache, must-revalidate", websiteBucket,
                        Optional.of("app"), ".html", logger);
                logger.log("Updated cache-control metadata on angular app in S3 bucket");

                // App content must be public so it can be served from the website
                logger.log("Modifying Angularjs app ACL in S3 website bucket");
                TransferUtils.setPublicReadPermissionsOnBucket(websiteBucket, Optional.of("app/"), logger);
                logger.log("Modified Angularjs app ACL in S3 website bucket");

            } catch (MalformedInputException mie) {
                logger.log("Caught a MalformedInputException: " + mie.getMessage());
                throw mie;
            } catch (IOException ioe) {
                logger.log("Caught an IO Exception: " + ioe.getMessage());
                throw ioe;
            }

            websiteURL = "http://" + websiteBucket + ".s3-website-" + region + ".amazonaws.com/app/index.html";
        } else if (requestType.equals("Delete")) {
            logger.log("Delete request - so deleting the app");
            deleteAngularjsApp(websiteBucket, logger);
        }

        responseStatus = "SUCCESS";
        return null;
    } catch (AmazonServiceException ase) {
        ExceptionUtils.logAmazonServiceException(ase, logger);
        return null;
    } catch (AmazonClientException ace) {
        ExceptionUtils.logAmazonClientException(ace, logger);
        return null;
    } catch (Exception e) {
        logger.log("Exception caught in AngularjsApp Lambda: " + e.getMessage());
        return null;
    } finally {
        // Send response to CloudFormation
        cloudFormationResponder.addKeyValueOutputsPair("WebsiteURL", websiteURL);
        cloudFormationResponder.sendResponse(responseStatus, logger);
    }
}