Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentEncoding

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model ObjectMetadata setContentEncoding.

Prototype

public void setContentEncoding(String encoding) 

Document

Sets the optional Content-Encoding HTTP header specifying what content encodings have been applied to the object and what decoding mechanisms must be applied in order to obtain the media-type referenced by the Content-Type field.
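
Before the project examples below, here is a minimal, self-contained sketch of the call, assuming the AWS SDK for Java v1; the bucket name ("example-bucket") and key ("greeting.txt") are placeholders. It gzips a small payload and records the applied encoding so that S3 serves a matching Content-Encoding: gzip header alongside the Content-Type:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPOutputStream;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class ContentEncodingExample {
    public static void main(String[] args) throws IOException {
        // Placeholder bucket and key; replace with your own values.
        String bucketName = "example-bucket";
        String key = "greeting.txt";

        // Gzip the payload before upload.
        byte[] plain = "Hello, S3!".getBytes(StandardCharsets.UTF_8);
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (GZIPOutputStream gzip = new GZIPOutputStream(buffer)) {
            gzip.write(plain);
        }
        byte[] compressed = buffer.toByteArray();

        // Record the encoding that was applied, so S3 returns
        // "Content-Encoding: gzip" when serving the object.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentEncoding("gzip");
        metadata.setContentType("text/plain");
        metadata.setContentLength(compressed.length);

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        s3.putObject(new PutObjectRequest(bucketName, key,
                new ByteArrayInputStream(compressed), metadata));
    }
}

Note that setContentEncoding only records metadata; it does not compress anything itself, so the caller is responsible for actually encoding the bytes (as the gzip step above and several of the examples below do).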

Usage

From source file: org.duracloud.s3storage.S3StorageProvider.java

License: Apache License

/**
 * {@inheritDoc}
 */
public void setContentProperties(String spaceId, String contentId, Map<String, String> contentProperties) {
    log.debug("setContentProperties(" + spaceId + ", " + contentId + ")");

    // Will throw if bucket does not exist
    String bucketName = getBucketName(spaceId);

    String contentEncoding = removeContentEncoding(contentProperties);

    contentProperties = removeCalculatedProperties(contentProperties);

    // Determine mimetype, from properties list or existing value
    String mimeType = contentProperties.remove(PROPERTIES_CONTENT_MIMETYPE);
    if (mimeType == null || mimeType.equals("")) {
        Map<String, String> existingMeta = getContentProperties(spaceId, contentId);
        String existingMime = existingMeta.get(StorageProvider.PROPERTIES_CONTENT_MIMETYPE);
        if (existingMime != null) {
            mimeType = existingMime;
        }
    }

    // Collect all object properties
    ObjectMetadata objMetadata = new ObjectMetadata();
    for (String key : contentProperties.keySet()) {
        if (log.isDebugEnabled()) {
            log.debug("[" + key + "|" + contentProperties.get(key) + "]");
        }
        objMetadata.addUserMetadata(getSpaceFree(key), contentProperties.get(key));
    }

    // Set Content-Type
    if (mimeType != null && !mimeType.equals("")) {
        objMetadata.setContentType(mimeType);
    }

    // Set Content-Encoding
    if (contentEncoding != null && !contentEncoding.equals("")) {
        objMetadata.setContentEncoding(contentEncoding);
    }

    updateObjectProperties(bucketName, contentId, objMetadata);
}

From source file: org.geoserver.taskmanager.external.impl.S3FileServiceImpl.java

License: Open Source License

@Override
public void create(String filePath, InputStream content) throws IOException {
    // Check parameters
    if (content == null) {
        throw new IllegalArgumentException("Content of a file can not be null.");
    }
    if (filePath == null) {
        throw new IllegalArgumentException("Name of a file can not be null.");
    }

    if (checkFileExists(filePath)) {
        throw new IllegalArgumentException("The file already exists");
    }
    File scratchFile = File.createTempFile("prefix", String.valueOf(System.currentTimeMillis()));
    try {
        if (!getS3Client().doesBucketExist(rootFolder)) {
            getS3Client().createBucket(rootFolder);
        }

        FileUtils.copyInputStreamToFile(content, scratchFile);

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentEncoding(ENCODING);

        PutObjectRequest putObjectRequest = new PutObjectRequest(rootFolder, filePath, scratchFile);

        putObjectRequest.withMetadata(metadata);

        getS3Client().putObject(putObjectRequest);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    } finally {
        if (scratchFile.exists()) {
            scratchFile.delete();
        }
    }
}

From source file: org.rdswitchboard.harvesters.pmh.Harvester.java

License: Open Source License

public void downloadRecords(SetStatus set) throws HarvesterException, UnsupportedEncodingException, IOException,
        InterruptedException, XPathExpressionException, SAXException, ParserConfigurationException {

    // Generate the URL of request
    String url = null;
    if (set.hasToken()) {
        try {
            url = repoUrl + String.format(URL_LIST_RECORDS_RESUMPTION_TOKEN,
                    URLEncoder.encode(set.getToken(), "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            e.printStackTrace();
        }
    }
    if (null == url) {
        if (!set.hasName())
            url = repoUrl + String.format(URL_LIST_DEFAULT_RECORDS, metadataPrefix);
        else
            url = repoUrl + String.format(URL_LIST_RECORDS, URLEncoder.encode(set.getName(), "UTF-8"),
                    metadataPrefix);
    }

    System.out.println("Downloading records: " + url);

    String xml = null;

    // Get XML document 
    URLConnection conn = new URL(url).openConnection();
    if (connectionTimeout > 0)
        conn.setConnectTimeout(connectionTimeout);
    if (readTimeout > 0)
        conn.setReadTimeout(readTimeout);
    try (InputStream is = conn.getInputStream()) {
        if (null != is)
            xml = IOUtils.toString(is, StandardCharsets.UTF_8.name());
    }

    // Check that XML has been returned and that it has a valid root element
    if (null == xml)
        throw new HarvesterException("The XML document is empty");
    Document doc = dbf.newDocumentBuilder().parse(new InputSource(new StringReader(xml)));

    // Extract root node
    Node root = (Node) XPATH_OAI_PMH.evaluate(doc, XPathConstants.NODE);
    if (null == root)
        throw new HarvesterException("The document is not an OAI:PMH file");

    // Check for error node
    Node error = (Node) XPATH_ERROR.evaluate(root, XPathConstants.NODE);
    if (null != error && error instanceof Element) {
        String code = ((Element) error).getAttribute("code");
        String message = ((Element) error).getTextContent();

        if (ERR_NO_RECORDS_MATCH.equals(code)) {
            System.out.println("Error: The set is empty");

            set.setFiles(0);
            set.resetToken();

            return;
        } else
            throw new HarvesterException(code, message);
    }

    Node nodeToken = (Node) XPATH_RECORDS_RESUMPTION_TOKEN.evaluate(root, XPathConstants.NODE);

    if (null != nodeToken && nodeToken instanceof Element) {
        String tokenString = ((Element) nodeToken).getTextContent();
        if (null != tokenString && !tokenString.isEmpty())
            set.setToken(tokenString);
        else
            set.resetToken();

        set.setCursor(((Element) nodeToken).getAttribute("cursor"));
        set.setSize(((Element) nodeToken).getAttribute("completeListSize"));

        set.dumpToken(System.out);
    } else
        set.resetToken();

    String filePath = repoPrefix + "/" + metadataPrefix + "/" + harvestDate + "/" + set.getNameSafe() + "/"
            + set.getFiles() + ".xml";

    if (StringUtils.isNullOrEmpty(bucketName)) {

        FileUtils.writeStringToFile(new File(folderName, filePath), xml);

    } else {
        byte[] bytes = xml.getBytes(StandardCharsets.UTF_8);

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentEncoding(StandardCharsets.UTF_8.name());
        metadata.setContentType("text/xml");
        metadata.setContentLength(bytes.length);

        InputStream inputStream = new ByteArrayInputStream(bytes);

        PutObjectRequest request = new PutObjectRequest(bucketName, filePath, inputStream, metadata);

        s3client.putObject(request);
    }

    set.incFiles();
}

From source file: org.rdswitchboard.harvesters.pmh.Harvester.java

License: Open Source License

/**
 * Alternative function to organize the harvest process. It differs from the other
 * harvest function in how data is stored: this function stores files in the raw
 * format in which they arrive from the server.
 * Harvesting methods must never be mixed. The harvesting folder must be wiped out
 * when switching to this method, or the function will fail.
 * @throws Exception
 */
public boolean harvest() throws Exception {
    if (StringUtils.isNullOrEmpty(metadataPrefix))
        throw new IllegalArgumentException("The OAI:PMH Metadata Prefix can not be empty");

    System.out.println("Downloading set list");

    boolean result = false;

    if (null == whiteList || whiteList.isEmpty()) {

        System.out.println(
                "There is no whitelist found. Proceeding with downloading the list of all available sets.");

        // download all sets in the repository
        Map<String, String> mapSets = listSets();

        if (null == mapSets || mapSets.isEmpty()) {
            System.out.println("Processing default set");

            result = harvestSet(new SetStatus(null, "Default"));
        } else {

            result = false;

            for (Map.Entry<String, String> entry : mapSets.entrySet()) {

                SetStatus set = new SetStatus(entry.getKey().trim(),
                        URLDecoder.decode(entry.getValue(), StandardCharsets.UTF_8.name()));

                // if black list exists and item is blacklisted, continue
                if (null != blackList && blackList.contains(set)) {
                    set.setFiles(-2);
                    saveSetStats(set); // set was ignored
                    continue;
                }

                System.out.println("Processing set: "
                        + URLDecoder.decode(entry.getValue(), StandardCharsets.UTF_8.name()));

                if (!harvestSet(set)) {
                    System.err.println(
                            "The harvesting job has been aborted due to an error. If you want harvesting to be continued, please set option 'fail.on.error' to 'false' in the configuration file");
                    result = false;
                    break;
                } else
                    result = true;
            }

        }
    } else {
        for (String item : whiteList) {
            if (!harvestSet(new SetStatus(item, item))) {
                System.err.println(
                        "The harvesting job has been aborted due to an error. If you want harvesting to be continued, please set option 'fail.on.error' to 'false' in the configuration file");
                result = false;
                break;
            } else
                result = true;
        }

    }
    if (result) {
        String filePath = repoPrefix + "/" + metadataPrefix + "/latest.txt";

        if (StringUtils.isNullOrEmpty(bucketName)) {

            FileUtils.writeStringToFile(new File(folderName, filePath), harvestDate);

        } else {

            byte[] bytes = harvestDate.getBytes(StandardCharsets.UTF_8);

            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentEncoding(StandardCharsets.UTF_8.name());
            metadata.setContentType("text/plain");
            metadata.setContentLength(bytes.length);

            InputStream inputStream = new ByteArrayInputStream(bytes);

            PutObjectRequest request = new PutObjectRequest(bucketName, filePath, inputStream, metadata);

            s3client.putObject(request);
        }
    }

    return result;
}

From source file: org.researchgraph.crossref.CrossRef.java

private void saveCacheFile(String file, String json) throws IOException {
    if (null != file && null != json && !json.isEmpty()) {
        if (null != cache) {
            FileUtils.write(new File(cache, file), json);
        } else if (null != s3Client) {
            byte[] bytes = json.getBytes(StandardCharsets.UTF_8);

            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentEncoding(StandardCharsets.UTF_8.name());
            metadata.setContentType("text/json");
            metadata.setContentLength(bytes.length);

            InputStream inputStream = new ByteArrayInputStream(bytes);

            s3Client.putObject(new PutObjectRequest(s3Bucket, getS3Key(file), inputStream, metadata));
        }
    }
}

From source file: squash.booking.lambdas.core.PageManager.java

License: Apache License

private void copyUpdatedBookingPageToS3(String pageBaseName, String page, String uidSuffix, boolean usePrefix)
        throws Exception {

    logger.log("About to copy booking page to S3");

    String pageBaseNameWithPrefix = usePrefix ? "NoScript/" + pageBaseName : pageBaseName;
    try {
        logger.log("Uploading booking page to S3 bucket: " + websiteBucketName + "s3websitebucketname"
                + " and key: " + pageBaseNameWithPrefix + uidSuffix + ".html");
        byte[] pageAsGzippedBytes = FileUtils.gzip(page.getBytes(StandardCharsets.UTF_8), logger);

        ByteArrayInputStream pageAsStream = new ByteArrayInputStream(pageAsGzippedBytes);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(pageAsGzippedBytes.length);
        metadata.setContentEncoding("gzip");
        metadata.setContentType("text/html");
        // Direct caches not to satisfy future requests with this data without
        // revalidation.
        metadata.setCacheControl("no-cache, must-revalidate");
        PutObjectRequest putObjectRequest = new PutObjectRequest(websiteBucketName,
                pageBaseNameWithPrefix + uidSuffix + ".html", pageAsStream, metadata);
        // Page must be public so it can be served from the website
        putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
        IS3TransferManager transferManager = getS3TransferManager();
        TransferUtils.waitForS3Transfer(transferManager.upload(putObjectRequest), logger);
        logger.log("Uploaded booking page to S3 bucket");

        if (uidSuffix.equals("")) {
            // Nothing to copy - so return
            logger.log("UidSuffix is empty - so not creating duplicate page");
            return;
        }

        // N.B. We copy from hashed key to non-hashed (and not vice versa)
        // to ensure consistency
        logger.log("Copying booking page in S3 bucket: " + websiteBucketName + " and key: "
                + pageBaseNameWithPrefix + ".html");
        CopyObjectRequest copyObjectRequest = new CopyObjectRequest(websiteBucketName,
                pageBaseNameWithPrefix + uidSuffix + ".html", websiteBucketName,
                pageBaseNameWithPrefix + ".html");
        copyObjectRequest.setCannedAccessControlList(CannedAccessControlList.PublicRead);
        // N.B. Copied object will get same metadata as the source (e.g. the
        // cache-control header etc.)
        TransferUtils.waitForS3Transfer(transferManager.copy(copyObjectRequest), logger);
        logger.log("Copied booking page successfully in S3");
    } catch (AmazonServiceException ase) {
        ExceptionUtils.logAmazonServiceException(ase, logger);
        throw new Exception("Exception caught while copying booking page to S3");
    } catch (AmazonClientException ace) {
        ExceptionUtils.logAmazonClientException(ace, logger);
        throw new Exception("Exception caught while copying booking page to S3");
    } catch (InterruptedException e) {
        logger.log("Caught interrupted exception: ");
        logger.log("Error Message: " + e.getMessage());
        throw new Exception("Exception caught while copying booking page to S3");
    }
}

From source file: squash.deployment.lambdas.AngularjsAppCustomResourceLambda.java

License: Apache License

/**
 * Implementation for the AWS Lambda function backing the AngularjsApp
 * resource.
 * 
 * <p>
 * This lambda requires the following environment variables:
 * <ul>
 * <li>WebsiteBucket - name of S3 bucket serving the booking website.</li>
 * <li>AngularjsZipBucket - S3 bucket holding the Angularjs app zip file.</li>
 * <li>CognitoIdentityPoolId - id of the Cognito Identity Pool.</li>
 * <li>CognitoUserPoolId - id of the Cognito User Pool.</li>
 * <li>CognitoUserPoolIdentityProviderName - Name of user pool identity provider.</li>
 * <li>JavascriptClientAppId - id of the Cognito User Pool app to use from javascript.</li>
 * <li>ApiGatewayBaseUrl - base Url of the ApiGateway Api.</li>
 * <li>Region - the AWS region in which the Cloudformation stack is created.</li>
 * <li>Revision - integer incremented to force stack updates to update this resource.</li>
 * </ul>
 *
 * <p>On success, it returns the following output to Cloudformation:
 * <ul>
 *    <li>WebsiteURL - Url of the Angularjs website.</li>
 * </ul>
 *
 * <p>Updates will delete the previous deployment and replace it with the new one.
 *
 * @param request
 *            request parameters as provided by the CloudFormation service
 * @param context
 *            context as provided by the CloudFormation service
 */
@Override
public Object handleRequest(Map<String, Object> request, Context context) {

    LambdaLogger logger = context.getLogger();
    logger.log("Starting AngularjsApp custom resource handleRequest");

    // Handle standard request parameters
    Map<String, String> standardRequestParameters = LambdaInputLogger.logStandardRequestParameters(request,
            logger);
    String requestType = standardRequestParameters.get("RequestType");

    // Handle required environment variables
    logger.log("Logging required environment variables for custom resource request");
    String websiteBucket = System.getenv("WebsiteBucket");
    String angularjsZipBucket = System.getenv("AngularjsZipBucket");
    String cognitoIdentityPoolId = System.getenv("CognitoIdentityPoolId");
    String cognitoUserPoolId = System.getenv("CognitoUserPoolId");
    String cognitoUserPoolIdentityProviderName = System.getenv("CognitoUserPoolIdentityProviderName");
    String javascriptClientAppId = System.getenv("JavascriptClientAppId");
    String apiGatewayBaseUrl = System.getenv("ApiGatewayBaseUrl");
    String region = System.getenv("AWS_REGION");
    String revision = System.getenv("Revision");

    // Log out our required environment variables
    logger.log("WebsiteBucket: " + websiteBucket);
    logger.log("AngularjsZipBucket: " + angularjsZipBucket);
    logger.log("CognitoIdentityPoolId: " + cognitoIdentityPoolId);
    logger.log("CognitoUserPoolId: " + cognitoUserPoolId);
    logger.log("CognitoUserPoolIdentityProviderName: " + cognitoUserPoolIdentityProviderName);
    logger.log("JavascriptClientAppId: " + javascriptClientAppId);
    logger.log("ApiGatewayBaseUrl: " + apiGatewayBaseUrl);
    logger.log("Region: " + region);
    logger.log("Revision: " + revision);

    // API calls below can sometimes give access-denied errors during stack
    // creation, most likely because required new roles have not yet propagated
    // across AWS. We sleep here to allow time for this propagation.
    try {
        Thread.sleep(10000);
    } catch (InterruptedException e) {
        logger.log("Sleep to allow new roles to propagate has been interrupted.");
    }

    // Prepare our response to be sent in the finally block
    CloudFormationResponder cloudFormationResponder = new CloudFormationResponder(standardRequestParameters,
            "DummyPhysicalResourceId");
    // Initialise failure response, which will be changed on success
    String responseStatus = "FAILED";

    String websiteURL = null;
    try {
        cloudFormationResponder.initialise();

        if (requestType.equals("Create") || requestType.equals("Update")) {

            // On updates we clear out the app first
            if (requestType.equals("Update")) {
                deleteAngularjsApp(websiteBucket, logger);
            }

            // Get the Angularjs app's zip file
            try {
                logger.log("Downloading Angularjs zip from S3");
                IS3TransferManager transferManager = getS3TransferManager();
                String zipDownloadPath = "/tmp/AngularjsApp.zip";
                File downloadedFile = new File(zipDownloadPath);
                TransferUtils.waitForS3Transfer(
                        transferManager.download(angularjsZipBucket, "AngularjsApp.zip", downloadedFile),
                        logger);
                logger.log("Downloaded Angularjs zip successfully from S3");

                // Modify the Bookings and Identity Service files to point to the
                // correct Cognito data, ApiGateway base url, and region.
                logger.log("Extracting Angularjs zip");
                String extractPath = "/tmp";
                try {
                    ZipFile zipFile = new ZipFile(zipDownloadPath);
                    // Will produce /tmp/app/app.js etc
                    zipFile.extractAll(extractPath);
                } catch (ZipException e) {
                    logger.log("Caught a ZipException Exception: " + e.getMessage());
                    throw e;
                }
                logger.log("Extracted Angularjs zip");

                logger.log(
                        "Modifying the Bookings and Identity Services to point to the correct ApiGatewayBaseUrl, Cognito data, and region");
                String fileContent;
                String filePath = extractPath + "/app/sqawsh.min.js";
                try (FileInputStream inputStream = new FileInputStream(filePath)) {
                    fileContent = IOUtils.toString(inputStream);
                }
                fileContent = fileContent.replace("bookingregiontobereplaced", region)
                        .replace("bookingurltobereplaced", apiGatewayBaseUrl)
                        .replace("bookingbuckettobereplaced", websiteBucket)
                        .replace("identityregiontobereplaced", region)
                        .replace("identitypoolidtobereplaced", cognitoIdentityPoolId)
                        .replace("identityuserpoolidtobereplaced", cognitoUserPoolId)
                        .replace("identityprovidernametobereplaced", cognitoUserPoolIdentityProviderName)
                        .replace("identityappidtobereplaced", javascriptClientAppId);

                FileUtils.writeStringToFile(new File(filePath), fileContent);
                logger.log(
                        "Modified the Bookings and Identity Services to point to the correct ApiGatewayBaseUrl, Cognito data, and region");

                // We will later modify the gzip-ed filenames to add a revving suffix.
                // But before we gzip, we need to modify the revved file links in
                // index.html
                String revvingSuffix = System.getenv("RevvingSuffix");
                File appPath = new File("/tmp/app");
                logger.log("Modifying links to revved files in index.html");
                Path indexPath = new File(appPath, "index.html").toPath();
                Charset charset = StandardCharsets.UTF_8;
                List<String> newLines = new ArrayList<>();
                for (String line : Files.readAllLines(indexPath, charset)) {
                    if (line.contains("googleapis") || line.contains("cloudflare") || line.contains("maxcdn")) {
                        // Don't alter lines linking to CDNs; they are already revved.
                        newLines.add(line);
                    } else {
                        newLines.add(line.replace(".js", "_" + revvingSuffix + ".js").replace(".css",
                                "_" + revvingSuffix + ".css"));
                    }
                }
                Files.write(indexPath, newLines, charset);
                logger.log("Modified links to revved files in index.html");

                // GZIP all js, css, and html files within app folder
                logger.log("GZip-ing files in app folder to enable serving gzip-ed from S3");
                squash.deployment.lambdas.utils.FileUtils.gzip(Arrays.asList(appPath), Collections.emptyList(),
                        logger);
                logger.log("GZip-ed files in app folder to enable serving gzip-ed from S3");

                // Rev the js and css files by appending revving-suffix to names - for
                // cache-ing
                logger.log("Appending revving suffix to js and css files in app folder");
                squash.deployment.lambdas.utils.FileUtils.appendRevvingSuffix(revvingSuffix, appPath.toPath(),
                        logger);
                logger.log("Appended revving suffix to js and css files in app folder");

                // Upload the modified app to the S3 website bucket
                logger.log("Uploading modified Angularjs app to S3 website bucket");
                // Will produce <S3BucketRoot>/app/sqawsh.min.js etc
                TransferUtils.waitForS3Transfer(transferManager.uploadDirectory(websiteBucket, "app",
                        new File(extractPath + "/app"), true), logger);
                logger.log("Uploaded modified Angularjs app to S3 website bucket");

                // Add gzip content-encoding metadata to zip-ed files
                logger.log("Updating metadata on modified Angularjs app in S3 bucket");
                TransferUtils.addGzipContentEncodingMetadata(websiteBucket, Optional.of("app"), logger);
                logger.log("Updated metadata on modified Angularjs app in S3 bucket");

                // Upload Cognito SDKs and their dependencies - these should all be
                // zipped first. N.B. We also append filenames with the revving
                // suffix.
                logger.log("About to upload Cognito libraries");
                List<ImmutableTriple<String, String, byte[]>> cognitoLibraries = new ArrayList<>();
                cognitoLibraries.add(new ImmutableTriple<>("Cognito SDK",
                        "aws-cognito-sdk.min_" + revvingSuffix + ".js", IOUtils.toByteArray(new URL(
                                "https://raw.githubusercontent.com/aws/amazon-cognito-identity-js/master/dist/aws-cognito-sdk.min.js"))));
                cognitoLibraries.add(new ImmutableTriple<>("Cognito Identity SDK",
                        "amazon-cognito-identity.min_" + revvingSuffix + ".js", IOUtils.toByteArray(new URL(
                                "https://raw.githubusercontent.com/aws/amazon-cognito-identity-js/master/dist/amazon-cognito-identity.min.js"))));
                cognitoLibraries.add(new ImmutableTriple<>("Big Integer Library",
                        "jsbn_" + revvingSuffix + ".js",
                        IOUtils.toByteArray(new URL("http://www-cs-students.stanford.edu/~tjw/jsbn/jsbn.js"))));
                cognitoLibraries.add(new ImmutableTriple<>("Big Integer Library 2",
                        "jsbn2_" + revvingSuffix + ".js", IOUtils.toByteArray(
                                new URL("http://www-cs-students.stanford.edu/~tjw/jsbn/jsbn2.js"))));

                // The SJCL still seems to need configuring to include the bytes
                // codec, despite 1.0 of Cognito Idp saying it had removed this
                // dependency. So for now we get this bytes-codec-configured version
                // from our resources.
                String sjcl_library;
                try {
                    sjcl_library = IOUtils.toString(AngularjsAppCustomResourceLambda.class
                            .getResourceAsStream("/squash/deployment/lambdas/sjcl.js"));
                } catch (IOException e) {
                    logger.log("Exception caught reading sjcl.js file: " + e.getMessage());
                    throw new Exception("Exception caught reading sjcl.js file");
                }
                logger.log("Read modified SJCL library from resources");
                cognitoLibraries.add(new ImmutableTriple<>("Stanford Javascript Crypto Library",
                        "sjcl_" + revvingSuffix + ".js", sjcl_library.getBytes(Charset.forName("UTF-8"))));

                for (ImmutableTriple<String, String, byte[]> cognitoLibrary : cognitoLibraries) {
                    logger.log("Uploading a Cognito library to S3 website bucket. Library name: "
                            + cognitoLibrary.left);

                    byte[] zippedLibrary = squash.deployment.lambdas.utils.FileUtils.gzip(cognitoLibrary.right,
                            logger);
                    ByteArrayInputStream libraryAsGzippedStream = new ByteArrayInputStream(zippedLibrary);
                    ObjectMetadata metadata = new ObjectMetadata();
                    metadata.setContentLength(zippedLibrary.length);
                    metadata.setContentEncoding("gzip");
                    String keyName = "app/components/identity/cognito/" + cognitoLibrary.middle;
                    logger.log("Uploading to key: " + keyName);
                    PutObjectRequest putObjectRequest = new PutObjectRequest(websiteBucket, keyName,
                            libraryAsGzippedStream, metadata);
                    TransferUtils.waitForS3Transfer(transferManager.upload(putObjectRequest), logger);
                    logger.log("Uploaded a Cognito library to S3 website bucket: " + cognitoLibrary.left);
                }

                // Add cache-control metadata to files. Css and js files will have
                // 1-year cache validity, since they are rev-ved.
                logger.log("Updating cache-control metadata on angular app in S3 bucket");
                TransferUtils.addCacheControlHeader("max-age=31536000", websiteBucket, Optional.of("app"),
                        ".js", logger);
                TransferUtils.addCacheControlHeader("max-age=31536000", websiteBucket, Optional.of("app"),
                        ".css", logger);
                // All html must revalidate every time
                TransferUtils.addCacheControlHeader("no-cache, must-revalidate", websiteBucket,
                        Optional.of("app"), ".html", logger);
                logger.log("Updated cache-control metadata on angular app in S3 bucket");

                // App content must be public so it can be served from the website
                logger.log("Modifying Angularjs app ACL in S3 website bucket");
                TransferUtils.setPublicReadPermissionsOnBucket(websiteBucket, Optional.of("app/"), logger);
                logger.log("Modified Angularjs app ACL in S3 website bucket");

            } catch (MalformedInputException mie) {
                logger.log("Caught a MalformedInputException: " + mie.getMessage());
                throw mie;
            } catch (IOException ioe) {
                logger.log("Caught an IO Exception: " + ioe.getMessage());
                throw ioe;
            }

            websiteURL = "http://" + websiteBucket + ".s3-website-" + region + ".amazonaws.com/app/index.html";
            ;
        } else if (requestType.equals("Delete")) {
            logger.log("Delete request - so deleting the app");
            deleteAngularjsApp(websiteBucket, logger);
        }

        responseStatus = "SUCCESS";
        return null;
    } catch (AmazonServiceException ase) {
        ExceptionUtils.logAmazonServiceException(ase, logger);
        return null;
    } catch (AmazonClientException ace) {
        ExceptionUtils.logAmazonClientException(ace, logger);
        return null;
    } catch (Exception e) {
        logger.log("Exception caught in AngularjsApp Lambda: " + e.getMessage());
        return null;
    } finally {
        // Send response to CloudFormation
        cloudFormationResponder.addKeyValueOutputsPair("WebsiteURL", websiteURL);
        cloudFormationResponder.sendResponse(responseStatus, logger);
    }
}

From source file: squash.deployment.lambdas.utils.TransferUtils.java

License: Apache License

/**
 * Adds gzip content-encoding metadata to S3 objects.
 *
 * <p>Adds gzip content-encoding metadata to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder) will have the
 *    metadata added. When the bucket serves objects it will then
 *    add a suitable Content-Encoding header.
 *
 *    @param bucketName the bucket to apply the metadata to.
 *    @param prefix prefix within the bucket, beneath which to apply the metadata.
 *    @param logger a CloudwatchLogs logger.
 */
public static void addGzipContentEncodingMetadata(String bucketName, Optional<String> prefix,
        LambdaLogger logger) {

    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName + " and prefix: "
                + prefix.get());
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            String key = objectSummary.getKey();
            logger.log("Setting metadata for S3 object: " + key);
            // We must specify ALL metadata - not just the one we're adding.
            ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
            objectMetadata.setContentEncoding("gzip");
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName, key)
                    .withNewObjectMetadata(objectMetadata)
                    .withCannedAccessControlList(CannedAccessControlList.PublicRead);
            client.copyObject(copyObjectRequest);
            logger.log("Set metadata for S3 object: " + key);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Set gzip content encoding metadata on bucket");
}