Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength

Introduction

This page collects real-world usage examples for com.amazonaws.services.s3.model.ObjectMetadata.setContentLength, drawn from open-source projects.

Prototype

public void setContentLength(long contentLength) 

Document

Sets the Content-Length HTTP header indicating the size of the associated object in bytes.
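
Before the per-project examples, here is a minimal self-contained sketch (bucket, key, and payload are placeholders, not drawn from the examples below). Setting the length up front lets the client send the stream as-is instead of buffering it to discover its size.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class PutWithLength {
    static void put(AmazonS3 s3client) {
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length); // exact object size in bytes
        s3client.putObject("example-bucket", "example-key",
                new ByteArrayInputStream(payload), metadata);
    }
}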

Usage

From source file:org.weakref.s3fs.util.AmazonS3ClientMock.java

License:Apache License

private S3Element parse(ByteArrayInputStream stream, String bucket, String key) {

    S3Object object = new S3Object();

    object.setBucketName(bucket);
    object.setKey(key);

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setLastModified(new Date());
    metadata.setContentLength(stream.available());
    object.setObjectContent(stream);

    object.setObjectMetadata(metadata);
    // TODO: create converter between path permission and s3 permission
    AccessControlList permission = createAllPermission();
    return new S3Element(object, permission, false);
}
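
Note: relying on stream.available() for the length works here only because the argument is a ByteArrayInputStream, whose available() returns the exact count of remaining bytes; on a general InputStream, available() is merely an estimate and is not a safe content length.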

From source file:org.xwiki.blobstore.s3.internal.S3BlobStore.java

License:Open Source License

@Override
public void putBlob(String path, InputStream content, long length) {
    String normalizedPath = normalizePath(path);

    this.logger.debug("Putting blob to '{}'", normalizedPath);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    if (length > 0) {
        objectMetadata.setContentLength(length);
    }

    this.client.putObject(this.bucket, normalizedPath, content, objectMetadata);
}
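
If the length is unknown and the metadata is left unset, the AWS SDK for Java buffers the entire stream in memory to compute the Content-Length (and logs a warning), so passing the length through as this example does keeps large uploads streaming.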

From source file:org.zalando.stups.fullstop.plugin.SaveSecurityGroupsPlugin.java

License:Apache License

private void writeToS3(String content, String prefix) {
    byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8);
    InputStream stream = new ByteArrayInputStream(contentBytes);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(contentBytes.length); // byte count; String.length() counts chars, which differs for multi-byte UTF-8
    String fileName = SECURITY_GROUPS + new DateTime(UTC) + JSON;
    s3Writer.putObjectToS3(bucketName, fileName, prefix, metadata, stream);
}

From source file:org.zalando.stups.fullstop.s3.S3Writer.java

License:Apache License

public void writeToS3(String accountId, String region, Date instanceBootTime, String logData, String logType,
        String instanceId) throws IOException {
    String fileName = null;

    DateTime dateTime = new DateTime(instanceBootTime, UTC);

    String keyName = Paths.get(accountId, region, dateTime.toString("YYYY"), dateTime.toString("MM"),
            dateTime.toString("dd"), instanceId + "-" + dateTime).toString();

    switch (logType) {
    case USER_DATA:
        fileName = TAUPAGE_YAML;
        break;
    case AUDIT_LOG:
        fileName = AUDIT_LOG_FILE_NAME + new DateTime(UTC) + LOG_GZ;
        break;
    default:
        logger.error("Wrong logType given: " + logType);
        break;
    }
    ObjectMetadata metadata = new ObjectMetadata();
    byte[] decodedLogData = Base64.decode(logData);
    metadata.setContentLength(decodedLogData.length);

    InputStream stream = new ByteArrayInputStream(decodedLogData);

    putObjectToS3(bucketName, fileName, keyName, metadata, stream);
}

From source file:oulib.aws.s3.S3Util.java

/**
 * Creates an AWS S3 folder.
 * 
 * @param bucketName : name of the target bucket
 * @param folderName : name of the folder to create
 * @param client : S3 client
 */
public static void createFolder(String bucketName, String folderName, AmazonS3 client) {

    try {

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(0);

        InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, folderName + "/", emptyContent,
                metadata);

        client.putObject(putObjectRequest);

        System.out
                .println("Successfully created the folder " + folderName + " in the bucket " + bucketName);
    } catch (Exception ex) {
        System.out.println("Failed to create the folder " + folderName + " in the bucket " + bucketName);
        Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
    }

}
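
A hypothetical invocation (the client construction and all names are illustrative assumptions, not part of the original source):

AmazonS3 client = AmazonS3ClientBuilder.defaultClient();
S3Util.createFolder("example-bucket", "reports", client);
// Puts the zero-byte key "reports/", which the S3 console renders as a folder.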

From source file:oulib.aws.s3.S3Util.java

/**
 * Generate a small tiff file from large Tiff S3 bucket object <br>
 * Note: the small tiff file will have the same key path as the original one
 *
 * @param s3client : S3 client
 * @param s3 : S3 object that contains the source tiff
 * @param targetBucketName : the bucket that stores the small tiff file
 * @param targetKey : key of the object in the target bucket
 * @param compressionRate : compression rate
 * @return : PutObjectResult
 */
public static PutObjectResult generateSmallTiff(AmazonS3 s3client, S3Object s3, String targetBucketName,
        String targetKey, double compressionRate) {

    PutObjectResult result = null;
    ByteArrayOutputStream bos = null;
    ByteArrayOutputStream os = null;
    ByteArrayInputStream is = null;
    S3ObjectInputStream s = null;
    ByteArrayInputStream byteInputStream = null;

    try {
        System.setProperty("com.sun.media.jai.disableMediaLib", "true");

        bos = new ByteArrayOutputStream();
        s = s3.getObjectContent();
        byte[] bytes = IOUtils.toByteArray(s);
        byteInputStream = new ByteArrayInputStream(bytes);

        TIFFDecodeParam param = new TIFFDecodeParam();
        ImageDecoder dec = ImageCodec.createImageDecoder("TIFF", byteInputStream, param);

        RenderedImage image = dec.decodeAsRenderedImage();

        RenderingHints qualityHints = new RenderingHints(RenderingHints.KEY_RENDERING,
                RenderingHints.VALUE_RENDER_QUALITY);

        RenderedOp resizedImage = JAI.create("SubsampleAverage", image, compressionRate, compressionRate,
                qualityHints);

        TIFFEncodeParam params = new com.sun.media.jai.codec.TIFFEncodeParam();

        resizedImage = JAI.create("encode", resizedImage, bos, "TIFF", params);

        BufferedImage imagenew = resizedImage.getSourceImage(0).getAsBufferedImage();

        os = new ByteArrayOutputStream();
        ImageIO.write(imagenew, "tif", os);
        is = new ByteArrayInputStream(os.toByteArray());

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(os.toByteArray().length);
        metadata.setContentType("image/tiff");
        metadata.setLastModified(new Date());

        os.close();

        imagenew.flush();

        result = s3client.putObject(new PutObjectRequest(targetBucketName, targetKey, is, metadata));
    } catch (IOException | AmazonClientException ex) {
        Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        try {
            if (bos != null) {
                bos.close();
            }
            if (os != null) {
                os.close();
            }
            if (is != null) {
                is.close();
            }
            if (s != null) {
                s.close();
            }
            if (byteInputStream != null) {
                byteInputStream.close();
            }
        } catch (IOException ex) {
            Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    return result;
}
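
A hypothetical call, assuming an existing client and source object (bucket and key names are illustrative):

S3Object source = s3client.getObject("source-bucket", "scans/page-001.tif");
PutObjectResult result = S3Util.generateSmallTiff(s3client, source,
        "derivative-bucket", "scans/page-001.tif", 0.25);
// A compressionRate of 0.25 subsamples the image to a quarter of its original scale.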

From source file:oulib.aws.s3.S3Util.java

/**
 * Pull out Tiff metadata from input S3 object and inject into the 
 * content of target S3 Object;<br>
 * Generate the new output S3 object that has the metadata from input object.
 *
 * @param s3client : S3 client
 * @param obj1 : input object that provides metadata
 * @param obj2 : target object that receives metadata
 * 
 * @return PutObjectResult
 */
public static PutObjectResult copyS3ObjectTiffMetadata(AmazonS3 s3client, S3Object obj1, S3Object obj2) {

    PutObjectResult result = null;

    BufferedInputStream bufferedInputStream = null;
    ByteArrayOutputStream byteArrayOutputStream = null;
    ByteArrayInputStream byteArrayInputStream = null;
    ByteArrayInputStream bis = null;
    S3ObjectInputStream content1 = null;
    S3ObjectInputStream content2 = null;
    String targetBucketName = obj2.getBucketName();
    String outputKey = obj2.getKey().split("\\.tif")[0] + "-copied.tif"; // escape the dot: String.split() takes a regex

    ImageMetadata metadata1, metadata2;
    TiffImageMetadata tiffMetadata1, tiffMetadata2;
    TiffOutputSet output1, output2;

    try {
        content1 = obj1.getObjectContent();
        content2 = obj2.getObjectContent();

        byte[] bytes1 = IOUtils.toByteArray(content1);
        byte[] bytes2 = IOUtils.toByteArray(content2);

        metadata1 = Imaging.getMetadata(bytes1);
        metadata2 = Imaging.getMetadata(bytes2);

        tiffMetadata1 = (TiffImageMetadata) metadata1;
        tiffMetadata2 = (TiffImageMetadata) metadata2;

        output1 = tiffMetadata1.getOutputSet();
        output2 = tiffMetadata2.getOutputSet();

        TiffOutputDirectory rootDir = output2.getOrCreateRootDirectory();
        TiffOutputDirectory exifDir = output2.getOrCreateExifDirectory();
        TiffOutputDirectory gpsDir = output2.getOrCreateGPSDirectory();

        if (null != output1.getRootDirectory()) {
            List<TiffOutputField> fs = output1.getRootDirectory().getFields();
            for (TiffOutputField f1 : fs) {
                if (null == rootDir.findField(f1.tag)
                        // CANNOT create the output image with this tag included!
                        && !"PlanarConfiguration".equals(f1.tagInfo.name)) {
                    rootDir.add(f1);
                }
            }
        }

        if (null != output1.getExifDirectory()) {
            for (TiffOutputField f2 : output1.getExifDirectory().getFields()) {
                exifDir.removeField(f2.tagInfo);
                exifDir.add(f2);
            }
        }

        if (null != output1.getGPSDirectory()) {
            for (TiffOutputField f3 : output1.getGPSDirectory().getFields()) {
                gpsDir.removeField(f3.tagInfo);
                gpsDir.add(f3);
            }
        }

        byteArrayOutputStream = new ByteArrayOutputStream();
        TiffImageWriterLossy writerLossy = new TiffImageWriterLossy(output2.byteOrder);
        writerLossy.write(byteArrayOutputStream, output2);

        byteArrayInputStream = new ByteArrayInputStream(byteArrayOutputStream.toByteArray());

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(byteArrayOutputStream.toByteArray().length);
        metadata.setContentType("image/tiff");
        metadata.setLastModified(new Date());

        result = s3client
                .putObject(new PutObjectRequest(targetBucketName, outputKey, byteArrayInputStream, metadata));

    } catch (ImageReadException | IOException | ImageWriteException ex) {
        Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        try {
            if (null != content1) {
                content1.close();
            }
            if (null != content2) {
                content2.close();
            }
            if (null != bufferedInputStream) {
                bufferedInputStream.close();
            }
            if (null != byteArrayInputStream) {
                byteArrayInputStream.close();
            }
            if (null != byteArrayOutputStream) {
                byteArrayOutputStream.close();
            }
            if (null != bis) {
                bis.close();
            }
        } catch (IOException ex) {
            Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    return result;
}
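
A hypothetical call (bucket and key names are assumptions). Note that the output bucket and key are derived from obj2, not passed in:

S3Object metadataSource = s3client.getObject("master-bucket", "scans/page-001.tif");
S3Object imageTarget = s3client.getObject("derivative-bucket", "scans/page-001.tif");
PutObjectResult result = S3Util.copyS3ObjectTiffMetadata(s3client, metadataSource, imageTarget);
// The merged image is stored as "scans/page-001-copied.tif" in derivative-bucket.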

From source file:raymond.mockftpserver.S3BucketFileSystem.java

License:Apache License

@Override
public void add(FileSystemEntry entry) {
    ObjectMetadata metaData = new ObjectMetadata();
    PutObjectRequest request;
    if (isDirectory(entry)) {
        metaData.setContentLength(0);
        InputStream is = new ByteArrayInputStream(new byte[0]);
        request = new PutObjectRequest(bucket, entry.getPath() + FOLDER_SUFFIX, is, metaData);
    } else {
        metaData.setContentLength(entry.getSize());
        request = new PutObjectRequest(bucket, entry.getPath(), ((FileEntry) entry).createInputStream(),
                metaData);
    }
    request.setStorageClass(StorageClass.ReducedRedundancy);
    s3.putObject(request);
}
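
Note: ReducedRedundancy is a legacy storage class; AWS no longer recommends it for new workloads, since S3 Standard offers higher durability at comparable or lower cost.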

From source file:sample.S3EmitterWithMetadata.java

License:Open Source License

@Override
public List<byte[]> emit(final UnmodifiableBuffer<byte[]> buffer) throws IOException {
    List<byte[]> records = buffer.getRecords();
    // Write all of the records to a single output stream (no compression is applied in this sample)
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    for (byte[] record : records) {
        try {
            baos.write(record);
        } catch (Exception e) {
            LOG.error("Error writing record to output stream. Failing this emit attempt. Record: "
                    + Arrays.toString(record), e);
            return buffer.getRecords();
        }
    }
    // Get the Amazon S3 filename
    String s3FileName = getS3FileName(buffer.getFirstSequenceNumber(), buffer.getLastSequenceNumber());
    String s3URI = getS3URI(s3FileName);
    try {
        ByteArrayInputStream object = new ByteArrayInputStream(baos.toByteArray());
        LOG.debug("Starting upload of file " + s3URI + " to Amazon S3 containing " + records.size()
                + " records.");
        ObjectMetadata meta = new ObjectMetadata();
        Date date = new Date();
        GregorianCalendar calendar = new GregorianCalendar();
        calendar.setTime(date);
        calendar.add(Calendar.DATE, 14);
        meta.setExpirationTime(calendar.getTime());
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        meta.setContentLength(baos.size());
        s3client.putObject(s3Bucket, s3FileName, object, meta);
        LOG.info("Successfully emitted " + buffer.getRecords().size() + " records to Amazon S3 in " + s3URI);
        return Collections.emptyList();
    } catch (Exception e) {
        LOG.error("Caught exception when uploading file " + s3URI + "to Amazon S3. Failing this emit attempt.",
                e);
        return buffer.getRecords();
    }
}
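
The return value doubles as retry signaling: an empty list tells the caller every record was emitted, while returning buffer.getRecords() hands the same batch back for another attempt. Before setting the content length from the concatenated buffer, the metadata also requests SSE-S3 server-side encryption and stamps an expiration time 14 days out.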

From source file:squash.booking.lambdas.core.BackupManager.java

License:Apache License

@Override
public final void backupSingleBooking(Booking booking, Boolean isCreation)
        throws InterruptedException, JsonProcessingException {
    // Backup to the S3 bucket. This method will typically be called every time
    // a booking is mutated. We upload the booking to the same key, so the
    // versions of this key should provide a timeline of all individual bookings
    // in the sequence (or close to it) that they were made.

    if (!initialised) {
        throw new IllegalStateException("The backup manager has not been initialised");
    }

    // Encode booking as JSON
    String backupString = (isCreation ? "Booking created: " : "Booking deleted: ")
            + System.getProperty("line.separator") + mapper.writeValueAsString(booking);

    logger.log("Backing up single booking mutation to S3 bucket");
    IS3TransferManager transferManager = getS3TransferManager();
    byte[] bookingAsBytes = backupString.getBytes(StandardCharsets.UTF_8);
    ByteArrayInputStream bookingAsStream = new ByteArrayInputStream(bookingAsBytes);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bookingAsBytes.length);
    PutObjectRequest putObjectRequest = new PutObjectRequest(databaseBackupBucketName, "LatestBooking",
            bookingAsStream, metadata);
    TransferUtils.waitForS3Transfer(transferManager.upload(putObjectRequest), logger);
    logger.log("Backed up single booking mutation to S3 bucket: " + backupString);

    // Backup to the SNS topic
    logger.log("Backing up single booking mutation to SNS topic: " + adminSnsTopicArn);
    getSNSClient().publish(adminSnsTopicArn, backupString, "Sqawsh single booking backup");
}