Example usage for com.amazonaws.services.s3.model ObjectMetadata setLastModified

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata setLastModified

Introduction

In this page you can find the example usage for com.amazonaws.services.s3.model ObjectMetadata setLastModified.

Prototype

public void setLastModified(Date lastModified) 

Source Link

Document

For internal use only.

Usage

From source file:net.solarnetwork.node.backup.s3.S3BackupService.java

License:Open Source License

/**
 * Uploads the given backup resources to S3, de-duplicating against objects
 * already stored, and writes a JSON metadata object describing the backup.
 *
 * @param resources the resources to back up; {@code null} or empty aborts with {@code null}
 * @param now timestamp used to name the backup metadata object
 * @param props backup properties, used to resolve the node ID
 * @return the saved backup metadata, or {@code null} if nothing was backed up
 *         or the service was not in a runnable state
 */
private Backup performBackupInternal(final Iterable<BackupResource> resources, final Calendar now,
        Map<String, String> props) {
    if (resources == null) {
        return null;
    }
    final Iterator<BackupResource> itr = resources.iterator();
    if (!itr.hasNext()) {
        log.debug("No resources provided, nothing to backup");
        return null;
    }
    S3Client client = this.s3Client;
    // Transition Configured -> RunningBackup; also allow recovery from a prior
    // Error state. Any other state means a backup is already in progress.
    if (!status.compareAndSet(BackupStatus.Configured, BackupStatus.RunningBackup)) {
        // try to reset from error
        if (!status.compareAndSet(BackupStatus.Error, BackupStatus.RunningBackup)) {
            return null;
        }
    }
    S3BackupMetadata result = null;
    try {
        final Long nodeId = nodeId(props);
        final String metaName = String.format(META_NAME_FORMAT, now, nodeId);
        final String metaObjectKey = objectKeyForPath(META_OBJECT_KEY_PREFIX + metaName);
        log.info("Starting backup to archive {}", metaObjectKey);

        // Existing data objects, used below to skip uploads of content that is
        // already stored (objects are keyed by their SHA-256 content digest).
        final Set<S3ObjectReference> allDataObjects = client
                .listObjects(objectKeyForPath(DATA_OBJECT_KEY_PREFIX));

        S3BackupMetadata meta = new S3BackupMetadata();
        meta.setNodeId(nodeId);
        MessageDigest digest = DigestUtils.getSha256Digest();
        byte[] buf = new byte[4096];
        for (BackupResource rsrc : resources) {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            if (rsrc.getModificationDate() > 0) {
                objectMetadata.setLastModified(new Date(rsrc.getModificationDate()));
            }
            // Digest the resource content; the digest doubles as the object key
            // suffix so identical content maps to the same S3 object.
            String sha = calculateContentDigest(rsrc, digest, buf, objectMetadata);
            String objectKey = objectKeyForPath(DATA_OBJECT_KEY_PREFIX + sha);

            // see if already exists
            if (!allDataObjects.contains(new S3ObjectReference(objectKey))) {
                log.info("Saving resource to S3: {}", rsrc.getBackupPath());
                client.putObject(objectKey, rsrc.getInputStream(), objectMetadata);
            } else {
                log.info("Backup resource already saved to S3: {}", rsrc.getBackupPath());
            }
            meta.addBackupResource(rsrc, objectKey, sha);
        }

        // now save metadata
        meta.setComplete(true);
        meta.setDate(now.getTime());
        meta.setKey(metaName);
        byte[] metaJsonBytes = OBJECT_MAPPER.writeValueAsBytes(meta);
        try (ByteArrayInputStream in = new ByteArrayInputStream(metaJsonBytes)) {
            ObjectMetadata metaObjectMetadata = new ObjectMetadata();
            metaObjectMetadata.setContentType("application/json;charset=UTF-8");
            metaObjectMetadata.setContentLength(metaJsonBytes.length);
            metaObjectMetadata.setLastModified(meta.getDate());
            S3ObjectReference metaRef = client.putObject(metaObjectKey, in, metaObjectMetadata);
            result = new S3BackupMetadata(metaRef);
        }

        if (additionalBackupCount < 1) {
            // add this backup to the cached data
            CachedResult<List<Backup>> cached = cachedBackupList.get();
            if (cached != null) {
                List<Backup> list = cached.getResult();
                List<Backup> newList = new ArrayList<>(list);
                newList.add(0, result);
                updateCachedBackupList(newList);
            }
        } else {
            // clean out older backups, keeping additionalBackupCount extras
            // plus the backup just created
            List<Backup> knownBackups = getAvailableBackupsInternal();
            List<String> backupsForNode = knownBackups.stream().filter(b -> nodeId.equals(b.getNodeId()))
                    .map(b -> b.getKey()).collect(Collectors.toList());
            if (backupsForNode.size() > additionalBackupCount + 1) {
                // NOTE(review): assumes backupsForNode is ordered oldest-first so
                // limit() selects the oldest entries — confirm the ordering
                // guaranteed by getAvailableBackupsInternal().
                Set<String> keysToDelete = backupsForNode.stream()
                        .limit(backupsForNode.size() - additionalBackupCount - 1).collect(Collectors.toSet());
                log.info("Deleting {} expired backups for node {}: {}", keysToDelete.size(), nodeId,
                        keysToDelete);
                client.deleteObjects(keysToDelete);

                // update cache
                knownBackups = knownBackups.stream().filter(b -> !keysToDelete.contains(b.getKey()))
                        .collect(Collectors.toList());
                updateCachedBackupList(knownBackups);
            }
        }
    } catch (IOException e) {
        log.error("IO error performing backup", e);
    } finally {
        // Always return to Configured so future backups can run.
        status.compareAndSet(BackupStatus.RunningBackup, BackupStatus.Configured);
    }
    return result;
}

From source file:org.apache.camel.component.aws.s3.S3Producer.java

License:Apache License

@Override
public void process(final Exchange exchange) throws Exception {
    // Copy the optional S3-related headers from the inbound message onto the
    // object metadata; absent headers are simply skipped.
    ObjectMetadata metadata = new ObjectMetadata();

    Long length = exchange.getIn().getHeader(S3Constants.CONTENT_LENGTH, Long.class);
    if (length != null) {
        metadata.setContentLength(length);
    }

    String type = exchange.getIn().getHeader(S3Constants.CONTENT_TYPE, String.class);
    if (type != null) {
        metadata.setContentType(type);
    }

    String cacheControlValue = exchange.getIn().getHeader(S3Constants.CACHE_CONTROL, String.class);
    if (cacheControlValue != null) {
        metadata.setCacheControl(cacheControlValue);
    }

    String disposition = exchange.getIn().getHeader(S3Constants.CONTENT_DISPOSITION, String.class);
    if (disposition != null) {
        metadata.setContentDisposition(disposition);
    }

    String encoding = exchange.getIn().getHeader(S3Constants.CONTENT_ENCODING, String.class);
    if (encoding != null) {
        metadata.setContentEncoding(encoding);
    }

    String md5 = exchange.getIn().getHeader(S3Constants.CONTENT_MD5, String.class);
    if (md5 != null) {
        metadata.setContentMD5(md5);
    }

    Date lastModifiedValue = exchange.getIn().getHeader(S3Constants.LAST_MODIFIED, Date.class);
    if (lastModifiedValue != null) {
        metadata.setLastModified(lastModifiedValue);
    }

    Map<String, String> userMeta = exchange.getIn().getHeader(S3Constants.USER_METADATA, Map.class);
    if (userMeta != null) {
        metadata.setUserMetadata(userMeta);
    }

    // Remember a File payload so it can optionally be deleted after upload.
    File payloadFile = null;
    Object body = exchange.getIn().getMandatoryBody();
    if (body instanceof File) {
        payloadFile = (File) body;
    }

    PutObjectRequest request = new PutObjectRequest(getConfiguration().getBucketName(),
            determineKey(exchange), exchange.getIn().getMandatoryBody(InputStream.class), metadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        request.setStorageClass(storageClass);
    }

    String cannedAclName = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAclName != null) {
        request.setCannedAcl(CannedAccessControlList.valueOf(cannedAclName));
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // When both a canned ACL and an explicit ACL are supplied, whichever is
        // set last wins — see PutObjectRequest#setAccessControlList.
        request.setAccessControlList(acl);
    }

    LOG.trace("Put object [{}] from exchange [{}]...", request, exchange);

    PutObjectResult uploadResult = getEndpoint().getS3Client().putObject(request);

    LOG.trace("Received result [{}]", uploadResult);

    // Propagate the upload result onto the outgoing message.
    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }

    // Optionally remove a temporary file payload once it has been uploaded.
    if (getConfiguration().isDeleteAfterWrite() && payloadFile != null) {
        IOHelper.close(request.getInputStream());
        FileUtil.deleteFile(payloadFile);
    }
}

From source file:org.caboclo.clients.AmazonClient.java

License:Open Source License

private void mkdir(String path) {
    // Directories are represented as zero-byte objects whose content type
    // marks them as directory placeholders.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setLastModified(new Date());
    metadata.setContentLength(0);
    metadata.setContentType("inode/directory");

    s3.putObject(getBucketName(), path, new ByteArrayInputStream(new byte[0]), metadata);

    System.out.println("Creating Directory: " + path);
}

From source file:org.caboclo.clients.AmazonClient.java

License:Open Source License

/**
 * Uploads the given file to S3 under the supplied object key.
 *
 * @param file the local file to upload
 * @param path the destination object key
 * @throws IOException if the file cannot be read
 */
@Override
public void putFile(File file, String path) throws IOException {
    ObjectMetadata om = new ObjectMetadata();
    om.setLastModified(new Date());
    // May be null when the extension is unknown; the client then applies its default.
    om.setContentType(URLConnection.guessContentTypeFromName(file.getName()));
    om.setContentLength(file.length());
    // try-with-resources: the original leaked this stream (it was never closed,
    // even on the success path).
    try (BufferedInputStream stream = new BufferedInputStream(new FileInputStream(file))) {
        s3.putObject(getBucketName(), path, stream, om);
    }
}

From source file:org.caboclo.clients.AmazonClient.java

License:Open Source License

void createFolder(String bucketName, String path) {
    // A "folder" is just a zero-byte object with a directory content type.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setLastModified(new Date());
    metadata.setContentLength(0);
    metadata.setContentType("inode/directory");

    s3.putObject(bucketName, path, new ByteArrayInputStream(new byte[0]), metadata);

    System.out.println("Creating folder: " + path);
}

From source file:org.finra.herd.dao.impl.MockS3OperationsImpl.java

License:Apache License

/**
 * {@inheritDoc}
 * <p/>
 * This implementation creates a new bucket if the bucket does not already exist.
 */
@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest, AmazonS3 s3Client) {
    LOGGER.debug("putObject(): putObjectRequest.getBucketName() = " + putObjectRequest.getBucketName()
            + ", putObjectRequest.getKey() = " + putObjectRequest.getKey());

    String s3BucketName = putObjectRequest.getBucketName();
    InputStream inputStream = putObjectRequest.getInputStream();

    ObjectMetadata metadata = putObjectRequest.getMetadata();
    if (metadata == null) {
        metadata = new ObjectMetadata();
    }

    // A File payload takes precedence over any input stream on the request.
    File file = putObjectRequest.getFile();
    if (file != null) {
        try {
            inputStream = new FileInputStream(file);
            metadata.setContentLength(file.length());
        } catch (FileNotFoundException e) {
            throw new IllegalArgumentException("File not found " + file, e);
        }
    }

    String s3ObjectKey = putObjectRequest.getKey();
    // Only the special versioning-enabled mock bucket assigns version IDs.
    String s3ObjectVersion = MOCK_S3_BUCKET_NAME_VERSIONING_ENABLED.equals(putObjectRequest.getBucketName())
            ? UUID.randomUUID().toString()
            : null;
    String s3ObjectKeyVersion = s3ObjectKey + (s3ObjectVersion != null ? s3ObjectVersion : "");

    // Buffer the entire object content in memory; content length is reset to
    // the actual byte count read.
    byte[] s3ObjectData;
    try {
        s3ObjectData = IOUtils.toByteArray(inputStream);
        metadata.setContentLength(s3ObjectData.length);
    } catch (IOException e) {
        throw new IllegalArgumentException("Error converting input stream into byte array", e);
    } finally {
        try {
            inputStream.close();
        } catch (IOException e) {
            LOGGER.error("Error closing stream " + inputStream, e);
        }
    }

    // Update the Last-Modified header value. This value not being set causes NullPointerException in S3Dao download related unit tests.
    metadata.setLastModified(new Date());

    MockS3Bucket mockS3Bucket = getOrCreateBucket(s3BucketName);

    MockS3Object mockS3Object = new MockS3Object();
    mockS3Object.setKey(s3ObjectKey);
    mockS3Object.setVersion(s3ObjectVersion);
    mockS3Object.setData(s3ObjectData);
    mockS3Object.setObjectMetadata(metadata);

    if (putObjectRequest.getTagging() != null) {
        mockS3Object.setTags(putObjectRequest.getTagging().getTagSet());
    }

    // The object is registered both under its plain key and its versioned key.
    mockS3Bucket.getObjects().put(s3ObjectKey, mockS3Object);
    mockS3Bucket.getVersions().put(s3ObjectKeyVersion, mockS3Object);

    return new PutObjectResult();
}

From source file:org.weakref.s3fs.util.AmazonS3ClientMock.java

License:Apache License

private S3Element parse(ByteArrayInputStream stream, String bucket, String key) {
    // Build the metadata first, sized from the in-memory stream.
    ObjectMetadata meta = new ObjectMetadata();
    meta.setLastModified(new Date());
    meta.setContentLength(stream.available());

    // Assemble the mock S3 object the way the real client would return it.
    S3Object obj = new S3Object();
    obj.setBucketName(bucket);
    obj.setKey(key);
    obj.setObjectContent(stream);
    obj.setObjectMetadata(meta);

    // TODO: create converter between path permission and s3 permission
    return new S3Element(obj, createAllPermission(), false);
}

From source file:oulib.aws.s3.S3Util.java

/**
 * Generate a small tiff file from large Tiff S3 bucket object <br>
 * Note: the small tiff file will have the same key path as the original one
 *
 * @param s3client : S3 client
 * @param s3 : S3 object that contains the original Tiff content
 * @param targetBucketName : the bucket that stores the small tiff file
 * @param targetKey : key of the object in the target bucket
 * @param compressionRate : compression rate
 * @return : PutObjectResult, or {@code null} if the conversion or upload failed
 */
public static PutObjectResult generateSmallTiff(AmazonS3 s3client, S3Object s3, String targetBucketName,
        String targetKey, double compressionRate) {

    PutObjectResult result = null;
    ByteArrayOutputStream bos = null;
    ByteArrayOutputStream os = null;
    ByteArrayInputStream is = null;
    S3ObjectInputStream s = null;
    ByteArrayInputStream byteInputStream = null;

    try {
        // Force the pure-Java JAI codecs (avoid the native mediaLib path).
        System.setProperty("com.sun.media.jai.disableMediaLib", "true");

        // Read the whole source object into memory for decoding.
        bos = new ByteArrayOutputStream();
        s = s3.getObjectContent();
        byte[] bytes = IOUtils.toByteArray(s);
        byteInputStream = new ByteArrayInputStream(bytes);

        TIFFDecodeParam param = new TIFFDecodeParam();
        ImageDecoder dec = ImageCodec.createImageDecoder("TIFF", byteInputStream, param);

        RenderedImage image = dec.decodeAsRenderedImage();

        RenderingHints qualityHints = new RenderingHints(RenderingHints.KEY_RENDERING,
                RenderingHints.VALUE_RENDER_QUALITY);

        // Downscale by the given rate in both dimensions.
        RenderedOp resizedImage = JAI.create("SubsampleAverage", image, compressionRate, compressionRate,
                qualityHints);

        TIFFEncodeParam params = new com.sun.media.jai.codec.TIFFEncodeParam();

        resizedImage = JAI.create("encode", resizedImage, bos, "TIFF", params);

        BufferedImage imagenew = resizedImage.getSourceImage(0).getAsBufferedImage();

        // Re-encode via ImageIO into the buffer that is actually uploaded.
        os = new ByteArrayOutputStream();
        ImageIO.write(imagenew, "tif", os);
        is = new ByteArrayInputStream(os.toByteArray());

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(os.toByteArray().length);
        metadata.setContentType("image/tiff");
        metadata.setLastModified(new Date());

        os.close();

        imagenew.flush();

        result = s3client.putObject(new PutObjectRequest(targetBucketName, targetKey, is, metadata));
    } catch (IOException | AmazonClientException ex) {
        Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        // Best-effort close of every stream that may have been opened above.
        try {
            if (bos != null) {
                bos.close();
            }
            if (os != null) {
                os.close();
            }
            if (is != null) {
                is.close();
            }
            if (s != null) {
                s.close();
            }
            if (byteInputStream != null) {
                byteInputStream.close();
            }
        } catch (IOException ex) {
            Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    return result;
}

From source file:oulib.aws.s3.S3Util.java

/**
 * Pull out Tiff metadata from input S3 object and inject into the
 * content of target S3 Object;<br>
 * Generate the new output S3 object that has the metadata from input object.
 * The output object is stored in the target object's bucket under its key with
 * a {@code "-copied.tif"} suffix.
 *
 * @param s3client : S3 client
 * @param obj1 : input object that provides metadata
 * @param obj2 : target object that receives metadata
 *
 * @return PutObjectResult, or {@code null} if reading, merging, or uploading failed
 */
public static PutObjectResult copyS3ObjectTiffMetadata(AmazonS3 s3client, S3Object obj1, S3Object obj2) {

    PutObjectResult result = null;

    ByteArrayOutputStream byteArrayOutputStream = null;
    ByteArrayInputStream byteArrayInputStream = null;
    S3ObjectInputStream content1 = null;
    S3ObjectInputStream content2 = null;
    String targetBucketName = obj2.getBucketName();
    // Strip a literal ".tif" extension when present. The original used
    // String.split(".tif"), which treats its argument as a regex where '.'
    // matches any character, so keys containing e.g. "xtif" were also split.
    String targetObjKey = obj2.getKey();
    int extIdx = targetObjKey.lastIndexOf(".tif");
    String outputKey = (extIdx >= 0 ? targetObjKey.substring(0, extIdx) : targetObjKey) + "-copied.tif";

    ImageMetadata metadata1, metadata2;
    TiffImageMetadata tiffMetadata1, tiffMetadata2;
    TiffOutputSet output1, output2;

    try {
        content1 = obj1.getObjectContent();
        content2 = obj2.getObjectContent();

        byte[] bytes1 = IOUtils.toByteArray(content1);
        byte[] bytes2 = IOUtils.toByteArray(content2);

        // NOTE(review): Imaging.getMetadata can return null for images without
        // metadata; that would surface below as an uncaught NullPointerException.
        metadata1 = Imaging.getMetadata(bytes1);
        metadata2 = Imaging.getMetadata(bytes2);

        tiffMetadata1 = (TiffImageMetadata) metadata1;
        tiffMetadata2 = (TiffImageMetadata) metadata2;

        output1 = tiffMetadata1.getOutputSet();
        output2 = tiffMetadata2.getOutputSet();

        TiffOutputDirectory rootDir = output2.getOrCreateRootDirectory();
        TiffOutputDirectory exifDir = output2.getOrCreateExifDirectory();
        TiffOutputDirectory gpsDir = output2.getOrCreateGPSDirectory();

        // Root directory: only copy fields the target does not already define.
        if (null != output1.getRootDirectory()) {
            List<TiffOutputField> fs = output1.getRootDirectory().getFields();
            for (TiffOutputField f1 : fs) {
                if (null == rootDir.findField(f1.tag)
                        // CANNOT create the output image with this tag included!
                        && !"PlanarConfiguration".equals(f1.tagInfo.name)) {
                    rootDir.add(f1);
                }
            }
        }

        // EXIF and GPS directories: source fields replace target fields.
        if (null != output1.getExifDirectory()) {
            for (TiffOutputField f2 : output1.getExifDirectory().getFields()) {
                exifDir.removeField(f2.tagInfo);
                exifDir.add(f2);
            }
        }

        if (null != output1.getGPSDirectory()) {
            for (TiffOutputField f3 : output1.getGPSDirectory().getFields()) {
                gpsDir.removeField(f3.tagInfo);
                gpsDir.add(f3);
            }
        }

        // Re-write the target image with the merged metadata directory set.
        byteArrayOutputStream = new ByteArrayOutputStream();
        TiffImageWriterLossy writerLossy = new TiffImageWriterLossy(output2.byteOrder);
        writerLossy.write(byteArrayOutputStream, output2);

        byteArrayInputStream = new ByteArrayInputStream(byteArrayOutputStream.toByteArray());

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(byteArrayOutputStream.toByteArray().length);
        metadata.setContentType("image/tiff");
        metadata.setLastModified(new Date());

        result = s3client
                .putObject(new PutObjectRequest(targetBucketName, outputKey, byteArrayInputStream, metadata));

    } catch (ImageReadException | IOException | ImageWriteException ex) {
        Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        // Best-effort close of every stream that may have been opened. (The
        // original also tracked two streams that were never assigned; those
        // unused locals and their close branches have been removed.)
        try {
            if (null != content1) {
                content1.close();
            }
            if (null != content2) {
                content2.close();
            }
            if (null != byteArrayInputStream) {
                byteArrayInputStream.close();
            }
            if (null != byteArrayOutputStream) {
                byteArrayOutputStream.close();
            }
        } catch (IOException ex) {
            Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    return result;
}