Example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

List of usage examples for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

Introduction

On this page you can find example usages of the com.amazonaws.services.s3.model PutObjectRequest(String, String, InputStream, ObjectMetadata) constructor.

Prototype

public PutObjectRequest(String bucketName, String key, InputStream input, ObjectMetadata metadata) 

Document

Constructs a new PutObjectRequest object to upload a stream of data to the specified bucket and key.
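Before the project examples below, here is a minimal, self-contained sketch of the constructor in use. The client setup, bucket name ("my-bucket"), and key ("path/to/key") are placeholder assumptions, not taken from any project quoted on this page.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutObjectRequestExample {
    public static void main(String[] args) {
        // Uses credentials and region from the default provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        InputStream input = new ByteArrayInputStream(payload);

        // Declare the content length up front when uploading from a stream.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);

        // "my-bucket" and "path/to/key" are placeholder values.
        PutObjectRequest request = new PutObjectRequest("my-bucket", "path/to/key", input, metadata);
        s3.putObject(request);
    }
}

Setting the content length on the metadata matters when uploading from a stream: if it is unknown, the SDK must buffer the stream in memory to compute it, which is why most of the examples below set it explicitly.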

Usage

From source file:org.kuali.maven.wagon.S3Wagon.java

License:Educational Community License

/**
 * Create a PutObjectRequest based on the source file and destination passed in
 */
protected PutObjectRequest getPutObjectRequest(File source, String destination, TransferProgress progress) {
    try {
        String key = getNormalizedKey(source, destination);
        String bucketName = bucket.getName();
        InputStream input = getInputStream(source, progress);
        ObjectMetadata metadata = getObjectMetadata(source, destination);
        PutObjectRequest request = new PutObjectRequest(bucketName, key, input, metadata);
        request.setCannedAcl(acl);
        return request;
    } catch (FileNotFoundException e) {
        throw new AmazonServiceException("File not found", e);
    }
}

From source file:org.mule.module.s3.simpleapi.content.InputStreamS3ObjectContent.java

License:Open Source License

public PutObjectRequest createPutObjectRequest() {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(length);
    if (md5base64 != null) {
        metadata.setContentMD5(md5base64);
    }
    // Bucket name and key are intentionally left null here; the caller is
    // expected to populate them on the request before executing it.
    return new PutObjectRequest(null, null, inputStream, metadata);
}

From source file:org.nickelproject.nickel.blobStore.S3BlobStore.java

License:Apache License

private void putSinglePartByteArray(final BlobRef blobRef, final byte[] pBytes) {
    final ByteArrayInputStream vByteArrayInputStream = new ByteArrayInputStream(pBytes);
    final ObjectMetadata vMetadata = new ObjectMetadata();
    vMetadata.setContentLength(pBytes.length);
    s3Client.putObject(new PutObjectRequest(bucketName, blobRef.toString(), vByteArrayInputStream, vMetadata));
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void writeHashBlobArchive(HashBlobArchive arc, long id) throws IOException {
    String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    try {
        int csz = toIntExact(arc.getFile().length());
        ObjectMetadata md = new ObjectMetadata();
        md.addUserMetadata("size", Integer.toString(arc.uncompressedLength.get()));
        md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
        md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
        md.addUserMetadata("compressedsize", Integer.toString(csz));
        md.addUserMetadata("bsize", Integer.toString(arc.getLen()));
        md.addUserMetadata("objects", Integer.toString(arc.getSz()));
        md.addUserMetadata("bcompressedsize", Integer.toString(csz));
        md.setContentType("binary/octet-stream");
        md.setContentLength(csz);
        if (md5sum) {
            FileInputStream in = new FileInputStream(arc.getFile());
            String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(in));
            md.setContentMD5(mds);
            md.addUserMetadata("md5sum", mds);
            IOUtils.closeQuietly(in);
        }
        PutObjectRequest req = new PutObjectRequest(this.name, "blocks/" + haName,
                new FileInputStream(arc.getFile()), md);

        if (this.simpleS3)
            s3Service.putObject(req);
        else
            this.multiPartUpload(req);
        byte[] msg = Long.toString(System.currentTimeMillis()).getBytes();
        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(msg));
        md.setContentMD5(mds);
        md.addUserMetadata("md5sum", mds);
        if (this.clustered) {
            md.setContentType("binary/octet-stream");
            md.setContentLength(msg.length);
            PutObjectRequest creq = new PutObjectRequest(this.name, this.getClaimName(id),
                    new ByteArrayInputStream(msg), md);

            s3Service.putObject(creq);
        }
        byte[] hs = arc.getHashesString().getBytes();
        int sz = hs.length;
        if (Main.compress) {
            hs = CompressionUtils.compressLz4(hs);
        }
        byte[] ivb = PassPhrase.getByteIV();
        if (Main.chunkStoreEncryptionEnabled) {
            hs = EncryptUtils.encryptCBC(hs, new IvParameterSpec(ivb));
        }
        md = new ObjectMetadata();
        md.addUserMetadata("size", Integer.toString(sz));
        md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
        md.addUserMetadata("lastaccessed", "0");
        md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
        md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
        md.addUserMetadata("compressedsize", Integer.toString(csz));
        md.addUserMetadata("bsize", Integer.toString(arc.uncompressedLength.get()));
        md.addUserMetadata("bcompressedsize", Integer.toString(csz));
        md.addUserMetadata("objects", Integer.toString(arc.getSz()));

        md.setContentType("binary/octet-stream");
        md.setContentLength(hs.length);
        if (md5sum) {
            mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(hs));
            md.setContentMD5(mds);
            md.addUserMetadata("md5sum", mds);
        }
        req = new PutObjectRequest(this.name, "keys/" + haName, new ByteArrayInputStream(hs), md);
        s3Service.putObject(req);
    } catch (Throwable e) {
        SDFSLogger.getLog().fatal("unable to upload " + arc.getID() + " with id " + id, e);
        throw new IOException(e);
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void uploadFile(File f, String to, String pp) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        InputStream in = null;
        while (to.startsWith(File.separator))
            to = to.substring(1);

        String pth = pp + "/" + EncyptUtils.encString(to, Main.chunkStoreEncryptionEnabled);
        SDFSLogger.getLog().info("uploading " + f.getPath() + " to " + to + " pth " + pth);
        boolean isDir = false;
        boolean isSymlink = false;
        if (!OSValidator.isWindows()) {
            isDir = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isDirectory();
            isSymlink = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isSymbolicLink();
        } else {
            isDir = f.isDirectory();
        }
        if (isSymlink) {
            try {
                HashMap<String, String> metaData = new HashMap<String, String>();
                metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                metaData.put("lastmodified", Long.toString(f.lastModified()));
                String slp = EncyptUtils.encString(Files.readSymbolicLink(f.toPath()).toFile().getPath(),
                        Main.chunkStoreEncryptionEnabled);
                metaData.put("symlink", slp);
                ObjectMetadata md = new ObjectMetadata();
                md.setContentType("binary/octet-stream");
                md.setContentLength(pth.getBytes().length);
                md.setUserMetadata(metaData);
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                throw new IOException(e1);
            }
        } else if (isDir) {
            HashMap<String, String> metaData = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
            metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
            metaData.put("lastmodified", Long.toString(f.lastModified()));
            metaData.put("directory", "true");
            ObjectMetadata md = new ObjectMetadata();
            md.setContentType("binary/octet-stream");
            md.setContentLength(pth.getBytes().length);
            md.setUserMetadata(metaData);
            try {
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                SDFSLogger.getLog().error("error uploading", e1);
                throw new IOException(e1);
            }
        } else {
            String rnd = RandomGUID.getGuid();
            File p = new File(this.staged_sync_location, rnd);
            File z = new File(this.staged_sync_location, rnd + ".z");
            File e = new File(this.staged_sync_location, rnd + ".e");
            while (z.exists()) {
                rnd = RandomGUID.getGuid();
                p = new File(this.staged_sync_location, rnd);
                z = new File(this.staged_sync_location, rnd + ".z");
                e = new File(this.staged_sync_location, rnd + ".e");
            }
            try {
                BufferedInputStream is = new BufferedInputStream(new FileInputStream(f));
                BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(p));
                IOUtils.copy(is, os);
                os.flush();
                os.close();
                is.close();
                if (Main.compress) {
                    CompressionUtils.compressFile(p, z);
                    p.delete();
                    p = z;
                }
                byte[] ivb = null;
                if (Main.chunkStoreEncryptionEnabled) {
                    try {
                        ivb = PassPhrase.getByteIV();
                        EncryptUtils.encryptFile(p, e, new IvParameterSpec(ivb));

                    } catch (Exception e1) {
                        throw new IOException(e1);
                    }
                    p.delete();
                    p = e;
                }
                String objName = pth;
                ObjectMetadata md = new ObjectMetadata();
                Map<String, String> umd = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
                md.setUserMetadata(umd);
                md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
                md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                if (ivb != null)
                    md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
                md.addUserMetadata("lastmodified", Long.toString(f.lastModified()));
                if (simpleS3) {
                    md.setContentType("binary/octet-stream");
                    in = new BufferedInputStream(new FileInputStream(p), 32768);
                    try {
                        if (md5sum) {
                            byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                            in.close();
                            String mds = BaseEncoding.base64().encode(md5Hash);
                            md.setContentMD5(mds);
                            md.addUserMetadata("md5sum", mds);
                        }

                    } catch (NoSuchAlgorithmException e2) {
                        SDFSLogger.getLog().error("while hashing", e2);
                        throw new IOException(e2);
                    }

                    in = new FileInputStream(p);
                    md.setContentLength(p.length());
                    try {
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        s3Service.putObject(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                        SDFSLogger.getLog().debug(
                                "uploaded=" + f.getPath() + " lm=" + md.getUserMetadata().get("lastmodified"));
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {

                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                } else {
                    try {
                        md.setContentType("binary/octet-stream");
                        in = new BufferedInputStream(new FileInputStream(p), 32768);
                        byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                        in.close();
                        String mds = BaseEncoding.base64().encode(md5Hash);
                        md.setContentMD5(mds);
                        md.addUserMetadata("md5sum", mds);
                        in = new BufferedInputStream(new FileInputStream(p), 32768);

                        md.setContentLength(p.length());
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        multiPartUpload(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {

                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                }
            } finally {
                try {
                    if (in != null)
                        in.close();
                } finally {
                    p.delete();
                    z.delete();
                    e.delete();
                }
            }
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void checkoutObject(long id, int claims) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        if (!this.clustered)
            throw new IOException("volume is not clustered");
        ObjectMetadata om = this.getClaimMetaData(id);
        if (om != null)
            return;
        else {
            String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
            om = s3Service.getObjectMetadata(this.name, "keys/" + haName);
            Map<String, String> md = om.getUserMetadata();
            md.put("objects", Integer.toString(claims));
            if (md.containsKey("deleted")) {
                md.remove("deleted");
            }
            if (md.containsKey("deleted-objects")) {
                md.remove("deleted-objects");
            }
            if (md.containsKey("bsize")) {
                HashBlobArchive.currentLength.addAndGet(Integer.parseInt(md.get("bsize")));
            }
            if (md.containsKey("bcompressedsize")) {
                HashBlobArchive.compressedLength.addAndGet(Integer.parseInt(md.get("bcompressedsize")));
            }
            byte[] msg = Long.toString(System.currentTimeMillis()).getBytes();

            om.setContentLength(msg.length);
            try {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(msg));
                om.setContentMD5(mds);
                om.addUserMetadata("md5sum", mds);
            } catch (Exception e) {
                throw new IOException(e);
            }
            try {
                PutObjectRequest creq = new PutObjectRequest(this.name, this.getClaimName(id),
                        new ByteArrayInputStream(msg), om);
                s3Service.putObject(creq);
            } catch (AmazonS3Exception e1) {
                if (e1.getStatusCode() == 409) {
                    try {
                        s3Service.deleteObject(this.name, this.getClaimName(id));
                        this.checkoutObject(id, claims);
                        return;
                    } catch (Exception e2) {
                        throw new IOException(e2);
                    }
                } else {

                    throw new IOException(e1);
                }
            } catch (Exception e1) {
                // SDFSLogger.getLog().error("error uploading", e1);
                throw new IOException(e1);
            }
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void checkoutFile(String name) throws IOException {
    String pth = "claims/" + name + "/"
            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    try {
        byte[] b = Long.toString(System.currentTimeMillis()).getBytes();
        ObjectMetadata om = new ObjectMetadata();
        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(b));
        om.setContentMD5(mds);
        om.addUserMetadata("md5sum", mds);
        om.setContentLength(b.length);
        PutObjectRequest creq = new PutObjectRequest(this.name, pth, new ByteArrayInputStream(b), om);
        s3Service.putObject(creq);
    } catch (AmazonS3Exception e1) {
        if (e1.getStatusCode() == 409) {
            try {
                s3Service.deleteObject(this.name, pth);
                this.checkoutFile(name);
                return;
            } catch (Exception e2) {
                throw new IOException(e2);
            }
        } else {

            throw new IOException(e1);
        }
    } catch (Exception e1) {
        // SDFSLogger.getLog().error("error uploading", e1);
        throw new IOException(e1);
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}

From source file:org.openflamingo.fs.s3.S3ObjectProvider.java

License:Apache License

@Override
public boolean mkdir(String path) {
    Assert.hasLength(path, "Please enter the path.");

    String bucket = S3Utils.getBucket(path);
    String relativePath = S3Utils.getObjectKey(path);

    try {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(0);
        InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, relativePath, emptyContent, metadata);
        awsClient.putObject(putObjectRequest);
        /*
                    auditService.mkdir(FileSystemType.S3, username, path);
        */
        return true;
    } catch (AmazonServiceException ase) {
        throw new FileSystemException("Cannot create the directory.", ase);
    } catch (AmazonClientException ace) {
        throw new FileSystemException("Cannot create the directory.", ace);
    }
}

From source file:org.openflamingo.fs.s3.S3ObjectProvider.java

License:Apache License

public boolean save(InputStream is, long size, String path) {
    Assert.notNull(is, "Please enter the input stream.");
    Assert.hasLength(path, "Please enter the path.");

    try {
        String bucket = S3Utils.getBucket(path);
        String key = StringUtils.remove(path, "/" + bucket + "/");
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setHeader(Headers.CONTENT_LENGTH, size);
        awsClient.putObject(new PutObjectRequest(bucket, key, is, metadata));
        return true;
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, " + "which means your request made it "
                + "to Amazon S3, but was rejected with an error " + "response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());

        throw new FileSystemException("Cannot copy the file.", ase);
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, " + "which means the client encountered "
                + "an internal error while trying to " + " communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());

        throw new FileSystemException("Cannot copy the file.", ace);
    }
}

From source file:org.openinfinity.cloud.domain.repository.deployer.BucketRepositoryAWSImpl.java

License:Apache License

/**
 * Creates the bucket and uploads the contents of the input stream under the given key.
 *
 * @param inputStream Represents the file input stream.
 * @param bucketName Represents the name of the bucket for the deployment.
 * @param key Represents the key under which to store the new object.
 * @param metadataMap Represents the user metadata attached to the new object.
 * @return the key of the created resource.
 */
public String createBucket(InputStream inputStream, String bucketName, String key,
        Map<String, String> metadataMap) {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setUserMetadata(metadataMap);
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, inputStream, objectMetadata);
    simpleStorageService.createBucket(bucketName);
    simpleStorageService.putObject(putObjectRequest);
    return key;
}