Example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

List of usage examples for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest.

Prototype

public PutObjectRequest(String bucketName, String key, String redirectLocation) 

Source Link

Document

Constructs a new PutObjectRequest object to perform a redirect for the specified bucket and key.

Usage

From source file:org.dspace.storage.bitstore.S3BitStoreService.java

License:BSD License

/**
 * Store a stream of bits./* www  .  j  a  va 2  s .  co m*/
 *
 * <p>
 * If this method returns successfully, the bits have been stored.
 * If an exception is thrown, the bits have not been stored.
 * </p>
 *
 * @param in
 *            The stream of bits to store
 * @exception java.io.IOException
 *             If a problem occurs while storing the bits
 *
 * @return Map containing technical metadata (size, checksum, etc)
 */
public void put(Bitstream bitstream, InputStream in) throws IOException {
    String key = getFullKey(bitstream.getInternalId());
    //Copy istream to temp file, and send the file, with some metadata
    File scratchFile = File.createTempFile(bitstream.getInternalId(), "s3bs");
    try {
        FileUtils.copyInputStreamToFile(in, scratchFile);
        Long contentLength = Long.valueOf(scratchFile.length());

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, scratchFile);
        PutObjectResult putObjectResult = s3Service.putObject(putObjectRequest);

        bitstream.setSizeBytes(contentLength);
        bitstream.setChecksum(putObjectResult.getETag());
        bitstream.setChecksumAlgorithm(CSA);

        scratchFile.delete();

    } catch (Exception e) {
        log.error("put(" + bitstream.getInternalId() + ", is)", e);
        throw new IOException(e);
    } finally {
        if (scratchFile.exists()) {
            scratchFile.delete();
        }
    }
}

From source file:org.ecocean.media.S3AssetStore.java

License:Open Source License

/**
 * Copies a local file into this asset store by uploading it to S3.
 *
 * @param file local file to upload; must exist
 * @param params store parameters; must contain "bucket" and "key" entries
 * @param createMediaAsset when true, build and return a MediaAsset for the
 *        stored file; when false, return null after the upload
 * @return a new MediaAsset referencing the stored file, or null when
 *         createMediaAsset is false
 * @throws IOException if the store is read-only, the file does not exist,
 *         or the file exceeds the 5 GB single-PUT limit
 * @throws IllegalArgumentException if the bucket or key parameter is missing
 */
public MediaAsset copyIn(final File file, final JSONObject params, final boolean createMediaAsset)
        throws IOException {
    if (!this.writable)
        throw new IOException(this.name + " is a read-only AssetStore");
    if (!file.exists())
        throw new IOException(file.toString() + " does not exist");

    //TODO handle > 5G files:  https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
    // FIX: the threshold must be computed in long arithmetic. The original
    // int expression 5 * 1024 * 1024 * 1024 overflows to 1 GiB, silently
    // rejecting files far below the intended 5 GB limit.
    if (file.length() > 5L * 1024 * 1024 * 1024)
        throw new IOException("S3AssetStore does not yet support file upload > 5G");

    Object bp = getParameter(params, "bucket");
    Object kp = getParameter(params, "key");
    if ((bp == null) || (kp == null))
        throw new IllegalArgumentException("Invalid bucket and/or key value");
    getS3Client().putObject(new PutObjectRequest(bp.toString(), kp.toString(), file));
    if (!createMediaAsset)
        return null;
    return new MediaAsset(this, params);
}

From source file:org.finra.dm.dao.impl.MockS3OperationsImpl.java

License:Apache License

/**
 * Uploads a list of files.
 * <p/>
 * Delegates to {@link #putObject(PutObjectRequest, AmazonS3Client)} for each file.
 * Reports the whole transfer as already complete (mock implementation).
 */
@Override
public MultipleFileUpload uploadFileList(String bucketName, String virtualDirectoryKeyPrefix, File directory,
        List<File> files, ObjectMetadataProvider metadataProvider, TransferManager transferManager) {
    LOGGER.debug("uploadFileList(): bucketName = " + bucketName + ", virtualDirectoryKeyPrefix = "
            + virtualDirectoryKeyPrefix + ", directory = " + directory + ", files = " + files);

    String directoryPath = directory.getAbsolutePath();

    // Trim trailing slashes once up front. This is loop-invariant; the original
    // re-trimmed it on every iteration and, when the file list was empty, never
    // trimmed it at all before passing it to MultipleFileUploadImpl below.
    virtualDirectoryKeyPrefix = virtualDirectoryKeyPrefix.replaceAll("/+$", "");

    long totalFileLength = 0;
    List<Upload> subTransfers = new ArrayList<>();
    for (File file : files) {
        // Get path to file relative to the specified directory.
        String relativeFilePath = file.getAbsolutePath().substring(directoryPath.length());

        // Replace any backslashes (i.e. Windows separator) with a forward slash.
        relativeFilePath = relativeFilePath.replace("\\", "/");

        // Remove any leading slashes.
        relativeFilePath = relativeFilePath.replaceAll("^/+", "");

        String s3ObjectKey = virtualDirectoryKeyPrefix + "/" + relativeFilePath;
        totalFileLength += file.length();

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, s3ObjectKey, file);

        putObject(putObjectRequest, (AmazonS3Client) transferManager.getAmazonS3Client());

        subTransfers.add(new UploadImpl(null, null, null, null));
    }

    // Mark the mock transfer as fully complete.
    TransferProgress progress = new TransferProgress();
    progress.setTotalBytesToTransfer(totalFileLength);
    progress.updateProgress(totalFileLength);

    MultipleFileUploadImpl multipleFileUpload = new MultipleFileUploadImpl(null, progress, null,
            virtualDirectoryKeyPrefix, bucketName, subTransfers);
    multipleFileUpload.setState(TransferState.Completed);
    return multipleFileUpload;
}

From source file:org.finra.dm.dao.impl.S3DaoImpl.java

License:Apache License

@Override
public S3FileTransferResultsDto uploadFile(final S3FileTransferRequestParamsDto params)
        throws InterruptedException {
    // Announce the transfer before it starts.
    LOGGER.info(String.format("Uploading %s local file to s3://%s/%s ...", params.getLocalPath(),
            params.getS3BucketName(), params.getS3KeyPrefix()));

    // Delegate the actual upload to the shared transfer harness; the Transferer
    // callback builds the put request once a TransferManager is available.
    S3FileTransferResultsDto results = performTransfer(params, new Transferer() {
        @Override
        public Transfer performTransfer(TransferManager transferManager) {
            // Local source file to upload.
            File sourceFile = new File(params.getLocalPath());

            // Build object metadata from the request parameters.
            ObjectMetadata objectMetadata = new ObjectMetadata();
            prepareMetadata(params, objectMetadata);

            // Assemble the put request and hand it to the transfer manager.
            PutObjectRequest request = new PutObjectRequest(params.getS3BucketName(),
                    params.getS3KeyPrefix(), sourceFile);
            request.setMetadata(objectMetadata);

            return s3Operations.upload(request, transferManager);
        }
    });

    // Summarize how many bytes went where and how long it took.
    LOGGER.info("Local file \"" + params.getLocalPath() + "\" contains " + results.getTotalBytesTransferred()
            + " byte(s) which was successfully transferred to S3 key prefix \"" + params.getS3KeyPrefix()
            + "\" in bucket \"" + params.getS3BucketName() + "\" in "
            + DmDateUtils.formatDuration(results.getDurationMillis(), true));

    LOGGER.info(String.format("Overall transfer rate: %.2f kBytes/s (%.2f Mbits/s)",
            getTransferRateInKilobytesPerSecond(results.getTotalBytesTransferred(),
                    results.getDurationMillis()),
            getTransferRateInMegabitsPerSecond(results.getTotalBytesTransferred(),
                    results.getDurationMillis())));

    return results;
}

From source file:org.finra.herd.dao.impl.MockS3OperationsImpl.java

License:Apache License

/**
 * Uploads a list of files via {@link #putObject} for each file, then reports
 * the whole transfer as already complete (mock implementation).
 */
@Override
public MultipleFileUpload uploadFileList(String bucketName, String virtualDirectoryKeyPrefix, File directory,
        List<File> files, ObjectMetadataProvider metadataProvider, TransferManager transferManager) {
    LOGGER.debug("uploadFileList(): bucketName = " + bucketName + ", virtualDirectoryKeyPrefix = "
            + virtualDirectoryKeyPrefix + ", directory = " + directory + ", files = " + files);

    String directoryPath = directory.getAbsolutePath();

    // Trim trailing slashes once up front. This is loop-invariant; the original
    // re-trimmed it on every iteration and, when the file list was empty, never
    // trimmed it at all before passing it to MultipleFileUploadImpl below.
    virtualDirectoryKeyPrefix = virtualDirectoryKeyPrefix.replaceAll("/+$", "");

    long totalFileLength = 0;
    List<Upload> subTransfers = new ArrayList<>();
    for (File file : files) {
        // Get path to file relative to the specified directory.
        String relativeFilePath = file.getAbsolutePath().substring(directoryPath.length());

        // Replace any backslashes (i.e. Windows separator) with a forward slash.
        relativeFilePath = relativeFilePath.replace("\\", "/");

        // Remove any leading slashes.
        relativeFilePath = relativeFilePath.replaceAll("^/+", "");

        String s3ObjectKey = virtualDirectoryKeyPrefix + "/" + relativeFilePath;
        totalFileLength += file.length();

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, s3ObjectKey, file);

        // Let the provider populate per-object metadata before the put.
        ObjectMetadata objectMetadata = new ObjectMetadata();
        metadataProvider.provideObjectMetadata(null, objectMetadata);
        putObjectRequest.setMetadata(objectMetadata);

        putObject(putObjectRequest, transferManager.getAmazonS3Client());

        subTransfers.add(new UploadImpl(null, null, null, null));
    }

    // Mark the mock transfer as fully complete.
    TransferProgress progress = new TransferProgress();
    progress.setTotalBytesToTransfer(totalFileLength);
    progress.updateProgress(totalFileLength);

    MultipleFileUploadImpl multipleFileUpload = new MultipleFileUploadImpl(null, progress, null,
            virtualDirectoryKeyPrefix, bucketName, subTransfers);
    multipleFileUpload.setState(TransferState.Completed);
    return multipleFileUpload;
}

From source file:org.finra.herd.dao.impl.S3DaoImpl.java

License:Apache License

@Override
public S3FileTransferResultsDto uploadFile(final S3FileTransferRequestParamsDto params)
        throws InterruptedException {
    LOGGER.info("Uploading local file to S3... localPath=\"{}\" s3Key=\"{}\" s3BucketName=\"{}\"",
            params.getLocalPath(), params.getS3KeyPrefix(), params.getS3BucketName());

    // Delegate the actual upload to the shared transfer harness; the Transferer
    // callback builds the put request once a TransferManager is available.
    S3FileTransferResultsDto results = performTransfer(params, new Transferer() {
        @Override
        public Transfer performTransfer(TransferManager transferManager) {
            // Local source file to upload.
            File sourceFile = new File(params.getLocalPath());

            // Build object metadata from the request parameters.
            ObjectMetadata objectMetadata = new ObjectMetadata();
            prepareMetadata(params, objectMetadata);

            // Assemble the put request and hand it to the transfer manager.
            PutObjectRequest request = new PutObjectRequest(params.getS3BucketName(),
                    params.getS3KeyPrefix(), sourceFile);
            request.setMetadata(objectMetadata);

            return s3Operations.upload(request, transferManager);
        }
    });

    LOGGER.info(
            "Uploaded local file to the S3. localPath=\"{}\" s3Key=\"{}\" s3BucketName=\"{}\" totalBytesTransferred={} transferDuration=\"{}\"",
            params.getLocalPath(), params.getS3KeyPrefix(), params.getS3BucketName(),
            results.getTotalBytesTransferred(), HerdDateUtils.formatDuration(results.getDurationMillis()));

    logOverallTransferRate(results);

    return results;
}

From source file:org.geoserver.taskmanager.external.impl.S3FileServiceImpl.java

License:Open Source License

@Override
public void create(String filePath, InputStream content) throws IOException {
    // Reject missing arguments before touching S3.
    if (content == null) {
        throw new IllegalArgumentException("Content of a file can not be null.");
    }
    if (filePath == null) {
        throw new IllegalArgumentException("Name of a file can not be null.");
    }
    if (checkFileExists(filePath)) {
        throw new IllegalArgumentException("The file already exists");
    }

    // The SDK needs a known content length, so buffer the stream into a
    // temporary file before uploading.
    File scratchFile = File.createTempFile("prefix", String.valueOf(System.currentTimeMillis()));
    try {
        // Lazily create the backing bucket on first use.
        if (!getS3Client().doesBucketExist(rootFolder)) {
            getS3Client().createBucket(rootFolder);
        }

        FileUtils.copyInputStreamToFile(content, scratchFile);

        // Attach the configured content encoding to the object.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentEncoding(ENCODING);

        getS3Client()
                .putObject(new PutObjectRequest(rootFolder, filePath, scratchFile).withMetadata(metadata));
    } catch (AmazonClientException e) {
        throw new IOException(e);
    } finally {
        // Remove the buffer file whether or not the upload succeeded.
        if (scratchFile.exists()) {
            scratchFile.delete();
        }
    }
}

From source file:org.jdamico.s3.components.S3Component.java

License:Apache License

/**
 * Uploads the given file to the configured bucket under {@code keyName},
 * translating any AWS failure into a {@link TopLevelException}.
 */
public void upload(AppProperties appProperties, File uploadFile, String keyName) throws TopLevelException {
    AmazonS3 client = getS3Client(appProperties);
    Utils utils = Utils.getInstance();
    try {
        utils.handleVerboseLog(appProperties, 'i', "Uploading a new object to S3 from a file\n");
        client.putObject(new PutObjectRequest(appProperties.getBucketnName(), keyName, uploadFile));
    } catch (AmazonServiceException ase) {
        // The request reached S3 but was rejected; log every error detail.
        utils.handleVerboseLog(appProperties, 'e',
                "Caught an AmazonServiceException, which means your request made it to Amazon S3, but was rejected with an error response for some reason.");
        utils.handleVerboseLog(appProperties, 'e', "Error Message:    " + ase.getMessage());
        utils.handleVerboseLog(appProperties, 'e', "HTTP Status Code: " + ase.getStatusCode());
        utils.handleVerboseLog(appProperties, 'e', "AWS Error Code:   " + ase.getErrorCode());
        utils.handleVerboseLog(appProperties, 'e', "Error Type:       " + ase.getErrorType());
        utils.handleVerboseLog(appProperties, 'e', "Request ID:       " + ase.getRequestId());
        throw new TopLevelException(appProperties, ase);
    } catch (AmazonClientException ace) {
        // Client-side failure (e.g. no network) before reaching S3.
        throw new TopLevelException(appProperties,
                "Caught an AmazonClientException, which means the client encountered an internal error while trying to communicate with S3, such as not being able to access the network. Error Message: "
                        + ace.getMessage());
    }
}

From source file:org.jenkinsci.plugins.appio.service.S3Service.java

License:Open Source License

/**
 * @param bucketName/* www  .j  a va 2  s.  co  m*/
 * @param keyName
 * @param uploadFile
 * @return
 */
public String getUploadUrl(String bucketName, String keyName, File uploadFile)
        throws AmazonServiceException, AmazonClientException {

    try {
        s3client.putObject(new PutObjectRequest(bucketName, keyName, uploadFile)
                .withCannedAcl(CannedAccessControlList.PublicRead));

    } catch (AmazonServiceException ase) {
        LOGGER.fine("AmazonServiceException");
        LOGGER.fine("Error Message:    " + ase.getMessage());
        LOGGER.fine("HTTP Status Code: " + ase.getStatusCode());
        LOGGER.fine("AWS Error Code:   " + ase.getErrorCode());
        LOGGER.fine("Error Type:       " + ase.getErrorType());
        LOGGER.fine("Request ID:       " + ase.getRequestId());
        throw ase;
    } catch (AmazonClientException ace) {
        LOGGER.fine("AmazonClientException");
        LOGGER.fine("Error Message: " + ace.getMessage());
        throw ace;
    }

    String s3PublicUrl = "https://s3.amazonaws.com/" + bucketName + "/" + keyName;
    LOGGER.fine("S3 public URL: " + s3PublicUrl);
    return s3PublicUrl;
}

From source file:org.mobicents.servlet.restcomm.amazonS3.S3AccessTool.java

License:Open Source License

/**
 * Uploads a local recording file to S3 and returns a presigned download URL
 * for it.
 *
 * <p>The presigned GET URL is generated <em>before</em> the upload, buying
 * time for the file to finish being written to disk; the file is then
 * uploaded once it appears.</p>
 *
 * @param fileToUpload string form of the local file's URI
 * @return a presigned download URI for the uploaded object, or {@code null}
 *         on any failure (all errors are logged and swallowed, not propagated)
 */
public URI uploadFile(final String fileToUpload) {
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, securityKey);
    AmazonS3 s3client = new AmazonS3Client(credentials);
    try {
        // Target location is "bucket" or "bucket/folder" when a folder is set.
        StringBuffer bucket = new StringBuffer();
        bucket.append(bucketName);
        if (folder != null && !folder.isEmpty())
            bucket.append("/").append(folder);
        URI fileUri = URI.create(fileToUpload);
        logger.info("File to upload to S3: " + fileUri.toString());
        File file = new File(fileUri);
        // First generate the presigned URL, buying some time for the file to
        // be written to disk before the upload below.
        Date date = new Date();
        Calendar cal = Calendar.getInstance();
        cal.setTime(date);
        if (daysToRetainPublicUrl > 0) {
            cal.add(Calendar.DATE, daysToRetainPublicUrl);
        } else {
            // By default the public URL will be valid for 180 days.
            cal.add(Calendar.DATE, 180);
        }
        date = cal.getTime();
        GeneratePresignedUrlRequest generatePresignedUrlRequestGET = new GeneratePresignedUrlRequest(
                bucket.toString(), file.getName());
        generatePresignedUrlRequestGET.setMethod(HttpMethod.GET);
        generatePresignedUrlRequestGET.setExpiration(date);

        URL downloadUrl = s3client.generatePresignedUrl(generatePresignedUrlRequestGET);

        // Second, upload the file to S3.
        // NOTE(review): empty-bodied busy-wait — FileUtils.waitFor already
        // blocks up to 30 seconds per call, so this loops indefinitely until
        // the file appears; consider a bounded number of retries.
        while (!FileUtils.waitFor(file, 30)) {
        }
        if (file.exists()) {
            PutObjectRequest putRequest = new PutObjectRequest(bucket.toString(), file.getName(), file);
            ObjectMetadata metadata = new ObjectMetadata();
            // Content type is guessed from the file name extension.
            metadata.setContentType(new MimetypesFileTypeMap().getContentType(file));
            putRequest.setMetadata(metadata);
            if (reducedRedundancy)
                putRequest.setStorageClass(StorageClass.ReducedRedundancy);
            s3client.putObject(putRequest);

            // Optionally delete the local copy once it is safely in S3.
            if (removeOriginalFile) {
                removeLocalFile(file);
            }
            return downloadUrl.toURI();
        } else {
            logger.error("Timeout waiting for the recording file: " + file.getAbsolutePath());
            return null;
        }
    } catch (AmazonServiceException ase) {
        // Request reached S3 but was rejected; log details and give up.
        logger.error("Caught an AmazonServiceException");
        logger.error("Error Message:    " + ase.getMessage());
        logger.error("HTTP Status Code: " + ase.getStatusCode());
        logger.error("AWS Error Code:   " + ase.getErrorCode());
        logger.error("Error Type:       " + ase.getErrorType());
        logger.error("Request ID:       " + ase.getRequestId());
        return null;
    } catch (AmazonClientException ace) {
        // Client-side failure (e.g. network) before reaching S3.
        logger.error("Caught an AmazonClientException, which ");
        logger.error("Error Message: " + ace.getMessage());
        return null;
    } catch (URISyntaxException e) {
        logger.error("URISyntaxException: " + e.getMessage());
        return null;
    }
}