Example usage for com.amazonaws.services.s3.model ObjectMetadata getContentLength

Introduction

On this page you can find example usages of com.amazonaws.services.s3.model.ObjectMetadata#getContentLength, collected from open-source projects.

Prototype

public long getContentLength() 

Document

Gets the Content-Length HTTP header, indicating the size of the associated object in bytes.
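
For orientation, here is a minimal sketch of the call in isolation, assuming default client configuration; the bucket and key names are placeholders. getObjectMetadata issues a HEAD request, and getContentLength reads the size from the returned metadata.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class GetContentLengthExample {
    public static void main(String[] args) {
        // Placeholder bucket and key; substitute your own values.
        String bucket = "my-bucket";
        String key = "path/to/object";

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // HEAD request: fetches metadata without downloading the object body.
        ObjectMetadata metadata = s3.getObjectMetadata(bucket, key);

        // Size of the stored object in bytes, from the Content-Length header.
        long sizeInBytes = metadata.getContentLength();
        System.out.println(key + " is " + sizeInBytes + " bytes");
    }
}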

Usage

From source file:com.liferay.portal.store.s3.S3Store.java

License:Open Source License
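
In Liferay's S3 file store, getContentLength() supplies the file size: the store resolves the head version of a file, issues a metadata request for the corresponding key, and returns the reported length.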

@Override
public long getFileSize(long companyId, long repositoryId, String fileName) throws PortalException {

    String headVersionLabel = getHeadVersionLabel(companyId, repositoryId, fileName);

    String key = _s3KeyTransformer.getFileVersionKey(companyId, repositoryId, fileName, headVersionLabel);

    GetObjectMetadataRequest getObjectMetadataRequest = new GetObjectMetadataRequest(_bucketName, key);

    ObjectMetadata objectMetadata = _amazonS3.getObjectMetadata(getObjectMetadataRequest);

    if (objectMetadata == null) {
        throw new NoSuchFileException(companyId, repositoryId, fileName);
    }

    return objectMetadata.getContentLength();
}

From source file:com.netflix.exhibitor.core.config.s3.S3ConfigProvider.java

License:Apache License
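
Exhibitor uses getContentLength() as a sanity check on its stored configuration: metadata is returned only if the config object exists and is non-empty.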

private ObjectMetadata getConfigMetadata() throws Exception {
    try {
        ObjectMetadata metadata = s3Client.getObjectMetadata(arguments.getBucket(), arguments.getKey());
        if (metadata.getContentLength() > 0) {
            return metadata;
        }
    } catch (AmazonS3Exception e) {
        if (!isNotFoundError(e)) {
            throw e;
        }
    }
    return null;
}

From source file:com.nextdoor.bender.S3SnsNotifier.java

License:Apache License
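
This command-line tool republishes S3 objects as SNS notifications; for each key it fetches the object's metadata so the generated S3 event carries the object's actual size.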

public static void main(String[] args) throws ParseException, InterruptedException, IOException {
    formatter = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").withZoneUTC();

    /*
     * Parse cli arguments
     */
    Options options = new Options();
    options.addOption(Option.builder().longOpt("bucket").hasArg().required()
            .desc("Name of S3 bucket to list s3 objects from").build());
    options.addOption(Option.builder().longOpt("key-file").hasArg().required()
            .desc("Local file of S3 keys to process").build());
    options.addOption(
            Option.builder().longOpt("sns-arn").hasArg().required().desc("SNS arn to publish to").build());
    options.addOption(Option.builder().longOpt("throttle-ms").hasArg()
            .desc("Amount of ms to wait between publishing to SNS").build());
    options.addOption(Option.builder().longOpt("processed-file").hasArg()
            .desc("Local file to use to store procssed S3 object names").build());
    options.addOption(Option.builder().longOpt("skip-processed").hasArg(false)
            .desc("Whether to skip S3 objects that have been processed").build());
    options.addOption(
            Option.builder().longOpt("dry-run").hasArg(false).desc("If set do not publish to SNS").build());

    CommandLineParser parser = new DefaultParser();
    CommandLine cmd = parser.parse(options, args);

    String bucket = cmd.getOptionValue("bucket");
    String keyFile = cmd.getOptionValue("key-file");
    String snsArn = cmd.getOptionValue("sns-arn");
    String processedFile = cmd.getOptionValue("processed-file", null);
    boolean skipProcessed = cmd.hasOption("skip-processed");
    dryRun = cmd.hasOption("dry-run");
    long throttle = Long.parseLong(cmd.getOptionValue("throttle-ms", "-1"));

    if (processedFile != null) {
        File file = new File(processedFile);

        if (!file.exists()) {
            logger.debug("creating local file to store processed s3 object names: " + processedFile);
            file.createNewFile();
        }
    }

    /*
     * Import S3 keys that have been processed
     */
    if (skipProcessed && processedFile != null) {
        try (BufferedReader br = new BufferedReader(new FileReader(processedFile))) {
            String line;
            while ((line = br.readLine()) != null) {
                alreadyPublished.add(line.trim());
            }
        }
    }

    /*
     * Setup writer for file containing processed S3 keys
     */
    FileWriter fw = null;
    BufferedWriter bw = null;
    if (processedFile != null) {
        fw = new FileWriter(processedFile, true);
        bw = new BufferedWriter(fw);
    }

    /*
     * Create clients
     */
    AmazonS3Client s3Client = new AmazonS3Client();
    AmazonSNSClient snsClient = new AmazonSNSClient();

    /*
     * Get S3 object list
     */
    try (BufferedReader br = new BufferedReader(new FileReader(keyFile))) {
        String line;
        while ((line = br.readLine()) != null) {
            String key = line.trim();

            if (alreadyPublished.contains(key)) {
                logger.info("skipping " + key);
                continue;
            }

            ObjectMetadata om = s3Client.getObjectMetadata(bucket, key);

            S3EventNotification s3Notification = getS3Notification(key, bucket, om.getContentLength());

            String json = s3Notification.toJson();

            /*
             * Publish to SNS
             */
            if (publish(snsArn, json, snsClient, key) && processedFile != null) {
                bw.write(key + "\n");
                bw.flush();
            }

            if (throttle != -1) {
                Thread.sleep(throttle);
            }

        }
    }

    if (processedFile != null) {
        bw.close();
        fw.close();
    }
}

From source file:com.proofpoint.event.collector.combiner.S3StorageHelper.java

License:Apache License
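
A small helper that converts S3 object metadata into Proofpoint's StoredObject value type, taking the size from getContentLength().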

public static StoredObject updateStoredObject(URI location, ObjectMetadata metadata) {
    Preconditions.checkNotNull(location, "location is null");
    Preconditions.checkNotNull(metadata, "metadata is null");

    return new StoredObject(location, metadata.getETag(), metadata.getContentLength(),
            metadata.getLastModified().getTime());
}

From source file:com.tango.BucketSyncer.KeyJobs.S32GCSKeyCopyJob.java

License:Apache License
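
BucketSyncer's S3-to-GCS copy job streams each object into Google Cloud Storage and, on success, adds the source object's getContentLength() to its bytes-copied counter.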

boolean keyCopied(ObjectMetadata sourceMetadata) {
    boolean copied = false;
    String key = summary.getKey();
    MirrorOptions options = context.getOptions();
    boolean verbose = options.isVerbose();
    int maxRetries = options.getMaxRetries();
    MirrorStats stats = context.getStats();

    InputStreamContent mediaContent = null;

    for (int tries = 0; tries < maxRetries; tries++) {

        if (verbose) {
            log.info("copying (try # {} ): {} to: {}", new Object[] { tries, key, keydest });
        }

        //get object from S3
        //deal with exception that the object has been deleted when trying to fetch it from S3
        S3Object s3object = null;

        try {
            s3object = s3Client.getObject(new GetObjectRequest(options.getSourceBucket(), key));
        } catch (AmazonServiceException e) {
            log.error("Failed to fetch object from S3. Object {} may have been deleted: {}", key, e);
        } catch (Exception e) {
            log.error("Failed to fetch object from S3. Object {} may have been deleted: {}", key, e);
        }

        if (s3object != null) {

            InputStream inputStream = s3object.getObjectContent();

            String type = s3object.getObjectMetadata().getContentType();
            mediaContent = new InputStreamContent(type, inputStream);

            String etag = s3object.getObjectMetadata().getETag();
            StorageObject objectMetadata = new StorageObject().setMetadata(ImmutableMap.of("Etag", etag));

            Storage.Objects.Insert insertObject = null;
            try {
                insertObject = gcsClient.objects().insert(options.getDestinationBucket(), objectMetadata,
                        mediaContent);
            } catch (IOException e) {
                log.error("Failed to create insertObject of GCS ", e);
            }

            if (insertObject == null) {
                // The insert request could not be created; retry on the next iteration.
                continue;
            }

            insertObject.setName(key);

            insertObject.getMediaHttpUploader().setProgressListener(new CustomUploadProgressListener())
                    .setDisableGZipContent(true);

            // For small files, you may wish to call setDirectUploadEnabled(true), to
            // reduce the number of HTTP requests made to the server.

            if (mediaContent.getLength() > 0 && mediaContent.getLength() <= 2 * 1000 * 1000 /* 2MB */) {
                insertObject.getMediaHttpUploader().setDirectUploadEnabled(true);
            }

            try {
                stats.copyCount.incrementAndGet();
                insertObject.execute();
                stats.bytesCopied.addAndGet(sourceMetadata.getContentLength());
                if (verbose)
                    log.info("Successfully copied (on try # {} ): {} to: {} in GCS",
                            new Object[] { tries, key, keydest });
                copied = true;
                break;
            } catch (GoogleJsonResponseException e) {
                if (e.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
                    log.error("Failed to access GCS bucket. Check bucket name: ", e);
                    System.exit(1);
                }
            } catch (IOException e) {
                log.error("GCS exception copying (try # {} ) {} to: {} : {}",
                        new Object[] { tries, key, keydest, e });
            }
        }

        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            log.error("interrupted while waiting to retry key: {}, {}", key, e);
            return copied;
        }
    }
    return copied;
}

From source file:com.tango.BucketSyncer.KeyJobs.S32S3KeyCopyJob.java

License:Apache License
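
The S3-to-S3 variant performs a server-side copy and likewise records the copied byte count from the source metadata's getContentLength().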

boolean keyCopied(ObjectMetadata sourceMetadata, AccessControlList objectAcl) {
    boolean copied = false;
    String key = summary.getKey();
    MirrorOptions options = context.getOptions();
    boolean verbose = options.isVerbose();
    int maxRetries = options.getMaxRetries();
    MirrorStats stats = context.getStats();
    for (int tries = 0; tries < maxRetries; tries++) {
        if (verbose) {
            log.info("copying (try # {}): {} to: {}", new Object[] { tries, key, keydest });
        }
        final CopyObjectRequest request = new CopyObjectRequest(options.getSourceBucket(), key,
                options.getDestinationBucket(), keydest);
        request.setNewObjectMetadata(sourceMetadata);
        if (options.isCrossAccountCopy()) {
            request.setCannedAccessControlList(CannedAccessControlList.BucketOwnerFullControl);
        } else {
            request.setAccessControlList(objectAcl);
        }
        try {
            stats.copyCount.incrementAndGet();
            client.copyObject(request);
            stats.bytesCopied.addAndGet(sourceMetadata.getContentLength());
            if (verbose) {
                log.info("successfully copied (on try #{}): {} to: {}", new Object[] { tries, key, keydest });
            }
            copied = true;
            break;
        } catch (AmazonS3Exception s3e) {
            //if return with 404 error, problem with bucket name
            if (s3e.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
                log.error("Failed to access S3 bucket. Check bucket name: ", s3e);
                System.exit(1);
            }
            log.error("s3 exception copying (try #{}) {} to: {}: {}",
                    new Object[] { tries, key, keydest, s3e });
        } catch (Exception e) {
            log.error("unexpected exception copying (try #{}) {} to: {}: {}",
                    new Object[] { tries, key, keydest, e });
        }
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            log.error("interrupted while waiting to retry key: {}: {}", key, e);
            return copied;
        }
    }
    return copied;
}

From source file:com.tango.BucketSyncer.KeyJobs.S32S3KeyCopyJob.java

License:Apache License
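
Before copying, the same job asks whether a transfer is needed at all; for objects too large for a single-request upload, a size mismatch between the destination's getContentLength() and the source listing is the deciding test.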

private boolean shouldTransfer() {
    final MirrorOptions options = context.getOptions();
    final String key = summary.getKey();
    final boolean verbose = options.isVerbose();

    if (options.hasCtime()) {
        final Date lastModified = summary.getLastModified();
        if (lastModified == null) {
            if (verbose) {
                log.info("No Last-Modified header for key: {}", key);
            }

        } else {
            if (lastModified.getTime() < options.getMaxAge()) {
                if (verbose) {
                    log.info("key {} (lastmod = {}) is older than {} (cutoff = {}), not copying",
                            new Object[] { key, lastModified, options.getCtime(), options.getMaxAgeDate() });
                }
                return false;
            }
        }
    }
    final ObjectMetadata metadata;
    try {
        metadata = getObjectMetadata(options.getDestinationBucket(), keydest, options);
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
            if (verbose) {
                log.debug("Key not found in destination bucket (will copy): {}", keydest);
            }
            return true;
        } else {
            log.warn("Error getting metadata for {} {} (not copying): {}",
                    new Object[] { options.getDestinationBucket(), keydest, e });
            return false;
        }
    } catch (Exception e) {
        log.warn("Error getting metadata for {} {} (not copying): {}",
                new Object[] { options.getDestinationBucket(), keydest, e });
        return false;
    }

    if (summary.getSize() > MirrorOptions.MAX_SINGLE_REQUEST_UPLOAD_FILE_SIZE) {
        return metadata.getContentLength() != summary.getSize();
    }
    final boolean objectChanged = objectChanged(metadata);
    if (verbose && !objectChanged) {
        log.info("Destination file is same as source, not copying: {}", key);
    }

    return objectChanged;
}

From source file:com.tango.BucketSyncer.KeyJobs.S32S3KeyCopyJob.java

License:Apache License
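
For regular copies, object identity is a fingerprint of size plus ETag; getContentLength() provides the destination's size half of that comparison.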

boolean objectChanged(ObjectMetadata metadata) {
    final KeyFingerprint sourceFingerprint = new KeyFingerprint(summary.getSize(), summary.getETag());
    final KeyFingerprint destFingerprint = new KeyFingerprint(metadata.getContentLength(), metadata.getETag());
    return !sourceFingerprint.equals(destFingerprint);
}

From source file:com.tango.BucketSyncer.KeyJobs.S32S3MultipartKeyCopyJob.java

License:Apache License
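
Multipart uploads yield ETags that are not plain MD5 digests, so the multipart copy job overrides objectChanged() to compare sizes only.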

@Override
boolean objectChanged(ObjectMetadata metadata) {
    return summary.getSize() != metadata.getContentLength();
}

From source file:com.tracermedia.maven.plugins.CreateVersionMojo.java

License:Open Source License
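
This Maven mojo uploads a file to S3 and uses meta.getContentLength() as the denominator when reporting upload progress.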

protected void copyFileToS3(final String s3Bucket, final String s3Key, final File file) throws IOException {

    final ObjectMetadata meta = new ObjectMetadata();

    InputStream in = new FileInputStream(file);
    try {
        meta.setContentLength(file.length());
        meta.setContentType(Mimetypes.getInstance().getMimetype(file));
    } finally {
        in.close();
    }

    in = new ProgressReportingInputStream(new RepeatableFileInputStream(file), new ProgressListener() {
        int lastPercent = -1;
        int bytes = 0;

        public void progressChanged(ProgressEvent progressEvent) {
            bytes += progressEvent.getBytesTransfered();
            double percent = 100.0 * bytes / meta.getContentLength();
            if ((int) percent != lastPercent) {
                System.out.print(
                        String.format("\rCopying file [%s] to S3, bucket: %s, key: %s, progress: %.0f%%   ",
                                file.getName(), s3Bucket, s3Key, percent));
                lastPercent = (int) percent;
            }
        }
    });

    try {
        final PutObjectRequest request = new PutObjectRequest(s3Bucket, s3Key, in, meta);
        getS3Client().putObject(request);
        System.out.println(String.format("\rCopying file [%s] to S3, bucket: %s, key: %s, progress: %.0f%%   ",
                file.getName(), s3Bucket, s3Key, 100.0));
    } finally {
        in.close();
    }
}