Example usage for com.amazonaws.services.s3.model S3ObjectSummary getLastModified

Introduction

This page lists example usages of com.amazonaws.services.s3.model.S3ObjectSummary.getLastModified(), collected from open source projects.

Prototype

public Date getLastModified() 

Document

Gets the date when, according to Amazon S3, this object was last modified.
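
Before the project examples below, here is a minimal, self-contained sketch of calling getLastModified() while iterating a bucket listing. The bucket name is a placeholder and the default-credentials client is an assumption (it requires a reasonably recent AWS SDK for Java v1); neither is taken from the examples that follow.

import java.util.Date;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class LastModifiedExample {
    public static void main(String[] args) {
        // Assumes credentials and region come from the default provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // "my-example-bucket" is a placeholder bucket name.
        ObjectListing listing = s3.listObjects("my-example-bucket");
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            Date lastModified = summary.getLastModified();
            System.out.println(summary.getKey() + " was last modified at " + lastModified);
        }
    }
}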

Usage

From source file:com.ibm.stocator.fs.cos.COSAPIClient.java

License:Apache License

private FileStatus createFileStatus(S3ObjectSummary objSummary, String hostName, Path path)
        throws IllegalArgumentException, IOException {
    String objKey = objSummary.getKey();
    String newMergedPath = getMergedPath(hostName, path, objKey);
    return createFileStatus(objSummary.getSize(), objKey, objSummary.getLastModified(),
            new Path(newMergedPath));
}

From source file:com.ibm.stocator.fs.cos.COSUtils.java

License:Apache License

/**
 * Create a files status instance from a listing.
 * @param keyPath path to entry
 * @param summary summary from AWS
 * @param blockSize block size to declare
 * @return a status entry
 */
public static COSFileStatus createFileStatus(Path keyPath, S3ObjectSummary summary, long blockSize) {
    long size = summary.getSize();
    return createFileStatus(keyPath, objectRepresentsDirectory(summary.getKey(), size), size,
            summary.getLastModified(), blockSize);
}

From source file:com.ikanow.infinit.e.harvest.extraction.document.file.AwsInfiniteFile.java

License:Open Source License

@Override
public InfiniteFile[] listFiles(Date optionalFilterDate, int maxDocs) {
    InfiniteFile[] fileList = null;
    ObjectListing list = null;
    _overwriteTime = 0L;
    ListObjectsRequest listRequest = new ListObjectsRequest().withBucketName(_awsBucketName);
    if (null != _awsObjectName) {
        listRequest.withPrefix(_awsObjectName);
    }
    listRequest.withDelimiter("/");
    list = ((AmazonS3Client) _awsClient).listObjects(listRequest);
    fileList = new InfiniteFile[list.getObjectSummaries().size() + list.getCommonPrefixes().size()];
    //TESTED (3.2)
    int nAdded = 0;
    // Get the sub-directories
    for (String subDir : list.getCommonPrefixes()) {
        // Create directories:
        fileList[nAdded] = new AwsInfiniteFile(_awsBucketName, subDir, null, _awsClient);
        nAdded++;
    } //TESTED (3b.3)
      // Get the files:
    for (S3ObjectSummary s3Obj : list.getObjectSummaries()) {
        if (!s3Obj.getKey().endsWith("/")) {
            fileList[nAdded] = new AwsInfiniteFile(s3Obj.getBucketName(), s3Obj.getKey(),
                    s3Obj.getLastModified(), _awsClient);
            long fileTime = fileList[nAdded].getDate();
            if (fileTime > _overwriteTime) {
                _overwriteTime = fileTime;
            } //TESTED (3.2)
            nAdded++;
        }
    }
    return fileList;
}

From source file:com.lithium.flow.filer.S3Filer.java

License:Apache License

@Override
@Nonnull
public List<Record> listRecords(@Nonnull String path) throws IOException {
    ObjectListing listing = s3
            .listObjects(new ListObjectsRequest().withBucketName(bucket).withPrefix(path.substring(1)));

    List<Record> records = Lists.newArrayList();
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        File file = new File(summary.getKey());
        String parent = file.getParent();
        String name = file.getName();
        long time = summary.getLastModified().getTime();
        long size = summary.getSize();
        boolean directory = name.endsWith("/");
        records.add(new Record(uri, "/" + parent, name, time, size, directory));
    }
    return records;
}

From source file:com.mycompany.mytubeaws.ListServlet.java

/**
 * Handles the HTTP <code>POST</code> method.
 *
 * @param request servlet request
 * @param response servlet response
 * @throws ServletException if a servlet-specific error occurs
 * @throws IOException if an I/O error occurs
 */
@Override
protected void doPost(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    ArrayList<String> nameList = new ArrayList<>();
    ArrayList<String> sizeList = new ArrayList<>();
    ArrayList<String> dateList = new ArrayList<>();

    ObjectListing objects = s3.listObjects(bucketName);
    while (true) {
        for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
            nameList.add(objectSummary.getKey());
            sizeList.add(Long.toString(objectSummary.getSize()));
            dateList.add(StringUtils.fromDate(objectSummary.getLastModified()));
        }
        // Check the current listing for truncation before fetching the next
        // batch, so the final (non-truncated) batch is also processed.
        if (!objects.isTruncated()) {
            break;
        }
        objects = s3.listNextBatchOfObjects(objects);
    }

    request.setAttribute("nameList", nameList);
    request.setAttribute("sizeList", sizeList);
    request.setAttribute("dateList", dateList);
    request.getRequestDispatcher("/UploadResult.jsp").forward(request, response);
}

From source file:com.netflix.ice.processor.BillingFileProcessor.java

License:Apache License

@Override
protected void poll() throws Exception {

    TreeMap<DateTime, List<BillingFile>> filesToProcess = Maps.newTreeMap();
    Map<DateTime, List<BillingFile>> monitorFilesToProcess = Maps.newTreeMap();

    // list the tar.gz files in the billing file folder
    for (int i = 0; i < config.billingS3BucketNames.length; i++) {
        String billingS3BucketName = config.billingS3BucketNames[i];
        String billingS3BucketPrefix = config.billingS3BucketPrefixes.length > i
                ? config.billingS3BucketPrefixes[i]
                : "";
        String accountId = config.billingAccountIds.length > i ? config.billingAccountIds[i] : "";
        String billingAccessRoleName = config.billingAccessRoleNames.length > i
                ? config.billingAccessRoleNames[i]
                : "";
        String billingAccessExternalId = config.billingAccessExternalIds.length > i
                ? config.billingAccessExternalIds[i]
                : "";

        logger.info("trying to list objects in billing bucket " + billingS3BucketName
                + " using assume role, and external id " + billingAccessRoleName + " "
                + billingAccessExternalId);
        List<S3ObjectSummary> objectSummaries = AwsUtils.listAllObjects(billingS3BucketName,
                billingS3BucketPrefix, accountId, billingAccessRoleName, billingAccessExternalId);
        logger.info("found " + objectSummaries.size() + " in billing bucket " + billingS3BucketName);
        TreeMap<DateTime, S3ObjectSummary> filesToProcessInOneBucket = Maps.newTreeMap();
        Map<DateTime, S3ObjectSummary> monitorFilesToProcessInOneBucket = Maps.newTreeMap();

        // for each file, download & process it if needed
        for (S3ObjectSummary objectSummary : objectSummaries) {

            String fileKey = objectSummary.getKey();
            DateTime dataTime = AwsUtils.getDateTimeFromFileNameWithTags(fileKey);
            boolean withTags = true;
            if (dataTime == null) {
                dataTime = AwsUtils.getDateTimeFromFileName(fileKey);
                withTags = false;
            }

            if (dataTime != null && !dataTime.isBefore(config.startDate)) {
                if (!filesToProcessInOneBucket.containsKey(dataTime)
                        || withTags && config.resourceService != null
                        || !withTags && config.resourceService == null)
                    filesToProcessInOneBucket.put(dataTime, objectSummary);
                else
                    logger.info("ignoring file " + objectSummary.getKey());
            } else {
                logger.info("ignoring file " + objectSummary.getKey());
            }
        }

        for (S3ObjectSummary objectSummary : objectSummaries) {
            String fileKey = objectSummary.getKey();
            DateTime dataTime = AwsUtils.getDateTimeFromFileNameWithMonitoring(fileKey);

            if (dataTime != null && !dataTime.isBefore(config.startDate)) {
                monitorFilesToProcessInOneBucket.put(dataTime, objectSummary);
            }
        }

        for (DateTime key : filesToProcessInOneBucket.keySet()) {
            List<BillingFile> list = filesToProcess.get(key);
            if (list == null) {
                list = Lists.newArrayList();
                filesToProcess.put(key, list);
            }
            list.add(new BillingFile(filesToProcessInOneBucket.get(key), accountId, billingAccessRoleName,
                    billingAccessExternalId, billingS3BucketPrefix));
        }

        for (DateTime key : monitorFilesToProcessInOneBucket.keySet()) {
            List<BillingFile> list = monitorFilesToProcess.get(key);
            if (list == null) {
                list = Lists.newArrayList();
                monitorFilesToProcess.put(key, list);
            }
            list.add(new BillingFile(monitorFilesToProcessInOneBucket.get(key), accountId,
                    billingAccessRoleName, billingAccessExternalId, billingS3BucketPrefix));
        }
    }

    for (DateTime dataTime : filesToProcess.keySet()) {
        startMilli = endMilli = dataTime.getMillis();
        init();

        boolean hasNewFiles = false;
        boolean hasTags = false;
        long lastProcessed = lastProcessTime(AwsUtils.monthDateFormat.print(dataTime));

        for (BillingFile billingFile : filesToProcess.get(dataTime)) {
            S3ObjectSummary objectSummary = billingFile.s3ObjectSummary;
            if (objectSummary.getLastModified().getTime() < lastProcessed) {
                logger.info("data has been processed. ignoring " + objectSummary.getKey() + "...");
                continue;
            }
            hasNewFiles = true;
        }

        if (!hasNewFiles) {
            logger.info("data has been processed. ignoring all files at "
                    + AwsUtils.monthDateFormat.print(dataTime));
            continue;
        }

        long processTime = new DateTime(DateTimeZone.UTC).getMillis();
        for (BillingFile billingFile : filesToProcess.get(dataTime)) {

            S3ObjectSummary objectSummary = billingFile.s3ObjectSummary;
            String fileKey = objectSummary.getKey();

            File file = new File(config.localDir, fileKey.substring(billingFile.prefix.length()));
            logger.info("trying to download " + fileKey + "...");
            boolean downloaded = AwsUtils.downloadFileIfChangedSince(objectSummary.getBucketName(),
                    billingFile.prefix, file, lastProcessed, billingFile.accountId, billingFile.accessRoleName,
                    billingFile.externalId);
            if (downloaded)
                logger.info("downloaded " + fileKey);
            else {
                logger.info("file already downloaded " + fileKey + "...");
            }

            logger.info("processing " + fileKey + "...");
            boolean withTags = fileKey.contains("with-resources-and-tags");
            hasTags = hasTags || withTags;
            processingMonitor = false;
            processBillingZipFile(file, withTags);
            logger.info("done processing " + fileKey);
        }

        if (monitorFilesToProcess.get(dataTime) != null) {
            for (BillingFile monitorBillingFile : monitorFilesToProcess.get(dataTime)) {

                S3ObjectSummary monitorObjectSummary = monitorBillingFile.s3ObjectSummary;
                if (monitorObjectSummary != null) {
                    String monitorFileKey = monitorObjectSummary.getKey();
                    logger.info("processing " + monitorFileKey + "...");
                    File monitorFile = new File(config.localDir,
                            monitorFileKey.substring(monitorFileKey.lastIndexOf("/") + 1));
                    logger.info("trying to download " + monitorFileKey + "...");
                    boolean downloaded = AwsUtils.downloadFileIfChangedSince(
                            monitorObjectSummary.getBucketName(), monitorBillingFile.prefix, monitorFile,
                            lastProcessed, monitorBillingFile.accountId, monitorBillingFile.accessRoleName,
                            monitorBillingFile.externalId);
                    if (downloaded)
                        logger.info("downloaded " + monitorFile);
                    else
                        logger.warn(monitorFile + " already downloaded...");
                    FileInputStream in = new FileInputStream(monitorFile);
                    try {
                        processingMonitor = true;
                        processBillingFile(monitorFile.getName(), in, true);
                    } catch (Exception e) {
                        logger.error("Error processing " + monitorFile, e);
                    } finally {
                        in.close();
                    }
                }
            }
        }

        if (dataTime.equals(filesToProcess.lastKey())) {
            int hours = (int) ((endMilli - startMilli) / 3600000L);
            logger.info("cut hours to " + hours);
            cutData(hours);
        }

        // now get reservation capacity to calculate upfront and un-used cost
        for (Ec2InstanceReservationPrice.ReservationUtilization utilization : Ec2InstanceReservationPrice.ReservationUtilization
                .values())
            processReservations(utilization);

        if (hasTags && config.resourceService != null)
            config.resourceService.commit();

        logger.info("archiving results for " + dataTime + "...");
        archive();
        logger.info("done archiving " + dataTime);

        updateProcessTime(AwsUtils.monthDateFormat.print(dataTime), processTime);
        if (dataTime.equals(filesToProcess.lastKey())) {
            sendOndemandCostAlert();
        }
    }

    logger.info("AWS usage processed.");
}

From source file:com.proofpoint.event.collector.combiner.S3StorageSystem.java

License:Apache License

@Override
public List<StoredObject> listObjects(URI storageArea) {
    S3StorageHelper.checkValidS3Uri(storageArea);

    String s3Path = getS3ObjectKey(storageArea);
    Iterator<S3ObjectSummary> iter = new S3ObjectListing(s3Service,
            new ListObjectsRequest(getS3Bucket(storageArea), s3Path, null, "/", null)).iterator();

    ImmutableList.Builder<StoredObject> builder = ImmutableList.builder();
    while (iter.hasNext()) {
        S3ObjectSummary summary = iter.next();
        builder.add(new StoredObject(buildS3Location(storageArea, summary.getKey().substring(s3Path.length())),
                summary.getETag(), summary.getSize(), summary.getLastModified().getTime()));
    }
    return builder.build();
}

From source file:com.qubole.presto.kinesis.s3config.S3TableConfigClient.java

License:Apache License

/**
 * Connect to S3 directory to look for new or updated table definitions and then
 * update the map.
 */
protected void updateTablesFromS3() {
    long now = System.currentTimeMillis();

    List<S3ObjectSummary> objectList = this.getObjectSummaries();
    AmazonS3Client s3client = this.clientManager.getS3Client();
    AmazonS3URI directoryURI = new AmazonS3URI(this.bucketUrl);

    for (S3ObjectSummary objInfo : objectList) {
        if (!this.internalMap.containsKey(objInfo.getKey())
                || objInfo.getLastModified().getTime() >= this.lastCheck) {
            // New or updated file, so we must read from AWS
            try {
                if (objInfo.getKey().endsWith("/")) {
                    continue;
                }

                log.info("Getting : %s - %s", objInfo.getBucketName(), objInfo.getKey());
                S3Object object = s3client
                        .getObject(new GetObjectRequest(objInfo.getBucketName(), objInfo.getKey()));

                StringBuilder resultStr = new StringBuilder("");
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(object.getObjectContent()))) {
                    boolean hasMore = true;
                    while (hasMore) {
                        String line = reader.readLine();
                        if (line != null) {
                            resultStr.append(line);
                        } else {
                            hasMore = false;
                        }
                    }

                    KinesisStreamDescription table = streamDescriptionCodec.fromJson(resultStr.toString());

                    internalMap.put(objInfo.getKey(), table);
                    log.info("Put table description into the map from %s", objInfo.getKey());
                } catch (IOException iox) {
                    log.error("Problem reading input stream from object.", iox);
                }
            } catch (AmazonServiceException ase) {
                StringBuilder sb = new StringBuilder();
                sb.append("Caught an AmazonServiceException, which means your request made it ");
                sb.append("to Amazon S3, but was rejected with an error response for some reason.\n");
                sb.append("Error Message:    " + ase.getMessage());
                sb.append("HTTP Status Code: " + ase.getStatusCode());
                sb.append("AWS Error Code:   " + ase.getErrorCode());
                sb.append("Error Type:       " + ase.getErrorType());
                sb.append("Request ID:       " + ase.getRequestId());
                log.error(sb.toString(), ase);
            } catch (AmazonClientException ace) {
                StringBuilder sb = new StringBuilder();
                sb.append("Caught an AmazonClientException, " + "which means the client encountered "
                        + "an internal error while trying to communicate" + " with S3, "
                        + "such as not being able to access the network.");
                sb.append("Error Message: " + ace.getMessage());
                log.error(sb.toString(), ace);
            }
        }
    } // end loop through object descriptions

    log.info("Completed updating table definitions from S3.");
    this.lastCheck = now;

    return;
}

From source file:com.streamsets.pipeline.stage.origin.s3.AbstractAmazonS3Source.java

License:Apache License

private S3Offset fetchNextObjectFromSpooler(S3Offset s3Offset) throws StageException {
    setCurrentObject(null);
    try {
        //The next object found in the queue is usually eligible, since we process objects in chronological order.

        //However, if the configuration is changed after processing a few files (say, the prefix is relaxed) and an
        //older file gets selected for processing, it must be ignored.
        S3ObjectSummary nextAvailObj = null;
        do {
            if (nextAvailObj != null) {
                LOG.warn("Ignoring object '{}' in spool directory as is lesser than offset object '{}'",
                        nextAvailObj.getKey(), s3Offset.getKey());
            }
            nextAvailObj = spooler.poolForObject(s3Offset, s3ConfigBean.basicConfig.maxWaitTime,
                    TimeUnit.MILLISECONDS);
        } while (!isEligible(nextAvailObj, s3Offset));

        if (nextAvailObj == null) {
            // no object to process
            LOG.debug("No new object available in spool directory after '{}' secs, producing empty batch",
                    s3ConfigBean.basicConfig.maxWaitTime / 1000);
        } else {
            setCurrentObject(nextAvailObj);

            // if the current offset object is null or the object returned by the spooler is greater than the current offset
            // object we take the object returned by the spooler as the new object and set the offset to zero.
            // if not, it means the spooler returned us the current object, we just keep processing it from the last
            // offset we processed (known via offset tracking)
            if (s3Offset.getKey() == null
                    || isLaterThan(nextAvailObj.getKey(), nextAvailObj.getLastModified().getTime(),
                            s3Offset.getKey(), Long.parseLong(s3Offset.getTimestamp()))) {
                s3Offset = new S3Offset(getCurrentObject().getKey(), ZERO, getCurrentObject().getETag(),
                        String.valueOf(getCurrentObject().getLastModified().getTime()));
            }
        }
    } catch (InterruptedException ex) {
        // the spooler was interrupted while waiting for an object, we log and return, the pipeline agent will invoke us
        // again to wait for an object again
        LOG.warn("Pooling interrupted");
    } catch (AmazonClientException e) {
        throw new StageException(Errors.S3_SPOOLDIR_23, e.toString(), e);
    }
    return s3Offset;
}

From source file:com.streamsets.pipeline.stage.origin.s3.AbstractAmazonS3Source.java

License:Apache License

private boolean isEligible(S3ObjectSummary nextAvailObj, S3Offset s3Offset) {
    return (nextAvailObj == null)
            || (nextAvailObj.getLastModified().getTime() >= Long.parseLong(s3Offset.getTimestamp()));
}