Example usage for com.amazonaws.services.s3.model ObjectListing getObjectSummaries

List of usage examples for com.amazonaws.services.s3.model ObjectListing getObjectSummaries

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model ObjectListing getObjectSummaries.

Prototype

public List<S3ObjectSummary> getObjectSummaries() 

Source Link

Document

Gets the list of object summaries describing the objects stored in the S3 bucket.

Usage

From source file:com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java

License:Apache License

/**
 * Gets the list of available Blobs for the bound bucket from the
 * BlobStore.
 *
 * @return List of DataFile Blobs, one per object in the bucket
 * @throws RuntimeException if an object's content cannot be read or the
 *         object cannot be closed
 */
@Override
public List<DataFile> getBlob() {
    List<DataFile> objs = new ArrayList<DataFile>();
    // Get the list of objects stored in the configured bucket.
    ObjectListing objectList = this.s3Client.listObjects(this.blobstoreConfig.getBucketName());

    for (S3ObjectSummary objectSummary : objectList.getObjectSummaries()) {
        // try-with-resources closes every object as soon as it is consumed.
        // The previous implementation closed only the last object fetched,
        // leaking the HTTP connections of all earlier ones.
        try (S3Object obj = this.s3Client.getObject(
                new GetObjectRequest(this.blobstoreConfig.getBucketName(), objectSummary.getKey()))) {
            DataFile data = new DataFile();
            data.setFile(IOUtils.toByteArray(obj.getObjectContent()));
            objs.add(data);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    return objs;
}

From source file:com.github.lbroudoux.elasticsearch.river.s3.connector.S3Connector.java

License:Apache License

/**
 * Selects and retrieves summaries of objects in the bucket under the configured
 * path prefix that have a modification date newer than lastScanTime.
 *
 * @param riverName river name, used only for log messages
 * @param lastScanTime last modification date filter (epoch millis); may be null on a first run
 * @param initialScanBookmark key to resume a still-running initial scan from, or null when
 *        no initial scan is in progress
 * @param trackS3Deletions whether to record every key seen so the caller can detect deletions
 *        (forced off while an initial scan is in progress)
 * @return Summaries of picked objects plus scan-time, last-key and truncation bookkeeping.
 */
public S3ObjectSummaries getObjectSummaries(String riverName, Long lastScanTime, String initialScanBookmark,
        boolean trackS3Deletions) {
    // Every key seen during the scan (filled only when trackS3Deletions is on).
    List<String> keys = new ArrayList<String>();
    // Objects picked for indexing on this run.
    List<S3ObjectSummary> result = new ArrayList<S3ObjectSummary>();
    boolean initialScan = initialScanBookmark != null;

    if (initialScan) {
        // A resumable initial scan does not visit every key, so the key list
        // would be incomplete; deletion tracking is disabled for this run.
        trackS3Deletions = false;
        logger.info("{}: resuming initial scan of {} from {}", riverName, pathPrefix, initialScanBookmark);
    } else {
        logger.info("{}: checking {} for changes since {}", riverName, pathPrefix, lastScanTime);
    }

    // Store the scan time to return before doing big queries...
    Long lastScanTimeToReturn = System.currentTimeMillis();

    // First run or initial scan: consider every object, regardless of age.
    if (lastScanTime == null || initialScan) {
        lastScanTime = 0L;
    }

    ListObjectsRequest request = new ListObjectsRequest().withBucketName(bucketName).withPrefix(pathPrefix)
            .withEncodingType("url");
    ObjectListing listing = s3Client.listObjects(request);
    //logger.debug("Listing: {}", listing);
    int keyCount = 0;
    boolean scanTruncated = false;
    String lastKey = null;

    // Page through the listing until S3 reports no further results.
    while (!listing.getObjectSummaries().isEmpty() || listing.isTruncated()) {
        List<S3ObjectSummary> summaries = listing.getObjectSummaries();
        // if (logger.isDebugEnabled()) {
        //    logger.debug("Found {} items in this listObjects page", summaries.size());
        // }

        for (S3ObjectSummary summary : summaries) {
            if (logger.isDebugEnabled()) {
                // logger.debug("Getting {} last modified on {}", summary.getKey(), summary.getLastModified());
            }

            if (trackS3Deletions) {
                keys.add(summary.getKey());
            }

            // Pick objects modified since the last scan, up to the per-run cap.
            if (summary.getLastModified().getTime() > lastScanTime
                    && result.size() < MAX_NEW_RESULTS_TO_INDEX_ON_RUN) {
                // logger.debug("  Picked !");

                // During an initial scan, only pick keys strictly past the resume bookmark.
                if (!initialScan || initialScanBookmark.compareTo(summary.getKey()) < 0) {
                    logger.debug("  Picked {}", summary.getKey());
                    result.add(summary);
                    lastKey = summary.getKey();
                }

            } else if (!scanTruncated && result.size() == MAX_NEW_RESULTS_TO_INDEX_ON_RUN) {
                logger.info("{}: only indexing up to {} new objects on this indexing run", riverName,
                        MAX_NEW_RESULTS_TO_INDEX_ON_RUN);
                // initialScan = true;
                scanTruncated = true;

                if (!trackS3Deletions) {
                    // No need to keep iterating through all keys if we aren't doing deleteOnS3 
                    break;
                }
            }

            keyCount += 1;
        }

        // Truncated initial scan with no deletion tracking: stop paging early.
        if (initialScan && scanTruncated && !trackS3Deletions) {
            break;
        }

        listing = s3Client.listNextBatchOfObjects(listing);
    }

    // Wrap results and latest scan time.
    if (scanTruncated) {
        logger.info("{}: scan truncated for speed: {} files ({} new)", riverName, keyCount, result.size());
    } else {
        logger.info("{}: complete scan: {} files ({} new)", riverName, keyCount, result.size());
    }

    return new S3ObjectSummaries(lastScanTimeToReturn, lastKey, scanTruncated, trackS3Deletions, result, keys);
}

From source file:com.github.wuic.nut.s3.S3NutDao.java

License:Open Source License

/**
 * <p>/*from  www .  jav  a 2 s.  c o  m*/
 * Searches recursively in the given path any files matching the given entry.
 * </p>
 *
 * @param path the path
 * @param pattern the pattern to match
 * @return the list of matching files
 * @throws StreamException if the client can't move to a directory or any I/O error occurs
 */
private List<String> recursiveSearch(final String path, final Pattern pattern) throws StreamException {

    ObjectListing objectListing;

    try {
        final String finalSuffix = path.equals("") ? "" : "/";
        connect();
        objectListing = amazonS3Client.listObjects(new ListObjectsRequest().withBucketName(bucketName)
                .withPrefix(IOUtils.mergePath(path.substring(1), finalSuffix)).withDelimiter("/"));
    } catch (AmazonServiceException ase) {
        throw new StreamException(new IOException(
                String.format("Can't get S3Object on bucket %s for nut key : %s", bucketName, path), ase));
    }

    final List<String> retval = new ArrayList<String>();
    for (final S3ObjectSummary s3ObjectSummary : objectListing.getObjectSummaries()) {
        // Ignore directories, all nuts are in the listing
        if (!s3ObjectSummary.getKey().endsWith("/")) {
            final Matcher matcher = pattern.matcher(s3ObjectSummary.getKey());

            if (matcher.find()) {
                retval.add(s3ObjectSummary.getKey());
            }
        }
    }

    // Recursive search on prefixes (directories)
    for (final String s3CommonPrefix : objectListing.getCommonPrefixes()) {
        retval.addAll(recursiveSearch(s3CommonPrefix.substring(0, s3CommonPrefix.length() - 1), pattern));
    }

    return retval;
}

From source file:com.haskins.cloudtrailviewer.dialog.s3filechooser.S3FileList.java

License:Open Source License

/**
 * Adds one document entry to the list model for each object in the listing,
 * using the key with its path prefix removed as both name and alias.
 */
private void addFileKeys(ObjectListing objectListing) {

    for (final S3ObjectSummary summary : objectListing.getObjectSummaries()) {
        final String fileName = stripPrefix(summary.getKey());
        this.s3ListModel.addElement(new S3ListModel(fileName, fileName, S3ListModel.FILE_DOC));
    }
}

From source file:com.haskins.cloudtrailviewer.dialog.s3filechooser.S3FileList.java

License:Open Source License

/**
 * Collects the key of every object under the given path in the current
 * account's bucket, following truncated listings until all pages are read.
 *
 * @param path key prefix (folder) whose object keys should be selected
 */
private void addFolderFiles(String path) {

    AmazonS3 s3Client = getS3Client();

    ObjectListing current = s3Client.listObjects(currentAccount.getBucket(), path);

    // Single paging loop; the original duplicated the per-page key-collection
    // loop before and inside the truncation while-loop.
    while (true) {
        for (final S3ObjectSummary objectSummary : current.getObjectSummaries()) {
            selected_keys.add(objectSummary.getKey());
        }

        if (!current.isTruncated()) {
            break;
        }

        // More pages available: fetch the next batch of object summaries.
        current = s3Client.listNextBatchOfObjects(current);
    }
}

From source file:com.haskins.cloudtrailviewer.dialog.S3FileChooser.java

License:Open Source License

/**
 * Reloads the list model with the directories and files found under the
 * current prefix in the configured S3 bucket. The loading indicator is always
 * hidden when the refresh finishes, even if the listing fails.
 */
private void reloadContents() {

    loadingLabel.setVisible(true);
    this.s3ListModel.clear();

    String bucketName = currentAccount.getBucket();

    // With a "/" delimiter S3 returns immediate children only: directories as
    // common prefixes, files as object summaries.
    ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
    listObjectsRequest.setBucketName(bucketName);
    listObjectsRequest.setPrefix(prefix);
    listObjectsRequest.setDelimiter("/");

    AWSCredentials credentials = new BasicAWSCredentials(currentAccount.getKey(), currentAccount.getSecret());

    AmazonS3 s3Client = new AmazonS3Client(credentials);

    try {
        ObjectListing objectListing = s3Client.listObjects(listObjectsRequest);

        // Add ".." navigation entry if not at the bucket root.
        if (prefix.trim().length() != 0) {
            this.s3ListModel.addElement(new S3ListModel(MOVE_BACK, MOVE_BACK, S3ListModel.FILE_BACK));
        }

        // Common prefixes are directories.
        for (String directory : objectListing.getCommonPrefixes()) {

            String dir = stripPrefix(directory);
            int lastSlash = dir.lastIndexOf("/");
            String strippedDir = dir.substring(0, lastSlash);

            // Show a friendly alias for directories named after known account numbers.
            String alias = dir;
            if (isAccountNumber(strippedDir) && aliasMap.containsKey(strippedDir)) {
                alias = aliasMap.get(strippedDir);
            }

            this.s3ListModel.addElement(new S3ListModel(dir, alias, S3ListModel.FILE_DIR));
        }

        // Object summaries are files.
        for (final S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {

            String file = stripPrefix(objectSummary.getKey());
            this.s3ListModel.addElement(new S3ListModel(file, file, S3ListModel.FILE_DOC));
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        // Bug fix: previously only hidden on success, so any exception left
        // the loading indicator visible forever.
        loadingLabel.setVisible(false);
    }
}

From source file:com.ibm.stocator.fs.cos.COSAPIClient.java

License:Apache License

/**
 * Resolves the file status of the given path: first from the in-memory cache,
 * then as an object stored under the exact key, then under {@code key/}, and
 * finally by listing under {@code key/} to detect a pseudo-directory.
 *
 * @param hostName host name (scheme + container) prefixing the path
 * @param path path to resolve
 * @param msg caller-supplied context message (not used in this method)
 * @return the file status of the object or (pseudo-)directory
 * @throws FileNotFoundException if nothing matches the path
 * @throws IOException on S3 failures other than 404 (e.g. 403)
 */
@Override
public FileStatus getFileStatus(String hostName, Path path, String msg)
        throws IOException, FileNotFoundException {
    FileStatus res = null;
    // Fast path: serve a previously cached status.
    FileStatus cached = memoryCache.getFileStatus(path.toString());
    if (cached != null) {
        return cached;
    }
    LOG.trace("getFileStatus(start) for {}, hostname: {}", path, hostName);
    /*
     * The requested path is equal to hostName. HostName is equal to
     * hostNameScheme, thus the container. Therefore we have no object to look
     * for and we return the FileStatus as a directory. Containers have no
     * lastModified.
     */
    if (path.toString().equals(hostName) || (path.toString().length() + 1 == hostName.length())) {
        LOG.trace("getFileStatus(completed) {}", path);
        res = new FileStatus(0L, true, 1, mBlockSize, 0L, path);
        memoryCache.putFileStatus(path.toString(), res);
        return res;
    }
    // Temporary objects created during Hadoop writes are never reported.
    if (path.toString().contains(HADOOP_TEMPORARY)) {
        LOG.debug("getFileStatus on temp object {}. Return not found", path.toString());
        throw new FileNotFoundException("Not found " + path.toString());
    }
    String key = pathToKey(hostName, path);
    LOG.debug("getFileStatus: on original key {}", key);
    try {
        FileStatus fileStatus = null;
        try {
            fileStatus = getFileStatusKeyBased(key, path);
        } catch (AmazonS3Exception e) {
            // 404 means "no such object" -- fall through to the next strategy.
            if (e.getStatusCode() != 404) {
                throw new IOException(e);
            }
        }
        if (fileStatus != null) {
            LOG.trace("getFileStatus(completed) {}", path);
            memoryCache.putFileStatus(path.toString(), fileStatus);
            return fileStatus;
        }
        // means key returned not found. Trying to call get file status on key/
        // probably not needed this call
        if (!key.endsWith("/")) {
            String newKey = key + "/";
            try {
                // NOTE(review): logs the original key rather than newKey.
                LOG.debug("getFileStatus: original key not found. Alternative key {}", key);
                fileStatus = getFileStatusKeyBased(newKey, path);
            } catch (AmazonS3Exception e) {
                if (e.getStatusCode() != 404) {
                    throw new IOException(e);
                }
            }

            if (fileStatus != null) {
                LOG.trace("getFileStatus(completed) {}", path);
                memoryCache.putFileStatus(path.toString(), fileStatus);
                return fileStatus;
            } else {
                // if here: both key and key/ returned not found.
                // trying to see if pseudo directory of the form
                // a/b/key/d/e (a/b/key/ doesn't exist by itself)
                // perform listing on the key
                LOG.debug("getFileStatus: Modifined key {} not found. Trying to lisr", key);
                key = maybeAddTrailingSlash(key);
                ListObjectsRequest request = new ListObjectsRequest();
                request.setBucketName(mBucket);
                request.setPrefix(key);
                request.setDelimiter("/");
                // One result is enough to prove the pseudo-directory exists.
                request.setMaxKeys(1);

                ObjectListing objects = mClient.listObjects(request);
                if (!objects.getCommonPrefixes().isEmpty() || !objects.getObjectSummaries().isEmpty()) {
                    LOG.trace("getFileStatus(completed) {}", path);
                    res = new FileStatus(0, true, 1, 0, 0, path);
                    memoryCache.putFileStatus(path.toString(), res);
                    return res;
                } else if (key.isEmpty()) {
                    LOG.debug("Found root directory");
                    LOG.trace("getFileStatus(completed) {}", path);
                    res = new FileStatus(0, true, 1, 0, 0, path);
                    memoryCache.putFileStatus(path.toString(), res);
                    return res;
                }
            }
        }
    } catch (AmazonS3Exception e) {
        // 403 is a genuine failure; anything else falls through to "not found".
        if (e.getStatusCode() == 403) {
            throw new IOException(e);
        }
    } catch (Exception e) {
        LOG.debug("Not found {}", path.toString());
        LOG.warn(e.getMessage());
        throw new FileNotFoundException("Not found " + path.toString());
    }
    throw new FileNotFoundException("Not found " + path.toString());
}

From source file:com.ibm.stocator.fs.cos.COSAPIClient.java

License:Apache License

/**
 * {@inheritDoc}//from  ww  w.ja  v  a2 s . co m
 *
 * Prefix based
 * Return everything that starts with the prefix
 * Fill listing
 * Return all objects, even zero size
 * If fileStatus is null means the path is part of some name, neither object
 * or pseudo directory. Was called by Globber
 *
 * @param hostName hostName
 * @param path path
 * @param fullListing Return all objects, even zero size
 * @param prefixBased Return everything that starts with the prefix
 * @return list
 * @throws IOException if error
 */
/*
public FileStatus[] list(String hostName, Path path, boolean fullListing,
    boolean prefixBased) throws IOException {
  String key = pathToKey(hostName, path);
  ArrayList<FileStatus> tmpResult = new ArrayList<FileStatus>();
  ListObjectsRequest request = new ListObjectsRequest().withBucketName(mBucket).withPrefix(key);
        
  String curObj;
  if (path.toString().equals(mBucket)) {
    curObj = "";
  } else if (path.toString().startsWith(mBucket + "/")) {
    curObj = path.toString().substring(mBucket.length() + 1);
  } else if (path.toString().startsWith(hostName)) {
    curObj = path.toString().substring(hostName.length());
  } else {
    curObj = path.toString();
  }
        
  ObjectListing objectList = mClient.listObjects(request);
  List<S3ObjectSummary> objectSummaries = objectList.getObjectSummaries();
  if (objectSummaries.size() == 0) {
    FileStatus[] emptyRes = {};
    LOG.debug("List for bucket {} is empty", mBucket);
    return emptyRes;
  }
  boolean objectScanContinue = true;
  S3ObjectSummary prevObj = null;
  while (objectScanContinue) {
    for (S3ObjectSummary obj : objectSummaries) {
if (prevObj == null) {
  prevObj = obj;
  continue;
}
String objKey = obj.getKey();
String unifiedObjectName = extractUnifiedObjectName(objKey);
if (!prefixBased && !curObj.equals("") && !path.toString().endsWith("/")
    && !unifiedObjectName.equals(curObj) && !unifiedObjectName.startsWith(curObj + "/")) {
  LOG.trace("{} does not match {}. Skipped", unifiedObjectName, curObj);
  continue;
}
if (isSparkOrigin(unifiedObjectName) && !fullListing) {
  LOG.trace("{} created by Spark", unifiedObjectName);
  if (!isJobSuccessful(unifiedObjectName)) {
    LOG.trace("{} created by failed Spark job. Skipped", unifiedObjectName);
    if (fModeAutomaticDelete) {
      delete(hostName, new Path(objKey), true);
    }
    continue;
  } else {
    // if we here - data created by spark and job completed
    // successfully
    // however there be might parts of failed tasks that
    // were not aborted
    // we need to make sure there are no failed attempts
    if (nameWithoutTaskID(objKey).equals(nameWithoutTaskID(prevObj.getKey()))) {
      // found failed that was not aborted.
      LOG.trace("Colisiion found between {} and {}", prevObj.getKey(), objKey);
      if (prevObj.getSize() < obj.getSize()) {
        LOG.trace("New candidate is {}. Removed {}", obj.getKey(), prevObj.getKey());
        prevObj = obj;
      }
      continue;
    }
  }
}
if (prevObj.getSize() > 0 || fullListing) {
  FileStatus fs = getFileStatusObjSummaryBased(prevObj, hostName, path);
  tmpResult.add(fs);
}
prevObj = obj;
    }
    boolean isTruncated = objectList.isTruncated();
    if (isTruncated) {
objectList = mClient.listNextBatchOfObjects(objectList);
objectSummaries = objectList.getObjectSummaries();
    } else {
objectScanContinue = false;
    }
  }
  if (prevObj != null && (prevObj.getSize() > 0 || fullListing)) {
    FileStatus fs = getFileStatusObjSummaryBased(prevObj, hostName, path);
    tmpResult.add(fs);
  }
  if (LOG.isTraceEnabled()) {
    LOG.trace("COS List to return length {}", tmpResult.size());
    for (FileStatus fs: tmpResult) {
LOG.trace("{}", fs.getPath());
    }
  }
  return tmpResult.toArray(new FileStatus[tmpResult.size()]);
}
*/
@Override
public FileStatus[] list(String hostName, Path path, boolean fullListing, boolean prefixBased,
        Boolean isDirectory, boolean flatListing, PathFilter filter) throws FileNotFoundException, IOException {
    LOG.debug("Native direct list status for {}", path);
    ArrayList<FileStatus> tmpResult = new ArrayList<FileStatus>();
    String key = pathToKey(hostName, path);
    // Directories are represented by keys with a trailing slash.
    if (isDirectory != null && isDirectory.booleanValue() && !key.endsWith("/")) {
        key = key + "/";
        LOG.debug("listNativeDirect modify key to {}", key);
    }

    // Zero-length objects found during the scan; matched against common
    // prefixes afterwards to decide which pseudo-directories to report.
    Map<String, FileStatus> emptyObjects = new HashMap<String, FileStatus>();
    ListObjectsRequest request = new ListObjectsRequest();
    request.setBucketName(mBucket);
    request.setMaxKeys(5000);
    request.setPrefix(key);
    if (!flatListing) {
        // Non-flat listing: a delimiter makes children appear as common prefixes.
        request.setDelimiter("/");
    }

    ObjectListing objectList = mClient.listObjects(request);

    List<S3ObjectSummary> objectSummaries = objectList.getObjectSummaries();
    List<String> commonPrefixes = objectList.getCommonPrefixes();

    boolean objectScanContinue = true;
    // One-object lookbehind: each summary is compared to the previous one to
    // detect leftovers of failed task attempts sharing the same logical name.
    S3ObjectSummary prevObj = null;
    // start FTA logic
    boolean stocatorOrigin = isSparkOrigin(key, path.toString());
    if (stocatorOrigin) {
        LOG.debug("Stocator origin is true for {}", key);
        if (!isJobSuccessful(key)) {
            // Data written by a failed Spark job is skipped entirely (and
            // optionally deleted).
            LOG.debug("{} created by failed Spark job. Skipped", key);
            if (fModeAutomaticDelete) {
                delete(hostName, new Path(key), true);
            }
            return new FileStatus[0];
        }
    }
    while (objectScanContinue) {
        for (S3ObjectSummary obj : objectSummaries) {
            if (prevObj == null) {
                prevObj = obj;
                continue;
            }
            String objKey = obj.getKey();
            String unifiedObjectName = extractUnifiedObjectName(objKey);
            LOG.debug("list candidate {}, unified name {}", objKey, unifiedObjectName);
            if (stocatorOrigin && !fullListing) {
                LOG.trace("{} created by Spark", unifiedObjectName);
                // if we here - data created by spark and job completed
                // successfully
                // however there be might parts of failed tasks that
                // were not aborted
                // we need to make sure there are no failed attempts
                if (nameWithoutTaskID(objKey).equals(nameWithoutTaskID(prevObj.getKey()))) {
                    // found failed that was not aborted.
                    LOG.trace("Colisiion found between {} and {}", prevObj.getKey(), objKey);
                    if (prevObj.getSize() < obj.getSize()) {
                        // Keep the larger object as the surviving candidate.
                        LOG.trace("New candidate is {}. Removed {}", obj.getKey(), prevObj.getKey());
                        prevObj = obj;
                    }
                    continue;
                }
            }
            FileStatus fs = createFileStatus(prevObj, hostName, path);
            if (fs.getLen() > 0 || fullListing) {
                LOG.debug("Native direct list. Adding {} size {}", fs.getPath(), fs.getLen());
                if (filter == null) {
                    tmpResult.add(fs);
                } else if (filter != null && filter.accept(fs.getPath())) {
                    tmpResult.add(fs);
                } else {
                    LOG.trace("{} rejected by path filter during list. Filter {}", fs.getPath(), filter);
                }
            } else {
                // Remember zero-length objects; they may represent directories.
                emptyObjects.put(fs.getPath().toString(), fs);
            }
            prevObj = obj;
        }
        boolean isTruncated = objectList.isTruncated();
        if (isTruncated) {
            // Page through the complete listing.
            objectList = mClient.listNextBatchOfObjects(objectList);
            objectSummaries = objectList.getObjectSummaries();
        } else {
            objectScanContinue = false;
        }
    }

    // The lookbehind always defers one summary; process the final one here.
    if (prevObj != null) {
        FileStatus fs = createFileStatus(prevObj, hostName, path);
        LOG.debug("Adding the last object from the list {}", fs.getPath());
        if (fs.getLen() > 0 || fullListing) {
            LOG.debug("Native direct list. Adding {} size {}", fs.getPath(), fs.getLen());
            if (filter == null) {
                memoryCache.putFileStatus(fs.getPath().toString(), fs);
                tmpResult.add(fs);
            } else if (filter != null && filter.accept(fs.getPath())) {
                memoryCache.putFileStatus(fs.getPath().toString(), fs);
                tmpResult.add(fs);
            } else {
                LOG.trace("{} rejected by path filter during list. Filter {}", fs.getPath(), filter);
            }
        } else if (!fs.getPath().getName().equals(HADOOP_SUCCESS)) {
            emptyObjects.put(fs.getPath().toString(), fs);
        }
    }

    // get common prefixes
    for (String comPrefix : commonPrefixes) {
        LOG.debug("Common prefix is {}", comPrefix);
        // Report a prefix as a directory when it matches a recorded zero-length
        // object, or when no empty objects were recorded at all.
        if (emptyObjects.containsKey(keyToQualifiedPath(hostName, comPrefix).toString())
                || emptyObjects.isEmpty()) {
            FileStatus status = new COSFileStatus(true, false, keyToQualifiedPath(hostName, comPrefix));
            LOG.debug("Match between common prefix and empty object {}. Adding to result", comPrefix);
            if (filter == null) {
                memoryCache.putFileStatus(status.getPath().toString(), status);
                tmpResult.add(status);
            } else if (filter != null && filter.accept(status.getPath())) {
                memoryCache.putFileStatus(status.getPath().toString(), status);
                tmpResult.add(status);
            } else {
                LOG.trace("Common prefix {} rejected by path filter during list. Filter {}", status.getPath(),
                        filter);
            }
        }
    }
    return tmpResult.toArray(new FileStatus[tmpResult.size()]);
}

From source file:com.ikanow.infinit.e.harvest.extraction.document.file.AwsInfiniteFile.java

License:Open Source License

/**
 * Lists the sub-directories (common prefixes) and files directly under the
 * configured bucket/prefix, updating the overwrite time to the newest file
 * modification time seen.
 *
 * @param optionalFilterDate not used by this implementation
 * @param maxDocs not used by this implementation
 * @return array of directory entries followed by file entries, fully populated
 */
@Override
public InfiniteFile[] listFiles(Date optionalFilterDate, int maxDocs) {
    _overwriteTime = 0L;
    ListObjectsRequest listRequest = new ListObjectsRequest().withBucketName(_awsBucketName);
    if (null != _awsObjectName) {
        listRequest.withPrefix(_awsObjectName);
    }
    listRequest.withDelimiter("/");
    ObjectListing list = ((AmazonS3Client) _awsClient).listObjects(listRequest);

    // Count only the keys that will actually be kept (directory placeholder
    // keys ending in "/" are skipped below) so the array is sized exactly.
    // The original sized it by the raw summary count and could return an
    // array with trailing null entries.
    int fileCount = 0;
    for (S3ObjectSummary s3Obj : list.getObjectSummaries()) {
        if (!s3Obj.getKey().endsWith("/")) {
            fileCount++;
        }
    }
    InfiniteFile[] fileList = new InfiniteFile[list.getCommonPrefixes().size() + fileCount];
    int nAdded = 0;
    // Get the sub-directories
    for (String subDir : list.getCommonPrefixes()) {
        // Create directories:
        fileList[nAdded] = new AwsInfiniteFile(_awsBucketName, subDir, null, _awsClient);
        nAdded++;
    } //TESTED (3b.3)
      // Get the files:
    for (S3ObjectSummary s3Obj : list.getObjectSummaries()) {
        if (!s3Obj.getKey().endsWith("/")) {
            fileList[nAdded] = new AwsInfiniteFile(s3Obj.getBucketName(), s3Obj.getKey(),
                    s3Obj.getLastModified(), _awsClient);
            // Track the newest modification time across all files.
            long fileTime = fileList[nAdded].getDate();
            if (fileTime > _overwriteTime) {
                _overwriteTime = fileTime;
            } //TESTED (3.2)
            nAdded++;
        }
    }
    return fileList;
}

From source file:com.images3.data.impl.ImageContentAccessImplS3.java

License:Apache License

/**
 * Deletes all image content stored under the given image plant id in the
 * given bucket. When no objects exist under that prefix, the key itself is
 * deleted instead.
 */
@Override
public void deleteImageContentByImagePlantId(String imagePlantId, AmazonS3Bucket bucket) {
    AmazonS3 client = clients.getClient(bucket);
    ObjectListing objList = client.listObjects(bucket.getName(), imagePlantId);
    if (objList.getObjectSummaries().isEmpty()) {
        client.deleteObject(new DeleteObjectRequest(bucket.getName(), imagePlantId));
    } else {
        deleteAllImageContent(client, bucket, objList);
    }
}