Example usage for com.amazonaws.services.s3.model S3ObjectSummary getKey

List of usage examples for com.amazonaws.services.s3.model S3ObjectSummary getKey

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model S3ObjectSummary getKey.

Prototype

public String getKey() 

Document

Gets the key under which this object is stored in Amazon S3.
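Before the real-world usages below, here is a minimal, self-contained sketch of the call: it pages through a bucket and prints every key returned by getKey(). The client construction, bucket name, and class name are placeholders, not taken from the examples on this page.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class ListKeysSketch {

    public static void main(String[] args) {
        // Placeholder client and bucket name; adapt to your own setup.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        String bucket = "my-example-bucket";

        ObjectListing listing = s3.listObjects(bucket);
        while (true) {
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                // getKey() returns the key under which the object is stored.
                System.out.println(summary.getKey());
            }
            if (!listing.isTruncated()) {
                break;
            }
            listing = s3.listNextBatchOfObjects(listing);
        }
    }
}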

Usage

From source file:org.apache.jackrabbit.aws.ext.ds.S3Backend.java

License:Apache License

@Override
public Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException {
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Set<DataIdentifier> ids = new HashSet<DataIdentifier>();
        ObjectListing prevObjectListing = s3service.listObjects(bucket);
        while (true) {
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                String id = getIdentifierName(s3ObjSumm.getKey());
                if (id != null) {
                    ids.add(new DataIdentifier(id));
                }
            }
            if (!prevObjectListing.isTruncated())
                break;
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
        LOG.debug("getAllIdentifiers returned size [{}] took [{}] ms.", ids.size(),
                (System.currentTimeMillis() - start));
        return ids.iterator();
    } catch (AmazonServiceException e) {
        throw new DataStoreException("Could not list objects", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}

From source file:org.apache.jackrabbit.aws.ext.ds.S3Backend.java

License:Apache License

@Override
public Set<DataIdentifier> deleteAllOlderThan(long min) throws DataStoreException {
    long start = System.currentTimeMillis();
    // S3 stores lastModified to the lower boundary of the timestamp in ms,
    // hence min is reduced by 1000 ms.
    min = min - 1000;
    Set<DataIdentifier> deleteIdSet = new HashSet<DataIdentifier>(30);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectListing prevObjectListing = s3service.listObjects(bucket);
        while (true) {
            List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                DataIdentifier identifier = new DataIdentifier(getIdentifierName(s3ObjSumm.getKey()));
                long lastModified = s3ObjSumm.getLastModified().getTime();
                LOG.debug("Identifier [{}]'s lastModified = [{}]", identifier, lastModified);
                if (!store.isInUse(identifier) && lastModified < min) {
                    LOG.debug("add id [{}] to delete lists", s3ObjSumm.getKey());
                    deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
                    deleteIdSet.add(identifier);
                }
            }
            if (deleteList.size() > 0) {
                DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
                delObjsReq.setKeys(deleteList);
                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
                if (dobjs.getDeletedObjects().size() != deleteList.size()) {
                    throw new DataStoreException(
                            "Incomplete delete object request. Only " + dobjs.getDeletedObjects().size()
                                    + " out of " + deleteList.size() + " are deleted");
                } else {
                    LOG.debug("[{}] records deleted from datastore", deleteList);
                }
            }
            if (!prevObjectListing.isTruncated()) {
                break;
            }
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.info(
            "deleteAllOlderThan: min=[{}] exit. Deleted[{}] records. Number of records deleted [{}] took [{}]ms",
            new Object[] { min, deleteIdSet, deleteIdSet.size(), (System.currentTimeMillis() - start) });
    return deleteIdSet;
}

From source file:org.apache.jackrabbit.aws.ext.ds.S3Backend.java

License:Apache License

/**
 * This method renames object keys in S3 concurrently. The number of
 * concurrent threads is defined by the 'maxConnections' property in
 * aws.properties. As S3 doesn't have a "move" command, this method simulates
 * a move by copying the object to a new key and then deleting the older key.
 */
private void renameKeys() throws DataStoreException {
    long startTime = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    long count = 0;
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectListing prevObjectListing = s3service.listObjects(bucket, KEY_PREFIX);
        List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
        int nThreads = Integer.parseInt(properties.getProperty("maxConnections"));
        ExecutorService executor = Executors.newFixedThreadPool(nThreads,
                new NamedThreadFactory("s3-object-rename-worker"));
        boolean taskAdded = false;
        while (true) {
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                executor.execute(new KeyRenameThread(s3ObjSumm.getKey()));
                taskAdded = true;
                count++;
                deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
            }
            if (!prevObjectListing.isTruncated())
                break;
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
        // This will make the executor accept no new threads
        // and finish all existing threads in the queue
        executor.shutdown();

        try {
            // Wait until all tasks have finished
            while (taskAdded && !executor.awaitTermination(10, TimeUnit.SECONDS)) {
                LOG.info("Rename S3 keys tasks timed out. Waiting again");
            }
        } catch (InterruptedException ie) {

        }
        LOG.info("Renamed [{}] keys, time taken [{}]sec", count,
                ((System.currentTimeMillis() - startTime) / 1000));
        // Delete older keys.
        if (deleteList.size() > 0) {
            DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
            int batchSize = 500, startIndex = 0, size = deleteList.size();
            int endIndex = batchSize < size ? batchSize : size;
            while (endIndex <= size) {
                delObjsReq.setKeys(Collections.unmodifiableList(deleteList.subList(startIndex, endIndex)));
                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
                LOG.info("Records[{}] deleted in datastore from index [{}] to [{}]",
                        new Object[] { dobjs.getDeletedObjects().size(), startIndex, (endIndex - 1) });
                if (endIndex == size) {
                    break;
                } else {
                    startIndex = endIndex;
                    endIndex = (startIndex + batchSize) < size ? (startIndex + batchSize) : size;
                }
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
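The Javadoc above notes that S3 has no native "move" command, so the backend simulates one by copying the object to its new key and deleting the old key. A minimal sketch of that copy-then-delete pattern for a single key, using placeholder bucket and key names rather than the Jackrabbit helper classes, might look like this:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class MoveKeySketch {

    /**
     * Simulates a rename: copy the object to the new key, then delete the old key.
     * S3 offers no atomic move, so both keys briefly exist during the operation.
     */
    static void moveKey(AmazonS3 s3, String bucket, String oldKey, String newKey) {
        s3.copyObject(bucket, oldKey, bucket, newKey);
        s3.deleteObject(bucket, oldKey);
    }

    public static void main(String[] args) {
        // Placeholder bucket and key names.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        moveKey(s3, "my-example-bucket", "oldPrefix/abc123", "newPrefix/abc123");
    }
}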

From source file:org.apache.jackrabbit.aws.ext.Utils.java

License:Apache License

/**
 * Deletes an S3 bucket. This method first deletes all objects from the bucket
 * and then deletes the empty bucket.
 * 
 * @param bucketName the bucket name.
 */
public static void deleteBucket(final String bucketName) throws IOException {
    Properties prop = readConfig(DEFAULT_CONFIG_FILE);
    AmazonS3 s3service = openService(prop);
    ObjectListing prevObjectListing = s3service.listObjects(bucketName);
    while (true) {
        for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
            s3service.deleteObject(bucketName, s3ObjSumm.getKey());
        }
        if (!prevObjectListing.isTruncated()) {
            break;
        }
        prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
    }
    s3service.deleteBucket(bucketName);
}

From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java

License:Apache License

@Override
public Iterator<DataIdentifier> getAllIdentifiers() throws DataStoreException {
    return new RecordsIterator<DataIdentifier>(new Function<S3ObjectSummary, DataIdentifier>() {
        @Override
        public DataIdentifier apply(S3ObjectSummary input) {
            return new DataIdentifier(getIdentifierName(input.getKey()));
        }
    });
}
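The example above maps each S3ObjectSummary to a DataIdentifier with a Guava Function inside a custom RecordsIterator. A simplified sketch of the same mapping idea, applied to a single listing page and assuming Guava on the classpath (the bucket name is a placeholder):

import java.util.Iterator;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.google.common.base.Function;
import com.google.common.collect.Iterators;

public class TransformSummariesSketch {

    public static void main(String[] args) {
        // Placeholder client and bucket name.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Map each summary to its key, mirroring the Function-based mapping above
        // (a full version would also follow isTruncated() across listing pages).
        Iterator<String> keys = Iterators.transform(
                s3.listObjects("my-example-bucket").getObjectSummaries().iterator(),
                new Function<S3ObjectSummary, String>() {
                    @Override
                    public String apply(S3ObjectSummary summary) {
                        return summary.getKey();
                    }
                });

        while (keys.hasNext()) {
            System.out.println(keys.next());
        }
    }
}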

From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java

License:Apache License

@Override
public Set<DataIdentifier> deleteAllOlderThan(long min) throws DataStoreException {
    long start = System.currentTimeMillis();
    // S3 stores lastModified to the lower boundary of the timestamp in ms,
    // hence min is reduced by 1000 ms.
    min = min - 1000;
    Set<DataIdentifier> deleteIdSet = new HashSet<DataIdentifier>(30);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectListing prevObjectListing = s3service.listObjects(bucket);
        while (true) {
            List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                if (!s3ObjSumm.getKey().startsWith(META_KEY_PREFIX)) {
                    DataIdentifier identifier = new DataIdentifier(getIdentifierName(s3ObjSumm.getKey()));
                    long lastModified = s3ObjSumm.getLastModified().getTime();
                    LOG.debug("Identifier [{}]'s lastModified = [{}]", identifier, lastModified);
                    if (lastModified < min && store.confirmDelete(identifier)
                    // confirm once more that record's lastModified < min
                    //  order is important here
                            && s3service.getObjectMetadata(bucket, s3ObjSumm.getKey()).getLastModified()
                                    .getTime() < min) {

                        store.deleteFromCache(identifier);
                        LOG.debug("add id [{}] to delete lists", s3ObjSumm.getKey());
                        deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
                        deleteIdSet.add(identifier);
                    }
                }
            }
            if (deleteList.size() > 0) {
                DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
                delObjsReq.setKeys(deleteList);
                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
                if (dobjs.getDeletedObjects().size() != deleteList.size()) {
                    throw new DataStoreException(
                            "Incomplete delete object request. Only " + dobjs.getDeletedObjects().size()
                                    + " out of " + deleteList.size() + " are deleted");
                } else {
                    LOG.debug("[{}] records deleted from datastore", deleteList);
                }
            }
            if (!prevObjectListing.isTruncated()) {
                break;
            }
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.info(
            "deleteAllOlderThan: min=[{}] exit. Deleted[{}] records. Number of records deleted [{}] took [{}]ms",
            new Object[] { min, deleteIdSet, deleteIdSet.size(), (System.currentTimeMillis() - start) });
    return deleteIdSet;
}

From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java

License:Apache License

public List<DataRecord> getAllMetadataRecords(String prefix) {
    List<DataRecord> metadataList = new ArrayList<DataRecord>();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket)
                .withPrefix(addMetaKeyPrefix(prefix));
        ObjectListing prevObjectListing = s3service.listObjects(listObjectsRequest);
        for (final S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
            metadataList.add(new S3DataRecord(s3service, bucket, stripMetaKeyPrefix(s3ObjSumm.getKey()),
                    s3ObjSumm.getLastModified().getTime(), s3ObjSumm.getSize()));
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    return metadataList;
}
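The method above scopes the listing to a prefix by building a ListObjectsRequest instead of passing only the bucket name. A minimal standalone sketch of that prefix-scoped listing, with placeholder bucket and prefix values (a production version would also follow isTruncated() as in the earlier examples):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class ListByPrefixSketch {

    public static void main(String[] args) {
        // Placeholder client, bucket, and prefix.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        ListObjectsRequest request = new ListObjectsRequest()
                .withBucketName("my-example-bucket")
                .withPrefix("META/");

        ObjectListing listing = s3.listObjects(request);
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            // Each summary exposes the key, size, and last-modified timestamp.
            System.out.println(summary.getKey() + " (" + summary.getSize() + " bytes, modified "
                    + summary.getLastModified() + ")");
        }
    }
}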

From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java

License:Apache License

public void deleteAllMetadataRecords(String prefix) {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());

        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket)
                .withPrefix(addMetaKeyPrefix(prefix));
        ObjectListing metaList = s3service.listObjects(listObjectsRequest);
        List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
        for (S3ObjectSummary s3ObjSumm : metaList.getObjectSummaries()) {
            deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
        }
        if (deleteList.size() > 0) {
            DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
            delObjsReq.setKeys(deleteList);
            DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}

From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java

License:Apache License

@Override
public Iterator<DataRecord> getAllRecords() {
    return new RecordsIterator<DataRecord>(new Function<S3ObjectSummary, DataRecord>() {
        @Override
        public DataRecord apply(S3ObjectSummary input) {
            return new S3DataRecord(s3service, bucket, getIdentifierName(input.getKey()),
                    input.getLastModified().getTime(), input.getSize());
        }
    });
}

From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java

License:Apache License

/**
 * This method renames object keys in S3 concurrently. The number of
 * concurrent threads is defined by the 'maxConnections' property in
 * aws.properties. As S3 doesn't have a "move" command, this method simulates
 * a move by copying the object to a new key and then deleting the older key.
 */
private void renameKeys() throws DataStoreException {
    long startTime = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    long count = 0;
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectListing prevObjectListing = s3service.listObjects(bucket);
        List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
        int nThreads = Integer.parseInt(properties.getProperty("maxConnections"));
        ExecutorService executor = Executors.newFixedThreadPool(nThreads,
                new NamedThreadFactory("s3-object-rename-worker"));
        boolean taskAdded = false;
        while (true) {
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                executor.execute(new KeyRenameThread(s3ObjSumm.getKey()));
                taskAdded = true;
                count++;
                // delete the object if it follows old key name format
                if (s3ObjSumm.getKey().startsWith(KEY_PREFIX)) {
                    deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
                }
            }
            if (!prevObjectListing.isTruncated())
                break;
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
        // This will make the executor accept no new threads
        // and finish all existing threads in the queue
        executor.shutdown();

        try {
            // Wait until all tasks have finished
            while (taskAdded && !executor.awaitTermination(10, TimeUnit.SECONDS)) {
                LOG.info("Rename S3 keys tasks timed out. Waiting again");
            }
        } catch (InterruptedException ie) {

        }
        LOG.info("Renamed [{}] keys, time taken [{}]sec", count,
                ((System.currentTimeMillis() - startTime) / 1000));
        // Delete older keys.
        if (deleteList.size() > 0) {
            DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
            int batchSize = 500, startIndex = 0, size = deleteList.size();
            int endIndex = batchSize < size ? batchSize : size;
            while (endIndex <= size) {
                delObjsReq.setKeys(Collections.unmodifiableList(deleteList.subList(startIndex, endIndex)));
                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
                LOG.info("Records[{}] deleted in datastore from index [{}] to [{}]",
                        new Object[] { dobjs.getDeletedObjects().size(), startIndex, (endIndex - 1) });
                if (endIndex == size) {
                    break;
                } else {
                    startIndex = endIndex;
                    endIndex = (startIndex + batchSize) < size ? (startIndex + batchSize) : size;
                }
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}