List of usage examples for com.amazonaws.services.s3.model S3ObjectSummary getKey
public String getKey()
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
/** Delete a file. * * @param f the path to delete./* w w w.java2 s . c o m*/ * @param recursive if path is a directory and set to * true, the directory is deleted else throws an exception. In * case of a file the recursive can be set to either true or false. * @return true if delete is successful else false. * @throws IOException */ public boolean delete(Path f, boolean recursive) throws IOException { LOG.info("Delete path " + f + " - recursive " + recursive); S3AFileStatus status; try { status = getFileStatus(f); } catch (FileNotFoundException e) { if (LOG.isDebugEnabled()) { LOG.debug("Couldn't delete " + f + " - does not exist"); } return false; } String key = pathToKey(f); if (status.isDirectory()) { if (LOG.isDebugEnabled()) { LOG.debug("delete: Path is a directory"); } if (!recursive) { throw new IOException("Path is a folder: " + f); } if (!key.endsWith("/")) { key = key + "/"; } if (status.isEmptyDirectory()) { if (LOG.isDebugEnabled()) { LOG.debug("Deleting fake empty directory"); } s3.deleteObject(bucket, key); statistics.incrementWriteOps(1); } else { if (LOG.isDebugEnabled()) { LOG.debug("Getting objects for directory prefix " + key + " to delete"); } ListObjectsRequest request = new ListObjectsRequest(); request.setBucketName(bucket); request.setPrefix(key); // Hopefully not setting a delimiter will cause this to find everything //request.setDelimiter("/"); request.setMaxKeys(maxKeys); List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<DeleteObjectsRequest.KeyVersion>(); ObjectListing objects = s3.listObjects(request); statistics.incrementReadOps(1); while (true) { for (S3ObjectSummary summary : objects.getObjectSummaries()) { keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey())); if (LOG.isDebugEnabled()) { LOG.debug("Got object to delete " + summary.getKey()); } if (keys.size() == MAX_ENTRIES_TO_DELETE) { DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket).withKeys(keys); s3.deleteObjects(deleteRequest); 
statistics.incrementWriteOps(1); keys.clear(); } } if (objects.isTruncated()) { objects = s3.listNextBatchOfObjects(objects); statistics.incrementReadOps(1); } else { break; } } if (!keys.isEmpty()) { DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket).withKeys(keys); s3.deleteObjects(deleteRequest); statistics.incrementWriteOps(1); } } } else { if (LOG.isDebugEnabled()) { LOG.debug("delete: Path is a file"); } s3.deleteObject(bucket, key); statistics.incrementWriteOps(1); } createFakeDirectoryIfNecessary(f.getParent()); return true; }
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
/** * List the statuses of the files/directories in the given path if the path is * a directory./*w w w . j av a 2 s . co m*/ * * @param f given path * @return the statuses of the files/directories in the given patch * @throws FileNotFoundException when the path does not exist; * IOException see specific implementation */ public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException { String key = pathToKey(f); LOG.info("List status for path: " + f); final List<FileStatus> result = new ArrayList<FileStatus>(); final FileStatus fileStatus = getFileStatus(f); if (fileStatus.isDirectory()) { if (!key.isEmpty()) { key = key + "/"; } ListObjectsRequest request = new ListObjectsRequest(); request.setBucketName(bucket); request.setPrefix(key); request.setDelimiter("/"); request.setMaxKeys(maxKeys); if (LOG.isDebugEnabled()) { LOG.debug("listStatus: doing listObjects for directory " + key); } ObjectListing objects = s3.listObjects(request); statistics.incrementReadOps(1); while (true) { for (S3ObjectSummary summary : objects.getObjectSummaries()) { Path keyPath = keyToPath(summary.getKey()).makeQualified(uri, workingDir); // Skip over keys that are ourselves and old S3N _$folder$ files if (keyPath.equals(f) || summary.getKey().endsWith(S3N_FOLDER_SUFFIX)) { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring: " + keyPath); } continue; } if (objectRepresentsDirectory(summary.getKey(), summary.getSize())) { result.add(new S3AFileStatus(true, true, keyPath)); if (LOG.isDebugEnabled()) { LOG.debug("Adding: fd: " + keyPath); } } else { result.add(new S3AFileStatus(summary.getSize(), dateToLong(summary.getLastModified()), keyPath)); if (LOG.isDebugEnabled()) { LOG.debug("Adding: fi: " + keyPath); } } } for (String prefix : objects.getCommonPrefixes()) { Path keyPath = keyToPath(prefix).makeQualified(uri, workingDir); if (keyPath.equals(f)) { continue; } result.add(new S3AFileStatus(true, false, keyPath)); if (LOG.isDebugEnabled()) { LOG.debug("Adding: rd: " + 
keyPath); } } if (objects.isTruncated()) { if (LOG.isDebugEnabled()) { LOG.debug("listStatus: list truncated - getting next batch"); } objects = s3.listNextBatchOfObjects(objects); statistics.incrementReadOps(1); } else { break; } } } else { if (LOG.isDebugEnabled()) { LOG.debug("Adding: rd (not a dir): " + f); } result.add(fileStatus); } return result.toArray(new FileStatus[result.size()]); }
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
/** * Return a file status object that represents the path. * @param f The path we want information from * @return a FileStatus object//from w w w . j ava 2s . co m * @throws java.io.FileNotFoundException when the path does not exist; * IOException see specific implementation */ public S3AFileStatus getFileStatus(Path f) throws IOException { String key = pathToKey(f); LOG.info("Getting path status for " + f + " (" + key + ")"); if (!key.isEmpty()) { try { ObjectMetadata meta = s3.getObjectMetadata(bucket, key); statistics.incrementReadOps(1); if (objectRepresentsDirectory(key, meta.getContentLength())) { if (LOG.isDebugEnabled()) { LOG.debug("Found exact file: fake directory"); } return new S3AFileStatus(true, true, f.makeQualified(uri, workingDir)); } else { if (LOG.isDebugEnabled()) { LOG.debug("Found exact file: normal file"); } return new S3AFileStatus(meta.getContentLength(), dateToLong(meta.getLastModified()), f.makeQualified(uri, workingDir)); } } catch (AmazonServiceException e) { if (e.getStatusCode() != 404) { printAmazonServiceException(e); throw e; } } catch (AmazonClientException e) { printAmazonClientException(e); throw e; } // Necessary? if (!key.endsWith("/")) { try { String newKey = key + "/"; ObjectMetadata meta = s3.getObjectMetadata(bucket, newKey); statistics.incrementReadOps(1); if (objectRepresentsDirectory(newKey, meta.getContentLength())) { if (LOG.isDebugEnabled()) { LOG.debug("Found file (with /): fake directory"); } return new S3AFileStatus(true, true, f.makeQualified(uri, workingDir)); } else { LOG.warn("Found file (with /): real file? 
should not happen: " + key); return new S3AFileStatus(meta.getContentLength(), dateToLong(meta.getLastModified()), f.makeQualified(uri, workingDir)); } } catch (AmazonServiceException e) { if (e.getStatusCode() != 404) { printAmazonServiceException(e); throw e; } } catch (AmazonClientException e) { printAmazonClientException(e); throw e; } } } try { if (!key.isEmpty() && !key.endsWith("/")) { key = key + "/"; } ListObjectsRequest request = new ListObjectsRequest(); request.setBucketName(bucket); request.setPrefix(key); request.setDelimiter("/"); request.setMaxKeys(1); ObjectListing objects = s3.listObjects(request); statistics.incrementReadOps(1); if (objects.getCommonPrefixes().size() > 0 || objects.getObjectSummaries().size() > 0) { if (LOG.isDebugEnabled()) { LOG.debug("Found path as directory (with /): " + objects.getCommonPrefixes().size() + "/" + objects.getObjectSummaries().size()); for (S3ObjectSummary summary : objects.getObjectSummaries()) { LOG.debug("Summary: " + summary.getKey() + " " + summary.getSize()); } for (String prefix : objects.getCommonPrefixes()) { LOG.debug("Prefix: " + prefix); } } return new S3AFileStatus(true, false, f.makeQualified(uri, workingDir)); } } catch (AmazonServiceException e) { if (e.getStatusCode() != 404) { printAmazonServiceException(e); throw e; } } catch (AmazonClientException e) { printAmazonClientException(e); throw e; } if (LOG.isDebugEnabled()) { LOG.debug("Not Found: " + f); } throw new FileNotFoundException("No such file or directory: " + f); }
From source file:org.apache.hadoop.fs.s3a.S3AUtils.java
License:Apache License
/**
 * Create a file status instance from a listing entry.
 *
 * @param keyPath path to entry
 * @param summary summary from AWS
 * @param blockSize block size to declare.
 * @return a status entry
 */
public static S3AFileStatus createFileStatus(Path keyPath, S3ObjectSummary summary, long blockSize) {
    long size = summary.getSize();
    if (objectRepresentsDirectory(summary.getKey(), size)) {
        // Directory marker objects become directory statuses.
        return new S3AFileStatus(true, true, keyPath);
    }
    return new S3AFileStatus(size, dateToLong(summary.getLastModified()), keyPath, blockSize);
}
From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java
License:Apache License
/**
 * Renames Path src to Path dst. Can take place on local fs
 * or remote DFS.
 *
 * Warning: S3 does not support renames. This method does a copy which can
 * take S3 some time to execute with large files and directories. Since
 * there is no Progressable passed in, this can time out jobs.
 *
 * Note: This implementation differs with other S3 drivers. Specifically:
 *       Fails if src is a file and dst is a directory.
 *       Fails if src is a directory and dst is a file.
 *       Fails if the parent of dst does not exist or is a file.
 *       Fails if dst is a directory that is not empty.
 *
 * @param src path to be renamed
 * @param dst new path after rename
 * @throws IOException on failure
 * @return true if rename is successful
 */
public boolean rename(Path src, Path dst) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Rename path {} to {}", src, dst);
    }

    String srcKey = pathToKey(src);
    String dstKey = pathToKey(dst);

    // A rename involving the FS root cannot be expressed as object keys.
    if (srcKey.isEmpty() || dstKey.isEmpty()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("rename: src or dst are empty");
        }
        return false;
    }

    S3RFileStatus srcStatus;
    try {
        srcStatus = getFileStatus(src);
    } catch (FileNotFoundException e) {
        LOG.error("rename: src not found {}", src);
        return false;
    }

    // Renaming onto itself succeeds only for files.
    if (srcKey.equals(dstKey)) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("rename: src and dst refer to the same file or directory");
        }
        return srcStatus.isFile();
    }

    S3RFileStatus dstStatus = null;
    try {
        dstStatus = getFileStatus(dst);

        if (srcStatus.isDirectory() && dstStatus.isFile()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("rename: src is a directory and dst is a file");
            }
            return false;
        }

        // An existing, non-empty destination directory blocks the rename.
        if (dstStatus.isDirectory() && !dstStatus.isEmptyDirectory()) {
            return false;
        }
    } catch (FileNotFoundException e) {
        // dst does not exist: its parent must exist and be a directory.
        // Parent must exist
        Path parent = dst.getParent();
        if (!pathToKey(parent).isEmpty()) {
            try {
                S3RFileStatus dstParentStatus = getFileStatus(dst.getParent());
                if (!dstParentStatus.isDirectory()) {
                    return false;
                }
            } catch (FileNotFoundException e2) {
                return false;
            }
        }
    }

    // Ok! Time to start
    if (srcStatus.isFile()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("rename: renaming file " + src + " to " + dst);
        }
        if (dstStatus != null && dstStatus.isDirectory()) {
            // Copy the file under the destination directory, keeping its name.
            String newDstKey = dstKey;
            if (!newDstKey.endsWith("/")) {
                newDstKey = newDstKey + "/";
            }
            String filename = srcKey.substring(pathToKey(src.getParent()).length() + 1);
            newDstKey = newDstKey + filename;
            copyFile(srcKey, newDstKey);
        } else {
            copyFile(srcKey, dstKey);
        }
        // Copy-then-delete: S3 has no native rename.
        delete(src, false);
    } else {
        if (LOG.isDebugEnabled()) {
            LOG.debug("rename: renaming directory " + src + " to " + dst);
        }

        // This is a directory to directory copy
        if (!dstKey.endsWith("/")) {
            dstKey = dstKey + "/";
        }

        if (!srcKey.endsWith("/")) {
            srcKey = srcKey + "/";
        }

        //Verify dest is not a child of the source directory
        if (dstKey.startsWith(srcKey)) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("cannot rename a directory to a subdirectory of self");
            }
            return false;
        }

        List<DeleteObjectsRequest.KeyVersion> keysToDelete = new ArrayList<>();
        if (dstStatus != null && dstStatus.isEmptyDirectory()) {
            // delete unnecessary fake directory.
            keysToDelete.add(new DeleteObjectsRequest.KeyVersion(dstKey));
        }

        // List every object under srcKey (no delimiter, so all depths).
        ListObjectsRequest request = new ListObjectsRequest();
        request.setBucketName(bucket);
        request.setPrefix(srcKey);
        request.setMaxKeys(maxKeys);

        ObjectListing objects = s3.listObjects(request);
        statistics.incrementReadOps(1);

        while (true) {
            for (S3ObjectSummary summary : objects.getObjectSummaries()) {
                keysToDelete.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
                // Re-root each key from the source prefix onto the destination.
                String newDstKey = dstKey + summary.getKey().substring(srcKey.length());
                copyFile(summary.getKey(), newDstKey);

                // Flush a full batch of deletes when the multi-delete cap is hit.
                if (keysToDelete.size() == MAX_ENTRIES_TO_DELETE) {
                    DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket)
                            .withKeys(keysToDelete);
                    s3.deleteObjects(deleteRequest);
                    statistics.incrementWriteOps(1);
                    keysToDelete.clear();
                }
            }

            if (objects.isTruncated()) {
                objects = s3.listNextBatchOfObjects(objects);
                statistics.incrementReadOps(1);
            } else {
                // Flush the final partial batch before leaving the loop.
                if (keysToDelete.size() > 0) {
                    DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket)
                            .withKeys(keysToDelete);
                    s3.deleteObjects(deleteRequest);
                    statistics.incrementWriteOps(1);
                }
                break;
            }
        }
    }

    // NOTE(review): this is a reference comparison of Path objects, not
    // !src.getParent().equals(dst.getParent()) — presumably an equality
    // check was intended; verify whether != vs !equals() matters here.
    if (src.getParent() != dst.getParent()) {
        deleteUnnecessaryFakeDirectories(dst.getParent());
        createFakeDirectoryIfNecessary(src.getParent());
    }
    return true;
}
From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java
License:Apache License
/** Delete a file. * * @param f the path to delete.//from www. j av a 2 s .c o m * @param recursive if path is a directory and set to * true, the directory is deleted else throws an exception. In * case of a file the recursive can be set to either true or false. * @return true if delete is successful else false. * @throws IOException */ public boolean delete(Path f, boolean recursive) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Delete path " + f + " - recursive " + recursive); } S3RFileStatus status; try { status = getFileStatus(f); } catch (FileNotFoundException e) { if (LOG.isDebugEnabled()) { LOG.debug("Couldn't delete " + f + " - does not exist"); } return false; } String key = pathToKey(f); if (status.isDirectory()) { if (LOG.isDebugEnabled()) { LOG.debug("delete: Path is a directory"); } if (!recursive && !status.isEmptyDirectory()) { throw new IOException("Path is a folder: " + f + " and it is not an empty directory"); } if (!key.endsWith("/")) { key = key + "/"; } if (key.equals("/")) { LOG.info("s3a cannot delete the root directory"); return false; } if (status.isEmptyDirectory()) { if (LOG.isDebugEnabled()) { LOG.debug("Deleting fake empty directory"); } s3.deleteObject(bucket, key); statistics.incrementWriteOps(1); } else { if (LOG.isDebugEnabled()) { LOG.debug("Getting objects for directory prefix " + key + " to delete"); } ListObjectsRequest request = new ListObjectsRequest(); request.setBucketName(bucket); request.setPrefix(key); // Hopefully not setting a delimiter will cause this to find everything //request.setDelimiter("/"); request.setMaxKeys(maxKeys); List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>(); ObjectListing objects = s3.listObjects(request); statistics.incrementReadOps(1); while (true) { for (S3ObjectSummary summary : objects.getObjectSummaries()) { keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey())); if (LOG.isDebugEnabled()) { LOG.debug("Got object to delete " + summary.getKey()); } if 
(keys.size() == MAX_ENTRIES_TO_DELETE) { DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket).withKeys(keys); s3.deleteObjects(deleteRequest); statistics.incrementWriteOps(1); keys.clear(); } } if (objects.isTruncated()) { objects = s3.listNextBatchOfObjects(objects); statistics.incrementReadOps(1); } else { if (!keys.isEmpty()) { DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket).withKeys(keys); s3.deleteObjects(deleteRequest); statistics.incrementWriteOps(1); } break; } } } } else { if (LOG.isDebugEnabled()) { LOG.debug("delete: Path is a file"); } s3.deleteObject(bucket, key); statistics.incrementWriteOps(1); } createFakeDirectoryIfNecessary(f.getParent()); return true; }
From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java
License:Apache License
/** * List the statuses of the files/directories in the given path if the path is * a directory.// w w w .j a v a 2 s .c o m * * @param f given path * @return the statuses of the files/directories in the given patch * @throws FileNotFoundException when the path does not exist; * IOException see specific implementation */ public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException { String key = pathToKey(f); if (LOG.isDebugEnabled()) { LOG.debug("List status for path: " + f); } final List<FileStatus> result = new ArrayList<FileStatus>(); final FileStatus fileStatus = getFileStatus(f); if (fileStatus.isDirectory()) { if (!key.isEmpty()) { key = key + "/"; } ListObjectsRequest request = new ListObjectsRequest(); request.setBucketName(bucket); request.setPrefix(key); request.setDelimiter("/"); request.setMaxKeys(maxKeys); if (LOG.isDebugEnabled()) { LOG.debug("listStatus: doing listObjects for directory " + key); } ObjectListing objects = s3.listObjects(request); statistics.incrementReadOps(1); while (true) { for (S3ObjectSummary summary : objects.getObjectSummaries()) { Path keyPath = keyToPath(summary.getKey()).makeQualified(uri, workingDir); // Skip over keys that are ourselves and old S3N _$folder$ files if (keyPath.equals(f) || summary.getKey().endsWith(S3N_FOLDER_SUFFIX)) { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring: " + keyPath); } continue; } if (objectRepresentsDirectory(summary.getKey(), summary.getSize())) { result.add(new S3RFileStatus(true, true, keyPath)); if (LOG.isDebugEnabled()) { LOG.debug("Adding: fd: " + keyPath); } } else { result.add(new S3RFileStatus(summary.getSize(), dateToLong(summary.getLastModified()), keyPath, getDefaultBlockSize(f.makeQualified(uri, workingDir)))); if (LOG.isDebugEnabled()) { LOG.debug("Adding: fi: " + keyPath); } } } for (String prefix : objects.getCommonPrefixes()) { Path keyPath = keyToPath(prefix).makeQualified(uri, workingDir); if (keyPath.equals(f)) { continue; } result.add(new 
S3RFileStatus(true, false, keyPath)); if (LOG.isDebugEnabled()) { LOG.debug("Adding: rd: " + keyPath); } } if (objects.isTruncated()) { if (LOG.isDebugEnabled()) { LOG.debug("listStatus: list truncated - getting next batch"); } objects = s3.listNextBatchOfObjects(objects); statistics.incrementReadOps(1); } else { break; } } } else { if (LOG.isDebugEnabled()) { LOG.debug("Adding: rd (not a dir): " + f); } result.add(fileStatus); } return result.toArray(new FileStatus[result.size()]); }
From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java
License:Apache License
/**
 * Return a file status object that represents the path.
 *
 * Resolution order: probe the exact key, then the key with a trailing
 * slash (a fake directory marker), and finally a one-entry listing under
 * the prefix (a directory with children but no marker object).
 *
 * @param f The path we want information from
 * @return a FileStatus object
 * @throws FileNotFoundException when the path does not exist;
 *   IOException see specific implementation
 */
public S3RFileStatus getFileStatus(Path f) throws IOException {
    String key = pathToKey(f);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Getting path status for " + f + " (" + key + ")");
    }

    if (!key.isEmpty()) {
        // Probe 1: the exact key.
        try {
            ObjectMetadata meta = s3.getObjectMetadata(bucket, key);
            statistics.incrementReadOps(1);

            if (objectRepresentsDirectory(key, meta.getContentLength())) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Found exact file: fake directory");
                }
                return new S3RFileStatus(true, true, f.makeQualified(uri, workingDir));
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Found exact file: normal file");
                }
                return new S3RFileStatus(meta.getContentLength(), dateToLong(meta.getLastModified()),
                        f.makeQualified(uri, workingDir), getDefaultBlockSize(f.makeQualified(uri, workingDir)));
            }
        } catch (AmazonServiceException e) {
            // 404 means "keep probing"; anything else is a real failure.
            if (e.getStatusCode() != 404) {
                printAmazonServiceException(e);
                throw e;
            }
        } catch (AmazonClientException e) {
            printAmazonClientException(e);
            throw e;
        }

        // Probe 2: the key with a trailing slash (directory marker object).
        // Necessary?
        if (!key.endsWith("/")) {
            try {
                String newKey = key + "/";
                ObjectMetadata meta = s3.getObjectMetadata(bucket, newKey);
                statistics.incrementReadOps(1);

                if (objectRepresentsDirectory(newKey, meta.getContentLength())) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Found file (with /): fake directory");
                    }
                    return new S3RFileStatus(true, true, f.makeQualified(uri, workingDir));
                } else {
                    LOG.warn("Found file (with /): real file? should not happen: {}", key);
                    return new S3RFileStatus(meta.getContentLength(), dateToLong(meta.getLastModified()),
                            f.makeQualified(uri, workingDir),
                            getDefaultBlockSize(f.makeQualified(uri, workingDir)));
                }
            } catch (AmazonServiceException e) {
                if (e.getStatusCode() != 404) {
                    printAmazonServiceException(e);
                    throw e;
                }
            } catch (AmazonClientException e) {
                printAmazonClientException(e);
                throw e;
            }
        }
    }

    // Probe 3: list one entry under the prefix; any hit means a directory.
    try {
        if (!key.isEmpty() && !key.endsWith("/")) {
            key = key + "/";
        }

        ListObjectsRequest request = new ListObjectsRequest();
        request.setBucketName(bucket);
        request.setPrefix(key);
        request.setDelimiter("/");
        request.setMaxKeys(1);

        ObjectListing objects = s3.listObjects(request);
        statistics.incrementReadOps(1);

        if (!objects.getCommonPrefixes().isEmpty() || objects.getObjectSummaries().size() > 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Found path as directory (with /): " + objects.getCommonPrefixes().size() + "/"
                        + objects.getObjectSummaries().size());

                for (S3ObjectSummary summary : objects.getObjectSummaries()) {
                    LOG.debug("Summary: " + summary.getKey() + " " + summary.getSize());
                }
                for (String prefix : objects.getCommonPrefixes()) {
                    LOG.debug("Prefix: " + prefix);
                }
            }

            return new S3RFileStatus(true, false, f.makeQualified(uri, workingDir));
        }
    } catch (AmazonServiceException e) {
        if (e.getStatusCode() != 404) {
            printAmazonServiceException(e);
            throw e;
        }
    } catch (AmazonClientException e) {
        printAmazonClientException(e);
        throw e;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Not Found: " + f);
    }
    throw new FileNotFoundException("No such file or directory: " + f);
}
From source file:org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpi.java
License:Apache License
/** {@inheritDoc} */
@SuppressWarnings({ "BusyWait" })
@Override
public void spiStart(String gridName) throws IgniteSpiException {
    // Start SPI start stopwatch.
    startStopwatch();

    assertParameter(cred != null, "awsCredentials != null");

    if (log.isDebugEnabled()) {
        log.debug(configInfo("awsCredentials", cred));
        log.debug(configInfo("clientConfiguration", cfg));
        log.debug(configInfo("bucketNameSuffix", bucketNameSuffix));
    }

    if (cfg == null)
        U.warn(log, "Amazon client configuration is not set (will use default).");

    // Derive the bucket name from the configured suffix, falling back
    // to the default suffix when none was provided.
    if (F.isEmpty(bucketNameSuffix)) {
        U.warn(log, "Bucket name suffix is null or empty (will use default bucket name).");

        bucketName = BUCKET_NAME_PREFIX + DFLT_BUCKET_NAME_SUFFIX;
    } else
        bucketName = BUCKET_NAME_PREFIX + bucketNameSuffix;

    s3 = cfg != null ? new AmazonS3Client(cred, cfg) : new AmazonS3Client(cred);

    if (!s3.doesBucketExist(bucketName)) {
        try {
            s3.createBucket(bucketName);

            if (log.isDebugEnabled())
                log.debug("Created S3 bucket: " + bucketName);

            // Bucket creation is eventually consistent; poll until visible.
            while (!s3.doesBucketExist(bucketName))
                try {
                    U.sleep(200);
                } catch (IgniteInterruptedCheckedException e) {
                    throw new IgniteSpiException("Thread has been interrupted.", e);
                }
        } catch (AmazonClientException e) {
            // Another node may have created the bucket concurrently; only
            // fail if the bucket still does not exist.
            try {
                if (!s3.doesBucketExist(bucketName))
                    throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            } catch (AmazonClientException ignored) {
                throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            }
        }
    }

    Collection<S3TimeData> s3TimeDataLst = new LinkedList<>();

    // Rebuild expiry tracking from checkpoints already in the bucket,
    // paging through the listing until it is no longer truncated.
    try {
        ObjectListing list = s3.listObjects(bucketName);

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                S3CheckpointData data = read(sum.getKey());

                if (data != null) {
                    s3TimeDataLst.add(new S3TimeData(data.getExpireTime(), data.getKey()));

                    if (log.isDebugEnabled())
                        log.debug("Registered existing checkpoint from key: " + data.getKey());
                }
            }

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    } catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    } catch (IgniteCheckedException e) {
        throw new IgniteSpiException("Failed to marshal/unmarshal objects in bucket: " + bucketName, e);
    }

    // Track expiration for only those data that are made by this node
    timeoutWrk = new S3TimeoutWorker();

    timeoutWrk.add(s3TimeDataLst);

    timeoutWrk.start();

    registerMBean(gridName, this, S3CheckpointSpiMBean.class);

    // Ack ok start.
    if (log.isDebugEnabled())
        log.debug(startInfo());
}
From source file:org.apache.ignite.spi.discovery.tcp.ipfinder.s3.TcpDiscoveryS3IpFinder.java
License:Apache License
/** {@inheritDoc} */ @Override//from www.j a v a 2s . c o m public Collection<InetSocketAddress> getRegisteredAddresses() throws IgniteSpiException { initClient(); Collection<InetSocketAddress> addrs = new LinkedList<>(); try { ObjectListing list = s3.listObjects(bucketName); while (true) { for (S3ObjectSummary sum : list.getObjectSummaries()) { String key = sum.getKey(); StringTokenizer st = new StringTokenizer(key, DELIM); if (st.countTokens() != 2) U.error(log, "Failed to parse S3 entry due to invalid format: " + key); else { String addrStr = st.nextToken(); String portStr = st.nextToken(); int port = -1; try { port = Integer.parseInt(portStr); } catch (NumberFormatException e) { U.error(log, "Failed to parse port for S3 entry: " + key, e); } if (port != -1) try { addrs.add(new InetSocketAddress(addrStr, port)); } catch (IllegalArgumentException e) { U.error(log, "Failed to parse port for S3 entry: " + key, e); } } } if (list.isTruncated()) list = s3.listNextBatchOfObjects(list); else break; } } catch (AmazonClientException e) { throw new IgniteSpiException("Failed to list objects in the bucket: " + bucketName, e); } return addrs; }