List of usage examples for com.amazonaws.services.s3.model.S3ObjectSummary.getSize()
public long getSize()
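getSize() returns the size of the listed object in bytes, taken from the bucket listing response. Before the full examples below, here is a minimal sketch of typical usage; the bucket name and default-credentials setup are illustrative assumptions, not taken from any of the source files on this page.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class GetSizeExample {
    public static void main(String[] args) {
        // Uses the default credential/region chain; replace with your own client setup.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // "my-example-bucket" is a hypothetical bucket name.
        ObjectListing listing = s3.listObjects("my-example-bucket");
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            // getSize() reports the object size in bytes.
            System.out.println(summary.getKey() + " -> " + summary.getSize() + " bytes");
        }
    }
}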
From source file:com.facebook.presto.hive.PrestoS3FileSystem.java
License:Apache License
private Iterator<LocatedFileStatus> statusFromObjects(List<S3ObjectSummary> objects) {
    List<LocatedFileStatus> list = new ArrayList<>();
    for (S3ObjectSummary object : objects) {
        if (!object.getKey().endsWith("/")) {
            FileStatus status = new FileStatus(object.getSize(), false, 1, BLOCK_SIZE.toBytes(),
                    object.getLastModified().getTime(), qualifiedPath(new Path("/" + object.getKey())));
            list.add(createLocatedFileStatus(status));
        }
    }
    return list.iterator();
}
From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java
License:MIT License
@Override
public List<BinaryInfo> list(StorageInfo storage, BinaryInfo binary) throws StoreException {
    logger.debug("storage={}", storage);
    logger.debug("binary={}", binary);
    List<BinaryInfo> objInfoList = new ArrayList<BinaryInfo>();
    AmazonS3 s3client = getS3Client(binary.getBucketName());
    try {
        logger.debug("Listing binaries");
        final ListObjectsV2Request req = new ListObjectsV2Request()
                .withBucketName(binary.getBucketName()).withMaxKeys(2);
        ListObjectsV2Result result;
        do {
            result = s3client.listObjectsV2(req);
            for (S3ObjectSummary binarySummary : result.getObjectSummaries()) {
                logger.debug(" - {} (size={})", binarySummary.getKey(), binarySummary.getSize());
                if (binarySummary.getSize() != 0) {
                    BinaryInfo objInfo = new BinaryInfo(binary.getBucketName());
                    objInfo.setFileName(binarySummary.getKey());
                    objInfo.setSize(binarySummary.getSize());
                    S3Object s3Object = s3client
                            .getObject(new GetObjectRequest(binary.getBucketName(), binarySummary.getKey()));
                    objInfo.setContentType(s3Object.getObjectMetadata().getContentType());
                    objInfo.setUrl(s3client.getUrl(binary.getBucketName(), binarySummary.getKey()).toString());
                    logger.debug("Generating pre-signed URL.");
                    URL presignedUrl = getPresignedUrl(s3client, binary.getBucketName(), binarySummary.getKey());
                    objInfo.setPresignedUrl(presignedUrl.toString());
                    logger.debug("Pre-Signed URL = " + presignedUrl.toString());
                    objInfoList.add(objInfo);
                }
            }
            logger.debug("Next Continuation Token : " + result.getNextContinuationToken());
            req.setContinuationToken(result.getNextContinuationToken());
        } while (result.isTruncated());
    } catch (AmazonServiceException ase) {
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.LIST_FAIL, ase,
                binary.getFileName());
    } catch (AmazonClientException ace) {
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.LIST_FAIL, ace,
                binary.getFileName());
    }
    logger.info("list size={}", objInfoList.size());
    return objInfoList;
}
From source file:com.ibm.stocator.fs.cos.COSAPIClient.java
License:Apache License
private FileStatus createFileStatus(S3ObjectSummary objSummary, String hostName, Path path)
        throws IllegalArgumentException, IOException {
    String objKey = objSummary.getKey();
    String newMergedPath = getMergedPath(hostName, path, objKey);
    return createFileStatus(objSummary.getSize(), objKey, objSummary.getLastModified(),
            new Path(newMergedPath));
}
From source file:com.ibm.stocator.fs.cos.COSAPIClient.java
License:Apache License
/**
 * {@inheritDoc}
 *
 * Prefix based: return everything that starts with the prefix.
 * Fill listing: return all objects, even zero size.
 * If fileStatus is null, the path is part of some name, neither an object
 * nor a pseudo directory. Called by Globber.
 *
 * @param hostName hostName
 * @param path path
 * @param fullListing return all objects, even zero size
 * @param prefixBased return everything that starts with the prefix
 * @return list
 * @throws IOException if error
 */
/*
public FileStatus[] list(String hostName, Path path, boolean fullListing,
        boolean prefixBased) throws IOException {
    String key = pathToKey(hostName, path);
    ArrayList<FileStatus> tmpResult = new ArrayList<FileStatus>();
    ListObjectsRequest request = new ListObjectsRequest().withBucketName(mBucket).withPrefix(key);
    String curObj;
    if (path.toString().equals(mBucket)) {
        curObj = "";
    } else if (path.toString().startsWith(mBucket + "/")) {
        curObj = path.toString().substring(mBucket.length() + 1);
    } else if (path.toString().startsWith(hostName)) {
        curObj = path.toString().substring(hostName.length());
    } else {
        curObj = path.toString();
    }
    ObjectListing objectList = mClient.listObjects(request);
    List<S3ObjectSummary> objectSummaries = objectList.getObjectSummaries();
    if (objectSummaries.size() == 0) {
        FileStatus[] emptyRes = {};
        LOG.debug("List for bucket {} is empty", mBucket);
        return emptyRes;
    }
    boolean objectScanContinue = true;
    S3ObjectSummary prevObj = null;
    while (objectScanContinue) {
        for (S3ObjectSummary obj : objectSummaries) {
            if (prevObj == null) {
                prevObj = obj;
                continue;
            }
            String objKey = obj.getKey();
            String unifiedObjectName = extractUnifiedObjectName(objKey);
            if (!prefixBased && !curObj.equals("") && !path.toString().endsWith("/")
                    && !unifiedObjectName.equals(curObj) && !unifiedObjectName.startsWith(curObj + "/")) {
                LOG.trace("{} does not match {}. Skipped", unifiedObjectName, curObj);
                continue;
            }
            if (isSparkOrigin(unifiedObjectName) && !fullListing) {
                LOG.trace("{} created by Spark", unifiedObjectName);
                if (!isJobSuccessful(unifiedObjectName)) {
                    LOG.trace("{} created by failed Spark job. Skipped", unifiedObjectName);
                    if (fModeAutomaticDelete) {
                        delete(hostName, new Path(objKey), true);
                    }
                    continue;
                } else {
                    // If we are here, the data was created by Spark and the job completed
                    // successfully. However, there might be parts of failed tasks that
                    // were not aborted, so we need to make sure there are no failed attempts.
                    if (nameWithoutTaskID(objKey).equals(nameWithoutTaskID(prevObj.getKey()))) {
                        // Found a failed attempt that was not aborted.
                        LOG.trace("Collision found between {} and {}", prevObj.getKey(), objKey);
                        if (prevObj.getSize() < obj.getSize()) {
                            LOG.trace("New candidate is {}. Removed {}", obj.getKey(), prevObj.getKey());
                            prevObj = obj;
                        }
                        continue;
                    }
                }
            }
            if (prevObj.getSize() > 0 || fullListing) {
                FileStatus fs = getFileStatusObjSummaryBased(prevObj, hostName, path);
                tmpResult.add(fs);
            }
            prevObj = obj;
        }
        boolean isTruncated = objectList.isTruncated();
        if (isTruncated) {
            objectList = mClient.listNextBatchOfObjects(objectList);
            objectSummaries = objectList.getObjectSummaries();
        } else {
            objectScanContinue = false;
        }
    }
    if (prevObj != null && (prevObj.getSize() > 0 || fullListing)) {
        FileStatus fs = getFileStatusObjSummaryBased(prevObj, hostName, path);
        tmpResult.add(fs);
    }
    if (LOG.isTraceEnabled()) {
        LOG.trace("COS List to return length {}", tmpResult.size());
        for (FileStatus fs : tmpResult) {
            LOG.trace("{}", fs.getPath());
        }
    }
    return tmpResult.toArray(new FileStatus[tmpResult.size()]);
}
*/
@Override
public FileStatus[] list(String hostName, Path path, boolean fullListing, boolean prefixBased,
        Boolean isDirectory, boolean flatListing, PathFilter filter)
        throws FileNotFoundException, IOException {
    LOG.debug("Native direct list status for {}", path);
    ArrayList<FileStatus> tmpResult = new ArrayList<FileStatus>();
    String key = pathToKey(hostName, path);
    if (isDirectory != null && isDirectory.booleanValue() && !key.endsWith("/")) {
        key = key + "/";
        LOG.debug("listNativeDirect modify key to {}", key);
    }
    Map<String, FileStatus> emptyObjects = new HashMap<String, FileStatus>();
    ListObjectsRequest request = new ListObjectsRequest();
    request.setBucketName(mBucket);
    request.setMaxKeys(5000);
    request.setPrefix(key);
    if (!flatListing) {
        request.setDelimiter("/");
    }
    ObjectListing objectList = mClient.listObjects(request);
    List<S3ObjectSummary> objectSummaries = objectList.getObjectSummaries();
    List<String> commonPrefixes = objectList.getCommonPrefixes();
    boolean objectScanContinue = true;
    S3ObjectSummary prevObj = null;
    // start FTA logic
    boolean stocatorOrigin = isSparkOrigin(key, path.toString());
    if (stocatorOrigin) {
        LOG.debug("Stocator origin is true for {}", key);
        if (!isJobSuccessful(key)) {
            LOG.debug("{} created by failed Spark job. Skipped", key);
            if (fModeAutomaticDelete) {
                delete(hostName, new Path(key), true);
            }
            return new FileStatus[0];
        }
    }
    while (objectScanContinue) {
        for (S3ObjectSummary obj : objectSummaries) {
            if (prevObj == null) {
                prevObj = obj;
                continue;
            }
            String objKey = obj.getKey();
            String unifiedObjectName = extractUnifiedObjectName(objKey);
            LOG.debug("list candidate {}, unified name {}", objKey, unifiedObjectName);
            if (stocatorOrigin && !fullListing) {
                LOG.trace("{} created by Spark", unifiedObjectName);
                // If we are here, the data was created by Spark and the job completed
                // successfully. However, there might be parts of failed tasks that
                // were not aborted, so we need to make sure there are no failed attempts.
                if (nameWithoutTaskID(objKey).equals(nameWithoutTaskID(prevObj.getKey()))) {
                    // Found a failed attempt that was not aborted.
                    LOG.trace("Collision found between {} and {}", prevObj.getKey(), objKey);
                    if (prevObj.getSize() < obj.getSize()) {
                        LOG.trace("New candidate is {}. Removed {}", obj.getKey(), prevObj.getKey());
                        prevObj = obj;
                    }
                    continue;
                }
            }
            FileStatus fs = createFileStatus(prevObj, hostName, path);
            if (fs.getLen() > 0 || fullListing) {
                LOG.debug("Native direct list. Adding {} size {}", fs.getPath(), fs.getLen());
                if (filter == null) {
                    tmpResult.add(fs);
                } else if (filter.accept(fs.getPath())) {
                    tmpResult.add(fs);
                } else {
                    LOG.trace("{} rejected by path filter during list. Filter {}", fs.getPath(), filter);
                }
            } else {
                emptyObjects.put(fs.getPath().toString(), fs);
            }
            prevObj = obj;
        }
        boolean isTruncated = objectList.isTruncated();
        if (isTruncated) {
            objectList = mClient.listNextBatchOfObjects(objectList);
            objectSummaries = objectList.getObjectSummaries();
        } else {
            objectScanContinue = false;
        }
    }
    if (prevObj != null) {
        FileStatus fs = createFileStatus(prevObj, hostName, path);
        LOG.debug("Adding the last object from the list {}", fs.getPath());
        if (fs.getLen() > 0 || fullListing) {
            LOG.debug("Native direct list. Adding {} size {}", fs.getPath(), fs.getLen());
            if (filter == null) {
                memoryCache.putFileStatus(fs.getPath().toString(), fs);
                tmpResult.add(fs);
            } else if (filter.accept(fs.getPath())) {
                memoryCache.putFileStatus(fs.getPath().toString(), fs);
                tmpResult.add(fs);
            } else {
                LOG.trace("{} rejected by path filter during list. Filter {}", fs.getPath(), filter);
            }
        } else if (!fs.getPath().getName().equals(HADOOP_SUCCESS)) {
            emptyObjects.put(fs.getPath().toString(), fs);
        }
    }
    // get common prefixes
    for (String comPrefix : commonPrefixes) {
        LOG.debug("Common prefix is {}", comPrefix);
        if (emptyObjects.containsKey(keyToQualifiedPath(hostName, comPrefix).toString())
                || emptyObjects.isEmpty()) {
            FileStatus status = new COSFileStatus(true, false, keyToQualifiedPath(hostName, comPrefix));
            LOG.debug("Match between common prefix and empty object {}. Adding to result", comPrefix);
            if (filter == null) {
                memoryCache.putFileStatus(status.getPath().toString(), status);
                tmpResult.add(status);
            } else if (filter.accept(status.getPath())) {
                memoryCache.putFileStatus(status.getPath().toString(), status);
                tmpResult.add(status);
            } else {
                LOG.trace("Common prefix {} rejected by path filter during list. Filter {}",
                        status.getPath(), filter);
            }
        }
    }
    return tmpResult.toArray(new FileStatus[tmpResult.size()]);
}
From source file:com.ibm.stocator.fs.cos.COSUtils.java
License:Apache License
/**
 * Create a file status instance from a listing.
 *
 * @param keyPath path to entry
 * @param summary summary from AWS
 * @param blockSize block size to declare
 * @return a status entry
 */
public static COSFileStatus createFileStatus(Path keyPath, S3ObjectSummary summary, long blockSize) {
    long size = summary.getSize();
    return createFileStatus(keyPath, objectRepresentsDirectory(summary.getKey(), size), size,
            summary.getLastModified(), blockSize);
}
From source file:com.ibm.stocator.fs.cos.COSUtils.java
License:Apache License
public static String stringify(S3ObjectSummary summary) {
    StringBuilder builder = new StringBuilder(summary.getKey().length() + 100);
    builder.append(summary.getKey()).append(' ');
    builder.append("size=").append(summary.getSize());
    return builder.toString();
}
From source file:com.intuit.s3encrypt.S3Encrypt.java
License:Open Source License
public static void main(String[] args) throws IOException, NoSuchAlgorithmException, InvalidKeySpecException {
    // create Options object
    Options options = new Options();
    options.addOption(create_bucket);
    options.addOption(create_key);
    options.addOption(delete_bucket);
    options.addOption(get);
    options.addOption(help);
    options.addOption(inspect);
    options.addOption(keyfile);
    options.addOption(list_buckets);
    options.addOption(list_objects);
    options.addOption(put);
    options.addOption(remove);
    options.addOption(rotate);
    options.addOption(rotateall);
    options.addOption(rotateKey);

    // CommandLineParser parser = new GnuParser();
    // Changed from the GnuParser above to the PosixParser below, which allows for multiple arguments.
    PosixParser parser = new PosixParser();
    CommandLine cmd;
    try {
        cmd = parser.parse(options, args);
        Logger.getRootLogger().setLevel(Level.OFF);
        if (cmd.hasOption("help")) {
            HelpFormatter help = new HelpFormatter();
            System.out.println();
            help.printHelp("S3Encrypt", options);
            System.out.println();
            System.exit(1);
        } else if (cmd.hasOption("create_key")) {
            keyname = cmd.getOptionValue("keyfile");
            createKeyFile(keyname);
            key = new File(keyname);
        } else {
            if (cmd.hasOption("keyfile")) {
                keyname = cmd.getOptionValue("keyfile");
            }
            key = new File(keyname);
        }
        if (!(key.exists())) {
            System.out.println("Key does not exist or not provided");
            System.exit(1);
        }

        // AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
        ClasspathPropertiesFileCredentialsProvider credentials = new ClasspathPropertiesFileCredentialsProvider(
                ".s3encrypt");
        EncryptionMaterials encryptionMaterials = new EncryptionMaterials(getKeyFile(keyname));
        AmazonS3EncryptionClient s3 = new AmazonS3EncryptionClient(credentials.getCredentials(),
                encryptionMaterials);
        // Region usWest2 = Region.getRegion(Regions.US_WEST_2);
        // s3.setRegion(usWest2);

        if (cmd.hasOption("create_bucket")) {
            String bucket = cmd.getOptionValue("create_bucket");
            System.out.println("Creating bucket " + bucket + "\n");
            s3.createBucket(bucket);
        } else if (cmd.hasOption("delete_bucket")) {
            String bucket = cmd.getOptionValue("delete_bucket");
            System.out.println("Deleting bucket " + bucket + "\n");
            s3.deleteBucket(bucket);
        } else if (cmd.hasOption("get")) {
            String[] searchArgs = cmd.getOptionValues("get");
            String bucket = searchArgs[0];
            String filename = searchArgs[1];
            getS3Object(cmd, s3, bucket, filename);
        } else if (cmd.hasOption("inspect")) {
            String[] searchArgs = cmd.getOptionValues("inspect");
            String bucket = searchArgs[0];
            String filename = searchArgs[1];
            String keyname = "encryption_key";
            String metadata = inspectS3Object(cmd, s3, bucket, filename, keyname);
            System.out.println(metadata);
        } else if (cmd.hasOption("list_buckets")) {
            System.out.println("Listing buckets");
            for (Bucket bucket : s3.listBuckets()) {
                System.out.println(bucket.getName());
            }
            System.out.println();
        } else if (cmd.hasOption("list_objects")) {
            String bucket = cmd.getOptionValue("list_objects");
            System.out.println("Listing objects");
            ObjectListing objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucket));
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                System.out.println(objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
            }
            System.out.println();
        } else if (cmd.hasOption("put")) {
            String[] searchArgs = cmd.getOptionValues("put");
            String bucket = searchArgs[0];
            String filename = searchArgs[1];
            String metadataKeyname = "encryption_key";
            String key = keyname;
            putS3Object(cmd, s3, bucket, filename, metadataKeyname, key);
        } else if (cmd.hasOption("remove")) {
            String[] searchArgs = cmd.getOptionValues("remove");
            String bucket = searchArgs[0];
            String filename = searchArgs[1];
            System.out.println("Removing object in S3 from BUCKET = " + bucket + " FILENAME = " + filename);
            s3.deleteObject(new DeleteObjectRequest(bucket, filename));
            System.out.println();
        } else if (cmd.hasOption("rotate")) {
            String[] searchArgs = cmd.getOptionValues("rotate");
            String bucket = searchArgs[0];
            String filename = searchArgs[1];
            String key1 = cmd.getOptionValue("keyfile");
            String key2 = cmd.getOptionValue("rotateKey");
            String metadataKeyname = "encryption_key";
            System.out.println("Supposed to get object from here OPTION VALUE = " + bucket + " FILENAME = "
                    + filename + " KEY1 = " + key1 + " KEY2 = " + key2);
            EncryptionMaterials rotateEncryptionMaterials = new EncryptionMaterials(getKeyFile(key2));
            AmazonS3EncryptionClient rotateS3 = new AmazonS3EncryptionClient(credentials.getCredentials(),
                    rotateEncryptionMaterials);
            getS3Object(cmd, s3, bucket, filename);
            putS3Object(cmd, rotateS3, bucket, filename, metadataKeyname, key2);
        } else if (cmd.hasOption("rotateall")) {
            String[] searchArgs = cmd.getOptionValues("rotateall");
            String bucket = searchArgs[0];
            String key1 = searchArgs[1];
            String key2 = searchArgs[2];
            System.out.println("Supposed to rotateall here for BUCKET NAME = " + bucket + " KEY1 = " + key1
                    + " KEY2 = " + key2);
        } else {
            System.out.println("Something went wrong... ");
            System.exit(1);
        }
    } catch (ParseException e) {
        e.printStackTrace();
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.jfixby.scarabei.red.aws.test.S3Sample.java
License:Open Source License
public static void main(final String[] args) throws IOException {
    /*
     * The ProfileCredentialsProvider will return your [default] credential profile by reading from the
     * credentials file located at (C:\\Users\\JCode\\.aws\\credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (final Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (C:\\Users\\%USERNAME%\\.aws\\credentials), and is in valid format.", e);
    }

    final AmazonS3 s3 = new AmazonS3Client(credentials);
    final Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    final String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    final String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique, so once a bucket name has
         * been taken by any user, you can't create another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to keep your data closer to
         * your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account.
         */
        System.out.println("Listing buckets");
        for (final Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to S3, or upload an InputStream
         * directly if you know the length of the data in the stream. You can also specify your own
         * metadata when uploading to S3, which allows you to set a variety of options like content-type
         * and content-encoding, plus additional metadata specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of the object's metadata and a
         * stream from which to read the contents. It's important to read the contents of the stream as
         * quickly as possible since the data is streamed directly from Amazon S3 and your network
         * connection will remain open until you read all the data or close the input stream.
         *
         * GetObjectRequest also supports several other options, including conditional downloading of
         * objects based on modification times, ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        final S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for listing the objects in your
         * bucket. Keep in mind that buckets with many objects might truncate their results when listing
         * their objects, so be sure to check if the returned object listing is truncated, and use the
         * AmazonS3.listNextBatchOfObjects(...) operation to retrieve additional results.
         */
        System.out.println("Listing objects");
        final ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (final S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket, there is no way to
         * undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be deleted, so remember to
         * delete any objects from your buckets before you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (final AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (final AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.jktsoftware.amazondownloader.download.S3TypeBucket.java
License:Open Source License
public List<IObject> getObjectsInRepo() {
    String repoid = getRepoId();
    AWSCredentials awscredentials = new BasicAWSCredentials(this.credentials.getAccessKey(),
            this.credentials.getSecretAccessKey());
    AmazonS3 s3 = new AmazonS3Client(awscredentials);
    s3.setEndpoint(endpoint);
    System.out.println("Getting objects");
    ObjectListing objectListing = s3.listObjects(repoid);
    List<IObject> objects = new ArrayList<IObject>();
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        S3TypeObject obj = new S3TypeObject(objectSummary.getKey(), objectSummary.getSize(),
                objectSummary.getBucketName(), objectSummary.getStorageClass(), s3);
        objects.add(obj);
    }
    return objects;
}
From source file:com.lithium.flow.filer.S3Filer.java
License:Apache License
@Override
@Nonnull
public List<Record> listRecords(@Nonnull String path) throws IOException {
    ObjectListing listing = s3
            .listObjects(new ListObjectsRequest().withBucketName(bucket).withPrefix(path.substring(1)));
    List<Record> records = Lists.newArrayList();
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        File file = new File(summary.getKey());
        String parent = file.getParent();
        String name = file.getName();
        long time = summary.getLastModified().getTime();
        long size = summary.getSize();
        boolean directory = name.endsWith("/");
        records.add(new Record(uri, "/" + parent, name, time, size, directory));
    }
    return records;
}