List of usage examples for com.amazonaws.services.s3.model.ObjectMetadata: the ObjectMetadata() constructor
public ObjectMetadata()
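The no-argument constructor creates an empty metadata object whose fields are filled in before an upload. The sketch below shows the typical pattern used by the examples on this page; the bucket name, key, content type, and AmazonS3 client are hypothetical placeholders supplied by the caller, not part of any example below.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

// Minimal sketch: start from an empty ObjectMetadata, set the fields S3 needs,
// and attach it to the upload request. Bucket, key, and client are placeholders.
static void putTextObject(AmazonS3 s3Client, String bucket, String key, String text) throws IOException {
    byte[] payload = text.getBytes(StandardCharsets.UTF_8);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(payload.length);   // set explicitly when uploading from a stream
    metadata.setContentType("text/plain");
    try (InputStream input = new ByteArrayInputStream(payload)) {
        s3Client.putObject(new PutObjectRequest(bucket, key, input, metadata));
    }
}

The examples that follow all use this shape: construct the empty metadata, set the content length plus any cache-control or server-side-encryption settings, then pass it to a PutObjectRequest.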
From source file:com.vanilla.transmilenio.servicio.AmazonServicio.java
public String guardarArchivo(InputStream input, String nombreArchivo) {
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, nombreArchivo, input, new ObjectMetadata());
    putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead);
    s3client.putObject(putObjectRequest);
    String url = "https://s3.amazonaws.com/" + bucketName + "/" + nombreArchivo;
    return url;
}
From source file:com.yahoo.ycsb.db.S3Client.java
License:Open Source License
/**
 * Upload a new object to S3 or update an object on S3.
 *
 * @param bucket
 *            The name of the bucket
 * @param key
 *            The file key of the object to upload/update.
 * @param values
 *            The data to be written on the object
 * @param updateMarker
 *            A boolean value. If true a new object will be uploaded
 *            to S3. If false an existing object will be re-uploaded
 */
protected Status writeToStorage(String bucket, String key, HashMap<String, ByteIterator> values,
        Boolean updateMarker, String sseLocal, SSECustomerKey ssecLocal) {
    int totalSize = 0;
    int fieldCount = values.size(); // number of fields to concatenate
    // getting the first field in the values
    Object keyToSearch = values.keySet().toArray()[0];
    // getting the content of just one field
    byte[] sourceArray = values.get(keyToSearch).toArray();
    int sizeArray = sourceArray.length; // size of each array
    if (updateMarker) {
        totalSize = sizeArray * fieldCount;
    } else {
        try {
            Map.Entry<S3Object, ObjectMetadata> objectAndMetadata = getS3ObjectAndMetadata(bucket, key, ssecLocal);
            int sizeOfFile = (int) objectAndMetadata.getValue().getContentLength();
            fieldCount = sizeOfFile / sizeArray;
            totalSize = sizeOfFile;
            objectAndMetadata.getKey().close();
        } catch (Exception e) {
            System.err.println("Not possible to get the object :" + key);
            e.printStackTrace();
            return Status.ERROR;
        }
    }
    byte[] destinationArray = new byte[totalSize];
    int offset = 0;
    for (int i = 0; i < fieldCount; i++) {
        System.arraycopy(sourceArray, 0, destinationArray, offset, sizeArray);
        offset += sizeArray;
    }
    try (InputStream input = new ByteArrayInputStream(destinationArray)) {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(totalSize);
        PutObjectRequest putObjectRequest = null;
        if (sseLocal.equals("true")) {
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        } else if (ssecLocal != null) {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata).withSSECustomerKey(ssecLocal);
        } else {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        }
        try {
            PutObjectResult res = s3Client.putObject(putObjectRequest);
            if (res.getETag() == null) {
                return Status.ERROR;
            } else {
                if (sseLocal.equals("true")) {
                    System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm());
                } else if (ssecLocal != null) {
                    System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm());
                }
            }
        } catch (Exception e) {
            System.err.println("Not possible to write object :" + key);
            e.printStackTrace();
            return Status.ERROR;
        }
    } catch (Exception e) {
        System.err.println("Error in the creation of the stream :" + e.toString());
        e.printStackTrace();
        return Status.ERROR;
    }
    return Status.OK;
}
From source file:com.yahoo.ycsb.utils.connection.S3Connection.java
License:Open Source License
public Status insert(String key, byte[] bytes) {
    try (InputStream input = new ByteArrayInputStream(bytes)) {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(bytes.length);
        PutObjectRequest putObjectRequest = null;
        if (ssecKey != null) {
            if (ssecKey.equals("true")) {
                metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
            } else {
                putObjectRequest = new PutObjectRequest(bucket, key, input, metadata)
                        .withSSECustomerKey(ssecKey);
            }
        } else {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        }
        try {
            PutObjectResult res = awsClient.putObject(putObjectRequest);
            if (res.getETag() == null) {
                return Status.ERROR;
            } else {
                if (ssecKey != null) {
                    if (ssecKey.equals("true")) {
                        logger.debug("Uploaded object encryption status is " + res.getSSEAlgorithm());
                    } else {
                        logger.debug("Uploaded object encryption status is " + res.getSSEAlgorithm());
                    }
                }
            }
        } catch (Exception e) {
            logger.error("Not possible to write object :" + key);
            System.err.println("Retrying " + key);
            insert(key, bytes);
        }
    } catch (Exception e) {
        logger.error("Error in the creation of the stream :" + e.toString());
        System.err.println("Retrying " + key);
        insert(key, bytes);
        //e.printStackTrace();
        //return Status.ERROR;
    }
    return Status.OK;
}
From source file:com.zero_x_baadf00d.play.module.aws.s3.ebean.BaseS3FileModel.java
License:Open Source License
/**
 * Save the current object. The file will be uploaded to PlayS3 bucket.
 *
 * @since 16.03.13
 */
@Override
public void save() {
    if (this.id == null) {
        this.id = Generators.timeBasedGenerator().generate();
    }
    if (!PlayS3.isReady()) {
        Logger.error("Could not save PlayS3 file because amazonS3 variable is null");
        throw new RuntimeException("Could not save");
    } else {
        this.bucket = PlayS3.getBucketName();
        if (this.subDirectory == null) {
            this.subDirectory = "";
        }
        this.subDirectory = this.subDirectory.trim();

        // Set cache control and server side encryption
        final ObjectMetadata objMetaData = new ObjectMetadata();
        objMetaData.setContentType(this.contentType);
        objMetaData.setCacheControl("max-age=315360000, public");
        objMetaData.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        try {
            objMetaData.setContentLength(this.objectData.available());
        } catch (final IOException ex) {
            Logger.warn("Can't retrieve stream available size", ex);
        } finally {
            try {
                if (this.objectData.markSupported()) {
                    this.objectData.reset();
                }
            } catch (final IOException ex) {
                Logger.error("Can't reset stream position", ex);
            }
        }

        // Upload file to PlayS3
        final PutObjectRequest putObjectRequest = new PutObjectRequest(this.bucket, this.getActualFileName(),
                this.objectData, objMetaData);
        putObjectRequest.withCannedAcl(
                this.isPrivate ? CannedAccessControlList.Private : CannedAccessControlList.PublicRead);
        PlayS3.getAmazonS3().putObject(putObjectRequest);
        try {
            if (this.objectData != null) {
                this.objectData.close();
            }
        } catch (final IOException ignore) {
        }

        // Save object on database
        super.save();
    }
}
From source file:ConnectionUtils.AWSS3Utils.java
public static String saveImage(String bucketName, ImageDTO imageDTO) {
    InputStream imageIS = null;
    Properties awsCredentialsProperties = new Properties();
    try {
        awsCredentialsProperties
                .load(AWSS3Utils.class.getClassLoader().getResourceAsStream("prefs.properties"));
    } catch (Exception ex) {
        Logger.getLogger(AWSS3Utils.class.getName()).log(Level.SEVERE, null, ex);
    }
    String awsAccessKey = awsCredentialsProperties.getProperty("AWSACCESSKEY");
    String awsSecretKey = awsCredentialsProperties.getProperty("SECRETACCESSKEY");
    try {
        // AccessControlList accessControlList = new AccessControlList();
        // accessControlList.grantPermission(GroupGrantee.AllUsers, Permission.FullControl);
        BasicAWSCredentials awsCredentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
        AmazonS3 s3Client = new AmazonS3Client(awsCredentials);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        imageIS = imageDTO.getImageBlob();
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, imageDTO.getIID() + ".jpg",
                imageIS, objectMetadata);
        s3Client.putObject(putObjectRequest); //.withAccessControlList(accessControlList));
        imageIS = imageDTO.getThumbnailBlob();
        putObjectRequest = new PutObjectRequest(bucketName,
                imageDTO.getIID() + "_" + IMAGE_TYPE.THUMB.toString() + ".jpg", imageIS, objectMetadata);
        s3Client.putObject(putObjectRequest); //.withAccessControlList(accessControlList));
        return ("Image Saved");
    } catch (Exception ex) {
        Logger.getLogger(AWSS3Utils.class.getName()).log(Level.SEVERE, null, ex);
        return "Image Not Saved";
    } finally {
        try {
            if (imageIS != null) {
                imageIS.close();
            }
        } catch (IOException ex) {
            Logger.getLogger(AWSS3Utils.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
}
From source file:dataMappers.PictureDataMapper.java
public static void addPictureToReport(DBConnector dbconnector, HttpServletRequest request)
        throws FileUploadException, IOException, SQLException {

    if (!ServletFileUpload.isMultipartContent(request)) {
        System.out.println("Invalid upload request");
        return;
    }

    // Define limits for disk item
    DiskFileItemFactory factory = new DiskFileItemFactory();
    factory.setSizeThreshold(THRESHOLD_SIZE);

    // Define limit for servlet upload
    ServletFileUpload upload = new ServletFileUpload(factory);
    upload.setFileSizeMax(MAX_FILE_SIZE);
    upload.setSizeMax(MAX_REQUEST_SIZE);

    FileItem itemFile = null;
    int reportID = 0;

    // Get list of items in request (parameters, files etc.)
    List formItems = upload.parseRequest(request);
    Iterator iter = formItems.iterator();

    // Loop items
    while (iter.hasNext()) {
        FileItem item = (FileItem) iter.next();
        if (!item.isFormField()) {
            itemFile = item; // If not a form field, it must be the uploaded file
        } else if (item.getFieldName().equalsIgnoreCase("reportID")) { // else it is a form field
            try {
                System.out.println(item.getString());
                reportID = Integer.parseInt(item.getString());
            } catch (NumberFormatException e) {
                reportID = 0;
            }
        }
    }

    // itemFile is null if no fields were declared as image/upload,
    // and reportID must be > 0 before anything is uploaded.
    if (itemFile != null && reportID > 0) {
        try {
            // Create credentials from final vars
            BasicAWSCredentials awsCredentials = new BasicAWSCredentials(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY);

            // Create client with credentials
            AmazonS3 s3client = new AmazonS3Client(awsCredentials);

            // Set region
            s3client.setRegion(Region.getRegion(Regions.EU_WEST_1));

            // Set content length (size) of file
            ObjectMetadata om = new ObjectMetadata();
            om.setContentLength(itemFile.getSize());

            // Get extension for file
            String ext = FilenameUtils.getExtension(itemFile.getName());

            // Generate random filename
            String keyName = UUID.randomUUID().toString() + '.' + ext;

            // This is the actual upload command
            s3client.putObject(new PutObjectRequest(S3_BUCKET_NAME, keyName, itemFile.getInputStream(), om));

            // Picture was uploaded to S3 if we made it this far.
            // Now insert the row into the database for the report.
            PreparedStatement stmt = dbconnector.getCon()
                    .prepareStatement("INSERT INTO reports_pictures" + "(REPORTID, PICTURE) VALUES (?,?)");
            stmt.setInt(1, reportID);
            stmt.setString(2, keyName);
            stmt.executeUpdate();
            stmt.close();
        } catch (AmazonServiceException ase) {
            System.out.println("Caught an AmazonServiceException, which " + "means your request made it "
                    + "to Amazon S3, but was rejected with an error response" + " for some reason.");
            System.out.println("Error Message: " + ase.getMessage());
            System.out.println("HTTP Status Code: " + ase.getStatusCode());
            System.out.println("AWS Error Code: " + ase.getErrorCode());
            System.out.println("Error Type: " + ase.getErrorType());
            System.out.println("Request ID: " + ase.getRequestId());
        } catch (AmazonClientException ace) {
            System.out.println("Caught an AmazonClientException, which " + "means the client encountered "
                    + "an internal error while trying to " + "communicate with S3, "
                    + "such as not being able to access the network.");
            System.out.println("Error Message: " + ace.getMessage());
        }
    }
}
From source file:eu.openg.aws.s3.internal.AmazonS3Fake.java
License:Apache License
private PutObjectResult putObject(String bucketName, String key, InputStream input) {
    return putObject(bucketName, key, input, new ObjectMetadata());
}
From source file:eu.stratosphere.nephele.fs.s3.S3DataOutputStream.java
License:Apache License
/**
 * {@inheritDoc}
 */
@Override
public void close() throws IOException {

    if (this.uploadId == null) {
        // This is not a multipart upload

        // No data has been written
        if (this.bytesWritten == 0) {
            return;
        }

        final InputStream is = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(this.bytesWritten);

        final PutObjectRequest por = new PutObjectRequest(this.bucket, this.object, is, om);
        if (this.useRRS) {
            por.setStorageClass(StorageClass.ReducedRedundancy);
        } else {
            por.setStorageClass(StorageClass.Standard);
        }

        try {
            this.s3Client.putObject(por);
        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }

        this.bytesWritten = 0;
    } else {
        if (this.bytesWritten > 0) {
            uploadPartAndFlushBuffer();
        }

        boolean operationSuccessful = false;
        try {
            final CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(this.bucket,
                    this.object, this.uploadId, this.partETags);
            this.s3Client.completeMultipartUpload(request);
            operationSuccessful = true;
        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        } finally {
            if (!operationSuccessful) {
                abortUpload();
            }
        }
    }
}
From source file:eu.stratosphere.nephele.fs.s3.S3FileSystem.java
License:Apache License
private void createEmptyObject(final String bucketName, final String objectName) {

    final InputStream im = new InputStream() {

        @Override
        public int read() throws IOException {
            return -1;
        }
    };

    final ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(0L);

    this.s3Client.putObject(bucketName, objectName, im, om);
}
From source file:eu.stratosphere.runtime.fs.s3.S3DataOutputStream.java
License:Apache License
@Override
public void close() throws IOException {

    if (this.uploadId == null) {
        // This is not a multipart upload

        // No data has been written
        if (this.bytesWritten == 0) {
            return;
        }

        final InputStream is = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(this.bytesWritten);

        final PutObjectRequest por = new PutObjectRequest(this.bucket, this.object, is, om);
        if (this.useRRS) {
            por.setStorageClass(StorageClass.ReducedRedundancy);
        } else {
            por.setStorageClass(StorageClass.Standard);
        }

        try {
            this.s3Client.putObject(por);
        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }

        this.bytesWritten = 0;
    } else {
        if (this.bytesWritten > 0) {
            uploadPartAndFlushBuffer();
        }

        boolean operationSuccessful = false;
        try {
            final CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(this.bucket,
                    this.object, this.uploadId, this.partETags);
            this.s3Client.completeMultipartUpload(request);
            operationSuccessful = true;
        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        } finally {
            if (!operationSuccessful) {
                abortUpload();
            }
        }
    }
}