List of usage examples for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata
public ObjectMetadata()
From source file:net.henryhu.roxlab2.NotePadProvider.java
License:Apache License
private int s3put(String key, ContentValues values) { Log.w("s3put()", "put with key: " + key); String sValues = contentValuesToString(values); byte[] bVals = null; try {//from w ww . ja v a 2 s . c o m bVals = sValues.getBytes("UTF-8"); } catch (Exception e) { } InputStream is = new ByteArrayInputStream(bVals); ObjectMetadata om = new ObjectMetadata(); om.setContentLength(bVals.length); om.setContentType("text/plain"); try { s3.putObject(bucketName, key, is, om); return 1; } catch (AmazonClientException e) { Log.w("s3put()", "Exception: " + e.toString() + " ; " + e.getMessage()); return 0; } }
From source file:net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java
License:Apache License
@Override public StorageResponse store(StorageRequest request) throws IOException { final IFile requestFile = request.getFile(); AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS)); Map<String, String> fileMetadata = new HashMap<String, String>(); fileMetadata.put("accountUrn", request.getUser().getAccount().getUrn()); fileMetadata.put("userUrn", request.getUser().getUrn()); fileMetadata.put("fileUrn", requestFile.getUrn()); fileMetadata.put("entityReferenceType", requestFile.getEntityReferenceType().name()); fileMetadata.put("referenceUrn", requestFile.getReferenceUrn()); fileMetadata.put("recordedTimestamp", Long.toString(request.getFile().getTimestamp())); // fileMetadata.put("mimeType", request.getVfsObject().getMimeType()); ObjectMetadata metadata = new ObjectMetadata(); if (request.getContentLength() > 0) { LOG.debug("Including content length : " + request.getContentLength()); metadata.setContentLength(request.getContentLength()); }// w w w. ja v a2s . c o m // metadata.setContentMD5(streamMD5); metadata.setUserMetadata(fileMetadata); try { LOG.trace("Bucket name: " + getBucketName()); LOG.trace("File name: " + request.getFileName()); LOG.trace("inputStream == null? 
" + (request.getInputStream() == null)); HashingInputStream his = new HashingInputStream(request.getInputStream(), "SHA-256"); PutObjectResult putResult = s3 .putObject(new PutObjectRequest(getBucketName(), request.getFileName(), his, metadata)); String finalUrl = getUrl(request.getFileName()); LOG.trace("File URL: " + finalUrl); requestFile.setUrl(getUrl(request.getFileName())); byte[] signature = his.getSignature(); JSONObject jsonObject = HashUtil.signFile(requestFile, signature); LOG.info("File Signature\n\n{}\n\n", jsonObject.toString(3)); return new StorageResponse(requestFile, finalUrl, jsonObject.toString(3)); } catch (AmazonS3Exception e) { e.printStackTrace(); throw e; } catch (JSONException | NoSuchAlgorithmException e) { e.printStackTrace(); throw new IOException(e); } }
From source file:net.solarnetwork.node.backup.s3.S3BackupService.java
License:Open Source License
/**
 * Performs a backup of the given resources to S3, de-duplicating resource data
 * by SHA-256 digest and then writing a JSON metadata archive describing the
 * backup. Afterwards either prepends the new backup to the cached backup list
 * or prunes older backups for the node, depending on additionalBackupCount.
 *
 * @param resources the resources to back up; null or empty aborts with null
 * @param now       timestamp used for the metadata archive name and date
 * @param props     backup properties; the node ID is resolved from these
 * @return metadata for the completed backup, or null if nothing ran or an
 *         IOException occurred mid-backup
 */
private Backup performBackupInternal(final Iterable<BackupResource> resources, final Calendar now,
        Map<String, String> props) {
    if (resources == null) {
        return null;
    }
    final Iterator<BackupResource> itr = resources.iterator();
    if (!itr.hasNext()) {
        log.debug("No resources provided, nothing to backup");
        return null;
    }
    S3Client client = this.s3Client;
    // Two-phase CAS guard: normally Configured -> RunningBackup; otherwise
    // attempt to recover from a previous Error state. Any other state means a
    // backup is already running, so bail out.
    if (!status.compareAndSet(BackupStatus.Configured, BackupStatus.RunningBackup)) {
        // try to reset from error
        if (!status.compareAndSet(BackupStatus.Error, BackupStatus.RunningBackup)) {
            return null;
        }
    }
    S3BackupMetadata result = null;
    try {
        final Long nodeId = nodeId(props);
        final String metaName = String.format(META_NAME_FORMAT, now, nodeId);
        final String metaObjectKey = objectKeyForPath(META_OBJECT_KEY_PREFIX + metaName);
        log.info("Starting backup to archive {}", metaObjectKey);
        // Listing existing data objects up front lets us skip re-uploading
        // content that is already present (content-addressed by digest).
        final Set<S3ObjectReference> allDataObjects = client
                .listObjects(objectKeyForPath(DATA_OBJECT_KEY_PREFIX));
        S3BackupMetadata meta = new S3BackupMetadata();
        meta.setNodeId(nodeId);
        MessageDigest digest = DigestUtils.getSha256Digest();
        byte[] buf = new byte[4096];
        for (BackupResource rsrc : resources) {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            if (rsrc.getModificationDate() > 0) {
                objectMetadata.setLastModified(new Date(rsrc.getModificationDate()));
            }
            // calculateContentDigest reads the resource and (presumably) also
            // populates objectMetadata — NOTE(review): confirm its side effects.
            String sha = calculateContentDigest(rsrc, digest, buf, objectMetadata);
            String objectKey = objectKeyForPath(DATA_OBJECT_KEY_PREFIX + sha);
            // see if already exists
            if (!allDataObjects.contains(new S3ObjectReference(objectKey))) {
                log.info("Saving resource to S3: {}", rsrc.getBackupPath());
                client.putObject(objectKey, rsrc.getInputStream(), objectMetadata);
            } else {
                log.info("Backup resource already saved to S3: {}", rsrc.getBackupPath());
            }
            meta.addBackupResource(rsrc, objectKey, sha);
        }
        // now save metadata
        meta.setComplete(true);
        meta.setDate(now.getTime());
        meta.setKey(metaName);
        byte[] metaJsonBytes = OBJECT_MAPPER.writeValueAsBytes(meta);
        try (ByteArrayInputStream in = new ByteArrayInputStream(metaJsonBytes)) {
            ObjectMetadata metaObjectMetadata = new ObjectMetadata();
            metaObjectMetadata.setContentType("application/json;charset=UTF-8");
            metaObjectMetadata.setContentLength(metaJsonBytes.length);
            metaObjectMetadata.setLastModified(meta.getDate());
            S3ObjectReference metaRef = client.putObject(metaObjectKey, in, metaObjectMetadata);
            result = new S3BackupMetadata(metaRef);
        }
        if (additionalBackupCount < 1) {
            // add this backup to the cached data
            CachedResult<List<Backup>> cached = cachedBackupList.get();
            if (cached != null) {
                List<Backup> list = cached.getResult();
                List<Backup> newList = new ArrayList<>(list);
                newList.add(0, result);
                updateCachedBackupList(newList);
            }
        } else {
            // clean out older backups: keep the newest additionalBackupCount + 1
            // (including the one just made) for this node, delete the rest.
            List<Backup> knownBackups = getAvailableBackupsInternal();
            List<String> backupsForNode = knownBackups.stream().filter(b -> nodeId.equals(b.getNodeId()))
                    .map(b -> b.getKey()).collect(Collectors.toList());
            if (backupsForNode.size() > additionalBackupCount + 1) {
                // limit() takes from the front — NOTE(review): assumes the list is
                // ordered oldest-first; verify getAvailableBackupsInternal() ordering.
                Set<String> keysToDelete = backupsForNode.stream()
                        .limit(backupsForNode.size() - additionalBackupCount - 1).collect(Collectors.toSet());
                log.info("Deleting {} expired backups for node {}: {}", keysToDelete.size(), nodeId,
                        keysToDelete);
                client.deleteObjects(keysToDelete);
                // update cache
                knownBackups = knownBackups.stream().filter(b -> !keysToDelete.contains(b.getKey()))
                        .collect(Collectors.toList());
                updateCachedBackupList(knownBackups);
            }
        }
    } catch (IOException e) {
        log.error("IO error performing backup", e);
    } finally {
        // Always release the running flag so future backups can start.
        status.compareAndSet(BackupStatus.RunningBackup, BackupStatus.Configured);
    }
    return result;
}
From source file:nl.kpmg.lcm.server.data.s3.S3FileAdapter.java
License:Apache License
/**
 * Writes the given stream to the configured S3 bucket under the configured
 * file name. The byte count must be known up front so it can be passed to S3
 * as the object's content length.
 *
 * @param stream data to upload
 * @param size   number of bytes in {@code stream}; must be positive
 * @throws IOException if the upload fails
 */
@Override
public void write(InputStream stream, Long size) throws IOException {
    // A missing or non-positive size cannot be forwarded as a content length.
    boolean sizeUnknown = (size == null || size <= 0);
    if (sizeUnknown) {
        throw new LcmExposableException("Error! Unable to transfer file to s3 storage with unknown size.");
    }

    // Create the destination bucket on first use.
    if (!s3Client.doesBucketExist(bucketName)) {
        s3Client.createBucket(bucketName);
    }

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(size);
    s3Client.putObject(new PutObjectRequest(bucketName, fileName, stream, objectMetadata));
    LOGGER.info("Successfully written data in s3 storage. Bucket: " + bucketName);
}
From source file:nl.nn.adapterframework.filesystem.AmazonS3FileSystem.java
License:Apache License
@Override public Iterator<S3Object> listFiles(String folder) throws FileSystemException { List<S3ObjectSummary> summaries = null; String prefix = folder != null ? folder + "/" : ""; try {// ww w . j av a 2s .c o m ObjectListing listing = s3Client.listObjects(bucketName, prefix); summaries = listing.getObjectSummaries(); while (listing.isTruncated()) { listing = s3Client.listNextBatchOfObjects(listing); summaries.addAll(listing.getObjectSummaries()); } } catch (AmazonServiceException e) { throw new FileSystemException("Cannot process requested action", e); } List<S3Object> list = new ArrayList<S3Object>(); for (S3ObjectSummary summary : summaries) { S3Object object = new S3Object(); ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentLength(summary.getSize()); object.setBucketName(summary.getBucketName()); object.setKey(summary.getKey()); object.setObjectMetadata(metadata); if (!object.getKey().endsWith("/") && !(prefix.isEmpty() && object.getKey().contains("/"))) { list.add(object); } } return list.iterator(); }
From source file:nl.nn.adapterframework.filesystem.AmazonS3FileSystem.java
License:Apache License
/**
 * Returns an OutputStream that buffers writes to a local temporary file and,
 * when closed, uploads that file's contents to S3 under the key of {@code f},
 * then deletes the temporary file.
 *
 * @param f S3 object whose key names the destination
 * @return stream whose {@code close()} triggers the upload
 */
@Override
public OutputStream createFile(final S3Object f) throws FileSystemException, IOException {
    // Fix: File.createTempFile yields a unique path. The original concatenated
    // the temp-dir path with "tempFile" without a separator, producing a file
    // outside the temp dir that collided across concurrent calls.
    final File file = File.createTempFile("s3-upload-", ".tmp");
    final BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(file));
    return new FilterOutputStream(bos) {
        private boolean isClosed = false;

        @Override
        public void close() throws IOException {
            super.close(); // flushes and closes the whole buffered chain once
            if (!isClosed) {
                isClosed = true;
                // Fix: try-with-resources + finally guarantee the input stream
                // is closed and the temp file removed even if the upload throws
                // (the original leaked both on failure).
                try (FileInputStream fis = new FileInputStream(file)) {
                    ObjectMetadata metaData = new ObjectMetadata();
                    metaData.setContentLength(file.length());
                    s3Client.putObject(bucketName, f.getKey(), fis, metaData);
                } finally {
                    file.delete();
                }
            }
        }
    };
}
From source file:ohnosequences.ivy.S3Repository.java
License:Apache License
/**
 * Publishes a local file to S3 at the destination path, optionally with
 * AES-256 server-side encryption, creating the bucket if necessary.
 *
 * NOTE(review): the {@code overwrite} parameter is never read; the existence
 * check below uses the repository-level {@code this.overwrite} field instead.
 * Confirm this shadowing is intentional before relying on the parameter.
 *
 * @param source      local file to upload
 * @param destination full destination path; parsed into bucket and key
 * @param overwrite   unused — see note above
 */
@Override
protected void put(File source, String destination, boolean overwrite) {
    String bucket = S3Utils.getBucket(destination);
    String key = S3Utils.getKey(destination);
    PutObjectRequest request = new PutObjectRequest(bucket, key, source);
    request = request.withCannedAcl(acl);
    if (serverSideEncryption) {
        // Request AES-256 server-side encryption for the stored object.
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setMetadata(objectMetadata);
    }
    if (!getS3Client().doesBucketExist(bucket)) {
        if (!createBucket(bucket, region)) {
            throw new Error("couldn't create bucket");
        }
    }
    // Uses the field, not the method parameter (see note above). An empty
    // listing for the key means nothing exists there yet.
    if (!this.overwrite && !getS3Client().listObjects(bucket, key).getObjectSummaries().isEmpty()) {
        throw new Error(destination + " exists but overwriting is disabled");
    }
    getS3Client().putObject(request);
}
From source file:onl.area51.filesystem.s3.S3Sender.java
License:Apache License
/**
 * Uploads the file at the given delegate path to S3 under the same key.
 *
 * @param path file path within the delegate file system (also used as the S3 key)
 * @throws IOException if reading the delegate or the S3 upload fails
 */
@Override
public void send(char[] path) throws IOException {
    String pathValue = String.valueOf(path);
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(getDelegate().size(path));
    // Fix: the content type was previously set to the path string itself, which
    // is not a MIME type. Derive one from the file name, defaulting to a
    // generic binary type when the extension is unknown.
    String contentType = java.net.URLConnection.guessContentTypeFromName(pathValue);
    meta.setContentType(contentType != null ? contentType : "application/octet-stream");
    try (InputStream is = getDelegate().newInputStream(path)) {
        LOG.log(Level.FINE, () -> "Sending " + getBucketName() + ":" + pathValue);
        getS3().putObject(new PutObjectRequest(getBucketName(), pathValue, is, meta));
        LOG.log(Level.FINE, () -> "Sent " + getBucketName() + ":" + pathValue);
    } catch (AmazonS3Exception ex) {
        LOG.log(Level.FINE, () -> "Send error " + ex.getStatusCode() + " " + getBucketName() + ":" + pathValue);
        throw new IOException(ex.getStatusCode() + ": Failed to put " + pathValue, ex);
    } catch (IOException ex) {
        throw new IOException("Failed to put " + pathValue, ex);
    }
}
From source file:org.adroitlogic.build.aws.maven.SimpleStorageServiceWagon.java
License:Apache License
@Override protected void putResource(File source, String destination, TransferProgress transferProgress) throws TransferFailedException, ResourceDoesNotExistException { String key = getKey(destination); mkdirs(key, 0);//from w ww . j av a 2s .c o m InputStream in = null; try { ObjectMetadata objectMetadata = new ObjectMetadata(); objectMetadata.setContentLength(source.length()); objectMetadata.setContentType(Mimetypes.getInstance().getMimetype(source)); in = new TransferProgressFileInputStream(source, transferProgress); this.amazonS3.putObject(new PutObjectRequest(this.bucketName, key, in, objectMetadata)); } catch (AmazonServiceException e) { throw new TransferFailedException(String.format("Cannot write file to '%s'", destination), e); } catch (FileNotFoundException e) { throw new ResourceDoesNotExistException(String.format("Cannot read file from '%s'", source), e); } finally { IoUtils.closeQuietly(in); } }
From source file:org.adroitlogic.build.aws.maven.SimpleStorageServiceWagon.java
License:Apache License
private PutObjectRequest createDirectoryPutObjectRequest(String key) { ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]); ObjectMetadata objectMetadata = new ObjectMetadata(); objectMetadata.setContentLength(0);//ww w. java 2 s. c o m return new PutObjectRequest(this.bucketName, key, inputStream, objectMetadata) .withCannedAcl(CannedAccessControlList.PublicRead); }