Example usage for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata

Introduction

On this page you can find example usage for the no-argument constructor of com.amazonaws.services.s3.model ObjectMetadata, ObjectMetadata().

Prototype

public ObjectMetadata() 
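
Before the source-file examples below, here is a minimal sketch of the typical pattern, for orientation only: construct an empty ObjectMetadata, set the headers you care about, and pass it to a PutObjectRequest. The bucket name, key, and payload are placeholder values, not taken from any of the examples that follow.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class ObjectMetadataSketch {
    public static void main(String[] args) {
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        InputStream in = new ByteArrayInputStream(payload);

        // Empty metadata object; individual headers are set explicitly afterwards.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length); // avoids the client buffering the stream to determine its length
        metadata.setContentType("text/plain");

        // "example-bucket" and "example/key.txt" are placeholder names.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        s3.putObject(new PutObjectRequest("example-bucket", "example/key.txt", in, metadata));
    }
}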

Usage

From source file:gr.abiss.calipso.fs.S3FilePersistenceServiceImpl.java

License:Open Source License

/**
 * Save file in S3
 * @see gr.abiss.calipso.fs.FilePersistenceService#saveFile(java.io.InputStream, long, java.lang.String, java.lang.String)
 */
@Override
public String saveFile(InputStream in, long contentLength, String contentType, String path) {
    String url;
    // create metadata
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(contentLength);
    meta.setContentType(contentType);

    // save to bucket
    s3Client.putObject(new PutObjectRequest(nameCardBucket, path, in, meta)
            .withCannedAcl(CannedAccessControlList.PublicRead));
    // set the URL to return
    url = s3Client.getUrl(nameCardBucket, path).toString();
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("File saved: " + path + ", size: " + contentLength + ", contentType: " + contentType);
    }
    return url;
}

From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java

License:Apache License

public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;
    // tracks whether any retry attempt succeeded; checked after the loop below
    boolean done = false;

    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is:" + runFileTransferEntity.getBucketName());
    log.info("Folder Name is" + runFileTransferEntity.getFolder_name_in_bucket());

    String amazonFileUploadLocationOriginal = null;
    FileInputStream stream = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            Log.error("Invalid local path.Please provide valid path");
            throw new AWSUtilException("Invalid local path");
        }

    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);

                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }

            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name" + filepath);
            s3folderName = filepath;

            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }

            File f = new File(runFileTransferEntity.getLocalPath());

            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path ");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;

                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);

                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);

                        boolean success = dirs.mkdirs();
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }

                        for (File files : dirs.listFiles()) {

                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }

                        }
                    }

                    catch (IOException e) {
                        Log.error("IOException occured while transfering the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {

                            FileUtils.deleteDirectory(dirs);
                        }

                    }

                } else {
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    stream = new FileInputStream("/tmp/" + f.getName());
                    File S3file = new File("/tmp/" + f.getName());

                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, S3file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {

                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());

                if (Files.isDirectory(inputfile)) {

                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {

                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);

                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }

                    }

                } else {
                    PutObjectRequest putObjectRequest = null;
                    File file = new File(runFileTransferEntity.getLocalPath());
                    stream = new FileInputStream(runFileTransferEntity.getLocalPath());
                    putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal, keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }

        }

        catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError())
                    Log.error("Incorrect details provided.Please provide valid details", e);
                throw new AWSUtilException("Incorrect details provided");

            }

            {
                try {
                    Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
                } catch (Exception e1) {
                    Log.error("Exception occured while sleeping the thread");
                }
                continue;
            }

        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occured while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occured while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }
    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}

From source file:ingest.utility.IngestUtilities.java

License:Apache License

/**
 * Will copy external AWS S3 file to piazza S3 Bucket
 *
 * @param dataResource
 * @param host
 *            if piazza should host the data
 */
public void copyS3Source(DataResource dataResource) throws InvalidInputException, IOException {
    logger.log(String.format("Copying Data %s to Piazza S3 Location.", dataResource.getDataId()),
            Severity.INFORMATIONAL, new AuditElement(INGEST, "copyS3DataToPiazza", dataResource.getDataId()));
    // Obtain file input stream
    FileLocation fileLocation = ((FileRepresentation) dataResource.getDataType()).getLocation();
    FileAccessFactory fileFactory = getFileFactoryForDataResource(dataResource);
    InputStream inputStream = fileFactory.getFile(fileLocation);

    // Write stream directly into the Piazza S3 bucket
    AmazonS3 s3Client = getAwsClient(USE_KMS.booleanValue());
    ObjectMetadata metadata = new ObjectMetadata();
    String fileKey = String.format("%s-%s", dataResource.getDataId(), fileLocation.getFileName());
    s3Client.putObject(AMAZONS3_BUCKET_NAME, fileKey, inputStream, metadata);

    // Clean up
    inputStream.close();
}

From source file:io.confluent.connect.s3.storage.S3OutputStream.java

License:Open Source License

private ObjectMetadata newObjectMetadata() {
    ObjectMetadata meta = new ObjectMetadata();
    if (StringUtils.isNotBlank(ssea)) {
        meta.setSSEAlgorithm(ssea);
    }
    return meta;
}

From source file:io.druid.storage.s3.S3ServerSideEncryption.java

License:Apache License

@Override
public PutObjectRequest decorate(PutObjectRequest request) {
    final ObjectMetadata objectMetadata = request.getMetadata() == null ? new ObjectMetadata()
            : request.getMetadata().clone();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    return request.withMetadata(objectMetadata);
}

From source file:io.druid.storage.s3.S3ServerSideEncryption.java

License:Apache License

@Override
public CopyObjectRequest decorate(CopyObjectRequest request) {
    final ObjectMetadata objectMetadata = request.getNewObjectMetadata() == null ? new ObjectMetadata()
            : request.getNewObjectMetadata().clone();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    return request.withNewObjectMetadata(objectMetadata);
}

From source file:io.druid.storage.s3.ServerSideEncryptingAmazonS3.java

License:Apache License

public PutObjectResult putObject(String bucket, String key, String content) {
    final InputStream in = new ByteArrayInputStream(StringUtils.toUtf8(content));
    return putObject(new PutObjectRequest(bucket, key, in, new ObjectMetadata()));
}

From source file:io.fastup.maven.plugin.app.DeployPutRequestMaker.java

License:Open Source License

PutObjectRequest makePutRequest() throws MojoExecutionException {
    final TvaritEnvironment tvaritEnvironment = TvaritEnvironment.getInstance();
    tvaritEnvironment.<AppDeployerMojo>getMojo().getArtifactBucketName();
    final File warFile = tvaritEnvironment.getMavenProject().getArtifact().getFile();
    String projectArtifactId = tvaritEnvironment.getMavenProject().getArtifactId();
    String projectVersion = tvaritEnvironment.getMavenProject().getVersion();
    final String projectGroupId = tvaritEnvironment.getMavenProject().getGroupId();
    final String key = "deployables/" + projectGroupId + "/" + projectArtifactId + "/" + projectVersion + "/"
            + warFile.getName();

    final String bucketName = tvaritEnvironment.getArtifactBucketName();
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, warFile);
    final ObjectMetadata metadata = new ObjectMetadata();
    final Map<String, String> userMetadata = new HashMap<>();
    userMetadata.put("project_name", tvaritEnvironment.getProjectName());
    userMetadata.put("health_check_url", tvaritEnvironment.<AppDeployerMojo>getMojo().getHealthCheckUrl());
    userMetadata.put("private_key_name", tvaritEnvironment.<AppDeployerMojo>getMojo().getSshKeyName());
    userMetadata.put("db-version", tvaritEnvironment.<AppDeployerMojo>getMojo().getDbVersion());
    userMetadata.put("group-id", tvaritEnvironment.getMavenProject().getGroupId());
    userMetadata.put("artifact-id", tvaritEnvironment.getMavenProject().getArtifactId());
    userMetadata.put("version", tvaritEnvironment.getMavenProject().getVersion());
    userMetadata.put("app_fqdn", tvaritEnvironment.<AppDeployerMojo>getMojo().getAppFqdn());
    userMetadata.put("db-name", tvaritEnvironment.<AppDeployerMojo>getMojo().getDbName());
    userMetadata.put("db-username", tvaritEnvironment.<AppDeployerMojo>getMojo().getDbUsername());
    userMetadata.put("db-password", tvaritEnvironment.<AppDeployerMojo>getMojo().getDbPassword());
    final String contextConfigUrl = tvaritEnvironment.<AppDeployerMojo>getMojo().getContextConfigUrl();
    final URL url;
    try {
        url = new TemplateUrlMaker().makeUrl(contextConfigUrl);
    } catch (MalformedURLException e) {
        throw new MojoExecutionException("failed", e);
    }
    userMetadata.put("context_config_url", url.toString());
    final String contextRoot = tvaritEnvironment.<AppDeployerMojo>getMojo().getContextRoot();
    userMetadata.put("context_root", contextRoot.equals("/") ? "ROOT" : contextRoot);
    metadata.setUserMetadata(userMetadata);
    putObjectRequest.withMetadata(metadata);
    return putObjectRequest;
}

From source file:io.jeffrey.web.assemble.S3PutObjectTarget.java

@Override
public void upload(String key, String md5, String contentType, InputStream body, long contentLength)
        throws Exception {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(contentLength);
    metadata.setContentType(contentType);
    if (md5.equalsIgnoreCase(etags.get(key))) {
        System.out.println("skipping:" + key);
        return;
    }
    System.out.println("uploading:" + key);
    s3.putObject(new PutObjectRequest(bucket, key, body, metadata)
            .withCannedAcl(CannedAccessControlList.PublicRead));
}

From source file:io.milton.s3.service.AmazonStorageServiceImpl.java

License:Open Source License

@Override
public boolean putEntity(String bucketName, Entity entity, InputStream inputStream) {
    if (entity == null) {
        return false;
    }

    // Only store file in Amazon S3
    if (entity instanceof File) {
        String keyName = getAmazonS3UniqueKey(entity);

        // Additional metadata instructing Amazon S3 how to handle the
        // uploaded data (e.g. custom user metadata, hooks for specifying
        // content type, etc.).
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType(((File) entity).getContentType());

        // Always set the content length, even if it's already set
        metadata.setContentLength(((File) entity).getSize());
        boolean isUploaded = amazonS3Manager.uploadEntity(bucketName, keyName, inputStream, metadata);
        if (!isUploaded) {
            return false;
        }
    }

    // Store folder as hierarchy in Amazon DynamoDB
    return dynamoDBManager.putEntity(bucketName, entity);
}