Example usage for com.amazonaws.services.s3 AmazonS3 putObject

List of usage examples for com.amazonaws.services.s3 AmazonS3 putObject

Introduction

On this page you can find example usage of com.amazonaws.services.s3 AmazonS3 putObject.

Prototype

public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws SdkClientException, AmazonServiceException;

Document

Uploads a new object to the specified Amazon S3 bucket.
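
Below is a minimal, self-contained sketch of the call; it is not taken from any of the source files listed under Usage. The region, bucket name, key, and local file path are placeholder assumptions, and credentials come from the SDK's default provider chain.

import java.io.File;

import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

public class PutObjectExample {
    public static void main(String[] args) {
        // Placeholder bucket, key, and file path -- replace with your own values.
        String bucketName = "my-example-bucket";
        String key = "docs/report.txt";
        File file = new File("/tmp/report.txt");

        // Uses the default credentials provider chain and a placeholder region;
        // the examples below show alternatives such as BasicAWSCredentials and
        // environment-variable credentials.
        AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
                .withRegion(Regions.US_EAST_1)
                .build();

        // Optional metadata (for example, the content type) can be attached to the request.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("text/plain");

        PutObjectResult result = s3Client.putObject(
                new PutObjectRequest(bucketName, key, file).withMetadata(metadata));
        System.out.println("Uploaded object, ETag: " + result.getETag());
    }
}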

Usage

From source file: org.pentaho.amazon.hive.job.AmazonHiveJobExecutor.java

License: Apache License

/**
 * Executes a Hive job on the AWS Elastic MapReduce service.
 */
public Result execute(Result result, int arg1) throws KettleException {

    // Setup a log file.
    Log4jFileAppender appender = null;
    String logFileName = "pdi-" + this.getName(); //$NON-NLS-1$
    try {
        appender = LogWriter.createFileAppender(logFileName, true, false);
        LogWriter.getInstance().addAppender(appender);
        log.setLogLevel(parentJob.getLogLevel());
    } catch (Exception e) {
        logError(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.FailedToOpenLogFile", //$NON-NLS-1$
                logFileName, e.toString()));
        logError(Const.getStackTracker(e));
    }

    try {
        // Create and connect an AWS service.
        AmazonElasticMapReduceClient emrClient = new AmazonElasticMapReduceClient(awsCredentials);
        AmazonS3 s3Client = new AmazonS3Client(awsCredentials);

        // Get bucket name and S3 URL.
        String stagingBucketName = GetBucketName(stagingDir);
        String stagingS3BucketUrl = "s3://" + stagingBucketName; //$NON-NLS-1$

        // Prepare staging S3 URL for Hive script file.
        String stagingS3qUrl = "";
        if (qUrl.startsWith(S3FileProvider.SCHEME + "://")) { //$NON-NLS-1$

            // If the .q file is in S3, its staging S3 URL is s3://{bucketname}/{path}
            if (qUrl.indexOf("@s3") > 0) { //$NON-NLS-1$
                stagingS3qUrl = S3FileProvider.SCHEME + "://" + qUrl.substring(qUrl.indexOf("@s3") + 4); //$NON-NLS-1$
            } else {
                stagingS3qUrl = qUrl;
            }

        } else {
            // A local filename is given for the Hive script file. It should be copied to the S3 Log Directory.
            // First, check for the correct protocol.
            if (!qUrl.startsWith("file:")) { //$NON-NLS-1$
                if (log.isBasic()) {
                    logBasic(BaseMessages.getString(PKG,
                            "AmazonElasticMapReduceJobExecutor.HiveScriptFilename.Error") + qUrl); //$NON-NLS-1$
                }
            }
            // pull down the .q file via VFS
            FileObject qFile = KettleVFS.getFileObject(buildFilename(qUrl));
            File tmpFile = File.createTempFile("customEMR", "q"); //$NON-NLS-1$
            tmpFile.deleteOnExit();
            FileOutputStream tmpFileOut = new FileOutputStream(tmpFile);
            IOUtils.copy(qFile.getContent().getInputStream(), tmpFileOut);
            // Get key name for the script file S3 destination. Key is defined as path name after {bucket}/
            String key = GetKeyFromS3Url(stagingDir);
            if (key == null) {
                key = qFile.getName().getBaseName();
            } else {
                key += "/" + qFile.getName().getBaseName(); //$NON-NLS-1$
            }

            // delete the previous .q file in S3
            try {
                s3Client.deleteObject(stagingBucketName, key);
            } catch (Exception ex) {
                logError(Const.getStackTracker(ex));
            }

            // Put .q file in S3 Log Directory.
            s3Client.putObject(new PutObjectRequest(stagingBucketName, key, tmpFile));
            stagingS3qUrl = stagingS3BucketUrl + "/" + key; //$NON-NLS-1$
        }

        // AWS provides script-runner.jar (in its public bucket), which should be used as a MapReduce jar for Hive EMR
        // job.
        jarUrl = "s3://elasticmapreduce/libs/script-runner/script-runner.jar"; //$NON-NLS-1$

        RunJobFlowRequest runJobFlowRequest = null;
        RunJobFlowResult runJobFlowResult = null;
        if (StringUtil.isEmpty(hadoopJobFlowId)) {
            // create an EMR job flow, start a step to setup Hive and get the job flow ID.
            runJobFlowRequest = createJobFlow();
            runJobFlowResult = emrClient.runJobFlow(runJobFlowRequest);
            hadoopJobFlowId = runJobFlowResult.getJobFlowId();
        }

        // Now EMR job flow is ready to accept a Run Hive Script step.
        // First, prepare a Job Flow ID list.
        List<String> jobFlowIds = new ArrayList<String>();
        jobFlowIds.add(hadoopJobFlowId);

        // Configure a HadoopJarStep.
        String args = "s3://elasticmapreduce/libs/hive/hive-script "
                + "--base-path s3://elasticmapreduce/libs/hive/ --hive-version 0.7 --run-hive-script --args -f "
                + environmentSubstitute(stagingS3qUrl) + " " + environmentSubstitute(cmdLineArgs); //$NON-NLS-1$
        List<StepConfig> steps = ConfigHadoopJarStep(hadoopJobName, jarUrl, args);

        // Add a Run Hive Script step to the existing job flow.
        AddJobFlowStepsRequest addJobFlowStepsRequest = new AddJobFlowStepsRequest();
        addJobFlowStepsRequest.setJobFlowId(hadoopJobFlowId);
        addJobFlowStepsRequest.setSteps(steps);
        emrClient.addJobFlowSteps(addJobFlowStepsRequest);

        // Set a logging interval.
        String loggingIntervalS = environmentSubstitute(loggingInterval);
        int logIntv = 10;
        try {
            logIntv = Integer.parseInt(loggingIntervalS);
        } catch (NumberFormatException ex) {
            logError(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.LoggingInterval.Error", //$NON-NLS-1$
                    loggingIntervalS));
        }

        // monitor and log if intended.
        if (blocking) {
            try {
                if (log.isBasic()) {

                    String executionState = "RUNNING"; //$NON-NLS-1$

                    while (isRunning(executionState)) {
                        DescribeJobFlowsRequest describeJobFlowsRequest = new DescribeJobFlowsRequest();
                        describeJobFlowsRequest.setJobFlowIds(jobFlowIds);

                        DescribeJobFlowsResult describeJobFlowsResult = emrClient
                                .describeJobFlows(describeJobFlowsRequest);
                        boolean found = false;
                        for (JobFlowDetail jobFlowDetail : describeJobFlowsResult.getJobFlows()) {
                            if (jobFlowDetail.getJobFlowId().equals(hadoopJobFlowId)) {
                                executionState = jobFlowDetail.getExecutionStatusDetail().getState();
                                found = true;
                            }
                        }

                        if (!found) {
                            break;
                        }
                        logBasic(hadoopJobName + " " + BaseMessages.getString(PKG, //$NON-NLS-1$
                                "AmazonElasticMapReduceJobExecutor.JobFlowExecutionStatus", hadoopJobFlowId)
                                + executionState);

                        if (parentJob.isStopped()) {
                            if (!alive) {
                                TerminateJobFlowsRequest terminateJobFlowsRequest = new TerminateJobFlowsRequest();
                                terminateJobFlowsRequest.withJobFlowIds(hadoopJobFlowId);
                                emrClient.terminateJobFlows(terminateJobFlowsRequest);
                            }
                            break;
                        }

                        try {
                            if (isRunning(executionState)) {
                                Thread.sleep(logIntv * 1000);
                            }
                        } catch (InterruptedException ie) {
                            logError(Const.getStackTracker(ie));
                        }
                    }

                    if ("FAILED".equalsIgnoreCase(executionState)) { //$NON-NLS-1$
                        result.setStopped(true);
                        result.setNrErrors(1);
                        result.setResult(false);

                        S3Object outObject = s3Client.getObject(stagingBucketName,
                                hadoopJobFlowId + "/steps/1/stdout"); //$NON-NLS-1$
                        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
                        IOUtils.copy(outObject.getObjectContent(), outStream);
                        logError(outStream.toString());

                        S3Object errorObject = s3Client.getObject(stagingBucketName,
                                hadoopJobFlowId + "/steps/1/stderr"); //$NON-NLS-1$
                        ByteArrayOutputStream errorStream = new ByteArrayOutputStream();
                        IOUtils.copy(errorObject.getObjectContent(), errorStream);
                        logError(errorStream.toString());
                    }
                }
            } catch (Exception e) {
                logError(e.getMessage(), e);
            }
        }

    } catch (Throwable t) {
        t.printStackTrace();
        result.setStopped(true);
        result.setNrErrors(1);
        result.setResult(false);
        logError(t.getMessage(), t);
    }

    if (appender != null) {
        LogWriter.getInstance().removeAppender(appender);
        appender.close();

        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, appender.getFile(),
                parentJob.getJobname(), getName());
        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
    }

    return result;
}

From source file: org.springfield.lou.servlet.LouServlet.java

License: Open Source License

private String handleFileUpload(HttpServletRequest request) {
    System.out.println("HANDLE FILE UPLOAD");
    try {
        String targetid = request.getParameter("targetid");
        System.out.println("TARGETID UPLOAD=" + targetid);
        String screenid = request.getParameter("screenid");
        String cfilename = request.getParameter("cfilename");
        System.out.println("CFILENAME=" + cfilename);
        String cfilesize = request.getParameter("cfilesize");
        System.out.println("CFILESIZE=" + cfilesize);

        Html5ApplicationInterface app = null;
        String url = request.getRequestURI();
        int pos = url.indexOf("/domain/");
        if (pos != -1) {
            String tappname = url.substring(pos);
            app = ApplicationManager.instance().getApplication(tappname);
        }
        Screen eventscreen = app.getScreen(screenid);

        if (eventscreen == null)
            return null;

        String method = eventscreen.getModel()
                .getProperty("/screen['upload']/target['" + targetid + "']/method");
        System.out.println("METHOD=" + method);

        String destpath = eventscreen.getModel()
                .getProperty("/screen['upload']/target['" + targetid + "']/destpath");
        System.out.println("DESTPATH=" + destpath + " T=" + targetid);
        if (destpath == null || destpath.equals("")) {
            setUploadError(eventscreen, targetid, "destpath not set");
            return null;
        }

        String destname_prefix = eventscreen.getModel()
                .getProperty("/screen['upload']/target['" + targetid + "']/destname_prefix");
        if (destname_prefix == null || destname_prefix.equals("")) {
            setUploadError(eventscreen, targetid, "destname_prefix not set");
            return null;
        }

        String filetype = eventscreen.getModel()
                .getProperty("/screen['upload']/target['" + targetid + "']/filetype");
        if (filetype == null || filetype.equals("")) {
            setUploadError(eventscreen, targetid, "filetype not set");
            return null;
        }

        String fileext = eventscreen.getModel()
                .getProperty("/screen['upload']/target['" + targetid + "']/fileext");
        if (fileext == null || fileext.equals("")) {
            setUploadError(eventscreen, targetid, "fileext not set");
            return null;
        }

        String checkupload = eventscreen.getModel()
                .getProperty("/screen['upload']/target['" + targetid + "']/checkupload");
        if (checkupload == null || checkupload.equals("")) {
            setUploadError(eventscreen, targetid, "checkupload not set");
            return null;
        }

        String storagehost = eventscreen.getModel()
                .getProperty("/screen['upload']/target['" + targetid + "']/storagehost");
        if (storagehost == null || storagehost.equals("")) {
            setUploadError(eventscreen, targetid, "storagehost not set");
            return null;
        }

        String destname_type = eventscreen.getModel()
                .getProperty("/screen['upload']/target['" + targetid + "']/destname_type");
        if (destname_type == null || destname_type.equals("")) {
            setUploadError(eventscreen, targetid, "destname_type not set");
            return null;
        }

        String publicpath = eventscreen.getModel()
                .getProperty("/screen['upload']/target['" + targetid + "']/publicpath");
        if (publicpath == null || publicpath.equals("")) {
            setUploadError(eventscreen, targetid, "publicpath not set");
            return null;
        }

        // Here we can check whether this is a valid upload based on the filename and other specs (and kill the request if needed); also map the real extension.

        fileext = getValidExtension(fileext, cfilename);
        if (fileext == null)
            return null; // kill the request its not a valid format

        if (method.equals("s3amazon")) {
            String bucketname = eventscreen.getModel()
                    .getProperty("/screen['upload']/target['" + targetid + "']/bucketname");
            if (bucketname == null || bucketname.equals("")) {
                setUploadError(eventscreen, targetid, "bucketname not set");
                return null;
            }

            AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
                    .withCredentials(new EnvironmentVariableCredentialsProvider()).build();
            String filename = "unknown";
            int storageport = 22;

            if (destname_type.equals("epoch")) {
                filename = destpath + destname_prefix + "" + new Date().getTime();
            }

            String publicurl = publicpath + bucketname + "/" + filename + "." + fileext;

            FsPropertySet ps = new FsPropertySet(); // we will use this to send status reports back
            ps.setProperty("action", "start");
            ps.setProperty("progress", "0");
            ps.setProperty("cfilename", cfilename);
            ps.setProperty("url", publicurl);
            eventscreen.getModel().setProperties("/screen/upload/" + targetid, ps);

            try {
                InputStream inst = request.getInputStream();
                int read = 0;
                int readtotal = 0;
                int b;
                while ((b = inst.read()) != 44) {
                    // skip the data-URL header up to the comma (ASCII 44); not sure how to do this better
                }

                Base64InputStream b64i = new Base64InputStream(inst);

                //System.out.println("Uploading a new object to S3 from a stream "+bucketname+"/"+filename+"."+fileext);

                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setContentType(filetype + "/" + fileext);

                PutObjectRequest or = new PutObjectRequest(bucketname, filename + "." + fileext, b64i,
                        metadata);
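
                // A progress listener is attached below so the SDK reports transfer
                // events while putObject streams the base64-decoded data, letting the
                // upload status be pushed back to the screen model.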

                or.setGeneralProgressListener(new UploadProgressListener(eventscreen.getModel(), publicurl,
                        cfilename, cfilesize, targetid));
                s3Client.putObject(or);

            } catch (AmazonServiceException ase) {
                ase.printStackTrace();
            }
            ps.setProperty("action", "done");
            ps.setProperty("progress", "100");
            ps.setProperty("cfilename", cfilename);
            ps.setProperty("url", publicurl);

            eventscreen.getModel().setProperties("/screen/upload/" + targetid, ps);
            return bucketname + "/" + filename + "." + fileext;

        } else if (method.equals("scp")) {
            String pemfile = eventscreen.getModel()
                    .getProperty("/screen['upload']/target['" + targetid + "']/pemfile");
            if (pemfile == null || pemfile.equals("")) {
                setUploadError(eventscreen, targetid, "pemfile not set");
                return null;
            }

            String storagename = eventscreen.getModel()
                    .getProperty("/screen['upload']/target['" + targetid + "']/storagename");
            if (storagename == null || storagename.equals("")) {
                setUploadError(eventscreen, targetid, "storagename not set");
                return null;
            }

            String filename = "unknown";
            int storageport = 22;

            if (destname_type.equals("epoch")) {
                filename = destname_prefix + "" + new Date().getTime();
            }

            String publicurl = publicpath + filename + "." + fileext;

            FsPropertySet ps = new FsPropertySet(); // we will use this to send status reports back
            ps.setProperty("action", "start");
            ps.setProperty("progress", "0");
            ps.setProperty("url", publicurl);
            eventscreen.getModel().setProperties("/screen/upload/" + targetid, ps);

            JSch jsch = new JSch();
            jsch.addIdentity(pemfile);
            jsch.setConfig("StrictHostKeyChecking", "no");
            Session session = jsch.getSession(storagename, storagehost, storageport);
            session.connect();
            Channel channel = session.openChannel("sftp");

            channel.connect();
            ChannelSftp channelSftp = (ChannelSftp) channel;
            channelSftp.cd(destpath);

            InputStream inst = request.getInputStream();
            int read = 0;
            int readtotal = 0;
            int b;
            while ((b = inst.read()) != 44) {
                // skip the data-URL header up to the comma (ASCII 44); not sure how to do this better
            }
            Base64InputStream b64i = new Base64InputStream(inst);

            channelSftp.put(b64i, filename + "." + fileext);

            ps.setProperty("action", "done");
            ps.setProperty("progress", "100");
            ps.setProperty("url", publicurl);
            eventscreen.getModel().setProperties("/screen/upload/" + targetid, ps);
            return filename + "." + fileext;
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return null;
}

From source file: org.xmlsh.aws.gradle.s3.AmazonS3FileUploadTask.java

License: BSD License

@TaskAction
public void upload() throws IOException {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String key = getKey();
    File file = getFile();

    if (bucketName == null)
        throw new GradleException("bucketName is not specified");
    if (key == null)
        throw new GradleException("key is not specified");
    if (file == null)
        throw new GradleException("file is not specified");
    if (file.isFile() == false)
        throw new GradleException("file must be regular file");

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    // metadata will be null iff the object does not exist
    ObjectMetadata metadata = existingObjectMetadata();

    if (metadata == null || (isOverwrite() && metadata.getETag().equals(md5()) == false)) {
        getLogger().info("uploading... " + bucketName + "/" + key);
        s3.putObject(new PutObjectRequest(bucketName, key, file).withMetadata(getObjectMetadata()));
        getLogger().info("upload completed: " + getResourceUrl());
    } else {
        getLogger().info("s3://{}/{} already exists with matching md5 sum -- skipped", bucketName, key);
    }
    setResourceUrl(((AmazonS3Client) s3).getResourceUrl(bucketName, key));
}

From source file: org.xmlsh.aws.gradle.s3.BulkUploadTask.java

License: BSD License

@TaskAction
public void upload() {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String prefix = getNormalizedPrefix();
    FileTree source = getSource();

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    getLogger().info("uploading... {} to s3://{}/{}", source, bucketName, prefix);
    source.visit(new EmptyFileVisitor() {
        public void visitFile(FileVisitDetails element) {
            String key = prefix + element.getRelativePath();
            getLogger().info(" => s3://{}/{}", bucketName, key);
            Closure<ObjectMetadata> metadataProvider = getMetadataProvider();
            s3.putObject(new PutObjectRequest(bucketName, key, element.getFile())
                    .withMetadata(metadataProvider == null ? null
                            : metadataProvider.call(getBucketName(), key, element.getFile())));
        }
    });
}

From source file: org.zalando.stups.fullstop.s3.S3Writer.java

License: Apache License

public void putObjectToS3(String bucket, String fileName, String keyName, ObjectMetadata metadata,
        InputStream stream) {
    AmazonS3 s3client = new AmazonS3Client();
    try {
        logger.info("Uploading a new object to S3 from a file");

        s3client.putObject(
                new PutObjectRequest(bucket, Paths.get(keyName, fileName).toString(), stream, metadata));

    } catch (AmazonServiceException ase) {
        logger.error("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        logger.error("Error Message:    " + ase.getMessage());
        logger.error("HTTP Status Code: " + ase.getStatusCode());
        logger.error("AWS Error Code:   " + ase.getErrorCode());
        logger.error("Error Type:       " + ase.getErrorType());
        logger.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        logger.error("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        logger.error("Error Message: " + ace.getMessage());
    }
}

From source file: oulib.aws.s3.S3Util.java

/**
 * Creates an AWS S3 folder
 * 
 * @param bucketName
 * @param folderName
 * @param client 
 */
public static void createFolder(String bucketName, String folderName, AmazonS3 client) {

    try {

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(0);

        InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
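
        // S3 has no real directories: the "folder" is simulated by uploading this
        // zero-byte object under a key that ends with "/" (folderName + "/" below).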

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, folderName + "/", emptyContent,
                metadata);

        client.putObject(putObjectRequest);

        System.out
                .println("Successfully created folder " + folderName + " in bucket " + bucketName);
    } catch (Exception ex) {
        System.out.println("Failed to create the folder of " + folderName + " in the bucket of " + bucketName);
        //          Logger.getLogger(AwsS3Processor.class.getName()).log(Level.SEVERE, null, ex);
    }

}

From source file: oulib.aws.s3.S3Util.java

/**
 * Generate a small tiff file from large Tiff S3 bucket object <br>
 * Note: the small tiff file will have the same key path as the original one
 *
 * @param s3client : S3 client
 * @param s3 : S3 object that contains the original large tiff content
 * @param targetBucketName : the bucket that stores the small tiff file
 * @param targetKey : key of the object in the target bucket
 * @param compressionRate : compression rate
 * @return : PutObjectResult
 */
public static PutObjectResult generateSmallTiff(AmazonS3 s3client, S3Object s3, String targetBucketName,
        String targetKey, double compressionRate) {

    PutObjectResult result = null;
    ByteArrayOutputStream bos = null;
    ByteArrayOutputStream os = null;
    ByteArrayInputStream is = null;
    S3ObjectInputStream s = null;
    ByteArrayInputStream byteInputStream = null;

    try {
        System.setProperty("com.sun.media.jai.disableMediaLib", "true");

        bos = new ByteArrayOutputStream();
        s = s3.getObjectContent();
        byte[] bytes = IOUtils.toByteArray(s);
        byteInputStream = new ByteArrayInputStream(bytes);

        TIFFDecodeParam param = new TIFFDecodeParam();
        ImageDecoder dec = ImageCodec.createImageDecoder("TIFF", byteInputStream, param);

        RenderedImage image = dec.decodeAsRenderedImage();

        RenderingHints qualityHints = new RenderingHints(RenderingHints.KEY_RENDERING,
                RenderingHints.VALUE_RENDER_QUALITY);

        RenderedOp resizedImage = JAI.create("SubsampleAverage", image, compressionRate, compressionRate,
                qualityHints);

        TIFFEncodeParam params = new com.sun.media.jai.codec.TIFFEncodeParam();

        resizedImage = JAI.create("encode", resizedImage, bos, "TIFF", params);

        BufferedImage imagenew = resizedImage.getSourceImage(0).getAsBufferedImage();

        os = new ByteArrayOutputStream();
        ImageIO.write(imagenew, "tif", os);
        is = new ByteArrayInputStream(os.toByteArray());
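
        // Setting the content length matters when uploading from an InputStream;
        // without it the SDK must buffer the whole stream in memory to compute it.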

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(os.toByteArray().length);
        metadata.setContentType("image/tiff");
        metadata.setLastModified(new Date());

        os.close();

        imagenew.flush();

        result = s3client.putObject(new PutObjectRequest(targetBucketName, targetKey, is, metadata));
    } catch (IOException | AmazonClientException ex) {
        Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        try {
            if (bos != null) {
                bos.close();
            }
            if (os != null) {
                os.close();
            }
            if (is != null) {
                is.close();
            }
            if (s != null) {
                s.close();
            }
            if (byteInputStream != null) {
                byteInputStream.close();
            }
        } catch (IOException ex) {
            Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    return result;
}

From source file: oulib.aws.s3.S3Util.java

/**
 * Pull out Tiff metadata from input S3 object and inject into the 
 * content of target S3 Object;<br>
 * Generate the new output S3 object that has the metadata from input object.
 *
 * @param s3client : S3 client
 * @param obj1 : input object that provides metadata
 * @param obj2 : target object that receives metadata
 * 
 * @return PutObjectResult
 */
public static PutObjectResult copyS3ObjectTiffMetadata(AmazonS3 s3client, S3Object obj1, S3Object obj2) {

    PutObjectResult result = null;

    BufferedInputStream bufferedInputStream = null;
    ByteArrayOutputStream byteArrayOutputStream = null;
    ByteArrayInputStream byteArrayInputStream = null;
    ByteArrayInputStream bis = null;
    S3ObjectInputStream content1 = null;
    S3ObjectInputStream content2 = null;
    String targetBucketName = obj2.getBucketName();
    String outputKey = obj2.getKey().split(".tif")[0] + "-copied.tif";

    ImageMetadata metadata1, metadata2;
    TiffImageMetadata tiffMetadata1, tiffMetadata2;
    TiffOutputSet output1, output2;

    try {
        content1 = obj1.getObjectContent();
        content2 = obj2.getObjectContent();

        byte[] bytes1 = IOUtils.toByteArray(content1);
        byte[] bytes2 = IOUtils.toByteArray(content2);

        metadata1 = Imaging.getMetadata(bytes1);
        metadata2 = Imaging.getMetadata(bytes2);

        tiffMetadata1 = (TiffImageMetadata) metadata1;
        tiffMetadata2 = (TiffImageMetadata) metadata2;

        output1 = tiffMetadata1.getOutputSet();
        output2 = tiffMetadata2.getOutputSet();

        TiffOutputDirectory rootDir = output2.getOrCreateRootDirectory();
        TiffOutputDirectory exifDir = output2.getOrCreateExifDirectory();
        TiffOutputDirectory gpsDir = output2.getOrCreateGPSDirectory();

        if (null != output1.getRootDirectory()) {
            List<TiffOutputField> fs = output1.getRootDirectory().getFields();
            for (TiffOutputField f1 : fs) {
                if (null == rootDir.findField(f1.tag)
                        // CANNOT create the output image with this tag included!
                        && !"PlanarConfiguration".equals(f1.tagInfo.name)) {
                    rootDir.add(f1);
                }
            }
        }

        if (null != output1.getExifDirectory()) {
            for (TiffOutputField f2 : output1.getExifDirectory().getFields()) {
                exifDir.removeField(f2.tagInfo);
                exifDir.add(f2);
            }
        }

        if (null != output1.getGPSDirectory()) {
            for (TiffOutputField f3 : output1.getGPSDirectory().getFields()) {
                gpsDir.removeField(f3.tagInfo);
                gpsDir.add(f3);
            }
        }

        byteArrayOutputStream = new ByteArrayOutputStream();
        TiffImageWriterLossy writerLossy = new TiffImageWriterLossy(output2.byteOrder);
        writerLossy.write(byteArrayOutputStream, output2);

        byteArrayInputStream = new ByteArrayInputStream(byteArrayOutputStream.toByteArray());

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(byteArrayOutputStream.toByteArray().length);
        metadata.setContentType("image/tiff");
        metadata.setLastModified(new Date());

        result = s3client
                .putObject(new PutObjectRequest(targetBucketName, outputKey, byteArrayInputStream, metadata));

    } catch (ImageReadException | IOException | ImageWriteException ex) {
        Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        try {
            if (null != content1) {
                content1.close();
            }
            if (null != content2) {
                content2.close();
            }
            if (null != bufferedInputStream) {
                bufferedInputStream.close();
            }
            if (null != byteArrayInputStream) {
                byteArrayInputStream.close();
            }
            if (null != byteArrayOutputStream) {
                byteArrayOutputStream.close();
            }
            if (null != bis) {
                bis.close();
            }
        } catch (IOException ex) {
            Logger.getLogger(S3Util.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
    return result;
}

From source file: pl.pawlik.cymes.controllers.FormController.java

@RequestMapping(value = "/upload", method = RequestMethod.POST)
public String handleFileUpload(@RequestParam("plik") MultipartFile file) {
    if (!file.isEmpty()) {
        try {

            //myObject.getClass().getProtectionDomain().getCodeSource()
            System.out.println("------------" + ObjectMapper.class.getProtectionDomain().getCodeSource());
            Logger.getLogger("FormController").log(Level.SEVERE,
                    "------------" + ObjectMapper.class.getProtectionDomain().getCodeSource());
            UUID uuid = UUID.randomUUID();
            String filename = "/uploads/upload_" + uuid.toString();
            String bucketName = "pawliktest";
            String accessKey = "xx";
            String secretKey = "xx";
            byte[] bytes = file.getBytes();
            InputStream inputStream = new ByteArrayInputStream(bytes);
            AmazonS3 s3client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
            s3client.putObject(new PutObjectRequest(bucketName, filename, inputStream, new ObjectMetadata()));

            System.out.println("File {} has been successfully uploaded as ");
        } catch (Exception e) {
            e.printStackTrace();
        }
    } else {
        System.out.println("Uploaded file is empty");
    }
    return "redirect:/cymes/upload";
}

From source file: S3Controller.UploadImages.java

public void UploadToS3(String bucketname, String filename, String filepath) {
    //  public static void main(String[] args) {

    AWSCredentials credentials = null;
    String aws_access_key_id = "PUT_YOUR_aws_access_key_id_HERE";
    String aws_secret_access_key = "PUT_YOUR_aws_secret_access_key_HERE";
    String bucketName = bucketname; //"zillionbucket";
    String fileName = filename; //"javaee_duke_image.jpg";
    String localpath = filepath; //"C:\\Users\\Hariom\\Documents\\NetBeansProjects\\JavaApplication9\\src\\ProcessedImages\\javaee_duke_image.jpg";
    try {
        credentials = new BasicAWSCredentials(aws_access_key_id, aws_secret_access_key);//.getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region AP_SOUTHEAST_1 = Region.getRegion(Regions.AP_SOUTHEAST_1);
    s3.setRegion(AP_SOUTHEAST_1);

    try {

        s3.putObject(new PutObjectRequest(bucketName, fileName, new File(localpath))
                .withCannedAcl(CannedAccessControlList.PublicRead));

        System.out.println("File Uploaded to S3: " + fileName);

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }

}