List of usage examples for the com.amazonaws.services.s3.model.PutObjectRequest constructor
public PutObjectRequest(String bucketName, String key, File file)
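Before the project examples below, here is a minimal, self-contained sketch of this constructor in use. The bucket name, key, and file path are placeholders, and the client is assumed to pick up credentials from the default provider chain:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutObjectRequestBasicExample {
    public static void main(String[] args) {
        // Placeholder bucket, key, and local file; replace with real values.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        PutObjectRequest request = new PutObjectRequest("my-bucket", "my-key", new File("/tmp/data.txt"));
        s3.putObject(request);
    }
}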
From source file:controllers.s3modify.java
License:Open Source License
public static void main(String[] args) throws Exception {
    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (/home/sravya/.aws/credentials).
     *
     * TransferManager manages a pool of threads, so we create a
     * single instance and share it throughout our application.
     */
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/home/sravya/.aws/credentials), and is in valid format.", e);
    }
    int argLen = args.length;
    Region reg = Region.getRegion(Regions.US_WEST_2);
    int hack = 0;
    int userrequests;
    try {
        userrequests = Integer.parseInt(args[argLen - 1]);
    } catch (NumberFormatException e) {
        userrequests = 1;
        String use = args[argLen - 1];
        if (use.equals("Australia")) {
            hack = 0;
        } else if (use.equals("SouthAfrica")) {
            hack = 1;
        } else if (use.equals("India")) {
            hack = 2;
        } else if (use.equals("UnitedKingdom")) {
            hack = 3;
        } else if (use.equals("China")) {
            hack = 4;
        } else if (use.equals("Germany")) {
            hack = 5;
        } else if (use.equals("France")) {
            hack = 6;
        } else if (use.equals("Japan")) {
            hack = 7;
        } else if (use.equals("Thailand")) {
            hack = 8;
        } else if (use.equals("Spain")) {
            hack = 9;
        }
    }
    int filecount = 0;
    for (int m = 0; m < argLen - 1; m++) {
        filecount = filecount + 1;
    }
    int numphotos = filecount;
    int numIDCs = numphotos;
    int locationInd = 0;
    long[] cusize = new long[6];
    long photosspace = 0;
    for (int mm = 0; mm < userrequests; mm++) {
        for (int i = 0; i < cusize.length; i++)
            cusize[i] = 0;
        ArrayList<Integer> originalgarph = new ArrayList<Integer>();
        loadObj ob = calculateload();
        long[] regionload = new long[5];
        regionload = ob.load;
        for (int i = 0; i < regionload.length; i++) {
            if (regionload[i] == 0) {
                regionload[i] = 1000;
            }
        }
        for (int i = 0; i < 5; i++) {
            System.out.println(regionload[i]);
            double diffload = 0;
            double avgload = 0;
            int count = 0;
            for (int j = 0; j < 5; j++) {
                if (j != i) {
                    avgload = avgload + regionload[j];
                    count++;
                }
            }
            avgload = (avgload / count);
            diffload = (regionload[i] / avgload);
            System.out.println("avgload: " + avgload);
            System.out.println("diffload: " + diffload);
            if (diffload < 1.8) {
                originalgarph.add(i + 1);
            }
        }
        availSpaceNorthCal = maxsize - regionload[0];
        photosspace = numphotos * 6000;
        if (availSpaceNorthCal < photosspace) {
            availSpaceNorthCal = maxsize;
            cusize[1] = availSpaceNorthCal / 6000;
        } else {
            cusize[1] = availSpaceNorthCal / 6000;
        }
        availSpaceOregon = maxsize - regionload[1];
        photosspace = numphotos * 6000;
        if (availSpaceOregon < photosspace) {
            availSpaceOregon = maxsize;
            cusize[2] = availSpaceOregon / 6000;
        } else {
            cusize[2] = availSpaceOregon / 6000;
        }
        availSpaceSingapore = maxsize - regionload[2];
        photosspace = numphotos * 6000;
        if (availSpaceSingapore < photosspace) {
            availSpaceSingapore = maxsize;
            cusize[3] = availSpaceSingapore / 6000;
        } else {
            cusize[3] = availSpaceSingapore / 6000;
        }
        availSpaceTokyo = maxsize - regionload[3];
        photosspace = numphotos * 6000;
        if (availSpaceTokyo < photosspace) {
            availSpaceTokyo = maxsize;
            cusize[4] = availSpaceTokyo / 6000;
        } else {
            cusize[4] = availSpaceTokyo / 6000;
        }
        availSpaceSydney = maxsize - regionload[4];
        photosspace = numphotos * 6000;
        if (availSpaceSydney < photosspace) {
            availSpaceSydney = maxsize;
            cusize[5] = availSpaceSydney / 6000;
        } else {
            cusize[5] = availSpaceSydney / 6000;
        }
        int cou = originalgarph.size();
        String fileName = algorithmPath + "request.alg";
        PrintWriter writer = new PrintWriter(fileName, "UTF-8");
        for (int i = 0; i < cou; i++) {
            int value = originalgarph.get(i);
            writer.print(value + " ");
            System.out.println(" IDC " + String.valueOf(value));
        }
        writer.println();
        for (int i = 0; i < originalgarph.size(); i++) {
            int j = originalgarph.get(i);
            writer.print(cusize[j] + " ");
        }
        writer.println();
        writer.println(1);
        if (userrequests != 1) {
            locationInd = randInt(0, 9);
        } else {
            locationInd = hack;
        }
        writer.println(locationInd);
        System.out.println(" locationInd " + String.valueOf(locationInd));
        writer.println(numphotos);
        System.out.println(" numphotos " + String.valueOf(numphotos));
        writer.println(numIDCs);
        System.out.println(" numIDCs " + String.valueOf(numIDCs));
        writer.close();
        originalgarph.clear();
        try {
            String prg = "import sys\nprint int(sys.argv[1])+int(sys.argv[2])\n";
            String pythonCmd = "/usr/bin/python " + algorithmPath + "ramd.py";
            Process p = Runtime.getRuntime().exec(pythonCmd);
            try {
                Thread.sleep(2000); // give the external script two seconds to finish
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
            String fileName1 = algorithmPath + "work.alg";
            File log = new File(fileName1);
            int filenumber = 0;
            String fileName2 = algorithmPath + "filenumber.alg";
            Scanner numberscan = new Scanner(new File(fileName2));
            if (numberscan.hasNextLine()) {
                filenumber = numberscan.nextInt();
            } else {
                filenumber = 1;
            }
            numberscan.close();
            ArrayList<String> photofnames = new ArrayList<String>();
            ArrayList<String> argFNames = new ArrayList<String>();
            for (int ll = 0; ll < argLen - 1; ll++) {
                photofnames.add(photosPath + args[ll] + ".bmp");
                argFNames.add(args[ll]);
                System.out.println("Will upload " + photosPath + args[ll] + ".bmp");
            }
            String sCurrentLine;
            BufferedReader br = null;
            br = new BufferedReader(new FileReader(fileName1));
            int currLine = 0;
            Integer userNumber = 0;
            ArrayList<Integer> idcSet = new ArrayList<Integer>();
            ArrayList<Integer> photonos = new ArrayList<Integer>();
            int currU = 0;
            while ((sCurrentLine = br.readLine()) != null) {
                if (currLine % 3 == 0) {
                    userNumber = Integer.parseInt(sCurrentLine);
                    idcSet.clear();
                    photonos.clear();
                }
                if (currLine % 3 == 1) {
                    String[] idcnums = sCurrentLine.split(" ");
                    String regions = "";
                    String regionsFileName = algorithmPath + "regions.alg";
                    PrintWriter regionwriter = new PrintWriter(regionsFileName, "UTF-8");
                    for (int numIdcs = 0; numIdcs < idcnums.length; numIdcs++) {
                        idcSet.add(Integer.parseInt(idcnums[numIdcs]));
                        if (idcnums[numIdcs].equals("1")) {
                            regions = "region1";
                            regionwriter.print(regions + " ");
                        } else if (idcnums[numIdcs].equals("2")) {
                            regions = "region2";
                            regionwriter.print(regions + " ");
                        } else if (idcnums[numIdcs].equals("3")) {
                            regions = "region3";
                            regionwriter.print(regions + " ");
                        } else if (idcnums[numIdcs].equals("4")) {
                            regions = "region4";
                            regionwriter.print(regions + " ");
                        } else if (idcnums[numIdcs].equals("5")) {
                            regions = "region5";
                            regionwriter.print(regions + " ");
                        }
                        System.out.println("IDCs: " + idcnums[numIdcs]);
                    }
                    regionwriter.close();
                }
                if (currLine % 3 == 2) {
                    String[] idcpnums = sCurrentLine.split(" ");
                    for (int numIdcs = 0; numIdcs < idcpnums.length; numIdcs++) {
                        photonos.add(Integer.parseInt(idcpnums[numIdcs]));
                    }
                    ArrayList<String> smallestBnames = new ArrayList<String>();
                    ArrayList<String> bucketnames = new ArrayList<String>();
                    for (int tot = 0; tot < idcSet.size(); tot++) {
                        smallestBnames.add(ob.bnames[idcSet.get(tot) - 1]);
                    }
                    AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
                    int currPno = 0;
                    ArrayList<String> transferThese = new ArrayList<String>();
                    ArrayList<Integer> transferThesebno = new ArrayList<Integer>();
                    System.out.println(String.valueOf(idcpnums.length));
                    // Upload everything to the first bucket.
                    for (int numIdcs = 0; numIdcs < idcpnums.length; numIdcs++) {
                        for (int numP = 0; numP < photonos.get(numIdcs); numP++) {
                            String uploadFileName = photofnames.get(currPno);
                            String keyName = String.valueOf(userNumber) + "_" + argFNames.get(currPno) + '_'
                                    + filenumber++ + ".bmp";
                            if (numIdcs > 0) {
                                transferThese.add(keyName);
                                transferThesebno.add(numIdcs);
                            }
                            try {
                                System.out.println("Uploading " + uploadFileName + " to "
                                        + smallestBnames.get(0) + " with keyname " + keyName);
                                File file = new File(uploadFileName);
                                s3client.putObject(new PutObjectRequest(smallestBnames.get(0), keyName, file));
                            } catch (AmazonServiceException ase) {
                                System.out.println("Caught an AmazonServiceException, which means your "
                                        + "request made it to Amazon S3, but was rejected with an error "
                                        + "response for some reason.");
                                System.out.println("Error Message: " + ase.getMessage());
                                System.out.println("HTTP Status Code: " + ase.getStatusCode());
                                System.out.println("AWS Error Code: " + ase.getErrorCode());
                                System.out.println("Error Type: " + ase.getErrorType());
                                System.out.println("Request ID: " + ase.getRequestId());
                            }
                            currPno++;
                        }
                    }
                    // Transfer the remaining files to their target buckets.
                    System.out.println("Number of files to transfer " + String.valueOf(transferThese.size()));
                    for (int tot = 0; tot < transferThese.size(); tot++) {
                        String source = smallestBnames.get(0);
                        String dest = smallestBnames.get(transferThesebno.get(tot));
                        String fname = transferThese.get(tot);
                        String src = "s3://" + source + "/" + fname;
                        String d = "s3://" + dest;
                        String cmd = "aws s3 mv " + src + " " + d + "\n";
                        System.out.println("Moving " + src + " to " + d + "\n");
                        Process p1 = Runtime.getRuntime().exec(cmd);
                    }
                    currU++;
                    if (currU >= 1) {
                        transferThese.clear();
                        transferThesebno.clear();
                        photofnames.clear();
                        argFNames.clear();
                        smallestBnames.clear();
                        break;
                    }
                }
                currLine++;
            }
            String fileNumberFilePath = algorithmPath + "filenumber.alg";
            PrintWriter numberwriter = new PrintWriter(fileNumberFilePath, "UTF-8");
            numberwriter.println(filenumber);
            numberwriter.close();
        } catch (Exception e) {
            e.printStackTrace(); // surface unexpected I/O or process failures from this pass
        }
    }
    // String s = null;
    // Process p1 = Runtime.getRuntime().exec("ls -alrt");
    //
    // BufferedReader stdInput = new BufferedReader(new InputStreamReader(p1.getInputStream()));
    //
    // BufferedReader stdError = new BufferedReader(new InputStreamReader(p1.getErrorStream()));
    //
    // // read the output from the command
    // System.out.println("Here is the standard output of the command:\n");
    // while ((s = stdInput.readLine()) != null) {
    //     System.out.println(s);
    // }
}
From source file:cz.pichlik.goodsentiment.server.repository.S3RepositoryBase.java
License:Apache License
public void save(final String bucket, String key, File file, boolean publicRead) {
    PutObjectRequest req = new PutObjectRequest(bucket, key, file);
    if (publicRead) {
        req.withCannedAcl(CannedAccessControlList.PublicRead);
    }
    s3Client.putObject(req);
}
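Since withCannedAcl returns the request itself, the same public-read upload can also be written fluently. A minimal sketch, assuming a placeholder bucket and file and the default credentials chain:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PublicReadUploadSketch {
    public static void main(String[] args) {
        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
        // PublicRead makes the uploaded object readable by anyone.
        s3Client.putObject(new PutObjectRequest("my-bucket", "report.csv", new File("/tmp/report.csv"))
                .withCannedAcl(CannedAccessControlList.PublicRead));
    }
}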
From source file:ecplugins.s3.S3Util.java
License:Apache License
public static void UploadObject(String bucketName, String key)
        throws AmazonClientException, AmazonServiceException, Exception {
    Properties props = TestUtils.getProperties();
    File file = new File(createFile());
    BasicAWSCredentials credentials = new BasicAWSCredentials(props.getProperty(StringConstants.ACCESS_ID),
            props.getProperty(StringConstants.SECRET_ACCESS_ID));
    // Create TransferManager
    TransferManager tx = new TransferManager(credentials);
    // Get S3 Client
    AmazonS3 s3 = tx.getAmazonS3Client();
    try {
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, file));
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered an internal "
                + "error while trying to communicate with S3, such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
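This example only borrows the low-level client from TransferManager and never uses its managed transfers. For large files the managed upload is usually a better fit, since it runs multipart uploads on a thread pool. A minimal sketch with placeholder bucket, key, and file, assuming the default credentials chain:

import java.io.File;

import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class ManagedUploadSketch {
    public static void main(String[] args) throws InterruptedException {
        TransferManager tx = TransferManagerBuilder.defaultTransferManager();
        // Large files are split into parallel multipart uploads automatically.
        Upload upload = tx.upload(new PutObjectRequest("my-bucket", "my-key", new File("/tmp/big.bin")));
        upload.waitForCompletion(); // blocks until the transfer finishes
        tx.shutdownNow();
    }
}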
From source file:edu.harvard.hms.dbmi.bd2k.irct.aws.event.result.S3AfterSaveResult.java
License:Mozilla Public License
@Override
public void fire(SecureSession session, Executable executable) {
    try {
        if (executable.getStatus() != ExecutableStatus.COMPLETED) {
            return;
        }
        Result result = executable.getResults();
        for (File resultFile : result.getData().getFileList()) {
            String keyName = s3Folder + result.getId() + "/" + resultFile.getName();
            // Copy the result file into S3
            s3client.putObject(new PutObjectRequest(bucketName, keyName, resultFile));
            log.info("Moved " + result.getResultSetLocation() + " to " + bucketName + "/" + keyName);
            // Delete the local copy
            resultFile.delete();
            log.info("Deleted " + resultFile.getName());
        }
        result.setResultSetLocation("S3://" + s3Folder + result.getId());
    } catch (AmazonServiceException ase) {
        log.warn("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        log.warn("Error Message: " + ase.getMessage());
        log.warn("HTTP Status Code: " + ase.getStatusCode());
        log.warn("AWS Error Code: " + ase.getErrorCode());
        log.warn("Error Type: " + ase.getErrorType());
        log.warn("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.warn("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        log.warn("Error Message: " + ace.getMessage());
    } catch (ResourceInterfaceException e) {
        log.warn("Error Message: " + e.getMessage());
    }
}
From source file:edu.iit.s3bucket.S3Bucket.java
/**
 * @param file the file to upload; its name is used as the object key
 */
public void putObjectsToBucket(File file) {
    s3client.putObject(new PutObjectRequest(this.bucketname, file.getName(), file));
}
From source file:example.uploads3.UploadS3.java
License:Apache License
public static void main(String[] args) throws Exception {
    String uploadFileName = args[0];
    String bucketName = "haos3";
    String keyName = "test/byspark.txt";
    // Create a Java Spark Context.
    SparkConf conf = new SparkConf().setAppName("UploadS3");
    JavaSparkContext sc = new JavaSparkContext(conf);
    AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
    try {
        System.out.println("Uploading a new object to S3 from a file\n");
        File file = new File(uploadFileName);
        PutObjectRequest putRequest = new PutObjectRequest(bucketName, keyName, file);
        // Request server-side encryption.
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setServerSideEncryption("AES256");
        putRequest.setMetadata(objectMetadata);
        s3client.putObject(putRequest);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
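The "AES256" literal passed to the metadata matches the SDK's ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION constant; using the constant avoids typos. A minimal sketch of the same server-side-encrypted upload without the Spark scaffolding, with placeholder bucket, key, and file:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class EncryptedUploadSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        ObjectMetadata metadata = new ObjectMetadata();
        // SSE-S3: ask S3 to encrypt the object at rest with AES-256.
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        PutObjectRequest request = new PutObjectRequest("my-bucket", "my-key", new File("/tmp/data.txt"));
        request.setMetadata(metadata);
        s3.putObject(request);
    }
}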
From source file:exemplos.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:generator.components.S3FileUtility.java
License:Apache License
/**
 * Upload a file to the S3 bucket.
 *
 * @param file the object to upload
 */
public String writeFileToS3(File file, FileLocation fileLocation) throws FileNotFoundException {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(file.length());
    String fileKey = String.format("%s-%s", uuidFactory.getUUID(), file.getName());
    // BasicAWSCredentials credentials = new BasicAWSCredentials(AMAZONS3_ACCESS_KEY, AMAZONS3_PRIVATE_KEY);
    s3Client = new AmazonS3Client();
    PutObjectRequest putObj = new PutObjectRequest(S3_OUTPUT_BUCKET, fileKey, file);
    putObj.setMetadata(metadata); // attach the content-length metadata computed above
    // Make the object publicly readable
    putObj.setCannedAcl(CannedAccessControlList.PublicRead);
    s3Client.putObject(putObj);
    return fileKey;
}
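When the payload is an InputStream rather than a File, PutObjectRequest has a dedicated overload, and declaring the content length on the metadata matters because the SDK cannot measure a stream; without it, the client buffers the stream in memory to compute the length. A minimal sketch with a placeholder bucket and key, assuming the default credentials chain:

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class StreamUploadSketch {
    public static void main(String[] args) {
        byte[] payload = "hello, s3".getBytes(StandardCharsets.UTF_8);
        InputStream stream = new ByteArrayInputStream(payload);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length); // tell S3 how many bytes to expect
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        s3.putObject(new PutObjectRequest("my-bucket", "greeting.txt", stream, metadata));
    }
}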
From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java
License:Apache License
public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;
    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is: " + runFileTransferEntity.getBucketName());
    log.info("Folder Name is " + runFileTransferEntity.getFolder_name_in_bucket());
    String amazonFileUploadLocationOriginal = null;
    FileInputStream stream = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            Log.error("Invalid local path. Please provide a valid path");
            throw new AWSUtilException("Invalid local path");
        }
    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();
    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }
            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name " + filepath);
            s3folderName = filepath;
            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }
            File f = new File(runFileTransferEntity.getLocalPath());
            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;
                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);
                        boolean success = dirs.mkdirs();
                        // Copy each HDFS file into the temp folder, then upload it.
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }
                        for (File files : dirs.listFiles()) {
                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }
                        }
                    } catch (IOException e) {
                        Log.error("IOException occurred while transferring the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {
                            FileUtils.deleteDirectory(dirs);
                        }
                    }
                } else {
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    stream = new FileInputStream("/tmp/" + f.getName());
                    File S3file = new File("/tmp/" + f.getName());
                    // Upload the file copied out of HDFS.
                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, S3file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                if (Files.isDirectory(inputfile)) {
                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {
                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);
                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }
                    }
                } else {
                    PutObjectRequest putObjectRequest = null;
                    File file = new File(runFileTransferEntity.getLocalPath());
                    stream = new FileInputStream(runFileTransferEntity.getLocalPath());
                    putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal, keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }
        } catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError())
                    Log.error("Incorrect details provided. Please provide valid details", e);
                throw new AWSUtilException("Incorrect details provided");
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occurred while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }
    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}
From source file:ics.uci.edu.amazons3.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This sample creates the client with explicitly supplied credentials.
     *
     * Important: Never commit real AWS keys to source control; the values
     * below are placeholders to fill in before running the sample.
     * http://aws.amazon.com/security-credentials
     */
    final AmazonS3 s3 = new AmazonS3Client(
            new BasicAWSCredentials("<your-access-key-id>", "<your-secret-access-key>"));

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}