List of usage examples for the com.amazonaws.auth.profile.ProfileCredentialsProvider constructor
public ProfileCredentialsProvider(String profileName)
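Before the collected examples, a minimal self-contained sketch of this constructor (the profile name "my-profile", the class name, and the printed messages are illustrative placeholders, not taken from any of the sources below): the provider reads the named profile from the shared ~/.aws/credentials file, and getCredentials() fails with an AmazonClientException if the profile is missing or malformed.

    import com.amazonaws.AmazonClientException;
    import com.amazonaws.auth.AWSCredentialsProvider;
    import com.amazonaws.auth.profile.ProfileCredentialsProvider;

    public class ProfileCredentialsProviderSketch {
        public static void main(String[] args) {
            // "my-profile" is a placeholder; it must match a [my-profile] section in ~/.aws/credentials.
            AWSCredentialsProvider provider = new ProfileCredentialsProvider("my-profile");
            try {
                // Calling getCredentials() eagerly surfaces a missing or malformed profile,
                // instead of failing later on the first service request.
                provider.getCredentials();
                System.out.println("Loaded credentials for profile my-profile");
            } catch (AmazonClientException e) {
                System.err.println("Could not load credentials: " + e.getMessage());
            }
        }
    }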
From source file: com.ipcglobal.awscdh.util.Utils.java
License: Apache License
/**
 * Inits the profile credentials provider.
 *
 * @param profileName the profile name
 * @return the AWS credentials provider
 * @throws Exception the exception
 */
public static AWSCredentialsProvider initProfileCredentialsProvider(String profileName) throws Exception {
    // Get credentials from the ~/.aws/credentials file
    AWSCredentialsProvider credentialsProvider = null;
    try {
        credentialsProvider = new ProfileCredentialsProvider(profileName);
        // Verify we can fetch credentials from the provider
        credentialsProvider.getCredentials();
    } catch (AmazonClientException e) {
        log.error("Unable to obtain credentials from ProfileCredentialsProvider, profileName=" + profileName);
        throw e;
    }
    return credentialsProvider;
}
From source file: com.jfixby.scarabei.red.aws.test.S3Sample.java
License: Open Source License
public static void main(final String[] args) throws IOException {
    /*
     * The ProfileCredentialsProvider will return your [default] credential profile by reading from the
     * credentials file located at (C:\\Users\\JCode\\.aws\\credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (final Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (C:\\Users\\%USERNAME%\\.aws\\credentials), and is in valid format.", e);
    }

    final AmazonS3 s3 = new AmazonS3Client(credentials);
    final Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    final String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    final String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique, so once a bucket name
         * has been taken by any user, you can't create another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to keep your data closer
         * to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (final Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to S3, or upload directly an
         * InputStream if you know the length of the data in the stream. You can also specify your own
         * metadata when uploading to S3, which allows you to set a variety of options like content-type
         * and content-encoding, plus additional metadata specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of the object's metadata and a
         * stream from which to read the contents. It's important to read the contents of the stream as
         * quickly as possible since the data is streamed directly from Amazon S3 and your network
         * connection will remain open until you read all the data or close the input stream.
         *
         * GetObjectRequest also supports several other options, including conditional downloading of
         * objects based on modification times, ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        final S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for listing the objects in
         * your bucket. Keep in mind that buckets with many objects might truncate their results when
         * listing their objects, so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve additional results.
         */
        System.out.println("Listing objects");
        final ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (final S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket, there is no way to
         * undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be deleted, so remember to
         * delete any objects from your buckets before you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (final AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (final AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file: com.kirana.services.ProductServicesImpl.java
public AmazonS3 getS3Client() {
    AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider("kirana-s3"));
    s3client.setRegion(Region.getRegion(Regions.AP_SOUTHEAST_1));
    return s3client;
}
From source file: com.prd.AmazonSESSample.java
License: Open Source License
public static void main(String[] args) throws IOException {
    // Construct an object to contain the recipient address.
    Destination destination = new Destination().withToAddresses(new String[] { TO });

    // Create the subject and body of the message.
    Content subject = new Content().withData(SUBJECT);
    Content textBody = new Content().withData(BODY);
    Body body = new Body().withText(textBody);

    // Create a message with the specified subject and body.
    Message message = new Message().withSubject(subject).withBody(body);

    // Assemble the email.
    SendEmailRequest request = new SendEmailRequest().withSource(FROM).withDestination(destination)
            .withMessage(message);

    try {
        System.out.println("Attempting to send an email through Amazon SES by using the AWS SDK for Java...");

        /*
         * The ProfileCredentialsProvider will return your [default] credential profile by reading from
         * the credentials file located at (/Users/prabhjitsingh/.aws/credentials).
         *
         * TransferManager manages a pool of threads, so we create a single instance and share it
         * throughout our application.
         */
        AWSCredentials credentials = null;
        try {
            credentials = new ProfileCredentialsProvider("default").getCredentials();
        } catch (Exception e) {
            throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                    + "Please make sure that your credentials file is at the correct "
                    + "location (/Users/prabhjitsingh/.aws/credentials), and is in valid format.", e);
        }

        // Instantiate an Amazon SES client, which will make the service call with the supplied AWS credentials.
        AmazonSimpleEmailServiceClient client = new AmazonSimpleEmailServiceClient(credentials);

        // Choose the AWS region of the Amazon SES endpoint you want to connect to. Note that your production
        // access status, sending limits, and Amazon SES identity-related settings are specific to a given
        // AWS region, so be sure to select an AWS region in which you set up Amazon SES. Here, we are using
        // the US East (N. Virginia) region. Examples of other regions that Amazon SES supports are US_WEST_2
        // and EU_WEST_1. For a complete list, see http://docs.aws.amazon.com/ses/latest/DeveloperGuide/regions.html
        Region REGION = Region.getRegion(Regions.US_EAST_1);
        client.setRegion(REGION);

        // Send the email.
        client.sendEmail(request);
        System.out.println("Email sent!");
    } catch (Exception ex) {
        System.out.println("The email was not sent.");
        System.out.println("Error message: " + ex.getMessage());
    }
}
From source file: com.saife.sample.S3Sample.java
License: Apache License
private void initS3() {
    // S3 credential identity
    final String me = "john.curtis@saife-tiprnet";
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider(me).getCredentials();
    } catch (final Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/home/builder/.aws/credentials), and is in valid format.", e);
    }

    s3 = new AmazonS3Client(credentials);
    final Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    // define a bucket name
    bucketName = null;
    try {
        /*
         * List the buckets in your account
         */
        for (final Bucket bucket : s3.listBuckets()) {
            if (bucket.getName().startsWith("saife-test-bucket")) {
                bucketName = bucket.getName();
                System.out.println("Found Test Bucket:" + bucket.getName());
            }
        }

        /*
         * Create a globally unique bucket name if needed.
         */
        if (null == bucketName) {
            bucketName = "saife-test-bucket" + UUID.randomUUID();
            System.out.println("Creating bucket " + bucketName + "\n");
            s3.createBucket(bucketName);
        }
    } catch (final AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (final AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }

    System.out.println("S3 services are enabled.");
}
From source file: com.solomon.aws.service.AwsUtil.java
public static AWSCredentials getAwsCredentials() {
    try {
        AWSCredentials credentials = new ProfileCredentialsProvider(DEVELOPER_PROFILE_NAME).getCredentials();
        return credentials;
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location, and is in valid format.", e);
    }
}
From source file: com.supprema.utils.S3Sample.java
License: Open Source License
public static void main(String[] args) throws IOException {
    /*
     * The ProfileCredentialsProvider will return your [fabiano-user-s3] credential profile by reading
     * from the credentials file located at (/Users/fabianorodriguesmatias/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("fabiano-user-s3").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
                "Cannot load the credentials from the credential profiles file. "
                        + "Please make sure that your credentials file is at the correct "
                        + "location (/Users/fabianorodriguesmatias/.aws/credentials), and is in valid format.",
                e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique, so once a bucket name
         * has been taken by any user, you can't create another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to keep your data closer
         * to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to S3, or upload directly an
         * InputStream if you know the length of the data in the stream. You can also specify your own
         * metadata when uploading to S3, which allows you to set a variety of options like content-type
         * and content-encoding, plus additional metadata specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of the object's metadata and a
         * stream from which to read the contents. It's important to read the contents of the stream as
         * quickly as possible since the data is streamed directly from Amazon S3 and your network
         * connection will remain open until you read all the data or close the input stream.
         *
         * GetObjectRequest also supports several other options, including conditional downloading of
         * objects based on modification times, ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for listing the objects in
         * your bucket. Keep in mind that buckets with many objects might truncate their results when
         * listing their objects, so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket, there is no way to
         * undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be deleted, so remember to
         * delete any objects from your buckets before you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file: com.trulia.stail.Stail.java
License: Apache License
public static void main(String[] args) {
    final Stail stail = new Stail();

    JCommander jct = new JCommander(stail);
    jct.setProgramName("stail");
    try {
        jct.parse(args);

        AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
        if (stail.profile != null) {
            credentialsProvider = new ProfileCredentialsProvider(stail.profile);
        }

        if (stail.role != null) {
            credentialsProvider = new STSAssumeRoleSessionCredentialsProvider.Builder(stail.role, "stail")
                    .withStsClient(AWSSecurityTokenServiceClientBuilder.standard()
                            .withCredentials(credentialsProvider).build())
                    .build();
        }

        AmazonKinesis client = AmazonKinesisClientBuilder.standard().withRegion(stail.region)
                .withCredentials(credentialsProvider).build();

        // prepare the initial shard iterators at the LATEST position
        Map<Shard, String> shardIterators = getShardIterators(client, stail.stream, stail.start);

        IRecordProcessor processor = stail.json ? new JSONRecordProcessor() : new RawRecordProcessor();

        Map<Shard, RateLimiter> rateLimiters = new HashMap<>();
        shardIterators.keySet()
                .forEach(shard -> rateLimiters.put(shard, RateLimiter.create(MAX_SHARD_THROUGHPUT)));

        long end = Strings.isNullOrEmpty(stail.duration) ? Long.MAX_VALUE
                : System.currentTimeMillis() + Duration.parse(stail.duration).toMillis();

        Set<String> reshardedShards = new HashSet<>();
        Map<Shard, String> sequenceNumbers = new HashMap<>();

        while (System.currentTimeMillis() < end) {
            if (!reshardedShards.isEmpty()) {
                // get the new list of shards
                List<Shard> shards = getShards(client, stail.stream);
                for (Shard shard : shards) {
                    if (!Strings.isNullOrEmpty(shard.getParentShardId())
                            && reshardedShards.contains(shard.getParentShardId())) {
                        // the old shard was split, so we need to consume this new shard from the beginning
                        shardIterators.put(shard, getOldestShardIterator(client, stail.stream, shard));
                    } else if (!Strings.isNullOrEmpty(shard.getAdjacentParentShardId())
                            && reshardedShards.contains(shard.getAdjacentParentShardId())) {
                        // the old shards were merged into a new shard
                        shardIterators.put(shard, getOldestShardIterator(client, stail.stream, shard));
                    }
                }

                reshardedShards.clear();
            }

            for (Shard shard : Lists.newArrayList(shardIterators.keySet())) {
                String shardIterator = shardIterators.remove(shard);

                GetRecordsRequest getRecordsRequest = new GetRecordsRequest();
                getRecordsRequest.setShardIterator(shardIterator);
                getRecordsRequest.setLimit(BATCH_SIZE);

                try {
                    GetRecordsResult getRecordsResult = client.getRecords(getRecordsRequest);
                    List<Record> records = getRecordsResult.getRecords();
                    processor.processRecords(records, null);

                    shardIterator = getRecordsResult.getNextShardIterator();

                    if (records.size() <= 0) {
                        // nothing on the stream yet, so lets wait a bit to see if something appears
                        TimeUnit.SECONDS.sleep(1);
                    } else {
                        int bytesRead = records.stream().map(record -> record.getData().position())
                                .reduce((_1, _2) -> _1 + _2).get();
                        sequenceNumbers.put(shard, records.get(records.size() - 1).getSequenceNumber());

                        // optionally sleep if we have hit the limit for this shard
                        rateLimiters.get(shard).acquire(bytesRead);
                    }

                    if (!Strings.isNullOrEmpty(shardIterator)) {
                        shardIterators.put(shard, shardIterator);
                    } else {
                        reshardedShards.add(shard.getShardId());
                    }
                } catch (ProvisionedThroughputExceededException e) {
                    logger.warn("tripped the max throughput. Backing off: {}", e.getMessage());
                    TimeUnit.SECONDS.sleep(6); // we tripped the max throughput. Back off

                    // add the original iterator back into the map so we can try it again
                    shardIterators.put(shard, shardIterator);
                } catch (ExpiredIteratorException e) {
                    logger.debug("Iterator expired", e);

                    String sequenceNumber = sequenceNumbers.get(shard);
                    if (sequenceNumber == null) {
                        logger.warn("No previously known sequence number for {}. Moving to LATEST",
                                shard.getShardId());
                        shardIterators.put(shard, getShardIterator(client, stail.stream, shard, null));
                    } else {
                        shardIterators.put(shard,
                                getShardIteratorAtSequenceNumber(client, stail.stream, shard, sequenceNumber));
                    }
                }
            }
        }
    } catch (ParameterException e) {
        jct.usage();
        System.exit(1);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        System.exit(2);
    }
}
From source file: com.venu.springmvc.dao.AmazonDynamoDBDAO.java
License: Open Source License
/**
 * The only information needed to create a client are security credentials consisting of the AWS Access
 * Key ID and Secret Access Key. All other configuration, such as the service endpoints, are performed
 * automatically. Client parameters, such as proxies, can be specified in an optional ClientConfiguration
 * object when constructing a client.
 *
 * @see com.amazonaws.auth.BasicAWSCredentials
 * @see com.amazonaws.auth.ProfilesConfigFile
 * @see com.amazonaws.ClientConfiguration
 */
private static void init() throws Exception {
    /*
     * The ProfileCredentialsProvider will return your [default] credential profile by reading from the
     * credentials file located at (C:\\Users\\vabbu\\.aws\\credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (C:\\Users\\vabbu\\.aws\\credentials), and is in valid format.", e);
    }
    // dynamoDB = new AmazonDynamoDBClient(credentials);
    dynamoDB = new AmazonDynamoDBClient();

    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    dynamoDB.setRegion(usWest2);
}
From source file: controllers.s3locationmodify.java
License: Open Source License
public static void main(String[] args) throws Exception {
    /*
     * The ProfileCredentialsProvider will return your [default] credential profile by reading from the
     * credentials file located at (/home/sravya/.aws/credentials).
     *
     * TransferManager manages a pool of threads, so we create a single instance and share it throughout
     * our application.
     */
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/home/sravya/.aws/credentials), and is in valid format.", e);
    }

    int argLen = args.length;
    Region reg = Region.getRegion(Regions.US_WEST_2);
    int hack = 0;
    int userrequests;
    try {
        userrequests = Integer.parseInt(args[argLen - 1]);
    } catch (NumberFormatException e) {
        userrequests = 1;
        String use = args[argLen - 1];
        if (use.equals("Australia")) {
            hack = 0;
        } else if (use.equals("SouthAfrica")) {
            hack = 1;
        } else if (use.equals("India")) {
            hack = 2;
        } else if (use.equals("UnitedKingdom")) {
            hack = 3;
        } else if (use.equals("China")) {
            hack = 4;
        } else if (use.equals("Germany")) {
            hack = 5;
        } else if (use.equals("France")) {
            hack = 6;
        } else if (use.equals("Japan")) {
            hack = 7;
        } else if (use.equals("Thailand")) {
            hack = 8;
        } else if (use.equals("Spain")) {
            hack = 9;
        }
    }

    int filecount = 0;
    for (int m = 0; m < argLen - 1; m++) {
        filecount = filecount + 1;
    }
    int numphotos = filecount;
    int numIDCs = numphotos;
    int locationInd = 0;
    long[] cusize = new long[6];
    long photosspace = 0;

    for (int mm = 0; mm < userrequests; mm++) {
        for (int i = 0; i < cusize.length; i++)
            cusize[i] = 0;
        ArrayList<Integer> originalgarph = new ArrayList<Integer>();
        loadObj ob = calculateload();
        long[] regionload = new long[5];
        regionload = ob.load;
        for (int i = 0; i < regionload.length; i++) {
            if (regionload[i] == 0) {
                regionload[i] = 1000;
            }
        }

        /*for (int i=0; i<5 ;i++) {
            System.out.println(regionload[i]);
            double diffload=0;
            double avgload=0;
            int count=0;
            for(int j=0;j<5; j++) {
                if(j!=i) {
                    avgload = avgload + regionload[j];
                    count++;
                }
            }
            avgload= (avgload / count);
            diffload= (regionload[i]/avgload) ;
            System.out.println("avgload: "+ avgload);
            System.out.println("diffload: "+ diffload);
            if(diffload < 1.8) {
                originalgarph.add(i+1);
            }
        }*/

        for (int i = 0; i < 5; i++) {
            originalgarph.add(i + 1);
        }

        availSpaceNorthCal = maxsize - regionload[0];
        photosspace = numphotos * 6000;
        if (availSpaceNorthCal < photosspace) {
            availSpaceNorthCal = maxsize;
            cusize[1] = availSpaceNorthCal / 6000;
        } else {
            cusize[1] = availSpaceNorthCal / 6000;
        }

        availSpaceOregon = maxsize - regionload[1];
        photosspace = numphotos * 6000;
        if (availSpaceOregon < photosspace) {
            availSpaceOregon = maxsize;
            cusize[2] = availSpaceOregon / 6000;
        } else {
            cusize[2] = availSpaceOregon / 6000;
        }

        availSpaceSingapore = maxsize - regionload[2];
        photosspace = numphotos * 6000;
        if (availSpaceSingapore < photosspace) {
            availSpaceSingapore = maxsize;
            cusize[3] = availSpaceSingapore / 6000;
        } else {
            cusize[3] = availSpaceSingapore / 6000;
        }

        availSpaceTokyo = maxsize - regionload[3];
        photosspace = numphotos * 6000;
        if (availSpaceTokyo < photosspace) {
            availSpaceTokyo = maxsize;
            cusize[4] = availSpaceTokyo / 6000;
        } else {
            cusize[4] = availSpaceTokyo / 6000;
        }

        availSpaceSydney = maxsize - regionload[4];
        photosspace = numphotos * 6000;
        if (availSpaceSydney < photosspace) {
            availSpaceSydney = maxsize;
            cusize[5] = availSpaceSydney / 6000;
        } else {
            cusize[5] = availSpaceSydney / 6000;
        }

        int cou = originalgarph.size();
        String fileName = algorithmPath + "request.alg";
        PrintWriter writer = new PrintWriter(fileName, "UTF-8");
        for (int i = 0; i < cou; i++) {
            int value = originalgarph.get(i);
            writer.print(value + " ");
            System.out.println(" IDC " + String.valueOf(value));
        }
        writer.println();
        for (int i = 0; i < originalgarph.size(); i++) {
            int j = originalgarph.get(i);
            writer.print(cusize[j] + " ");
        }
        writer.println();
        writer.println(1);
        if (userrequests != 1) {
            locationInd = randInt(0, 9);
        } else {
            locationInd = hack;
        }
        writer.println(locationInd);
        System.out.println(" locationInd " + String.valueOf(locationInd));
        writer.println(numphotos);
        System.out.println(" numphotos " + String.valueOf(numphotos));
        writer.println(numIDCs);
        System.out.println(" numIDCs " + String.valueOf(numIDCs));
        writer.close();
        originalgarph.clear();

        try {
            String prg = "import sys\nprint int(sys.argv[1])+int(sys.argv[2])\n";
            String pythonCmd = "/usr/bin/python " + algorithmPath + "ramd.py";
            Process p = Runtime.getRuntime().exec(pythonCmd);
            try {
                Thread.sleep(2000); // 1000 milliseconds is one second.
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }

            String fileName1 = algorithmPath + "work.alg";
            File log = new File(fileName1);
            int filenumber = 0;
            String fileName2 = algorithmPath + "filenumber.alg";
            Scanner numberscan = new Scanner(new File(fileName2));
            if (numberscan.hasNextLine()) {
                filenumber = numberscan.nextInt();
            } else {
                filenumber = 1;
            }
            numberscan.close();

            ArrayList<String> photofnames = new ArrayList<String>();
            ArrayList<String> argFNames = new ArrayList<String>();
            for (int ll = 0; ll < argLen - 1; ll++) {
                photofnames.add(photosPath + args[ll] + ".bmp");
                argFNames.add(args[ll]);
                System.out.println("Will upload " + photosPath + args[ll] + ".bmp");
            }

            String sCurrentLine;
            BufferedReader br = null;
            br = new BufferedReader(new FileReader(fileName1));
            int currLine = 0;
            Integer userNumber = 0;
            ArrayList<Integer> idcSet = new ArrayList<Integer>();
            ArrayList<Integer> photonos = new ArrayList<Integer>();
            int currU = 0;
            while ((sCurrentLine = br.readLine()) != null) {
                if (currLine % 3 == 0) {
                    userNumber = Integer.parseInt(sCurrentLine);
                    idcSet.clear();
                    photonos.clear();
                }
                if (currLine % 3 == 1) {
                    String[] idcnums = sCurrentLine.split(" ");
                    String regions = "";
                    String regionsFileName = algorithmPath + "regions.alg";
                    PrintWriter regionwriter = new PrintWriter(regionsFileName, "UTF-8");
                    for (int numIdcs = 0; numIdcs < idcnums.length; numIdcs++) {
                        idcSet.add(Integer.parseInt(idcnums[numIdcs]));
                        if (idcnums[numIdcs].equals("1")) {
                            regions = "region1";
                            regionwriter.print(regions + " ");
                        } else if (idcnums[numIdcs].equals("2")) {
                            regions = "region2";
                            regionwriter.print(regions + " ");
                        } else if (idcnums[numIdcs].equals("3")) {
                            regions = "region3";
                            regionwriter.print(regions + " ");
                        } else if (idcnums[numIdcs].equals("4")) {
                            regions = "region4";
                            regionwriter.print(regions + " ");
                        } else if (idcnums[numIdcs].equals("5")) {
                            regions = "region5";
                            regionwriter.print(regions + " ");
                        }
                        System.out.println("IDCs: " + idcnums[numIdcs]);
                    }
                    regionwriter.close();
                }
                if (currLine % 3 == 2) {
                    String[] idcpnums = sCurrentLine.split(" ");
                    for (int numIdcs = 0; numIdcs < idcpnums.length; numIdcs++) {
                        photonos.add(Integer.parseInt(idcpnums[numIdcs]));
                    }
                    ArrayList<String> smallestBnames = new ArrayList<String>();
                    ArrayList<String> bucketnames = new ArrayList<String>();
                    for (int tot = 0; tot < idcSet.size(); tot++) {
                        smallestBnames.add(ob.bnames[idcSet.get(tot) - 1]);
                    }

                    AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
                    int currPno = 0;
                    ArrayList<String> transferThese = new ArrayList<String>();
                    ArrayList<Integer> transferThesebno = new ArrayList<Integer>();
                    System.out.println(String.valueOf(idcpnums.length));

                    // upload everything to 1 bucket
                    for (int numIdcs = 0; numIdcs < idcpnums.length; numIdcs++) {
                        for (int numP = 0; numP < photonos.get(numIdcs); numP++) {
                            String uploadFileName = photofnames.get(currPno);
                            String keyName = String.valueOf(userNumber) + "_" + argFNames.get(currPno) + '_'
                                    + filenumber++ + ".bmp";
                            if (numIdcs > 0) {
                                transferThese.add(keyName);
                                transferThesebno.add(numIdcs);
                            }
                            try {
                                System.out.println("Uploading " + uploadFileName + " to "
                                        + smallestBnames.get(0) + " with keyname " + keyName);
                                File file = new File(uploadFileName);
                                s3client.putObject(new PutObjectRequest(smallestBnames.get(0), keyName, file));
                            } catch (AmazonServiceException ase) {
                                System.out.println("Caught an AmazonServiceException, which "
                                        + "means your request made it "
                                        + "to Amazon S3, but was rejected with an error response"
                                        + " for some reason.");
                                System.out.println("Error Message: " + ase.getMessage());
                                System.out.println("HTTP Status Code: " + ase.getStatusCode());
                                System.out.println("AWS Error Code: " + ase.getErrorCode());
                                System.out.println("Error Type: " + ase.getErrorType());
                                System.out.println("Request ID: " + ase.getRequestId());
                            }
                            currPno++;
                        }
                    }

                    // transfer other files
                    System.out.println("Number of files to transfer " + String.valueOf(transferThese.size()));
                    for (int tot = 0; tot < transferThese.size(); tot++) {
                        String source = smallestBnames.get(0);
                        String dest = smallestBnames.get(transferThesebno.get(tot));
                        String fname = transferThese.get(tot);
                        String src = "s3://" + source + "/" + fname;
                        String d = "s3://" + dest;
                        String cmd = "aws s3 mv " + src + " " + d + "\n";
                        System.out.println("Moving " + src + " to " + d + "\n");
                        Process p1 = Runtime.getRuntime().exec(cmd);
                    }

                    currU++;
                    if (currU >= 1) {
                        transferThese.clear();
                        transferThesebno.clear();
                        photofnames.clear();
                        argFNames.clear();
                        smallestBnames.clear();
                        break;
                    }
                }
                currLine++;
            }

            String fileNumberFilePath = algorithmPath + "filenumber.alg";
            PrintWriter numberwriter = new PrintWriter(fileNumberFilePath, "UTF-8");
            numberwriter.println(filenumber);
            numberwriter.close();
        } catch (Exception e) {
        }
    }

    // String s = null;
    // Process p1 = Runtime.getRuntime().exec("ls -alrt");
    //
    // BufferedReader stdInput = new BufferedReader(new InputStreamReader(p1.getInputStream()));
    //
    // BufferedReader stdError = new BufferedReader(new InputStreamReader(p1.getErrorStream()));
    //
    // // read the output from the command
    // System.out.println("Here is the standard output of the command:\n");
    // while ((s = stdInput.readLine()) != null) {
    //     System.out.println(s);
    // }
}