List of usage examples for the com.amazonaws.auth.BasicAWSCredentials constructor
public BasicAWSCredentials(String accessKey, String secretKey)
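Before the examples, a minimal sketch of the constructor on its own may help; the key pair and region below are placeholders, not working credentials:

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class BasicAWSCredentialsExample {
    public static void main(String[] args) {
        // Placeholder key pair; in practice, load these from a secure store.
        BasicAWSCredentials credentials = new BasicAWSCredentials("YOUR_ACCESS_KEY_ID", "YOUR_SECRET_ACCESS_KEY");

        // Wrap in a static provider so the credentials can be passed to any v1 client builder.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(credentials))
                .withRegion("us-east-1")
                .build();
        System.out.println("Client created: " + s3);
    }
}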
From source file: hu.mta.sztaki.lpds.cloud.entice.imageoptimizer.iaashandler.amazontarget.Storage.java
License: Apache License
/**
 * @param endpoint  S3 endpoint URL
 * @param accessKey Access key
 * @param secretKey Secret key
 * @param bucket    Bucket name
 * @param path      Key name of the object to download (path + file name)
 * @param file      Local file to download to
 * @throws Exception On any error
 */
public static void download(String endpoint, String accessKey, String secretKey, String bucket, String path,
        File file) throws Exception {
    AmazonS3Client amazonS3Client = null;
    InputStream in = null;
    OutputStream out = null;
    try {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(MAX_CONNECTIONS);
        clientConfiguration.setMaxErrorRetry(PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY);
        clientConfiguration.setConnectionTimeout(ClientConfiguration.DEFAULT_CONNECTION_TIMEOUT);
        amazonS3Client = new AmazonS3Client(awsCredentials, clientConfiguration);
        S3ClientOptions clientOptions = new S3ClientOptions().withPathStyleAccess(true);
        amazonS3Client.setS3ClientOptions(clientOptions);
        amazonS3Client.setEndpoint(endpoint);
        S3Object object = amazonS3Client.getObject(new GetObjectRequest(bucket, path));
        in = object.getObjectContent();
        byte[] buf = new byte[BUFFER_SIZE];
        out = new FileOutputStream(file);
        int count;
        while ((count = in.read(buf)) != -1)
            out.write(buf, 0, count);
        out.close();
        in.close();
    } catch (AmazonClientException | IOException x) { // AmazonServiceException is a subclass of AmazonClientException
        Shrinker.myLogger.info("download error: " + x.getMessage());
        throw new Exception("download exception", x);
    } finally {
        if (in != null) {
            try { in.close(); } catch (Exception e) {}
        }
        if (out != null) {
            try { out.close(); } catch (Exception e) {}
        }
        if (amazonS3Client != null) {
            try { amazonS3Client.shutdown(); } catch (Exception e) {}
        }
    }
}
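The example above relies on the legacy AmazonS3Client constructor plus setEndpoint. A hedged sketch of the same endpoint-override and path-style setup with AmazonS3ClientBuilder follows; the endpoint URL and signing region are assumptions for an S3-compatible store:

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

class EndpointOverrideSketch {
    static AmazonS3 buildClient(String accessKey, String secretKey) {
        // Sketch only: the endpoint and region values are placeholders for a custom store.
        return AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
                .withEndpointConfiguration(
                        new AwsClientBuilder.EndpointConfiguration("https://s3.example.org", "us-east-1"))
                .withPathStyleAccessEnabled(true) // builder equivalent of S3ClientOptions.withPathStyleAccess(true)
                .build();
    }
}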
From source file: hu.mta.sztaki.lpds.cloud.entice.imageoptimizer.iaashandler.amazontarget.Storage.java
License: Apache License
/**
 * @param file      Local file to upload
 * @param endpoint  S3 endpoint URL
 * @param accessKey Access key
 * @param secretKey Secret key
 * @param bucket    Bucket name
 * @param path      Key name (path + file name)
 * @throws Exception On any error
 */
public static void upload(File file, String endpoint, String accessKey, String secretKey, String bucket,
        String path) throws Exception {
    AmazonS3Client amazonS3Client = null;
    try {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(MAX_CONNECTIONS);
        clientConfiguration.setMaxErrorRetry(PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY);
        clientConfiguration.setConnectionTimeout(ClientConfiguration.DEFAULT_CONNECTION_TIMEOUT);
        amazonS3Client = new AmazonS3Client(awsCredentials, clientConfiguration);
        S3ClientOptions clientOptions = new S3ClientOptions().withPathStyleAccess(true);
        amazonS3Client.setS3ClientOptions(clientOptions);
        amazonS3Client.setEndpoint(endpoint);
        // amazonS3Client.putObject(new PutObjectRequest(bucket, path, file)); // up to 5GB
        TransferManager tm = new TransferManager(amazonS3Client); // up to 5TB
        Upload upload = tm.upload(bucket, path, file);
        // To poll progress:
        // while (!upload.isDone()) { upload.getProgress().getBytesTransferred(); Thread.sleep(1000); }
        upload.waitForCompletion();
        tm.shutdownNow();
    } catch (AmazonClientException x) { // also covers AmazonServiceException
        Shrinker.myLogger.info("upload error: " + x.getMessage());
        throw new Exception("upload exception", x);
    } finally {
        if (amazonS3Client != null) {
            try { amazonS3Client.shutdown(); } catch (Exception e) {}
        }
    }
}
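The progress-polling loop above is commented out. As an event-driven alternative, here is a sketch that attaches a ProgressListener to the Upload; the helper class and method names are illustrative only:

import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;

class UploadProgressSketch {
    static void uploadWithProgress(AmazonS3 s3, String bucket, String key, File file) throws InterruptedException {
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
        try {
            Upload upload = tm.upload(bucket, key, file);
            // The cast disambiguates from the deprecated ProgressListener overload.
            upload.addProgressListener((ProgressListener) event ->
                    System.out.println("bytes transferred so far: " + upload.getProgress().getBytesTransferred()));
            upload.waitForCompletion();
        } finally {
            tm.shutdownNow(false); // keep the injected client open for the caller
        }
    }
}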
From source file: hudson.plugins.ec2.EC2Cloud.java
License: Open Source License
public static AWSCredentialsProvider createCredentialsProvider(final boolean useInstanceProfileForCredentials,
        final String accessId, final Secret secretKey) {
    if (useInstanceProfileForCredentials) {
        return new InstanceProfileCredentialsProvider();
    }
    BasicAWSCredentials credentials = new BasicAWSCredentials(accessId, Secret.toString(secretKey));
    return new StaticCredentialsProvider(credentials);
}
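A possible extension, sketched below, chains both strategies so the instance profile is preferred and the static keys act as a fallback. This is not the plugin's actual behavior, just an assumption about how the two providers could be combined; the key pair is a placeholder:

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSCredentialsProviderChain;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.internal.StaticCredentialsProvider;

class ChainedProviderSketch {
    // Try the EC2 instance profile first, then fall back to explicit keys.
    static AWSCredentialsProvider build() {
        return new AWSCredentialsProviderChain(
                new InstanceProfileCredentialsProvider(),
                new StaticCredentialsProvider(
                        new BasicAWSCredentials("YOUR_ACCESS_KEY_ID", "YOUR_SECRET_ACCESS_KEY")));
    }
}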
From source file: hydrograph.engine.spark.datasource.utils.AWSS3Util.java
License: Apache License
public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;
    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is: " + runFileTransferEntity.getBucketName());
    log.info("folder name is: " + runFileTransferEntity.getFolder_name_in_bucket());
    String amazonFileUploadLocationOriginal = null;
    FileInputStream stream = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.isFile() || filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            Log.error("Invalid local path. Please provide a valid path");
            throw new AWSUtilException("Invalid local path");
        }
    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }
            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name: " + filepath);
            s3folderName = filepath;
            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }
            File f = new File(runFileTransferEntity.getLocalPath());
            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                File dir = new File(hdfspath);
                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    InputStream is = null;
                    OutputStream os = null;
                    String localDirectory = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;
                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                        DateFormat df = new SimpleDateFormat("dd-MM-yyyy");
                        String dateWithoutTime = df.format(new Date()).toString();
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);
                        boolean success = dirs.mkdirs();
                        // Copy each HDFS file to the temp folder, then upload it.
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }
                        for (File files : dirs.listFiles()) {
                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }
                        }
                    } catch (IOException e) {
                        Log.error("IOException occurred while transferring the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {
                            FileUtils.deleteDirectory(dirs);
                        }
                    }
                } else {
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    stream = new FileInputStream("/tmp/" + f.getName());
                    File S3file = new File("/tmp/" + f.getName());
                    // The original snippet referenced an out-of-scope variable `file` here;
                    // S3file (the copy pulled out of HDFS) is the intended upload source.
                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, S3file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());
                if (Files.isDirectory(inputfile)) {
                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {
                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);
                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }
                    }
                } else {
                    PutObjectRequest putObjectRequest = null;
                    File file = new File(runFileTransferEntity.getLocalPath());
                    stream = new FileInputStream(runFileTransferEntity.getLocalPath());
                    putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal, keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }
        } catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError())
                    Log.error("Incorrect details provided. Please provide valid details", e);
                throw new AWSUtilException("Incorrect details provided");
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occurred while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but fail-on-error is set to false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}
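The method above walks local directories and uploads files one putObject at a time. A sketch of the same directory transfer via TransferManager.uploadDirectory, which parallelizes the uploads, is shown below; the bucket name and key prefix are placeholders:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.MultipleFileUpload;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import java.io.File;

class DirectoryUploadSketch {
    static void uploadDirectory(AmazonS3 s3Client, File localDir) throws InterruptedException {
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3Client).build();
        try {
            // Recursively upload every file under localDir beneath the given key prefix.
            MultipleFileUpload upload = tm.uploadDirectory("my-bucket", "my/prefix", localDir, true);
            upload.waitForCompletion();
        } finally {
            tm.shutdownNow(false);
        }
    }
}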
From source file: hydrograph.engine.spark.datasource.utils.AWSS3Util.java
License: Apache License
public void download(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util download");
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.exists() && filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            throw new AWSUtilException("Invalid local path");
        }
    boolean fail_if_exist = false;
    int retryAttempt = 0;
    int i;
    String amazonFileUploadLocationOriginal = null;
    String keyName = null;
    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }
            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            if (filepath.lastIndexOf("/") != -1) {
                s3folderName = filepath.substring(0, filepath.lastIndexOf("/"));
                keyName = filepath.substring(filepath.lastIndexOf("/") + 1);
            } else {
                keyName = filepath;
            }
            log.debug("keyName is: " + keyName);
            log.debug("bucket name is: " + runFileTransferEntity.getBucketName());
            log.debug("folder name is: " + runFileTransferEntity.getFolder_name_in_bucket());
            if (s3folderName != null) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }
            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                String outputPath = runFileTransferEntity.getLocalPath();
                String s1 = outputPath.substring(7, outputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File f = new File("/tmp");
                if (!f.exists())
                    f.mkdir();
                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream("/tmp/" + keyName));
                } else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                String s = outputPath.substring(7, outputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path local = new Path("/tmp/" + keyName);
                Path hdfs = new Path(hdfspath);
                hdfsFileSystem.copyFromLocalFile(local, hdfs);
            } else {
                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream(
                            runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                } else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }
            }
        } catch (AmazonServiceException e) {
            log.error("Amazon Service Exception", e);
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError()) {
                    Log.error("Incorrect details provided. Please provide correct details", e);
                    throw new AWSUtilException("Incorrect details provided");
                } else {
                    Log.error("Unknown Amazon exception occurred", e);
                }
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Error e) {
            Log.error("Error occurred while downloading the file", e);
            throw new AWSUtilException(e);
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                // ignored
            } catch (Error err) {
                Log.error("Error occurred while downloading");
                throw new AWSUtilException(err);
            }
            continue;
        }
        done = true;
        break;
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but fail-on-error is set to false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError()) {
            throw new AWSUtilException("File transfer failed");
        }
    }
    log.debug("Finished AWSS3Util download");
}
From source file: ics.uci.edu.amazons3.S3Sample.java
License: Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    // Placeholders shown here; never hard-code real credentials in source.
    final AmazonS3 s3 = new AmazonS3Client(
            new BasicAWSCredentials("YOUR_ACCESS_KEY_ID", "YOUR_SECRET_ACCESS_KEY"));

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account.
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
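The sample's opening comment says credentials should come from a properties file, yet the original code constructed them inline (with a real-looking key pair, replaced above with placeholders). A sketch of the file-based variant with PropertiesCredentials; the file name is an assumption:

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.PropertiesCredentials;
import java.io.File;
import java.io.IOException;

class PropertiesCredentialsSketch {
    static AWSCredentials load() throws IOException {
        // Expects a properties file with accessKey= and secretKey= entries.
        return new PropertiesCredentials(new File("AwsCredentials.properties"));
    }
}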
From source file: ie.peternagy.jcrypto.module.config.S3ConfigHandler.java
License: Open Source License
@Override
public void validateConfig(Map config) {
    AWSCredentials credentials = new BasicAWSCredentials((String) config.get("access-key"),
            (String) config.get("secret-key"));
    AmazonS3 s3client = new AmazonS3Client(credentials);
    try {
        System.out.println("S3 config validation...\n");
        // The original discarded this return value, so "Valid configuration!" was
        // printed even when the bucket was missing; report the actual result instead.
        boolean bucketExists = s3client.doesBucketExist((String) config.get("bucket-name"));
        System.out.println(bucketExists ? "Valid configuration!\n" : "Bucket does not exist!\n");
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file: ie.peternagy.jcrypto.module.config.S3ConfigHandler.java
License: Open Source License
@Override
public Object getInitializedServiceClient() {
    if (service != null) {
        AWSCredentials credentials = new BasicAWSCredentials((String) service.get("access-key"),
                (String) service.get("secret-key"));
        return new AmazonS3Client(credentials);
    }
    return null;
}
From source file: imperial.modaclouds.monitoring.datacollectors.monitors.CloudWatchMonitor.java
License: BSD License
@Override
public void run() {
    long startTime = 0;
    String accessKeyId = null;
    String secretKey = null;
    String endpoint = null;
    ArrayList<String> measureNames = null;

    while (!cwmt.isInterrupted()) {
        // Refresh the monitored-metric configuration every 10 seconds.
        if (System.currentTimeMillis() - startTime > 10000) {
            measureNames = new ArrayList<String>();
            metricList = new ArrayList<Metric>();
            for (String metric : getProvidedMetrics()) {
                try {
                    VM resource = new VM(Config.getInstance().getVmType(), Config.getInstance().getVmId());
                    if (dcAgent.shouldMonitor(resource, metric)) {
                        Map<String, String> parameters = dcAgent.getParameters(resource, metric);
                        Metric temp = new Metric();
                        temp.setMetricName(metric);
                        measureNames.add(metric.replace("CloudWatch", ""));
                        accessKeyId = parameters.get("accessKey");
                        instanceID = parameters.get("instanceID");
                        secretKey = parameters.get("secretKey");
                        period = Integer.valueOf(parameters.get("samplingTime")) * 1000;
                        temp.setSamplingProb(Double.valueOf(parameters.get("samplingProbability")));
                        endpoint = parameters.get("endpoint");
                        metricList.add(temp);
                    }
                } catch (NumberFormatException | ConfigurationException e) {
                    e.printStackTrace();
                }
            }
            cloudWatchClient = new AmazonCloudWatchClient(new BasicAWSCredentials(accessKeyId, secretKey));
            cloudWatchClient.setEndpoint(endpoint);
            startTime = System.currentTimeMillis();
        }

        MeasureSet measureSet = null;
        try {
            measureSet = this.retrieveMeasureSet(measureNames);
        } catch (ParseException e) {
            e.printStackTrace();
        }
        if (measureSet != null) {
            for (String measureName : measureSet.getMeasureNames()) {
                // System.out.println(measureName + " " + String.valueOf(measureSet.getMeasure(measureName)));
                try {
                    for (Metric metric : metricList) {
                        if (metric.getMetricName().equals(measureName)) {
                            if (Math.random() < metric.getSamplingProb()) {
                                logger.info("Sending datum: {} {} {}",
                                        String.valueOf(measureSet.getMeasure(measureName)), measureName,
                                        monitoredTarget);
                                dcAgent.send(
                                        new VM(Config.getInstance().getVmType(), Config.getInstance().getVmId()),
                                        measureName, measureSet.getMeasure(measureName));
                            }
                        }
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
        try {
            Thread.sleep(period);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
}
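This monitor delegates the actual metric fetch to retrieveMeasureSet, which is not shown here. One plausible implementation detail, sketched under that assumption, is a GetMetricStatisticsRequest against the CloudWatch client; the namespace, metric name, and dimension are example values:

import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient;
import com.amazonaws.services.cloudwatch.model.Datapoint;
import com.amazonaws.services.cloudwatch.model.Dimension;
import com.amazonaws.services.cloudwatch.model.GetMetricStatisticsRequest;
import com.amazonaws.services.cloudwatch.model.GetMetricStatisticsResult;
import java.util.Date;

class CloudWatchFetchSketch {
    static void printCpu(AmazonCloudWatchClient client, String instanceId) {
        GetMetricStatisticsRequest request = new GetMetricStatisticsRequest()
                .withNamespace("AWS/EC2")
                .withMetricName("CPUUtilization")
                .withDimensions(new Dimension().withName("InstanceId").withValue(instanceId))
                .withStartTime(new Date(System.currentTimeMillis() - 10 * 60 * 1000)) // last 10 minutes
                .withEndTime(new Date())
                .withPeriod(300) // 5-minute buckets
                .withStatistics("Average");
        GetMetricStatisticsResult result = client.getMetricStatistics(request);
        for (Datapoint dp : result.getDatapoints()) {
            System.out.println(dp.getTimestamp() + " avg CPU: " + dp.getAverage());
        }
    }
}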
From source file: imperial.modaclouds.monitoring.datacollectors.monitors.CostMonitor.java
License: BSD License
@Override
public void run() {
    String accessKeyId = null;
    String secretKey = null;
    ArrayList<String> measureNames = null;
    long startTime = 0;

    while (!cwmt.isInterrupted()) {
        // Refresh the monitored-metric configuration every 10 seconds.
        if (System.currentTimeMillis() - startTime > 10000) {
            measureNames = new ArrayList<String>();
            for (String metric : getProvidedMetrics()) {
                try {
                    VM resource = new VM(Config.getInstance().getVmType(), Config.getInstance().getVmId());
                    if (dcAgent.shouldMonitor(resource, metric)) {
                        Map<String, String> parameters = dcAgent.getParameters(resource, metric);
                        measureNames.add("EstimatedCharges");
                        accessKeyId = parameters.get("accessKey");
                        secretKey = parameters.get("secretKey");
                        period = Integer.valueOf(parameters.get("samplingTime")) * 1000;
                        samplingProb = Double.valueOf(parameters.get("samplingProbability"));
                    }
                } catch (NumberFormatException | ConfigurationException e) {
                    e.printStackTrace();
                }
            }
            cloudWatchClient = new AmazonCloudWatchClient(new BasicAWSCredentials(accessKeyId, secretKey));
            // Billing metrics such as EstimatedCharges are only published to us-east-1.
            cloudWatchClient.setEndpoint("monitoring.us-east-1.amazonaws.com");
            startTime = System.currentTimeMillis();
        }

        MeasureSet measureSet = null;
        try {
            measureSet = this.retrieveMeasureSet(measureNames);
        } catch (ParseException e) {
            e.printStackTrace();
        }
        if (measureSet != null) {
            for (String measureName : measureSet.getMeasureNames()) {
                try {
                    if (Math.random() < samplingProb) {
                        logger.info("Sending datum: {} {} {}", measureSet.getMeasure(measureName), "Cost",
                                monitoredTarget);
                        dcAgent.send(new VM(Config.getInstance().getVmType(), Config.getInstance().getVmId()),
                                "Cost", measureSet.getMeasure(measureName));
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
                // measure.push(measureName, String.valueOf(measureSet.getMeasure(measureName)), "EC2");
            }
        }
        try {
            Thread.sleep(period);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
}