List of usage examples for com.amazonaws ClientConfiguration ClientConfiguration
public ClientConfiguration()
From source file:lumbermill.internal.aws.KinesisClientFactory.java
License:Apache License
/**
 * Creates the async Kinesis client. An explicitly supplied "kinesis_config"
 * object takes precedence; otherwise a ClientConfiguration is assembled from
 * individual settings, honouring the https_proxy environment variable.
 */
private AmazonKinesisAsync getAsyncClient(MapWrap configuration) {
    // A complete, user-supplied client configuration wins over individual settings.
    if (configuration.exists("kinesis_config")) {
        ClientConfiguration supplied = configuration.getObject("kinesis_config");
        return createClient(supplied, configuration);
    }
    ClientConfiguration assembled = new ClientConfiguration()
            .withMaxConnections(configuration.asInt("max_connections", 10))
            .withRequestTimeout(configuration.asInt("request_timeout", 60000));
    String httpsProxy = System.getenv("https_proxy");
    if (httpsProxy != null) {
        URI proxy = URI.create(httpsProxy);
        LOGGER.info("Using proxy {}", proxy);
        assembled.withProxyHost(proxy.getHost()).withProxyPort(proxy.getPort());
    }
    return createClient(assembled, configuration);
}
From source file:lumbermill.internal.aws.S3ClientImpl.java
License:Apache License
public void init() { ClientConfiguration awsConfig = new ClientConfiguration(); if (System.getenv("https_proxy") != null) { URI proxy = URI.create(System.getenv("https_proxy")); awsConfig.setProxyHost(proxy.getHost()); awsConfig.setProxyPort(proxy.getPort()); }/*from w w w .j ava 2 s. com*/ //awsConfig.setConnectionTimeout(2000); //awsConfig.setRequestTimeout(2000); //awsConfig.setSocketTimeout(2000); //awsConfig.setClientExecutionTimeout(2000); AWSCredentialsProvider credentials = new DefaultAWSCredentialsProviderChain(); if (roleArn.isPresent()) { credentials = new STSAssumeRoleSessionCredentialsProvider(credentials, roleArn.get(), "lumbermills3", awsConfig); } s3Client = new AmazonS3Client(credentials, awsConfig); }
From source file:maebackup.MaeBackup.java
License:Open Source License
public static void upload(String lrzname) { try {// w w w. j a va 2s.c om System.out.println("Uploading to Glacier..."); ClientConfiguration config = new ClientConfiguration(); config.setProtocol(Protocol.HTTPS); AmazonGlacierClient client = new AmazonGlacierClient(credentials, config); client.setEndpoint(endpoint); File file = new File(lrzname); String archiveid = ""; if (file.length() < 5 * 1024 * 1024) { System.out.println("File is small, uploading as single chunk"); String treehash = TreeHashGenerator.calculateTreeHash(file); InputStream is = new FileInputStream(file); byte[] buffer = new byte[(int) file.length()]; int bytes = is.read(buffer); if (bytes != file.length()) throw new RuntimeException("Only read " + bytes + " of " + file.length() + " byte file when preparing for upload."); InputStream bais = new ByteArrayInputStream(buffer); UploadArchiveRequest request = new UploadArchiveRequest(vaultname, lrzname, treehash, bais); UploadArchiveResult result = client.uploadArchive(request); archiveid = result.getArchiveId(); } else { long chunks = file.length() / chunksize; while (chunks > 10000) { chunksize <<= 1; chunks = file.length() / chunksize; } String chunksizestr = new Integer(chunksize).toString(); System.out.println( "Starting multipart upload: " + chunks + " full chunks of " + chunksizestr + " bytes"); InitiateMultipartUploadResult imures = client.initiateMultipartUpload( new InitiateMultipartUploadRequest(vaultname, lrzname, chunksizestr)); String uploadid = imures.getUploadId(); RandomAccessFile raf = new RandomAccessFile(file, "r"); byte[] buffer = new byte[chunksize]; for (long x = 0; x < chunks; x++) { try { System.out.println("Uploading chunk " + x + "/" + chunks); raf.seek(x * chunksize); raf.read(buffer); String parthash = TreeHashGenerator.calculateTreeHash(new ByteArrayInputStream(buffer)); String range = "bytes " + (x * chunksize) + "-" + ((x + 1) * chunksize - 1) + "/*"; client.uploadMultipartPart(new UploadMultipartPartRequest(vaultname, 
uploadid, parthash, range, new ByteArrayInputStream(buffer))); } catch (Exception e) { e.printStackTrace(); System.err.println("Error uploading chunk " + x + ", retrying..."); x--; } } if (file.length() > chunks * chunksize) { do { try { System.out.println("Uploading final partial chunk"); raf.seek(chunks * chunksize); int bytes = raf.read(buffer); String parthash = TreeHashGenerator .calculateTreeHash(new ByteArrayInputStream(buffer, 0, bytes)); String range = "bytes " + (chunks * chunksize) + "-" + (file.length() - 1) + "/*"; client.uploadMultipartPart(new UploadMultipartPartRequest(vaultname, uploadid, parthash, range, new ByteArrayInputStream(buffer, 0, bytes))); } catch (Exception e) { e.printStackTrace(); System.err.println("Error uploading final chunk, retrying..."); continue; } } while (false); } System.out.println("Completing upload"); String treehash = TreeHashGenerator.calculateTreeHash(file); CompleteMultipartUploadResult result = client .completeMultipartUpload(new CompleteMultipartUploadRequest(vaultname, uploadid, new Long(file.length()).toString(), treehash)); archiveid = result.getArchiveId(); } System.out.println("Uploaded " + lrzname + " to Glacier as ID " + archiveid); File listfile = new File(cachedir, "archives.lst"); FileWriter fw = new FileWriter(listfile, true); fw.write(archiveid + " " + lrzname + "\n"); fw.close(); } catch (Exception e) { throw new RuntimeException(e); } }
From source file:maebackup.MaeBackup.java
License:Open Source License
public static void download(String filename, String jobid) { try {//from w ww .j av a 2 s . c o m System.out.println("Starting download..."); ClientConfiguration config = new ClientConfiguration(); config.setProtocol(Protocol.HTTPS); AmazonGlacierClient client = new AmazonGlacierClient(credentials, config); client.setEndpoint(endpoint); if (jobid == null || jobid == "") { String archiveid; // Yes, this will screw up on actual 138-character file names, but... yeah. if (filename.length() == 138) { archiveid = filename; } else { File listfile = new File(cachedir, "archives.lst"); Map<File, String> filemap = loadHashes(listfile); archiveid = filemap.get(filename); if (archiveid == null) { System.err.println("Error: Could not find archive ID for file " + filename); System.exit(1); return; } } InitiateJobResult result = client.initiateJob(new InitiateJobRequest(vaultname, new JobParameters().withType("archive-retrieval").withArchiveId(archiveid))); jobid = result.getJobId(); System.out.println("Started download job as ID " + jobid); } else { DescribeJobResult djres = client.describeJob(new DescribeJobRequest(vaultname, jobid)); if (!djres.getStatusCode().equals("Succeeded")) { System.out.println("Job is not listed as Succeeded. 
It is: " + djres.getStatusCode()); System.out.println(djres.getStatusMessage()); System.exit(2); } long size = djres.getArchiveSizeInBytes(); long chunks = size / chunksize; while (chunks > 10000) { chunksize <<= 1; chunks = size / chunksize; } RandomAccessFile raf = new RandomAccessFile(filename, "w"); raf.setLength(size); byte[] buffer = new byte[chunksize]; for (int x = 0; x < chunks; x++) { try { System.out.println("Downloading chunk " + x + " of " + chunks); String range = "bytes " + (x * chunksize) + "-" + ((x + 1) * chunksize - 1) + "/*"; GetJobOutputResult gjores = client .getJobOutput(new GetJobOutputRequest(vaultname, jobid, range)); gjores.getBody().read(buffer); MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(buffer, 0, chunksize); byte[] hash = md.digest(); StringBuffer sb = new StringBuffer(); for (byte b : hash) { sb.append(String.format("%02x", b)); } if (!sb.toString().equalsIgnoreCase(gjores.getChecksum())) { System.err.println("Error: Chunk " + x + " does not match SHA-256. Retrying."); x--; continue; } raf.seek(x * chunksize); raf.write(buffer); } catch (Exception e) { System.err.println("Error: Exception while downloading chunk " + x + ". Retrying."); x--; } } if (size > chunks * chunksize) { do { try { System.out.println("Downloading final partial chunk"); String range = "bytes " + (chunks * chunksize) + "-" + (size - 1) + "/*"; GetJobOutputResult gjores = client .getJobOutput(new GetJobOutputRequest(vaultname, jobid, range)); int bytes = gjores.getBody().read(buffer); MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(buffer, 0, bytes); byte[] hash = md.digest(); StringBuffer sb = new StringBuffer(); for (byte b : hash) { sb.append(String.format("%02x", b)); } if (!sb.toString().equalsIgnoreCase(gjores.getChecksum())) { System.err.println("Error: Final chunk does not match SHA-256. 
Retrying."); continue; } raf.seek(chunks * chunksize); raf.write(buffer, 0, bytes); } catch (Exception e) { System.err.println("Error: Exception while downloading final chunk. Retrying."); continue; } } while (false); } raf.close(); String treehash = TreeHashGenerator.calculateTreeHash(new File(filename)); if (!treehash.equalsIgnoreCase(djres.getSHA256TreeHash())) { System.err.println("Error: File failed final tree hash check."); System.exit(3); } System.out.println("Download complete."); } } catch (Exception e) { throw new RuntimeException(e); } }
From source file:maebackup.MaeBackup.java
License:Open Source License
/**
 * Permanently removes the given archive from the configured Glacier vault.
 *
 * @param archive the archive id to delete
 * @throws RuntimeException wrapping any failure
 */
public static void delete(String archive) {
    try {
        System.out.println("Deleting from Glacier...");
        ClientConfiguration glacierConfig = new ClientConfiguration();
        glacierConfig.setProtocol(Protocol.HTTPS);
        AmazonGlacierClient glacier = new AmazonGlacierClient(credentials, glacierConfig);
        glacier.setEndpoint(endpoint);
        glacier.deleteArchive(new DeleteArchiveRequest(vaultname, archive));
        System.out.println("Archive deleted.");
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
From source file:maebackup.MaeBackup.java
License:Open Source License
public static void list(String arg) { try {//from w ww.j a va2 s .c o m System.out.println("Listing Glacier vault..."); ClientConfiguration config = new ClientConfiguration(); config.setProtocol(Protocol.HTTPS); AmazonGlacierClient client = new AmazonGlacierClient(credentials, config); client.setEndpoint(endpoint); if (arg == null || arg == "") { InitiateJobResult result = client.initiateJob( new InitiateJobRequest(vaultname, new JobParameters().withType("inventory-retrieval"))); String jobid = result.getJobId(); System.out.println("Started inventory retrival job as ID " + jobid); } else { DescribeJobResult djres = client.describeJob(new DescribeJobRequest(vaultname, arg)); if (!djres.getStatusCode().equals("Succeeded")) { System.out.println("Job is not listed as Succeeded. It is: " + djres.getStatusCode()); System.out.println(djres.getStatusMessage()); System.exit(2); } GetJobOutputResult gjores = client .getJobOutput(new GetJobOutputRequest().withVaultName(vaultname).withJobId(arg)); byte[] buffer = new byte[1024]; int bytes; while ((bytes = gjores.getBody().read(buffer)) > 0) { System.out.write(buffer, 0, bytes); } } } catch (Exception e) { throw new RuntimeException(e); } }
From source file:n3phele.factory.ec2.VirtualServerResource.java
License:Open Source License
@GET @RolesAllowed("authenticated") @Path("dump") public String dump(@QueryParam("id") String id, @QueryParam("key") String key, @DefaultValue("https://ec2.amazonaws.com") @QueryParam("location") String location) { log.info("Id=" + id + " key=" + key); ClientConfiguration clientConfiguration = new ClientConfiguration(); try {// w w w.ja v a 2 s .c o m clientConfiguration .setProtocol(Protocol.valueOf(URI.create(location).toURL().getProtocol().toUpperCase())); } catch (MalformedURLException e) { throw new WebApplicationException(); } AmazonEC2Client client = new AmazonEC2Client(new BasicAWSCredentials(id, key), clientConfiguration); client.setEndpoint(location.toString()); DescribeKeyPairsResult result = client.describeKeyPairs(); log.info("Key pairs " + result.getKeyPairs()); return (result.getKeyPairs().size() + " key pairs "); }
From source file:n3phele.factory.ec2.VirtualServerResource.java
License:Open Source License
private AmazonEC2Client getEC2Client(String accessKey, String encryptedKey, URI location) { AWSCredentials credentials = null;//from ww w. j av a 2s. co m try { credentials = new EncryptedAWSCredentials(accessKey, encryptedKey); } catch (UnsupportedEncodingException e) { throw new WebApplicationException(); } catch (NoSuchAlgorithmException e) { throw new WebApplicationException(); } catch (Exception e) { throw new WebApplicationException(); } ClientConfiguration clientConfiguration = new ClientConfiguration(); try { clientConfiguration.setProtocol(Protocol.valueOf(location.toURL().getProtocol().toUpperCase())); } catch (MalformedURLException e) { throw new WebApplicationException(); } AmazonEC2Client client = new AmazonEC2Client(credentials, clientConfiguration); client.setEndpoint(location.toString()); return client; }
From source file:net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java
License:Apache License
/**
 * Fetches the stored S3 object for the given file descriptor and returns its
 * content stream over HTTPS. The caller is responsible for closing the stream.
 *
 * @param file descriptor of the stored file; must not be null
 * @return the object's content as an InputStream
 * @throws IOException declared by the interface contract
 */
@Override
public InputStream retrieve(IFile file) throws IOException {
    Preconditions.checkArgument((file != null), "file must not be null");
    ClientConfiguration httpsOnly = new ClientConfiguration().withProtocol(Protocol.HTTPS);
    AmazonS3 s3 = new AmazonS3Client(credentials, httpsOnly);
    S3Object storedObject = s3.getObject(new GetObjectRequest(getBucketName(), file.getFileName()));
    return storedObject.getObjectContent();
}
From source file:net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java
License:Apache License
/**
 * Removes the S3 object backing the given file descriptor, connecting over HTTPS.
 *
 * @param file descriptor of the stored file to delete
 * @throws IOException declared by the interface contract
 */
@Override
public void delete(IFile file) throws IOException {
    ClientConfiguration httpsOnly = new ClientConfiguration().withProtocol(Protocol.HTTPS);
    AmazonS3 s3 = new AmazonS3Client(credentials, httpsOnly);
    s3.deleteObject(new DeleteObjectRequest(getBucketName(), file.getFileName()));
}