List of usage examples for com.amazonaws.services.s3 AmazonS3Client getObject
@Override public S3Object getObject(GetObjectRequest getObjectRequest) throws SdkClientException, AmazonServiceException
From source file:S3Console.java
License:Open Source License
void createS3Bucket(AmazonS3Client s3, String mc_no) throws IOException { this.s3 = s3; //create bucket String bucketName = "prachi-amazon-s3-bucket-" + mc_no + Virtualize.no_of_days; s3.createBucket(bucketName);//from w w w . j a va2s . co m //set key String key = "ReadMe.txt"; //set value File file = File.createTempFile("temp", ".txt"); file.deleteOnExit(); Writer writer = new OutputStreamWriter(new FileOutputStream(file)); writer.write("Amazon S3 Bucket Created.\r ........Success !!"); writer.close(); //put object - bucket, key, value(file) s3.putObject(new PutObjectRequest(bucketName, key, file)); //get object S3Object object = s3.getObject(new GetObjectRequest(bucketName, key)); BufferedReader reader = new BufferedReader(new InputStreamReader(object.getObjectContent())); String data = null; while ((data = reader.readLine()) != null) { System.out.println(data); } }
From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License:Open Source License
/**
 * Fetches an object from the backing S3 endpoint on behalf of the requesting
 * user and maps the SDK result onto the OSG response type.
 *
 * @param request bucket/key to fetch, plus the requesting user's context
 * @return reply populated with the object's metadata and content stream
 * @throws S3Exception when the backend rejects the request
 */
@Override
public GetObjectResponseType getObject(final GetObjectType request) throws S3Exception {
    final User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;
    final GetObjectRequest backendRequest = new GetObjectRequest(request.getBucket(), request.getKey());
    try {
        internalS3Client = getS3Client(requestUser);
        final AmazonS3Client backend = internalS3Client.getS3Client();
        final GetObjectResponseType reply = request.getReply();
        // The content stream is handed to the reply unconsumed; the caller is
        // responsible for draining/closing it.
        final S3Object backendObject = backend.getObject(backendRequest);
        populateResponseMetadata(reply, backendObject.getObjectMetadata());
        reply.setDataInputStream(backendObject.getObjectContent());
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}
From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License:Open Source License
@Override public GetObjectExtendedResponseType getObjectExtended(final GetObjectExtendedType request) throws S3Exception { User requestUser = getRequestUser(request); OsgInternalS3Client internalS3Client = null; Boolean getMetaData = request.getGetMetaData(); Long byteRangeStart = request.getByteRangeStart(); Long byteRangeEnd = request.getByteRangeEnd(); Date ifModifiedSince = request.getIfModifiedSince(); Date ifUnmodifiedSince = request.getIfUnmodifiedSince(); String ifMatch = request.getIfMatch(); String ifNoneMatch = request.getIfNoneMatch(); GetObjectRequest getRequest = new GetObjectRequest(request.getBucket(), request.getKey()); if (byteRangeStart == null) { byteRangeStart = 0L;//from w w w . ja va 2 s . c o m } if (byteRangeEnd != null) { getRequest.setRange(byteRangeStart, byteRangeEnd); } if (getMetaData != null) { //Get object metadata } if (ifModifiedSince != null) { getRequest.setModifiedSinceConstraint(ifModifiedSince); } if (ifUnmodifiedSince != null) { getRequest.setUnmodifiedSinceConstraint(ifUnmodifiedSince); } if (ifMatch != null) { List matchList = new ArrayList(); matchList.add(ifMatch); getRequest.setMatchingETagConstraints(matchList); } if (ifNoneMatch != null) { List nonMatchList = new ArrayList(); nonMatchList.add(ifNoneMatch); getRequest.setNonmatchingETagConstraints(nonMatchList); } try { internalS3Client = getS3Client(requestUser); AmazonS3Client s3Client = internalS3Client.getS3Client(); S3Object response = s3Client.getObject(getRequest); GetObjectExtendedResponseType reply = request.getReply(); populateResponseMetadata(reply, response.getObjectMetadata()); reply.setDataInputStream(response.getObjectContent()); reply.setByteRangeStart(request.getByteRangeStart()); reply.setByteRangeEnd(request.getByteRangeEnd()); return reply; } catch (AmazonServiceException e) { LOG.debug("Error from backend", e); throw S3ExceptionMapper.fromAWSJavaSDK(e); } }
From source file:com.facebook.presto.kinesis.s3config.S3TableConfigClient.java
License:Apache License
/** * Connect to S3 directory to look for new or updated table definitions and then * update the map./*w w w . j a v a 2 s . c om*/ */ protected void updateTablesFromS3() { long now = System.currentTimeMillis(); List<S3ObjectSummary> objectList = this.getObjectSummaries(); AmazonS3Client s3client = this.clientManager.getS3Client(); AmazonS3URI directoryURI = new AmazonS3URI(this.bucketUrl); // Build map of "deltas" which in the end contains new definitions and deleted tables HashMap<String, KinesisStreamDescription> deltasMap = new HashMap<String, KinesisStreamDescription>(); internalMapLock.readLock().lock(); try { Iterator<String> keysIter = this.internalMap.keySet().iterator(); while (keysIter.hasNext()) { deltasMap.put(keysIter.next(), dummyStreamDesc); } } finally { internalMapLock.readLock().unlock(); } for (S3ObjectSummary objInfo : objectList) { if (!deltasMap.containsKey(objInfo.getKey()) || objInfo.getLastModified().getTime() >= this.lastCheck) { // New or updated file, so we must read from AWS try { if (objInfo.getKey().endsWith("/")) { continue; } log.info("Getting : %s - %s", objInfo.getBucketName(), objInfo.getKey()); S3Object object = s3client .getObject(new GetObjectRequest(objInfo.getBucketName(), objInfo.getKey())); StringBuilder resultStr = new StringBuilder(""); try (BufferedReader reader = new BufferedReader( new InputStreamReader(object.getObjectContent()))) { boolean hasMore = true; while (hasMore) { String line = reader.readLine(); if (line != null) { resultStr.append(line); } else { hasMore = false; } } KinesisStreamDescription table = streamDescriptionCodec.fromJson(resultStr.toString()); deltasMap.put(objInfo.getKey(), table); log.info("Put table description into the map from %s : %s.%s", objInfo.getKey(), table.getSchemaName(), table.getTableName()); } catch (IOException iox) { log.error("Problem reading input stream from object.", iox); } catch (IllegalArgumentException iax) { // Note: this gets thrown by airlift json library when the 
input is malformed. log.error("Invalid JSON table description.", iax); } } catch (AmazonServiceException ase) { StringBuilder sb = new StringBuilder(); sb.append("Caught an AmazonServiceException, which means your request made it "); sb.append("to Amazon S3, but was rejected with an error response for some reason.\n"); sb.append("Error Message: " + ase.getMessage()); sb.append("HTTP Status Code: " + ase.getStatusCode()); sb.append("AWS Error Code: " + ase.getErrorCode()); sb.append("Error Type: " + ase.getErrorType()); sb.append("Request ID: " + ase.getRequestId()); log.error(sb.toString(), ase); } catch (AmazonClientException ace) { StringBuilder sb = new StringBuilder(); sb.append("Caught an AmazonClientException, " + "which means the client encountered " + "an internal error while trying to communicate" + " with S3, " + "such as not being able to access the network."); sb.append("Error Message: " + ace.getMessage()); log.error(sb.toString(), ace); } } else if (deltasMap.containsKey(objInfo.getKey())) { deltasMap.remove(objInfo.getKey()); } } // end loop through object descriptions // Deltas: key pointing to dummy means delete, key pointing to other object means update. // This approach lets us delete and update while shortening the locked critical section. Iterator<Map.Entry<String, KinesisStreamDescription>> deltasIter = deltasMap.entrySet().iterator(); internalMapLock.writeLock().lock(); try { while (deltasIter.hasNext()) { Map.Entry<String, KinesisStreamDescription> entry = deltasIter.next(); if (entry.getValue().getTableName().equals("__DUMMY__")) { this.internalMap.remove(entry.getKey()); } else { this.internalMap.put(entry.getKey(), entry.getValue()); } } } finally { internalMapLock.writeLock().unlock(); } log.info("Completed updating table definitions from S3."); this.lastCheck = now; return; }
From source file:com.mesosphere.dcos.cassandra.executor.backup.S3StorageDriver.java
License:Apache License
/**
 * Downloads the saved schema file for the node named in the restore context
 * and returns its contents decoded as UTF-8.
 *
 * @param ctx restore context identifying the bucket, key prefix, and node
 * @return the schema file contents as a UTF-8 string
 * @throws Exception if the S3 fetch or the stream read fails
 */
@Override
public String downloadSchema(BackupRestoreContext ctx) throws Exception {
    final String nodeId = ctx.getNodeId();
    final AmazonS3Client amazonS3Client = getAmazonS3Client(ctx);
    // Schema lives at <prefix>/<nodeId>/<schema-file> inside the backup bucket.
    final String key = getPrefixKey(ctx) + "/" + nodeId + "/" + StorageUtil.SCHEMA_FILE;
    S3Object object = amazonS3Client.getObject(new GetObjectRequest(getBucketName(ctx), key));
    // try-with-resources: the original leaked the stream (and its HTTP
    // connection) when IOUtils.toString threw.
    try (InputStream objectData = object.getObjectContent()) {
        return IOUtils.toString(objectData, "UTF-8");
    }
}
From source file:com.netflix.dynomitemanager.sidecore.backup.S3Restore.java
License:Apache License
/** * Uses the Amazon S3 API to restore from S3 *//*from ww w. j av a 2s . com*/ @Override public boolean restoreData(String dateString) { long time = restoreTime(dateString); if (time > -1) { logger.info("Restoring data from S3."); AmazonS3Client s3Client = new AmazonS3Client(cred.getAwsCredentialProvider()); try { /* construct the key for the backup data */ String keyName = config.getBackupLocation() + "/" + iid.getInstance().getDatacenter() + "/" + iid.getInstance().getRack() + "/" + iid.getInstance().getToken() + "/" + time; logger.info("S3 Bucket Name: " + config.getBucketName()); logger.info("Key in Bucket: " + keyName); // Checking if the S3 bucket exists, and if does not, then we create it if (!(s3Client.doesBucketExist(config.getBucketName()))) { logger.error("Bucket with name: " + config.getBucketName() + " does not exist"); } else { S3Object s3object = s3Client.getObject(new GetObjectRequest(config.getBucketName(), keyName)); logger.info("Content-Type: " + s3object.getObjectMetadata().getContentType()); String filepath = null; if (config.isAof()) { filepath = config.getPersistenceLocation() + "/appendonly.aof"; } else { filepath = config.getPersistenceLocation() + "/nfredis.rdb"; } IOUtils.copy(s3object.getObjectContent(), new FileOutputStream(new File(filepath))); } return true; } catch (AmazonServiceException ase) { logger.error("AmazonServiceException;" + " request made it to Amazon S3, but was rejected with an error "); logger.error("Error Message: " + ase.getMessage()); logger.error("HTTP Status Code: " + ase.getStatusCode()); logger.error("AWS Error Code: " + ase.getErrorCode()); logger.error("Error Type: " + ase.getErrorType()); logger.error("Request ID: " + ase.getRequestId()); } catch (AmazonClientException ace) { logger.error("AmazonClientException;" + " the client encountered " + "an internal error while trying to " + "communicate with S3, "); logger.error("Error Message: " + ace.getMessage()); } catch (IOException io) { logger.error("File 
storing error: " + io.getMessage()); } } else { logger.error("Date in FP: " + dateString); } return false; }
From source file:com.nextdoor.bender.operations.geo.GeoIpOperationFactory.java
License:Apache License
@Override public void setConf(AbstractConfig config) { this.config = (GeoIpOperationConfig) config; AmazonS3Client client = this.s3Factory.newInstance(); AmazonS3URI uri = new AmazonS3URI(this.config.getGeoLiteDb()); GetObjectRequest req = new GetObjectRequest(uri.getBucket(), uri.getKey()); S3Object obj = client.getObject(req); try {//ww w. ja v a2 s.co m this.databaseReader = new DatabaseReader.Builder(obj.getObjectContent()).withCache(new CHMCache()) .build(); } catch (IOException e) { throw new ConfigurationException("Unable to read " + this.config.getGeoLiteDb(), e); } }
From source file:com.qubole.presto.kinesis.s3config.S3TableConfigClient.java
License:Apache License
/** * Connect to S3 directory to look for new or updated table definitions and then * update the map./* www. j a v a 2 s . c o m*/ */ protected void updateTablesFromS3() { long now = System.currentTimeMillis(); List<S3ObjectSummary> objectList = this.getObjectSummaries(); AmazonS3Client s3client = this.clientManager.getS3Client(); AmazonS3URI directoryURI = new AmazonS3URI(this.bucketUrl); for (S3ObjectSummary objInfo : objectList) { if (!this.internalMap.containsKey(objInfo.getKey()) || objInfo.getLastModified().getTime() >= this.lastCheck) { // New or updated file, so we must read from AWS try { if (objInfo.getKey().endsWith("/")) { continue; } log.info("Getting : %s - %s", objInfo.getBucketName(), objInfo.getKey()); S3Object object = s3client .getObject(new GetObjectRequest(objInfo.getBucketName(), objInfo.getKey())); StringBuilder resultStr = new StringBuilder(""); try (BufferedReader reader = new BufferedReader( new InputStreamReader(object.getObjectContent()))) { boolean hasMore = true; while (hasMore) { String line = reader.readLine(); if (line != null) { resultStr.append(line); } else { hasMore = false; } } KinesisStreamDescription table = streamDescriptionCodec.fromJson(resultStr.toString()); internalMap.put(objInfo.getKey(), table); log.info("Put table description into the map from %s", objInfo.getKey()); } catch (IOException iox) { log.error("Problem reading input stream from object.", iox); } } catch (AmazonServiceException ase) { StringBuilder sb = new StringBuilder(); sb.append("Caught an AmazonServiceException, which means your request made it "); sb.append("to Amazon S3, but was rejected with an error response for some reason.\n"); sb.append("Error Message: " + ase.getMessage()); sb.append("HTTP Status Code: " + ase.getStatusCode()); sb.append("AWS Error Code: " + ase.getErrorCode()); sb.append("Error Type: " + ase.getErrorType()); sb.append("Request ID: " + ase.getRequestId()); log.error(sb.toString(), ase); } catch (AmazonClientException ace) { 
StringBuilder sb = new StringBuilder(); sb.append("Caught an AmazonClientException, " + "which means the client encountered " + "an internal error while trying to communicate" + " with S3, " + "such as not being able to access the network."); sb.append("Error Message: " + ace.getMessage()); log.error(sb.toString(), ace); } } } // end loop through object descriptions log.info("Completed updating table definitions from S3."); this.lastCheck = now; return; }
From source file:com.streamsets.pipeline.stage.origin.s3.AmazonS3Util.java
License:Apache License
/**
 * Fetches an object restricted to the byte range {@code [0, range]} (the SDK
 * range is inclusive at both ends).
 *
 * @param s3Client  client used for the fetch
 * @param bucket    bucket containing the object
 * @param objectKey key of the object to fetch
 * @param range     last byte offset to request
 * @return the partial object; caller owns (and must close) its content stream
 * @throws AmazonClientException if the request fails
 */
static S3Object getObjectRange(AmazonS3Client s3Client, String bucket, String objectKey, long range)
        throws AmazonClientException {
    GetObjectRequest rangedRequest = new GetObjectRequest(bucket, objectKey).withRange(0, range);
    return s3Client.getObject(rangedRequest);
}
From source file:dashboard.AmazonLogs.java
License:Open Source License
public int readAmazonLogs(int n, String AWS_USER, String AWS_PASS, String IPfile, String ERRfile, String bucketName, String DELETE_PROCESSED_LOGS, String API_KEY, String TOKEN, String apiuser, String apipass) throws Exception { if (n < 1) return 0; int eventsNumber = 0; String line = null;/*from ww w. j a v a 2s . c o m*/ int begin = 0; int zips = 0; int deletedZips = 0; int mixpanelStatus = 0; String registrant = ""; String ip = ""; String prevIP = ""; Mixpanel mix = new Mixpanel(); Whois w = new Whois(apiuser, apipass); int index = -1; Registrant r; ArrayList<Registrant> rList = new ArrayList<Registrant>(); ArrayList<Registrant> eList = new ArrayList<Registrant>(); IPList ipl = new IPList(); IPList errl = new IPList(); // Log files Bucket AWSCredentials credentials = new BasicAWSCredentials(AWS_USER, AWS_PASS); AmazonS3Client s3Client = new AmazonS3Client(credentials); ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName); BufferedReader br = null; ObjectListing objectListing = s3Client.listObjects(listObjectsRequest); ObjectListing nextObjectListing = objectListing; zips = 0; Boolean more = true; if (objectListing == null) more = false; else { ipl.loadList(rList, IPfile); ipl.printList(rList, 30); } while (more) { // Reads 1000 files for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { // Handle ZIP files try { // Open and send to mixpanel events of one ZIP file String key = objectSummary.getKey(); S3Object object = s3Client.getObject(new GetObjectRequest(bucketName, key)); // Extract ZIP and read Object to reader br = new BufferedReader(new InputStreamReader(new GZIPInputStream(object.getObjectContent()))); zips++; // Read the lines from the unzipped file, break it and send to Mixpanel while ((line = br.readLine()) != null) { if (line.startsWith("#")) continue; if (line.trim().equals("")) continue; String[] values = line.split("\\s"); String eventTime = values[0] + " " + values[1]; ip = values[4]; if 
(ip != prevIP) { prevIP = ip; index = ipl.ipInList(ip, rList); if (index >= 0) { r = rList.get(index); registrant = r.name; // Update counter for this IP r.counter = r.counter + 1; rList.set(index, r); } else { // WHOIS - Check registrant of this IP address registrant = w.whoisIP(ip); // if there was an error, try again if (registrant.equals("ERROR")) registrant = w.whoisIP(ip); // if there was a second error, add it to errors list if (registrant.equals("ERROR")) { eList.add(new Registrant(ip, registrant, 1)); } else { // If name includes a comma, exclude the comma registrant = registrant.replace(",", ""); rList.add(new Registrant(ip, registrant, 1)); } } } String method = values[5]; String fileName = values[7]; String statusCode = values[8]; String userAgent = values[10]; String fName = fileName; if (fileName.contains("gigaspaces-")) { begin = fileName.lastIndexOf("gigaspaces-") + 11; fName = fileName.substring(begin, fileName.length()); } eventsNumber++; System.out.println(eventsNumber + ": " + eventTime + " " + ip + " " + registrant); // ==================================================== // Track the event in Mixpanel (using the POST import) // ==================================================== mixpanelStatus = mix.postCDNEventToMixpanel(API_KEY, TOKEN, ip, "Cloudfront CDN", eventTime, method, fileName, fName, userAgent, statusCode, registrant); } // while on ZIP file lines if (mixpanelStatus == 1 & DELETE_PROCESSED_LOGS.equals("YES")) { // Delete the CDN log ZIP file s3Client.deleteObject(bucketName, key); System.out.println("========= Deleted Zip " + zips + " ===== List Size " + rList.size() + " =========="); deletedZips++; } } catch (IOException e) { e.printStackTrace(); return eventsNumber; } finally { if (br != null) { br.close(); } if (eventsNumber >= n) { System.out.println("\n>>> " + eventsNumber + " events in " + zips + " Zip files. 
Deleted " + deletedZips + " Zip files.\n"); ipl.printList(rList, 100); ipl.saveList(rList, IPfile); if (!eList.isEmpty()) { SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH-mm"); String fName = ERRfile + sdf.format(new Date()) + ".txt"; System.out.println("\n>>> " + eList.size() + " DomainTools errors:"); errl.saveList(eList, fName); } else System.out.println("\n>>> No DomainTools errors"); return eventsNumber; } } } // for (continue to next ZIP file // If there are more ZIP files, read next batch of 1000 if (objectListing.isTruncated()) { nextObjectListing = s3Client.listNextBatchOfObjects(objectListing); objectListing = nextObjectListing; } else more = false; // no more files } // while next objectListing System.out.println("\n>>> " + eventsNumber + " events in " + zips + " Zip files. Deleted " + deletedZips + " Zip files.\n"); ipl.printList(rList, 50); ipl.saveList(rList, IPfile); if (!eList.isEmpty()) { SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd-HH-mm"); String fName = ERRfile + sdf.format(new Date()) + ".txt"; System.out.println("\n>>> " + eList.size() + " DomainTools errors:"); errl.saveList(eList, fName); } else System.out.println("\n>>> No DomainTools errors"); return eventsNumber; }