List of usage examples for the com.amazonaws.services.s3.AmazonS3Client constructor AmazonS3Client(AWSCredentialsProvider, ClientConfiguration)
@Deprecated
public AmazonS3Client(AWSCredentialsProvider credentialsProvider, ClientConfiguration clientConfiguration)
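Before the project examples below, here is a minimal sketch of how this deprecated constructor is typically called, together with the AmazonS3ClientBuilder replacement that newer SDK 1.x code uses; the region string and the default credentials chain are illustrative assumptions, not values taken from the examples that follow.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class AmazonS3ClientConstructorSketch {
    public static void main(String[] args) {
        AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
        ClientConfiguration clientConfiguration = new ClientConfiguration().withProtocol(Protocol.HTTPS);

        // Deprecated constructor that the examples below use.
        @SuppressWarnings("deprecation")
        AmazonS3 legacyClient = new AmazonS3Client(credentialsProvider, clientConfiguration);

        // Builder-based replacement; "us-east-1" is an illustrative assumption.
        AmazonS3 client = AmazonS3ClientBuilder.standard()
                .withCredentials(credentialsProvider)
                .withClientConfiguration(clientConfiguration)
                .withRegion("us-east-1")
                .build();

        System.out.println(legacyClient != null && client != null);
    }
}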
From source file:net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java
License:Apache License
@Override
public StorageResponse store(StorageRequest request) throws IOException {
    final IFile requestFile = request.getFile();

    AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS));

    Map<String, String> fileMetadata = new HashMap<String, String>();
    fileMetadata.put("accountUrn", request.getUser().getAccount().getUrn());
    fileMetadata.put("userUrn", request.getUser().getUrn());
    fileMetadata.put("fileUrn", requestFile.getUrn());
    fileMetadata.put("entityReferenceType", requestFile.getEntityReferenceType().name());
    fileMetadata.put("referenceUrn", requestFile.getReferenceUrn());
    fileMetadata.put("recordedTimestamp", Long.toString(request.getFile().getTimestamp()));
    // fileMetadata.put("mimeType", request.getVfsObject().getMimeType());

    ObjectMetadata metadata = new ObjectMetadata();
    if (request.getContentLength() > 0) {
        LOG.debug("Including content length : " + request.getContentLength());
        metadata.setContentLength(request.getContentLength());
    }
    // metadata.setContentMD5(streamMD5);
    metadata.setUserMetadata(fileMetadata);

    try {
        LOG.trace("Bucket name: " + getBucketName());
        LOG.trace("File name: " + request.getFileName());
        LOG.trace("inputStream == null? " + (request.getInputStream() == null));

        HashingInputStream his = new HashingInputStream(request.getInputStream(), "SHA-256");
        PutObjectResult putResult = s3
                .putObject(new PutObjectRequest(getBucketName(), request.getFileName(), his, metadata));

        String finalUrl = getUrl(request.getFileName());
        LOG.trace("File URL: " + finalUrl);
        requestFile.setUrl(getUrl(request.getFileName()));

        byte[] signature = his.getSignature();
        JSONObject jsonObject = HashUtil.signFile(requestFile, signature);
        LOG.info("File Signature\n\n{}\n\n", jsonObject.toString(3));

        return new StorageResponse(requestFile, finalUrl, jsonObject.toString(3));
    } catch (AmazonS3Exception e) {
        e.printStackTrace();
        throw e;
    } catch (JSONException | NoSuchAlgorithmException e) {
        e.printStackTrace();
        throw new IOException(e);
    }
}
From source file:net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java
License:Apache License
@Override
public boolean isHealthy() {
    AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS));
    return s3.doesBucketExist(getBucketName());
}
From source file:org.adroitlogic.build.aws.maven.SimpleStorageServiceWagon.java
License:Apache License
@Override
protected void connectToRepository(Repository repository, AuthenticationInfo authenticationInfo,
        ProxyInfoProvider proxyInfoProvider) throws AuthenticationException {
    if (this.amazonS3 == null) {
        AuthenticationInfoAWSCredentialsProviderChain credentialsProvider =
                new AuthenticationInfoAWSCredentialsProviderChain(authenticationInfo);
        ClientConfiguration clientConfiguration = S3Utils.getClientConfiguration(proxyInfoProvider);

        this.bucketName = S3Utils.getBucketName(repository);
        this.baseDirectory = S3Utils.getBaseDirectory(repository);

        if (proxyInfoProvider != null) {
            ProxyInfo pi = proxyInfoProvider.getProxyInfo("s3");
            if (pi != null) {
                clientConfiguration.setProxyHost(pi.getHost());
                clientConfiguration.setProxyPort(pi.getPort());
                clientConfiguration.setProxyUsername(pi.getUserName());
                clientConfiguration.setProxyPassword(pi.getPassword());
                clientConfiguration.setProxyDomain(pi.getNtlmDomain());
                clientConfiguration.setProxyWorkstation(pi.getNtlmHost());
            }
        }

        this.amazonS3 = new AmazonS3Client(credentialsProvider, clientConfiguration);

        Region region = Region.fromLocationConstraint(this.amazonS3.getBucketLocation(this.bucketName));
        this.amazonS3.setEndpoint(region.getEndpoint());
    }
}
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
/**
 * Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc. for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);

    uri = URI.create(name.getScheme() + "://" + name.getAuthority());
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this.uri,
            this.getWorkingDirectory());

    // Try to get our credentials or just connect anonymously
    String accessKey = conf.get(NEW_ACCESS_KEY, conf.get(OLD_ACCESS_KEY, null));
    String secretKey = conf.get(NEW_SECRET_KEY, conf.get(OLD_SECRET_KEY, null));

    String userInfo = name.getUserInfo();
    if (userInfo != null) {
        int index = userInfo.indexOf(':');
        if (index != -1) {
            accessKey = userInfo.substring(0, index);
            secretKey = userInfo.substring(index + 1);
        } else {
            accessKey = userInfo;
        }
    }

    AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
            new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(),
            new AnonymousAWSCredentialsProvider());

    bucket = name.getHost();

    ClientConfiguration awsConf = new ClientConfiguration();
    awsConf.setMaxConnections(conf.getInt(NEW_MAXIMUM_CONNECTIONS,
            conf.getInt(OLD_MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS)));
    awsConf.setProtocol(conf.getBoolean(NEW_SECURE_CONNECTIONS,
            conf.getBoolean(OLD_SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS)) ? Protocol.HTTPS : Protocol.HTTP);
    awsConf.setMaxErrorRetry(
            conf.getInt(NEW_MAX_ERROR_RETRIES, conf.getInt(OLD_MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES)));
    awsConf.setSocketTimeout(
            conf.getInt(NEW_SOCKET_TIMEOUT, conf.getInt(OLD_SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT)));

    s3 = new AmazonS3Client(credentials, awsConf);

    maxKeys = conf.getInt(NEW_MAX_PAGING_KEYS, conf.getInt(OLD_MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS));
    partSize = conf.getLong(NEW_MULTIPART_SIZE, conf.getLong(OLD_MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE));
    partSizeThreshold = conf.getLong(NEW_MIN_MULTIPART_THRESHOLD,
            conf.getLong(OLD_MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD));

    if (partSize < 5 * 1024 * 1024) {
        LOG.error(NEW_MULTIPART_SIZE + " must be at least 5 MB");
        partSize = 5 * 1024 * 1024;
    }
    if (partSizeThreshold < 5 * 1024 * 1024) {
        LOG.error(NEW_MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
        partSizeThreshold = 5 * 1024 * 1024;
    }

    String cannedACLName = conf.get(NEW_CANNED_ACL, conf.get(OLD_CANNED_ACL, DEFAULT_CANNED_ACL));
    if (!cannedACLName.isEmpty()) {
        cannedACL = CannedAccessControlList.valueOf(cannedACLName);
    } else {
        cannedACL = null;
    }

    if (!s3.doesBucketExist(bucket)) {
        throw new IOException("Bucket " + bucket + " does not exist");
    }

    boolean purgeExistingMultipart = conf.getBoolean(NEW_PURGE_EXISTING_MULTIPART,
            conf.getBoolean(OLD_PURGE_EXISTING_MULTIPART, DEFAULT_PURGE_EXISTING_MULTIPART));
    long purgeExistingMultipartAge = conf.getLong(NEW_PURGE_EXISTING_MULTIPART_AGE,
            conf.getLong(OLD_PURGE_EXISTING_MULTIPART_AGE, DEFAULT_PURGE_EXISTING_MULTIPART_AGE));

    if (purgeExistingMultipart) {
        TransferManager transferManager = new TransferManager(s3);
        Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);

        transferManager.abortMultipartUploads(bucket, purgeBefore);
        transferManager.shutdownNow(false);
    }

    serverSideEncryptionAlgorithm = conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM, null);

    setConf(conf);
}
From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java
License:Apache License
/**
 * Called after a new FileSystem instance is constructed.
 * @param name a uri whose authority section names the host, port, etc. for this FileSystem
 * @param conf the configuration
 */
public void initialize(URI name, Configuration conf) throws IOException {
    super.initialize(name, conf);

    uri = URI.create(name.getScheme() + "://" + name.getAuthority());
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(this.uri,
            this.getWorkingDirectory());

    // Try to get our credentials or just connect anonymously
    String accessKey = conf.get(ACCESS_KEY, null);
    String secretKey = conf.get(SECRET_KEY, null);

    String userInfo = name.getUserInfo();
    if (userInfo != null) {
        int index = userInfo.indexOf(':');
        if (index != -1) {
            accessKey = userInfo.substring(0, index);
            secretKey = userInfo.substring(index + 1);
        } else {
            accessKey = userInfo;
        }
    }

    AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
            new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(),
            new AnonymousAWSCredentialsProvider());

    bucket = name.getHost();

    ClientConfiguration awsConf = new ClientConfiguration();
    awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS));
    boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
    awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
    awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES));
    awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT, DEFAULT_ESTABLISH_TIMEOUT));
    awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT));

    String proxyHost = conf.getTrimmed(PROXY_HOST, "");
    int proxyPort = conf.getInt(PROXY_PORT, -1);
    if (!proxyHost.isEmpty()) {
        awsConf.setProxyHost(proxyHost);
        if (proxyPort >= 0) {
            awsConf.setProxyPort(proxyPort);
        } else {
            if (secureConnections) {
                LOG.warn("Proxy host set without port. Using HTTPS default 443");
                awsConf.setProxyPort(443);
            } else {
                LOG.warn("Proxy host set without port. Using HTTP default 80");
                awsConf.setProxyPort(80);
            }
        }
        String proxyUsername = conf.getTrimmed(PROXY_USERNAME);
        String proxyPassword = conf.getTrimmed(PROXY_PASSWORD);
        if ((proxyUsername == null) != (proxyPassword == null)) {
            String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD + " set without the other.";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        awsConf.setProxyUsername(proxyUsername);
        awsConf.setProxyPassword(proxyPassword);
        awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
        awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
        if (LOG.isDebugEnabled()) {
            LOG.debug("Using proxy server {}:{} as user {} with password {} on " + "domain {} as workstation {}",
                    awsConf.getProxyHost(), awsConf.getProxyPort(), String.valueOf(awsConf.getProxyUsername()),
                    awsConf.getProxyPassword(), awsConf.getProxyDomain(), awsConf.getProxyWorkstation());
        }
    } else if (proxyPort >= 0) {
        String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
        LOG.error(msg);
        throw new IllegalArgumentException(msg);
    }

    s3 = new AmazonS3Client(credentials, awsConf);

    String endPoint = conf.getTrimmed(ENDPOINT, "");
    if (!endPoint.isEmpty()) {
        try {
            s3.setEndpoint(endPoint);
        } catch (IllegalArgumentException e) {
            String msg = "Incorrect endpoint: " + e.getMessage();
            LOG.error(msg);
            throw new IllegalArgumentException(msg, e);
        }
    }

    maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
    partSize = conf.getLong(MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
    multiPartThreshold = conf.getInt(MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);

    if (partSize < 5 * 1024 * 1024) {
        LOG.error(MULTIPART_SIZE + " must be at least 5 MB");
        partSize = 5 * 1024 * 1024;
    }
    if (multiPartThreshold < 5 * 1024 * 1024) {
        LOG.error(MIN_MULTIPART_THRESHOLD + " must be at least 5 MB");
        multiPartThreshold = 5 * 1024 * 1024;
    }

    int maxThreads = conf.getInt(MAX_THREADS, DEFAULT_MAX_THREADS);
    int coreThreads = conf.getInt(CORE_THREADS, DEFAULT_CORE_THREADS);
    if (maxThreads == 0) {
        maxThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    if (coreThreads == 0) {
        coreThreads = Runtime.getRuntime().availableProcessors() * 8;
    }
    long keepAliveTime = conf.getLong(KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
    LinkedBlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(
            maxThreads * conf.getInt(MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS));
    threadPoolExecutor = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            workQueue, newDaemonThreadFactory("s3a-transfer-shared-"));
    threadPoolExecutor.allowCoreThreadTimeOut(true);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);

    transfers = new TransferManager(s3, threadPoolExecutor);
    transfers.setConfiguration(transferConfiguration);

    String cannedACLName = conf.get(CANNED_ACL, DEFAULT_CANNED_ACL);
    if (!cannedACLName.isEmpty()) {
        cannedACL = CannedAccessControlList.valueOf(cannedACLName);
    } else {
        cannedACL = null;
    }

    if (!s3.doesBucketExist(bucket)) {
        throw new IOException("Bucket " + bucket + " does not exist");
    }

    boolean purgeExistingMultipart = conf.getBoolean(PURGE_EXISTING_MULTIPART, DEFAULT_PURGE_EXISTING_MULTIPART);
    long purgeExistingMultipartAge = conf.getLong(PURGE_EXISTING_MULTIPART_AGE,
            DEFAULT_PURGE_EXISTING_MULTIPART_AGE);

    if (purgeExistingMultipart) {
        Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);
        transfers.abortMultipartUploads(bucket, purgeBefore);
    }

    serverSideEncryptionAlgorithm = conf.get(SERVER_SIDE_ENCRYPTION_ALGORITHM);

    setConf(conf);
}
From source file:org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpi.java
License:Apache License
/** {@inheritDoc} */
@SuppressWarnings({ "BusyWait" })
@Override
public void spiStart(String gridName) throws IgniteSpiException {
    // Start SPI start stopwatch.
    startStopwatch();

    assertParameter(cred != null, "awsCredentials != null");

    if (log.isDebugEnabled()) {
        log.debug(configInfo("awsCredentials", cred));
        log.debug(configInfo("clientConfiguration", cfg));
        log.debug(configInfo("bucketNameSuffix", bucketNameSuffix));
    }

    if (cfg == null)
        U.warn(log, "Amazon client configuration is not set (will use default).");

    if (F.isEmpty(bucketNameSuffix)) {
        U.warn(log, "Bucket name suffix is null or empty (will use default bucket name).");

        bucketName = BUCKET_NAME_PREFIX + DFLT_BUCKET_NAME_SUFFIX;
    } else
        bucketName = BUCKET_NAME_PREFIX + bucketNameSuffix;

    s3 = cfg != null ? new AmazonS3Client(cred, cfg) : new AmazonS3Client(cred);

    if (!s3.doesBucketExist(bucketName)) {
        try {
            s3.createBucket(bucketName);

            if (log.isDebugEnabled())
                log.debug("Created S3 bucket: " + bucketName);

            while (!s3.doesBucketExist(bucketName))
                try {
                    U.sleep(200);
                } catch (IgniteInterruptedCheckedException e) {
                    throw new IgniteSpiException("Thread has been interrupted.", e);
                }
        } catch (AmazonClientException e) {
            try {
                if (!s3.doesBucketExist(bucketName))
                    throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            } catch (AmazonClientException ignored) {
                throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            }
        }
    }

    Collection<S3TimeData> s3TimeDataLst = new LinkedList<>();

    try {
        ObjectListing list = s3.listObjects(bucketName);

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                S3CheckpointData data = read(sum.getKey());

                if (data != null) {
                    s3TimeDataLst.add(new S3TimeData(data.getExpireTime(), data.getKey()));

                    if (log.isDebugEnabled())
                        log.debug("Registered existing checkpoint from key: " + data.getKey());
                }
            }

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    } catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    } catch (IgniteCheckedException e) {
        throw new IgniteSpiException("Failed to marshal/unmarshal objects in bucket: " + bucketName, e);
    }

    // Track expiration for only those data that are made by this node
    timeoutWrk = new S3TimeoutWorker();

    timeoutWrk.add(s3TimeDataLst);
    timeoutWrk.start();

    registerMBean(gridName, this, S3CheckpointSpiMBean.class);

    // Ack ok start.
    if (log.isDebugEnabled())
        log.debug(startInfo());
}
From source file:org.apache.ignite.spi.discovery.tcp.ipfinder.s3.TcpDiscoveryS3IpFinder.java
License:Apache License
/**
 * Amazon s3 client initialization.
 *
 * @throws org.apache.ignite.spi.IgniteSpiException In case of error.
 */
@SuppressWarnings({ "BusyWait" })
private void initClient() throws IgniteSpiException {
    if (initGuard.compareAndSet(false, true))
        try {
            if (cred == null)
                throw new IgniteSpiException("AWS credentials are not set.");

            if (cfg == null)
                U.warn(log, "Amazon client configuration is not set (will use default).");

            if (F.isEmpty(bucketName))
                throw new IgniteSpiException("Bucket name is null or empty (provide bucket name and restart).");

            s3 = cfg != null ? new AmazonS3Client(cred, cfg) : new AmazonS3Client(cred);

            if (!s3.doesBucketExist(bucketName)) {
                try {
                    s3.createBucket(bucketName);

                    if (log.isDebugEnabled())
                        log.debug("Created S3 bucket: " + bucketName);

                    while (!s3.doesBucketExist(bucketName))
                        try {
                            U.sleep(200);
                        } catch (IgniteInterruptedCheckedException e) {
                            throw new IgniteSpiException("Thread has been interrupted.", e);
                        }
                } catch (AmazonClientException e) {
                    if (!s3.doesBucketExist(bucketName)) {
                        s3 = null;

                        throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
                    }
                }
            }
        } finally {
            initLatch.countDown();
        }
    else {
        try {
            U.await(initLatch);
        } catch (IgniteInterruptedCheckedException e) {
            throw new IgniteSpiException("Thread has been interrupted.", e);
        }

        if (s3 == null)
            throw new IgniteSpiException("Ip finder has not been initialized properly.");
    }
}
From source file:org.apache.jackrabbit.aws.ext.Utils.java
License:Apache License
/**
 * Create AmazonS3Client from properties.
 *
 * @param prop properties to configure {@link AmazonS3Client}
 * @return {@link AmazonS3Client}
 */
public static AmazonS3Client openService(final Properties prop) {
    AWSCredentials credentials = new BasicAWSCredentials(prop.getProperty(S3Constants.ACCESS_KEY),
            prop.getProperty(S3Constants.SECRET_KEY));
    int connectionTimeOut = Integer.parseInt(prop.getProperty(S3Constants.S3_CONN_TIMEOUT));
    int socketTimeOut = Integer.parseInt(prop.getProperty(S3Constants.S3_SOCK_TIMEOUT));
    int maxConnections = Integer.parseInt(prop.getProperty(S3Constants.S3_MAX_CONNS));
    int maxErrorRetry = Integer.parseInt(prop.getProperty(S3Constants.S3_MAX_ERR_RETRY));
    ClientConfiguration cc = new ClientConfiguration();
    cc.setConnectionTimeout(connectionTimeOut);
    cc.setSocketTimeout(socketTimeOut);
    cc.setMaxConnections(maxConnections);
    cc.setMaxErrorRetry(maxErrorRetry);
    return new AmazonS3Client(credentials, cc);
}
From source file:org.apache.nifi.processors.aws.credentials.provider.factory.MockAWSProcessor.java
License:Apache License
/**
 * Create client using credentials provider. This is the preferred way for creating clients
 */
@Override
protected AmazonS3Client createClient(final ProcessContext context,
        final AWSCredentialsProvider credentialsProvider, final ClientConfiguration config) {
    getLogger().info("Creating client with credentials provider");

    final AmazonS3Client s3 = new AmazonS3Client(credentialsProvider, config);

    return s3;
}
From source file:org.apache.nifi.processors.aws.credentials.provider.factory.MockAWSProcessor.java
License:Apache License
/**
 * Create client using AWSCredentials
 *
 * @deprecated use {@link #createClient(ProcessContext, AWSCredentialsProvider, ClientConfiguration)} instead
 */
@Override
protected AmazonS3Client createClient(final ProcessContext context, final AWSCredentials credentials,
        final ClientConfiguration config) {
    getLogger().info("Creating client with awd credentials");

    final AmazonS3Client s3 = new AmazonS3Client(credentials, config);

    return s3;
}