List of usage examples for com.amazonaws Protocol HTTPS
Protocol HTTPS
The examples below show how open-source projects use com.amazonaws.Protocol.HTTPS with ClientConfiguration to make AWS SDK for Java clients connect over HTTPS (SSL/TLS) rather than plain HTTP. Each snippet is taken from the source file named above it.
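As a minimal sketch of the pattern the snippets share, the following example builds a ClientConfiguration, selects Protocol.HTTPS, and passes it to an S3 client. It assumes only the ClientConfiguration and AmazonS3Client APIs that appear in the examples below; the class name and printed message are illustrative.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.s3.AmazonS3Client;

public class HttpsProtocolExample {
    public static void main(String[] args) {
        // Force the SDK client to talk to the service endpoint over HTTPS.
        // (Protocol.HTTPS is the SDK default; Protocol.HTTP would disable TLS.)
        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setProtocol(Protocol.HTTPS);

        // Credentials are resolved from environment variables, system properties,
        // the profile file, or the instance profile, as in the Alluxio example below.
        AmazonS3Client s3 = new AmazonS3Client(new DefaultAWSCredentialsProviderChain(), clientConfig);
        System.out.println("Client protocol: " + clientConfig.getProtocol());
    }
}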
From source file: alluxio.underfs.s3a.S3AUnderFileSystem.java
License: Apache License
/**
 * Constructs a new instance of {@link S3AUnderFileSystem}.
 *
 * @param uri the {@link AlluxioURI} for this UFS
 */
public S3AUnderFileSystem(AlluxioURI uri) {
    super(uri);
    mBucketName = uri.getHost();
    mBucketPrefix = PathUtils.normalizePath(Constants.HEADER_S3A + mBucketName, PATH_SEPARATOR);

    // Set the aws credential system properties based on Alluxio properties, if they are set
    if (Configuration.containsKey(PropertyKey.S3A_ACCESS_KEY)) {
        System.setProperty(SDKGlobalConfiguration.ACCESS_KEY_SYSTEM_PROPERTY,
                Configuration.get(PropertyKey.S3A_ACCESS_KEY));
    }
    if (Configuration.containsKey(PropertyKey.S3A_SECRET_KEY)) {
        System.setProperty(SDKGlobalConfiguration.SECRET_KEY_SYSTEM_PROPERTY,
                Configuration.get(PropertyKey.S3A_SECRET_KEY));
    }
    // Checks, in order, env variables, system properties, profile file, and instance profile
    AWSCredentialsProvider credentials = new AWSCredentialsProviderChain(
            new DefaultAWSCredentialsProviderChain());

    // Set the client configuration based on Alluxio configuration values
    ClientConfiguration clientConf = new ClientConfiguration();

    // Socket timeout
    clientConf.setSocketTimeout(Configuration.getInt(PropertyKey.UNDERFS_S3A_SOCKET_TIMEOUT_MS));

    // HTTP protocol
    if (Configuration.getBoolean(PropertyKey.UNDERFS_S3A_SECURE_HTTP_ENABLED)) {
        clientConf.setProtocol(Protocol.HTTPS);
    } else {
        clientConf.setProtocol(Protocol.HTTP);
    }

    // Proxy host
    if (Configuration.containsKey(PropertyKey.UNDERFS_S3_PROXY_HOST)) {
        clientConf.setProxyHost(Configuration.get(PropertyKey.UNDERFS_S3_PROXY_HOST));
    }

    // Proxy port
    if (Configuration.containsKey(PropertyKey.UNDERFS_S3_PROXY_PORT)) {
        clientConf.setProxyPort(Configuration.getInt(PropertyKey.UNDERFS_S3_PROXY_PORT));
    }

    mClient = new AmazonS3Client(credentials, clientConf);
    if (Configuration.containsKey(PropertyKey.UNDERFS_S3_ENDPOINT)) {
        mClient.setEndpoint(Configuration.get(PropertyKey.UNDERFS_S3_ENDPOINT));
    }

    mManager = new TransferManager(mClient);
    TransferManagerConfiguration transferConf = new TransferManagerConfiguration();
    transferConf.setMultipartCopyThreshold(MULTIPART_COPY_THRESHOLD);
    mManager.setConfiguration(transferConf);

    mAccountOwnerId = mClient.getS3AccountOwner().getId();
    // Gets the owner from user-defined static mapping from S3 canonical user id to Alluxio
    // user name.
    String owner = CommonUtils.getValueFromStaticMapping(
            Configuration.get(PropertyKey.UNDERFS_S3_OWNER_ID_TO_USERNAME_MAPPING), mAccountOwnerId);
    // If there is no user-defined mapping, use the display name.
    if (owner == null) {
        owner = mClient.getS3AccountOwner().getDisplayName();
    }
    mAccountOwner = owner == null ? mAccountOwnerId : owner;

    AccessControlList acl = mClient.getBucketAcl(mBucketName);
    mBucketMode = S3AUtils.translateBucketAcl(acl, mAccountOwnerId);
}
From source file: com.eucalyptus.objectstorage.client.GenericS3ClientFactory.java
License: Open Source License
protected static ClientConfiguration getDefaultConfiguration(boolean withHttps) {
    ClientConfiguration config = new ClientConfiguration();
    config.setConnectionTimeout(CONNECTION_TIMEOUT_MS);
    config.setMaxConnections(DEFAULT_MAX_CONNECTIONS);
    config.setMaxErrorRetry(DEFAULT_MAX_ERROR_RETRY);
    config.setUseReaper(true);
    config.setSocketTimeout(DEFAULT_SOCKET_READ_TIMEOUT_MS);
    config.setProtocol(withHttps ? Protocol.HTTPS : Protocol.HTTP);
    return config;
}
From source file: com.eucalyptus.objectstorage.client.OsgInternalS3Client.java
License: Open Source License
private synchronized void initializeNewClient(@Nonnull AWSCredentials credentials, @Nonnull String endpoint,
        @Nonnull Boolean https, @Nonnull Boolean useDns) {
    ClientConfiguration config = new ClientConfiguration();
    config.setConnectionTimeout(CONNECTION_TIMEOUT_MS); // very short timeout
    config.setSocketTimeout(OSG_SOCKET_TIMEOUT_MS);
    config.setUseReaper(true);
    config.setMaxConnections(OSG_MAX_CONNECTIONS);
    Protocol protocol = https ? Protocol.HTTPS : Protocol.HTTP;
    config.setProtocol(protocol);
    this.clientConfig = config;
    this.s3Client = new AmazonS3Client(credentials, config);
    this.ops = new S3ClientOptions().withPathStyleAccess(!useDns);
    this.s3Client.setS3ClientOptions(ops);
    this.instantiated = new Date();
    this.currentCredentials = credentials;
    this.setS3Endpoint(endpoint);
}
From source file: com.eucalyptus.objectstorage.client.OsgInternalS3Client.java
License: Open Source License
public void update(@Nonnull AWSCredentials credentials, @Nonnull String latestEndpoint, @Nonnull Boolean https,
        @Nonnull Boolean useDns) {
    // Credentials changes require new client instance (for now)
    if (this.s3Client == null || !credentialsEqual(this.currentCredentials, credentials)) {
        this.initializeNewClient(credentials, latestEndpoint, https, useDns);
        return;
    }

    // They have opposite semantics, so any equality means it must be updated
    if (this.ops.isPathStyleAccess() == useDns) {
        this.ops.setPathStyleAccess(!useDns);
        this.s3Client.setS3ClientOptions(this.ops);
    }

    Protocol tmpProto = https ? Protocol.HTTPS : Protocol.HTTP;
    if (!tmpProto.equals(this.clientConfig.getProtocol())) {
        this.clientConfig.setProtocol(tmpProto);
    }

    if (this.getS3Endpoint() == null || !this.getS3Endpoint().equals(latestEndpoint)) {
        this.setS3Endpoint(latestEndpoint);
    }
}
From source file: com.facebook.presto.hive.PrestoS3FileSystem.java
License: Apache License
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    checkNotNull(uri, "uri is null");
    checkNotNull(conf, "conf is null");

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path("/").makeQualified(this.uri, new Path("/"));

    HiveClientConfig defaults = new HiveClientConfig();
    this.stagingDirectory = new File(conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxClientRetries = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries());
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration.valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));

    ClientConfiguration configuration = new ClientConfiguration();
    configuration.setMaxErrorRetry(maxErrorRetries);
    configuration.setProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP);
    configuration.setConnectionTimeout(Ints.checkedCast(connectTimeout.toMillis()));

    this.s3 = new AmazonS3Client(getAwsCredentials(uri, conf), configuration);
}
From source file: com.facebook.presto.hive.s3.PrestoS3ClientFactory.java
License: Apache License
synchronized AmazonS3 getS3Client(Configuration config, HiveClientConfig clientConfig) {
    if (s3Client != null) {
        return s3Client;
    }

    HiveS3Config defaults = new HiveS3Config();
    String userAgentPrefix = config.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());
    int maxErrorRetries = config.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = config.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration.valueOf(config.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration.valueOf(config.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = config.getInt(S3_SELECT_PUSHDOWN_MAX_CONNECTIONS, clientConfig.getS3SelectPushdownMaxConnections());

    if (clientConfig.isS3SelectPushdownEnabled()) {
        s3UserAgentSuffix = "presto-select";
    }

    ClientConfiguration clientConfiguration = new ClientConfiguration()
            .withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis()))
            .withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix)
            .withUserAgentSuffix(s3UserAgentSuffix);

    PrestoS3FileSystemStats stats = new PrestoS3FileSystemStats();
    RequestMetricCollector metricCollector = new PrestoS3FileSystemMetricCollector(stats);
    AWSCredentialsProvider awsCredentialsProvider = getAwsCredentialsProvider(config, defaults);
    AmazonS3Builder<? extends AmazonS3Builder, ? extends AmazonS3> clientBuilder = AmazonS3Client.builder()
            .withCredentials(awsCredentialsProvider)
            .withClientConfiguration(clientConfiguration)
            .withMetricsCollector(metricCollector)
            .enablePathStyleAccess();

    boolean regionOrEndpointSet = false;
    String endpoint = config.get(S3_ENDPOINT);
    boolean pinS3ClientToCurrentRegion = config.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION, defaults.isPinS3ClientToCurrentRegion());
    verify(!pinS3ClientToCurrentRegion || endpoint == null,
            "Invalid configuration: either endpoint can be set or S3 client can be pinned to the current region");

    // use local region when running inside of EC2
    if (pinS3ClientToCurrentRegion) {
        Region region = Regions.getCurrentRegion();
        if (region != null) {
            clientBuilder.withRegion(region.getName());
            regionOrEndpointSet = true;
        }
    }

    if (!isNullOrEmpty(endpoint)) {
        clientBuilder.withEndpointConfiguration(new EndpointConfiguration(endpoint, null));
        regionOrEndpointSet = true;
    }

    if (!regionOrEndpointSet) {
        clientBuilder.withRegion(US_EAST_1);
        clientBuilder.setForceGlobalBucketAccessEnabled(true);
    }

    s3Client = clientBuilder.build();
    return s3Client;
}
From source file: com.facebook.presto.hive.s3.PrestoS3FileSystem.java
License: Apache License
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    requireNonNull(uri, "uri is null");
    requireNonNull(conf, "conf is null");

    super.initialize(uri, conf);
    setConf(conf);

    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDirectory = new Path(PATH_SEPARATOR).makeQualified(this.uri, new Path(PATH_SEPARATOR));

    HiveS3Config defaults = new HiveS3Config();
    this.stagingDirectory = new File(conf.get(S3_STAGING_DIRECTORY, defaults.getS3StagingDirectory().toString()));
    this.maxAttempts = conf.getInt(S3_MAX_CLIENT_RETRIES, defaults.getS3MaxClientRetries()) + 1;
    this.maxBackoffTime = Duration.valueOf(conf.get(S3_MAX_BACKOFF_TIME, defaults.getS3MaxBackoffTime().toString()));
    this.maxRetryTime = Duration.valueOf(conf.get(S3_MAX_RETRY_TIME, defaults.getS3MaxRetryTime().toString()));
    int maxErrorRetries = conf.getInt(S3_MAX_ERROR_RETRIES, defaults.getS3MaxErrorRetries());
    boolean sslEnabled = conf.getBoolean(S3_SSL_ENABLED, defaults.isS3SslEnabled());
    Duration connectTimeout = Duration.valueOf(conf.get(S3_CONNECT_TIMEOUT, defaults.getS3ConnectTimeout().toString()));
    Duration socketTimeout = Duration.valueOf(conf.get(S3_SOCKET_TIMEOUT, defaults.getS3SocketTimeout().toString()));
    int maxConnections = conf.getInt(S3_MAX_CONNECTIONS, defaults.getS3MaxConnections());
    long minFileSize = conf.getLong(S3_MULTIPART_MIN_FILE_SIZE, defaults.getS3MultipartMinFileSize().toBytes());
    long minPartSize = conf.getLong(S3_MULTIPART_MIN_PART_SIZE, defaults.getS3MultipartMinPartSize().toBytes());
    this.isPathStyleAccess = conf.getBoolean(S3_PATH_STYLE_ACCESS, defaults.isS3PathStyleAccess());
    this.useInstanceCredentials = conf.getBoolean(S3_USE_INSTANCE_CREDENTIALS, defaults.isS3UseInstanceCredentials());
    this.pinS3ClientToCurrentRegion = conf.getBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION, defaults.isPinS3ClientToCurrentRegion());
    this.sseEnabled = conf.getBoolean(S3_SSE_ENABLED, defaults.isS3SseEnabled());
    this.sseType = PrestoS3SseType.valueOf(conf.get(S3_SSE_TYPE, defaults.getS3SseType().name()));
    this.sseKmsKeyId = conf.get(S3_SSE_KMS_KEY_ID, defaults.getS3SseKmsKeyId());
    String userAgentPrefix = conf.get(S3_USER_AGENT_PREFIX, defaults.getS3UserAgentPrefix());

    ClientConfiguration configuration = new ClientConfiguration()
            .withMaxErrorRetry(maxErrorRetries)
            .withProtocol(sslEnabled ? Protocol.HTTPS : Protocol.HTTP)
            .withConnectionTimeout(toIntExact(connectTimeout.toMillis()))
            .withSocketTimeout(toIntExact(socketTimeout.toMillis()))
            .withMaxConnections(maxConnections)
            .withUserAgentPrefix(userAgentPrefix)
            .withUserAgentSuffix(S3_USER_AGENT_SUFFIX);

    this.s3 = createAmazonS3Client(uri, conf, configuration);

    transferConfig.setMultipartUploadThreshold(minFileSize);
    transferConfig.setMinimumUploadPartSize(minPartSize);
}
From source file: com.ge.predix.sample.blobstore.connector.spring.BlobstoreServiceConnectorCreator.java
License: Apache License
/**
 * Creates the BlobStore context using S3Client
 *
 * @param serviceInfo Object Store Service Info Object
 * @param serviceConnectorConfig Cloud Foundry Service Connector Configuration
 *
 * @return BlobstoreService Instance of the ObjectStore Service
 */
@Override
public BlobstoreService create(BlobstoreServiceInfo serviceInfo, ServiceConnectorConfig serviceConnectorConfig) {
    log.info("create() invoked with serviceInfo? = " + (serviceInfo == null));

    ClientConfiguration config = new ClientConfiguration();
    config.setProtocol(Protocol.HTTPS);

    S3ClientOptions options = new S3ClientOptions();
    config.setSignerOverride("S3SignerType");

    BasicAWSCredentials creds = new BasicAWSCredentials(serviceInfo.getObjectStoreAccessKey(),
            serviceInfo.getObjectStoreSecretKey());
    AmazonS3Client s3Client = new AmazonS3Client(creds, config);
    s3Client.setEndpoint(serviceInfo.getUrl());
    s3Client.setS3ClientOptions(options);

    try {
        // Remove the Credentials from the Object Store URL
        URL url = new URL(serviceInfo.getUrl());
        String urlWithoutCredentials = url.getProtocol() + "://" + url.getHost();

        // Return BlobstoreService
        return new BlobstoreService(s3Client, serviceInfo.getBucket(), urlWithoutCredentials);
    } catch (MalformedURLException e) {
        log.error("create(): Couldnt parse the URL provided by VCAP_SERVICES. Exception = " + e.getMessage());
        throw new RuntimeException("Blobstore URL is Invalid", e);
    }
}
From source file: com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java
License: Apache License
/**
 * -
 */
@PostConstruct
public void init() {
    ClientConfiguration config = new ClientConfiguration();
    config.setProtocol(Protocol.HTTPS);

    if (this.blobstoreConfig.getProxyHost() != null && !"".equals(this.blobstoreConfig.getProxyHost())) { //$NON-NLS-1$
        this.log.info("Connnecting with proxy"); //$NON-NLS-1$
        if (this.blobstoreConfig.getProxyHost() != null) {
            config.withProxyHost(this.blobstoreConfig.getProxyHost());
        }
        if (this.blobstoreConfig.getProxyPort() != null) {
            config.withProxyPort(Integer.parseInt(this.blobstoreConfig.getProxyPort()));
        }
    }

    BasicAWSCredentials creds = new BasicAWSCredentials(this.blobstoreConfig.getAccessKey(),
            this.blobstoreConfig.getAccessKey());
    this.s3Client = new AmazonS3Client(creds, config);
    this.s3Client.setEndpoint(this.blobstoreConfig.getUrl());
}
From source file: com.ibm.stocator.fs.cos.COSAPIClient.java
License: Apache License
@Override
public void initiate(String scheme) throws IOException, ConfigurationParseException {
    mCachedSparkOriginated = new HashMap<String, Boolean>();
    mCachedSparkJobsStatus = new HashMap<String, Boolean>();
    schemaProvided = scheme;
    Properties props = ConfigurationHandler.initialize(filesystemURI, conf, scheme);

    // Set bucket name property
    int cacheSize = conf.getInt(CACHE_SIZE, GUAVA_CACHE_SIZE_DEFAULT);
    memoryCache = MemoryCache.getInstance(cacheSize);
    mBucket = props.getProperty(COS_BUCKET_PROPERTY);
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(filesystemURI,
            getWorkingDirectory());

    fModeAutomaticDelete = "true".equals(props.getProperty(FMODE_AUTOMATIC_DELETE_COS_PROPERTY, "false"));
    mIsV2Signer = "true".equals(props.getProperty(V2_SIGNER_TYPE_COS_PROPERTY, "false"));

    // Define COS client
    String accessKey = props.getProperty(ACCESS_KEY_COS_PROPERTY);
    String secretKey = props.getProperty(SECRET_KEY_COS_PROPERTY);
    if (accessKey == null) {
        throw new ConfigurationParseException("Access KEY is empty. Please provide valid access key");
    }
    if (secretKey == null) {
        throw new ConfigurationParseException("Secret KEY is empty. Please provide valid secret key");
    }
    BasicAWSCredentials creds = new BasicAWSCredentials(accessKey, secretKey);

    ClientConfiguration clientConf = new ClientConfiguration();

    int maxThreads = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_THREADS, DEFAULT_MAX_THREADS);
    if (maxThreads < 2) {
        LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2.");
        maxThreads = 2;
    }
    int totalTasks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS);
    long keepAliveTime = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
    threadPoolExecutor = BlockingThreadPoolExecutorService.newInstance(maxThreads, maxThreads + totalTasks,
            keepAliveTime, TimeUnit.SECONDS, "s3a-transfer-shared");
    unboundedThreadPool = new ThreadPoolExecutor(maxThreads, Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            BlockingThreadPoolExecutorService.newDaemonThreadFactory("s3a-transfer-unbounded"));

    boolean secureConnections = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, SECURE_CONNECTIONS,
            DEFAULT_SECURE_CONNECTIONS);
    clientConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);

    String proxyHost = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_HOST, "");
    int proxyPort = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, PROXY_PORT, -1);
    if (!proxyHost.isEmpty()) {
        clientConf.setProxyHost(proxyHost);
        if (proxyPort >= 0) {
            clientConf.setProxyPort(proxyPort);
        } else {
            if (secureConnections) {
                LOG.warn("Proxy host set without port. Using HTTPS default 443");
                clientConf.setProxyPort(443);
            } else {
                LOG.warn("Proxy host set without port. Using HTTP default 80");
                clientConf.setProxyPort(80);
            }
        }
        String proxyUsername = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_USERNAME);
        String proxyPassword = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_PASSWORD);
        if ((proxyUsername == null) != (proxyPassword == null)) {
            String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD + " set without the other.";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        clientConf.setProxyUsername(proxyUsername);
        clientConf.setProxyPassword(proxyPassword);
        clientConf.setProxyDomain(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_DOMAIN));
        clientConf.setProxyWorkstation(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_WORKSTATION));
        if (LOG.isDebugEnabled()) {
            LOG.debug("Using proxy server {}:{} as user {} with password {} on " + "domain {} as workstation {}",
                    clientConf.getProxyHost(), clientConf.getProxyPort(),
                    String.valueOf(clientConf.getProxyUsername()), clientConf.getProxyPassword(),
                    clientConf.getProxyDomain(), clientConf.getProxyWorkstation());
        }
    } else if (proxyPort >= 0) {
        String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
        LOG.error(msg);
        throw new IllegalArgumentException(msg);
    }

    initConnectionSettings(conf, clientConf);
    if (mIsV2Signer) {
        clientConf.withSignerOverride("S3SignerType");
    }
    mClient = new AmazonS3Client(creds, clientConf);

    final String serviceUrl = props.getProperty(ENDPOINT_URL_COS_PROPERTY);
    if (serviceUrl != null && !serviceUrl.equals(amazonDefaultEndpoint)) {
        mClient.setEndpoint(serviceUrl);
    }
    mClient.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());

    // Set block size property
    String mBlockSizeString = props.getProperty(BLOCK_SIZE_COS_PROPERTY, "128");
    mBlockSize = Long.valueOf(mBlockSizeString).longValue() * 1024 * 1024L;

    boolean autoCreateBucket = "true"
            .equalsIgnoreCase((props.getProperty(AUTO_BUCKET_CREATE_COS_PROPERTY, "false")));

    partSize = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
    multiPartThreshold = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MIN_MULTIPART_THRESHOLD,
            DEFAULT_MIN_MULTIPART_THRESHOLD);
    readAhead = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, READAHEAD_RANGE, DEFAULT_READAHEAD_RANGE);
    LOG.debug(READAHEAD_RANGE + ":" + readAhead);
    inputPolicy = COSInputPolicy
            .getPolicy(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, INPUT_FADVISE, INPUT_FADV_NORMAL));

    initTransferManager();
    maxKeys = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
    flatListingFlag = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FLAT_LISTING, DEFAULT_FLAT_LISTING);

    if (autoCreateBucket) {
        try {
            boolean bucketExist = mClient.doesBucketExist(mBucket);
            if (bucketExist) {
                LOG.trace("Bucket {} exists", mBucket);
            } else {
                LOG.trace("Bucket {} doesn`t exists and autocreate", mBucket);
                String mRegion = props.getProperty(REGION_COS_PROPERTY);
                if (mRegion == null) {
                    mClient.createBucket(mBucket);
                } else {
                    LOG.trace("Creating bucket {} in region {}", mBucket, mRegion);
                    mClient.createBucket(mBucket, mRegion);
                }
            }
        } catch (AmazonServiceException ase) {
            /*
             * we ignore the BucketAlreadyExists exception since multiple processes or threads
             * might try to create the bucket in parrallel, therefore it is expected that
             * some will fail to create the bucket
             */
            if (!ase.getErrorCode().equals("BucketAlreadyExists")) {
                LOG.error(ase.getMessage());
                throw (ase);
            }
        } catch (Exception e) {
            LOG.error(e.getMessage());
            throw (e);
        }
    }

    initMultipartUploads(conf);
    enableMultiObjectsDelete = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, ENABLE_MULTI_DELETE, true);

    blockUploadEnabled = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD, DEFAULT_FAST_UPLOAD);
    if (blockUploadEnabled) {
        blockOutputBuffer = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD_BUFFER,
                DEFAULT_FAST_UPLOAD_BUFFER);
        partSize = COSUtils.ensureOutputParameterInRange(MULTIPART_SIZE, partSize);
        blockFactory = COSDataBlocks.createFactory(this, blockOutputBuffer);
        blockOutputActiveBlocks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD_ACTIVE_BLOCKS,
                DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS);
        LOG.debug("Using COSBlockOutputStream with buffer = {}; block={};" + " queue limit={}",
                blockOutputBuffer, partSize, blockOutputActiveBlocks);
    } else {
        LOG.debug("Using COSOutputStream");
    }
}