Example usage for com.amazonaws.services.s3 AmazonS3Client AmazonS3Client

Introduction

On this page you can find example usage of the com.amazonaws.services.s3.AmazonS3Client(AWSCredentialsProvider, ClientConfiguration) constructor.

Prototype

@Deprecated
public AmazonS3Client(AWSCredentialsProvider credentialsProvider, ClientConfiguration clientConfiguration) 

Document

Constructs a new Amazon S3 client using the specified AWS credentials provider and client configuration to access Amazon S3.

Usage

From source file:org.apache.nifi.processors.aws.s3.AbstractS3Processor.java

License:Apache License

/**
 * Create client using credentials provider. This is the preferred way for creating clients
 */
@Override
protected AmazonS3Client createClient(final ProcessContext context,
        final AWSCredentialsProvider credentialsProvider, final ClientConfiguration config) {
    getLogger().info("Creating client with credentials provider");

    initializeSignerOverride(context, config);

    final AmazonS3Client s3 = new AmazonS3Client(credentialsProvider, config);

    initalizeEndpointOverride(context, s3);

    return s3;
}

From source file:org.apache.nifi.processors.aws.s3.AbstractS3Processor.java

License:Apache License

/**
 * Create client using AWSCredentials
 *
 * @deprecated use {@link #createClient(ProcessContext, AWSCredentialsProvider, ClientConfiguration)} instead
 */
@Override
protected AmazonS3Client createClient(final ProcessContext context, final AWSCredentials credentials,
        final ClientConfiguration config) {
    getLogger().info("Creating client with AWS credentials");

    initializeSignerOverride(context, config);

    final AmazonS3Client s3 = new AmazonS3Client(credentials, config);

    initalizeEndpointOverride(context, s3);

    return s3;
}

From source file:org.apache.storm.s3.output.UploaderFactory.java

License:Apache License

public static Uploader buildUploader(Map conf) {
    Protocol protocol = Protocol.HTTPS;
    String proxy = null;
    int proxyPort = 0;
    if (conf.containsKey(S3_PROTOCOL)) {
        protocol = Protocol.valueOf((String) conf.get(S3_PROTOCOL));
    }
    if (conf.containsKey(S3_PROXY)) {
        proxy = (String) conf.get(S3_PROXY);
    }
    if (conf.containsKey(S3_PROXY_PORT)) {
        proxyPort = ((Long) conf.get(S3_PROXY_PORT)).intValue();
    }
    AWSCredentialsProvider provider = new DefaultAWSCredentialsProviderChain();
    AWSCredentials credentials = provider.getCredentials();
    ClientConfiguration config = new ClientConfiguration().withProtocol(protocol);
    if (proxy != null) {
        config.withProxyHost(proxy);
    }
    if (proxyPort != 0) {
        config.withProxyPort(proxyPort);
    }
    AmazonS3 client = new AmazonS3Client(credentials, config);
    if (conf.containsKey(S3_ENDPOINT)) {
        client.setEndpoint((String) conf.get(S3_ENDPOINT));
    }
    return getUploader(conf, client);
}

From source file:org.apache.streams.s3.S3PersistReader.java

License:Apache License

public void prepare(Object configurationObject) {
    // Connect to S3
    synchronized (this) {
        // Create the credentials Object
        AWSCredentials credentials = new BasicAWSCredentials(s3ReaderConfiguration.getKey(),
                s3ReaderConfiguration.getSecretKey());

        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setProtocol(Protocol.valueOf(s3ReaderConfiguration.getProtocol().toString()));

        // We do not want path style access
        S3ClientOptions clientOptions = new S3ClientOptions();
        clientOptions.setPathStyleAccess(false);

        this.amazonS3Client = new AmazonS3Client(credentials, clientConfig);
        if (!Strings.isNullOrEmpty(s3ReaderConfiguration.getRegion()))
            this.amazonS3Client
                    .setRegion(Region.getRegion(Regions.fromName(s3ReaderConfiguration.getRegion())));
        this.amazonS3Client.setS3ClientOptions(clientOptions);
    }

    final ListObjectsRequest request = new ListObjectsRequest()
            .withBucketName(this.s3ReaderConfiguration.getBucket())
            .withPrefix(s3ReaderConfiguration.getReaderPath()).withMaxKeys(500);

    ObjectListing listing = this.amazonS3Client.listObjects(request);

    this.files = new ArrayList<String>();

    /**
     * If you can list files that are in this path, then you must be dealing with a directory
     * if you cannot list files that are in this path, then you are most likely dealing with
     * a simple file.
     */
    boolean hasCommonPrefixes = listing.getCommonPrefixes().size() > 0 ? true : false;
    boolean hasObjectSummaries = listing.getObjectSummaries().size() > 0 ? true : false;

    if (hasCommonPrefixes || hasObjectSummaries) {
        // Handle the 'directory' use case
        do {
            if (hasCommonPrefixes) {
                for (String file : listing.getCommonPrefixes()) {
                    this.files.add(file);
                }
            } else {
                for (final S3ObjectSummary objectSummary : listing.getObjectSummaries()) {
                    this.files.add(objectSummary.getKey());
                }
            }

            // get the next batch.
            listing = this.amazonS3Client.listNextBatchOfObjects(listing);
        } while (listing.isTruncated());
    } else {
        // handle the single file use-case
        this.files.add(s3ReaderConfiguration.getReaderPath());
    }

    if (this.files.size() <= 0)
        LOGGER.error("There are no files to read");

    this.persistQueue = Queues.synchronizedQueue(new LinkedBlockingQueue<StreamsDatum>(10000));
    this.executor = Executors.newSingleThreadExecutor();
}

From source file:org.apache.streams.s3.S3PersistWriter.java

License:Apache License

public void prepare(Object configurationObject) {
    // Connect to S3
    synchronized (this) {

        try {
            // if the user has chosen to not set the object mapper, then set a default object mapper for them.
            if (this.objectMapper == null)
                this.objectMapper = new StreamsJacksonMapper();

            // Create the credentials Object
            if (this.amazonS3Client == null) {
                AWSCredentials credentials = new BasicAWSCredentials(s3WriterConfiguration.getKey(),
                        s3WriterConfiguration.getSecretKey());

                ClientConfiguration clientConfig = new ClientConfiguration();
                clientConfig.setProtocol(Protocol.valueOf(s3WriterConfiguration.getProtocol().toString()));

                // We do not want path style access
                S3ClientOptions clientOptions = new S3ClientOptions();
                clientOptions.setPathStyleAccess(false);

                this.amazonS3Client = new AmazonS3Client(credentials, clientConfig);
                if (!Strings.isNullOrEmpty(s3WriterConfiguration.getRegion()))
                    this.amazonS3Client
                            .setRegion(Region.getRegion(Regions.fromName(s3WriterConfiguration.getRegion())));
                this.amazonS3Client.setS3ClientOptions(clientOptions);
            }
        } catch (Exception e) {
            LOGGER.error("Exception while preparing the S3 client: {}", e);
        }

        Preconditions.checkArgument(this.amazonS3Client != null);
    }
}

From source file:org.apache.tajo.storage.s3.S3TableSpace.java

License:Apache License

@Override
public void init(TajoConf tajoConf) throws IOException {
    super.init(tajoConf);

    try {
        // Try to get our credentials or just connect anonymously
        String accessKey = conf.get(ACCESS_KEY, null);
        String secretKey = conf.get(SECRET_KEY, null);

        String userInfo = uri.getUserInfo();
        if (userInfo != null) {
            int index = userInfo.indexOf(':');
            if (index != -1) {
                accessKey = userInfo.substring(0, index);
                secretKey = userInfo.substring(index + 1);
            } else {
                accessKey = userInfo;
            }
        }

        AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
                new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(),
                new AnonymousAWSCredentialsProvider());

        ClientConfiguration awsConf = new ClientConfiguration();
        awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS));
        boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
        awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
        awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES));
        awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT, DEFAULT_ESTABLISH_TIMEOUT));
        awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT));

        String proxyHost = conf.getTrimmed(PROXY_HOST, "");
        int proxyPort = conf.getInt(PROXY_PORT, -1);
        if (!proxyHost.isEmpty()) {
            awsConf.setProxyHost(proxyHost);
            if (proxyPort >= 0) {
                awsConf.setProxyPort(proxyPort);
            } else {
                if (secureConnections) {
                    LOG.warn("Proxy host set without port. Using HTTPS default 443");
                    awsConf.setProxyPort(443);
                } else {
                    LOG.warn("Proxy host set without port. Using HTTP default 80");
                    awsConf.setProxyPort(80);
                }
            }
            String proxyUsername = conf.getTrimmed(PROXY_USERNAME);
            String proxyPassword = conf.getTrimmed(PROXY_PASSWORD);
            if ((proxyUsername == null) != (proxyPassword == null)) {
                String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD
                        + " set without the other.";
                LOG.error(msg);
            }
            awsConf.setProxyUsername(proxyUsername);
            awsConf.setProxyPassword(proxyPassword);
            awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
            awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
            if (LOG.isDebugEnabled()) {
                LOG.debug(String.format(
                        "Using proxy server %s:%d as user %s with password %s on domain %s as workstation "
                                + "%s",
                        awsConf.getProxyHost(), awsConf.getProxyPort(), awsConf.getProxyUsername(),
                        awsConf.getProxyPassword(), awsConf.getProxyDomain(), awsConf.getProxyWorkstation()));
            }
        } else if (proxyPort >= 0) {
            String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
            LOG.error(msg);
        }

        s3 = new AmazonS3Client(credentials, awsConf);
        String endPoint = conf.getTrimmed(ENDPOINT, "");
        if (!endPoint.isEmpty()) {
            try {
                s3.setEndpoint(endPoint);
            } catch (IllegalArgumentException e) {
                String msg = "Incorrect endpoint: " + e.getMessage();
                LOG.error(msg);
            }
        }

        maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
        s3Enabled = true;
    } catch (NoClassDefFoundError e) {
        // If the version of hadoop is less than 2.6.0, hadoop doesn't include aws dependencies because it doesn't provide
        // S3AFileSystem. In this case, tajo never uses aws s3 api directly.
        LOG.warn(e);
        s3Enabled = false;
    } catch (Exception e) {
        throw new TajoInternalError(e);
    }
}

From source file:org.apache.usergrid.services.assets.data.AWSBinaryStore.java

License:Apache License

private AmazonS3 getS3Client() throws Exception {

    this.bucketName = properties.getProperty("usergrid.binary.bucketname");
    if (bucketName == null) {
        logger.error("usergrid.binary.bucketname not properly set so amazon bucket is null");
        throw new AwsPropertiesNotFoundException("usergrid.binary.bucketname");

    }

    final UsergridAwsCredentialsProvider ugProvider = new UsergridAwsCredentialsProvider();
    AWSCredentials credentials = ugProvider.getCredentials();
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTP);

    s3Client = new AmazonS3Client(credentials, clientConfig);
    if (regionName != null)
        s3Client.setRegion(Region.getRegion(Regions.fromName(regionName)));

    return s3Client;
}

From source file:org.apache.usergrid.tools.WarehouseExport.java

License:Apache License

private void copyToS3(String fileName) {

    String bucketName = (String) properties.get(BUCKET_PROPNAME);
    String accessId = (String) properties.get(ACCESS_ID_PROPNAME);
    String secretKey = (String) properties.get(SECRET_KEY_PROPNAME);

    Properties overrides = new Properties();
    overrides.setProperty("s3" + ".identity", accessId);
    overrides.setProperty("s3" + ".credential", secretKey);

    final Iterable<? extends Module> MODULES = ImmutableSet.of(new JavaUrlHttpCommandExecutorServiceModule(),
            new Log4JLoggingModule(), new NettyPayloadModule());

    AWSCredentials credentials = new BasicAWSCredentials(accessId, secretKey);
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTP);

    AmazonS3Client s3Client = new AmazonS3Client(credentials, clientConfig);

    s3Client.createBucket(bucketName);
    File uploadFile = new File(fileName);
    PutObjectResult putObjectResult = s3Client.putObject(bucketName, uploadFile.getName(), uploadFile);
    logger.info("Uploaded file etag={}", putObjectResult.getETag());
}

From source file:org.apache.zeppelin.notebook.repo.OldS3NotebookRepo.java

License:Apache License

public void init(ZeppelinConfiguration conf) throws IOException {
    this.conf = conf;
    bucketName = conf.getS3BucketName();
    user = conf.getS3User();
    useServerSideEncryption = conf.isS3ServerSideEncryption();

    // always use the default provider chain
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    CryptoConfiguration cryptoConf = new CryptoConfiguration();
    String keyRegion = conf.getS3KMSKeyRegion();

    if (StringUtils.isNotBlank(keyRegion)) {
        cryptoConf.setAwsKmsRegion(Region.getRegion(Regions.fromName(keyRegion)));
    }

    ClientConfiguration cliConf = createClientConfiguration();

    // see if we should be encrypting data in S3
    String kmsKeyID = conf.getS3KMSKeyID();
    if (kmsKeyID != null) {
        // use the AWS KMS to encrypt data
        KMSEncryptionMaterialsProvider emp = new KMSEncryptionMaterialsProvider(kmsKeyID);
        this.s3client = new AmazonS3EncryptionClient(credentialsProvider, emp, cliConf, cryptoConf);
    } else if (conf.getS3EncryptionMaterialsProviderClass() != null) {
        // use a custom encryption materials provider class
        EncryptionMaterialsProvider emp = createCustomProvider(conf);
        this.s3client = new AmazonS3EncryptionClient(credentialsProvider, emp, cliConf, cryptoConf);
    } else {
        // regular S3
        this.s3client = new AmazonS3Client(credentialsProvider, cliConf);
    }

    // set S3 endpoint to use
    s3client.setEndpoint(conf.getS3Endpoint());
}

From source file:org.apache.zeppelin.notebook.repo.S3NotebookRepo.java

License:Apache License

public void init(ZeppelinConfiguration conf) throws IOException {
    this.conf = conf;
    bucketName = conf.getS3BucketName();
    user = conf.getS3User();
    rootFolder = user + "/notebook";
    useServerSideEncryption = conf.isS3ServerSideEncryption();

    // always use the default provider chain
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    CryptoConfiguration cryptoConf = new CryptoConfiguration();
    String keyRegion = conf.getS3KMSKeyRegion();

    if (StringUtils.isNotBlank(keyRegion)) {
        cryptoConf.setAwsKmsRegion(Region.getRegion(Regions.fromName(keyRegion)));
    }

    ClientConfiguration cliConf = createClientConfiguration();

    // see if we should be encrypting data in S3
    String kmsKeyID = conf.getS3KMSKeyID();
    if (kmsKeyID != null) {
        // use the AWS KMS to encrypt data
        KMSEncryptionMaterialsProvider emp = new KMSEncryptionMaterialsProvider(kmsKeyID);
        this.s3client = new AmazonS3EncryptionClient(credentialsProvider, emp, cliConf, cryptoConf);
    } else if (conf.getS3EncryptionMaterialsProviderClass() != null) {
        // use a custom encryption materials provider class
        EncryptionMaterialsProvider emp = createCustomProvider(conf);
        this.s3client = new AmazonS3EncryptionClient(credentialsProvider, emp, cliConf, cryptoConf);
    } else {
        // regular S3
        this.s3client = new AmazonS3Client(credentialsProvider, cliConf);
    }

    // set S3 endpoint to use
    s3client.setEndpoint(conf.getS3Endpoint());
}