Example usage for com.amazonaws ClientConfiguration ClientConfiguration

List of usage examples for com.amazonaws ClientConfiguration ClientConfiguration

Introduction

On this page you can find usage examples for the com.amazonaws ClientConfiguration constructor ClientConfiguration().

Prototype

public ClientConfiguration() 
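
A minimal sketch of how the no-argument constructor is typically used (not taken from the examples below): create the configuration, adjust a few settings, and pass it to a client. The timeout and retry values and the accessKey/secretKey variables are illustrative placeholders only.

ClientConfiguration clientConfig = new ClientConfiguration();
clientConfig.setProtocol(Protocol.HTTPS);      // assumed: use HTTPS for transport
clientConfig.setConnectionTimeout(10 * 1000);  // assumed: 10 second connect timeout
clientConfig.setSocketTimeout(50 * 1000);      // assumed: 50 second socket timeout
clientConfig.setMaxErrorRetry(3);              // assumed: retry failed requests up to 3 times

AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
AmazonS3Client s3Client = new AmazonS3Client(credentials, clientConfig);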

Usage

From source file: org.apache.streams.s3.S3PersistWriter.java

License: Apache License

public void prepare(Object configurationObject) {
    // Connect to S3
    synchronized (this) {

        try {
            // if the user has chosen to not set the object mapper, then set a default object mapper for them.
            if (this.objectMapper == null)
                this.objectMapper = new StreamsJacksonMapper();

            // Create the credentials Object
            if (this.amazonS3Client == null) {
                AWSCredentials credentials = new BasicAWSCredentials(s3WriterConfiguration.getKey(),
                        s3WriterConfiguration.getSecretKey());

                ClientConfiguration clientConfig = new ClientConfiguration();
                clientConfig.setProtocol(Protocol.valueOf(s3WriterConfiguration.getProtocol().toString()));

                // We do not want path style access
                S3ClientOptions clientOptions = new S3ClientOptions();
                clientOptions.setPathStyleAccess(false);

                this.amazonS3Client = new AmazonS3Client(credentials, clientConfig);
                if (!Strings.isNullOrEmpty(s3WriterConfiguration.getRegion()))
                    this.amazonS3Client
                            .setRegion(Region.getRegion(Regions.fromName(s3WriterConfiguration.getRegion())));
                this.amazonS3Client.setS3ClientOptions(clientOptions);
            }
        } catch (Exception e) {
            LOGGER.error("Exception while preparing the S3 client: {}", e);
        }

        Preconditions.checkArgument(this.amazonS3Client != null);
    }
}

From source file: org.apache.tajo.storage.s3.S3TableSpace.java

License: Apache License

@Override
public void init(TajoConf tajoConf) throws IOException {
    super.init(tajoConf);

    try {
        // Try to get our credentials or just connect anonymously
        String accessKey = conf.get(ACCESS_KEY, null);
        String secretKey = conf.get(SECRET_KEY, null);

        String userInfo = uri.getUserInfo();
        if (userInfo != null) {
            int index = userInfo.indexOf(':');
            if (index != -1) {
                accessKey = userInfo.substring(0, index);
                secretKey = userInfo.substring(index + 1);
            } else {
                accessKey = userInfo;
            }
        }

        AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
                new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(),
                new AnonymousAWSCredentialsProvider());

        ClientConfiguration awsConf = new ClientConfiguration();
        awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS));
        boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
        awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
        awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES));
        awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT, DEFAULT_ESTABLISH_TIMEOUT));
        awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT));

        String proxyHost = conf.getTrimmed(PROXY_HOST, "");
        int proxyPort = conf.getInt(PROXY_PORT, -1);
        if (!proxyHost.isEmpty()) {
            awsConf.setProxyHost(proxyHost);
            if (proxyPort >= 0) {
                awsConf.setProxyPort(proxyPort);
            } else {
                if (secureConnections) {
                    LOG.warn("Proxy host set without port. Using HTTPS default 443");
                    awsConf.setProxyPort(443);
                } else {
                    LOG.warn("Proxy host set without port. Using HTTP default 80");
                    awsConf.setProxyPort(80);
                }
            }
            String proxyUsername = conf.getTrimmed(PROXY_USERNAME);
            String proxyPassword = conf.getTrimmed(PROXY_PASSWORD);
            if ((proxyUsername == null) != (proxyPassword == null)) {
                String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD
                        + " set without the other.";
                LOG.error(msg);
            }
            awsConf.setProxyUsername(proxyUsername);
            awsConf.setProxyPassword(proxyPassword);
            awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
            awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
            if (LOG.isDebugEnabled()) {
                LOG.debug(String.format(
                        "Using proxy server %s:%d as user %s with password %s on domain %s as workstation "
                                + "%s",
                        awsConf.getProxyHost(), awsConf.getProxyPort(), awsConf.getProxyUsername(),
                        awsConf.getProxyPassword(), awsConf.getProxyDomain(), awsConf.getProxyWorkstation()));
            }
        } else if (proxyPort >= 0) {
            String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
            LOG.error(msg);
        }

        s3 = new AmazonS3Client(credentials, awsConf);
        String endPoint = conf.getTrimmed(ENDPOINT, "");
        if (!endPoint.isEmpty()) {
            try {
                s3.setEndpoint(endPoint);
            } catch (IllegalArgumentException e) {
                String msg = "Incorrect endpoint: " + e.getMessage();
                LOG.error(msg);
            }
        }

        maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
        s3Enabled = true;
    } catch (NoClassDefFoundError e) {
        // Hadoop versions earlier than 2.6.0 don't include the AWS dependencies because they don't provide
        // S3AFileSystem. In that case, Tajo never uses the AWS S3 API directly.
        LOG.warn(e);
        s3Enabled = false;
    } catch (Exception e) {
        throw new TajoInternalError(e);
    }
}

From source file: org.apache.usergrid.chop.api.store.amazon.AmazonUtils.java

License: Apache License

/**
 * Creates an EC2 client. When both keys are supplied, static credentials are
 * used; otherwise the default AWS credentials provider chain is consulted.
 *
 * @param accessKey the AWS access key, or null to use the default provider chain
 * @param secretKey the AWS secret key, or null to use the default provider chain
 * @return an EC2 client configured to use HTTPS
 */
public static AmazonEC2Client getEC2Client(String accessKey, String secretKey) {
    AWSCredentialsProvider provider;
    if (accessKey != null && secretKey != null) {
        AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
        provider = new StaticCredentialsProvider(credentials);
    } else {
        provider = new DefaultAWSCredentialsProviderChain();
    }

    AmazonEC2Client client = new AmazonEC2Client(provider);

    ClientConfiguration configuration = new ClientConfiguration();
    configuration.setProtocol(Protocol.HTTPS);
    client.setConfiguration(configuration);
    return client;
}

From source file: org.apache.usergrid.persistence.queue.impl.SNSQueueManagerImpl.java

License: Apache License

@Inject
public SNSQueueManagerImpl(@Assisted LegacyQueueScope scope, LegacyQueueFig fig, ClusterFig clusterFig,
        CassandraConfig cassandraConfig, LegacyQueueFig queueFig) {
    this.scope = scope;
    this.fig = fig;
    this.clusterFig = clusterFig;
    this.cassandraConfig = cassandraConfig;

    // create our own executor which has a bounded queue w/ caller runs policy for rejected tasks
    final ExecutorService executor = TaskExecutorFactory.createTaskExecutor("amazon-async-io",
            queueFig.getAsyncMaxThreads(), queueFig.getAsyncQueueSize(),
            TaskExecutorFactory.RejectionAction.CALLERRUNS);

    final Region region = getRegion();

    this.clientConfiguration = new ClientConfiguration()
            .withConnectionTimeout(queueFig.getQueueClientConnectionTimeout())
            // don't let the socket timeout be configured less than 5 sec (network delays do happen)
            .withSocketTimeout(Math.max(MIN_CLIENT_SOCKET_TIMEOUT, queueFig.getQueueClientSocketTimeout()))
            .withGzip(true);

    try {
        sqs = createSQSClient(region);
        sns = createSNSClient(region);
        snsAsync = createAsyncSNSClient(region, executor);
        sqsAsync = createAsyncSQSClient(region, executor);
    } catch (Exception e) {
        throw new RuntimeException("Error setting up mapper", e);
    }
}

From source file: org.apache.usergrid.services.assets.data.AWSBinaryStore.java

License: Apache License

private AmazonS3 getS3Client() throws Exception {

    this.bucketName = properties.getProperty("usergrid.binary.bucketname");
    if (bucketName == null) {
        logger.error("usergrid.binary.bucketname not properly set so amazon bucket is null");
        throw new AwsPropertiesNotFoundException("usergrid.binary.bucketname");

    }

    final UsergridAwsCredentialsProvider ugProvider = new UsergridAwsCredentialsProvider();
    AWSCredentials credentials = ugProvider.getCredentials();
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTP);

    s3Client = new AmazonS3Client(credentials, clientConfig);
    if (regionName != null)
        s3Client.setRegion(Region.getRegion(Regions.fromName(regionName)));

    return s3Client;
}

From source file: org.apache.usergrid.tools.WarehouseExport.java

License: Apache License

private void copyToS3(String fileName) {

    String bucketName = (String) properties.get(BUCKET_PROPNAME);
    String accessId = (String) properties.get(ACCESS_ID_PROPNAME);
    String secretKey = (String) properties.get(SECRET_KEY_PROPNAME);

    Properties overrides = new Properties();
    overrides.setProperty("s3" + ".identity", accessId);
    overrides.setProperty("s3" + ".credential", secretKey);

    final Iterable<? extends Module> MODULES = ImmutableSet.of(new JavaUrlHttpCommandExecutorServiceModule(),
            new Log4JLoggingModule(), new NettyPayloadModule());

    AWSCredentials credentials = new BasicAWSCredentials(accessId, secretKey);
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTP);

    AmazonS3Client s3Client = new AmazonS3Client(credentials, clientConfig);

    s3Client.createBucket(bucketName);
    File uploadFile = new File(fileName);
    PutObjectResult putObjectResult = s3Client.putObject(bucketName, uploadFile.getName(), uploadFile);
    logger.info("Uploaded file etag={}", putObjectResult.getETag());
}

From source file: org.cloudfoundry.community.servicebroker.s3.config.AwsClientConfiguration.java

License: Apache License

public ClientConfiguration toClientConfiguration() {
    ClientConfiguration clientConfiguration = new ClientConfiguration();
    clientConfiguration.setProxyHost(proxyHost);
    if (proxyPort != null) {
        clientConfiguration.setProxyPort(Integer.parseInt(proxyPort));
    }
    clientConfiguration.setProxyUsername(proxyUsername);
    clientConfiguration.setProxyPassword(proxyPassword);
    if (preemptiveBasicProxyAuth != null) {
        clientConfiguration.setPreemptiveBasicProxyAuth(preemptiveBasicProxyAuth);
    }
    return clientConfiguration;
}

From source file: org.diksha.common.dyutils.SchedulerUDE.java

License: Apache License

public static SchedulerWorkflowClientExternal getScheduler(String clientId, SchedulerConfig schedulerConfig) {
    AWSCredentials awsCredentials = DyDBUtils.getAwsCredentials();

    ClientConfiguration config = new ClientConfiguration()
            .withSocketTimeout(Integer.parseInt(schedulerConfig.getSocketTimeout()));
    AmazonSimpleWorkflow service = new AmazonSimpleWorkflowClient(awsCredentials, config);

    service.setEndpoint(schedulerConfig.getEndPoint());
    String domain = schedulerConfig.getDomain();

    SchedulerWorkflowClientExternalFactory factory = new SchedulerWorkflowClientExternalFactoryImpl(service,
            domain);

    SchedulerWorkflowClientExternal scheduler = factory.getClient(clientId);

    return scheduler;
}

From source file: org.diksha.engine.SchedulerWorker.java

License: Apache License

public static void main(String[] args) throws Exception {

    ClientConfiguration config = new ClientConfiguration().withSocketTimeout(70 * 1000);

    String swfAccessId = System.getenv("AWS_ACCESS_KEY_ID");
    String swfSecretKey = System.getenv("AWS_SECRET_ACCESS_KEY");
    AWSCredentials awsCredentials = new BasicAWSCredentials(swfAccessId, swfSecretKey);

    AmazonSimpleWorkflow service = new AmazonSimpleWorkflowClient(awsCredentials, config);

    String configParam;

    if (args.length == 0) {
        configParam = "cf1";
    } else {
        configParam = args[0];
    }

    SchedulerConfig schedulerConfig = DyDBUtils.getSchedulerConfig(configParam);

    service.setEndpoint(schedulerConfig.getEndPoint());

    String domain = schedulerConfig.getDomain();
    String taskListToPoll = schedulerConfig.getTaskList();

    try {
        ActivityWorker aw = new ActivityWorker(service, domain, taskListToPoll);
        aw.addActivitiesImplementation(new SchedulerActivitiesImpl());
        aw.start();

        WorkflowWorker wfw = new WorkflowWorker(service, domain, taskListToPoll);
        wfw.addWorkflowImplementationType(SchedulerWorkflowImpl.class);
        wfw.start();

    } catch (Exception e) {
        System.out.println("should have caught it");
    }

}

From source file: org.eclipse.hawkbit.artifact.repository.S3RepositoryAutoConfiguration.java

License: Open Source License

/**
 * The default AmazonS3 client configuration, which declares how connections
 * to S3 are managed.
 *
 * @return the default {@link ClientConfiguration} bean with the default
 *         client configuration
 */
@Bean
@ConditionalOnMissingBean
public ClientConfiguration awsClientConfiguration() {
    return new ClientConfiguration();
}
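
For context, here is a hypothetical sketch of how such a configuration bean might be consumed by another bean in the same Spring configuration; the amazonS3 bean, the builder call, and the region are assumptions and are not part of the source file above.

@Bean
@ConditionalOnMissingBean
public AmazonS3 amazonS3(final ClientConfiguration awsClientConfiguration) {
    // Hypothetical consumer of the ClientConfiguration bean declared above.
    // Credentials come from the default provider chain; the region is a placeholder.
    return AmazonS3ClientBuilder.standard().withClientConfiguration(awsClientConfiguration)
            .withRegion(Regions.US_EAST_1).build();
}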