Example usage for com.amazonaws.services.s3 AmazonS3 setEndpoint

Introduction

On this page you can find example usage of the com.amazonaws.services.s3.AmazonS3 method setEndpoint.

Prototype

public void setEndpoint(String endpoint);

Document

Overrides the default endpoint for this client.
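
All of the examples below follow the same basic pattern: build a client, call setEndpoint to point it at a regional or S3-compatible endpoint, then issue requests. A minimal sketch of that pattern, using placeholder credentials and an illustrative regional endpoint rather than values taken from any example below:

import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class SetEndpointSketch {
    public static void main(String[] args) {
        // Placeholder credentials; real code would normally use a credentials provider chain.
        AmazonS3 s3 = new AmazonS3Client(new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY"));

        // Override the default endpoint (s3.amazonaws.com) before issuing any requests.
        s3.setEndpoint("https://s3.eu-west-1.amazonaws.com");

        System.out.println(s3.listBuckets());
    }
}

Note that later AWS SDK for Java 1.x releases deprecate setEndpoint in favor of configuring the endpoint and signing region through AmazonS3ClientBuilder.withEndpointConfiguration.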

Usage

From source file:jp.dqneo.amazons3.sample.S3Sample.java

License:Open Source License

public static void main(String[] args) throws IOException {
    /*
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(
            new PropertiesCredentials(S3Sample.class.getResourceAsStream("AwsCredentials.properties")));

    s3.setEndpoint("https://s3-ap-northeast-1.amazonaws.com");

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "2/foo/bar";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        PutObjectRequest p = new PutObjectRequest(bucketName, key, createSampleFile());
        s3.putObject(p);
        System.out.println("ok\n");

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
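
The comment in the sample above also mentions uploading directly from an InputStream with caller-supplied metadata such as content type. A minimal sketch of that variant, assuming the same s3 client, bucketName and key as in the sample (the byte array is just placeholder data):

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

class StreamUploadSketch {
    static void uploadFromStream(AmazonS3 s3, String bucketName, String key) {
        byte[] data = "hello from a stream".getBytes(StandardCharsets.UTF_8);

        // When uploading from a stream, supply the content length up front and set
        // any object metadata (content type, content encoding, ...) explicitly.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(data.length);
        metadata.setContentType("text/plain");

        s3.putObject(new PutObjectRequest(bucketName, key, new ByteArrayInputStream(data), metadata));
    }
}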

From source file:mail.server.storage.AWSStorageCreation.java

License:GNU General Public License

public Map<String, String> create(String email, String region) throws Exception {
    log.debug("I will now figure out what region to put things in", region);
    Region awsRegion = Region.valueOf(region);
    String awsRegionString = awsRegion.toString();
    if (awsRegionString == null)
        awsRegionString = "";

    String awsRegionStringEndPoint = awsRegionString.isEmpty() ? "s3.amazonaws.com"
            : ("s3-" + awsRegionString + ".amazonaws.com");

    log.debug("I will now log in to S3 and the IdentityManagement to check these credentials.");

    SimpleAWSCredentials credentials = new SimpleAWSCredentials(awsAccessKeyId, awsSecretKey);
    AmazonS3 s3 = new AmazonS3Client(credentials);
    AmazonIdentityManagement im = new AmazonIdentityManagementClient(credentials);

    log.debug("Successfully logged into S3");

    log.debug("I will now derive names for items");
    deriveNames(generateBucketName(email));

    log.debug("I will now try to:\n" + "  1. Create the S3 Bucket with name ", bucketName,
            "\n" + "  2. Create two IAM Identities for permissions -\n" + "       ", writeIdentity,
            " to be sent to the mail server to be able to write to the mailbox.\n" + "       ", writeIdentity,
            " to be stored in your configuration to enable the mail client to read and write mail.\n\n");

    s3.setEndpoint(awsRegionStringEndPoint);
    s3.createBucket(bucketName, awsRegion);

    log.debug("Setting website configuration");

    BucketWebsiteConfiguration bwc = new BucketWebsiteConfiguration("index.html");
    s3.setBucketWebsiteConfiguration(bucketName, bwc);
    log.debug("Done");

    log.debug("Enabling CORS");
    CORSRule rule1 = new CORSRule().withId("CORSRule1")
            .withAllowedMethods(Arrays.asList(new CORSRule.AllowedMethods[] { CORSRule.AllowedMethods.GET,
                    CORSRule.AllowedMethods.PUT, CORSRule.AllowedMethods.DELETE }))
            .withAllowedOrigins(Arrays.asList(new String[] { "*" })).withMaxAgeSeconds(3000)
            .withAllowedHeaders(Arrays.asList(new String[] { "*" }))
            .withExposedHeaders(Arrays.asList(new String[] { "ETag" }));

    BucketCrossOriginConfiguration cors = new BucketCrossOriginConfiguration();
    cors.setRules(Arrays.asList(new CORSRule[] { rule1 }));

    s3.setBucketCrossOriginConfiguration(bucketName, cors);
    log.debug("Done");

    log.format("Creating group %s ... ", groupName);
    im.createGroup(new CreateGroupRequest().withGroupName(groupName));
    log.debug("Done");

    log.format("Creating user %s ... ", writeIdentity);
    im.createUser(new CreateUserRequest().withUserName(writeIdentity));
    log.debug("Done");

    log.format("Adding user %s to group %s ... ", writeIdentity, groupName);
    im.addUserToGroup(new AddUserToGroupRequest().withGroupName(groupName).withUserName(writeIdentity));
    log.debug("Done");

    log.format("Creating user %s ... ", readWriteIdentity);
    im.createUser(new CreateUserRequest().withUserName(readWriteIdentity));
    log.debug("Done");

    log.format("Adding user %s to group %s ... ", readWriteIdentity, groupName);
    im.addUserToGroup(new AddUserToGroupRequest().withGroupName(groupName).withUserName(readWriteIdentity));
    log.debug("Done");

    log.format("Creating permissions for %s to write to bucket %s ... \n", writeIdentity, bucketName);

    String writePolicyRaw = "{                        \n" + "  #Statement#: [            \n"
            + "    {                     \n" + "      #Sid#: #SID#,         \n"
            + "      #Action#: [            \n" + "        #s3:PutObject#,      \n"
            + "        #s3:PutObjectAcl#      \n" + "      ],                  \n"
            + "      #Effect#: #Allow#,      \n" + "      #Resource#: [         \n"
            + "        #arn:aws:s3:::BUCKET/*#\n" + "      ]                  \n"
            + "    }                     \n" + "  ]                     \n" + "}\n";

    String writePolicy = writePolicyRaw.replaceAll("#", "\"").replace("SID", policyWriteName).replace("BUCKET",
            bucketName);
    //      q.println ("Policy definition: " + writePolicy);
    im.putUserPolicy(new PutUserPolicyRequest().withUserName(writeIdentity).withPolicyDocument(writePolicy)
            .withPolicyName(policyWriteName));
    log.debug("Done");

    log.format("Creating permissions for %s to read/write to bucket %s ... \n", writeIdentity, bucketName);

    String readWritePolicyRaw = "{                        \n" + "  #Statement#: [            \n"
            + "  {                     \n" + "      #Sid#: #SID#,         \n"
            + "      #Action#: [            \n" + "        #s3:PutObject#,      \n"
            + "        #s3:PutObjectAcl#,      \n" + "        #s3:DeleteObject#,      \n"
            + "        #s3:Get*#,            \n" + "        #s3:List*#            \n"
            + "      ],                  \n" + "      #Effect#: #Allow#,      \n"
            + "      #Resource#: [         \n" + "        #arn:aws:s3:::BUCKET/*#,\n"
            + "        #arn:aws:s3:::BUCKET#   \n" + "      ]                  \n"
            + "    }                     \n" + "  ]                     \n" + "}\n";

    String readWritePolicy = readWritePolicyRaw.replaceAll("#", "\"").replace("SID", policyReadWriteName)
            .replace("BUCKET", bucketName);
    //      q.println ("Policy definition: " + readPolicy);
    im.putUserPolicy(new PutUserPolicyRequest().withUserName(readWriteIdentity)
            .withPolicyDocument(readWritePolicy).withPolicyName(policyReadWriteName));
    log.debug("Done");

    log.format("Requesting access key for %s", writeIdentity);
    writeAccessKey = im.createAccessKey(new CreateAccessKeyRequest().withUserName(writeIdentity))
            .getAccessKey();
    log.format("Received [%s] [%s] Done.\n", writeAccessKey.getAccessKeyId(),
            writeAccessKey.getSecretAccessKey());

    log.format("Requesting access key for %s", readWriteIdentity);
    readWriteAccessKey = im.createAccessKey(new CreateAccessKeyRequest().withUserName(readWriteIdentity))
            .getAccessKey();
    log.format("Received [%s] [%s] Done.\n", readWriteAccessKey.getAccessKeyId(),
            readWriteAccessKey.getSecretAccessKey());

    log.debug();
    log.debug("I have finished the creating the S3 items.\n");

    return Maps.toMap("bucketName", bucketName, "bucketRegion", awsRegionString, "writeAccessKey",
            writeAccessKey.getAccessKeyId(), "writeSecretKey", writeAccessKey.getSecretAccessKey(),
            "readWriteAccessKey", readWriteAccessKey.getAccessKeyId(), "readWriteSecretKey",
            readWriteAccessKey.getSecretAccessKey());
}
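
For clarity, after the '#' characters in writePolicyRaw are replaced with double quotes and the SID and BUCKET tokens are substituted, the write-only policy handed to putUserPolicy resolves to an ordinary IAM policy document of this shape (shown with placeholder values for policyWriteName and bucketName):

{
  "Statement": [
    {
      "Sid": "<policyWriteName>",
      "Action": [
        "s3:PutObject",
        "s3:PutObjectAcl"
      ],
      "Effect": "Allow",
      "Resource": [
        "arn:aws:s3:::<bucketName>/*"
      ]
    }
  ]
}

The read/write policy is built the same way, additionally allowing s3:DeleteObject, s3:Get* and s3:List* on both the bucket and its objects.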

From source file:org.apache.apex.malhar.lib.fs.s3.S3BlockUploadOperator.java

License:Apache License

/**
 * Create AmazonS3 client using AWS credentials
 * @return AmazonS3
 */
protected AmazonS3 createClient() {
    AmazonS3 client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretAccessKey));
    if (endPoint != null) {
        client.setEndpoint(endPoint);
    }
    return client;
}

From source file:org.apache.camel.component.aws.s3.S3Endpoint.java

License:Apache License

/**
 * Provide the possibility to override this method for a mock implementation
 *
 * @return AmazonS3Client
 */
AmazonS3 createS3Client() {
    AWSCredentials credentials = new BasicAWSCredentials(configuration.getAccessKey(),
            configuration.getSecretKey());
    AmazonS3 client = new AmazonS3Client(credentials);
    if (configuration.getAmazonS3Endpoint() != null) {
        client.setEndpoint(configuration.getAmazonS3Endpoint());
    }
    return client;
}

From source file:org.apache.storm.s3.output.UploaderFactory.java

License:Apache License

public static Uploader buildUploader(Map conf) {
    Protocol protocol = Protocol.HTTPS;
    String proxy = null;
    int proxyPort = 0;
    if (conf.containsKey(S3_PROTOCOL)) {
        protocol = Protocol.valueOf((String) conf.get(S3_PROTOCOL));
    }
    if (conf.containsKey(S3_PROXY)) {
        proxy = (String) conf.get(S3_PROXY);
    }
    if (conf.containsKey(S3_PROXY_PORT)) {
        proxyPort = ((Long) conf.get(S3_PROXY_PORT)).intValue();
    }
    AWSCredentialsProvider provider = new DefaultAWSCredentialsProviderChain();
    AWSCredentials credentials = provider.getCredentials();
    ClientConfiguration config = new ClientConfiguration().withProtocol(protocol);
    if (proxy != null) {
        config.withProxyHost(proxy);
    }
    if (proxyPort != 0) {
        config.withProxyPort(proxyPort);
    }
    AmazonS3 client = new AmazonS3Client(credentials, config);
    if (conf.containsKey(S3_ENDPOINT)) {
        client.setEndpoint((String) conf.get(S3_ENDPOINT));
    }
    return getUploader(conf, client);
}

From source file:org.elasticsearch.repositories.s3.S3Service.java

License:Apache License

private AmazonS3 buildClient(S3ClientSettings clientSettings) {
    final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings);
    final ClientConfiguration configuration = buildConfiguration(clientSettings);
    final AmazonS3 client = buildClient(credentials, configuration);
    if (Strings.hasText(clientSettings.endpoint)) {
        client.setEndpoint(clientSettings.endpoint);
    }
    return client;
}

From source file:org.geoserver.taskmanager.external.impl.S3FileServiceImpl.java

License:Open Source License

private AmazonS3 getS3Client() {
    if (endpoint == null) {
        throw new IllegalArgumentException("The endpoint is required, add a property: alias.s3.endpoint");
    }
    if (user == null) {
        throw new IllegalArgumentException("The user is required, add a property: alias.s3.user");
    }
    if (password == null) {
        throw new IllegalArgumentException("The password is required, add a property: alias.s3.password");
    }
    if (rootFolder == null) {
        throw new IllegalStateException("The rootfolder is required, add a property: alias.s3.rootfolder");
    }

    AmazonS3 s3;
    // custom endpoint

    s3 = new AmazonS3Client(new BasicAWSCredentials(user, password));

    final S3ClientOptions clientOptions = S3ClientOptions.builder().setPathStyleAccess(true).build();
    s3.setS3ClientOptions(clientOptions);
    String endpoint = this.endpoint;
    if (!endpoint.endsWith("/")) {
        endpoint = endpoint + "/";
    }
    s3.setEndpoint(endpoint);

    return s3;
}

From source file:org.jumpmind.symmetric.io.RedshiftBulkDatabaseWriter.java

License:Open Source License

protected void flush() {
    if (loadedRows > 0) {
        stagedInputFile.close();
        statistics.get(batch).startTimer(DataWriterStatisticConstants.DATABASEMILLIS);
        AmazonS3 s3client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
        if (isNotBlank(s3Endpoint)) {
            s3client.setEndpoint(s3Endpoint);
        }
        String objectKey = stagedInputFile.getFile().getName();
        try {
            s3client.putObject(bucket, objectKey, stagedInputFile.getFile());
        } catch (AmazonServiceException ase) {
            log.error("Exception from AWS service: " + ase.getMessage());
        } catch (AmazonClientException ace) {
            log.error("Exception from AWS client: " + ace.getMessage());
        }

        try {
            JdbcSqlTransaction jdbcTransaction = (JdbcSqlTransaction) transaction;
            Connection c = jdbcTransaction.getConnection();
            String sql = "COPY " + getTargetTable().getFullyQualifiedTableName() + " ("
                    + Table.getCommaDeliminatedColumns(table.getColumns()) + ") FROM 's3://" + bucket + "/"
                    + objectKey + "' CREDENTIALS 'aws_access_key_id=" + accessKey + ";aws_secret_access_key="
                    + secretKey + "' CSV DATEFORMAT 'YYYY-MM-DD HH:MI:SS' "
                    + (needsExplicitIds ? "EXPLICIT_IDS" : "")
                    + (isNotBlank(appendToCopyCommand) ? (" " + appendToCopyCommand) : "");
            Statement stmt = c.createStatement();

            log.debug(sql);
            stmt.execute(sql);
            stmt.close();
            transaction.commit();
        } catch (SQLException ex) {
            throw platform.getSqlTemplate().translate(ex);
        } finally {
            statistics.get(batch).stopTimer(DataWriterStatisticConstants.DATABASEMILLIS);
        }

        stagedInputFile.delete();
        try {
            s3client.deleteObject(bucket, objectKey);
        } catch (AmazonServiceException ase) {
            log.error("Exception from AWS service: " + ase.getMessage());
        } catch (AmazonClientException ace) {
            log.error("Exception from AWS client: " + ace.getMessage());
        }

        createStagingFile();
        loadedRows = 0;
        loadedBytes = 0;
    }
}

From source file:org.pathirage.ceph.bench.S3Client.java

License:Apache License

AmazonS3 getS3Connection() {
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);

    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTP);

    AmazonS3 conn = new AmazonS3Client(credentials, clientConfig);
    conn.setEndpoint(host);

    return conn;
}