Example usage for com.amazonaws.services.s3 AmazonS3 doesBucketExist

Introduction

On this page you can find example usage for com.amazonaws.services.s3 AmazonS3 doesBucketExist.

Prototype

@Deprecated
public boolean doesBucketExist(String bucketName) throws SdkClientException, AmazonServiceException;

Document

Checks if the specified bucket exists.
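
Note that this overload is marked @Deprecated in the AWS SDK for Java 1.x; doesBucketExistV2(String) is the documented replacement. Before the project examples below, here is a minimal standalone sketch of the call, assuming an SDK version that provides AmazonS3ClientBuilder and doesBucketExistV2, the default credential provider chain, and a hypothetical bucket name:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class DoesBucketExistSketch {
    public static void main(String[] args) {
        // Client built from the default region/credential provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // "my-example-bucket" is a hypothetical name used for illustration.
        boolean exists = s3.doesBucketExistV2("my-example-bucket");
        System.out.println("Bucket exists: " + exists);
    }
}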

Usage

From source file: ecplugins.s3.S3Util.java

License: Apache License

public static boolean CheckIsBucketAvailable(String bucketName) throws Exception {

    Properties props = TestUtils.getProperties();

    BasicAWSCredentials credentials = new BasicAWSCredentials(props.getProperty(StringConstants.ACCESS_ID),
            props.getProperty(StringConstants.SECRET_ACCESS_ID));

    // Create TransferManager
    TransferManager tx = new TransferManager(credentials);

    // Get S3 Client
    AmazonS3 s3 = tx.getAmazonS3Client();

    return s3.doesBucketExist(bucketName);
}
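
The TransferManager(AWSCredentials) constructor used above is itself deprecated in later 1.x SDKs. A sketch of an equivalent check using the builder APIs, assuming AWS SDK for Java 1.11+ and the same BasicAWSCredentials object (no TransferManager is needed just to test for a bucket):

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public static boolean checkIsBucketAvailable(String bucketName, BasicAWSCredentials credentials) {
    // Build the S3 client directly from the static credentials.
    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
            .withCredentials(new AWSStaticCredentialsProvider(credentials))
            .build();
    return s3.doesBucketExist(bucketName);
}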

From source file: ie.peternagy.jcrypto.module.config.S3ConfigHandler.java

License: Open Source License

@Override
public void validateConfig(Map config) {
    AWSCredentials credentials = new BasicAWSCredentials((String) config.get("access-key"),
            (String) config.get("secret-key"));
    AmazonS3 s3client = new AmazonS3Client(credentials);
    try {
        System.out.println("S3 config validation...\n");
        s3client.doesBucketExist((String) config.get("bucket-name"));

        System.out.println("Valid configuration!\n");
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: modules.storage.AmazonS3Storage.java

License: Open Source License

@Inject
public AmazonS3Storage(Configuration configuration) {
    bucketName = configuration.getString("storage.s3.bucket", "thunderbit");

    String accessKey = configuration.getString("storage.s3.accesskey");
    String secretKey = configuration.getString("storage.s3.secretkey");
    credentials = new BasicAWSCredentials(accessKey, secretKey);

    AmazonS3 amazonS3 = new AmazonS3Client(credentials);

    if (configuration.getBoolean("storage.s3.createBucket", true)) {
        try {
            if (!(amazonS3.doesBucketExist(bucketName))) {
                amazonS3.createBucket(new CreateBucketRequest(bucketName));
            }

            String bucketLocation = amazonS3.getBucketLocation(new GetBucketLocationRequest(bucketName));
            logger.info("Amazon S3 bucket created at " + bucketLocation);
        } catch (AmazonServiceException ase) {
            logAmazonServiceException(ase);
        } catch (AmazonClientException ace) {
            logAmazonClientException(ace);
        }
    }
}

From source file: net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java

License: Apache License

@Override
public boolean isHealthy() {
    AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS));
    return s3.doesBucketExist(getBucketName());
}

From source file: org.alanwilliamson.amazon.s3.ExistsBucket.java

License: Open Source License

public cfData execute(cfSession _session, cfArgStructData argStruct) throws cfmRunTimeException {

    AmazonKey amazonKey = getAmazonKey(_session, argStruct);
    AmazonS3 s3Client = getAmazonS3(amazonKey);
    String bucket = getNamedStringParam(argStruct, "bucket", null);

    try {
        return cfBooleanData.getcfBooleanData(s3Client.doesBucketExist(bucket.toLowerCase()));
    } catch (Exception e) {
        throwException(_session, "AmazonS3: " + e.getMessage());
    }

    return cfBooleanData.FALSE;
}

From source file: org.benetech.secureapp.generator.AmazonS3Utils.java

License: Open Source License

static public void uploadToAmazonS3(HttpSession session, File fileToUpload) throws S3Exception {
    try {
        AmazonS3 s3client = getS3();
        String bucketName = getDownloadS3Bucket();
        if (!s3client.doesBucketExist(bucketName))
            SagLogger.logError(session, "S3 bucket does not exist: " + bucketName);

        AccessControlList acl = new AccessControlList();
        acl.grantPermission(GroupGrantee.AllUsers, Permission.Read);
        s3client.putObject(
                new PutObjectRequest(bucketName, getAPKDownloadFilePathWithFile(fileToUpload.getName()),
                        fileToUpload).withAccessControlList(acl));

        SagLogger.logInfo(session, "Finished uploading to S3");
    } catch (Exception e) {
        SagLogger.logException(session, e);
        throw new S3Exception(e);
    }
}

From source file: org.deeplearning4j.legacyExamples.EmrSparkExample.java

License: Apache License

public void entryPoint(String[] args) {
    JCommander jcmdr = new JCommander(this);
    try {
        jcmdr.parse(args);
    } catch (ParameterException e) {
        jcmdr.usage();
        try {
            Thread.sleep(500);
        } catch (Exception e2) {
        }
        throw e;
    }

    AmazonElasticMapReduceClientBuilder builder = AmazonElasticMapReduceClientBuilder.standard();
    builder.withRegion(region);
    builder.withCredentials(getCredentialsProvider());

    AmazonElasticMapReduce emr = builder.build();

    List<StepConfig> steps = new ArrayList<>();

    if (upload) {
        log.info("uploading uber jar");

        AmazonS3ClientBuilder s3builder = AmazonS3ClientBuilder.standard();
        s3builder.withRegion(region);
        s3builder.withCredentials(getCredentialsProvider());
        AmazonS3 s3Client = s3builder.build();

        if (!s3Client.doesBucketExist(bucketName)) {
            s3Client.createBucket(bucketName);
        }

        File uberJarFile = new File(uberJar);

        s3Client.putObject(new PutObjectRequest(bucketName, uberJarFile.getName(), uberJarFile));
    }

    if (debug) {
        log.info("enable debug");

        StepFactory stepFactory = new StepFactory(builder.getRegion() + ".elasticmapreduce");
        StepConfig enableDebugging = new StepConfig().withName("Enable Debugging")
                .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW)
                .withHadoopJarStep(stepFactory.newEnableDebuggingStep());
        steps.add(enableDebugging);
    }

    if (execute) {
        log.info("execute spark step");

        HadoopJarStepConfig sparkStepConf = new HadoopJarStepConfig();
        sparkStepConf.withJar("command-runner.jar");
        sparkStepConf.withArgs("spark-submit", "--deploy-mode", "cluster", "--class", className,
                getS3UberJarUrl(), "-useSparkLocal", "false");

        ActionOnFailure action = ActionOnFailure.TERMINATE_JOB_FLOW;

        if (keepAlive) {
            action = ActionOnFailure.CONTINUE;
        }

        StepConfig sparkStep = new StepConfig().withName("Spark Step").withActionOnFailure(action)
                .withHadoopJarStep(sparkStepConf);
        steps.add(sparkStep);
    }

    log.info("create spark cluster");

    Application sparkApp = new Application().withName("Spark");

    // The service role and job flow role are created automatically when a
    // cluster is launched from the AWS console; launch one there first, or
    // create the roles manually.

    RunJobFlowRequest request = new RunJobFlowRequest().withName("Spark Cluster").withSteps(steps)
            .withServiceRole("EMR_DefaultRole").withJobFlowRole("EMR_EC2_DefaultRole")
            .withApplications(sparkApp).withReleaseLabel(emrVersion).withLogUri(getS3BucketLogsUrl())
            .withInstances(new JobFlowInstancesConfig().withEc2KeyName("spark").withInstanceCount(instanceCount)
                    .withKeepJobFlowAliveWhenNoSteps(keepAlive).withMasterInstanceType(instanceType)
                    .withSlaveInstanceType(instanceType));

    RunJobFlowResult result = emr.runJobFlow(request);

    log.info(result.toString());

    log.info("done");
}

From source file: org.elasticsearch.cloud.aws.blobstore.S3BlobStore.java

License: Apache License

public S3BlobStore(Settings settings, AmazonS3 client, String bucket, @Nullable String region,
        ThreadPool threadPool, boolean serverSideEncryption) {
    super(settings);
    this.client = client;
    this.bucket = bucket;
    this.region = region;
    this.threadPool = threadPool;
    this.serverSideEncryption = serverSideEncryption;

    this.bufferSizeInBytes = (int) settings
            .getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes();

    if (!client.doesBucketExist(bucket)) {
        if (region != null) {
            client.createBucket(bucket, region);
        } else {
            client.createBucket(bucket);
        }
    }
}

From source file: org.elasticsearch.repositories.s3.S3BlobStore.java

License: Apache License

S3BlobStore(Settings settings, AmazonS3 client, String bucket, boolean serverSideEncryption,
        ByteSizeValue bufferSize, String cannedACL, String storageClass) {
    super(settings);
    this.client = client;
    this.bucket = bucket;
    this.serverSideEncryption = serverSideEncryption;
    this.bufferSize = bufferSize;
    this.cannedACL = initCannedACL(cannedACL);
    this.storageClass = initStorageClass(storageClass);

    // Note: client.doesBucketExist() may return 'true' if the bucket exists
    // but we don't have access to it (i.e., a 403 Forbidden response code).
    // Also, if invalid security credentials are used to execute this method, the
    // client is not able to distinguish between bucket permission errors and
    // invalid credential errors, and this method could return an incorrect result.
    SocketAccess.doPrivilegedVoid(() -> {
        if (client.doesBucketExist(bucket) == false) {
            throw new IllegalArgumentException(
                    "The bucket [" + bucket + "] does not exist. Please create it before "
                            + " creating an s3 snapshot repository backed by it.");
        }
    });
}
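
Given the caveat in the comment above, one way to tell a missing bucket apart from an access-denied response is to issue the underlying HEAD Bucket request directly and inspect the HTTP status code. A hedged sketch, assuming AWS SDK for Java 1.11+ (which provides HeadBucketRequest):

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.HeadBucketRequest;

static boolean bucketExists(AmazonS3 client, String bucket) {
    try {
        // HEAD Bucket returns 200 only when the bucket exists and is accessible.
        client.headBucket(new HeadBucketRequest(bucket));
        return true;
    } catch (AmazonServiceException e) {
        if (e.getStatusCode() == 404) {
            return false; // bucket does not exist
        }
        // 403 and other codes: the bucket may exist but access is denied;
        // rethrow rather than guessing.
        throw e;
    }
}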

From source file: org.p365.S3Sample.java

License: Open Source License

public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "mynewbuket";
    String key = "Myobj/sd.jpg";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        if (!s3.doesBucketExist(bucketName)) {
            s3.createBucket(bucketName);
        }

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        String pathname = "D:\\Program Files\\apache-tomcat-7.0.42\\webapps\\WorkerForP365\\src\\AAA_1465.jpg";
        File file = new File(pathname);
        s3.putObject(
                new PutObjectRequest(bucketName, key, file).withCannedAcl(CannedAccessControlList.PublicRead));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //System.out.println("Deleting an object\n");
        //s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //System.out.println("Deleting bucket " + bucketName + "\n");
        //s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}