Example usage for com.amazonaws Protocol HTTP

List of usage examples for com.amazonaws Protocol HTTP

Introduction

On this page you can find usage examples for com.amazonaws Protocol HTTP.

Prototype

Protocol HTTP

Document

HTTP Protocol - Using the HTTP protocol is less secure than HTTPS, but it can slightly reduce the system resources used when communicating with AWS.
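
As a quick orientation before the project examples under Usage, the following minimal, self-contained sketch shows the typical pattern: create a ClientConfiguration, select the constant with setProtocol(Protocol.HTTP), and pass the configuration to the client constructor. The sketch is not taken from any of the projects below; the class name, credentials, and endpoint values are placeholders.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3Client;

public class ProtocolHttpExample {
    public static void main(String[] args) {
        // Switch the client from the default HTTPS to plain HTTP.
        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setProtocol(Protocol.HTTP);

        // Placeholder credentials and endpoint, for illustration only.
        BasicAWSCredentials credentials = new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY");
        AmazonS3Client s3 = new AmazonS3Client(credentials, clientConfig);
        s3.setEndpoint("http://s3.example.local:9000");
    }
}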

Usage

From source file: org.geowebcache.s3.S3BlobStoreConfig.java

License: Open Source License

/**
 * @return {@link AmazonS3Client} constructed from this {@link S3BlobStoreConfig}.
 */
public AmazonS3Client buildClient() {
    AWSCredentials awsCredentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    ClientConfiguration clientConfig = new ClientConfiguration();
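    // The configuration defaults to HTTPS; the protocol is only overridden when useHTTPS is explicitly set.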
    if (null != useHTTPS) {
        clientConfig.setProtocol(useHTTPS ? Protocol.HTTPS : Protocol.HTTP);
    }
    if (null != maxConnections && maxConnections > 0) {
        clientConfig.setMaxConnections(maxConnections);
    }
    clientConfig.setProxyDomain(proxyDomain);
    clientConfig.setProxyWorkstation(proxyWorkstation);
    clientConfig.setProxyHost(proxyHost);
    if (null != proxyPort) {
        clientConfig.setProxyPort(proxyPort);
    }
    clientConfig.setProxyUsername(proxyUsername);
    clientConfig.setProxyPassword(proxyPassword);
    if (null != useGzip) {
        clientConfig.setUseGzip(useGzip);
    }
    log.debug("Initializing AWS S3 connection");
    return new AmazonS3Client(awsCredentials, clientConfig);
}

From source file: org.geowebcache.s3.S3BlobStoreInfo.java

License: Open Source License

/** @return {@link AmazonS3Client} constructed from this {@link S3BlobStoreInfo}. */
public AmazonS3Client buildClient() {
    ClientConfiguration clientConfig = new ClientConfiguration();
    if (null != useHTTPS) {
        clientConfig.setProtocol(useHTTPS ? Protocol.HTTPS : Protocol.HTTP);
    }
    if (null != maxConnections && maxConnections > 0) {
        clientConfig.setMaxConnections(maxConnections);
    }
    clientConfig.setProxyDomain(proxyDomain);
    clientConfig.setProxyWorkstation(proxyWorkstation);
    clientConfig.setProxyHost(proxyHost);
    if (null != proxyPort) {
        clientConfig.setProxyPort(proxyPort);
    }
    clientConfig.setProxyUsername(proxyUsername);
    clientConfig.setProxyPassword(proxyPassword);
    if (null != useGzip) {
        clientConfig.setUseGzip(useGzip);
    }
    log.debug("Initializing AWS S3 connection");
    AmazonS3Client client = new AmazonS3Client(getCredentialsProvider(), clientConfig);
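    // When a custom endpoint is configured, enable path-style access (bucket name in the request path) and point the client at it.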
    if (endpoint != null && !"".equals(endpoint)) {
        S3ClientOptions s3ClientOptions = new S3ClientOptions();
        s3ClientOptions.setPathStyleAccess(true);
        client.setS3ClientOptions(s3ClientOptions);
        client.setEndpoint(endpoint);
    }
    if (!client.doesBucketExist(bucket)) {
        client.createBucket(bucket);
    }
    return client;
}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void init(Element config) throws IOException {
    this.name = Main.cloudBucket.toLowerCase();
    this.staged_sync_location.mkdirs();
    try {
        if (config.hasAttribute("default-bucket-location")) {
            bucketLocation = RegionUtils.getRegion(config.getAttribute("default-bucket-location"));

        }
        if (config.hasAttribute("connection-check-interval")) {
            this.checkInterval = Integer.parseInt(config.getAttribute("connection-check-interval"));
        }
        if (config.hasAttribute("block-size")) {
            int sz = (int) StringUtils.parseSize(config.getAttribute("block-size"));
            HashBlobArchive.MAX_LEN = sz;
        }
        if (config.hasAttribute("allow-sync")) {
            HashBlobArchive.allowSync = Boolean.parseBoolean(config.getAttribute("allow-sync"));
            if (config.hasAttribute("sync-check-schedule")) {
                try {
                    new SyncFSScheduler(config.getAttribute("sync-check-schedule"));
                } catch (Exception e) {
                    SDFSLogger.getLog().error("unable to start sync scheduler", e);
                }
            }

        }
        if (config.hasAttribute("upload-thread-sleep-time")) {
            int tm = Integer.parseInt(config.getAttribute("upload-thread-sleep-time"));
            HashBlobArchive.THREAD_SLEEP_TIME = tm;
        }
        if (config.hasAttribute("cache-writes")) {
            HashBlobArchive.cacheWrites = Boolean.parseBoolean(config.getAttribute("cache-writes"));
        }
        if (config.hasAttribute("cache-reads")) {
            HashBlobArchive.cacheReads = Boolean.parseBoolean(config.getAttribute("cache-reads"));
        }
        if (config.hasAttribute("sync-files")) {
            boolean syncf = Boolean.parseBoolean(config.getAttribute("sync-files"));
            if (syncf) {
                new FileReplicationService(this);
            }
        }
        int rsp = 0;
        int wsp = 0;
        if (config.hasAttribute("read-speed")) {
            rsp = Integer.parseInt(config.getAttribute("read-speed"));
        }
        if (config.hasAttribute("write-speed")) {
            wsp = Integer.parseInt(config.getAttribute("write-speed"));
        }
        if (config.hasAttribute("local-cache-size")) {
            long sz = StringUtils.parseSize(config.getAttribute("local-cache-size"));
            HashBlobArchive.setLocalCacheSize(sz);
        }
        if (config.hasAttribute("metadata-version")) {
            this.mdVersion = Integer.parseInt(config.getAttribute("metadata-version"));
        }
        if (config.hasAttribute("map-cache-size")) {
            int sz = Integer.parseInt(config.getAttribute("map-cache-size"));
            HashBlobArchive.MAP_CACHE_SIZE = sz;
        }
        if (config.hasAttribute("io-threads")) {
            int sz = Integer.parseInt(config.getAttribute("io-threads"));
            Main.dseIOThreads = sz;
        }
        if (config.hasAttribute("clustered")) {
            this.clustered = Boolean.parseBoolean(config.getAttribute("clustered"));
        }
        if (config.hasAttribute("delete-unclaimed")) {
            this.deleteUnclaimed = Boolean.parseBoolean(config.getAttribute("delete-unclaimed"));
        }
        if (config.hasAttribute("glacier-archive-days")) {
            this.glacierDays = Integer.parseInt(config.getAttribute("glacier-archive-days"));
            if (this.glacierDays > 0)
                Main.checkArchiveOnRead = true;
        }
        if (config.hasAttribute("infrequent-access-days")) {
            this.infrequentAccess = Integer.parseInt(config.getAttribute("infrequent-access-days"));
        }
        if (config.hasAttribute("simple-s3")) {
            EncyptUtils.baseEncode = Boolean.parseBoolean(config.getAttribute("simple-s3"));
            this.simpleS3 = true;
        }
        if (config.hasAttribute("md5-sum")) {
            this.md5sum = Boolean.parseBoolean(config.getAttribute("md5-sum"));
            if (!this.md5sum) {
                System.setProperty("com.amazonaws.services.s3.disableGetObjectMD5Validation", "true");
                System.setProperty("com.amazonaws.services.s3.disablePutObjectMD5Validation", "true");
            }

        }
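        // Build the HTTP client configuration: signer overrides, connection limits, timeouts, and any <connection-props> overrides.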
        ClientConfiguration clientConfig = new ClientConfiguration();
        if (config.hasAttribute("use-v4-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-v4-signer"));

            if (v4s) {
                clientConfig.setSignerOverride("AWSS3V4SignerType");
            }
        }
        if (config.hasAttribute("use-basic-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-basic-signer"));
            if (v4s) {
                clientConfig.setSignerOverride("S3SignerType");
            }
        }

        clientConfig.setMaxConnections(Main.dseIOThreads * 2);
        clientConfig.setConnectionTimeout(10000);
        clientConfig.setSocketTimeout(10000);

        String s3Target = null;
        if (config.getElementsByTagName("connection-props").getLength() > 0) {
            Element el = (Element) config.getElementsByTagName("connection-props").item(0);
            if (el.hasAttribute("connection-timeout"))
                clientConfig.setConnectionTimeout(Integer.parseInt(el.getAttribute("connection-timeout")));
            if (el.hasAttribute("socket-timeout"))
                clientConfig.setSocketTimeout(Integer.parseInt(el.getAttribute("socket-timeout")));
            if (el.hasAttribute("local-address"))
                clientConfig.setLocalAddress(InetAddress.getByName(el.getAttribute("local-address")));
            if (el.hasAttribute("max-retry"))
                clientConfig.setMaxErrorRetry(Integer.parseInt(el.getAttribute("max-retry")));
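            // Honor an explicit protocol choice: plain HTTP only when requested, otherwise HTTPS.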
            if (el.hasAttribute("protocol")) {
                String pr = el.getAttribute("protocol");
                if (pr.equalsIgnoreCase("http"))
                    clientConfig.setProtocol(Protocol.HTTP);
                else
                    clientConfig.setProtocol(Protocol.HTTPS);

            }
            if (el.hasAttribute("s3-target")) {
                s3Target = el.getAttribute("s3-target");
            }
            if (el.hasAttribute("proxy-host")) {
                clientConfig.setProxyHost(el.getAttribute("proxy-host"));
            }
            if (el.hasAttribute("proxy-domain")) {
                clientConfig.setProxyDomain(el.getAttribute("proxy-domain"));
            }
            if (el.hasAttribute("proxy-password")) {
                clientConfig.setProxyPassword(el.getAttribute("proxy-password"));
            }
            if (el.hasAttribute("proxy-port")) {
                clientConfig.setProxyPort(Integer.parseInt(el.getAttribute("proxy-port")));
            }
            if (el.hasAttribute("proxy-username")) {
                clientConfig.setProxyUsername(el.getAttribute("proxy-username"));
            }
        }

        if (s3Target != null && s3Target.toLowerCase().startsWith("https")) {
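            // Trust any certificate and hostname so that self-signed HTTPS targets work.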
            TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
                @Override
                public boolean isTrusted(X509Certificate[] certificate, String authType) {
                    return true;
                }
            };
            SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                    SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
            clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        }
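        // Use explicit credentials when configured; otherwise fall back to EC2 instance-profile credentials.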
        if (awsCredentials != null)
            s3Service = new AmazonS3Client(awsCredentials, clientConfig);
        else
            s3Service = new AmazonS3Client(new InstanceProfileCredentialsProvider(), clientConfig);
        if (bucketLocation != null) {
            s3Service.setRegion(bucketLocation);
            System.out.println("bucketLocation=" + bucketLocation.toString());
        }
        if (s3Target != null) {
            s3Service.setEndpoint(s3Target);
            System.out.println("target=" + s3Target);
        }
        if (config.hasAttribute("disableDNSBucket")) {
            s3Service.setS3ClientOptions(new S3ClientOptions()
                    .withPathStyleAccess(Boolean.parseBoolean(config.getAttribute("disableDNSBucket")))
                    .disableChunkedEncoding());
            System.out.println(
                    "disableDNSBucket=" + Boolean.parseBoolean(config.getAttribute("disableDNSBucket")));
        }
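        // Bootstrap the bucket with a bucketinfo metadata object on first use; otherwise reload the persisted sizes and cluster flag.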
        if (!s3Service.doesBucketExist(this.name)) {
            s3Service.createBucket(this.name);
            SDFSLogger.getLog().info("created new store " + name);
            ObjectMetadata md = new ObjectMetadata();
            md.addUserMetadata("currentsize", "0");
            md.addUserMetadata("currentcompressedsize", "0");
            md.addUserMetadata("clustered", "true");
            md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
            md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
            md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

            this.clustered = true;
            byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
            if (md5sum) {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.setContentMD5(mds);
            }
            md.setContentLength(sz.length);
            this.binm = "bucketinfo/"
                    + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
        } else {
            Map<String, String> obj = null;
            ObjectMetadata omd = null;
            try {
                omd = s3Service.getObjectMetadata(this.name, binm);
                obj = omd.getUserMetadata();
                obj.get("currentsize");
            } catch (Exception e) {
                omd = null;
                SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
            }
            if (omd == null) {
                try {
                    this.binm = "bucketinfo/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                    omd = s3Service.getObjectMetadata(this.name, binm);
                    obj = omd.getUserMetadata();
                    obj.get("currentsize");
                } catch (Exception e) {
                    omd = null;
                    SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
                }
            }
            if (omd == null) {
                ObjectMetadata md = new ObjectMetadata();
                md.addUserMetadata("currentsize", "0");
                md.addUserMetadata("currentcompressedsize", "0");
                md.addUserMetadata("clustered", "true");
                md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

                this.clustered = true;
                this.binm = "bucketinfo/"
                        + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                if (md5sum) {
                    String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.setContentMD5(mds);
                }
                md.setContentLength(sz.length);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
            } else {
                if (obj.containsKey("currentsize")) {
                    long cl = Long.parseLong((String) obj.get("currentsize"));
                    if (cl >= 0) {
                        HashBlobArchive.currentLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly len=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }

                if (obj.containsKey("currentcompressedsize")) {
                    long cl = Long.parseLong((String) obj.get("currentcompressedsize"));
                    if (cl >= 0) {
                        HashBlobArchive.compressedLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly clen=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }
                if (obj.containsKey("clustered")) {
                    this.clustered = Boolean.parseBoolean(obj.get("clustered"));
                } else
                    this.clustered = false;

                obj.put("clustered", Boolean.toString(this.clustered));
                omd.setUserMetadata(obj);
                try {

                    updateObject(binm, omd);
                } catch (Exception e) {
                    SDFSLogger.getLog().warn("unable to update bucket info in init", e);
                    SDFSLogger.getLog().info("created new store " + name);
                    ObjectMetadata md = new ObjectMetadata();
                    md.addUserMetadata("currentsize", "0");
                    md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.addUserMetadata("currentcompressedsize", "0");
                    md.addUserMetadata("clustered", Boolean.toString(this.clustered));
                    md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                    md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    if (md5sum) {
                        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                        md.setContentMD5(mds);
                    }
                    md.setContentLength(sz.length);
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);

                }
            }
        }
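        // When talking to AWS S3 directly (no custom s3Target), apply or clear lifecycle transitions to Glacier and Standard-IA for the blocks/ prefix.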
        ArrayList<Transition> trs = new ArrayList<Transition>();
        if (this.glacierDays > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.glacierDays)
                    .withStorageClass(StorageClass.Glacier);
            trs.add(transToArchive);
        }

        if (this.infrequentAccess > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.infrequentAccess)
                    .withStorageClass(StorageClass.StandardInfrequentAccess);
            trs.add(transToArchive);

        }
        if (trs.size() > 0) {
            BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
                    .withId("SDFS Automated Archive Rule for Block Data").withPrefix("blocks/")
                    .withTransitions(trs).withStatus(BucketLifecycleConfiguration.ENABLED.toString());
            List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
            rules.add(ruleArchiveAndExpire);

            BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);

            // Save configuration.
            s3Service.setBucketLifecycleConfiguration(this.name, configuration);
        } else if (s3Target == null) {
            s3Service.deleteBucketLifecycleConfiguration(this.name);
        }
        HashBlobArchive.init(this);
        HashBlobArchive.setReadSpeed(rsp);
        HashBlobArchive.setWriteSpeed(wsp);
        Thread th = new Thread(this);
        th.start();
    } catch (Exception e) {
        SDFSLogger.getLog().error("unable to start service", e);
        throw new IOException(e);
    }

}

From source file: org.pathirage.ceph.bench.S3Client.java

License: Apache License

AmazonS3 getS3Connection() {
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);

    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTP);

    AmazonS3 conn = new AmazonS3Client(credentials, clientConfig);
    conn.setEndpoint(host);

    return conn;
}

From source file: usingflume.ch05.S3Sink.java

License: Apache License

@Override
public void start() {
    // Set up Amazon S3 client.
    AWSCredentials credentials = new BasicAWSCredentials(awsAccessKeyId, awsSecretKey);
    ClientConfiguration config = new ClientConfiguration();
    config.setProtocol(Protocol.HTTP);
    connection = new AmazonS3Client(credentials, config);
    connection.setEndpoint(endPoint);
    if (!connection.doesBucketExist(bucket)) {
        connection.createBucket(bucket);
    }
    super.start();
}