Example usage for com.amazonaws ClientConfiguration setConnectionTimeout

List of usage examples for com.amazonaws ClientConfiguration setConnectionTimeout

Introduction

On this page you can find example usage for com.amazonaws ClientConfiguration setConnectionTimeout.

Prototype

public void setConnectionTimeout(int connectionTimeout) 

Document

Sets the amount of time to wait (in milliseconds) when initially establishing a connection before giving up and timing out.
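
Before the project examples below, here is a minimal, self-contained sketch of the call. It assumes the AWS SDK for Java 1.x with the default credentials provider chain, and uses illustrative timeout values; the class name ConnectionTimeoutExample is just a placeholder. The 1.x SDK default connection timeout is 10 seconds, so an explicit call is only needed when you want a different limit.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class ConnectionTimeoutExample {
    public static void main(String[] args) {
        ClientConfiguration config = new ClientConfiguration();
        // Give up on establishing a TCP connection after 5 seconds.
        config.setConnectionTimeout(5000);
        // Related setting: maximum time to wait for data on an already established connection.
        config.setSocketTimeout(10000);

        // Credentials come from the default provider chain; the timeout values are illustrative only.
        AmazonS3 s3 = new AmazonS3Client(config);
        System.out.println("Connection timeout: " + config.getConnectionTimeout() + " ms");
    }
}

As the examples below show, setConnectionTimeout is typically paired with setSocketTimeout so that both the connect phase and the read phase have explicit limits.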

Usage

From source file: org.apache.nifi.processors.aws.AbstractAWSProcessor.java

License: Apache License

protected ClientConfiguration createConfiguration(final ProcessContext context) {
    final ClientConfiguration config = new ClientConfiguration();
    config.setMaxConnections(context.getMaxConcurrentTasks());
    config.setMaxErrorRetry(0);
    config.setUserAgent(DEFAULT_USER_AGENT);
    // If this is changed to be a property, ensure other uses are also changed
    config.setProtocol(DEFAULT_PROTOCOL);
    final int commsTimeout = context.getProperty(TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue();
    config.setConnectionTimeout(commsTimeout);
    config.setSocketTimeout(commsTimeout);

    final SSLContextService sslContextService = context.getProperty(SSL_CONTEXT_SERVICE)
            .asControllerService(SSLContextService.class);
    if (sslContextService != null) {
        final SSLContext sslContext = sslContextService.createSSLContext(SSLContextService.ClientAuth.NONE);
        // NIFI-3788: Changed hostnameVerifier from null to DHV (BrowserCompatibleHostnameVerifier is deprecated)
        SdkTLSSocketFactory sdkTLSSocketFactory = new SdkTLSSocketFactory(sslContext,
                new DefaultHostnameVerifier());
        config.getApacheHttpClientConfig().setSslSocketFactory(sdkTLSSocketFactory);
    }

    if (context.getProperty(PROXY_HOST).isSet()) {
        String proxyHost = context.getProperty(PROXY_HOST).evaluateAttributeExpressions().getValue();
        config.setProxyHost(proxyHost);
        Integer proxyPort = context.getProperty(PROXY_HOST_PORT).evaluateAttributeExpressions().asInteger();
        config.setProxyPort(proxyPort);
    }

    return config;
}

From source file: org.apache.tajo.storage.s3.S3TableSpace.java

License: Apache License

@Override
public void init(TajoConf tajoConf) throws IOException {
    super.init(tajoConf);

    try {
        // Try to get our credentials or just connect anonymously
        String accessKey = conf.get(ACCESS_KEY, null);
        String secretKey = conf.get(SECRET_KEY, null);

        String userInfo = uri.getUserInfo();
        if (userInfo != null) {
            int index = userInfo.indexOf(':');
            if (index != -1) {
                accessKey = userInfo.substring(0, index);
                secretKey = userInfo.substring(index + 1);
            } else {
                accessKey = userInfo;
            }
        }

        AWSCredentialsProviderChain credentials = new AWSCredentialsProviderChain(
                new BasicAWSCredentialsProvider(accessKey, secretKey), new InstanceProfileCredentialsProvider(),
                new AnonymousAWSCredentialsProvider());

        ClientConfiguration awsConf = new ClientConfiguration();
        awsConf.setMaxConnections(conf.getInt(MAXIMUM_CONNECTIONS, DEFAULT_MAXIMUM_CONNECTIONS));
        boolean secureConnections = conf.getBoolean(SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
        awsConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
        awsConf.setMaxErrorRetry(conf.getInt(MAX_ERROR_RETRIES, DEFAULT_MAX_ERROR_RETRIES));
        awsConf.setConnectionTimeout(conf.getInt(ESTABLISH_TIMEOUT, DEFAULT_ESTABLISH_TIMEOUT));
        awsConf.setSocketTimeout(conf.getInt(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT));

        String proxyHost = conf.getTrimmed(PROXY_HOST, "");
        int proxyPort = conf.getInt(PROXY_PORT, -1);
        if (!proxyHost.isEmpty()) {
            awsConf.setProxyHost(proxyHost);
            if (proxyPort >= 0) {
                awsConf.setProxyPort(proxyPort);
            } else {
                if (secureConnections) {
                    LOG.warn("Proxy host set without port. Using HTTPS default 443");
                    awsConf.setProxyPort(443);
                } else {
                    LOG.warn("Proxy host set without port. Using HTTP default 80");
                    awsConf.setProxyPort(80);
                }
            }
            String proxyUsername = conf.getTrimmed(PROXY_USERNAME);
            String proxyPassword = conf.getTrimmed(PROXY_PASSWORD);
            if ((proxyUsername == null) != (proxyPassword == null)) {
                String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD
                        + " set without the other.";
                LOG.error(msg);
            }
            awsConf.setProxyUsername(proxyUsername);
            awsConf.setProxyPassword(proxyPassword);
            awsConf.setProxyDomain(conf.getTrimmed(PROXY_DOMAIN));
            awsConf.setProxyWorkstation(conf.getTrimmed(PROXY_WORKSTATION));
            if (LOG.isDebugEnabled()) {
                LOG.debug(String.format(
                        "Using proxy server %s:%d as user %s with password %s on domain %s as workstation "
                                + "%s",
                        awsConf.getProxyHost(), awsConf.getProxyPort(), awsConf.getProxyUsername(),
                        awsConf.getProxyPassword(), awsConf.getProxyDomain(), awsConf.getProxyWorkstation()));
            }
        } else if (proxyPort >= 0) {
            String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
            LOG.error(msg);
        }

        s3 = new AmazonS3Client(credentials, awsConf);
        String endPoint = conf.getTrimmed(ENDPOINT, "");
        if (!endPoint.isEmpty()) {
            try {
                s3.setEndpoint(endPoint);
            } catch (IllegalArgumentException e) {
                String msg = "Incorrect endpoint: " + e.getMessage();
                LOG.error(msg);
            }
        }

        maxKeys = conf.getInt(MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
        s3Enabled = true;
    } catch (NoClassDefFoundError e) {
        // Hadoop versions earlier than 2.6.0 do not bundle the AWS dependencies because they do not provide
        // S3AFileSystem. In that case, Tajo never uses the AWS S3 API directly.
        LOG.warn(e);
        s3Enabled = false;
    } catch (Exception e) {
        throw new TajoInternalError(e);
    }
}

From source file: org.mule.module.s3.S3Connector.java

License: Open Source License

/**
 * Creates an {@link AmazonS3} client. If accessKey and secretKey are not set,
 * the resulting client is anonymous.
 * 
 * @return a new {@link AmazonS3}
 */
private AmazonS3 createAmazonS3(String accessKey, String secretKey) {
    ClientConfiguration clientConfig = new ClientConfiguration();
    if (proxyUsername != null) {
        clientConfig.setProxyUsername(proxyUsername);
    }
    if (proxyPort != null) {
        clientConfig.setProxyPort(proxyPort);
    }
    if (proxyPassword != null) {
        clientConfig.setProxyPassword(proxyPassword);
    }
    if (proxyHost != null) {
        clientConfig.setProxyHost(proxyHost);
    }
    if (connectionTimeout != null) {
        clientConfig.setConnectionTimeout(connectionTimeout);
    }
    if (socketTimeout != null) {
        clientConfig.setSocketTimeout(socketTimeout);
    }

    return new AmazonS3Client(createCredentials(accessKey, secretKey), clientConfig);
}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void init(Element config) throws IOException {
    this.name = Main.cloudBucket.toLowerCase();
    this.staged_sync_location.mkdirs();
    try {
        if (config.hasAttribute("default-bucket-location")) {
            bucketLocation = RegionUtils.getRegion(config.getAttribute("default-bucket-location"));

        }
        if (config.hasAttribute("connection-check-interval")) {
            this.checkInterval = Integer.parseInt(config.getAttribute("connection-check-interval"));
        }
        if (config.hasAttribute("block-size")) {
            int sz = (int) StringUtils.parseSize(config.getAttribute("block-size"));
            HashBlobArchive.MAX_LEN = sz;
        }
        if (config.hasAttribute("allow-sync")) {
            HashBlobArchive.allowSync = Boolean.parseBoolean(config.getAttribute("allow-sync"));
            if (config.hasAttribute("sync-check-schedule")) {
                try {
                    new SyncFSScheduler(config.getAttribute("sync-check-schedule"));
                } catch (Exception e) {
                    SDFSLogger.getLog().error("unable to start sync scheduler", e);
                }
            }

        }
        if (config.hasAttribute("upload-thread-sleep-time")) {
            int tm = Integer.parseInt(config.getAttribute("upload-thread-sleep-time"));
            HashBlobArchive.THREAD_SLEEP_TIME = tm;
        }
        if (config.hasAttribute("cache-writes")) {
            HashBlobArchive.cacheWrites = Boolean.parseBoolean(config.getAttribute("cache-writes"));
        }
        if (config.hasAttribute("cache-reads")) {
            HashBlobArchive.cacheReads = Boolean.parseBoolean(config.getAttribute("cache-reads"));
        }
        if (config.hasAttribute("sync-files")) {
            boolean syncf = Boolean.parseBoolean(config.getAttribute("sync-files"));
            if (syncf) {
                new FileReplicationService(this);
            }
        }
        int rsp = 0;
        int wsp = 0;
        if (config.hasAttribute("read-speed")) {
            rsp = Integer.parseInt(config.getAttribute("read-speed"));
        }
        if (config.hasAttribute("write-speed")) {
            wsp = Integer.parseInt(config.getAttribute("write-speed"));
        }
        if (config.hasAttribute("local-cache-size")) {
            long sz = StringUtils.parseSize(config.getAttribute("local-cache-size"));
            HashBlobArchive.setLocalCacheSize(sz);
        }
        if (config.hasAttribute("metadata-version")) {
            this.mdVersion = Integer.parseInt(config.getAttribute("metadata-version"));
        }
        if (config.hasAttribute("map-cache-size")) {
            int sz = Integer.parseInt(config.getAttribute("map-cache-size"));
            HashBlobArchive.MAP_CACHE_SIZE = sz;
        }
        if (config.hasAttribute("io-threads")) {
            int sz = Integer.parseInt(config.getAttribute("io-threads"));
            Main.dseIOThreads = sz;
        }
        if (config.hasAttribute("clustered")) {
            this.clustered = Boolean.parseBoolean(config.getAttribute("clustered"));
        }
        if (config.hasAttribute("delete-unclaimed")) {
            this.deleteUnclaimed = Boolean.parseBoolean(config.getAttribute("delete-unclaimed"));
        }
        if (config.hasAttribute("glacier-archive-days")) {
            this.glacierDays = Integer.parseInt(config.getAttribute("glacier-archive-days"));
            if (this.glacierDays > 0)
                Main.checkArchiveOnRead = true;
        }
        if (config.hasAttribute("infrequent-access-days")) {
            this.infrequentAccess = Integer.parseInt(config.getAttribute("infrequent-access-days"));
        }
        if (config.hasAttribute("simple-s3")) {
            EncyptUtils.baseEncode = Boolean.parseBoolean(config.getAttribute("simple-s3"));
            this.simpleS3 = true;
        }
        if (config.hasAttribute("md5-sum")) {
            this.md5sum = Boolean.parseBoolean(config.getAttribute("md5-sum"));
            if (!this.md5sum) {
                System.setProperty("com.amazonaws.services.s3.disableGetObjectMD5Validation", "true");
                System.setProperty("com.amazonaws.services.s3.disablePutObjectMD5Validation", "true");
            }

        }
        ClientConfiguration clientConfig = new ClientConfiguration();
        if (config.hasAttribute("use-v4-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-v4-signer"));

            if (v4s) {
                clientConfig.setSignerOverride("AWSS3V4SignerType");
            }
        }
        if (config.hasAttribute("use-basic-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-basic-signer"));
            if (v4s) {
                clientConfig.setSignerOverride("S3SignerType");
            }
        }

        clientConfig.setMaxConnections(Main.dseIOThreads * 2);
        clientConfig.setConnectionTimeout(10000);
        clientConfig.setSocketTimeout(10000);

        String s3Target = null;
        if (config.getElementsByTagName("connection-props").getLength() > 0) {
            Element el = (Element) config.getElementsByTagName("connection-props").item(0);
            if (el.hasAttribute("connection-timeout"))
                clientConfig.setConnectionTimeout(Integer.parseInt(el.getAttribute("connection-timeout")));
            if (el.hasAttribute("socket-timeout"))
                clientConfig.setSocketTimeout(Integer.parseInt(el.getAttribute("socket-timeout")));
            if (el.hasAttribute("local-address"))
                clientConfig.setLocalAddress(InetAddress.getByName(el.getAttribute("local-address")));
            if (el.hasAttribute("max-retry"))
                clientConfig.setMaxErrorRetry(Integer.parseInt(el.getAttribute("max-retry")));
            if (el.hasAttribute("protocol")) {
                String pr = el.getAttribute("protocol");
                if (pr.equalsIgnoreCase("http"))
                    clientConfig.setProtocol(Protocol.HTTP);
                else
                    clientConfig.setProtocol(Protocol.HTTPS);

            }
            if (el.hasAttribute("s3-target")) {
                s3Target = el.getAttribute("s3-target");
            }
            if (el.hasAttribute("proxy-host")) {
                clientConfig.setProxyHost(el.getAttribute("proxy-host"));
            }
            if (el.hasAttribute("proxy-domain")) {
                clientConfig.setProxyDomain(el.getAttribute("proxy-domain"));
            }
            if (el.hasAttribute("proxy-password")) {
                clientConfig.setProxyPassword(el.getAttribute("proxy-password"));
            }
            if (el.hasAttribute("proxy-port")) {
                clientConfig.setProxyPort(Integer.parseInt(el.getAttribute("proxy-port")));
            }
            if (el.hasAttribute("proxy-username")) {
                clientConfig.setProxyUsername(el.getAttribute("proxy-username"));
            }
        }

        if (s3Target != null && s3Target.toLowerCase().startsWith("https")) {
            TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
                @Override
                public boolean isTrusted(X509Certificate[] certificate, String authType) {
                    return true;
                }
            };
            SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                    SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
            clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        }
        if (awsCredentials != null)
            s3Service = new AmazonS3Client(awsCredentials, clientConfig);
        else
            s3Service = new AmazonS3Client(new InstanceProfileCredentialsProvider(), clientConfig);
        if (bucketLocation != null) {
            s3Service.setRegion(bucketLocation);
            System.out.println("bucketLocation=" + bucketLocation.toString());
        }
        if (s3Target != null) {
            s3Service.setEndpoint(s3Target);
            System.out.println("target=" + s3Target);
        }
        if (config.hasAttribute("disableDNSBucket")) {
            s3Service.setS3ClientOptions(new S3ClientOptions()
                    .withPathStyleAccess(Boolean.parseBoolean(config.getAttribute("disableDNSBucket")))
                    .disableChunkedEncoding());
            System.out.println(
                    "disableDNSBucket=" + Boolean.parseBoolean(config.getAttribute("disableDNSBucket")));
        }
        if (!s3Service.doesBucketExist(this.name)) {
            s3Service.createBucket(this.name);
            SDFSLogger.getLog().info("created new store " + name);
            ObjectMetadata md = new ObjectMetadata();
            md.addUserMetadata("currentsize", "0");
            md.addUserMetadata("currentcompressedsize", "0");
            md.addUserMetadata("clustered", "true");
            md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
            md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
            md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

            this.clustered = true;
            byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
            if (md5sum) {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.setContentMD5(mds);
            }
            md.setContentLength(sz.length);
            this.binm = "bucketinfo/"
                    + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
        } else {
            Map<String, String> obj = null;
            ObjectMetadata omd = null;
            try {
                omd = s3Service.getObjectMetadata(this.name, binm);
                obj = omd.getUserMetadata();
                obj.get("currentsize");
            } catch (Exception e) {
                omd = null;
                SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
            }
            if (omd == null) {
                try {
                    this.binm = "bucketinfo/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                    omd = s3Service.getObjectMetadata(this.name, binm);
                    obj = omd.getUserMetadata();
                    obj.get("currentsize");
                } catch (Exception e) {
                    omd = null;
                    SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
                }
            }
            if (omd == null) {
                ObjectMetadata md = new ObjectMetadata();
                md.addUserMetadata("currentsize", "0");
                md.addUserMetadata("currentcompressedsize", "0");
                md.addUserMetadata("clustered", "true");
                md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

                this.clustered = true;
                this.binm = "bucketinfo/"
                        + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                if (md5sum) {
                    String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.setContentMD5(mds);
                }
                md.setContentLength(sz.length);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
            } else {
                if (obj.containsKey("currentsize")) {
                    long cl = Long.parseLong((String) obj.get("currentsize"));
                    if (cl >= 0) {
                        HashBlobArchive.currentLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly len=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }

                if (obj.containsKey("currentcompressedsize")) {
                    long cl = Long.parseLong((String) obj.get("currentcompressedsize"));
                    if (cl >= 0) {
                        HashBlobArchive.compressedLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly clen=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }
                if (obj.containsKey("clustered")) {
                    this.clustered = Boolean.parseBoolean(obj.get("clustered"));
                } else
                    this.clustered = false;

                obj.put("clustered", Boolean.toString(this.clustered));
                omd.setUserMetadata(obj);
                try {

                    updateObject(binm, omd);
                } catch (Exception e) {
                    SDFSLogger.getLog().warn("unable to update bucket info in init", e);
                    SDFSLogger.getLog().info("created new store " + name);
                    ObjectMetadata md = new ObjectMetadata();
                    md.addUserMetadata("currentsize", "0");
                    md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.addUserMetadata("currentcompressedsize", "0");
                    md.addUserMetadata("clustered", Boolean.toString(this.clustered));
                    md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                    md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    if (md5sum) {
                        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                        md.setContentMD5(mds);
                    }
                    md.setContentLength(sz.length);
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);

                }
            }
        }
        ArrayList<Transition> trs = new ArrayList<Transition>();
        if (this.glacierDays > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.glacierDays)
                    .withStorageClass(StorageClass.Glacier);
            trs.add(transToArchive);
        }

        if (this.infrequentAccess > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.infrequentAccess)
                    .withStorageClass(StorageClass.StandardInfrequentAccess);
            trs.add(transToArchive);

        }
        if (trs.size() > 0) {
            BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
                    .withId("SDFS Automated Archive Rule for Block Data").withPrefix("blocks/")
                    .withTransitions(trs).withStatus(BucketLifecycleConfiguration.ENABLED.toString());
            List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
            rules.add(ruleArchiveAndExpire);

            BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);

            // Save configuration.
            s3Service.setBucketLifecycleConfiguration(this.name, configuration);
        } else if (s3Target == null) {
            s3Service.deleteBucketLifecycleConfiguration(this.name);
        }
        HashBlobArchive.init(this);
        HashBlobArchive.setReadSpeed(rsp);
        HashBlobArchive.setWriteSpeed(wsp);
        Thread th = new Thread(this);
        th.start();
    } catch (Exception e) {
        SDFSLogger.getLog().error("unable to start service", e);
        throw new IOException(e);
    }

}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public boolean checkAccess(String username, String password, Properties props) throws Exception {
    BasicAWSCredentials _cred = new BasicAWSCredentials(username, password);
    if (props.containsKey("default-bucket-location")) {
        bucketLocation = RegionUtils.getRegion(props.getProperty("default-bucket-location"));
    }

    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setMaxConnections(Main.dseIOThreads * 2);
    clientConfig.setConnectionTimeout(10000);
    clientConfig.setSocketTimeout(10000);
    String s3Target = null;

    if (props.containsKey("s3-target")) {
        s3Target = props.getProperty("s3-target");
    }
    if (props.containsKey("proxy-host")) {
        clientConfig.setProxyHost(props.getProperty("proxy-host"));
    }
    if (props.containsKey("proxy-domain")) {
        clientConfig.setProxyDomain(props.getProperty("proxy-domain"));
    }
    if (props.containsKey("proxy-password")) {
        clientConfig.setProxyPassword(props.getProperty("proxy-password"));
    }
    if (props.containsKey("proxy-port")) {
        clientConfig.setProxyPort(Integer.parseInt(props.getProperty("proxy-port")));
    }
    if (props.containsKey("proxy-username")) {
        clientConfig.setProxyUsername(props.getProperty("proxy-username"));
    }
    s3Service = new AmazonS3Client(_cred, clientConfig);
    if (s3Target != null) {
        TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
            @Override
            public boolean isTrusted(X509Certificate[] certificate, String authType) {
                return true;
            }
        };
        SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
        clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        s3Service.setEndpoint(s3Target);
    }
    s3Service.listBuckets();
    return true;
}

From source file: org.swiftshire.nifi.processors.kinesis.consumer.AbstractKinesisConsumerProcessor.java

License: Apache License

/**
 * Creates and initializes our KCL configuration to use.
 *
 * @param context
 * @return
 */
protected ClientConfiguration createConfiguration(final ProcessContext context) {
    final ClientConfiguration config = new ClientConfiguration();

    config.setMaxConnections(context.getMaxConcurrentTasks());
    config.setMaxErrorRetry(0);
    config.setUserAgent(DEFAULT_USER_AGENT);

    final int timeout = context.getProperty(TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue();
    config.setConnectionTimeout(timeout);
    config.setSocketTimeout(timeout);

    final SSLContextService sslContextService = context.getProperty(SSL_CONTEXT_SERVICE)
            .asControllerService(SSLContextService.class);

    if (sslContextService != null) {
        final SSLContext sslContext = sslContextService.createSSLContext(SSLContextService.ClientAuth.NONE);

        SdkTLSSocketFactory sdkTLSSocketFactory = new SdkTLSSocketFactory(sslContext, null);
        config.getApacheHttpClientConfig().setSslSocketFactory(sdkTLSSocketFactory);
    }

    if (context.getProperty(PROXY_HOST).isSet()) {
        String proxyHost = context.getProperty(PROXY_HOST).getValue();
        config.setProxyHost(proxyHost);

        Integer proxyPort = context.getProperty(PROXY_HOST_PORT).asInteger();
        config.setProxyPort(proxyPort);
    }

    return config;
}

From source file: oulib.aws.Main.java

public static void main(String[] args) {

    try {
        AWSCredentials credentials = null;
        AmazonS3 s3Client = null;
        //            args = new String[4];
        //            args[0] = "ul-bagit";
        //            args[1] = "ul-ir-workspace";
        //            args[2] = "Borelli_1680-1681";
        //            args[3] = "6";
        try {
            credentials = new ProfileCredentialsProvider("default").getCredentials();
        } catch (Exception e) {
            String access_key_id = null;
            String secret_key_id = null;
            String credentialInfo = AwsUtil.getAwsCredentials();
            ObjectMapper mapper = new ObjectMapper();
            Map<String, String> credentialInfoMap = new HashMap<>();
            credentialInfoMap = mapper.readValue(credentialInfo, HashMap.class);
            for (String key : credentialInfoMap.keySet()) {

                if ("AccessKeyId".equals(key)) {
                    access_key_id = credentialInfoMap.get(key);
                } else if ("SecretAccessKey".equals(key)) {
                    secret_key_id = credentialInfoMap.get(key);
                }
            }
            //                System.out.println("access_key_id = "+access_key_id+" access_key_id = "+access_key_id);
            if (null != access_key_id && null != secret_key_id) {
                credentials = new BasicAWSCredentials(access_key_id, secret_key_id);
                //                    s3Client = AmazonS3ClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(awsCreds)).build();
            } else {
                throw new AmazonClientException("Cannot load the credentials from the credential information. "
                        + "Please make sure that your credentials file is at the correct, and is in valid format.",
                        e);
            }
        }

        ClientConfiguration config = new ClientConfiguration();
        config.setConnectionTimeout(250000);
        config.setSocketTimeout(50000);

        s3Client = new AmazonS3Client(credentials, config);
        Region usEast = Region.getRegion(Regions.US_EAST_1);
        s3Client.setRegion(usEast);

        String bookName = args[2];

        S3BookInfo bookInfo = new S3BookInfo();
        bookInfo.setBookName(bookName);
        bookInfo.setBucketSourceName(args[0]);
        bookInfo.setBucketTargetName(args[1]);
        bookInfo.setCompressionSize(15000000);

        // *** Generate metadadta *****

        //                S3Util.copyS3ObjectTiffMetadata(s3client, "ul-bagit", "ul-ir-workspace", "Zuniga_1591/data/004.tif", "Zuniga_1591/data/004.tif");
        //                S3Util.copyS3ObjectTiffMetadata(s3client, "ul-bagit", "ul-ir-workspace", "Zuniga_1591/data/004.tif", "Zuniga_1591/data/004-20.tif");
        //            S3Util.copyS3ObjectTiffMetadata(s3client, "ul-bagit", "ul-ir-workspace", "Zuniga_1591/data/004.tif", "Zuniga_1591/data/004-50.tif");

        // *** Generate small tiffs *****
        Integer threadMaxCount = 0;
        try {
            threadMaxCount = Integer.valueOf(args[3]);
        } catch (Exception ex) {
            ex.printStackTrace();//logger.error("Cannot parse the thread count! "+ex.getMessage());
            return;
        }
        System.out.println(
                "arg0 = " + args[0] + " arg1 = " + args[1] + " arg2 = " + args[2] + " arg3 = " + args[3]);
        ExecutorService executor = Executors.newFixedThreadPool(threadMaxCount);
        List<String> tiffDiff = S3Util.getBucketObjectKeyList(bookInfo.getBucketSourceName(), args[2],
                s3Client);//.getS3BucketFolderObjDiff(s3Client, args[0], bookName+"/data", args[1], bookName+"/data");
        int diff = tiffDiff.size();
        if (diff > 0) {
            System.out.println("There are totally " + String.valueOf(diff)
                    + " tiff images to process.\nStart processing at " + (new java.util.Date()).toString());
            AwsDataProcessorThreadFactory threadFactory = new AwsDataProcessorThreadFactory();
            for (int i = 0; i <= 10; i++) {
                //                    S3TiffProcessorThread s3TiffProcessorThread = new S3TiffProcessorThread(s3Client, bookInfo, String.valueOf(i)+".tif", tiffDiff);
                //                    threadFactory.setIndex(i);
                //                    threadFactory.setJobType("small-tiff-" + bookName);
                //                    executor.execute(threadFactory.newThread(s3TiffProcessorThread));
                //                    System.out.println("obj has path = "+bookInfo.getBucketSourceName() + tiffDiff.get(i));
                S3TiffMetadataProcessorThread thread = new S3TiffMetadataProcessorThread(s3Client, bookInfo,
                        String.valueOf(i) + ".tif", tiffDiff);
                threadFactory.setIndex(i);
                threadFactory.setJobType("tiff-metadata-" + bookName);
                executor.execute(threadFactory.newThread(thread));
            }
        } else {
            System.out.println("There are no tiff images to process");
        }
        executor.shutdown();
        while (!executor.isTerminated()) {
        }
        System.out.println("All the derivatives were generated at " + (new java.util.Date()).toString() + "!");

    } catch (Exception ex) {
        ex.printStackTrace();//logger.error("Cannot finish generating the small tiff images" + ex.getMessage());
    }
}