Example usage for com.amazonaws.regions RegionUtils getRegion

Introduction

This page collects example usages of com.amazonaws.regions.RegionUtils.getRegion from open source projects.

Prototype

public static Region getRegion(String regionName) 

Document

Returns the region with the given regionName and proper partition if found in region metadata.
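A minimal sketch of the call itself (the region name "us-west-2" is only an illustrative value): getRegion looks the name up in the SDK's region metadata and returns null when the name is unknown, which is why several of the examples below guard the result.

import com.amazonaws.regions.Region;
import com.amazonaws.regions.RegionUtils;

public class RegionLookupExample {
    public static void main(String[] args) {
        // Resolve a region by name; getRegion returns null if the name
        // is not present in the SDK's region metadata.
        Region region = RegionUtils.getRegion("us-west-2");
        if (region != null) {
            System.out.println("Resolved region: " + region.getName());
        } else {
            System.out.println("Unknown region name");
        }
    }
}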

Usage

From source file:net.firejack.aws.web.controller.AWSController.java

License:Apache License

@ResponseBody
@RequestMapping(value = "install", method = RequestMethod.POST)
public Status startInstance(@RequestBody Auth auth) {
    if (!auth.isValid())
        throw new AmazonServiceException("Access or Secret Key is empty");

    if (amazonEC2 != null) {
        amazonEC2.shutdown();
    }

    amazonEC2 = new AmazonEC2Client(new BasicAWSCredentials(auth.getAccessKey(), auth.getSecretKey()));
    amazonEC2.setRegion(RegionUtils.getRegion(instanceRegion));

    RunInstancesRequest runInstancesRequest = new RunInstancesRequest();
    runInstancesRequest.setInstanceType(InstanceType.fromValue(instanceType));
    runInstancesRequest.setImageId(instanceAmi);
    runInstancesRequest.setMinCount(1);
    runInstancesRequest.setMaxCount(1);

    KeyPair keyPair = createKeyPair();
    String privateKey = keyPair.getKeyMaterial();
    String fileName;
    try {
        fileName = saveKeyFile(keyPair.getKeyName(), privateKey);
    } catch (FileNotFoundException | UnsupportedEncodingException e) {
        throw new AmazonServiceException("Could not create the key file");
    }
    runInstancesRequest.setKeyName(keyPair.getKeyName());

    CreateSecurityGroupResult securityGroupResult = createSecurityGroupWithRules();
    Collection<String> securityGroupIds = new ArrayList<>();
    securityGroupIds.add(securityGroupResult.getGroupId());
    runInstancesRequest.setSecurityGroupIds(securityGroupIds);

    amazonEC2.runInstances(runInstancesRequest);

    return new Status("Server has been started", fileName);
}

From source file:org.anhonesteffort.p25.ImbeefConfig.java

License:Open Source License

public ImbeefConfig() throws IOException {
    Properties properties = new Properties();
    try (FileInputStream in = new FileInputStream("imbeef.properties")) {
        properties.load(in);
    }

    region = RegionUtils.getRegion(properties.getProperty("region"));
    streamName = properties.getProperty("stream_name");
    accessKeyId = properties.getProperty("access_key_id");
    secretKey = properties.getProperty("secret_key");
    appName = properties.getProperty("app_name");
    appVersion = properties.getProperty("app_version");

    readIntervalMs = Long.parseLong(properties.getProperty("read_interval_ms"));
    minCallDataUnitRate = Double.parseDouble(properties.getProperty("min_call_data_unit_rate"));
    callInactiveCheckRate = Double.parseDouble(properties.getProperty("call_inactive_check_rate"));
    terminatorTimeoutMs = Long.parseLong(properties.getProperty("terminator_timeout_ms"));
    maxAudioChunkSize = Integer.parseInt(properties.getProperty("max_audio_chunk_size"));

    s3PoolSize = Integer.parseInt(properties.getProperty("s3_pool_size"));
    s3Bucket = properties.getProperty("s3_bucket");
    s3KeyPrefix = properties.getProperty("s3_key_prefix");

    if (region == null) {
        throw new IOException("invalid region");
    }
}

From source file:org.anhonesteffort.p25.kinesis.KinesisConfig.java

License:Open Source License

@Override
@JsonProperty
public Region getRegion() {
    return RegionUtils.getRegion(region);
}

From source file:org.apache.hadoop.dynamodb.DynamoDBUtil.java

License:Open Source License

/**
 * Calculates the DynamoDB endpoint.
 *
 * Algorithm details:
 * <ol>
 * <li> Use endpoint in job configuration "dynamodb.endpoint" value if available
 * <li> Use endpoint from region in job configuration "dynamodb.region" value if available
 * <li> Use endpoint from region in job configuration "dynamodb.regionid" value if available
 * <li> Use endpoint from EC2 Metadata of instance if available
 * <li> If all previous attempts at retrieving endpoint fail, default to us-east-1 endpoint
 * </ol>
 *
 * @param conf   Job Configuration
 * @param region optional preferred region
 * @return endpoint for the DynamoDB service
 */
public static String getDynamoDBEndpoint(Configuration conf, String region) {
    String endpoint = getValueFromConf(conf, DynamoDBConstants.ENDPOINT);
    if (Strings.isNullOrEmpty(endpoint)) {
        if (Strings.isNullOrEmpty(region)) {
            region = getValueFromConf(conf, DynamoDBConstants.REGION);
        }
        if (Strings.isNullOrEmpty(region)) {
            region = getValueFromConf(conf, DynamoDBConstants.REGION_ID);
        }
        if (Strings.isNullOrEmpty(region)) {
            try {
                region = EC2MetadataUtils.getEC2InstanceRegion();
            } catch (Exception e) {
                log.warn(String.format("Exception when attempting to get AWS region information. Will "
                        + "ignore and default to %s", DynamoDBConstants.DEFAULT_AWS_REGION), e);
            }
        }
        if (Strings.isNullOrEmpty(region)) {
            region = DynamoDBConstants.DEFAULT_AWS_REGION;
        }
        endpoint = RegionUtils.getRegion(region).getServiceEndpoint(ServiceAbbreviations.Dynamodb);
    }
    log.info("Using endpoint for DynamoDB: " + endpoint);
    return endpoint;
}
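
A hypothetical caller might resolve the endpoint like this (a sketch only; the property key "dynamodb.region" follows the lookup order documented above, but the configuration value shown is an assumption for illustration):

// Sketch of a caller: set a region in the job configuration and let
// getDynamoDBEndpoint fall through the documented lookup order.
Configuration conf = new Configuration();
conf.set("dynamodb.region", "eu-west-1"); // illustrative region value
String endpoint = DynamoDBUtil.getDynamoDBEndpoint(conf, null);
System.out.println("Resolved DynamoDB endpoint: " + endpoint);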

From source file:org.apache.nutch.indexwriter.cloudsearch.CloudSearchIndexWriter.java

License:Apache License

@Override
public void open(IndexWriterParams parameters) throws IOException {
    //    LOG.debug("CloudSearchIndexWriter.open() name={} ", name);

    endpoint = parameters.get(CloudSearchConstants.ENDPOINT);
    dumpBatchFilesToTemp = parameters.getBoolean(CloudSearchConstants.BATCH_DUMP, false);
    this.regionName = parameters.get(CloudSearchConstants.REGION);

    if (StringUtils.isBlank(endpoint) && !dumpBatchFilesToTemp) {
        String message = "Missing CloudSearch endpoint. Should set it set via -D "
                + CloudSearchConstants.ENDPOINT + " or in nutch-site.xml";
        message += "\n" + describe();
        LOG.error(message);
        throw new RuntimeException(message);
    }

    maxDocsInBatch = parameters.getInt(CloudSearchConstants.MAX_DOCS_BATCH, -1);

    buffer = new StringBuffer(MAX_SIZE_BATCH_BYTES).append('[');

    if (dumpBatchFilesToTemp) {
        // only dumping to local file
        // no more config required
        return;
    }

    if (StringUtils.isBlank(endpoint)) {
        throw new RuntimeException("endpoint not set for CloudSearch");
    }

    AmazonCloudSearchClient cl = new AmazonCloudSearchClient();
    if (StringUtils.isNotBlank(regionName)) {
        cl.setRegion(RegionUtils.getRegion(regionName));
    }

    String domainName = null;

    // retrieve the domain name
    DescribeDomainsResult domains = cl.describeDomains(new DescribeDomainsRequest());

    Iterator<DomainStatus> dsiter = domains.getDomainStatusList().iterator();
    while (dsiter.hasNext()) {
        DomainStatus ds = dsiter.next();
        if (ds.getDocService().getEndpoint().equals(endpoint)) {
            domainName = ds.getDomainName();
            break;
        }
    }

    // check domain name
    if (StringUtils.isBlank(domainName)) {
        throw new RuntimeException("No domain name found for CloudSearch endpoint");
    }

    DescribeIndexFieldsResult indexDescription = cl
            .describeIndexFields(new DescribeIndexFieldsRequest().withDomainName(domainName));
    for (IndexFieldStatus ifs : indexDescription.getIndexFields()) {
        String indexname = ifs.getOptions().getIndexFieldName();
        String indextype = ifs.getOptions().getIndexFieldType();
        LOG.info("CloudSearch index name {} of type {}", indexname, indextype);
        csfields.put(indexname, indextype);
    }

    client = new AmazonCloudSearchDomainClient();
    client.setEndpoint(endpoint);
}

From source file:org.broadleafcommerce.vendor.amazon.s3.S3ConfigurationServiceImpl.java

License:Apache License

private void initS3ConfigurationImpl() {
    final long ts1 = System.currentTimeMillis();
    s3config = new S3Configuration();
    s3config.setAwsSecretKey(lookupProperty("aws.s3.secretKey"));
    s3config.setDefaultBucketName(lookupProperty("aws.s3.defaultBucketName"));
    s3config.setDefaultBucketRegion(RegionUtils.getRegion(lookupProperty("aws.s3.defaultBucketRegion")));
    s3config.setGetAWSAccessKeyId(lookupProperty("aws.s3.accessKeyId"));
    s3config.setEndpointURI(lookupProperty("aws.s3.endpointURI"));
    s3config.setBucketSubDirectory(lookupProperty("aws.s3.bucketSubDirectory"));

    String uploadedAssetStorageBucketSubDirectory = lookupProperty(
            "aws.s3.uploadedAssetStorageBucketSubDirectory");
    if (!Strings.isNullOrEmpty(uploadedAssetStorageBucketSubDirectory)) {
        s3config.setUploadedAssetStorageBucketSubDirectory(uploadedAssetStorageBucketSubDirectory);
    }

    final String staticAssetFileExtensionPatternStr = lookupProperty("aws.s3.staticAssetFileExtensionPattern");
    if (!Strings.isNullOrEmpty(staticAssetFileExtensionPatternStr)) {
        s3config.setStaticAssetFileExtensionPattern(staticAssetFileExtensionPatternStr);
    }

    final String manifestVersionKey = lookupProperty("aws.s3.manifestVersionKey");
    if (!Strings.isNullOrEmpty(manifestVersionKey)) {
        String versionSubDirectory = Manifests.read(manifestVersionKey);
        if (Strings.isNullOrEmpty(versionSubDirectory)) {
            // Fall back to "dev" when the manifest carries no version value.
            versionSubDirectory = "dev";
        }
        s3config.setVersionSubDirectory(versionSubDirectory);
    }

    final boolean accessSecretKeyBlank = StringUtils.isEmpty(s3config.getAwsSecretKey());
    final boolean accessKeyIdBlank = StringUtils.isEmpty(s3config.getGetAWSAccessKeyId());
    final boolean bucketNameBlank = StringUtils.isEmpty(s3config.getDefaultBucketName());
    final Region region = s3config.getDefaultBucketRegion();
    final long ts2 = System.currentTimeMillis();

    if (LOG.isTraceEnabled()) {
        final String msg = String.format("%s - using s3://%s/%s in region %s; setup time = %dms",
                s3config.getEndpointURI(), s3config.getDefaultBucketName(), s3config.getBucketSubDirectory(),
                // Pass region directly: %s formats null safely, whereas toString() would NPE before the check below.
                region, ts2 - ts1);
        LOG.trace(msg);
    }

    if (region == null || accessSecretKeyBlank || accessKeyIdBlank || bucketNameBlank) {
        StringBuilder errorMessage = new StringBuilder("Amazon S3 Configuration Error : ");

        if (accessSecretKeyBlank) {
            errorMessage.append("aws.s3.secretKey was blank,");
        }

        if (accessKeyIdBlank) {
            errorMessage.append("aws.s3.accessKeyId was blank,");
        }

        if (bucketNameBlank) {
            errorMessage.append("aws.s3.defaultBucketName was blank,");
        }

        if (region == null) {
            errorMessage.append("aws.s3.defaultBucketRegion was set to an invalid value of "
                    + s3config.getDefaultBucketRegion());
        }
        throw new IllegalArgumentException(errorMessage.toString());
    }
}

From source file:org.eluder.logback.ext.dynamodb.appender.DynamoDbAppender.java

License:Open Source License

@Override
protected void doStart() {
    dynamoDb = new AmazonDynamoDBAsyncClient(getCredentials(), getClientConfiguration(),
            AppenderExecutors.newExecutor(this, getThreadPoolSize()));
    dynamoDb.setRegion(RegionUtils.getRegion(region));
}

From source file:org.gradle.internal.resource.transport.aws.s3.S3RegionalResource.java

License:Apache License

private void configure() {
    Matcher matcher = REGIONAL_ENDPOINT_PATTERN.matcher(uri.toString());
    if (matcher.find()) {
        String bucketName = matcher.group(1);
        String region = matcher.group(2);
        String key = matcher.group(4);
        Region derivedRegion;
        if (region.equals("external-1")) {
            derivedRegion = Region.getRegion(Regions.US_EAST_1);
        } else {
            derivedRegion = RegionUtils.getRegion(region);
        }

        this.region = derivedRegion;
        this.bucketName = bucketName;
        this.key = key;
    } else {
        this.region = DEFAULT_REGION;
        this.bucketName = getBucketName(uri.getHost());
        this.key = getS3BucketKey(uri);
    }
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void init(Element config) throws IOException {
    this.name = Main.cloudBucket.toLowerCase();
    this.staged_sync_location.mkdirs();
    try {
        if (config.hasAttribute("default-bucket-location")) {
            bucketLocation = RegionUtils.getRegion(config.getAttribute("default-bucket-location"));

        }
        if (config.hasAttribute("connection-check-interval")) {
            this.checkInterval = Integer.parseInt(config.getAttribute("connection-check-interval"));
        }
        if (config.hasAttribute("block-size")) {
            int sz = (int) StringUtils.parseSize(config.getAttribute("block-size"));
            HashBlobArchive.MAX_LEN = sz;
        }
        if (config.hasAttribute("allow-sync")) {
            HashBlobArchive.allowSync = Boolean.parseBoolean(config.getAttribute("allow-sync"));
            if (config.hasAttribute("sync-check-schedule")) {
                try {
                    new SyncFSScheduler(config.getAttribute("sync-check-schedule"));
                } catch (Exception e) {
                    SDFSLogger.getLog().error("unable to start sync scheduler", e);
                }
            }

        }
        if (config.hasAttribute("upload-thread-sleep-time")) {
            int tm = Integer.parseInt(config.getAttribute("upload-thread-sleep-time"));
            HashBlobArchive.THREAD_SLEEP_TIME = tm;
        }
        if (config.hasAttribute("cache-writes")) {
            HashBlobArchive.cacheWrites = Boolean.parseBoolean(config.getAttribute("cache-writes"));
        }
        if (config.hasAttribute("cache-reads")) {
            HashBlobArchive.cacheReads = Boolean.parseBoolean(config.getAttribute("cache-reads"));
        }
        if (config.hasAttribute("sync-files")) {
            boolean syncf = Boolean.parseBoolean(config.getAttribute("sync-files"));
            if (syncf) {
                new FileReplicationService(this);
            }
        }
        int rsp = 0;
        int wsp = 0;
        if (config.hasAttribute("read-speed")) {
            rsp = Integer.parseInt(config.getAttribute("read-speed"));
        }
        if (config.hasAttribute("write-speed")) {
            wsp = Integer.parseInt(config.getAttribute("write-speed"));
        }
        if (config.hasAttribute("local-cache-size")) {
            long sz = StringUtils.parseSize(config.getAttribute("local-cache-size"));
            HashBlobArchive.setLocalCacheSize(sz);
        }
        if (config.hasAttribute("metadata-version")) {
            this.mdVersion = Integer.parseInt(config.getAttribute("metadata-version"));
        }
        if (config.hasAttribute("map-cache-size")) {
            int sz = Integer.parseInt(config.getAttribute("map-cache-size"));
            HashBlobArchive.MAP_CACHE_SIZE = sz;
        }
        if (config.hasAttribute("io-threads")) {
            int sz = Integer.parseInt(config.getAttribute("io-threads"));
            Main.dseIOThreads = sz;
        }
        if (config.hasAttribute("clustered")) {
            this.clustered = Boolean.parseBoolean(config.getAttribute("clustered"));
        }
        if (config.hasAttribute("delete-unclaimed")) {
            this.deleteUnclaimed = Boolean.parseBoolean(config.getAttribute("delete-unclaimed"));
        }
        if (config.hasAttribute("glacier-archive-days")) {
            this.glacierDays = Integer.parseInt(config.getAttribute("glacier-archive-days"));
            if (this.glacierDays > 0)
                Main.checkArchiveOnRead = true;
        }
        if (config.hasAttribute("infrequent-access-days")) {
            this.infrequentAccess = Integer.parseInt(config.getAttribute("infrequent-access-days"));
        }
        if (config.hasAttribute("simple-s3")) {
            EncyptUtils.baseEncode = Boolean.parseBoolean(config.getAttribute("simple-s3"));
            this.simpleS3 = true;
        }
        if (config.hasAttribute("md5-sum")) {
            this.md5sum = Boolean.parseBoolean(config.getAttribute("md5-sum"));
            if (!this.md5sum) {
                System.setProperty("com.amazonaws.services.s3.disableGetObjectMD5Validation", "true");
                System.setProperty("com.amazonaws.services.s3.disablePutObjectMD5Validation", "true");
            }

        }
        ClientConfiguration clientConfig = new ClientConfiguration();
        if (config.hasAttribute("use-v4-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-v4-signer"));

            if (v4s) {
                clientConfig.setSignerOverride("AWSS3V4SignerType");
            }
        }
        if (config.hasAttribute("use-basic-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-basic-signer"));
            if (v4s) {
                clientConfig.setSignerOverride("S3SignerType");
            }
        }

        clientConfig.setMaxConnections(Main.dseIOThreads * 2);
        clientConfig.setConnectionTimeout(10000);
        clientConfig.setSocketTimeout(10000);

        String s3Target = null;
        if (config.getElementsByTagName("connection-props").getLength() > 0) {
            Element el = (Element) config.getElementsByTagName("connection-props").item(0);
            if (el.hasAttribute("connection-timeout"))
                clientConfig.setConnectionTimeout(Integer.parseInt(el.getAttribute("connection-timeout")));
            if (el.hasAttribute("socket-timeout"))
                clientConfig.setSocketTimeout(Integer.parseInt(el.getAttribute("socket-timeout")));
            if (el.hasAttribute("local-address"))
                clientConfig.setLocalAddress(InetAddress.getByName(el.getAttribute("local-address")));
            if (el.hasAttribute("max-retry"))
                clientConfig.setMaxErrorRetry(Integer.parseInt(el.getAttribute("max-retry")));
            if (el.hasAttribute("protocol")) {
                String pr = el.getAttribute("protocol");
                if (pr.equalsIgnoreCase("http"))
                    clientConfig.setProtocol(Protocol.HTTP);
                else
                    clientConfig.setProtocol(Protocol.HTTPS);

            }
            if (el.hasAttribute("s3-target")) {
                s3Target = el.getAttribute("s3-target");
            }
            if (el.hasAttribute("proxy-host")) {
                clientConfig.setProxyHost(el.getAttribute("proxy-host"));
            }
            if (el.hasAttribute("proxy-domain")) {
                clientConfig.setProxyDomain(el.getAttribute("proxy-domain"));
            }
            if (el.hasAttribute("proxy-password")) {
                clientConfig.setProxyPassword(el.getAttribute("proxy-password"));
            }
            if (el.hasAttribute("proxy-port")) {
                clientConfig.setProxyPort(Integer.parseInt(el.getAttribute("proxy-port")));
            }
            if (el.hasAttribute("proxy-username")) {
                clientConfig.setProxyUsername(el.getAttribute("proxy-username"));
            }
        }

        if (s3Target != null && s3Target.toLowerCase().startsWith("https")) {
            TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
                @Override
                public boolean isTrusted(X509Certificate[] certificate, String authType) {
                    return true;
                }
            };
            SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                    SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
            clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        }
        if (awsCredentials != null)
            s3Service = new AmazonS3Client(awsCredentials, clientConfig);
        else
            s3Service = new AmazonS3Client(new InstanceProfileCredentialsProvider(), clientConfig);
        if (bucketLocation != null) {
            s3Service.setRegion(bucketLocation);
            System.out.println("bucketLocation=" + bucketLocation.toString());
        }
        if (s3Target != null) {
            s3Service.setEndpoint(s3Target);
            System.out.println("target=" + s3Target);
        }
        if (config.hasAttribute("disableDNSBucket")) {
            s3Service.setS3ClientOptions(new S3ClientOptions()
                    .withPathStyleAccess(Boolean.parseBoolean(config.getAttribute("disableDNSBucket")))
                    .disableChunkedEncoding());
            System.out.println(
                    "disableDNSBucket=" + Boolean.parseBoolean(config.getAttribute("disableDNSBucket")));
        }
        if (!s3Service.doesBucketExist(this.name)) {
            s3Service.createBucket(this.name);
            SDFSLogger.getLog().info("created new store " + name);
            ObjectMetadata md = new ObjectMetadata();
            md.addUserMetadata("currentsize", "0");
            md.addUserMetadata("currentcompressedsize", "0");
            md.addUserMetadata("clustered", "true");
            md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
            md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
            md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

            this.clustered = true;
            byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
            if (md5sum) {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.setContentMD5(mds);
            }
            md.setContentLength(sz.length);
            this.binm = "bucketinfo/"
                    + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
        } else {
            Map<String, String> obj = null;
            ObjectMetadata omd = null;
            try {
                omd = s3Service.getObjectMetadata(this.name, binm);
                obj = omd.getUserMetadata();
                obj.get("currentsize");
            } catch (Exception e) {
                omd = null;
                SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
            }
            if (omd == null) {
                try {
                    this.binm = "bucketinfo/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                    omd = s3Service.getObjectMetadata(this.name, binm);
                    obj = omd.getUserMetadata();
                    obj.get("currentsize");
                } catch (Exception e) {
                    omd = null;
                    SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
                }
            }
            if (omd == null) {
                ObjectMetadata md = new ObjectMetadata();
                md.addUserMetadata("currentsize", "0");
                md.addUserMetadata("currentcompressedsize", "0");
                md.addUserMetadata("clustered", "true");
                md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

                this.clustered = true;
                this.binm = "bucketinfo/"
                        + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                if (md5sum) {
                    String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.setContentMD5(mds);
                }
                md.setContentLength(sz.length);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
            } else {
                if (obj.containsKey("currentsize")) {
                    long cl = Long.parseLong((String) obj.get("currentsize"));
                    if (cl >= 0) {
                        HashBlobArchive.currentLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly len=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }

                if (obj.containsKey("currentcompressedsize")) {
                    long cl = Long.parseLong((String) obj.get("currentcompressedsize"));
                    if (cl >= 0) {
                        HashBlobArchive.compressedLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly clen=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }
                if (obj.containsKey("clustered")) {
                    this.clustered = Boolean.parseBoolean(obj.get("clustered"));
                } else
                    this.clustered = false;

                obj.put("clustered", Boolean.toString(this.clustered));
                omd.setUserMetadata(obj);
                try {

                    updateObject(binm, omd);
                } catch (Exception e) {
                    SDFSLogger.getLog().warn("unable to update bucket info in init", e);
                    SDFSLogger.getLog().info("created new store " + name);
                    ObjectMetadata md = new ObjectMetadata();
                    md.addUserMetadata("currentsize", "0");
                    md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.addUserMetadata("currentcompressedsize", "0");
                    md.addUserMetadata("clustered", Boolean.toString(this.clustered));
                    md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                    md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    if (md5sum) {
                        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                        md.setContentMD5(mds);
                    }
                    md.setContentLength(sz.length);
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);

                }
            }
        }
        ArrayList<Transition> trs = new ArrayList<Transition>();
        if (this.glacierDays > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.glacierDays)
                    .withStorageClass(StorageClass.Glacier);
            trs.add(transToArchive);
        }

        if (this.infrequentAccess > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.infrequentAccess)
                    .withStorageClass(StorageClass.StandardInfrequentAccess);
            trs.add(transToArchive);

        }
        if (trs.size() > 0) {
            BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
                    .withId("SDFS Automated Archive Rule for Block Data").withPrefix("blocks/")
                    .withTransitions(trs).withStatus(BucketLifecycleConfiguration.ENABLED.toString());
            List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
            rules.add(ruleArchiveAndExpire);

            BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);

            // Save configuration.
            s3Service.setBucketLifecycleConfiguration(this.name, configuration);
        } else if (s3Target == null) {
            s3Service.deleteBucketLifecycleConfiguration(this.name);
        }
        HashBlobArchive.init(this);
        HashBlobArchive.setReadSpeed(rsp);
        HashBlobArchive.setWriteSpeed(wsp);
        Thread th = new Thread(this);
        th.start();
    } catch (Exception e) {
        SDFSLogger.getLog().error("unable to start service", e);
        throw new IOException(e);
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public boolean checkAccess(String username, String password, Properties props) throws Exception {
    BasicAWSCredentials _cred = new BasicAWSCredentials(username, password);
    if (props.containsKey("default-bucket-location")) {
        bucketLocation = RegionUtils.getRegion(props.getProperty("default-bucket-location"));
    }

    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setMaxConnections(Main.dseIOThreads * 2);
    clientConfig.setConnectionTimeout(10000);
    clientConfig.setSocketTimeout(10000);
    String s3Target = null;

    if (props.containsKey("s3-target")) {
        s3Target = props.getProperty("s3-target");
    }
    if (props.containsKey("proxy-host")) {
        clientConfig.setProxyHost(props.getProperty("proxy-host"));
    }
    if (props.containsKey("proxy-domain")) {
        clientConfig.setProxyDomain(props.getProperty("proxy-domain"));
    }
    if (props.containsKey("proxy-password")) {
        clientConfig.setProxyPassword(props.getProperty("proxy-password"));
    }
    if (props.containsKey("proxy-port")) {
        clientConfig.setProxyPort(Integer.parseInt(props.getProperty("proxy-port")));
    }
    if (props.containsKey("proxy-username")) {
        clientConfig.setProxyUsername(props.getProperty("proxy-username"));
    }
    s3Service = new AmazonS3Client(_cred, clientConfig);
    if (s3Target != null) {
        TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
            @Override
            public boolean isTrusted(X509Certificate[] certificate, String authType) {
                return true;
            }
        };
        SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
        clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        s3Service.setEndpoint(s3Target);
    }
    s3Service.listBuckets();
    return true;
}