Example usage for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata

Introduction

This page collects example usages of the com.amazonaws.services.s3.model ObjectMetadata no-argument constructor, ObjectMetadata().

Prototype

public ObjectMetadata() 
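
Before the project examples below, here is a minimal, self-contained sketch of the constructor in use. It assumes a 1.11.x-era v1 SDK with credentials and region available from the default provider chain; the bucket and key names are placeholders.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class ObjectMetadataExample {
    public static void main(String[] args) {
        // Placeholder client; assumes the default credential/region provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

        // Create empty metadata, then populate the headers S3 should store with the object.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("text/plain");
        metadata.setContentLength(payload.length); // known up front, so the SDK need not buffer the stream

        s3.putObject("my-bucket", "my-key", new ByteArrayInputStream(payload), metadata);
    }
}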

Usage

From source file:org.kuali.rice.krad.service.impl.AmazonS3AttachmentServiceImpl.java

License:Educational Community License

/**
 * @see org.kuali.rice.krad.service.AttachmentService#createAttachment(GloballyUnique,
 * String, String, int, java.io.InputStream, String)
 */
@Override
public Attachment createAttachment(GloballyUnique parent, String uploadedFileName, String mimeType,
        int fileSize, InputStream fileContents, String attachmentTypeCode) throws IOException {
    if (parent == null) {
        throw new IllegalArgumentException("invalid (null or uninitialized) document");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("starting to create attachment for document: " + parent.getObjectId());
    }
    if (StringUtils.isBlank(uploadedFileName)) {
        throw new IllegalArgumentException("invalid (blank) fileName");
    }
    if (StringUtils.isBlank(mimeType)) {
        throw new IllegalArgumentException("invalid (blank) mimeType");
    }
    if (fileSize <= 0) {
        throw new IllegalArgumentException("invalid (non-positive) fileSize");
    }
    if (fileContents == null) {
        throw new IllegalArgumentException("invalid (null) inputStream");
    }

    String uniqueFileNameGuid = UUID.randomUUID().toString();

    TransferManager manager = new TransferManager(this.amazonS3);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(mimeType);
    metadata.setContentDisposition("attachment; filename=" + URLEncoder.encode(uploadedFileName, "UTF-8"));
    metadata.setContentLength(fileSize);
    Upload upload = manager.upload(this.bucketName, generateObjectKey(uniqueFileNameGuid), fileContents,
            metadata);
    try {
        upload.waitForCompletion();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag before surfacing the failure
        throw new IllegalStateException("Failed to upload file to s3", e);
    }

    // create DocumentAttachment
    Attachment attachment = new Attachment();
    attachment.setAttachmentIdentifier(uniqueFileNameGuid);
    attachment.setAttachmentFileName(uploadedFileName);
    attachment.setAttachmentFileSize(Long.valueOf(fileSize));
    attachment.setAttachmentMimeTypeCode(mimeType);
    attachment.setAttachmentTypeCode(attachmentTypeCode);

    if (LOG.isDebugEnabled()) {
        LOG.debug("finished creating attachment for document: " + parent.getObjectId());
    }
    return attachment;
}
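
A detail worth noting in this example: the content length is set from the caller-supplied fileSize before the upload starts. When the v1 SDK is handed an InputStream without a known length, it generally has to buffer the stream in memory to determine one (and logs a warning to that effect), so supplying the length up front matters for large attachments.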

From source file:org.mobicents.servlet.restcomm.amazonS3.S3AccessTool.java

License:Open Source License

public URI uploadFile(final String fileToUpload) {
    AWSCredentials credentials = new BasicAWSCredentials(accessKey, securityKey);
    AmazonS3 s3client = new AmazonS3Client(credentials);
    try {
        StringBuilder bucket = new StringBuilder();
        bucket.append(bucketName);
        if (folder != null && !folder.isEmpty())
            bucket.append("/").append(folder);
        URI fileUri = URI.create(fileToUpload);
        logger.info("File to upload to S3: " + fileUri.toString());
        File file = new File(fileUri);
        // First, generate the presigned URL; doing this first buys time for the file to be written to disk
        Date date = new Date();
        Calendar cal = Calendar.getInstance();
        cal.setTime(date);
        if (daysToRetainPublicUrl > 0) {
            cal.add(Calendar.DATE, daysToRetainPublicUrl);
        } else {
            //By default the Public URL will be valid for 180 days
            cal.add(Calendar.DATE, 180);
        }
        date = cal.getTime();
        GeneratePresignedUrlRequest generatePresignedUrlRequestGET = new GeneratePresignedUrlRequest(
                bucket.toString(), file.getName());
        generatePresignedUrlRequestGET.setMethod(HttpMethod.GET);
        generatePresignedUrlRequestGET.setExpiration(date);

        URL downloadUrl = s3client.generatePresignedUrl(generatePresignedUrlRequestGET);

        //Second, upload the file to S3
        while (!FileUtils.waitFor(file, 30)) {
            // keep polling (up to 30 seconds per call) until the recording file appears on disk
        }
        if (file.exists()) {
            PutObjectRequest putRequest = new PutObjectRequest(bucket.toString(), file.getName(), file);
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentType(new MimetypesFileTypeMap().getContentType(file));
            putRequest.setMetadata(metadata);
            if (reducedRedundancy)
                putRequest.setStorageClass(StorageClass.ReducedRedundancy);
            s3client.putObject(putRequest);

            if (removeOriginalFile) {
                removeLocalFile(file);
            }
            return downloadUrl.toURI();
        } else {
            logger.error("Timeout waiting for the recording file: " + file.getAbsolutePath());
            return null;
        }
    } catch (AmazonServiceException ase) {
        logger.error("Caught an AmazonServiceException");
        logger.error("Error Message:    " + ase.getMessage());
        logger.error("HTTP Status Code: " + ase.getStatusCode());
        logger.error("AWS Error Code:   " + ase.getErrorCode());
        logger.error("Error Type:       " + ase.getErrorType());
        logger.error("Request ID:       " + ase.getRequestId());
        return null;
    } catch (AmazonClientException ace) {
        logger.error("Caught an AmazonClientException, which ");
        logger.error("Error Message: " + ace.getMessage());
        return null;
    } catch (URISyntaxException e) {
        logger.error("URISyntaxException: " + e.getMessage());
        return null;
    }
}
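
A subtlety this example relies on: generating a presigned URL is a local signing operation; it neither contacts S3 nor requires the object to exist yet. That is why the GET URL can be produced before the recording file has finished being written, with the actual upload following afterwards.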

From source file:org.mule.module.s3.simpleapi.content.FileS3ObjectContent.java

License:Open Source License

public PutObjectRequest createPutObjectRequest() {
    PutObjectRequest request = new PutObjectRequest(null, null, file);
    request.setMetadata(new ObjectMetadata());
    return request;
}

From source file:org.mule.module.s3.simpleapi.content.InputStreamS3ObjectContent.java

License:Open Source License

public PutObjectRequest createPutObjectRequest() {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(length);
    if (md5base64 != null) {
        metadata.setContentMD5(md5base64);
    }
    return new PutObjectRequest(null, null, inputStream, metadata);
}
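
The md5base64 field above must hold the base64-encoded (not hex) MD5 digest of the payload; when Content-MD5 is set, S3 verifies the received bytes against it and rejects the upload on a mismatch. Below is a minimal sketch of computing such a value, assuming the payload is available as a byte array (Md5Util and md5Base64 are hypothetical names, not part of this module):

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

public final class Md5Util {
    // Returns the base64-encoded MD5 digest, the format S3 expects for Content-MD5.
    public static String md5Base64(byte[] payload) throws NoSuchAlgorithmException {
        byte[] digest = MessageDigest.getInstance("MD5").digest(payload);
        return Base64.getEncoder().encodeToString(digest);
    }
}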

From source file:org.mule.module.s3.simpleapi.SimpleAmazonS3AmazonDevKitImpl.java

License:Open Source License

public String copyObject(@NotNull S3ObjectId source, @NotNull S3ObjectId destination,
        @NotNull ConditionalConstraints conditionalConstraints, CannedAccessControlList acl,
        StorageClass storageClass, Map<String, String> userMetadata, String encryption) {
    Validate.notNull(source);
    Validate.notNull(destination);
    Validate.notNull(conditionalConstraints);
    CopyObjectRequest request = new CopyObjectRequest(source.getBucketName(), source.getKey(),
            source.getVersionId(), destination.getBucketName(), destination.getKey());
    request.setCannedAccessControlList(acl);
    if (storageClass != null) {
        request.setStorageClass(storageClass);
    }

    if (encryption != null) {
        request.setNewObjectMetadata(new ObjectMetadata());
        request.getNewObjectMetadata().setServerSideEncryption(encryption);
        if (userMetadata != null) {
            request.getNewObjectMetadata().setUserMetadata(userMetadata);
        }
    } else if (userMetadata != null) {
        request.setNewObjectMetadata(new ObjectMetadata());
        request.getNewObjectMetadata().setUserMetadata(userMetadata);
    }

    conditionalConstraints.populate(request);
    return s3.copyObject(request).getVersionId();
}
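
Worth knowing when reading this method: in the v1 SDK, leaving newObjectMetadata unset makes the copy carry over the source object's metadata (the COPY directive), while setting it, as both branches above do, replaces the metadata wholesale (the REPLACE directive). That is why the encryption branch must also re-apply the user metadata instead of assuming it is preserved.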

From source file:org.nickelproject.nickel.blobStore.S3BlobStore.java

License:Apache License

private void putSinglePartByteArray(final BlobRef blobRef, final byte[] pBytes) {
    final ByteArrayInputStream vByteArrayInputStream = new ByteArrayInputStream(pBytes);
    final ObjectMetadata vMetadata = new ObjectMetadata();
    vMetadata.setContentLength(pBytes.length);
    s3Client.putObject(new PutObjectRequest(bucketName, blobRef.toString(), vByteArrayInputStream, vMetadata));
}
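
This is the mirror image of the streaming caveat noted earlier: with a byte[] source the exact length is free, so wrapping it in a ByteArrayInputStream and calling setContentLength gives putObject everything it needs for a single-part upload without any buffering.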

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void close() {
    this.closed = true;
    try {
        SDFSLogger.getLog().info("############ Closing Bucket##################");
        HashBlobArchive.close();

        ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
        Map<String, String> md = omd.getUserMetadata();
        ObjectMetadata nmd = new ObjectMetadata();
        nmd.setUserMetadata(md);
        md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
        md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
        md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
        md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
        md.put("lastupdate", Long.toString(System.currentTimeMillis()));
        md.put("hostname", InetAddress.getLocalHost().getHostName());
        md.put("port", Integer.toString(Main.sdfsCliPort));
        byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
        String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
        md.put("md5sum", st);
        nmd.setContentMD5(st);
        nmd.setContentLength(sz.length);
        nmd.setUserMetadata(md);
        try {
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
        } catch (AmazonS3Exception e1) {
            if (e1.getStatusCode() == 409) {
                try {
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
                } catch (Exception e2) {
                    throw new IOException(e2);
                }
            } else {

                throw new IOException(e1);
            }
        } catch (Exception e1) {
            // SDFSLogger.getLog().error("error uploading", e1);
            throw new IOException(e1);
        }
    } catch (Exception e) {
        SDFSLogger.getLog().warn("error while closing bucket " + this.name, e);
    } finally {
        try {
            s3Service.shutdown();
        } catch (Exception e) {
            SDFSLogger.getLog().debug("error while closing bucket " + this.name, e);
        }
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void init(Element config) throws IOException {
    this.name = Main.cloudBucket.toLowerCase();
    this.staged_sync_location.mkdirs();
    try {
        if (config.hasAttribute("default-bucket-location")) {
            bucketLocation = RegionUtils.getRegion(config.getAttribute("default-bucket-location"));

        }
        if (config.hasAttribute("connection-check-interval")) {
            this.checkInterval = Integer.parseInt(config.getAttribute("connection-check-interval"));
        }
        if (config.hasAttribute("block-size")) {
            int sz = (int) StringUtils.parseSize(config.getAttribute("block-size"));
            HashBlobArchive.MAX_LEN = sz;
        }
        if (config.hasAttribute("allow-sync")) {
            HashBlobArchive.allowSync = Boolean.parseBoolean(config.getAttribute("allow-sync"));
            if (config.hasAttribute("sync-check-schedule")) {
                try {
                    new SyncFSScheduler(config.getAttribute("sync-check-schedule"));
                } catch (Exception e) {
                    SDFSLogger.getLog().error("unable to start sync scheduler", e);
                }
            }

        }
        if (config.hasAttribute("upload-thread-sleep-time")) {
            int tm = Integer.parseInt(config.getAttribute("upload-thread-sleep-time"));
            HashBlobArchive.THREAD_SLEEP_TIME = tm;
        }
        if (config.hasAttribute("cache-writes")) {
            HashBlobArchive.cacheWrites = Boolean.parseBoolean(config.getAttribute("cache-writes"));
        }
        if (config.hasAttribute("cache-reads")) {
            HashBlobArchive.cacheReads = Boolean.parseBoolean(config.getAttribute("cache-reads"));
        }
        if (config.hasAttribute("sync-files")) {
            boolean syncf = Boolean.parseBoolean(config.getAttribute("sync-files"));
            if (syncf) {
                new FileReplicationService(this);
            }
        }
        int rsp = 0;
        int wsp = 0;
        if (config.hasAttribute("read-speed")) {
            rsp = Integer.parseInt(config.getAttribute("read-speed"));
        }
        if (config.hasAttribute("write-speed")) {
            wsp = Integer.parseInt(config.getAttribute("write-speed"));
        }
        if (config.hasAttribute("local-cache-size")) {
            long sz = StringUtils.parseSize(config.getAttribute("local-cache-size"));
            HashBlobArchive.setLocalCacheSize(sz);
        }
        if (config.hasAttribute("metadata-version")) {
            this.mdVersion = Integer.parseInt(config.getAttribute("metadata-version"));
        }
        if (config.hasAttribute("map-cache-size")) {
            int sz = Integer.parseInt(config.getAttribute("map-cache-size"));
            HashBlobArchive.MAP_CACHE_SIZE = sz;
        }
        if (config.hasAttribute("io-threads")) {
            int sz = Integer.parseInt(config.getAttribute("io-threads"));
            Main.dseIOThreads = sz;
        }
        if (config.hasAttribute("clustered")) {
            this.clustered = Boolean.parseBoolean(config.getAttribute("clustered"));
        }
        if (config.hasAttribute("delete-unclaimed")) {
            this.deleteUnclaimed = Boolean.parseBoolean(config.getAttribute("delete-unclaimed"));
        }
        if (config.hasAttribute("glacier-archive-days")) {
            this.glacierDays = Integer.parseInt(config.getAttribute("glacier-archive-days"));
            if (this.glacierDays > 0)
                Main.checkArchiveOnRead = true;
        }
        if (config.hasAttribute("infrequent-access-days")) {
            this.infrequentAccess = Integer.parseInt(config.getAttribute("infrequent-access-days"));
        }
        if (config.hasAttribute("simple-s3")) {
            EncyptUtils.baseEncode = Boolean.parseBoolean(config.getAttribute("simple-s3"));
            this.simpleS3 = true;
        }
        if (config.hasAttribute("md5-sum")) {
            this.md5sum = Boolean.parseBoolean(config.getAttribute("md5-sum"));
            if (!this.md5sum) {
                System.setProperty("com.amazonaws.services.s3.disableGetObjectMD5Validation", "true");
                System.setProperty("com.amazonaws.services.s3.disablePutObjectMD5Validation", "true");
            }

        }
        ClientConfiguration clientConfig = new ClientConfiguration();
        if (config.hasAttribute("use-v4-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-v4-signer"));

            if (v4s) {
                clientConfig.setSignerOverride("AWSS3V4SignerType");
            }
        }
        if (config.hasAttribute("use-basic-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-basic-signer"));
            if (v4s) {
                clientConfig.setSignerOverride("S3SignerType");
            }
        }

        clientConfig.setMaxConnections(Main.dseIOThreads * 2);
        clientConfig.setConnectionTimeout(10000);
        clientConfig.setSocketTimeout(10000);

        String s3Target = null;
        if (config.getElementsByTagName("connection-props").getLength() > 0) {
            Element el = (Element) config.getElementsByTagName("connection-props").item(0);
            if (el.hasAttribute("connection-timeout"))
                clientConfig.setConnectionTimeout(Integer.parseInt(el.getAttribute("connection-timeout")));
            if (el.hasAttribute("socket-timeout"))
                clientConfig.setSocketTimeout(Integer.parseInt(el.getAttribute("socket-timeout")));
            if (el.hasAttribute("local-address"))
                clientConfig.setLocalAddress(InetAddress.getByName(el.getAttribute("local-address")));
            if (el.hasAttribute("max-retry"))
                clientConfig.setMaxErrorRetry(Integer.parseInt(el.getAttribute("max-retry")));
            if (el.hasAttribute("protocol")) {
                String pr = el.getAttribute("protocol");
                if (pr.equalsIgnoreCase("http"))
                    clientConfig.setProtocol(Protocol.HTTP);
                else
                    clientConfig.setProtocol(Protocol.HTTPS);

            }
            if (el.hasAttribute("s3-target")) {
                s3Target = el.getAttribute("s3-target");
            }
            if (el.hasAttribute("proxy-host")) {
                clientConfig.setProxyHost(el.getAttribute("proxy-host"));
            }
            if (el.hasAttribute("proxy-domain")) {
                clientConfig.setProxyDomain(el.getAttribute("proxy-domain"));
            }
            if (el.hasAttribute("proxy-password")) {
                clientConfig.setProxyPassword(el.getAttribute("proxy-password"));
            }
            if (el.hasAttribute("proxy-port")) {
                clientConfig.setProxyPort(Integer.parseInt(el.getAttribute("proxy-port")));
            }
            if (el.hasAttribute("proxy-username")) {
                clientConfig.setProxyUsername(el.getAttribute("proxy-username"));
            }
        }

        if (s3Target != null && s3Target.toLowerCase().startsWith("https")) {
            TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
                @Override
                public boolean isTrusted(X509Certificate[] certificate, String authType) {
                    return true;
                }
            };
            SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                    SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
            clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        }
        if (awsCredentials != null)
            s3Service = new AmazonS3Client(awsCredentials, clientConfig);
        else
            s3Service = new AmazonS3Client(new InstanceProfileCredentialsProvider(), clientConfig);
        if (bucketLocation != null) {
            s3Service.setRegion(bucketLocation);
            System.out.println("bucketLocation=" + bucketLocation.toString());
        }
        if (s3Target != null) {
            s3Service.setEndpoint(s3Target);
            System.out.println("target=" + s3Target);
        }
        if (config.hasAttribute("disableDNSBucket")) {
            s3Service.setS3ClientOptions(new S3ClientOptions()
                    .withPathStyleAccess(Boolean.parseBoolean(config.getAttribute("disableDNSBucket")))
                    .disableChunkedEncoding());
            System.out.println(
                    "disableDNSBucket=" + Boolean.parseBoolean(config.getAttribute("disableDNSBucket")));
        }
        if (!s3Service.doesBucketExist(this.name)) {
            s3Service.createBucket(this.name);
            SDFSLogger.getLog().info("created new store " + name);
            ObjectMetadata md = new ObjectMetadata();
            md.addUserMetadata("currentsize", "0");
            md.addUserMetadata("currentcompressedsize", "0");
            md.addUserMetadata("clustered", "true");
            md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
            md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
            md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

            this.clustered = true;
            byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
            if (md5sum) {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.setContentMD5(mds);
            }
            md.setContentLength(sz.length);
            this.binm = "bucketinfo/"
                    + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
        } else {
            Map<String, String> obj = null;
            ObjectMetadata omd = null;
            try {
                omd = s3Service.getObjectMetadata(this.name, binm);
                obj = omd.getUserMetadata();
                obj.get("currentsize");
            } catch (Exception e) {
                omd = null;
                SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
            }
            if (omd == null) {
                try {
                    this.binm = "bucketinfo/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                    omd = s3Service.getObjectMetadata(this.name, binm);
                    obj = omd.getUserMetadata();
                    obj.get("currentsize");
                } catch (Exception e) {
                    omd = null;
                    SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
                }
            }
            if (omd == null) {
                ObjectMetadata md = new ObjectMetadata();
                md.addUserMetadata("currentsize", "0");
                md.addUserMetadata("currentcompressedsize", "0");
                md.addUserMetadata("clustered", "true");
                md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

                this.clustered = true;
                this.binm = "bucketinfo/"
                        + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                if (md5sum) {
                    String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.setContentMD5(mds);
                }
                md.setContentLength(sz.length);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
            } else {
                if (obj.containsKey("currentsize")) {
                    long cl = Long.parseLong((String) obj.get("currentsize"));
                    if (cl >= 0) {
                        HashBlobArchive.currentLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly len=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }

                if (obj.containsKey("currentcompressedsize")) {
                    long cl = Long.parseLong((String) obj.get("currentcompressedsize"));
                    if (cl >= 0) {
                        HashBlobArchive.compressedLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly clen=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }
                if (obj.containsKey("clustered")) {
                    this.clustered = Boolean.parseBoolean(obj.get("clustered"));
                } else
                    this.clustered = false;

                obj.put("clustered", Boolean.toString(this.clustered));
                omd.setUserMetadata(obj);
                try {

                    updateObject(binm, omd);
                } catch (Exception e) {
                    SDFSLogger.getLog().warn("unable to update bucket info in init", e);
                    SDFSLogger.getLog().info("created new store " + name);
                    ObjectMetadata md = new ObjectMetadata();
                    md.addUserMetadata("currentsize", "0");
                    md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.addUserMetadata("currentcompressedsize", "0");
                    md.addUserMetadata("clustered", Boolean.toString(this.clustered));
                    md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                    md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    if (md5sum) {
                        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                        md.setContentMD5(mds);
                    }
                    md.setContentLength(sz.length);
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);

                }
            }
        }
        ArrayList<Transition> trs = new ArrayList<Transition>();
        if (this.glacierDays > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.glacierDays)
                    .withStorageClass(StorageClass.Glacier);
            trs.add(transToArchive);
        }

        if (this.infrequentAccess > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.infrequentAccess)
                    .withStorageClass(StorageClass.StandardInfrequentAccess);
            trs.add(transToArchive);

        }
        if (trs.size() > 0) {
            BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
                    .withId("SDFS Automated Archive Rule for Block Data").withPrefix("blocks/")
                    .withTransitions(trs).withStatus(BucketLifecycleConfiguration.ENABLED.toString());
            List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
            rules.add(ruleArchiveAndExpire);

            BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);

            // Save configuration.
            s3Service.setBucketLifecycleConfiguration(this.name, configuration);
        } else if (s3Target == null) {
            s3Service.deleteBucketLifecycleConfiguration(this.name);
        }
        HashBlobArchive.init(this);
        HashBlobArchive.setReadSpeed(rsp);
        HashBlobArchive.setWriteSpeed(wsp);
        Thread th = new Thread(this);
        th.start();
    } catch (Exception e) {
        SDFSLogger.getLog().error("unable to start service", e);
        throw new IOException(e);
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void writeHashBlobArchive(HashBlobArchive arc, long id) throws IOException {
    String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    try {
        int csz = toIntExact(arc.getFile().length());
        ObjectMetadata md = new ObjectMetadata();
        md.addUserMetadata("size", Integer.toString(arc.uncompressedLength.get()));
        md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
        md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
        md.addUserMetadata("compressedsize", Integer.toString(csz));
        md.addUserMetadata("bsize", Integer.toString(arc.getLen()));
        md.addUserMetadata("objects", Integer.toString(arc.getSz()));
        md.addUserMetadata("bcompressedsize", Integer.toString(csz));
        md.setContentType("binary/octet-stream");
        md.setContentLength(csz);
        if (md5sum) {
            // try-with-resources closes the stream even if the hash computation fails
            try (FileInputStream in = new FileInputStream(arc.getFile())) {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(in));
                md.setContentMD5(mds);
                md.addUserMetadata("md5sum", mds);
            }
        }
        PutObjectRequest req = new PutObjectRequest(this.name, "blocks/" + haName,
                new FileInputStream(arc.getFile()), md);

        if (this.simpleS3)
            s3Service.putObject(req);
        else
            this.multiPartUpload(req);
        byte[] msg = Long.toString(System.currentTimeMillis()).getBytes();
        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(msg));
        md.setContentMD5(mds);
        md.addUserMetadata("md5sum", mds);
        if (this.clustered) {
            md.setContentType("binary/octet-stream");
            md.setContentLength(msg.length);
            PutObjectRequest creq = new PutObjectRequest(this.name, this.getClaimName(id),
                    new ByteArrayInputStream(msg), md);

            s3Service.putObject(creq);
        }
        byte[] hs = arc.getHashesString().getBytes();
        int sz = hs.length;
        if (Main.compress) {
            hs = CompressionUtils.compressLz4(hs);
        }
        byte[] ivb = PassPhrase.getByteIV();
        if (Main.chunkStoreEncryptionEnabled) {
            hs = EncryptUtils.encryptCBC(hs, new IvParameterSpec(ivb));
        }
        md = new ObjectMetadata();
        md.addUserMetadata("size", Integer.toString(sz));
        md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
        md.addUserMetadata("lastaccessed", "0");
        md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
        md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
        md.addUserMetadata("compressedsize", Integer.toString(csz));
        md.addUserMetadata("bsize", Integer.toString(arc.uncompressedLength.get()));
        md.addUserMetadata("bcompressedsize", Integer.toString(csz));
        md.addUserMetadata("objects", Integer.toString(arc.getSz()));

        md.setContentType("binary/octet-stream");
        md.setContentLength(hs.length);
        if (md5sum) {
            mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(hs));
            md.setContentMD5(mds);
            md.addUserMetadata("md5sum", mds);
        }
        req = new PutObjectRequest(this.name, "keys/" + haName, new ByteArrayInputStream(hs), md);
        s3Service.putObject(req);
    } catch (Throwable e) {
        SDFSLogger.getLog().fatal("unable to upload " + arc.getID() + " with id " + id, e);
        throw new IOException(e);
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void run() {
    while (!closed) {
        try {
            Thread.sleep(60000);
            try {
                ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                Map<String, String> md = omd.getUserMetadata();
                ObjectMetadata nmd = new ObjectMetadata();
                nmd.setUserMetadata(md);
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                md.put("hostname", InetAddress.getLocalHost().getHostName());
                md.put("port", Integer.toString(Main.sdfsCliPort));
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.put("md5sum", st);
                nmd.setContentMD5(st);
                nmd.setContentLength(sz.length);
                nmd.setUserMetadata(md);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
            } catch (Exception e) {
                try {
                    ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                    Map<String, String> md = omd.getUserMetadata();
                    ObjectMetadata nmd = new ObjectMetadata();
                    nmd.setUserMetadata(md);
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.put("hostname", InetAddress.getLocalHost().getHostName());
                    md.put("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.put("md5sum", st);
                    nmd.setContentMD5(st);
                    nmd.setContentLength(sz.length);
                    nmd.setUserMetadata(md);

                    this.updateObject(binm, nmd);
                } catch (Exception e1) {
                    SDFSLogger.getLog().error("unable to update metadata for " + binm, e);
                }
            }

            if (this.deletes.size() > 0) {
                SDFSLogger.getLog().info("running garbage collection");
                RejectedExecutionHandler executionHandler = new BlockPolicy();
                BlockingQueue<Runnable> worksQueue = new SynchronousQueue<Runnable>();
                ThreadPoolExecutor executor = new ThreadPoolExecutor(1, Main.dseIOThreads, 10, TimeUnit.SECONDS,
                        worksQueue, executionHandler);
                this.delLock.lock();
                HashMap<Long, Integer> odel = null;
                try {
                    odel = this.deletes;
                    this.deletes = new HashMap<Long, Integer>();
                    // SDFSLogger.getLog().info("delete hash table size of "
                    // + odel.size());
                } finally {
                    this.delLock.unlock();
                }
                Set<Long> iter = odel.keySet();
                for (Long k : iter) {
                    DeleteObject obj = new DeleteObject();
                    obj.k = k;
                    obj.odel = odel;
                    obj.st = this;
                    executor.execute(obj);
                }
                executor.shutdown();
                while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                    SDFSLogger.getLog().debug("Awaiting deletion task completion of threads.");
                }
                SDFSLogger.getLog().info("done running garbage collection");
            }
        } catch (InterruptedException e) {
            break;
        } catch (Exception e) {
            SDFSLogger.getLog().error("error in delete thread", e);
        }
    }

}