Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength

Introduction

This page lists examples of using com.amazonaws.services.s3.model.ObjectMetadata.setContentLength.

Prototype

public void setContentLength(long contentLength) 

Document

Sets the Content-Length HTTP header indicating the size of the associated object in bytes.
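
Setting the content length up front matters most when uploading from an InputStream: if the length is unknown, the AWS SDK for Java buffers the stream contents in memory to compute it, which can exhaust the heap for large objects. The sketch below shows the minimal call pattern; the bucket and key names are placeholders, and the client construction assumes default credentials.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class SetContentLengthExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] payload = "hello, s3".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata metadata = new ObjectMetadata();
        // Declaring the exact size lets the SDK stream the upload
        // instead of buffering the whole input in memory.
        metadata.setContentLength(payload.length);
        s3.putObject("example-bucket", "example-key",
                new ByteArrayInputStream(payload), metadata);
    }
}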

Usage

From source file:org.kuali.rice.krad.service.impl.AmazonS3AttachmentServiceImpl.java

License:Educational Community License

/**
 * @see org.kuali.rice.krad.service.AttachmentService#createAttachment(GloballyUnique,
 * String, String, int, java.io.InputStream, String)
 */
@Override
public Attachment createAttachment(GloballyUnique parent, String uploadedFileName, String mimeType,
        int fileSize, InputStream fileContents, String attachmentTypeCode) throws IOException {
    // validate parent before dereferencing it to avoid a NullPointerException
    if (parent == null) {
        throw new IllegalArgumentException("invalid (null or uninitialized) document");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("starting to create attachment for document: " + parent.getObjectId());
    }
    if (StringUtils.isBlank(uploadedFileName)) {
        throw new IllegalArgumentException("invalid (blank) fileName");
    }
    if (StringUtils.isBlank(mimeType)) {
        throw new IllegalArgumentException("invalid (blank) mimeType");
    }
    if (fileSize <= 0) {
        throw new IllegalArgumentException("invalid (non-positive) fileSize");
    }
    if (fileContents == null) {
        throw new IllegalArgumentException("invalid (null) inputStream");
    }

    String uniqueFileNameGuid = UUID.randomUUID().toString();

    TransferManager manager = new TransferManager(this.amazonS3);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(mimeType);
    metadata.setContentDisposition("attachment; filename=" + URLEncoder.encode(uploadedFileName, "UTF-8"));
    metadata.setContentLength(fileSize);
    Upload upload = manager.upload(this.bucketName, generateObjectKey(uniqueFileNameGuid), fileContents,
            metadata);
    try {
        upload.waitForCompletion();
    } catch (InterruptedException e) {
        // restore the interrupt flag before translating the exception
        Thread.currentThread().interrupt();
        throw new IllegalStateException("Failed to upload file to s3", e);
    } finally {
        // release the TransferManager's threads; keep the shared client open
        manager.shutdownNow(false);
    }

    // create DocumentAttachment
    Attachment attachment = new Attachment();
    attachment.setAttachmentIdentifier(uniqueFileNameGuid);
    attachment.setAttachmentFileName(uploadedFileName);
    attachment.setAttachmentFileSize(Long.valueOf(fileSize));
    attachment.setAttachmentMimeTypeCode(mimeType);
    attachment.setAttachmentTypeCode(attachmentTypeCode);

    if (LOG.isDebugEnabled()) {
        LOG.debug("finished creating attachment for document: " + parent.getObjectId());
    }
    return attachment;
}

From source file:org.mule.module.s3.simpleapi.content.InputStreamS3ObjectContent.java

License:Open Source License

public PutObjectRequest createPutObjectRequest() {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(length);
    if (md5base64 != null) {
        metadata.setContentMD5(md5base64);
    }
    return new PutObjectRequest(null, null, inputStream, metadata);
}

From source file:org.nickelproject.nickel.blobStore.S3BlobStore.java

License:Apache License

private void putSinglePartByteArray(final BlobRef blobRef, final byte[] pBytes) {
    final ByteArrayInputStream vByteArrayInputStream = new ByteArrayInputStream(pBytes);
    final ObjectMetadata vMetadata = new ObjectMetadata();
    vMetadata.setContentLength(pBytes.length);
    s3Client.putObject(new PutObjectRequest(bucketName, blobRef.toString(), vByteArrayInputStream, vMetadata));
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void close() {
    this.closed = true;
    try {
        SDFSLogger.getLog().info("############ Closing Bucket##################");
        HashBlobArchive.close();

        ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
        Map<String, String> md = omd.getUserMetadata();
        ObjectMetadata nmd = new ObjectMetadata();
        nmd.setUserMetadata(md);
        md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
        md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
        md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
        md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
        md.put("lastupdate", Long.toString(System.currentTimeMillis()));
        md.put("hostname", InetAddress.getLocalHost().getHostName());
        md.put("port", Integer.toString(Main.sdfsCliPort));
        byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
        String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
        md.put("md5sum", st);
        nmd.setContentMD5(st);
        nmd.setContentLength(sz.length);
        nmd.setUserMetadata(md);
        try {
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
        } catch (AmazonS3Exception e1) {
            if (e1.getStatusCode() == 409) {
                try {
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
                } catch (Exception e2) {
                    throw new IOException(e2);
                }
            } else {
                throw new IOException(e1);
            }
        } catch (Exception e1) {
            // SDFSLogger.getLog().error("error uploading", e1);
            throw new IOException(e1);
        }
    } catch (Exception e) {
        SDFSLogger.getLog().warn("error while closing bucket " + this.name, e);
    } finally {
        try {
            s3Service.shutdown();
        } catch (Exception e) {
            SDFSLogger.getLog().debug("error while closing bucket " + this.name, e);
        }
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void init(Element config) throws IOException {
    this.name = Main.cloudBucket.toLowerCase();
    this.staged_sync_location.mkdirs();
    try {
        if (config.hasAttribute("default-bucket-location")) {
            bucketLocation = RegionUtils.getRegion(config.getAttribute("default-bucket-location"));

        }
        if (config.hasAttribute("connection-check-interval")) {
            this.checkInterval = Integer.parseInt(config.getAttribute("connection-check-interval"));
        }
        if (config.hasAttribute("block-size")) {
            int sz = (int) StringUtils.parseSize(config.getAttribute("block-size"));
            HashBlobArchive.MAX_LEN = sz;
        }
        if (config.hasAttribute("allow-sync")) {
            HashBlobArchive.allowSync = Boolean.parseBoolean(config.getAttribute("allow-sync"));
            if (config.hasAttribute("sync-check-schedule")) {
                try {
                    new SyncFSScheduler(config.getAttribute("sync-check-schedule"));
                } catch (Exception e) {
                    SDFSLogger.getLog().error("unable to start sync scheduler", e);
                }
            }

        }
        if (config.hasAttribute("upload-thread-sleep-time")) {
            int tm = Integer.parseInt(config.getAttribute("upload-thread-sleep-time"));
            HashBlobArchive.THREAD_SLEEP_TIME = tm;
        }
        if (config.hasAttribute("cache-writes")) {
            HashBlobArchive.cacheWrites = Boolean.parseBoolean(config.getAttribute("cache-writes"));
        }
        if (config.hasAttribute("cache-reads")) {
            HashBlobArchive.cacheReads = Boolean.parseBoolean(config.getAttribute("cache-reads"));
        }
        if (config.hasAttribute("sync-files")) {
            boolean syncf = Boolean.parseBoolean(config.getAttribute("sync-files"));
            if (syncf) {
                new FileReplicationService(this);
            }
        }
        int rsp = 0;
        int wsp = 0;
        if (config.hasAttribute("read-speed")) {
            rsp = Integer.parseInt(config.getAttribute("read-speed"));
        }
        if (config.hasAttribute("write-speed")) {
            wsp = Integer.parseInt(config.getAttribute("write-speed"));
        }
        if (config.hasAttribute("local-cache-size")) {
            long sz = StringUtils.parseSize(config.getAttribute("local-cache-size"));
            HashBlobArchive.setLocalCacheSize(sz);
        }
        if (config.hasAttribute("metadata-version")) {
            this.mdVersion = Integer.parseInt(config.getAttribute("metadata-version"));
        }
        if (config.hasAttribute("map-cache-size")) {
            int sz = Integer.parseInt(config.getAttribute("map-cache-size"));
            HashBlobArchive.MAP_CACHE_SIZE = sz;
        }
        if (config.hasAttribute("io-threads")) {
            int sz = Integer.parseInt(config.getAttribute("io-threads"));
            Main.dseIOThreads = sz;
        }
        if (config.hasAttribute("clustered")) {
            this.clustered = Boolean.parseBoolean(config.getAttribute("clustered"));
        }
        if (config.hasAttribute("delete-unclaimed")) {
            this.deleteUnclaimed = Boolean.parseBoolean(config.getAttribute("delete-unclaimed"));
        }
        if (config.hasAttribute("glacier-archive-days")) {
            this.glacierDays = Integer.parseInt(config.getAttribute("glacier-archive-days"));
            if (this.glacierDays > 0)
                Main.checkArchiveOnRead = true;
        }
        if (config.hasAttribute("infrequent-access-days")) {
            this.infrequentAccess = Integer.parseInt(config.getAttribute("infrequent-access-days"));
        }
        if (config.hasAttribute("simple-s3")) {
            EncyptUtils.baseEncode = Boolean.parseBoolean(config.getAttribute("simple-s3"));
            this.simpleS3 = true;
        }
        if (config.hasAttribute("md5-sum")) {
            this.md5sum = Boolean.parseBoolean(config.getAttribute("md5-sum"));
            if (!this.md5sum) {
                System.setProperty("com.amazonaws.services.s3.disableGetObjectMD5Validation", "true");
                System.setProperty("com.amazonaws.services.s3.disablePutObjectMD5Validation", "true");
            }

        }
        ClientConfiguration clientConfig = new ClientConfiguration();
        if (config.hasAttribute("use-v4-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-v4-signer"));

            if (v4s) {
                clientConfig.setSignerOverride("AWSS3V4SignerType");
            }
        }
        if (config.hasAttribute("use-basic-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-basic-signer"));
            if (v4s) {
                clientConfig.setSignerOverride("S3SignerType");
            }
        }

        clientConfig.setMaxConnections(Main.dseIOThreads * 2);
        clientConfig.setConnectionTimeout(10000);
        clientConfig.setSocketTimeout(10000);

        String s3Target = null;
        if (config.getElementsByTagName("connection-props").getLength() > 0) {
            Element el = (Element) config.getElementsByTagName("connection-props").item(0);
            if (el.hasAttribute("connection-timeout"))
                clientConfig.setConnectionTimeout(Integer.parseInt(el.getAttribute("connection-timeout")));
            if (el.hasAttribute("socket-timeout"))
                clientConfig.setSocketTimeout(Integer.parseInt(el.getAttribute("socket-timeout")));
            if (el.hasAttribute("local-address"))
                clientConfig.setLocalAddress(InetAddress.getByName(el.getAttribute("local-address")));
            if (el.hasAttribute("max-retry"))
                clientConfig.setMaxErrorRetry(Integer.parseInt(el.getAttribute("max-retry")));
            if (el.hasAttribute("protocol")) {
                String pr = el.getAttribute("protocol");
                if (pr.equalsIgnoreCase("http"))
                    clientConfig.setProtocol(Protocol.HTTP);
                else
                    clientConfig.setProtocol(Protocol.HTTPS);

            }
            if (el.hasAttribute("s3-target")) {
                s3Target = el.getAttribute("s3-target");
            }
            if (el.hasAttribute("proxy-host")) {
                clientConfig.setProxyHost(el.getAttribute("proxy-host"));
            }
            if (el.hasAttribute("proxy-domain")) {
                clientConfig.setProxyDomain(el.getAttribute("proxy-domain"));
            }
            if (el.hasAttribute("proxy-password")) {
                clientConfig.setProxyPassword(el.getAttribute("proxy-password"));
            }
            if (el.hasAttribute("proxy-port")) {
                clientConfig.setProxyPort(Integer.parseInt(el.getAttribute("proxy-port")));
            }
            if (el.hasAttribute("proxy-username")) {
                clientConfig.setProxyUsername(el.getAttribute("proxy-username"));
            }
        }

        if (s3Target != null && s3Target.toLowerCase().startsWith("https")) {
            TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
                @Override
                public boolean isTrusted(X509Certificate[] certificate, String authType) {
                    return true;
                }
            };
            SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                    SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
            clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        }
        if (awsCredentials != null)
            s3Service = new AmazonS3Client(awsCredentials, clientConfig);
        else
            s3Service = new AmazonS3Client(new InstanceProfileCredentialsProvider(), clientConfig);
        if (bucketLocation != null) {
            s3Service.setRegion(bucketLocation);
            System.out.println("bucketLocation=" + bucketLocation.toString());
        }
        if (s3Target != null) {
            s3Service.setEndpoint(s3Target);
            System.out.println("target=" + s3Target);
        }
        if (config.hasAttribute("disableDNSBucket")) {
            s3Service.setS3ClientOptions(new S3ClientOptions()
                    .withPathStyleAccess(Boolean.parseBoolean(config.getAttribute("disableDNSBucket")))
                    .disableChunkedEncoding());
            System.out.println(
                    "disableDNSBucket=" + Boolean.parseBoolean(config.getAttribute("disableDNSBucket")));
        }
        if (!s3Service.doesBucketExist(this.name)) {
            s3Service.createBucket(this.name);
            SDFSLogger.getLog().info("created new store " + name);
            ObjectMetadata md = new ObjectMetadata();
            md.addUserMetadata("currentsize", "0");
            md.addUserMetadata("currentcompressedsize", "0");
            md.addUserMetadata("clustered", "true");
            md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
            md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
            md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

            this.clustered = true;
            byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
            if (md5sum) {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.setContentMD5(mds);
            }
            md.setContentLength(sz.length);
            this.binm = "bucketinfo/"
                    + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
        } else {
            Map<String, String> obj = null;
            ObjectMetadata omd = null;
            try {
                omd = s3Service.getObjectMetadata(this.name, binm);
                obj = omd.getUserMetadata();
                obj.get("currentsize");
            } catch (Exception e) {
                omd = null;
                SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
            }
            if (omd == null) {
                try {
                    this.binm = "bucketinfo/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                    omd = s3Service.getObjectMetadata(this.name, binm);
                    obj = omd.getUserMetadata();
                    obj.get("currentsize");
                } catch (Exception e) {
                    omd = null;
                    SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
                }
            }
            if (omd == null) {
                ObjectMetadata md = new ObjectMetadata();
                md.addUserMetadata("currentsize", "0");
                md.addUserMetadata("currentcompressedsize", "0");
                md.addUserMetadata("clustered", "true");
                md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

                this.clustered = true;
                this.binm = "bucketinfo/"
                        + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                if (md5sum) {
                    String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.setContentMD5(mds);
                }
                md.setContentLength(sz.length);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
            } else {
                if (obj.containsKey("currentsize")) {
                    long cl = Long.parseLong((String) obj.get("currentsize"));
                    if (cl >= 0) {
                        HashBlobArchive.currentLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly len=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }

                if (obj.containsKey("currentcompressedsize")) {
                    long cl = Long.parseLong((String) obj.get("currentcompressedsize"));
                    if (cl >= 0) {
                        HashBlobArchive.compressedLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly clen=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }
                if (obj.containsKey("clustered")) {
                    this.clustered = Boolean.parseBoolean(obj.get("clustered"));
                } else
                    this.clustered = false;

                obj.put("clustered", Boolean.toString(this.clustered));
                omd.setUserMetadata(obj);
                try {
                    updateObject(binm, omd);
                } catch (Exception e) {
                    SDFSLogger.getLog().warn("unable to update bucket info in init", e);
                    SDFSLogger.getLog().info("created new store " + name);
                    ObjectMetadata md = new ObjectMetadata();
                    md.addUserMetadata("currentsize", "0");
                    md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.addUserMetadata("currentcompressedsize", "0");
                    md.addUserMetadata("clustered", Boolean.toString(this.clustered));
                    md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                    md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    if (md5sum) {
                        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                        md.setContentMD5(mds);
                    }
                    md.setContentLength(sz.length);
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);

                }
            }
        }
        ArrayList<Transition> trs = new ArrayList<Transition>();
        if (this.glacierDays > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.glacierDays)
                    .withStorageClass(StorageClass.Glacier);
            trs.add(transToArchive);
        }

        if (this.infrequentAccess > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.infrequentAccess)
                    .withStorageClass(StorageClass.StandardInfrequentAccess);
            trs.add(transToArchive);

        }
        if (trs.size() > 0) {
            BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
                    .withId("SDFS Automated Archive Rule for Block Data").withPrefix("blocks/")
                    .withTransitions(trs).withStatus(BucketLifecycleConfiguration.ENABLED.toString());
            List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
            rules.add(ruleArchiveAndExpire);

            BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);

            // Save configuration.
            s3Service.setBucketLifecycleConfiguration(this.name, configuration);
        } else if (s3Target == null) {
            s3Service.deleteBucketLifecycleConfiguration(this.name);
        }
        HashBlobArchive.init(this);
        HashBlobArchive.setReadSpeed(rsp);
        HashBlobArchive.setWriteSpeed(wsp);
        Thread th = new Thread(this);
        th.start();
    } catch (Exception e) {
        SDFSLogger.getLog().error("unable to start service", e);
        throw new IOException(e);
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void writeHashBlobArchive(HashBlobArchive arc, long id) throws IOException {
    String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    try {
        int csz = toIntExact(arc.getFile().length());
        ObjectMetadata md = new ObjectMetadata();
        md.addUserMetadata("size", Integer.toString(arc.uncompressedLength.get()));
        md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
        md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
        md.addUserMetadata("compressedsize", Integer.toString(csz));
        md.addUserMetadata("bsize", Integer.toString(arc.getLen()));
        md.addUserMetadata("objects", Integer.toString(arc.getSz()));
        md.addUserMetadata("bcompressedsize", Integer.toString(csz));
        md.setContentType("binary/octet-stream");
        md.setContentLength(csz);
        if (md5sum) {
            FileInputStream in = new FileInputStream(arc.getFile());
            String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(in));
            md.setContentMD5(mds);
            md.addUserMetadata("md5sum", mds);
            IOUtils.closeQuietly(in);
        }
        PutObjectRequest req = new PutObjectRequest(this.name, "blocks/" + haName,
                new FileInputStream(arc.getFile()), md);

        if (this.simpleS3)
            s3Service.putObject(req);
        else
            this.multiPartUpload(req);
        byte[] msg = Long.toString(System.currentTimeMillis()).getBytes();
        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(msg));
        md.setContentMD5(mds);
        md.addUserMetadata("md5sum", mds);
        if (this.clustered) {
            md.setContentType("binary/octet-stream");
            md.setContentLength(msg.length);
            PutObjectRequest creq = new PutObjectRequest(this.name, this.getClaimName(id),
                    new ByteArrayInputStream(msg), md);

            s3Service.putObject(creq);
        }
        byte[] hs = arc.getHashesString().getBytes();
        int sz = hs.length;
        if (Main.compress) {
            hs = CompressionUtils.compressLz4(hs);
        }
        byte[] ivb = PassPhrase.getByteIV();
        if (Main.chunkStoreEncryptionEnabled) {
            hs = EncryptUtils.encryptCBC(hs, new IvParameterSpec(ivb));
        }
        md = new ObjectMetadata();
        md.addUserMetadata("size", Integer.toString(sz));
        md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
        md.addUserMetadata("lastaccessed", "0");
        md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
        md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
        md.addUserMetadata("compressedsize", Integer.toString(csz));
        md.addUserMetadata("bsize", Integer.toString(arc.uncompressedLength.get()));
        md.addUserMetadata("bcompressedsize", Integer.toString(csz));
        md.addUserMetadata("objects", Integer.toString(arc.getSz()));

        md.setContentType("binary/octet-stream");
        md.setContentLength(hs.length);
        if (md5sum) {
            mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(hs));
            md.setContentMD5(mds);
            md.addUserMetadata("md5sum", mds);
        }
        req = new PutObjectRequest(this.name, "keys/" + haName, new ByteArrayInputStream(hs), md);
        s3Service.putObject(req);
    } catch (Throwable e) {
        SDFSLogger.getLog().fatal("unable to upload " + arc.getID() + " with id " + id, e);
        throw new IOException(e);
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void run() {
    while (!closed) {
        try {
            Thread.sleep(60000);
            try {
                ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                Map<String, String> md = omd.getUserMetadata();
                ObjectMetadata nmd = new ObjectMetadata();
                nmd.setUserMetadata(md);
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                md.put("hostname", InetAddress.getLocalHost().getHostName());
                md.put("port", Integer.toString(Main.sdfsCliPort));
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.put("md5sum", st);
                nmd.setContentMD5(st);
                nmd.setContentLength(sz.length);
                nmd.setUserMetadata(md);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
            } catch (Exception e) {
                try {
                    ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                    Map<String, String> md = omd.getUserMetadata();
                    ObjectMetadata nmd = new ObjectMetadata();
                    nmd.setUserMetadata(md);
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.put("hostname", InetAddress.getLocalHost().getHostName());
                    md.put("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.put("md5sum", st);
                    nmd.setContentMD5(st);
                    nmd.setContentLength(sz.length);
                    nmd.setUserMetadata(md);

                    this.updateObject(binm, nmd);
                } catch (Exception e1) {
                    SDFSLogger.getLog().error("unable to update metadata for " + binm, e);
                }
            }

            if (this.deletes.size() > 0) {
                SDFSLogger.getLog().info("running garbage collection");
                RejectedExecutionHandler executionHandler = new BlockPolicy();
                BlockingQueue<Runnable> worksQueue = new SynchronousQueue<Runnable>();
                ThreadPoolExecutor executor = new ThreadPoolExecutor(1, Main.dseIOThreads, 10, TimeUnit.SECONDS,
                        worksQueue, executionHandler);
                this.delLock.lock();
                HashMap<Long, Integer> odel = null;
                try {
                    odel = this.deletes;
                    this.deletes = new HashMap<Long, Integer>();
                    // SDFSLogger.getLog().info("delete hash table size of "
                    // + odel.size());
                } finally {
                    this.delLock.unlock();
                }
                Set<Long> iter = odel.keySet();
                for (Long k : iter) {
                    DeleteObject obj = new DeleteObject();
                    obj.k = k;
                    obj.odel = odel;
                    obj.st = this;
                    executor.execute(obj);
                }
                executor.shutdown();
                while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                    SDFSLogger.getLog().debug("Awaiting deletion task completion of threads.");
                }
                SDFSLogger.getLog().info("done running garbage collection");
            }
        } catch (InterruptedException e) {
            break;
        } catch (Exception e) {
            SDFSLogger.getLog().error("error in delete thread", e);
        }
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void uploadFile(File f, String to, String pp) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        InputStream in = null;
        while (to.startsWith(File.separator))
            to = to.substring(1);

        String pth = pp + "/" + EncyptUtils.encString(to, Main.chunkStoreEncryptionEnabled);
        SDFSLogger.getLog().info("uploading " + f.getPath() + " to " + to + " pth " + pth);
        boolean isDir = false;
        boolean isSymlink = false;
        if (!OSValidator.isWindows()) {
            isDir = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isDirectory();
            isSymlink = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isSymbolicLink();
        } else {
            isDir = f.isDirectory();
        }
        if (isSymlink) {
            try {
                HashMap<String, String> metaData = new HashMap<String, String>();
                metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                metaData.put("lastmodified", Long.toString(f.lastModified()));
                String slp = EncyptUtils.encString(Files.readSymbolicLink(f.toPath()).toFile().getPath(),
                        Main.chunkStoreEncryptionEnabled);
                metaData.put("symlink", slp);
                ObjectMetadata md = new ObjectMetadata();
                md.setContentType("binary/octet-stream");
                md.setContentLength(pth.getBytes().length);
                md.setUserMetadata(metaData);
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                throw new IOException(e1);
            }
        } else if (isDir) {
            HashMap<String, String> metaData = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
            metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
            metaData.put("lastmodified", Long.toString(f.lastModified()));
            metaData.put("directory", "true");
            ObjectMetadata md = new ObjectMetadata();
            md.setContentType("binary/octet-stream");
            md.setContentLength(pth.getBytes().length);
            md.setUserMetadata(metaData);
            try {
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                SDFSLogger.getLog().error("error uploading", e1);
                throw new IOException(e1);
            }
        } else {
            String rnd = RandomGUID.getGuid();
            File p = new File(this.staged_sync_location, rnd);
            File z = new File(this.staged_sync_location, rnd + ".z");
            File e = new File(this.staged_sync_location, rnd + ".e");
            while (z.exists()) {
                rnd = RandomGUID.getGuid();
                p = new File(this.staged_sync_location, rnd);
                z = new File(this.staged_sync_location, rnd + ".z");
                e = new File(this.staged_sync_location, rnd + ".e");
            }
            try {
                BufferedInputStream is = new BufferedInputStream(new FileInputStream(f));
                BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(p));
                IOUtils.copy(is, os);
                os.flush();
                os.close();
                is.close();
                if (Main.compress) {
                    CompressionUtils.compressFile(p, z);
                    p.delete();
                    p = z;
                }
                byte[] ivb = null;
                if (Main.chunkStoreEncryptionEnabled) {
                    try {
                        ivb = PassPhrase.getByteIV();
                        EncryptUtils.encryptFile(p, e, new IvParameterSpec(ivb));

                    } catch (Exception e1) {
                        throw new IOException(e1);
                    }
                    p.delete();
                    p = e;
                }
                String objName = pth;
                ObjectMetadata md = new ObjectMetadata();
                Map<String, String> umd = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
                md.setUserMetadata(umd);
                md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
                md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                if (ivb != null)
                    md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
                md.addUserMetadata("lastmodified", Long.toString(f.lastModified()));
                if (simpleS3) {
                    md.setContentType("binary/octet-stream");
                    in = new BufferedInputStream(new FileInputStream(p), 32768);
                    try {
                        if (md5sum) {
                            byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                            in.close();
                            String mds = BaseEncoding.base64().encode(md5Hash);
                            md.setContentMD5(mds);
                            md.addUserMetadata("md5sum", mds);
                        }

                    } catch (NoSuchAlgorithmException e2) {
                        SDFSLogger.getLog().error("while hashing", e2);
                        throw new IOException(e2);
                    }

                    in = new FileInputStream(p);
                    md.setContentLength(p.length());
                    try {
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        s3Service.putObject(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                        SDFSLogger.getLog().debug(
                                "uploaded=" + f.getPath() + " lm=" + md.getUserMetadata().get("lastmodified"));
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {
                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                } else {
                    try {
                        md.setContentType("binary/octet-stream");
                        in = new BufferedInputStream(new FileInputStream(p), 32768);
                        byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                        in.close();
                        String mds = BaseEncoding.base64().encode(md5Hash);
                        md.setContentMD5(mds);
                        md.addUserMetadata("md5sum", mds);
                        in = new BufferedInputStream(new FileInputStream(p), 32768);

                        md.setContentLength(p.length());
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        multiPartUpload(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {
                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                }
            } finally {
                try {
                    if (in != null)
                        in.close();
                } finally {
                    p.delete();
                    z.delete();
                    e.delete();
                }
            }
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void checkoutObject(long id, int claims) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        if (!this.clustered)
            throw new IOException("volume is not clustered");
        ObjectMetadata om = this.getClaimMetaData(id);
        if (om != null)
            return;
        else {
            String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
            om = s3Service.getObjectMetadata(this.name, "keys/" + haName);
            Map<String, String> md = om.getUserMetadata();
            md.put("objects", Integer.toString(claims));
            if (md.containsKey("deleted")) {
                md.remove("deleted");
            }
            if (md.containsKey("deleted-objects")) {
                md.remove("deleted-objects");
            }
            if (md.containsKey("bsize")) {
                HashBlobArchive.currentLength.addAndGet(Integer.parseInt(md.get("bsize")));
            }
            if (md.containsKey("bcompressedsize")) {
                HashBlobArchive.compressedLength.addAndGet(Integer.parseInt(md.get("bcompressedsize")));
            }
            byte[] msg = Long.toString(System.currentTimeMillis()).getBytes();

            om.setContentLength(msg.length);
            try {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(msg));
                om.setContentMD5(mds);
                om.addUserMetadata("md5sum", mds);
            } catch (Exception e) {
                throw new IOException(e);
            }
            try {
                PutObjectRequest creq = new PutObjectRequest(this.name, this.getClaimName(id),
                        new ByteArrayInputStream(msg), om);
                s3Service.putObject(creq);
            } catch (AmazonS3Exception e1) {
                if (e1.getStatusCode() == 409) {
                    try {
                        s3Service.deleteObject(this.name, this.getClaimName(id));
                        this.checkoutObject(id, claims);
                        return;
                    } catch (Exception e2) {
                        throw new IOException(e2);
                    }
                } else {
                    throw new IOException(e1);
                }
            } catch (Exception e1) {
                // SDFSLogger.getLog().error("error uploading", e1);
                throw new IOException(e1);
            }
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License

@Override
public void checkoutFile(String name) throws IOException {
    String pth = "claims/" + name + "/"
            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    try {
        byte[] b = Long.toString(System.currentTimeMillis()).getBytes();
        ObjectMetadata om = new ObjectMetadata();
        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(b));
        om.setContentMD5(mds);
        om.addUserMetadata("md5sum", mds);
        om.setContentLength(b.length);
        PutObjectRequest creq = new PutObjectRequest(this.name, pth, new ByteArrayInputStream(b), om);
        s3Service.putObject(creq);
    } catch (AmazonS3Exception e1) {
        if (e1.getStatusCode() == 409) {
            try {
                s3Service.deleteObject(this.name, pth);
                this.checkoutFile(name);
                return;
            } catch (Exception e2) {
                throw new IOException(e2);
            }
        } else {
            throw new IOException(e1);
        }
    } catch (Exception e1) {
        // SDFSLogger.getLog().error("error uploading", e1);
        throw new IOException(e1);
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}