Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentMD5

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata setContentMD5

Introduction

On this page you can find example usage of com.amazonaws.services.s3.model ObjectMetadata setContentMD5.

Prototype

public void setContentMD5(String md5Base64) 

Document

Sets the base64-encoded 128-bit MD5 digest of the associated object (the content, not including headers), according to RFC 1864.
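
For orientation before the project examples below, here is a minimal sketch (not taken from any of the sources listed) of computing the digest with the SDK's com.amazonaws.util.Md5Utils and setting it on the metadata before an upload; the bucket and key names are placeholders:

import java.io.ByteArrayInputStream;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.util.Md5Utils;

// Minimal sketch: compute the base64-encoded 128-bit MD5 of the payload (RFC 1864)
// and attach it so S3 can verify the uploaded bytes. Bucket and key are placeholders.
public void putWithContentMd5(AmazonS3 s3, byte[] payload) {
    ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(payload.length);
    md.setContentMD5(Md5Utils.md5AsBase64(payload));
    s3.putObject(new PutObjectRequest("example-bucket", "example-key",
            new ByteArrayInputStream(payload), md));
}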

Usage

From source file: org.mule.module.s3.simpleapi.content.InputStreamS3ObjectContent.java

License: Open Source License

public PutObjectRequest createPutObjectRequest() {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(length);
    if (md5base64 != null) {
        metadata.setContentMD5(md5base64);
    }
    return new PutObjectRequest(null, null, inputStream, metadata);
}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void close() {
    this.closed = true;
    try {
        SDFSLogger.getLog().info("############ Closing Bucket##################");
        HashBlobArchive.close();

        ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
        Map<String, String> md = omd.getUserMetadata();
        ObjectMetadata nmd = new ObjectMetadata();
        nmd.setUserMetadata(md);
        md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
        md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
        md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
        md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
        md.put("lastupdate", Long.toString(System.currentTimeMillis()));
        md.put("hostname", InetAddress.getLocalHost().getHostName());
        md.put("port", Integer.toString(Main.sdfsCliPort));
        byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
        String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
        md.put("md5sum", st);
        nmd.setContentMD5(st);
        nmd.setContentLength(sz.length);
        nmd.setUserMetadata(md);
        try {
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
        } catch (AmazonS3Exception e1) {
            if (e1.getStatusCode() == 409) {
                try {
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
                } catch (Exception e2) {
                    throw new IOException(e2);
                }
            } else {

                throw new IOException(e1);
            }
        } catch (Exception e1) {
            // SDFSLogger.getLog().error("error uploading", e1);
            throw new IOException(e1);
        }
    } catch (Exception e) {
        SDFSLogger.getLog().warn("error while closing bucket " + this.name, e);
    } finally {
        try {
            s3Service.shutdown();
        } catch (Exception e) {
            SDFSLogger.getLog().debug("error while closing bucket " + this.name, e);
        }
    }

}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void init(Element config) throws IOException {
    this.name = Main.cloudBucket.toLowerCase();
    this.staged_sync_location.mkdirs();
    try {
        if (config.hasAttribute("default-bucket-location")) {
            bucketLocation = RegionUtils.getRegion(config.getAttribute("default-bucket-location"));

        }
        if (config.hasAttribute("connection-check-interval")) {
            this.checkInterval = Integer.parseInt(config.getAttribute("connection-check-interval"));
        }
        if (config.hasAttribute("block-size")) {
            int sz = (int) StringUtils.parseSize(config.getAttribute("block-size"));
            HashBlobArchive.MAX_LEN = sz;
        }
        if (config.hasAttribute("allow-sync")) {
            HashBlobArchive.allowSync = Boolean.parseBoolean(config.getAttribute("allow-sync"));
            if (config.hasAttribute("sync-check-schedule")) {
                try {
                    new SyncFSScheduler(config.getAttribute("sync-check-schedule"));
                } catch (Exception e) {
                    SDFSLogger.getLog().error("unable to start sync scheduler", e);
                }
            }

        }
        if (config.hasAttribute("upload-thread-sleep-time")) {
            int tm = Integer.parseInt(config.getAttribute("upload-thread-sleep-time"));
            HashBlobArchive.THREAD_SLEEP_TIME = tm;
        }
        if (config.hasAttribute("cache-writes")) {
            HashBlobArchive.cacheWrites = Boolean.parseBoolean(config.getAttribute("cache-writes"));
        }
        if (config.hasAttribute("cache-reads")) {
            HashBlobArchive.cacheReads = Boolean.parseBoolean(config.getAttribute("cache-reads"));
        }
        if (config.hasAttribute("sync-files")) {
            boolean syncf = Boolean.parseBoolean(config.getAttribute("sync-files"));
            if (syncf) {
                new FileReplicationService(this);
            }
        }
        int rsp = 0;
        int wsp = 0;
        if (config.hasAttribute("read-speed")) {
            rsp = Integer.parseInt(config.getAttribute("read-speed"));
        }
        if (config.hasAttribute("write-speed")) {
            wsp = Integer.parseInt(config.getAttribute("write-speed"));
        }
        if (config.hasAttribute("local-cache-size")) {
            long sz = StringUtils.parseSize(config.getAttribute("local-cache-size"));
            HashBlobArchive.setLocalCacheSize(sz);
        }
        if (config.hasAttribute("metadata-version")) {
            this.mdVersion = Integer.parseInt(config.getAttribute("metadata-version"));
        }
        if (config.hasAttribute("map-cache-size")) {
            int sz = Integer.parseInt(config.getAttribute("map-cache-size"));
            HashBlobArchive.MAP_CACHE_SIZE = sz;
        }
        if (config.hasAttribute("io-threads")) {
            int sz = Integer.parseInt(config.getAttribute("io-threads"));
            Main.dseIOThreads = sz;
        }
        if (config.hasAttribute("clustered")) {
            this.clustered = Boolean.parseBoolean(config.getAttribute("clustered"));
        }
        if (config.hasAttribute("delete-unclaimed")) {
            this.deleteUnclaimed = Boolean.parseBoolean(config.getAttribute("delete-unclaimed"));
        }
        if (config.hasAttribute("glacier-archive-days")) {
            this.glacierDays = Integer.parseInt(config.getAttribute("glacier-archive-days"));
            if (this.glacierDays > 0)
                Main.checkArchiveOnRead = true;
        }
        if (config.hasAttribute("infrequent-access-days")) {
            this.infrequentAccess = Integer.parseInt(config.getAttribute("infrequent-access-days"));
        }
        if (config.hasAttribute("simple-s3")) {
            EncyptUtils.baseEncode = Boolean.parseBoolean(config.getAttribute("simple-s3"));
            this.simpleS3 = true;
        }
        if (config.hasAttribute("md5-sum")) {
            this.md5sum = Boolean.parseBoolean(config.getAttribute("md5-sum"));
            if (!this.md5sum) {
                System.setProperty("com.amazonaws.services.s3.disableGetObjectMD5Validation", "true");
                System.setProperty("com.amazonaws.services.s3.disablePutObjectMD5Validation", "true");
            }

        }
        ClientConfiguration clientConfig = new ClientConfiguration();
        if (config.hasAttribute("use-v4-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-v4-signer"));

            if (v4s) {
                clientConfig.setSignerOverride("AWSS3V4SignerType");
            }
        }
        if (config.hasAttribute("use-basic-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-basic-signer"));
            if (v4s) {
                clientConfig.setSignerOverride("S3SignerType");
            }
        }

        clientConfig.setMaxConnections(Main.dseIOThreads * 2);
        clientConfig.setConnectionTimeout(10000);
        clientConfig.setSocketTimeout(10000);

        String s3Target = null;
        if (config.getElementsByTagName("connection-props").getLength() > 0) {
            Element el = (Element) config.getElementsByTagName("connection-props").item(0);
            if (el.hasAttribute("connection-timeout"))
                clientConfig.setConnectionTimeout(Integer.parseInt(el.getAttribute("connection-timeout")));
            if (el.hasAttribute("socket-timeout"))
                clientConfig.setSocketTimeout(Integer.parseInt(el.getAttribute("socket-timeout")));
            if (el.hasAttribute("local-address"))
                clientConfig.setLocalAddress(InetAddress.getByName(el.getAttribute("local-address")));
            if (el.hasAttribute("max-retry"))
                clientConfig.setMaxErrorRetry(Integer.parseInt(el.getAttribute("max-retry")));
            if (el.hasAttribute("protocol")) {
                String pr = el.getAttribute("protocol");
                if (pr.equalsIgnoreCase("http"))
                    clientConfig.setProtocol(Protocol.HTTP);
                else
                    clientConfig.setProtocol(Protocol.HTTPS);

            }
            if (el.hasAttribute("s3-target")) {
                s3Target = el.getAttribute("s3-target");
            }
            if (el.hasAttribute("proxy-host")) {
                clientConfig.setProxyHost(el.getAttribute("proxy-host"));
            }
            if (el.hasAttribute("proxy-domain")) {
                clientConfig.setProxyDomain(el.getAttribute("proxy-domain"));
            }
            if (el.hasAttribute("proxy-password")) {
                clientConfig.setProxyPassword(el.getAttribute("proxy-password"));
            }
            if (el.hasAttribute("proxy-port")) {
                clientConfig.setProxyPort(Integer.parseInt(el.getAttribute("proxy-port")));
            }
            if (el.hasAttribute("proxy-username")) {
                clientConfig.setProxyUsername(el.getAttribute("proxy-username"));
            }
        }

        if (s3Target != null && s3Target.toLowerCase().startsWith("https")) {
            TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
                @Override
                public boolean isTrusted(X509Certificate[] certificate, String authType) {
                    return true;
                }
            };
            SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                    SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
            clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        }
        if (awsCredentials != null)
            s3Service = new AmazonS3Client(awsCredentials, clientConfig);
        else
            s3Service = new AmazonS3Client(new InstanceProfileCredentialsProvider(), clientConfig);
        if (bucketLocation != null) {
            s3Service.setRegion(bucketLocation);
            System.out.println("bucketLocation=" + bucketLocation.toString());
        }
        if (s3Target != null) {
            s3Service.setEndpoint(s3Target);
            System.out.println("target=" + s3Target);
        }
        if (config.hasAttribute("disableDNSBucket")) {
            s3Service.setS3ClientOptions(new S3ClientOptions()
                    .withPathStyleAccess(Boolean.parseBoolean(config.getAttribute("disableDNSBucket")))
                    .disableChunkedEncoding());
            System.out.println(
                    "disableDNSBucket=" + Boolean.parseBoolean(config.getAttribute("disableDNSBucket")));
        }
        if (!s3Service.doesBucketExist(this.name)) {
            s3Service.createBucket(this.name);
            SDFSLogger.getLog().info("created new store " + name);
            ObjectMetadata md = new ObjectMetadata();
            md.addUserMetadata("currentsize", "0");
            md.addUserMetadata("currentcompressedsize", "0");
            md.addUserMetadata("clustered", "true");
            md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
            md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
            md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

            this.clustered = true;
            byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
            if (md5sum) {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.setContentMD5(mds);
            }
            md.setContentLength(sz.length);
            this.binm = "bucketinfo/"
                    + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
        } else {
            Map<String, String> obj = null;
            ObjectMetadata omd = null;
            try {
                omd = s3Service.getObjectMetadata(this.name, binm);
                obj = omd.getUserMetadata();
                obj.get("currentsize");
            } catch (Exception e) {
                omd = null;
                SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
            }
            if (omd == null) {
                try {
                    this.binm = "bucketinfo/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                    omd = s3Service.getObjectMetadata(this.name, binm);
                    obj = omd.getUserMetadata();
                    obj.get("currentsize");
                } catch (Exception e) {
                    omd = null;
                    SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
                }
            }
            if (omd == null) {
                ObjectMetadata md = new ObjectMetadata();
                md.addUserMetadata("currentsize", "0");
                md.addUserMetadata("currentcompressedsize", "0");
                md.addUserMetadata("clustered", "true");
                md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

                this.clustered = true;
                this.binm = "bucketinfo/"
                        + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                if (md5sum) {
                    String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.setContentMD5(mds);
                }
                md.setContentLength(sz.length);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
            } else {
                if (obj.containsKey("currentsize")) {
                    long cl = Long.parseLong((String) obj.get("currentsize"));
                    if (cl >= 0) {
                        HashBlobArchive.currentLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly len=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }

                if (obj.containsKey("currentcompressedsize")) {
                    long cl = Long.parseLong((String) obj.get("currentcompressedsize"));
                    if (cl >= 0) {
                        HashBlobArchive.compressedLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly clen=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }
                if (obj.containsKey("clustered")) {
                    this.clustered = Boolean.parseBoolean(obj.get("clustered"));
                } else
                    this.clustered = false;

                obj.put("clustered", Boolean.toString(this.clustered));
                omd.setUserMetadata(obj);
                try {

                    updateObject(binm, omd);
                } catch (Exception e) {
                    SDFSLogger.getLog().warn("unable to update bucket info in init", e);
                    SDFSLogger.getLog().info("created new store " + name);
                    ObjectMetadata md = new ObjectMetadata();
                    md.addUserMetadata("currentsize", "0");
                    md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.addUserMetadata("currentcompressedsize", "0");
                    md.addUserMetadata("clustered", Boolean.toString(this.clustered));
                    md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                    md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    if (md5sum) {
                        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                        md.setContentMD5(mds);
                    }
                    md.setContentLength(sz.length);
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);

                }
            }
        }
        ArrayList<Transition> trs = new ArrayList<Transition>();
        if (this.glacierDays > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.glacierDays)
                    .withStorageClass(StorageClass.Glacier);
            trs.add(transToArchive);
        }

        if (this.infrequentAccess > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.infrequentAccess)
                    .withStorageClass(StorageClass.StandardInfrequentAccess);
            trs.add(transToArchive);

        }
        if (trs.size() > 0) {
            BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
                    .withId("SDFS Automated Archive Rule for Block Data").withPrefix("blocks/")
                    .withTransitions(trs).withStatus(BucketLifecycleConfiguration.ENABLED.toString());
            List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
            rules.add(ruleArchiveAndExpire);

            BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);

            // Save configuration.
            s3Service.setBucketLifecycleConfiguration(this.name, configuration);
        } else if (s3Target == null) {
            s3Service.deleteBucketLifecycleConfiguration(this.name);
        }
        HashBlobArchive.init(this);
        HashBlobArchive.setReadSpeed(rsp);
        HashBlobArchive.setWriteSpeed(wsp);
        Thread th = new Thread(this);
        th.start();
    } catch (Exception e) {
        SDFSLogger.getLog().error("unable to start service", e);
        throw new IOException(e);
    }

}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void writeHashBlobArchive(HashBlobArchive arc, long id) throws IOException {
    String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    try {
        int csz = toIntExact(arc.getFile().length());
        ObjectMetadata md = new ObjectMetadata();
        md.addUserMetadata("size", Integer.toString(arc.uncompressedLength.get()));
        md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
        md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
        md.addUserMetadata("compressedsize", Integer.toString(csz));
        md.addUserMetadata("bsize", Integer.toString(arc.getLen()));
        md.addUserMetadata("objects", Integer.toString(arc.getSz()));
        md.addUserMetadata("bcompressedsize", Integer.toString(csz));
        md.setContentType("binary/octet-stream");
        md.setContentLength(csz);
        if (md5sum) {
            FileInputStream in = new FileInputStream(arc.getFile());
            String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(in));
            md.setContentMD5(mds);
            md.addUserMetadata("md5sum", mds);
            IOUtils.closeQuietly(in);
        }
        PutObjectRequest req = new PutObjectRequest(this.name, "blocks/" + haName,
                new FileInputStream(arc.getFile()), md);

        if (this.simpleS3)
            s3Service.putObject(req);
        else
            this.multiPartUpload(req);
        byte[] msg = Long.toString(System.currentTimeMillis()).getBytes();
        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(msg));
        md.setContentMD5(mds);
        md.addUserMetadata("md5sum", mds);
        if (this.clustered) {
            md.setContentType("binary/octet-stream");
            md.setContentLength(msg.length);
            PutObjectRequest creq = new PutObjectRequest(this.name, this.getClaimName(id),
                    new ByteArrayInputStream(msg), md);

            s3Service.putObject(creq);
        }
        byte[] hs = arc.getHashesString().getBytes();
        int sz = hs.length;
        if (Main.compress) {
            hs = CompressionUtils.compressLz4(hs);
        }
        byte[] ivb = PassPhrase.getByteIV();
        if (Main.chunkStoreEncryptionEnabled) {
            hs = EncryptUtils.encryptCBC(hs, new IvParameterSpec(ivb));
        }
        md = new ObjectMetadata();
        md.addUserMetadata("size", Integer.toString(sz));
        md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
        md.addUserMetadata("lastaccessed", "0");
        md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
        md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
        md.addUserMetadata("compressedsize", Integer.toString(csz));
        md.addUserMetadata("bsize", Integer.toString(arc.uncompressedLength.get()));
        md.addUserMetadata("bcompressedsize", Integer.toString(csz));
        md.addUserMetadata("objects", Integer.toString(arc.getSz()));

        md.setContentType("binary/octet-stream");
        md.setContentLength(hs.length);
        if (md5sum) {
            mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(hs));
            md.setContentMD5(mds);
            md.addUserMetadata("md5sum", mds);
        }
        req = new PutObjectRequest(this.name, "keys/" + haName, new ByteArrayInputStream(hs), md);
        s3Service.putObject(req);
    } catch (Throwable e) {
        SDFSLogger.getLog().fatal("unable to upload " + arc.getID() + " with id " + id, e);
        throw new IOException(e);
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void run() {
    while (!closed) {
        try {
            Thread.sleep(60000);
            try {
                ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                Map<String, String> md = omd.getUserMetadata();
                ObjectMetadata nmd = new ObjectMetadata();
                nmd.setUserMetadata(md);
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                md.put("hostname", InetAddress.getLocalHost().getHostName());
                md.put("port", Integer.toString(Main.sdfsCliPort));
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.put("md5sum", st);
                nmd.setContentMD5(st);
                nmd.setContentLength(sz.length);
                nmd.setUserMetadata(md);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
            } catch (Exception e) {
                try {
                    ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                    Map<String, String> md = omd.getUserMetadata();
                    ObjectMetadata nmd = new ObjectMetadata();
                    nmd.setUserMetadata(md);
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.put("hostname", InetAddress.getLocalHost().getHostName());
                    md.put("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.put("md5sum", st);
                    nmd.setContentMD5(st);
                    nmd.setContentLength(sz.length);
                    nmd.setUserMetadata(md);

                    this.updateObject(binm, nmd);
                } catch (Exception e1) {
                    SDFSLogger.getLog().error("unable to update metadata for " + binm, e);
                }
            }

            if (this.deletes.size() > 0) {
                SDFSLogger.getLog().info("running garbage collection");
                RejectedExecutionHandler executionHandler = new BlockPolicy();
                BlockingQueue<Runnable> worksQueue = new SynchronousQueue<Runnable>();
                ThreadPoolExecutor executor = new ThreadPoolExecutor(1, Main.dseIOThreads, 10, TimeUnit.SECONDS,
                        worksQueue, executionHandler);
                this.delLock.lock();
                HashMap<Long, Integer> odel = null;
                try {
                    odel = this.deletes;
                    this.deletes = new HashMap<Long, Integer>();
                    // SDFSLogger.getLog().info("delete hash table size of "
                    // + odel.size());
                } finally {
                    this.delLock.unlock();
                }
                Set<Long> iter = odel.keySet();
                for (Long k : iter) {
                    DeleteObject obj = new DeleteObject();
                    obj.k = k;
                    obj.odel = odel;
                    obj.st = this;
                    executor.execute(obj);
                }
                executor.shutdown();
                while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                    SDFSLogger.getLog().debug("Awaiting deletion task completion of threads.");
                }
                SDFSLogger.getLog().info("done running garbage collection");
            }
        } catch (InterruptedException e) {
            break;
        } catch (Exception e) {
            SDFSLogger.getLog().error("error in delete thread", e);
        }
    }

}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void uploadFile(File f, String to, String pp) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        InputStream in = null;
        while (to.startsWith(File.separator))
            to = to.substring(1);

        String pth = pp + "/" + EncyptUtils.encString(to, Main.chunkStoreEncryptionEnabled);
        SDFSLogger.getLog().info("uploading " + f.getPath() + " to " + to + " pth " + pth);
        boolean isDir = false;
        boolean isSymlink = false;
        if (!OSValidator.isWindows()) {
            isDir = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isDirectory();
            isSymlink = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isSymbolicLink();
        } else {
            isDir = f.isDirectory();
        }
        if (isSymlink) {
            try {
                HashMap<String, String> metaData = new HashMap<String, String>();
                metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                metaData.put("lastmodified", Long.toString(f.lastModified()));
                String slp = EncyptUtils.encString(Files.readSymbolicLink(f.toPath()).toFile().getPath(),
                        Main.chunkStoreEncryptionEnabled);
                metaData.put("symlink", slp);
                ObjectMetadata md = new ObjectMetadata();
                md.setContentType("binary/octet-stream");
                md.setContentLength(pth.getBytes().length);
                md.setUserMetadata(metaData);
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                throw new IOException(e1);
            }
        } else if (isDir) {
            HashMap<String, String> metaData = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
            metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
            metaData.put("lastmodified", Long.toString(f.lastModified()));
            metaData.put("directory", "true");
            ObjectMetadata md = new ObjectMetadata();
            md.setContentType("binary/octet-stream");
            md.setContentLength(pth.getBytes().length);
            md.setUserMetadata(metaData);
            try {
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                SDFSLogger.getLog().error("error uploading", e1);
                throw new IOException(e1);
            }
        } else {
            String rnd = RandomGUID.getGuid();
            File p = new File(this.staged_sync_location, rnd);
            File z = new File(this.staged_sync_location, rnd + ".z");
            File e = new File(this.staged_sync_location, rnd + ".e");
            while (z.exists()) {
                rnd = RandomGUID.getGuid();
                p = new File(this.staged_sync_location, rnd);
                z = new File(this.staged_sync_location, rnd + ".z");
                e = new File(this.staged_sync_location, rnd + ".e");
            }
            try {
                BufferedInputStream is = new BufferedInputStream(new FileInputStream(f));
                BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(p));
                IOUtils.copy(is, os);
                os.flush();
                os.close();
                is.close();
                if (Main.compress) {
                    CompressionUtils.compressFile(p, z);
                    p.delete();
                    p = z;
                }
                byte[] ivb = null;
                if (Main.chunkStoreEncryptionEnabled) {
                    try {
                        ivb = PassPhrase.getByteIV();
                        EncryptUtils.encryptFile(p, e, new IvParameterSpec(ivb));

                    } catch (Exception e1) {
                        throw new IOException(e1);
                    }
                    p.delete();
                    p = e;
                }
                String objName = pth;
                ObjectMetadata md = new ObjectMetadata();
                Map<String, String> umd = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
                md.setUserMetadata(umd);
                md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
                md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                if (ivb != null)
                    md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
                md.addUserMetadata("lastmodified", Long.toString(f.lastModified()));
                if (simpleS3) {
                    md.setContentType("binary/octet-stream");
                    in = new BufferedInputStream(new FileInputStream(p), 32768);
                    try {
                        if (md5sum) {
                            byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                            in.close();
                            String mds = BaseEncoding.base64().encode(md5Hash);
                            md.setContentMD5(mds);
                            md.addUserMetadata("md5sum", mds);
                        }

                    } catch (NoSuchAlgorithmException e2) {
                        SDFSLogger.getLog().error("while hashing", e2);
                        throw new IOException(e2);
                    }

                    in = new FileInputStream(p);
                    md.setContentLength(p.length());
                    try {
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        s3Service.putObject(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                        SDFSLogger.getLog().debug(
                                "uploaded=" + f.getPath() + " lm=" + md.getUserMetadata().get("lastmodified"));
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {

                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                } else {
                    try {
                        md.setContentType("binary/octet-stream");
                        in = new BufferedInputStream(new FileInputStream(p), 32768);
                        byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                        in.close();
                        String mds = BaseEncoding.base64().encode(md5Hash);
                        md.setContentMD5(mds);
                        md.addUserMetadata("md5sum", mds);
                        in = new BufferedInputStream(new FileInputStream(p), 32768);

                        md.setContentLength(p.length());
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        multiPartUpload(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {

                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                }
            } finally {
                try {
                    if (in != null)
                        in.close();
                } finally {
                    p.delete();
                    z.delete();
                    e.delete();
                }
            }
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void checkoutObject(long id, int claims) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        if (!this.clustered)
            throw new IOException("volume is not clustered");
        ObjectMetadata om = this.getClaimMetaData(id);
        if (om != null)
            return;
        else {
            String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
            om = s3Service.getObjectMetadata(this.name, "keys/" + haName);
            Map<String, String> md = om.getUserMetadata();
            md.put("objects", Integer.toString(claims));
            if (md.containsKey("deleted")) {
                md.remove("deleted");
            }
            if (md.containsKey("deleted-objects")) {
                md.remove("deleted-objects");
            }
            if (md.containsKey("bsize")) {
                HashBlobArchive.currentLength.addAndGet(Integer.parseInt(md.get("bsize")));
            }
            if (md.containsKey("bcompressedsize")) {
                HashBlobArchive.compressedLength.addAndGet(Integer.parseInt(md.get("bcompressedsize")));
            }
            byte[] msg = Long.toString(System.currentTimeMillis()).getBytes();

            om.setContentLength(msg.length);
            try {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(msg));
                om.setContentMD5(mds);
                om.addUserMetadata("md5sum", mds);
            } catch (Exception e) {
                throw new IOException(e);
            }
            try {
                PutObjectRequest creq = new PutObjectRequest(this.name, this.getClaimName(id),
                        new ByteArrayInputStream(msg), om);
                s3Service.putObject(creq);
            } catch (AmazonS3Exception e1) {
                if (e1.getStatusCode() == 409) {
                    try {
                        s3Service.deleteObject(this.name, this.getClaimName(id));
                        this.checkoutObject(id, claims);
                        return;
                    } catch (Exception e2) {
                        throw new IOException(e2);
                    }
                } else {

                    throw new IOException(e1);
                }
            } catch (Exception e1) {
                // SDFSLogger.getLog().error("error uploading", e1);
                throw new IOException(e1);
            }
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void checkoutFile(String name) throws IOException {
    String pth = "claims/" + name + "/"
            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    try {
        byte[] b = Long.toString(System.currentTimeMillis()).getBytes();
        ObjectMetadata om = new ObjectMetadata();
        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(b));
        om.setContentMD5(mds);
        om.addUserMetadata("md5sum", mds);
        om.setContentLength(b.length);
        PutObjectRequest creq = new PutObjectRequest(this.name, pth, new ByteArrayInputStream(b), om);
        s3Service.putObject(creq);
    } catch (AmazonS3Exception e1) {
        if (e1.getStatusCode() == 409) {
            try {
                s3Service.deleteObject(this.name, pth);
                this.checkoutFile(name);
                return;
            } catch (Exception e2) {
                throw new IOException(e2);
            }
        } else {

            throw new IOException(e1);
        }
    } catch (Exception e1) {
        // SDFSLogger.getLog().error("error uploading", e1);
        throw new IOException(e1);
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}

From source file: org.springframework.integration.aws.outbound.S3MessageHandler.java

License: Apache License

private Transfer upload(Message<?> requestMessage) {
    Object payload = requestMessage.getPayload();
    String bucketName = obtainBucket(requestMessage);

    String key = null;
    if (this.keyExpression != null) {
        key = this.keyExpression.getValue(this.evaluationContext, requestMessage, String.class);
    }

    if (payload instanceof File && ((File) payload).isDirectory()) {
        File fileToUpload = (File) payload;
        if (key == null) {
            key = fileToUpload.getName();
        }
        return this.transferManager.uploadDirectory(bucketName, key, fileToUpload, true,
                new MessageHeadersObjectMetadataProvider(requestMessage.getHeaders()));
    } else {
        ObjectMetadata metadata = new ObjectMetadata();
        if (this.uploadMetadataProvider != null) {
            this.uploadMetadataProvider.populateMetadata(metadata, requestMessage);
        }
        InputStream inputStream;
        if (payload instanceof InputStream) {
            inputStream = (InputStream) payload;
        } else if (payload instanceof File) {
            File fileToUpload = (File) payload;
            if (key == null) {
                key = fileToUpload.getName();
            }
            try {
                inputStream = new FileInputStream(fileToUpload);

                if (metadata.getContentLength() == 0) {
                    metadata.setContentLength(fileToUpload.length());
                }
                if (metadata.getContentType() == null) {
                    metadata.setContentType(Mimetypes.getInstance().getMimetype(fileToUpload));
                }

            } catch (FileNotFoundException e) {
                throw new AmazonClientException(e);
            }
        } else if (payload instanceof byte[]) {
            inputStream = new ByteArrayInputStream((byte[]) payload);
        } else {
            throw new IllegalArgumentException("Unsupported payload type: [" + payload.getClass()
                    + "]. The only supported payloads for the upload request are "
                    + "java.io.File, java.io.InputStream, byte[] and PutObjectRequest.");
        }

        Assert.state(key != null,
                "The 'keyExpression' must not be null for non-File payloads and can't evaluate to null. "
                        + "Root object is: " + requestMessage);

        if (metadata.getContentMD5() == null) {
            String contentMd5 = null;
            try {
                contentMd5 = Md5Utils.md5AsBase64(StreamUtils.copyToByteArray(inputStream));
                if (inputStream.markSupported()) {
                    inputStream.reset();
                }
                metadata.setContentMD5(contentMd5);
            } catch (IOException e) {
                throw new MessageHandlingException(requestMessage, e);
            }
        }
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, inputStream, metadata);

        S3ProgressListener progressListener = this.s3ProgressListener;

        if (this.objectAclExpression != null) {
            Object acl = this.objectAclExpression.getValue(this.evaluationContext, requestMessage);
            Assert.state(acl instanceof AccessControlList || acl instanceof CannedAccessControlList,
                    "The 'objectAclExpression' [" + this.objectAclExpression.getExpressionString()
                            + "] must evaluate to com.amazonaws.services.s3.model.AccessControlList "
                            + "or must evaluate to com.amazonaws.services.s3.model.CannedAccessControlList. "
                            + "Gotten: [" + acl + "]");

            SetObjectAclRequest aclRequest;

            if (acl instanceof AccessControlList) {
                aclRequest = new SetObjectAclRequest(bucketName, key, (AccessControlList) acl);
            } else {
                aclRequest = new SetObjectAclRequest(bucketName, key, (CannedAccessControlList) acl);
            }

            final SetObjectAclRequest theAclRequest = aclRequest;
            progressListener = new S3ProgressListener() {

                @Override
                public void onPersistableTransfer(PersistableTransfer persistableTransfer) {

                }

                @Override
                public void progressChanged(ProgressEvent progressEvent) {
                    if (ProgressEventType.TRANSFER_COMPLETED_EVENT.equals(progressEvent.getEventType())) {
                        S3MessageHandler.this.transferManager.getAmazonS3Client().setObjectAcl(theAclRequest);
                    }
                }

            };

            if (this.s3ProgressListener != null) {
                progressListener = new S3ProgressListenerChain(this.s3ProgressListener, progressListener);
            }

        }

        if (progressListener != null) {
            return this.transferManager.upload(putObjectRequest, progressListener);
        } else {
            return this.transferManager.upload(putObjectRequest);
        }
    }
}

From source file: org.springframework.integration.aws.s3.core.AmazonS3OperationsImpl.java

License: Apache License

public void putObject(String bucketName, String folder, String objectName, AmazonS3Object s3Object) {
    if (logger.isDebugEnabled()) {
        logger.debug("Putting object to bucket " + bucketName + " and folder " + folder);
        logger.debug("Object Name is " + objectName);
    }

    if (objectName == null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Object Name is Mandatory");

    boolean isTempFile = false;
    File file = s3Object.getFileSource();
    InputStream in = s3Object.getInputStream();

    if (file != null && in != null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "File Object and Input Stream in the S3 Object are mutually exclusive");

    if (file == null && in == null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "At lease one of File object or Input Stream in the S3 Object are mandatory");

    String key;
    if (folder != null) {
        key = folder.endsWith(PATH_SEPARATOR) ? folder + objectName : folder + PATH_SEPARATOR + objectName;
    } else {
        key = objectName;
    }

    if (in != null) {
        file = getTempFile(in, bucketName, objectName);
        isTempFile = true;
    }

    PutObjectRequest request;
    if (file != null) {
        request = new PutObjectRequest(bucketName, key, file);
        //if the size of the file is greater than the threshold for multipart upload,
        //set the Content-MD5 header for this upload. This header will also come in handy
        //later in the inbound-channel-adapter, where we can't find the MD5 sum of the
        //multipart upload file from its ETag
        String stringContentMD5 = null;
        try {
            stringContentMD5 = AmazonWSCommonUtils.encodeHex(AmazonWSCommonUtils.getContentsMD5AsBytes(file));
        } catch (UnsupportedEncodingException e) {
            logger.error("Exception while generating the content's MD5 of the file " + file.getAbsolutePath(),
                    e);
        }

        if (stringContentMD5 != null) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentMD5(stringContentMD5);
            request.withMetadata(metadata);
        }
    } else
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Unable to get the File handle to upload the file to S3");

    Upload upload;
    try {
        upload = transferManager.upload(request);
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Encountered Exception while invoking upload on multipart/single thread file, "
                        + "see nested exceptions for more details",
                e);
    }
    //Wait till the upload completes, the call to putObject is synchronous
    try {
        if (logger.isInfoEnabled())
            logger.info("Waiting for Upload to complete");
        upload.waitForCompletion();
        if (logger.isInfoEnabled())
            logger.info("Upload completed");
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Encountered Exception while uploading the multipart/single thread file, "
                        + "see nested exceptions for more details",
                e);
    }
    if (isTempFile) {
        //Delete the temp file
        if (logger.isDebugEnabled())
            logger.debug("Deleting temp file: " + file.getName());
        file.delete();
    }

    //Now that the object is present on S3, set the access control list on it.
    //Note that it is not possible to set the object ACL with the put object
    //request, hence the two operations cannot be atomic: it is possible that
    //the object is uploaded but the ACL is not set due to some failure.

    AmazonS3ObjectACL acl = s3Object.getObjectACL();
    AccessControlList objectACL = getAccessControlList(bucketName, key, acl);
    if (objectACL != null) {
        if (logger.isInfoEnabled())
            logger.info("Setting Access control list for key " + key);
        try {
            client.setObjectAcl(bucketName, key, objectACL);
        } catch (Exception e) {
            throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                    "Encountered Exception while setting the Object ACL for key , " + key
                            + "see nested exceptions for more details",
                    e);
        }
        if (logger.isDebugEnabled())
            logger.debug("Successfully set the object ACL");
    } else {
        if (logger.isInfoEnabled())
            logger.info("No Object ACL found to be set");
    }
}
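
Note that setContentMD5 expects the base64-encoded digest described in the prototype above rather than a hex string. As a minimal sketch (not taken from the sources listed), the base64 form for a file-based request can be produced with the SDK's Md5Utils; the bucket and key names are placeholders:

import java.io.File;
import java.io.IOException;

import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.util.Md5Utils;

// Minimal sketch: attach a base64-encoded Content-MD5 (RFC 1864) to a file-based
// PutObjectRequest. Bucket, key and file are placeholders.
public PutObjectRequest withContentMd5(File file) throws IOException {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentMD5(Md5Utils.md5AsBase64(file));
    metadata.addUserMetadata("md5sum", metadata.getContentMD5());
    return new PutObjectRequest("example-bucket", "example-key", file)
            .withMetadata(metadata);
}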