Example usage for com.amazonaws.services.s3.model ObjectMetadata setUserMetadata

Introduction

This page lists example usage of com.amazonaws.services.s3.model.ObjectMetadata#setUserMetadata, drawn from open-source projects.

Prototype

public void setUserMetadata(Map<String, String> userMetadata) 

Document

Sets the custom user-metadata for the associated object.
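
Note that setUserMetadata replaces the object's entire user-metadata map, while addUserMetadata adds a single key/value pair to it. S3 stores each entry as an x-amz-meta-* header, and key names come back lowercased when the metadata is later retrieved. A minimal sketch of attaching user metadata on upload (the bucket name, key, and metadata entry are placeholders):

public static void uploadWithMetadata(AmazonS3 s3, File f) {
    Map<String, String> userMetadata = new HashMap<String, String>();
    userMetadata.put("uploaded-by", "example"); // stored as x-amz-meta-uploaded-by
    ObjectMetadata omd = new ObjectMetadata();
    omd.setUserMetadata(userMetadata);
    // The SDK derives the content length from the file itself
    s3.putObject(new PutObjectRequest("my-bucket", "my-key", f).withMetadata(omd));
}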

Usage

From source file:org.fcrepo.modeshape.binary.S3BinaryStore.java

License:Apache License
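
This example caches the incoming stream on the local file system so a SHA-1 key can be computed; if the object is not yet in S3, an "unused" flag is written into the new object's user metadata before upload.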

@Override
public BinaryValue storeValue(InputStream stream, boolean markAsUnused) throws BinaryStoreException {
    // Cache file on the file system in order to have SHA-1 hash calculated
    BinaryValue cachedFile = fileSystemCache.storeValue(stream, markAsUnused);
    try {
        // Retrieve SHA-1 hash
        BinaryKey key = new BinaryKey(cachedFile.getKey().toString());

        // If file is NOT already in S3 storage, store it
        if (!s3Client.doesObjectExist(bucketName, key.toString())) {
            ObjectMetadata metadata = new ObjectMetadata();
            // Set Mimetype
            metadata.setContentType(fileSystemCache.getMimeType(cachedFile, key.toString()));
            // Set Unused value
            Map<String, String> userMetadata = metadata.getUserMetadata();
            userMetadata.put(UNUSED_KEY, String.valueOf(markAsUnused));
            metadata.setUserMetadata(userMetadata);
            // Store content in S3
            s3Client.putObject(bucketName, key.toString(), fileSystemCache.getInputStream(key), metadata);
        } else {
            // Set the unused value, if necessary
            if (markAsUnused) {
                markAsUnused(Collections.singleton(key));
            } else {
                markAsUsed(Collections.singleton(key));
            }
        }
        return new StoredBinaryValue(this, key, cachedFile.getSize());
    } catch (AmazonClientException | RepositoryException | IOException e) {
        throw new BinaryStoreException(e);
    } finally {
        // Remove cached file
        fileSystemCache.markAsUnused(Collections.singleton(cachedFile.getKey()));
        fileSystemCache.removeValuesUnusedLongerThan(1, TimeUnit.MICROSECONDS);
    }
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License
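
On shutdown, the store reads the bucketinfo object's current user metadata, copies it into a fresh ObjectMetadata, refreshes the size counters, timestamp, and host details, and re-uploads the small marker object with the updated map.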

@Override
public void close() {
    this.closed = true;
    try {
        SDFSLogger.getLog().info("############ Closing Bucket##################");
        HashBlobArchive.close();

        ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
        Map<String, String> md = omd.getUserMetadata();
        ObjectMetadata nmd = new ObjectMetadata();
        nmd.setUserMetadata(md);
        md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
        md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
        md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
        md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
        md.put("lastupdate", Long.toString(System.currentTimeMillis()));
        md.put("hostname", InetAddress.getLocalHost().getHostName());
        md.put("port", Integer.toString(Main.sdfsCliPort));
        byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
        String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
        md.put("md5sum", st);
        nmd.setContentMD5(st);
        nmd.setContentLength(sz.length);
        nmd.setUserMetadata(md);
        try {
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
        } catch (AmazonS3Exception e1) {
            if (e1.getStatusCode() == 409) {
                try {
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
                } catch (Exception e2) {
                    throw new IOException(e2);
                }
            } else {

                throw new IOException(e1);
            }
        } catch (Exception e1) {
            // SDFSLogger.getLog().error("error uploading", e1);
            throw new IOException(e1);
        }
    } catch (Exception e) {
        SDFSLogger.getLog().warn("error while closing bucket " + this.name, e);
    } finally {
        try {
            s3Service.shutdown();
        } catch (Exception e) {
            SDFSLogger.getLog().debug("error while closing bucket " + this.name, e);
        }
    }

}
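
Because the bucketinfo object's payload is only a timestamp, the cheapest way to persist the updated counters is to re-upload the whole marker object. The retry on HTTP status 409 guards against a conflicting concurrent write, which some S3-compatible stores report with that code.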

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License
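
This initializer configures the S3 client from an XML configuration element, then creates or reconciles a bucketinfo marker object whose user metadata records store size, last update time, and cluster state, before applying any Glacier or infrequent-access lifecycle rules.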

@Override
public void init(Element config) throws IOException {
    this.name = Main.cloudBucket.toLowerCase();
    this.staged_sync_location.mkdirs();
    try {
        if (config.hasAttribute("default-bucket-location")) {
            bucketLocation = RegionUtils.getRegion(config.getAttribute("default-bucket-location"));

        }
        if (config.hasAttribute("connection-check-interval")) {
            this.checkInterval = Integer.parseInt(config.getAttribute("connection-check-interval"));
        }
        if (config.hasAttribute("block-size")) {
            int sz = (int) StringUtils.parseSize(config.getAttribute("block-size"));
            HashBlobArchive.MAX_LEN = sz;
        }
        if (config.hasAttribute("allow-sync")) {
            HashBlobArchive.allowSync = Boolean.parseBoolean(config.getAttribute("allow-sync"));
            if (config.hasAttribute("sync-check-schedule")) {
                try {
                    new SyncFSScheduler(config.getAttribute("sync-check-schedule"));
                } catch (Exception e) {
                    SDFSLogger.getLog().error("unable to start sync scheduler", e);
                }
            }

        }
        if (config.hasAttribute("upload-thread-sleep-time")) {
            int tm = Integer.parseInt(config.getAttribute("upload-thread-sleep-time"));
            HashBlobArchive.THREAD_SLEEP_TIME = tm;
        }
        if (config.hasAttribute("cache-writes")) {
            HashBlobArchive.cacheWrites = Boolean.parseBoolean(config.getAttribute("cache-writes"));
        }
        if (config.hasAttribute("cache-reads")) {
            HashBlobArchive.cacheReads = Boolean.parseBoolean(config.getAttribute("cache-reads"));
        }
        if (config.hasAttribute("sync-files")) {
            boolean syncf = Boolean.parseBoolean(config.getAttribute("sync-files"));
            if (syncf) {
                new FileReplicationService(this);
            }
        }
        int rsp = 0;
        int wsp = 0;
        if (config.hasAttribute("read-speed")) {
            rsp = Integer.parseInt(config.getAttribute("read-speed"));
        }
        if (config.hasAttribute("write-speed")) {
            wsp = Integer.parseInt(config.getAttribute("write-speed"));
        }
        if (config.hasAttribute("local-cache-size")) {
            long sz = StringUtils.parseSize(config.getAttribute("local-cache-size"));
            HashBlobArchive.setLocalCacheSize(sz);
        }
        if (config.hasAttribute("metadata-version")) {
            this.mdVersion = Integer.parseInt(config.getAttribute("metadata-version"));
        }
        if (config.hasAttribute("map-cache-size")) {
            int sz = Integer.parseInt(config.getAttribute("map-cache-size"));
            HashBlobArchive.MAP_CACHE_SIZE = sz;
        }
        if (config.hasAttribute("io-threads")) {
            int sz = Integer.parseInt(config.getAttribute("io-threads"));
            Main.dseIOThreads = sz;
        }
        if (config.hasAttribute("clustered")) {
            this.clustered = Boolean.parseBoolean(config.getAttribute("clustered"));
        }
        if (config.hasAttribute("delete-unclaimed")) {
            this.deleteUnclaimed = Boolean.parseBoolean(config.getAttribute("delete-unclaimed"));
        }
        if (config.hasAttribute("glacier-archive-days")) {
            this.glacierDays = Integer.parseInt(config.getAttribute("glacier-archive-days"));
            if (this.glacierDays > 0)
                Main.checkArchiveOnRead = true;
        }
        if (config.hasAttribute("infrequent-access-days")) {
            this.infrequentAccess = Integer.parseInt(config.getAttribute("infrequent-access-days"));
        }
        if (config.hasAttribute("simple-s3")) {
            EncyptUtils.baseEncode = Boolean.parseBoolean(config.getAttribute("simple-s3"));
            this.simpleS3 = true;
        }
        if (config.hasAttribute("md5-sum")) {
            this.md5sum = Boolean.parseBoolean(config.getAttribute("md5-sum"));
            if (!this.md5sum) {
                System.setProperty("com.amazonaws.services.s3.disableGetObjectMD5Validation", "true");
                System.setProperty("com.amazonaws.services.s3.disablePutObjectMD5Validation", "true");
            }

        }
        ClientConfiguration clientConfig = new ClientConfiguration();
        if (config.hasAttribute("use-v4-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-v4-signer"));

            if (v4s) {
                clientConfig.setSignerOverride("AWSS3V4SignerType");
            }
        }
        if (config.hasAttribute("use-basic-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-basic-signer"));
            if (v4s) {
                clientConfig.setSignerOverride("S3SignerType");
            }
        }

        clientConfig.setMaxConnections(Main.dseIOThreads * 2);
        clientConfig.setConnectionTimeout(10000);
        clientConfig.setSocketTimeout(10000);

        String s3Target = null;
        if (config.getElementsByTagName("connection-props").getLength() > 0) {
            Element el = (Element) config.getElementsByTagName("connection-props").item(0);
            if (el.hasAttribute("connection-timeout"))
                clientConfig.setConnectionTimeout(Integer.parseInt(el.getAttribute("connection-timeout")));
            if (el.hasAttribute("socket-timeout"))
                clientConfig.setSocketTimeout(Integer.parseInt(el.getAttribute("socket-timeout")));
            if (el.hasAttribute("local-address"))
                clientConfig.setLocalAddress(InetAddress.getByName(el.getAttribute("local-address")));
            if (el.hasAttribute("max-retry"))
                clientConfig.setMaxErrorRetry(Integer.parseInt(el.getAttribute("max-retry")));
            if (el.hasAttribute("protocol")) {
                String pr = el.getAttribute("protocol");
                if (pr.equalsIgnoreCase("http"))
                    clientConfig.setProtocol(Protocol.HTTP);
                else
                    clientConfig.setProtocol(Protocol.HTTPS);

            }
            if (el.hasAttribute("s3-target")) {
                s3Target = el.getAttribute("s3-target");
            }
            if (el.hasAttribute("proxy-host")) {
                clientConfig.setProxyHost(el.getAttribute("proxy-host"));
            }
            if (el.hasAttribute("proxy-domain")) {
                clientConfig.setProxyDomain(el.getAttribute("proxy-domain"));
            }
            if (el.hasAttribute("proxy-password")) {
                clientConfig.setProxyPassword(el.getAttribute("proxy-password"));
            }
            if (el.hasAttribute("proxy-port")) {
                clientConfig.setProxyPort(Integer.parseInt(el.getAttribute("proxy-port")));
            }
            if (el.hasAttribute("proxy-username")) {
                clientConfig.setProxyUsername(el.getAttribute("proxy-username"));
            }
        }

        if (s3Target != null && s3Target.toLowerCase().startsWith("https")) {
            TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
                @Override
                public boolean isTrusted(X509Certificate[] certificate, String authType) {
                    return true;
                }
            };
            SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                    SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
            clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        }
        if (awsCredentials != null)
            s3Service = new AmazonS3Client(awsCredentials, clientConfig);
        else
            s3Service = new AmazonS3Client(new InstanceProfileCredentialsProvider(), clientConfig);
        if (bucketLocation != null) {
            s3Service.setRegion(bucketLocation);
            System.out.println("bucketLocation=" + bucketLocation.toString());
        }
        if (s3Target != null) {
            s3Service.setEndpoint(s3Target);
            System.out.println("target=" + s3Target);
        }
        if (config.hasAttribute("disableDNSBucket")) {
            s3Service.setS3ClientOptions(new S3ClientOptions()
                    .withPathStyleAccess(Boolean.parseBoolean(config.getAttribute("disableDNSBucket")))
                    .disableChunkedEncoding());
            System.out.println(
                    "disableDNSBucket=" + Boolean.parseBoolean(config.getAttribute("disableDNSBucket")));
        }
        if (!s3Service.doesBucketExist(this.name)) {
            s3Service.createBucket(this.name);
            SDFSLogger.getLog().info("created new store " + name);
            ObjectMetadata md = new ObjectMetadata();
            md.addUserMetadata("currentsize", "0");
            md.addUserMetadata("currentcompressedsize", "0");
            md.addUserMetadata("clustered", "true");
            md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
            md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
            md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

            this.clustered = true;
            byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
            if (md5sum) {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.setContentMD5(mds);
            }
            md.setContentLength(sz.length);
            this.binm = "bucketinfo/"
                    + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
        } else {
            Map<String, String> obj = null;
            ObjectMetadata omd = null;
            try {
                omd = s3Service.getObjectMetadata(this.name, binm);
                obj = omd.getUserMetadata();
                obj.get("currentsize");
            } catch (Exception e) {
                omd = null;
                SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
            }
            if (omd == null) {
                try {
                    this.binm = "bucketinfo/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                    omd = s3Service.getObjectMetadata(this.name, binm);
                    obj = omd.getUserMetadata();
                    obj.get("currentsize");
                } catch (Exception e) {
                    omd = null;
                    SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
                }
            }
            if (omd == null) {
                ObjectMetadata md = new ObjectMetadata();
                md.addUserMetadata("currentsize", "0");
                md.addUserMetadata("currentcompressedsize", "0");
                md.addUserMetadata("clustered", "true");
                md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

                this.clustered = true;
                this.binm = "bucketinfo/"
                        + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                if (md5sum) {
                    String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.setContentMD5(mds);
                }
                md.setContentLength(sz.length);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
            } else {
                if (obj.containsKey("currentsize")) {
                    long cl = Long.parseLong((String) obj.get("currentsize"));
                    if (cl >= 0) {
                        HashBlobArchive.currentLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly len=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }

                if (obj.containsKey("currentcompressedsize")) {
                    long cl = Long.parseLong((String) obj.get("currentcompressedsize"));
                    if (cl >= 0) {
                        HashBlobArchive.compressedLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly clen=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }
                if (obj.containsKey("clustered")) {
                    this.clustered = Boolean.parseBoolean(obj.get("clustered"));
                } else
                    this.clustered = false;

                obj.put("clustered", Boolean.toString(this.clustered));
                omd.setUserMetadata(obj);
                try {

                    updateObject(binm, omd);
                } catch (Exception e) {
                    SDFSLogger.getLog().warn("unable to update bucket info in init", e);
                    SDFSLogger.getLog().info("created new store " + name);
                    ObjectMetadata md = new ObjectMetadata();
                    md.addUserMetadata("currentsize", "0");
                    md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.addUserMetadata("currentcompressedsize", "0");
                    md.addUserMetadata("clustered", Boolean.toString(this.clustered));
                    md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                    md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    if (md5sum) {
                        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                        md.setContentMD5(mds);
                    }
                    md.setContentLength(sz.length);
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);

                }
            }
        }
        ArrayList<Transition> trs = new ArrayList<Transition>();
        if (this.glacierDays > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.glacierDays)
                    .withStorageClass(StorageClass.Glacier);
            trs.add(transToArchive);
        }

        if (this.infrequentAccess > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.infrequentAccess)
                    .withStorageClass(StorageClass.StandardInfrequentAccess);
            trs.add(transToArchive);

        }
        if (trs.size() > 0) {
            BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
                    .withId("SDFS Automated Archive Rule for Block Data").withPrefix("blocks/")
                    .withTransitions(trs).withStatus(BucketLifecycleConfiguration.ENABLED.toString());
            List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
            rules.add(ruleArchiveAndExpire);

            BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);

            // Save configuration.
            s3Service.setBucketLifecycleConfiguration(this.name, configuration);
        } else if (s3Target == null) {
            s3Service.deleteBucketLifecycleConfiguration(this.name);
        }
        HashBlobArchive.init(this);
        HashBlobArchive.setReadSpeed(rsp);
        HashBlobArchive.setWriteSpeed(wsp);
        Thread th = new Thread(this);
        th.start();
    } catch (Exception e) {
        SDFSLogger.getLog().error("unable to start service", e);
        throw new IOException(e);
    }

}
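
Several of these snippets call an updateObject(key, metadata) helper that is not part of the excerpt. Since S3 does not allow an object's metadata to be edited in place, a plausible implementation (an assumption, not the project's actual code) copies the object onto itself with the new metadata, which the SDK sends as a metadata REPLACE:

// Hypothetical sketch of the updateObject helper; the project's real
// implementation is not shown in this excerpt.
private void updateObject(String key, ObjectMetadata md) {
    // Copying an object onto itself with new metadata rewrites the
    // metadata without re-uploading the payload.
    s3Service.copyObject(new CopyObjectRequest(this.name, key, this.name, key)
            .withNewObjectMetadata(md));
}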

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License
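
After downloading a block, this method verifies its MD5 against the md5sum user-metadata entry (when enabled), stamps a lastaccessed time into the metadata, and reclaims any block that had been marked deleted by rewriting its deletion counters.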

private void getData(long id, File f) throws Exception {
    // SDFSLogger.getLog().info("Downloading " + id);
    // SDFSLogger.getLog().info("Current readers :" + rr.incrementAndGet());
    String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    S3Object sobj = null;
    try {

        long tm = System.currentTimeMillis();
        ObjectMetadata omd = s3Service.getObjectMetadata(this.name, "blocks/" + haName);

        try {
            sobj = s3Service.getObject(this.name, "blocks/" + haName);
        } catch (Exception e) {
            throw new IOException(e);
        }
        int cl = (int) omd.getContentLength();
        if (this.simpleS3) {

            FileOutputStream out = null;
            InputStream in = null;
            try {
                out = new FileOutputStream(f);
                in = sobj.getObjectContent();
                IOUtils.copy(in, out);
                out.flush();

            } catch (Exception e) {
                throw new IOException(e);
            } finally {
                IOUtils.closeQuietly(out);
                IOUtils.closeQuietly(in);

            }
        } else {
            this.multiPartDownload("blocks/" + haName, f);
        }
        double dtm = (System.currentTimeMillis() - tm) / 1000d;
        double bps = (cl / 1024) / dtm;
        SDFSLogger.getLog().debug("read [" + id + "] at " + bps + " kbps");
        Map<String, String> mp = this.getUserMetaData(omd);
        if (md5sum && mp.containsKey("md5sum")) {
            byte[] shash = BaseEncoding.base64().decode(mp.get("md5sum"));

            InputStream in = new FileInputStream(f);
            byte[] chash = ServiceUtils.computeMD5Hash(in);
            IOUtils.closeQuietly(in);
            if (!Arrays.equals(shash, chash))
                throw new IOException("download corrupt at " + id);
        }

        try {
            mp.put("lastaccessed", Long.toString(System.currentTimeMillis()));
            omd.setUserMetadata(mp);

            updateObject("blocks/" + haName, omd);
        } catch (Exception e) {
            SDFSLogger.getLog().debug("error setting last accessed", e);
        }
        if (mp.containsKey("deleted")) {
            boolean del = Boolean.parseBoolean((String) mp.get("deleted"));
            if (del) {
                S3Object kobj = s3Service.getObject(this.name, "keys/" + haName);

                int claims = this.getClaimedObjects(kobj, id);

                int delobj = 0;
                if (mp.containsKey("deleted-objects")) {
                    delobj = Integer.parseInt((String) mp.get("deleted-objects")) - claims;
                    if (delobj < 0)
                        delobj = 0;
                }
                mp.remove("deleted");
                mp.put("deleted-objects", Integer.toString(delobj));
                mp.put("suspect", "true");
                omd.setUserMetadata(mp);

                updateObject("keys/" + haName, omd);
                int _size = Integer.parseInt((String) mp.get("size"));
                int _compressedSize = Integer.parseInt((String) mp.get("compressedsize"));
                HashBlobArchive.currentLength.addAndGet(_size);
                HashBlobArchive.compressedLength.addAndGet(_compressedSize);
                SDFSLogger.getLog().warn("Reclaimed [" + claims + "] blocks marked for deletion");
                kobj.close();
            }
        }
        dtm = (System.currentTimeMillis() - tm) / 1000d;
        bps = (cl / 1024) / dtm;
    } catch (AmazonS3Exception e) {
        if (e.getErrorCode().equalsIgnoreCase("InvalidObjectState"))
            throw new DataArchivedException(id, null);
        else {
            SDFSLogger.getLog().error("unable to get block [" + id + "] at [blocks/" + haName + "]", e);
            throw e;

        }
    } finally {
        try {
            if (sobj != null) {
                sobj.close();
            }
        } catch (Exception e) {

        }
        this.s3clientLock.readLock().unlock();
    }
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License
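
Counts the claims remaining on an archived key; if any remain, the deletion markers in its user metadata are reset, otherwise the block and key objects are deleted.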

private int verifyDelete(long id) throws IOException, Exception {
    String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
    ObjectMetadata om = null;
    S3Object kobj = null;

    int claims = 0;
    this.s3clientLock.readLock().lock();
    try {
        kobj = s3Service.getObject(this.name, "keys/" + haName);
        claims = this.getClaimedObjects(kobj, id);
        // Fetch the object metadata before reading its user-metadata map
        if (this.clustered)
            om = this.getClaimMetaData(id);
        else
            om = s3Service.getObjectMetadata(this.name, "keys/" + haName);
        Map<String, String> mp = this.getUserMetaData(om);
        if (claims > 0) {

            int delobj = 0;
            if (mp.containsKey("deleted-objects")) {
                delobj = Integer.parseInt((String) mp.get("deleted-objects")) - claims;
                if (delobj < 0)
                    delobj = 0;
            }
            mp.remove("deleted");
            mp.put("deleted-objects", Integer.toString(delobj));
            mp.put("suspect", "true");
            om.setUserMetadata(mp);
            String kn = null;
            if (this.clustered)
                kn = this.getClaimName(id);
            else
                kn = "keys/" + haName;

            this.updateObject(kn, om);

            SDFSLogger.getLog().warn("Reclaimed [" + claims + "] blocks marked for deletion");

        }

        if (claims == 0) {
            if (!clustered) {
                s3Service.deleteObject(this.name, "blocks/" + haName);
                s3Service.deleteObject(this.name, "keys/" + haName);
                SDFSLogger.getLog().debug("deleted block " + "blocks/" + haName + " id " + id);
            } else {
                s3Service.deleteObject(this.name, this.getClaimName(id));
                int _size = Integer.parseInt((String) mp.get("size"));
                int _compressedSize = Integer.parseInt((String) mp.get("compressedsize"));
                HashBlobArchive.currentLength.addAndGet(-1 * _size);
                HashBlobArchive.compressedLength.addAndGet(-1 * _compressedSize);
                ObjectListing ol = s3Service.listObjects(this.getName(), "claims/keys/" + haName);
                if (ol.getObjectSummaries().size() == 0) {
                    s3Service.deleteObject(this.name, "blocks/" + haName);
                    s3Service.deleteObject(this.name, "keys/" + haName);
                    SDFSLogger.getLog().debug("deleted block " + "blocks/" + haName + " id " + id);
                }
            }
        }
    } finally {
        try {
            kobj.close();
        } catch (Exception e) {
        }
        this.s3clientLock.readLock().unlock();
    }
    return claims;
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License
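
A background thread that, once a minute, rewrites the bucketinfo user metadata (falling back to updateObject on failure) and drains any queued deletes on a bounded thread pool.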

@Override
public void run() {
    while (!closed) {
        try {
            Thread.sleep(60000);
            try {
                ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                Map<String, String> md = omd.getUserMetadata();
                ObjectMetadata nmd = new ObjectMetadata();
                nmd.setUserMetadata(md);
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                md.put("hostname", InetAddress.getLocalHost().getHostName());
                md.put("port", Integer.toString(Main.sdfsCliPort));
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.put("md5sum", st);
                nmd.setContentMD5(st);
                nmd.setContentLength(sz.length);
                nmd.setUserMetadata(md);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
            } catch (Exception e) {
                try {
                    ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
                    Map<String, String> md = omd.getUserMetadata();
                    ObjectMetadata nmd = new ObjectMetadata();
                    nmd.setUserMetadata(md);
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
                    md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
                    md.put("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.put("hostname", InetAddress.getLocalHost().getHostName());
                    md.put("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.put("md5sum", st);
                    nmd.setContentMD5(st);
                    nmd.setContentLength(sz.length);
                    nmd.setUserMetadata(md);

                    this.updateObject(binm, nmd);
                } catch (Exception e1) {
                    SDFSLogger.getLog().error("unable to update metadata for " + binm, e);
                }
            }

            if (this.deletes.size() > 0) {
                SDFSLogger.getLog().info("running garbage collection");
                RejectedExecutionHandler executionHandler = new BlockPolicy();
                BlockingQueue<Runnable> worksQueue = new SynchronousQueue<Runnable>();
                ThreadPoolExecutor executor = new ThreadPoolExecutor(1, Main.dseIOThreads, 10, TimeUnit.SECONDS,
                        worksQueue, executionHandler);
                this.delLock.lock();
                HashMap<Long, Integer> odel = null;
                try {
                    odel = this.deletes;
                    this.deletes = new HashMap<Long, Integer>();
                    // SDFSLogger.getLog().info("delete hash table size of "
                    // + odel.size());
                } finally {
                    this.delLock.unlock();
                }
                Set<Long> iter = odel.keySet();
                for (Long k : iter) {
                    DeleteObject obj = new DeleteObject();
                    obj.k = k;
                    obj.odel = odel;
                    obj.st = this;
                    executor.execute(obj);
                }
                executor.shutdown();
                while (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
                    SDFSLogger.getLog().debug("Awaiting deletion task completion of threads.");
                }
                SDFSLogger.getLog().info("done running garbage collection");
            }
        } catch (InterruptedException e) {
            break;
        } catch (Exception e) {
            SDFSLogger.getLog().error("error in delete thread", e);
        }
    }

}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License
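
Reads a keys object, checks its MD5 against the md5sum user-metadata entry, optionally decrypts and decompresses the payload, and clears stale deleted/deleted-objects markers by rewriting the metadata.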

public StringResult getStringResult(String key) throws IOException, InterruptedException {
    this.s3clientLock.readLock().lock();
    S3Object sobj = null;
    try {

        ObjectMetadata md = null;
        try {
            sobj = s3Service.getObject(getName(), key);
            md = s3Service.getObjectMetadata(this.name, key);
        } catch (Exception e) {
            throw new IOException(e);
        }
        int cl = (int) md.getContentLength();

        byte[] data = new byte[cl];
        DataInputStream in = null;
        try {
            in = new DataInputStream(sobj.getObjectContent());
            in.readFully(data);

        } catch (Exception e) {
            throw new IOException(e);
        } finally {
            if (in != null)
                in.close();
        }
        boolean encrypt = false;
        boolean compress = false;
        boolean lz4compress = false;
        Map<String, String> mp = this.getUserMetaData(md);
        byte[] ivb = null;
        if (mp.containsKey("ivspec")) {
            ivb = BaseEncoding.base64().decode(mp.get("ivspec"));
        }
        if (mp.containsKey("md5sum")) {
            try {
                byte[] shash = BaseEncoding.base64().decode(mp.get("md5sum"));
                byte[] chash = ServiceUtils.computeMD5Hash(data);
                if (!Arrays.equals(shash, chash))
                    throw new IOException("download corrupt at " + sobj.getKey());
            } catch (NoSuchAlgorithmException e) {
                throw new IOException(e);
            }
        }
        int size = Integer.parseInt(mp.get("size"));
        encrypt = Boolean.parseBoolean(mp.get("encrypt"));

        lz4compress = Boolean.parseBoolean(mp.get("lz4compress"));
        boolean changed = false;

        Long hid = EncyptUtils.decHashArchiveName(sobj.getKey().substring(5), encrypt);
        if (this.clustered)
            mp = s3Service.getObjectMetadata(this.name, this.getClaimName(hid)).getUserMetadata();
        if (mp.containsKey("deleted")) {
            mp.remove("deleted");
            changed = true;
        }
        if (mp.containsKey("deleted-objects")) {
            mp.remove("deleted-objects");
            changed = true;
        }

        if (encrypt) {

            if (ivb != null) {
                data = EncryptUtils.decryptCBC(data, new IvParameterSpec(ivb));
            } else {
                data = EncryptUtils.decryptCBC(data);
            }
        }
        if (compress)
            data = CompressionUtils.decompressZLIB(data);
        else if (lz4compress) {
            data = CompressionUtils.decompressLz4(data, size);
        }

        String hast = new String(data);
        SDFSLogger.getLog().debug("reading hashes " + (String) mp.get("objects") + " from " + hid + " encn "
                + sobj.getKey().substring(5));
        StringTokenizer ht = new StringTokenizer(hast, ",");
        StringResult st = new StringResult();
        st.id = hid;
        st.st = ht;
        if (mp.containsKey("bsize")) {
            HashBlobArchive.currentLength.addAndGet(Integer.parseInt(mp.get("bsize")));
        }
        if (mp.containsKey("bcompressedsize")) {
            HashBlobArchive.compressedLength.addAndGet(Integer.parseInt(mp.get("bcompressedsize")));
        }
        if (changed) {
            try {
                md = sobj.getObjectMetadata();
                md.setUserMetadata(mp);
                String kn = null;
                if (this.clustered)
                    kn = this.getClaimName(hid);
                else
                    kn = sobj.getKey();

                this.updateObject(kn, md);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }
        return st;
    } finally {
        if (sobj != null)
            sobj.close();
        this.s3clientLock.readLock().unlock();
    }
}

From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License:Open Source License
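
Uploads a file, directory marker, or symlink; regular files are optionally compressed and encrypted first, with the lz4compress, encrypt, ivspec, and lastmodified flags recorded in the object's user metadata.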

@Override
public void uploadFile(File f, String to, String pp) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        InputStream in = null;
        while (to.startsWith(File.separator))
            to = to.substring(1);

        String pth = pp + "/" + EncyptUtils.encString(to, Main.chunkStoreEncryptionEnabled);
        SDFSLogger.getLog().info("uploading " + f.getPath() + " to " + to + " pth " + pth);
        boolean isDir = false;
        boolean isSymlink = false;
        if (!OSValidator.isWindows()) {
            isDir = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isDirectory();
            isSymlink = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isSymbolicLink();
        } else {
            isDir = f.isDirectory();
        }
        if (isSymlink) {
            try {
                HashMap<String, String> metaData = new HashMap<String, String>();
                metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                metaData.put("lastmodified", Long.toString(f.lastModified()));
                String slp = EncyptUtils.encString(Files.readSymbolicLink(f.toPath()).toFile().getPath(),
                        Main.chunkStoreEncryptionEnabled);
                metaData.put("symlink", slp);
                ObjectMetadata md = new ObjectMetadata();
                md.setContentType("binary/octet-stream");
                md.setContentLength(pth.getBytes().length);
                md.setUserMetadata(metaData);
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                throw new IOException(e1);
            }
        } else if (isDir) {
            HashMap<String, String> metaData = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
            metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
            metaData.put("lastmodified", Long.toString(f.lastModified()));
            metaData.put("directory", "true");
            ObjectMetadata md = new ObjectMetadata();
            md.setContentType("binary/octet-stream");
            md.setContentLength(pth.getBytes().length);
            md.setUserMetadata(metaData);
            try {
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                SDFSLogger.getLog().error("error uploading", e1);
                throw new IOException(e1);
            }
        } else {
            String rnd = RandomGUID.getGuid();
            File p = new File(this.staged_sync_location, rnd);
            File z = new File(this.staged_sync_location, rnd + ".z");
            File e = new File(this.staged_sync_location, rnd + ".e");
            while (z.exists()) {
                rnd = RandomGUID.getGuid();
                p = new File(this.staged_sync_location, rnd);
                z = new File(this.staged_sync_location, rnd + ".z");
                e = new File(this.staged_sync_location, rnd + ".e");
            }
            try {
                BufferedInputStream is = new BufferedInputStream(new FileInputStream(f));
                BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(p));
                IOUtils.copy(is, os);
                os.flush();
                os.close();
                is.close();
                if (Main.compress) {
                    CompressionUtils.compressFile(p, z);
                    p.delete();
                    p = z;
                }
                byte[] ivb = null;
                if (Main.chunkStoreEncryptionEnabled) {
                    try {
                        ivb = PassPhrase.getByteIV();
                        EncryptUtils.encryptFile(p, e, new IvParameterSpec(ivb));

                    } catch (Exception e1) {
                        throw new IOException(e1);
                    }
                    p.delete();
                    p = e;
                }
                String objName = pth;
                ObjectMetadata md = new ObjectMetadata();
                Map<String, String> umd = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
                md.setUserMetadata(umd);
                md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
                md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                if (ivb != null)
                    md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
                md.addUserMetadata("lastmodified", Long.toString(f.lastModified()));
                if (simpleS3) {
                    md.setContentType("binary/octet-stream");
                    in = new BufferedInputStream(new FileInputStream(p), 32768);
                    try {
                        if (md5sum) {
                            byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                            in.close();
                            String mds = BaseEncoding.base64().encode(md5Hash);
                            md.setContentMD5(mds);
                            md.addUserMetadata("md5sum", mds);
                        }

                    } catch (NoSuchAlgorithmException e2) {
                        SDFSLogger.getLog().error("while hashing", e2);
                        throw new IOException(e2);
                    }

                    in = new FileInputStream(p);
                    md.setContentLength(p.length());
                    try {
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        s3Service.putObject(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                        SDFSLogger.getLog().debug(
                                "uploaded=" + f.getPath() + " lm=" + md.getUserMetadata().get("lastmodified"));
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {

                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                } else {
                    try {
                        md.setContentType("binary/octet-stream");
                        in = new BufferedInputStream(new FileInputStream(p), 32768);
                        byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                        in.close();
                        String mds = BaseEncoding.base64().encode(md5Hash);
                        md.setContentMD5(mds);
                        md.addUserMetadata("md5sum", mds);
                        in = new BufferedInputStream(new FileInputStream(p), 32768);

                        md.setContentLength(p.length());
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        multiPartUpload(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {

                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                }
            } finally {
                try {
                    if (in != null)
                        in.close();
                } finally {
                    p.delete();
                    z.delete();
                    e.delete();
                }
            }
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}

From source file:org.openinfinity.cloud.domain.repository.deployer.BucketRepositoryAWSImpl.java

License:Apache License

/**
 * Creates a bucket and stores the object read from the given input stream in it.
 *
 * @param inputStream Represents the file input stream.
 * @param bucketName Represents the name of the bucket to create.
 * @param key Represents the key under which to store the new object.
 * @param metadataMap Represents the user metadata to attach to the stored object.
 * @return key Defines the created resource key.
 */
public String createBucket(InputStream inputStream, String bucketName, String key,
        Map<String, String> metadataMap) {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setUserMetadata(metadataMap);
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, inputStream, objectMetadata);
    simpleStorageService.createBucket(bucketName);
    simpleStorageService.putObject(putObjectRequest);
    return key;
}

From source file:org.plos.repo.service.S3StoreService.java

License:Open Source License
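
Serializes the RepoObject's fields to strings with Jackson and attaches them as user metadata, retrying the upload up to five times on failure.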

@Override
public boolean saveUploadedObject(Bucket bucket, UploadInfo uploadInfo, RepoObject repoObject) {
    int retries = 5;
    int tryCount = 0;
    int waitSecond = 4;

    ObjectMapper m = new ObjectMapper();
    Map<String, java.lang.Object> propsObj = m.convertValue(repoObject, Map.class);

    Map<String, String> propsStr = new HashMap<>();

    for (Map.Entry<String, java.lang.Object> entry : propsObj.entrySet()) {
        try {
            if (entry.getValue() == null) {
                propsStr.put(entry.getKey(), "");
            } else {
                propsStr.put(entry.getKey(), entry.getValue().toString());
            }
        } catch (ClassCastException cce) {
            log.error("Problem converting object to metadata", cce);
        }
    }

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(uploadInfo.getSize());
    objectMetadata.setUserMetadata(propsStr);

    File tempFile = new File(uploadInfo.getTempLocation());

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket.getBucketName(), uploadInfo.getChecksum(),
            tempFile);
    putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead);
    putObjectRequest.setMetadata(objectMetadata);

    while (tryCount < retries) {
        try {
            s3Client.putObject(putObjectRequest); // TODO: check result and do something about it
            tempFile.delete();
            return true;
        } catch (Exception e) {
            tryCount++;

            log.error("Error during putObject", e);

            try {
                Thread.sleep(waitSecond * 1000);
            } catch (Exception e2) {
            }
        }
    }

    return false;
}