Example usage for com.amazonaws.services.s3.model ObjectMetadata addUserMetadata

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata addUserMetadata

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model ObjectMetadata addUserMetadata.

Prototype

public void addUserMetadata(String key, String value) 

Document

Adds a key-value pair of custom user metadata for the associated object.
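
Before the full examples below, here is a minimal, self-contained sketch of the call. The bucket name, object key, and metadata values are placeholders; user metadata is sent as x-amz-meta-* request headers, so keys are passed without that prefix.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class AddUserMetadataSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        byte[] body = "example content".getBytes(StandardCharsets.UTF_8);

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("text/plain");
        metadata.setContentLength(body.length);
        // Stored as x-amz-meta-department and x-amz-meta-uploaded-by
        metadata.addUserMetadata("department", "engineering");
        metadata.addUserMetadata("uploaded-by", "example-user");

        s3.putObject(new PutObjectRequest("example-bucket", "example-key",
                new ByteArrayInputStream(body), metadata));
    }
}

Note that S3 limits user-defined metadata to 2 KB in total per object, and that keys come back lower-cased when the metadata is read.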

Usage

From source file: org.duracloud.s3storage.S3StorageProvider.java

License: Apache License

/**
 * {@inheritDoc}
 */
public String addContent(String spaceId, String contentId, String contentMimeType,
        Map<String, String> userProperties, long contentSize, String contentChecksum, InputStream content) {
    log.debug("addContent(" + spaceId + ", " + contentId + ", " + contentMimeType + ", " + contentSize + ", "
            + contentChecksum + ")");

    // Will throw if bucket does not exist
    String bucketName = getBucketName(spaceId);

    // Wrap the content in order to be able to retrieve a checksum
    ChecksumInputStream wrappedContent = new ChecksumInputStream(content, contentChecksum);

    String contentEncoding = removeContentEncoding(userProperties);

    userProperties = removeCalculatedProperties(userProperties);

    if (contentMimeType == null || contentMimeType.equals("")) {
        contentMimeType = DEFAULT_MIMETYPE;
    }

    ObjectMetadata objMetadata = new ObjectMetadata();
    objMetadata.setContentType(contentMimeType);
    if (contentSize > 0) {
        objMetadata.setContentLength(contentSize);
    }
    if (null != contentChecksum && !contentChecksum.isEmpty()) {
        String encodedChecksum = ChecksumUtil.convertToBase64Encoding(contentChecksum);
        objMetadata.setContentMD5(encodedChecksum);
    }

    if (contentEncoding != null) {
        objMetadata.setContentEncoding(contentEncoding);
    }

    if (userProperties != null) {
        for (String key : userProperties.keySet()) {
            String value = userProperties.get(key);

            if (log.isDebugEnabled()) {
                log.debug("[" + key + "|" + value + "]");
            }

            objMetadata.addUserMetadata(getSpaceFree(encodeHeaderKey(key)), encodeHeaderValue(value));
        }
    }

    PutObjectRequest putRequest = new PutObjectRequest(bucketName, contentId, wrappedContent, objMetadata);
    putRequest.setStorageClass(DEFAULT_STORAGE_CLASS);
    putRequest.setCannedAcl(CannedAccessControlList.Private);

    // Add the object
    String etag;
    try {
        PutObjectResult putResult = s3Client.putObject(putRequest);
        etag = putResult.getETag();
    } catch (AmazonClientException e) {
        if (e instanceof AmazonS3Exception) {
            AmazonS3Exception s3Ex = (AmazonS3Exception) e;
            String errorCode = s3Ex.getErrorCode();
            Integer statusCode = s3Ex.getStatusCode();
            String message = MessageFormat.format(
                    "exception putting object {0} into {1}: errorCode={2},"
                            + "  statusCode={3}, errorMessage={4}",
                    contentId, bucketName, errorCode, statusCode, e.getMessage());

            if (errorCode.equals("InvalidDigest") || errorCode.equals("BadDigest")) {
                log.error(message, e);

                String err = "Checksum mismatch detected attempting to add " + "content " + contentId
                        + " to S3 bucket " + bucketName + ". Content was not added.";
                throw new ChecksumMismatchException(err, e, NO_RETRY);
            } else if (errorCode.equals("IncompleteBody")) {
                log.error(message, e);
                throw new StorageException("The content body was incomplete for " + contentId + " to S3 bucket "
                        + bucketName + ". Content was not added.", e, NO_RETRY);
            } else if (!statusCode.equals(HttpStatus.SC_SERVICE_UNAVAILABLE)
                    && !statusCode.equals(HttpStatus.SC_NOT_FOUND)) {
                log.error(message, e);
            } else {
                log.warn(message, e);
            }
        } else {
            String err = MessageFormat.format("exception putting object {0} into {1}: {2}", contentId,
                    bucketName, e.getMessage());
            log.error(err, e);
        }

        // Check to see if file landed successfully in S3, despite the exception
        etag = doesContentExistWithExpectedChecksum(bucketName, contentId, contentChecksum);
        if (null == etag) {
            String err = "Could not add content " + contentId + " with type " + contentMimeType + " and size "
                    + contentSize + " to S3 bucket " + bucketName + " due to error: " + e.getMessage();
            throw new StorageException(err, e, NO_RETRY);
        }
    }

    // Compare checksum
    String providerChecksum = getETagValue(etag);
    String checksum = wrappedContent.getMD5();
    StorageProviderUtil.compareChecksum(providerChecksum, spaceId, contentId, checksum);
    return providerChecksum;
}

From source file: org.duracloud.s3storage.S3StorageProvider.java

License: Apache License

/**
 * {@inheritDoc}
 */
public void setContentProperties(String spaceId, String contentId, Map<String, String> contentProperties) {
    log.debug("setContentProperties(" + spaceId + ", " + contentId + ")");

    // Will throw if bucket does not exist
    String bucketName = getBucketName(spaceId);

    String contentEncoding = removeContentEncoding(contentProperties);

    contentProperties = removeCalculatedProperties(contentProperties);

    // Determine mimetype, from properties list or existing value
    String mimeType = contentProperties.remove(PROPERTIES_CONTENT_MIMETYPE);
    if (mimeType == null || mimeType.equals("")) {
        Map<String, String> existingMeta = getContentProperties(spaceId, contentId);
        String existingMime = existingMeta.get(StorageProvider.PROPERTIES_CONTENT_MIMETYPE);
        if (existingMime != null) {
            mimeType = existingMime;
        }
    }

    // Collect all object properties
    ObjectMetadata objMetadata = new ObjectMetadata();
    for (String key : contentProperties.keySet()) {
        if (log.isDebugEnabled()) {
            log.debug("[" + key + "|" + contentProperties.get(key) + "]");
        }
        objMetadata.addUserMetadata(getSpaceFree(key), contentProperties.get(key));
    }

    // Set Content-Type
    if (mimeType != null && !mimeType.equals("")) {
        objMetadata.setContentType(mimeType);
    }

    // Set Content-Encoding
    if (contentEncoding != null && !contentEncoding.equals("")) {
        objMetadata.setContentEncoding(contentEncoding);
    }

    updateObjectProperties(bucketName, contentId, objMetadata);
}
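
The updateObjectProperties helper is not shown on this page. S3 metadata cannot be edited in place, so an update of this kind is typically done by copying the object onto itself with the new metadata. A minimal sketch of what such a helper might do, reusing bucketName, contentId, and objMetadata from the example above (an illustration, not the DuraCloud code itself):

CopyObjectRequest copyRequest = new CopyObjectRequest(bucketName, contentId, bucketName, contentId);
copyRequest.setNewObjectMetadata(objMetadata); // metadata on the copy replaces the original's
s3Client.copyObject(copyRequest);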

From source file: org.entando.entando.plugins.jps3awsclient.aps.system.services.storage.AmazonS3StorageManager.java

License: Open Source License

public void store(IStorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl)
        throws ApsSystemException {
    try {
        AmazonS3Client client = this.getS3Client();
        String bucketName = obj.getBucketName().toLowerCase();
        this.checkForAndCreateBucket(bucketName, client);
        ObjectMetadata omd = new ObjectMetadata();
        omd.setContentType(obj.getContentType());
        omd.setContentLength(obj.getContentLength());
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, obj.getStoragePath(),
                obj.getInputStream(), omd);
        // Check if reduced redundancy is enabled
        if (reducedRedundancy) {
            putObjectRequest.setStorageClass(StorageClass.ReducedRedundancy);
        }
        if (null != obj.getUserMetadata()) {
            // Note: this fresh ObjectMetadata replaces the omd built above on the
            // request, so the content type and length set there are discarded
            ObjectMetadata objectMetadata = new ObjectMetadata();
            putObjectRequest.setMetadata(objectMetadata);
            Iterator<String> iter = obj.getUserMetadata().keySet().iterator();
            while (iter.hasNext()) {
                String key = iter.next();
                objectMetadata.addUserMetadata(key, obj.getUserMetadata().get(key));
            }
        }
        client.putObject(putObjectRequest);
        // If we have an ACL, set access permissions for the data on S3
        if (acl != null) {
            client.setObjectAcl(bucketName, obj.getStoragePath(), acl);
        }
    } catch (Throwable t) {
        _logger.error("Error storing object", t);
        throw new ApsSystemException("Error storing object", t);
    }
}
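
Once stored, the entries added with addUserMetadata can be read back via getObjectMetadata. A minimal sketch reusing client, bucketName, and obj from the example above; note that S3 returns user-metadata keys lower-cased:

ObjectMetadata fetched = client.getObjectMetadata(bucketName, obj.getStoragePath());
for (Map.Entry<String, String> entry : fetched.getUserMetadata().entrySet()) {
    System.out.println(entry.getKey() + " = " + entry.getValue());
}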

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void init(Element config) throws IOException {
    this.name = Main.cloudBucket.toLowerCase();
    this.staged_sync_location.mkdirs();
    try {
        if (config.hasAttribute("default-bucket-location")) {
            bucketLocation = RegionUtils.getRegion(config.getAttribute("default-bucket-location"));

        }
        if (config.hasAttribute("connection-check-interval")) {
            this.checkInterval = Integer.parseInt(config.getAttribute("connection-check-interval"));
        }
        if (config.hasAttribute("block-size")) {
            int sz = (int) StringUtils.parseSize(config.getAttribute("block-size"));
            HashBlobArchive.MAX_LEN = sz;
        }
        if (config.hasAttribute("allow-sync")) {
            HashBlobArchive.allowSync = Boolean.parseBoolean(config.getAttribute("allow-sync"));
            if (config.hasAttribute("sync-check-schedule")) {
                try {
                    new SyncFSScheduler(config.getAttribute("sync-check-schedule"));
                } catch (Exception e) {
                    SDFSLogger.getLog().error("unable to start sync scheduler", e);
                }
            }

        }
        if (config.hasAttribute("upload-thread-sleep-time")) {
            int tm = Integer.parseInt(config.getAttribute("upload-thread-sleep-time"));
            HashBlobArchive.THREAD_SLEEP_TIME = tm;
        }
        if (config.hasAttribute("cache-writes")) {
            HashBlobArchive.cacheWrites = Boolean.parseBoolean(config.getAttribute("cache-writes"));
        }
        if (config.hasAttribute("cache-reads")) {
            HashBlobArchive.cacheReads = Boolean.parseBoolean(config.getAttribute("cache-reads"));
        }
        if (config.hasAttribute("sync-files")) {
            boolean syncf = Boolean.parseBoolean(config.getAttribute("sync-files"));
            if (syncf) {
                new FileReplicationService(this);
            }
        }
        int rsp = 0;
        int wsp = 0;
        if (config.hasAttribute("read-speed")) {
            rsp = Integer.parseInt(config.getAttribute("read-speed"));
        }
        if (config.hasAttribute("write-speed")) {
            wsp = Integer.parseInt(config.getAttribute("write-speed"));
        }
        if (config.hasAttribute("local-cache-size")) {
            long sz = StringUtils.parseSize(config.getAttribute("local-cache-size"));
            HashBlobArchive.setLocalCacheSize(sz);
        }
        if (config.hasAttribute("metadata-version")) {
            this.mdVersion = Integer.parseInt(config.getAttribute("metadata-version"));
        }
        if (config.hasAttribute("map-cache-size")) {
            int sz = Integer.parseInt(config.getAttribute("map-cache-size"));
            HashBlobArchive.MAP_CACHE_SIZE = sz;
        }
        if (config.hasAttribute("io-threads")) {
            int sz = Integer.parseInt(config.getAttribute("io-threads"));
            Main.dseIOThreads = sz;
        }
        if (config.hasAttribute("clustered")) {
            this.clustered = Boolean.parseBoolean(config.getAttribute("clustered"));
        }
        if (config.hasAttribute("delete-unclaimed")) {
            this.deleteUnclaimed = Boolean.parseBoolean(config.getAttribute("delete-unclaimed"));
        }
        if (config.hasAttribute("glacier-archive-days")) {
            this.glacierDays = Integer.parseInt(config.getAttribute("glacier-archive-days"));
            if (this.glacierDays > 0)
                Main.checkArchiveOnRead = true;
        }
        if (config.hasAttribute("infrequent-access-days")) {
            this.infrequentAccess = Integer.parseInt(config.getAttribute("infrequent-access-days"));
        }
        if (config.hasAttribute("simple-s3")) {
            EncyptUtils.baseEncode = Boolean.parseBoolean(config.getAttribute("simple-s3"));
            this.simpleS3 = true;
        }
        if (config.hasAttribute("md5-sum")) {
            this.md5sum = Boolean.parseBoolean(config.getAttribute("md5-sum"));
            if (!this.md5sum) {
                System.setProperty("com.amazonaws.services.s3.disableGetObjectMD5Validation", "true");
                System.setProperty("com.amazonaws.services.s3.disablePutObjectMD5Validation", "true");
            }

        }
        ClientConfiguration clientConfig = new ClientConfiguration();
        if (config.hasAttribute("use-v4-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-v4-signer"));

            if (v4s) {
                clientConfig.setSignerOverride("AWSS3V4SignerType");
            }
        }
        if (config.hasAttribute("use-basic-signer")) {
            boolean v4s = Boolean.parseBoolean(config.getAttribute("use-basic-signer"));
            if (v4s) {
                clientConfig.setSignerOverride("S3SignerType");
            }
        }

        clientConfig.setMaxConnections(Main.dseIOThreads * 2);
        clientConfig.setConnectionTimeout(10000);
        clientConfig.setSocketTimeout(10000);

        String s3Target = null;
        if (config.getElementsByTagName("connection-props").getLength() > 0) {
            Element el = (Element) config.getElementsByTagName("connection-props").item(0);
            if (el.hasAttribute("connection-timeout"))
                clientConfig.setConnectionTimeout(Integer.parseInt(el.getAttribute("connection-timeout")));
            if (el.hasAttribute("socket-timeout"))
                clientConfig.setSocketTimeout(Integer.parseInt(el.getAttribute("socket-timeout")));
            if (el.hasAttribute("local-address"))
                clientConfig.setLocalAddress(InetAddress.getByName(el.getAttribute("local-address")));
            if (el.hasAttribute("max-retry"))
                clientConfig.setMaxErrorRetry(Integer.parseInt(el.getAttribute("max-retry")));
            if (el.hasAttribute("protocol")) {
                String pr = el.getAttribute("protocol");
                if (pr.equalsIgnoreCase("http"))
                    clientConfig.setProtocol(Protocol.HTTP);
                else
                    clientConfig.setProtocol(Protocol.HTTPS);

            }
            if (el.hasAttribute("s3-target")) {
                s3Target = el.getAttribute("s3-target");
            }
            if (el.hasAttribute("proxy-host")) {
                clientConfig.setProxyHost(el.getAttribute("proxy-host"));
            }
            if (el.hasAttribute("proxy-domain")) {
                clientConfig.setProxyDomain(el.getAttribute("proxy-domain"));
            }
            if (el.hasAttribute("proxy-password")) {
                clientConfig.setProxyPassword(el.getAttribute("proxy-password"));
            }
            if (el.hasAttribute("proxy-port")) {
                clientConfig.setProxyPort(Integer.parseInt(el.getAttribute("proxy-port")));
            }
            if (el.hasAttribute("proxy-username")) {
                clientConfig.setProxyUsername(el.getAttribute("proxy-username"));
            }
        }

        if (s3Target != null && s3Target.toLowerCase().startsWith("https")) {
            TrustStrategy acceptingTrustStrategy = new TrustStrategy() {
                @Override
                public boolean isTrusted(X509Certificate[] certificate, String authType) {
                    return true;
                }
            };
            SSLSocketFactory sf = new SSLSocketFactory(acceptingTrustStrategy,
                    SSLSocketFactory.ALLOW_ALL_HOSTNAME_VERIFIER);
            clientConfig.getApacheHttpClientConfig().withSslSocketFactory(sf);
        }
        if (awsCredentials != null)
            s3Service = new AmazonS3Client(awsCredentials, clientConfig);
        else
            s3Service = new AmazonS3Client(new InstanceProfileCredentialsProvider(), clientConfig);
        if (bucketLocation != null) {
            s3Service.setRegion(bucketLocation);
            System.out.println("bucketLocation=" + bucketLocation.toString());
        }
        if (s3Target != null) {
            s3Service.setEndpoint(s3Target);
            System.out.println("target=" + s3Target);
        }
        if (config.hasAttribute("disableDNSBucket")) {
            s3Service.setS3ClientOptions(new S3ClientOptions()
                    .withPathStyleAccess(Boolean.parseBoolean(config.getAttribute("disableDNSBucket")))
                    .disableChunkedEncoding());
            System.out.println(
                    "disableDNSBucket=" + Boolean.parseBoolean(config.getAttribute("disableDNSBucket")));
        }
        if (!s3Service.doesBucketExist(this.name)) {
            s3Service.createBucket(this.name);
            SDFSLogger.getLog().info("created new store " + name);
            ObjectMetadata md = new ObjectMetadata();
            md.addUserMetadata("currentsize", "0");
            md.addUserMetadata("currentcompressedsize", "0");
            md.addUserMetadata("clustered", "true");
            md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
            md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
            md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

            this.clustered = true;
            byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
            if (md5sum) {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                md.setContentMD5(mds);
            }
            md.setContentLength(sz.length);
            this.binm = "bucketinfo/"
                    + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
        } else {
            Map<String, String> obj = null;
            ObjectMetadata omd = null;
            try {
                omd = s3Service.getObjectMetadata(this.name, binm);
                obj = omd.getUserMetadata();
                obj.get("currentsize");
            } catch (Exception e) {
                omd = null;
                SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
            }
            if (omd == null) {
                try {
                    this.binm = "bucketinfo/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                    omd = s3Service.getObjectMetadata(this.name, binm);
                    obj = omd.getUserMetadata();
                    obj.get("currentsize");
                } catch (Exception e) {
                    omd = null;
                    SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
                }
            }
            if (omd == null) {
                ObjectMetadata md = new ObjectMetadata();
                md.addUserMetadata("currentsize", "0");
                md.addUserMetadata("currentcompressedsize", "0");
                md.addUserMetadata("clustered", "true");
                md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));

                this.clustered = true;
                this.binm = "bucketinfo/"
                        + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                if (md5sum) {
                    String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                    md.setContentMD5(mds);
                }
                md.setContentLength(sz.length);
                s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);
            } else {
                if (obj.containsKey("currentsize")) {
                    long cl = Long.parseLong((String) obj.get("currentsize"));
                    if (cl >= 0) {
                        HashBlobArchive.currentLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly len=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }

                if (obj.containsKey("currentcompressedsize")) {
                    long cl = Long.parseLong((String) obj.get("currentcompressedsize"));
                    if (cl >= 0) {
                        HashBlobArchive.compressedLength.set(cl);

                    } else
                        SDFSLogger.getLog().warn("The S3 objectstore DSE did not close correctly clen=" + cl);
                } else {
                    SDFSLogger.getLog().warn(
                            "The S3 objectstore DSE did not close correctly. Metadata tag currentsize was not added");
                }
                if (obj.containsKey("clustered")) {
                    this.clustered = Boolean.parseBoolean(obj.get("clustered"));
                } else
                    this.clustered = false;

                obj.put("clustered", Boolean.toString(this.clustered));
                omd.setUserMetadata(obj);
                try {

                    updateObject(binm, omd);
                } catch (Exception e) {
                    SDFSLogger.getLog().warn("unable to update bucket info in init", e);
                    SDFSLogger.getLog().info("created new store " + name);
                    ObjectMetadata md = new ObjectMetadata();
                    md.addUserMetadata("currentsize", "0");
                    md.addUserMetadata("lastupdate", Long.toString(System.currentTimeMillis()));
                    md.addUserMetadata("currentcompressedsize", "0");
                    md.addUserMetadata("clustered", Boolean.toString(this.clustered));
                    md.addUserMetadata("hostname", InetAddress.getLocalHost().getHostName());
                    md.addUserMetadata("port", Integer.toString(Main.sdfsCliPort));
                    byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
                    if (md5sum) {
                        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
                        md.setContentMD5(mds);
                    }
                    md.setContentLength(sz.length);
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), md);

                }
            }
        }
        ArrayList<Transition> trs = new ArrayList<Transition>();
        if (this.glacierDays > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.glacierDays)
                    .withStorageClass(StorageClass.Glacier);
            trs.add(transToArchive);
        }

        if (this.infrequentAccess > 0 && s3Target == null) {
            Transition transToArchive = new Transition().withDays(this.infrequentAccess)
                    .withStorageClass(StorageClass.StandardInfrequentAccess);
            trs.add(transToArchive);

        }
        if (trs.size() > 0) {
            BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
                    .withId("SDFS Automated Archive Rule for Block Data").withPrefix("blocks/")
                    .withTransitions(trs).withStatus(BucketLifecycleConfiguration.ENABLED.toString());
            List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
            rules.add(ruleArchiveAndExpire);

            BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);

            // Save configuration.
            s3Service.setBucketLifecycleConfiguration(this.name, configuration);
        } else if (s3Target == null) {
            s3Service.deleteBucketLifecycleConfiguration(this.name);
        }
        HashBlobArchive.init(this);
        HashBlobArchive.setReadSpeed(rsp);
        HashBlobArchive.setWriteSpeed(wsp);
        Thread th = new Thread(this);
        th.start();
    } catch (Exception e) {
        SDFSLogger.getLog().error("unable to start service", e);
        throw new IOException(e);
    }

}
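
The updateObject helper called in the branch above is project code, not part of the SDK; as in the DuraCloud example earlier, replacing user metadata on an existing object presumably amounts to copying the object onto itself with the modified ObjectMetadata.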

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void writeHashBlobArchive(HashBlobArchive arc, long id) throws IOException {
    String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    try {
        int csz = toIntExact(arc.getFile().length());
        ObjectMetadata md = new ObjectMetadata();
        md.addUserMetadata("size", Integer.toString(arc.uncompressedLength.get()));
        md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
        md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
        md.addUserMetadata("compressedsize", Integer.toString(csz));
        md.addUserMetadata("bsize", Integer.toString(arc.getLen()));
        md.addUserMetadata("objects", Integer.toString(arc.getSz()));
        md.addUserMetadata("bcompressedsize", Integer.toString(csz));
        md.setContentType("binary/octet-stream");
        md.setContentLength(csz);
        if (md5sum) {
            FileInputStream in = new FileInputStream(arc.getFile());
            String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(in));
            md.setContentMD5(mds);
            md.addUserMetadata("md5sum", mds);
            IOUtils.closeQuietly(in);
        }
        PutObjectRequest req = new PutObjectRequest(this.name, "blocks/" + haName,
                new FileInputStream(arc.getFile()), md);

        if (this.simpleS3)
            s3Service.putObject(req);
        else
            this.multiPartUpload(req);
        byte[] msg = Long.toString(System.currentTimeMillis()).getBytes();
        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(msg));
        md.setContentMD5(mds);
        md.addUserMetadata("md5sum", mds);
        if (this.clustered) {
            md.setContentType("binary/octet-stream");
            md.setContentLength(msg.length);
            PutObjectRequest creq = new PutObjectRequest(this.name, this.getClaimName(id),
                    new ByteArrayInputStream(msg), md);

            s3Service.putObject(creq);
        }
        byte[] hs = arc.getHashesString().getBytes();
        int sz = hs.length;
        if (Main.compress) {
            hs = CompressionUtils.compressLz4(hs);
        }
        byte[] ivb = PassPhrase.getByteIV();
        if (Main.chunkStoreEncryptionEnabled) {
            hs = EncryptUtils.encryptCBC(hs, new IvParameterSpec(ivb));
        }
        md = new ObjectMetadata();
        md.addUserMetadata("size", Integer.toString(sz));
        md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
        md.addUserMetadata("lastaccessed", "0");
        md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
        md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
        md.addUserMetadata("compressedsize", Integer.toString(csz));
        md.addUserMetadata("bsize", Integer.toString(arc.uncompressedLength.get()));
        md.addUserMetadata("bcompressedsize", Integer.toString(csz));
        md.addUserMetadata("objects", Integer.toString(arc.getSz()));

        md.setContentType("binary/octet-stream");
        md.setContentLength(hs.length);
        if (md5sum) {
            mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(hs));
            md.setContentMD5(mds);
            md.addUserMetadata("md5sum", mds);
        }
        req = new PutObjectRequest(this.name, "keys/" + haName, new ByteArrayInputStream(hs), md);
        s3Service.putObject(req);
    } catch (Throwable e) {
        SDFSLogger.getLog().fatal("unable to upload " + arc.getID() + " with id " + id, e);
        throw new IOException(e);
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void uploadFile(File f, String to, String pp) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        InputStream in = null;
        while (to.startsWith(File.separator))
            to = to.substring(1);

        String pth = pp + "/" + EncyptUtils.encString(to, Main.chunkStoreEncryptionEnabled);
        SDFSLogger.getLog().info("uploading " + f.getPath() + " to " + to + " pth " + pth);
        boolean isDir = false;
        boolean isSymlink = false;
        if (!OSValidator.isWindows()) {
            isDir = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isDirectory();
            isSymlink = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isSymbolicLink();
        } else {
            isDir = f.isDirectory();
        }
        if (isSymlink) {
            try {
                HashMap<String, String> metaData = new HashMap<String, String>();
                metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                metaData.put("lastmodified", Long.toString(f.lastModified()));
                String slp = EncyptUtils.encString(Files.readSymbolicLink(f.toPath()).toFile().getPath(),
                        Main.chunkStoreEncryptionEnabled);
                metaData.put("symlink", slp);
                ObjectMetadata md = new ObjectMetadata();
                md.setContentType("binary/octet-stream");
                md.setContentLength(pth.getBytes().length);
                md.setUserMetadata(metaData);
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                throw new IOException(e1);
            }
        } else if (isDir) {
            HashMap<String, String> metaData = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
            metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
            metaData.put("lastmodified", Long.toString(f.lastModified()));
            metaData.put("directory", "true");
            ObjectMetadata md = new ObjectMetadata();
            md.setContentType("binary/octet-stream");
            md.setContentLength(pth.getBytes().length);
            md.setUserMetadata(metaData);
            try {
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                SDFSLogger.getLog().error("error uploading", e1);
                throw new IOException(e1);
            }
        } else {
            String rnd = RandomGUID.getGuid();
            File p = new File(this.staged_sync_location, rnd);
            File z = new File(this.staged_sync_location, rnd + ".z");
            File e = new File(this.staged_sync_location, rnd + ".e");
            while (z.exists()) {
                rnd = RandomGUID.getGuid();
                p = new File(this.staged_sync_location, rnd);
                z = new File(this.staged_sync_location, rnd + ".z");
                e = new File(this.staged_sync_location, rnd + ".e");
            }
            try {
                BufferedInputStream is = new BufferedInputStream(new FileInputStream(f));
                BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(p));
                IOUtils.copy(is, os);
                os.flush();
                os.close();
                is.close();
                if (Main.compress) {
                    CompressionUtils.compressFile(p, z);
                    p.delete();
                    p = z;
                }
                byte[] ivb = null;
                if (Main.chunkStoreEncryptionEnabled) {
                    try {
                        ivb = PassPhrase.getByteIV();
                        EncryptUtils.encryptFile(p, e, new IvParameterSpec(ivb));

                    } catch (Exception e1) {
                        throw new IOException(e1);
                    }
                    p.delete();
                    p = e;
                }
                String objName = pth;
                ObjectMetadata md = new ObjectMetadata();
                Map<String, String> umd = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
                md.setUserMetadata(umd);
                md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
                md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                if (ivb != null)
                    md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
                md.addUserMetadata("lastmodified", Long.toString(f.lastModified()));
                if (simpleS3) {
                    md.setContentType("binary/octet-stream");
                    in = new BufferedInputStream(new FileInputStream(p), 32768);
                    try {
                        if (md5sum) {
                            byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                            in.close();
                            String mds = BaseEncoding.base64().encode(md5Hash);
                            md.setContentMD5(mds);
                            md.addUserMetadata("md5sum", mds);
                        }

                    } catch (NoSuchAlgorithmException e2) {
                        SDFSLogger.getLog().error("while hashing", e2);
                        throw new IOException(e2);
                    }

                    in = new FileInputStream(p);
                    md.setContentLength(p.length());
                    try {
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        s3Service.putObject(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                        SDFSLogger.getLog().debug(
                                "uploaded=" + f.getPath() + " lm=" + md.getUserMetadata().get("lastmodified"));
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {

                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                } else {
                    try {
                        md.setContentType("binary/octet-stream");
                        in = new BufferedInputStream(new FileInputStream(p), 32768);
                        byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                        in.close();
                        String mds = BaseEncoding.base64().encode(md5Hash);
                        md.setContentMD5(mds);
                        md.addUserMetadata("md5sum", mds);
                        in = new BufferedInputStream(new FileInputStream(p), 32768);

                        md.setContentLength(p.length());
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        multiPartUpload(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {

                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                }
            } finally {
                try {
                    if (in != null)
                        in.close();
                } finally {
                    p.delete();
                    z.delete();
                    e.delete();
                }
            }
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }

}
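
This example mixes the two metadata setters: setUserMetadata(umd) replaces the entire user-metadata map, after which each addUserMetadata call adds one more entry on top. A minimal illustration (baseMap is a placeholder):

ObjectMetadata md = new ObjectMetadata();
md.setUserMetadata(baseMap);              // replaces the whole user-metadata map
md.addUserMetadata("extra-key", "value"); // adds a single entry to the current map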

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void checkoutObject(long id, int claims) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        if (!this.clustered)
            throw new IOException("volume is not clustered");
        ObjectMetadata om = this.getClaimMetaData(id);
        if (om != null)
            return;
        else {
            String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
            om = s3Service.getObjectMetadata(this.name, "keys/" + haName);
            Map<String, String> md = om.getUserMetadata();
            md.put("objects", Integer.toString(claims));
            if (md.containsKey("deleted")) {
                md.remove("deleted");
            }
            if (md.containsKey("deleted-objects")) {
                md.remove("deleted-objects");
            }
            if (md.containsKey("bsize")) {
                HashBlobArchive.currentLength.addAndGet(Integer.parseInt(md.get("bsize")));
            }
            if (md.containsKey("bcompressedsize")) {
                HashBlobArchive.compressedLength.addAndGet(Integer.parseInt(md.get("bcompressedsize")));
            }
            byte[] msg = Long.toString(System.currentTimeMillis()).getBytes();

            om.setContentLength(msg.length);
            try {
                String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(msg));
                om.setContentMD5(mds);
                om.addUserMetadata("md5sum", mds);
            } catch (Exception e) {
                throw new IOException(e);
            }
            try {
                PutObjectRequest creq = new PutObjectRequest(this.name, this.getClaimName(id),
                        new ByteArrayInputStream(msg), om);
                s3Service.putObject(creq);
            } catch (AmazonS3Exception e1) {
                if (e1.getStatusCode() == 409) {
                    try {
                        s3Service.deleteObject(this.name, this.getClaimName(id));
                        this.checkoutObject(id, claims);
                        return;
                    } catch (Exception e2) {
                        throw new IOException(e2);
                    }
                } else {

                    throw new IOException(e1);
                }
            } catch (Exception e1) {
                // SDFSLogger.getLog().error("error uploading", e1);
                throw new IOException(e1);
            }
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void checkoutFile(String name) throws IOException {
    String pth = "claims/" + name + "/"
            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    try {
        byte[] b = Long.toString(System.currentTimeMillis()).getBytes();
        ObjectMetadata om = new ObjectMetadata();
        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(b));
        om.setContentMD5(mds);
        om.addUserMetadata("md5sum", mds);
        om.setContentLength(b.length);
        PutObjectRequest creq = new PutObjectRequest(this.name, pth, new ByteArrayInputStream(b), om);
        s3Service.putObject(creq);
    } catch (AmazonS3Exception e1) {
        if (e1.getStatusCode() == 409) {
            try {
                s3Service.deleteObject(this.name, pth);
                this.checkoutFile(name);
                return;
            } catch (Exception e2) {
                throw new IOException(e2);
            }
        } else {

            throw new IOException(e1);
        }
    } catch (Exception e1) {
        // SDFSLogger.getLog().error("error uploading", e1);
        throw new IOException(e1);
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}