Example usage for com.mongodb.gridfs GridFSInputFile getOutputStream

List of usage examples for com.mongodb.gridfs GridFSInputFile getOutputStream

Introduction

On this page you can find example usage for com.mongodb.gridfs GridFSInputFile getOutputStream.

Prototype

public OutputStream getOutputStream() 

Document

After retrieving this java.io.OutputStream, this object can accept data written successively to the output stream.
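
A minimal sketch of the basic pattern, assuming the legacy mongo-java-driver GridFS API and a mongod running on localhost; the database name "test" and filename "hello.txt" are placeholders. Closing the returned stream flushes the final chunk and saves the file:

import com.mongodb.DB;
import com.mongodb.MongoClient;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSInputFile;

import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

public class GridFSWriteExample {
    public static void main(String[] args) throws Exception {
        MongoClient mongo = new MongoClient("localhost", 27017);
        DB db = mongo.getDB("test");
        GridFS fs = new GridFS(db, "fs");

        // createFile(String) returns a GridFSInputFile with no content yet;
        // getOutputStream() lets us stream the content in ourselves.
        GridFSInputFile file = fs.createFile("hello.txt");
        file.setContentType("text/plain");
        OutputStream os = file.getOutputStream();
        try {
            os.write("hello, GridFS".getBytes(StandardCharsets.UTF_8));
        } finally {
            // Closing the stream finalizes and persists the file.
            os.close();
        }
        mongo.close();
    }
}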

Usage

From source file:net.kamradtfamily.mongorest.GridfsServlet.java

License:GNU General Public License

@Override
protected void doPut(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException {

    log.fine("doPut()");

    InputStream tmp = req.getInputStream();
    InputStream is = new BufferedInputStream(tmp);
    String db_name = req.getParameter("dbname");
    String bucket_name = req.getParameter("bucketname");
    if (db_name == null || bucket_name == null) {
        String[] names = req2mongonames(req);
        if (names != null) {
            db_name = names[0];
            bucket_name = names[1];
        }
        if (db_name == null) {
            error(res, SC_BAD_REQUEST, Status.get("param name missing"));
            return;
        }
    }

    if (bucket_name == null)
        bucket_name = "fs";

    String file_name = req.getParameter("filename");

    if (file_name == null) {
        error(res, SC_BAD_REQUEST, Status.get("param name missing"));
        return;
    }

    DB db = mongo.getDB(db_name);

    String fs_cache_key = db_name + bucket_name;
    GridFS fs = fs_cache.get(fs_cache_key);
    if (fs == null) {
        fs = new GridFS(db, bucket_name);
        fs_cache.put(fs_cache_key, fs);
    }

    GridFSDBFile db_file_old = fs.findOne(file_name);
    if (db_file_old != null) {
        error(res, SC_BAD_REQUEST, Status.get("file already exists, use POST"));
        return;
    }

    String ct = req.getContentType();
    GridFSInputFile db_file = fs.createFile(file_name);
    if (ct != null)
        db_file.setContentType(ct);
    OutputStream os = db_file.getOutputStream();

    final int len = 4096;
    byte[] data = new byte[len];
    int n;
    while ((n = is.read(data, 0, len)) > 0) {
        os.write(data, 0, n);
    }
    os.flush();
    os.close();

    is.close();

    out_json(req, Status.OK);

}
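
Note: on Java 9 and later, the manual 4 KB copy loop above can be replaced with is.transferTo(os), which performs the same buffered copy and returns the number of bytes transferred.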

From source file:org.exist.mongodb.xquery.gridfs.Store.java

License:Open Source License

void writeCompressed(GridFSInputFile gfsFile, StopWatch stopWatch, Item content, int dataType)
        throws NoSuchAlgorithmException, IOException, XPathException {
    // Store data compressed, add statistics
    try (OutputStream stream = gfsFile.getOutputStream()) {
        MessageDigest md = MessageDigest.getInstance("MD5");
        CountingOutputStream cosGZ = new CountingOutputStream(stream);
        GZIPOutputStream gos = new GZIPOutputStream(cosGZ);
        DigestOutputStream dos = new DigestOutputStream(gos, md);
        CountingOutputStream cosRaw = new CountingOutputStream(dos);

        stopWatch.start();
        ContentSerializer.serialize(content, context, cosRaw);
        cosRaw.flush();
        cosRaw.close();
        stopWatch.stop();

        long nrBytesRaw = cosRaw.getByteCount();
        long nrBytesGZ = cosGZ.getByteCount();
        String checksum = Hex.encodeHexString(dos.getMessageDigest().digest());

        BasicDBObject info = new BasicDBObject();
        info.put(Constants.EXIST_COMPRESSION, GZIP);
        info.put(Constants.EXIST_ORIGINAL_SIZE, nrBytesRaw);
        info.put(Constants.EXIST_ORIGINAL_MD5, checksum);
        info.put(Constants.EXIST_DATATYPE, dataType);
        info.put(Constants.EXIST_DATATYPE_TEXT, Type.getTypeName(dataType));

        gfsFile.setMetaData(info);

        LOG.info("original_md5:" + checksum);
        LOG.info("compression ratio:" + ((100l * nrBytesGZ) / nrBytesRaw));

    }
}
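
Note the ordering of the chained streams: content is serialized into cosRaw (which counts the uncompressed bytes), passes through the DigestOutputStream (so the MD5 is computed over the raw data, before compression), is gzip-compressed, and is counted again by cosGZ before reaching the GridFS stream. Closing the outermost cosRaw closes the whole chain, which finishes the gzip trailer before the try-with-resources block closes the GridFS stream.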

From source file:org.exist.mongodb.xquery.gridfs.Store.java

License:Open Source License

void writeRaw(GridFSInputFile gfsFile, StopWatch stopWatch, Item content) throws XPathException, IOException {
    // Write data as is
    try (OutputStream stream = gfsFile.getOutputStream()) {
        stopWatch.start();
        ContentSerializer.serialize(content, context, stream);
        stream.flush();
        stopWatch.stop();
    }
}

From source file:org.s1.mongodb.cluster.GridFSFileStorage.java

License:Apache License

@Override
public FileStorage.FileWriteBean createFileWriteBean(Id id, FileStorage.FileMetaBean meta) {
    meta.setLastModified(new Date());
    meta.setCreated(new Date());
    GridFS fs = new GridFS(MongoDBConnectionHelper.getConnection(id.getDatabase()), id.getCollection());
    fs.remove(id.getEntity());

    GridFSInputFile gfsFile = fs.createFile(id.getEntity());
    gfsFile.setContentType(meta.getContentType());
    gfsFile.setMetaData(MongoDBFormat.fromMap(meta.toMap()));

    GridFSFileWriteBean gridFSFileWriteBean = new GridFSFileWriteBean(id, gfsFile.getOutputStream(), meta);
    gridFSFileWriteBean.gfsFile = gfsFile;
    return gridFSFileWriteBean;
}

From source file:se.inera.axel.shs.broker.messagestore.internal.MongoMessageStoreServiceIT.java

License:Open Source License

private void saveMessage(ShsMessageEntry entry, ShsMessage message, DB db) {
    GridFS gridFs = new GridFS(db);
    GridFSInputFile inputFile = gridFs.createFile(entry.getId());

    OutputStream out = null;
    try {
        out = inputFile.getOutputStream();

        new ShsMessageMarshaller().marshal(message, out);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException e) {
                // ignore
            }
        }
    }
}
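
Because closing the stream is what finalizes the file in this driver, the finally block here ensures the stream is released even if marshalling fails; on Java 7 and later the same cleanup can be written more compactly with try-with-resources, as in the Store.java examples above.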

From source file:tango.mongo.ImageManager.java

License:Open Source License

public synchronized boolean saveInputImage(ObjectId xpId, ObjectId field_id, int fileRank, ImageHandler img,
        boolean flushImage) {
    if (img == null)
        return false;

    //IJ.log("file: "+img.getTitle()+" size:"+img.getSizeInMb()+ " available memory:"+Core.getAvailableMemory()+ " please free memory");

    double scaleZ = img.getScaleZ();
    String unit = img.getUnit();
    String title = img.getTitle();
    MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean();
    try {
        byte[] data = img.getBinaryData();
        if (data == null) {
            IJ.log("couldn't save image:" + title);
            return false;
        }
        if (flushImage)
            img.flush();
        GridFSInputFile gfi = this.gfsField.get(xpId).createFile(data);
        data = null;
        gfi.setFilename(title);
        gfi.put("field_id", field_id);
        gfi.put("fileRank", fileRank);
        gfi.put("pixelDepth", scaleZ);
        gfi.put("unit", unit);
        removeInputImage(xpId, field_id, fileRank);
        gfi.save();
        gfi.getOutputStream().close();
        return true;
    } catch (Exception e) {
        exceptionPrinter.print(e, "Error while saving image: " + title, true);
    } catch (OutOfMemoryError e) {
        int MEGABYTE = (1024 * 1024);
        MemoryUsage heapUsage = memoryBean.getHeapMemoryUsage();
        long maxMemory = heapUsage.getMax() / MEGABYTE;
        long usedMemory = heapUsage.getUsed() / MEGABYTE;
        IJ.log("Error while saving image:" + title + " Out of memory. Memory Use :" + usedMemory + "M/"
                + maxMemory + "M");
    }
    return false;
}
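
In this example the data is supplied up front via createFile(byte[]), so save() writes the content; the getOutputStream().close() call afterwards appears to be a belt-and-braces step to flush any remaining buffered chunk, since in this driver closing the output stream is what finalizes the file. The same pattern recurs in the remaining ImageManager.java examples below.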

From source file:tango.mongo.ImageManager.java

License:Open Source License

public synchronized void saveFieldThumbnail(ObjectId field_id, ImageHandler img, int sizeX, int sizeY) {
    GridFSInputFile gfi = this.gfsFieldThumbnail.createFile(img.getThumbNail(sizeX, sizeY));
    BasicDBObject query = new BasicDBObject("field_id", field_id);
    gfsFieldThumbnail.remove(query);
    gfi.put("field_id", field_id);
    gfi.save();
    try {
        gfi.getOutputStream().close();
    } catch (Exception e) {
        exceptionPrinter.print(e, "", Core.GUIMode);
    }
}

From source file:tango.mongo.ImageManager.java

License:Open Source License

public synchronized void saveNucleusImage(ObjectId xpId, ObjectId nucleus_id, int fileIdx, int fileType,
        ImageHandler img) {
    if (img == null)
        return;
    removeNucleusImage(xpId, nucleus_id, fileIdx, fileType);
    try {
        GridFSInputFile gfi = this.gfsNucleus.get(xpId).createFile(img.getBinaryData());
        gfi.setFilename(img.getImagePlus().getShortTitle());
        gfi.put("nucleus_id", nucleus_id);
        gfi.put("fileIdx", fileIdx);
        gfi.put("fileType", fileType);
        gfi.put("pixelDepth", img.getScaleZ());
        gfi.put("unit", img.getUnit());
        gfi.save();
        if (gfi != null)
            gfi.getOutputStream().close();
    } catch (Exception e) {
        exceptionPrinter.print(e, "Error while saving image:" + img.getTitle(), Core.GUIMode);
    }
}

From source file:tango.mongo.ImageManager.java

License:Open Source License

public synchronized void saveChannelImageThumbnail(ObjectId nucleus_id, int fileIdx, ImageHandler img,
        int sizeX, int sizeY, ImageInt mask) {
    GridFSInputFile gfi = this.gfsNucleusThumbnail.createFile(img.getThumbNail(sizeX, sizeY, mask));
    BasicDBObject query = new BasicDBObject("nucleus_id", nucleus_id).append("fileRank", fileIdx);
    gfsNucleusThumbnail.remove(query);
    gfi.put("nucleus_id", nucleus_id);
    gfi.put("fileRank", fileIdx);
    gfi.save();
    try {
        gfi.getOutputStream().close();
    } catch (Exception e) {
        exceptionPrinter.print(e, "", Core.GUIMode);
    }
}

From source file:tango.mongo.ImageManager.java

License:Open Source License

private void transferFiles(String queryField, ObjectId queryValue, GridFS gfsSource, GridFS gfsDestination) {
    BasicDBObject query = new BasicDBObject(queryField, queryValue);
    List<GridFSDBFile> files = gfsSource.find(query); // FIXME: does this load everything into memory?
    for (GridFSDBFile file : files) {
        GridFSInputFile gfi = gfsDestination.createFile(file.getInputStream(), file.getFilename());
        gfi.put(queryField, queryValue);
        gfi.put("fileIdx", file.get("fileIdx"));
        gfi.put("fileType", file.get("fileType"));
        gfi.put("pixelDepth", file.get("pixelDepth"));
        gfi.put("unit", file.get("unit"));
        gfi.save();
        if (gfi != null)
            try {
                gfi.getOutputStream().close();
            } catch (IOException ex) {
                Logger.getLogger(ImageManager.class.getName()).log(Level.SEVERE, null, ex);
            }
    }
    gfsSource.remove(query);
}