List of usage examples for com.mongodb.gridfs.GridFSInputFile.save()
@Override public void save()
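Before the project-specific examples below, here is a minimal, self-contained sketch of the basic save() flow using the legacy mongo-java-driver GridFS API. The host, database name, bucket name, file name, and the extra "example" metadata field are placeholders chosen for illustration, not taken from any of the examples.

import com.mongodb.DB;
import com.mongodb.MongoClient;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSInputFile;

import java.nio.charset.StandardCharsets;

public class GridFSSaveSketch {
    public static void main(String[] args) {
        // Placeholder connection settings; adjust for your deployment.
        MongoClient mongoClient = new MongoClient("localhost", 27017);
        try {
            DB db = mongoClient.getDB("test");             // legacy DB handle used by this GridFS API
            GridFS gridFS = new GridFS(db, "demo_bucket"); // bucket name is arbitrary here

            byte[] payload = "hello gridfs".getBytes(StandardCharsets.UTF_8);
            GridFSInputFile inputFile = gridFS.createFile(payload);
            inputFile.setFilename("hello.txt");
            inputFile.put("example", true);                // optional custom metadata field
            inputFile.save();                              // writes demo_bucket.files / demo_bucket.chunks

            System.out.println("stored with id " + inputFile.getId());
        } finally {
            mongoClient.close();
        }
    }
}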
From source file:com.foodtruckdata.mongodb.UsersInput.java
private void storeFile(String filepath, String filetype, ObjectId truck_id) {
    try {
        GridFS gridFS = new GridFS(mongoDB);
        InputStream file_stream = getFTPInputStream(filepath);
        // Pass the FTP stream directly to createFile(); consuming it beforehand
        // would leave nothing for save() to write.
        GridFSInputFile in = gridFS.createFile(file_stream);
        in.setFilename(filepath);
        in.put("TruckID", truck_id);
        in.put("FileType", filetype);
        in.save();
    } catch (IOException ex) {
        Logger.getLogger(UsersInput.class.getName()).log(Level.SEVERE, null, ex);
    }
}
From source file:com.glaf.core.test.MongoDBGridFSThread.java
License:Apache License
public void run() {
    if (file.exists() && file.isFile()) {
        String path = file.getAbsolutePath();
        path = path.replace('\\', '/');
        if (StringUtils.contains(path, "/temp/") || StringUtils.contains(path, "/tmp/")
                || StringUtils.contains(path, "/logs/") || StringUtils.contains(path, "/work/")
                || StringUtils.endsWith(path, ".log") || StringUtils.endsWith(path, ".class")) {
            return;
        }

        int retry = 0;
        boolean success = false;
        byte[] bytes = null;
        GridFSInputFile inputFile = null;
        while (retry < 1 && !success) {
            try {
                retry++;
                bytes = FileUtils.getBytes(file);
                if (bytes != null) {
                    inputFile = gridFS.createFile(bytes);
                    DBObject metadata = new BasicDBObject();
                    metadata.put("path", path);
                    metadata.put("filename", file.getName());
                    metadata.put("size", bytes.length);
                    inputFile.setMetaData(metadata);
                    inputFile.setId(path);
                    inputFile.setFilename(file.getName());
                    inputFile.save();
                    bytes = null;
                    success = true;
                    logger.debug(file.getAbsolutePath() + " save ok.");
                }
            } catch (Exception ex) {
                logger.error(ex);
                ex.printStackTrace();
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            } finally {
                bytes = null;
                inputFile = null;
            }
        }
    }
}
From source file:com.hangum.tadpole.mongodb.core.query.MongoDBQuery.java
License:Open Source License
/**
 * Insert files into GridFS.
 *
 * @param userDB
 * @param strBucket
 * @param fileList
 * @throws Exception
 */
public static void insertGridFS(UserDBDAO userDB, String strBucket, List<String> fileList) throws Exception {
    DB mongoDb = findDB(userDB);
    GridFS gridFs = null;
    if ("".equals(strBucket))
        gridFs = new GridFS(mongoDb);
    else
        gridFs = new GridFS(mongoDb, strBucket);

    for (String strFile : fileList) {
        String saveFileName = strFile.substring(strFile.lastIndexOf(File.separator) + 1);

        GridFSInputFile gfsFile = gridFs.createFile(new File(strFile));
        gfsFile.setFilename(saveFileName);
        gfsFile.save();
    }
}
From source file:com.hangum.tadpole.mongodb.core.test.MongoTestGridFS.java
License:Open Source License
private static void saveImage(DB db) throws Exception {
    String newFileName = "currentop";
    File imageFile = new File("c:/temp/currentop.png");

    GridFS gfsPhoto = new GridFS(db);
    GridFSInputFile gfsFile = gfsPhoto.createFile(imageFile);
    gfsFile.setFilename(newFileName);
    gfsFile.save();
}
From source file:com.ibm.ws.lars.rest.PersistenceBean.java
License:Apache License
/**
 * @param name the attachment name
 * @param contentType the MIME type of the attachment content
 * @param attachmentContentStream stream of the content to store
 * @return metadata describing the stored attachment content
 */
@Override
public AttachmentContentMetadata createAttachmentContent(String name, String contentType,
        InputStream attachmentContentStream) {
    // Do not specify a bucket (so the data will be stored in fs.files and fs.chunks)
    GridFSInputFile gfsFile = gridFS.createFile(attachmentContentStream);
    ObjectId id = new ObjectId();
    gfsFile.setContentType(contentType);
    gfsFile.setId(id);
    String filename = id.toString();
    gfsFile.setFilename(filename);
    gfsFile.save();

    return new AttachmentContentMetadata(gfsFile.getFilename(), gfsFile.getLength());
}
From source file:com.ikanow.infinit.e.api.social.sharing.ShareHandler.java
License:Open Source License
/**
 * Saves bytes into a new gridfile.
 *
 * @param bytes
 * @return the id of the gridfile, or null if the save failed
 */
private ObjectId saveGridFile(byte[] bytes) {
    try {
        GridFSInputFile file = DbManager.getSocial().getShareBinary().createFile(bytes);
        file.save();
        return (ObjectId) file.getId();
    } catch (Exception ex) {
        // swallow the error and fall through to return null
    }
    return null;
}
From source file:com.ikanow.infinit.e.api.social.sharing.ShareHandler.java
License:Open Source License
/**
 * Updates a gridfile with new data. If binaryId is null, the old gridfile
 * did not exist, so a new one is simply created.
 *
 * If it is not null, the old entry is removed after the new one is saved.
 *
 * @param binaryId
 * @param bytes
 * @return the id of the new gridfile, or the original binaryId if the save failed
 */
private ObjectId updateGridFile(ObjectId binaryId, byte[] bytes) {
    try {
        // create new file
        GridFSInputFile file = DbManager.getSocial().getShareBinary().createFile(bytes);
        file.save();

        // remove old file if it exists (this way, if the save throws an exception we don't lose the old file)
        if (binaryId != null)
            DbManager.getSocial().getShareBinary().remove(binaryId);

        return (ObjectId) file.getId();
    } catch (Exception ex) {
        // swallow the error and fall back to the original id
    }
    return binaryId;
}
From source file:com.imaginea.mongodb.services.GridFSServiceImpl.java
License:Apache License
/**
 * Service implementation for uploading a file to GridFS.
 *
 * @param dbName Name of the database
 * @param bucketName Name of the GridFS bucket
 * @param formData formDataBodyPart of the uploaded file
 * @param inputStream inputStream of the uploaded file
 * @param dbInfo Mongo DB configuration provided by the user to connect to.
 * @return Success message with additional file details such as name, size,
 *         download url & deletion url as a JSON Array string.
 */
public JSONArray insertFile(String dbName, String bucketName, String dbInfo, InputStream inputStream,
        FormDataBodyPart formData)
        throws DatabaseException, CollectionException, DocumentException, ValidationException {
    mongoInstance = mongoInstanceProvider.getMongoInstance();
    if (dbName == null) {
        throw new EmptyDatabaseNameException("Database name is null");
    }
    if (dbName.equals("")) {
        throw new EmptyDatabaseNameException("Database Name Empty");
    }
    if (bucketName == null) {
        throw new EmptyCollectionNameException("Bucket name is null");
    }
    if (bucketName.equals("")) {
        throw new EmptyCollectionNameException("Bucket Name Empty");
    }

    JSONArray result = new JSONArray();
    FormDataContentDisposition fileData = formData.getFormDataContentDisposition();
    try {
        if (!mongoInstance.getDatabaseNames().contains(dbName)) {
            throw new UndefinedDatabaseException("DB [" + dbName + "] DOES NOT EXIST");
        }

        GridFS gridFS = new GridFS(mongoInstance.getDB(dbName), bucketName);
        GridFSInputFile fsInputFile = gridFS.createFile(inputStream, fileData.getFileName());
        fsInputFile.setContentType(formData.getMediaType().toString());
        fsInputFile.save();

        JSONObject obj = new JSONObject();
        obj.put("name", fsInputFile.getFilename());
        obj.put("size", fsInputFile.getLength());
        obj.put("url", String.format("services/%s/%s/gridfs/getfile?id=%s&download=%s&dbInfo=%s&ts=%s", dbName,
                bucketName, fsInputFile.getId().toString(), false, dbInfo, new Date()));
        obj.put("delete_url", String.format("services/%s/%s/gridfs/dropfile?id=%s&dbInfo=%s&ts=%s", dbName,
                bucketName, fsInputFile.getId().toString(), dbInfo, new Date().getTime()));
        obj.put("delete_type", "GET");

        result.put(obj);
    } catch (Exception e) {
        CollectionException ce = new CollectionException(ErrorCodes.UPLOAD_FILE_EXCEPTION,
                "UPLOAD_FILE_EXCEPTION", e.getCause());
        throw ce;
    }
    return result;
}
From source file:com.impetus.client.mongodb.MongoDBClient.java
License:Apache License
/**
 * Save GridFS file.
 *
 * @param gfsInputFile the GridFS input file
 * @param m the entity metadata
 */
private void saveGridFSFile(GridFSInputFile gfsInputFile, EntityMetadata m) {
    try {
        DBCollection coll = mongoDb.getCollection(m.getTableName() + MongoDBUtils.FILES);
        createUniqueIndexGFS(coll, ((AbstractAttribute) m.getIdAttribute()).getJPAColumnName());
        gfsInputFile.save();
        log.info("Input GridFS file: " + gfsInputFile.getFilename() + " is saved successfully in "
                + m.getTableName() + MongoDBUtils.CHUNKS + " and metadata in " + m.getTableName()
                + MongoDBUtils.FILES);
    } catch (MongoException e) {
        log.error("Error in saving GridFS file in " + m.getTableName() + MongoDBUtils.FILES + " or "
                + m.getTableName() + MongoDBUtils.CHUNKS + " collections.");
        throw new KunderaException("Error in saving GridFS file in " + m.getTableName() + MongoDBUtils.FILES
                + " or " + m.getTableName() + MongoDBUtils.CHUNKS + " collections. Caused By: ", e);
    }

    try {
        gfsInputFile.validate();
        log.info("Input GridFS file: " + gfsInputFile.getFilename() + " is validated.");
    } catch (MongoException e) {
        log.error("Error in validating GridFS file in " + m.getTableName() + MongoDBUtils.FILES + " collection.");
        throw new KunderaException("Error in validating GridFS file in " + m.getTableName() + MongoDBUtils.FILES
                + " collection. Caused By: ", e);
    }
}
From source file:com.linuxbox.enkive.docstore.mongogrid.MongoGridDocStoreService.java
License:Open Source License
@Override
protected StoreRequestResult storeKnownHash(Document document, byte[] hash, byte[] data, int length)
        throws DocStoreException {
    try {
        final String identifier = getIdentifierFromHash(hash);
        final int shardKey = getShardIndexFromHash(hash);

        try {
            documentLockingService.lockWithRetries(identifier, DocStoreConstants.LOCK_TO_STORE, LOCK_RETRIES,
                    LOCK_RETRY_DELAY_MILLISECONDS);
        } catch (LockAcquisitionException e) {
            throw new DocStoreException("could not acquire lock to store document \"" + identifier + "\"");
        }

        try {
            if (fileExists(identifier)) {
                return new StoreRequestResultImpl(identifier, true, shardKey);
            }

            GridFSInputFile newFile = gridFS.createFile(new ByteArrayInputStream(data, 0, length));
            newFile.setFilename(identifier);
            setFileMetaData(newFile, document, shardKey);
            newFile.save();

            try {
                newFile.validate();
            } catch (MongoException e) {
                throw new DocStoreException("file saved to GridFS did not validate", e);
            }

            return new StoreRequestResultImpl(identifier, false, shardKey);
        } finally {
            documentLockingService.releaseLock(identifier);
        }
    } catch (Exception e) {
        LOGGER.error("Could not save document to MongoDB GridFS", e);
        throw new DocStoreException(e);
    }
}