List of usage examples for com.mongodb.gridfs GridFSInputFile setFilename
public void setFilename(final String filename)
From source file:com.hangum.tadpole.mongodb.core.test.MongoTestGridFS.java
License:Open Source License
private static void saveImage(DB db) throws Exception {
    // Store a local PNG into the default GridFS bucket under a short logical name.
    final String storedName = "currentop";
    final File sourceImage = new File("c:/temp/currentop.png");

    GridFS photoStore = new GridFS(db);
    GridFSInputFile stored = photoStore.createFile(sourceImage);
    stored.setFilename(storedName);
    stored.save();
}
From source file:com.ibm.ws.lars.rest.PersistenceBean.java
License:Apache License
/** * @param attachmentContentStream//from w ww . ja va 2s . c o m * @return */ @Override public AttachmentContentMetadata createAttachmentContent(String name, String contentType, InputStream attachmentContentStream) { // Do not specify a bucket (so the data will be stored in fs.files and fs.chunks) GridFSInputFile gfsFile = gridFS.createFile(attachmentContentStream); ObjectId id = new ObjectId(); gfsFile.setContentType(contentType); gfsFile.setId(id); String filename = id.toString(); gfsFile.setFilename(filename); gfsFile.save(); return new AttachmentContentMetadata(gfsFile.getFilename(), gfsFile.getLength()); }
From source file:com.impetus.client.mongodb.DefaultMongoDBDataHandler.java
License:Apache License
/**
 * Builds a {@link GridFSInputFile} from the given entity: the entity's single
 * {@code @Lob} attribute supplies the file content (and its attribute name
 * becomes the GridFS filename), while every other attribute is collected into
 * the file's metadata document.
 *
 * @param gfs the GridFS instance to create the file in
 * @param m entity metadata for the persistent class
 * @param entity the entity instance being stored
 * @param kunderaMetadata the Kundera metadata registry
 * @param isUpdate when true, the id attribute's metadata value is replaced
 *            with a fresh {@link ObjectId} instead of the entity's value
 * @return the populated GridFS input file
 * @throws IllegalStateException if the entity declares no {@code @Lob} attribute
 */
public GridFSInputFile getGFSInputFileFromEntity(GridFS gfs, EntityMetadata m, Object entity,
        KunderaMetadata kunderaMetadata, boolean isUpdate) {
    MetamodelImpl metaModel = (MetamodelImpl) kunderaMetadata.getApplicationMetadata()
            .getMetamodel(m.getPersistenceUnit());
    EntityType entityType = metaModel.entity(m.getEntityClazz());
    GridFSInputFile gridFSInputFile = null;
    DBObject gfsMetadata = new BasicDBObject();
    Set<Attribute> columns = entityType.getAttributes();
    for (Attribute column : columns) {
        boolean isLob = ((Field) column.getJavaMember()).getAnnotation(Lob.class) != null;
        if (isLob) {
            // The @Lob field supplies the file content; its attribute name is the filename.
            gridFSInputFile = createGFSInputFile(gfs, entity, (Field) column.getJavaMember());
            gridFSInputFile.setFilename(column.getName());
        } else if (isUpdate && column.getName().equals(m.getIdAttribute().getName())) {
            // On update, store a fresh ObjectId for the id column instead of the entity value.
            gfsMetadata.put(((AbstractAttribute) column).getJPAColumnName(), new ObjectId());
        } else {
            DocumentObjectMapper.extractFieldValue(entity, gfsMetadata, column);
        }
    }
    // FIX: previously gridFSInputFile was dereferenced unconditionally, throwing a bare
    // NullPointerException when the entity declares no @Lob attribute. Fail with a clear message.
    if (gridFSInputFile == null) {
        throw new IllegalStateException("No @Lob attribute found on entity class "
                + m.getEntityClazz() + "; cannot build a GridFS input file");
    }
    gridFSInputFile.setMetaData(gfsMetadata);
    return gridFSInputFile;
}
From source file:com.kurento.kmf.repository.internal.repoimpl.mongo.MongoRepository.java
License:Open Source License
@Override
public RepositoryItem createRepositoryItem() {
    // A new GridFS entry is named after its own driver-generated id.
    GridFSInputFile newEntry = gridFS.createFile();
    newEntry.setFilename(newEntry.getId().toString());
    return createRepositoryItem(newEntry);
}
From source file:com.linuxbox.enkive.docstore.mongogrid.MongoGridDocStoreService.java
License:Open Source License
/**
 * Stores a document whose content hash is already known. Acquires the
 * per-identifier lock, skips the write if identical content already exists
 * (duplicate result), otherwise writes and validates a new GridFS file.
 * Any failure — including the inner lock/validate errors — is logged and
 * rethrown wrapped in a DocStoreException.
 */
@Override
protected StoreRequestResult storeKnownHash(Document document, byte[] hash, byte[] data, int length)
        throws DocStoreException {
    try {
        final String docId = getIdentifierFromHash(hash);
        final int shard = getShardIndexFromHash(hash);

        // Serialize concurrent stores of the same content through the lock service.
        try {
            documentLockingService.lockWithRetries(docId, DocStoreConstants.LOCK_TO_STORE, LOCK_RETRIES,
                    LOCK_RETRY_DELAY_MILLISECONDS);
        } catch (LockAcquisitionException e) {
            throw new DocStoreException("could not acquire lock to store document \"" + docId + "\"");
        }

        try {
            // Another writer may have stored identical content already.
            if (fileExists(docId)) {
                return new StoreRequestResultImpl(docId, true, shard);
            }
            GridFSInputFile gridFile = gridFS.createFile(new ByteArrayInputStream(data, 0, length));
            gridFile.setFilename(docId);
            setFileMetaData(gridFile, document, shard);
            gridFile.save();
            try {
                gridFile.validate();
            } catch (MongoException e) {
                throw new DocStoreException("file saved to GridFS did not validate", e);
            }
            return new StoreRequestResultImpl(docId, false, shard);
        } finally {
            documentLockingService.releaseLock(docId);
        }
    } catch (Exception e) {
        // NOTE: also catches and rewraps the DocStoreExceptions thrown above, after logging.
        LOGGER.error("Could not save document to MongoDB GridFS", e);
        throw new DocStoreException(e);
    }
}
From source file:com.linuxbox.enkive.docstore.mongogrid.MongoGridDocStoreService.java
License:Open Source License
/** * Since we don't know the name, we'll have to save the data before we can * determine the name. So save it under a random UUID, calculate the name, * and if the name is not already in the file system then rename it. * Otherwise delete it./* w ww. j a v a2 s .co m*/ * * @throws DocSearchException */ @Override protected StoreRequestResult storeAndDetermineHash(Document document, HashingInputStream inputStream) throws DocStoreException { final String temporaryName = java.util.UUID.randomUUID().toString(); GridFSInputFile newFile = gridFS.createFile(inputStream); newFile.setFilename(temporaryName); setFileMetaData(newFile, document, -1); newFile.save(); try { newFile.validate(); } catch (MongoException e) { throw new DocStoreException("file saved to GridFS did not validate", e); } final byte[] actualHash = inputStream.getDigest(); final String actualName = getIdentifierFromHash(actualHash); final int shardKey = getShardIndexFromHash(actualHash); try { try { documentLockingService.lockWithRetries(actualName, DocStoreConstants.LOCK_TO_STORE, LOCK_RETRIES, LOCK_RETRY_DELAY_MILLISECONDS); } catch (LockAcquisitionException e) { gridFS.remove((ObjectId) newFile.getId()); throw new DocStoreException("could not acquire lock to store document \"" + actualName + "\""); } // so now we're in "control" of that file try { if (fileExists(actualName)) { gridFS.remove(temporaryName); return new StoreRequestResultImpl(actualName, true, shardKey); } else { final boolean wasRenamed = setFileNameAndShardKey(newFile.getId(), actualName, shardKey); if (!wasRenamed) { throw new DocStoreException("expected to find and rename a GridFS file with id \"" + newFile.getId() + "\" but could not find it"); } return new StoreRequestResultImpl(actualName, false, shardKey); } } finally { documentLockingService.releaseLock(actualName); } } catch (LockServiceException e) { throw new DocStoreException(e); } }
From source file:com.mongo.gridfs.GridFSFileLoader.java
License:Open Source License
public void loadFile(String fileToLoad, String contentType, Map<String, String> metaData) { InputStream is = new GridFSFileValidator().loadFile(fileToLoad); GridFSInputFile file = gridfs.createFile(is); file.setContentType(contentType);// w w w .ja v a 2 s . c o m file.setFilename(fileToLoad); BasicDBObject metadata = new BasicDBObject(); if (metaData != null) { for (Entry<String, String> entry : metaData.entrySet()) { metadata.put(entry.getKey(), entry.getValue()); } } file.setMetaData(metadata); file.save(); }
From source file:com.photon.phresco.service.impl.DbService.java
License:Apache License
protected void saveFileToDB(String id, InputStream is) throws PhrescoException { if (isDebugEnabled) { LOGGER.debug("DbService.saveFileToDB:Entry"); if (StringUtils.isEmpty(id)) { LOGGER.warn("DbService.saveFileToDB", STATUS_BAD_REQUEST, "message=\"id is empty\""); throw new PhrescoException("id is empty"); }// w w w. java2 s. c o m LOGGER.info("DbService.saveFileToDB", "id=\"" + id + "\""); } getGridFs().remove(id); GridFSInputFile file = getGridFs().createFile(is); file.setFilename(id); file.save(); if (isDebugEnabled) { LOGGER.debug("DbService.saveFileToDB:Exit"); } }
From source file:com.pubkit.platform.persistence.impl.ApplicationDaoImpl.java
License:Open Source License
public String saveFile(byte[] fileData, String fileName, String contentType) { GridFS gridFs = new GridFS(mongoTemplate.getDb(), PK_FILES_BUCKET); GridFSInputFile gfsFile = gridFs.createFile(fileData); gfsFile.setFilename(fileName); gfsFile.setContentType(contentType); gfsFile.save();//from w w w. j av a 2 s.com LOG.info("Saved new file :" + fileName); return gfsFile.getId().toString(); }
From source file:com.spring.tutorial.entitites.FileUploader.java
/**
 * Reads the uploaded multipart "file" part, stages it in a temp file, stores
 * it in a per-session GridFS bucket, and records a companion metadata document
 * in the session's "_files_meta" collection.
 *
 * @return "success" on success, or "message:&lt;reason&gt;" on any failure
 *         (contract preserved from the original implementation)
 */
public String upload() throws IOException, ServletException, FacebookException {
    InputStream fileContent = null;
    MongoClient mongoClient = null;
    try {
        final Part filePart = request.getPart("file");
        fileContent = filePart.getInputStream();

        // FIX: the client was previously instantiated twice (leaking the first
        // instance) and never closed. Create once; close in finally.
        mongoClient = new MongoClient();
        DB db = mongoClient.getDB("fou");
        char[] pass = "mongo".toCharArray();
        boolean auth = db.authenticate("admin", pass);

        // Stage the upload in a temp file so GridFS can read it from disk.
        final File file = File.createTempFile("fileToStore", "tmp");
        file.deleteOnExit();
        // FIX: the FileOutputStream was never closed (resource leak).
        try (FileOutputStream fout = new FileOutputStream(file)) {
            int read;
            byte[] bytes = new byte[1024];
            while ((read = fileContent.read(bytes)) != -1) {
                fout.write(bytes, 0, read);
            }
        }

        GridFS gridFS = new GridFS(db, request.getSession().getAttribute("id") + "_files");
        GridFSInputFile gfsInputFile = gridFS.createFile(file);
        gfsInputFile.setFilename(filePart.getSubmittedFileName());
        gfsInputFile.save();

        // Companion metadata document describing the stored file.
        DBCollection collection = db.getCollection(request.getSession().getAttribute("id") + "_files_meta");
        BasicDBObject metaDocument = new BasicDBObject();
        metaDocument.append("name", filePart.getSubmittedFileName());
        metaDocument.append("size", filePart.getSize());
        metaDocument.append("content-type", filePart.getContentType());
        metaDocument.append("file-id", gfsInputFile.getId());
        metaDocument.append("tags", request.getParameter("tags"));
        metaDocument.append("description", request.getParameter("description"));
        DateFormat dateFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
        metaDocument.append("last_modified", dateFormat.format(new Date()));
        collection.insert(metaDocument);
    } catch (Exception e) {
        // Preserved contract: callers receive "message:<reason>" on any failure.
        return "message:" + e.getMessage();
    } finally {
        // FIX: removed the dead, never-assigned `output` stream; close what we opened.
        if (fileContent != null) {
            fileContent.close();
        }
        if (mongoClient != null) {
            mongoClient.close();
        }
    }
    return "success";
}