List of usage examples for com.mongodb.BasicDBObjectBuilder.start()
public static BasicDBObjectBuilder start()
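Every example on this page follows the same three-step pattern: start() opens a fluent builder, add(key, value) appends fields, and get() returns the finished DBObject. A minimal sketch of the pattern (the field names and values are illustrative, not taken from any example below):

    import com.mongodb.BasicDBObjectBuilder;
    import com.mongodb.DBObject;

    // Builds { "files_id" : 42, "n" : 0 }.
    DBObject doc = BasicDBObjectBuilder.start()
            .add("files_id", 42)
            .add("n", 0)
            .get();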
From source file: cc.acs.mongofs.gridfs.GridFS.java
License: Apache License
/**
 * Creates a GridFS instance for the specified bucket
 * in the given database.
 *
 * @param db     database to work with
 * @param bucket bucket to use in the given database
 */
public GridFS(DB db, String bucket) {
    _db = db;
    _bucketName = bucket;

    _filesCollection = _db.getCollection(_bucketName + ".files");
    _chunkCollection = _db.getCollection(_bucketName + ".chunks");

    // Index that lets chunks be fetched by file id, in chunk order.
    _chunkCollection.ensureIndex(BasicDBObjectBuilder.start().add("files_id", 1).add("n", 1).get());

    _filesCollection.setObjectClass(GridFSDBFile.class);
}
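The builder output here is the compound index specification { "files_id" : 1, "n" : 1 } that GridFS uses to locate chunks in order. A hedged usage sketch, assuming a DB handle obtained elsewhere (the bucket name "fs" is the conventional default, used purely for illustration):

    // Opening a bucket ensures the chunk index as a side effect.
    GridFS fs = new GridFS(db, "fs");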
From source file: cc.acs.mongofs.gridfs.GridFSInputFile.java
License: Apache License
public int saveChunks(int chunkSize) throws IOException {
    if (_saved)
        throw new RuntimeException("already saved!");
    if (chunkSize > 3.5 * 1000 * 1000)
        throw new RuntimeException("chunkSize must be less than 3.5MB!");

    byte[] b = new byte[chunkSize];
    long total = 0;
    int cn = 0;

    MessageDigest md = _md5Pool.get();
    md.reset();
    DigestInputStream in = new DigestInputStream(_in, md);

    while (true) {
        // Fill the buffer as far as the input stream allows.
        int start = 0;
        while (start < b.length) {
            int r = in.read(b, start, b.length - start);
            if (r == 0)
                throw new RuntimeException("i'm doing something wrong");
            if (r < 0)
                break;
            start += r;
        }
        total += start;

        // The last read may not fill the buffer; trim it so the stored
        // chunk contains only real data.
        byte[] mine = b;
        if (start != b.length) {
            mine = new byte[start];
            System.arraycopy(b, 0, mine, 0, start);
        }

        // One chunk document per buffer: { files_id, n, data }.
        DBObject chunk = BasicDBObjectBuilder.start()
                .add("files_id", _id)
                .add("n", cn++)
                .add("data", mine)
                .get();
        _fs._chunkCollection.save(chunk);

        if (start < b.length)
            break;
    }

    _md5 = Util.toHex(md.digest());
    _md5Pool.done(md);

    _length = total;
    _saved = true;
    return cn;
}
From source file: cn.vlabs.clb.server.storage.mongo.extend.MyGridFS.java
License: Apache License
public MyGridFS(DB db, String bucket) {
    _db = db;
    _bucketName = bucket;

    _filesCollection = _db.getCollection(_bucketName + ".files");
    _chunkCollection = _db.getCollection(_bucketName + ".chunks");

    // ensure standard indexes as long as collections are small
    try {
        if (_filesCollection.count() < 1000) {
            _filesCollection
                    .ensureIndex(BasicDBObjectBuilder.start().add("filename", 1).add("uploadDate", 1).get());
        }
        if (_chunkCollection.count() < 1000) {
            _chunkCollection.ensureIndex(BasicDBObjectBuilder.start().add("files_id", 1).add("n", 1).get(),
                    BasicDBObjectBuilder.start().add("unique", true).get());
        }
    } catch (MongoException e) {
        LOGGER.info(
                String.format("Unable to ensure indices on GridFS collections in database %s", db.getName()));
    }

    _filesCollection.setObjectClass(GridFSDBFile.class);
}
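This variant shows that the builder serves for index options as well as key patterns: the second argument to ensureIndex is an options document, here { "unique" : true }. A minimal sketch of the same two-document call (the collection and key name are illustrative):

    // Key pattern and options are both plain DBObjects built the same way.
    coll.ensureIndex(
            BasicDBObjectBuilder.start().add("email", 1).get(),      // keys
            BasicDBObjectBuilder.start().add("unique", true).get()); // options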
From source file: cn.vlabs.clb.server.storage.mongo.extend.MyGridFSInputFile.java
License: Apache License
protected DBObject createChunk(Object id, int currentChunkNumber, byte[] writeBuffer) {
    return BasicDBObjectBuilder.start()
            .add("files_id", id)
            .add("n", currentChunkNumber)
            .add("data", writeBuffer)
            .get();
}
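For a given file id and chunk number the method yields a standard GridFS chunk document. A hypothetical call (the ObjectId and payload are made up for illustration):

    // Produces { "files_id" : <id>, "n" : 0, "data" : <bytes> }.
    DBObject chunk = createChunk(new ObjectId(), 0, new byte[] { 1, 2, 3 });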
From source file: co.cask.hydrator.plugin.batch.sink.MongoDBBatchSink.java
License: Apache License
@Override
public void transform(StructuredRecord input, Emitter<KeyValue<NullWritable, BSONWritable>> emitter)
        throws Exception {
    BasicDBObjectBuilder bsonBuilder = BasicDBObjectBuilder.start();
    for (Schema.Field field : input.getSchema().getFields()) {
        bsonBuilder.add(field.getName(), input.get(field.getName()));
    }
    emitter.emit(new KeyValue<>(NullWritable.get(), new BSONWritable(bsonBuilder.get())));
}
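Unlike the fluent one-liners above, this sink keeps the builder in a local variable and feeds it from a loop; since add() returns the builder itself, the chained and imperative styles are interchangeable. A minimal sketch of the loop-driven style, assuming a java.util.Map<String, Object> named fields (illustrative, not part of the plugin):

    // add() mutates and returns the same builder, so it can be driven from a loop.
    BasicDBObjectBuilder builder = BasicDBObjectBuilder.start();
    for (Map.Entry<String, Object> e : fields.entrySet()) {
        builder.add(e.getKey(), e.getValue());
    }
    DBObject doc = builder.get();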
From source file: com.bbc.remarc.util.ResourceManager.java
License: Apache License
private static void createDocumentsFromFileMap(HashMap<String, List<File>> fileMap,
        HashMap<String, ResourceType> typeMap, File properties, String resourcesDir) {

    DB db = MongoClient.getDB();

    Properties documentProps = processPropertiesFile(properties);
    if (documentProps == null) {
        log.error("could not create properties file. Abort directory.");
        return;
    }

    String theme = documentProps.getProperty("theme");
    String decade = documentProps.getProperty("decade");

    if (theme == null && decade == null) {
        log.error("ERROR! Properties file contained neither THEME nor DECADE. Abort directory.");
        return;
    }

    // now we process each key (document) in the hashmap, copying the
    // resources (file array) into the correct folder
    Set<String> keys = fileMap.keySet();
    for (String key : keys) {

        log.debug("processing [" + key + "]");

        // create document with id, theme and decade
        BasicDBObjectBuilder documentBuilder = BasicDBObjectBuilder.start();
        documentBuilder.add("id", key);
        documentBuilder.add("theme", theme);
        documentBuilder.add("decade", decade);

        // based upon the documentType, we can determine all our urls and
        // storage variables
        ResourceType documentType = typeMap.get(key);
        File fileDestDirectory = null;

        // Get the relative base URL from an Environment variable if it has been set
        String relativefileBaseUrl = System.getenv(Configuration.ENV_BASE_URL);
        if (relativefileBaseUrl == null || "".equals(relativefileBaseUrl)) {
            relativefileBaseUrl = Configuration.DEFAULT_RELATIVE_BASE_URL;
        } else {
            relativefileBaseUrl += Configuration.CONTENT_DIR;
        }

        String mongoCollection = "";

        switch (documentType) {
        case IMAGE:
            mongoCollection = "images";
            fileDestDirectory = new File(resourcesDir + Configuration.IMAGE_DIR_NAME);
            relativefileBaseUrl += Configuration.IMAGE_DIR;
            break;
        case AUDIO:
            mongoCollection = "audio";
            fileDestDirectory = new File(resourcesDir + Configuration.AUDIO_DIR_NAME);
            relativefileBaseUrl += Configuration.AUDIO_DIR;
            break;
        case VIDEO:
            mongoCollection = "video";
            fileDestDirectory = new File(resourcesDir + Configuration.VIDEO_DIR_NAME);
            relativefileBaseUrl += Configuration.VIDEO_DIR;
            break;
        default:
            break;
        }

        List<File> files = fileMap.get(key);
        for (File resource : files) {

            log.debug("--- processing [" + resource.getName() + "]");

            String resourceLocation = relativefileBaseUrl + resource.getName();
            String extension = FilenameUtils.getExtension(resource.getName());
            ResourceType fileType = getTypeFromExtension(extension);

            // now determine the value to store the resource under in MongoDB,
            // different if an image or metadata
            String urlKey;
            switch (fileType) {
            case IMAGE:
                urlKey = "imageUrl";
                break;
            case INFORMATION:
                urlKey = "metadata";
                break;
            default:
                urlKey = (extension + "ContentUrl");
                break;
            }

            // If the file is a metadata file, we want to read from it,
            // otherwise we just add the location to the db
            if (fileType == ResourceType.INFORMATION) {
                String metadata = processMetadata(resource.getPath());
                documentBuilder.add(urlKey, metadata);
            } else {
                documentBuilder.add(urlKey, resourceLocation);
            }
        }

        // insert the document into the database
        try {
            DBObject obj = documentBuilder.get();
            log.debug("writing document to collection (" + mongoCollection + "): " + obj);
            db.requestStart();
            DBCollection collection = db.getCollection(mongoCollection);
            collection.insert(documentBuilder.get());
        } finally {
            db.requestDone();
        }

        // write all the resource files to the correct directory
        log.debug("copying resources into " + fileDestDirectory.getPath());
        for (File resource : files) {

            // We don't want to copy the metadata into the directory, so remove it here
            String extension = FilenameUtils.getExtension(resource.getName());
            ResourceType fileType = getTypeFromExtension(extension);

            if (fileType != ResourceType.INFORMATION) {
                try {
                    FileUtils.copyFileToDirectory(resource, fileDestDirectory);
                } catch (IOException e) {
                    log.error("ERROR! Couldn't copy resource to directory: " + e);
                }
            }
        }
    }
}
From source file: com.bbc.remarc.ws.AudioServiceImpl.java
License: Apache License
@GET
@Produces("application/json")
@Path("/metadata/{id}")
public Response getAudioMetadata(@PathParam("id") String id) {

    log.trace("getAudioMetadata");

    DB db = MongoClient.getDB();
    DBCursor results = null;

    try {
        db.requestStart();
        results = db.getCollection(COLLECTION_NAME).find(BasicDBObjectBuilder.start().add("id", id).get());
        if (results.count() == 0) {
            return Response.status(404).build();
        }
    } finally {
        db.requestDone();
    }

    return Response.ok(results.next().toString()).build();
}
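Here the builder constructs the query document { "id" : <id> } that is passed straight to find(); the image and video services below use the identical pattern. A minimal sketch (the collection name and id value are illustrative):

    // find() accepts any DBObject as the query document.
    DBObject query = BasicDBObjectBuilder.start().add("id", "some-clip-id").get();
    DBCursor cursor = db.getCollection("audio").find(query);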
From source file: com.bbc.remarc.ws.ImageServiceImpl.java
License: Apache License
@GET
@Produces("application/json")
@Path("/metadata/{id}")
public Response getImageMetadata(@PathParam("id") String id) {

    log.trace("getImageMetadata");

    DB db = MongoClient.getDB();
    DBCursor results = null;

    try {
        db.requestStart();
        results = db.getCollection(COLLECTION_NAME).find(BasicDBObjectBuilder.start().add("id", id).get());
        if (results.count() == 0) {
            return Response.status(404).build();
        }
    } finally {
        db.requestDone();
    }

    return Response.ok(results.next().toString()).build();
}
From source file: com.bbc.remarc.ws.VideoServiceImpl.java
License: Apache License
@GET
@Produces("application/json")
@Path("/metadata/{id}")
public Response getVideoMetadata(@PathParam("id") String id) {

    log.trace("getVideoMetadata");

    DB db = MongoClient.getDB();
    DBCursor results = null;

    try {
        db.requestStart();
        results = db.getCollection(COLLECTION_NAME).find(BasicDBObjectBuilder.start().add("id", id).get());
        if (results.count() == 0) {
            return Response.status(404).build();
        }
    } finally {
        db.requestDone();
    }

    return Response.ok(results.next().toString()).build();
}
From source file: com.continuent.tungsten.replicator.applier.MongoApplier.java
License: Open Source License
private void ensureIndexes(DBCollection coll, OneRowChange orc) {
    // If we have not seen this table before, check whether it
    // needs an index.
    if (autoIndex) {
        String schema = orc.getSchemaName();
        String table = orc.getTableName();
        Table t = tableMetadataCache.retrieve(schema, table);
        if (t == null) {
            if (logger.isDebugEnabled()) {
                logger.debug("Ensuring index exists on collection: db=" + schema + " collection=" + table);
            }

            // Compute required index keys and ensure they
            // exist in MongoDB.
            List<ColumnSpec> keySpecs = orc.getKeySpec();
            if (keySpecs.size() > 0) {
                BasicDBObjectBuilder builder = BasicDBObjectBuilder.start();
                for (ColumnSpec keySpec : keySpecs) {
                    builder.add(keySpec.getName(), 1);
                }
                coll.ensureIndex(builder.get());
            }

            // Note that we have processed the table.
            t = new Table(schema, table);
            tableMetadataCache.store(t);
        }
    }
}
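As in the CDAP sink above, the index keys are accumulated in a loop, so a table whose key columns were, say, id and region would produce the compound pattern { "id" : 1, "region" : 1 }. An equivalent fluent sketch for two hypothetical key columns:

    coll.ensureIndex(BasicDBObjectBuilder.start()
            .add("id", 1)
            .add("region", 1)
            .get());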