List of usage examples for the no-argument constructor of com.mongodb.BasicDBObjectBuilder
public BasicDBObjectBuilder()
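A minimal, self-contained sketch of the typical lifecycle of this constructor: build, chain add()/append() calls, optionally push() into an embedded document, then call get() for the finished DBObject. The class name, field names, and values below are illustrative only, not drawn from the projects that follow.

import com.mongodb.BasicDBObjectBuilder;
import com.mongodb.DBObject;
import java.util.Date;

public class BuilderSketch {
    public static void main(String[] args) {
        // add() and append() are equivalent; both return the builder for chaining.
        DBObject doc = new BasicDBObjectBuilder()
                .add("name", "example")        // top-level field
                .append("created", new Date()) // another top-level field
                .push("meta")                  // start an embedded document; later adds land inside it
                .add("version", 1)             // field inside "meta"
                .get();                        // returns the root: { name, created, meta: { version } }
        System.out.println(doc);
    }
}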
From source file:net.cogz.friends.GearzFriends.java
License:Open Source License
public void addFriendRequest(String receiver, String from) {
    DBObject playerDocument = getPlayerDocument(receiver);
    Object friendsObj = playerDocument.get("friend_requests");
    if (friendsObj == null || !(friendsObj instanceof BasicDBList)) {
        friendsObj = new BasicDBList();
    }
    BasicDBList friendsList = (BasicDBList) friendsObj;
    if (hasRequest(receiver, from)) {
        throw new IllegalStateException("Friend request exists for that player");
    } else {
        DBObject newRequest = new BasicDBObjectBuilder().add("name", getObjectId(getPlayerDocument(from)))
                .add("sent", new Date()).get();
        friendsList.add(newRequest);
    }
    playerDocument.put("friend_requests", friendsList);
    getCollection().save(playerDocument);
}
From source file:net.tbnr.gearz.activerecord.GModel.java
License:Open Source License
/**
 * Updates the internal variables to be consistent.
 */
private void updateObjects() {
    this.basicDBObjectBuilder = new BasicDBObjectBuilder();
    for (BasicAnalyzedField analyzedField : getAllFields()) {
        if (!isValidValue(analyzedField.getValue()))
            continue;
        this.basicDBObjectBuilder.append(analyzedField.getKey(), analyzedField.getValue());
    }
    if (this.objectId != null) {
        this.basicDBObjectBuilder.append("_id", this.objectId);
    }
    this.basicDBObjectBuilder.append("_class", this.getClass().getName());
    this.basicDBObjectBuilder.append("_schema_v", "1.0");
}
From source file:net.tbnr.gearz.arena.ArenaManager.java
License:Open Source License
/**
 * Converts an Arena object into a DBObject that can be reversed using the other method (which is not static).
 *
 * @param arena The arena to convert into a DBObject
 * @return The DBObject version of this Arena.
 */
public static DBObject objectFromArena(Arena arena) {
    BasicDBObjectBuilder objectBuilder = new BasicDBObjectBuilder(); // start building the database object for this arena
    for (Field field : arena.getClass().getFields()) { // get all the fields ...
        if (!field.isAnnotationPresent(ArenaField.class)) {
            continue; // ... that we can access and that are annotated with ArenaField
        }
        ArenaField annotation = field.getAnnotation(ArenaField.class); // get the annotation from the field as an object
        ArenaIterator iterator; // holder for the field's iterator
        try {
            iterator = (ArenaIterator) field.get(arena); // try to get the arena iterator
        } catch (IllegalAccessException e) {
            continue; // field was inaccessible; skip it
        }
        BasicDBList list = new BasicDBList(); // collect the serialized entries into a DB list
        while (iterator.hasNext()) {
            Object next = iterator.next();
            ArenaFieldSerializer.SerializationDelegate<?> serializerFor = ArenaFieldSerializer
                    .getSerializerFor(next.getClass());
            if (serializerFor == null)
                continue;
            list.add(serializerFor.getObjectFor(next)); // add the serialized form of "next", whatever the iterator holds
        }
        objectBuilder.append(annotation.key(), list); // put that in the database object
    }
    objectBuilder.append("name", arena.getName()); // metadata
    objectBuilder.append("description", arena.getDescription());
    objectBuilder.append("worldId", arena.getWorldId());
    objectBuilder.append("last-updated", Calendar.getInstance().getTimeInMillis());
    objectBuilder.append("author", arena.getAuthors());
    if (arena.getId() != null) {
        objectBuilder.append("_id", new ObjectId(arena.getId())); // good for replacing/updating
    }
    return objectBuilder.get(); // finish the object off
}
From source file:net.vz.mongodb.jackson.JacksonDBCollection.java
License:Apache License
DBObject createIdQuery(K object) {
return new BasicDBObjectBuilder().add("_id", idHandler.toDbId(object)).get();
}
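For context, the query document built by createIdQuery can be handed straight to the legacy driver's find methods. A hedged sketch of such a caller; the server address, database, collection, and id value are assumptions, not part of the source above.

import com.mongodb.*;
import org.bson.types.ObjectId;

public class IdQuerySketch {
    public static void main(String[] args) throws Exception {
        Mongo m = new Mongo("localhost"); // legacy driver connection, as in the other examples on this page
        DBCollection coll = m.getDB("test").getCollection("users");
        // same shape as createIdQuery produces: { "_id" : <id> }
        DBObject idQuery = new BasicDBObjectBuilder().add("_id", new ObjectId()).get();
        DBObject found = coll.findOne(idQuery); // null when nothing matches
        System.out.println(found);
        m.close();
    }
}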
From source file:org.apache.hadoop.contrib.mongoreduce.MongoOutputCommitter.java
License:Apache License
@Override
public void setupJob(JobContext jobContext) throws IOException {
    /*
     * Note: we don't really have to do anything here -
     * MongoDB is one of the few systems that don't require you to
     * create a database or collection before writing to it.
     *
     * But in order to ingest a ton of data quickly we have to
     * pre-split the output collection.
     */
    Configuration conf = jobContext.getConfiguration();
    if (conf.getBoolean("mongo.output.skip_splitting", false))
        return;

    String database = conf.get("mongo.output.database");
    String collection = conf.get("mongo.output.collection");

    // connect to global db
    Mongo m = new Mongo("localhost");
    DB db = m.getDB(database);
    DB admindb = m.getDB("admin");
    DB configdb = m.getDB("config");

    // optionally drop the existing collection
    boolean drop = conf.getBoolean("mongo.output.drop", false);
    DBCollection coll = db.getCollection(collection);
    if (drop) {
        coll.drop();
    } else {
        if (coll.count() > 0) {
            // don't shard an existing collection - may already be sharded
            return;
        }
    }

    // get a list of shards
    ArrayList<String> shards = new ArrayList<String>();
    for (DBObject s : configdb.getCollection("shards").find()) {
        shards.add((String) s.get("_id"));
    }

    if (shards.size() < 2) {
        // nothing to pre-split: fewer than two shards
        return;
    }

    // shard the new output collection
    BasicDBObjectBuilder builder = new BasicDBObjectBuilder();
    builder.add("enableSharding", database);
    admindb.command(builder.get());

    builder = new BasicDBObjectBuilder();
    builder.add("shardCollection", database + "." + collection);
    // just shard on _id - but the user gets to decide what the _id is
    builder.add("key", new BasicDBObject("_id", 1));
    admindb.command(builder.get());

    // pre-split to get parallel writes;
    // http://www.mongodb.org/display/DOCS/Splitting+Chunks says the balancer
    // can take five minutes to move chunks, which is too long, so we move
    // them ourselves; jobs that can benefit will check for user-submitted split points
    String[] splits;
    String splitString = conf.get("mongo.output.split_points", "");

    // generate our own split points if necessary
    if (splitString.equals("")) {
        long max = (long) Math.pow(93.0, 5.0);
        long step = max / shards.size();
        splits = new String[shards.size() - 1];

        // assume human-readable keys
        for (int i = 0; i < shards.size() - 1; i++) {
            splits[i] = splitPointForLong(step * (i + 1));
        }
    } else {
        splits = splitString.split(",");
    }

    HashMap<String, Object> splitCmd = new HashMap<String, Object>();
    splitCmd.put("split", database + "." + collection);
    splitCmd.put("middle", "");

    HashMap<String, Object> moveCmd = new HashMap<String, Object>();
    moveCmd.put("moveChunk", database + "." + collection);
    moveCmd.put("find", "");
    moveCmd.put("to", "");

    // do the splitting and migrating;
    // we assign chunks to shards in a round-robin manner
    int i = 0;
    for (String split : splits) {
        splitCmd.remove("middle");
        splitCmd.put("middle", new BasicDBObject("_id", split));

        // create new chunk
        admindb.command(new BasicDBObject(splitCmd));

        // move to shard
        moveCmd.remove("find");
        moveCmd.put("find", new BasicDBObject("_id", split));
        moveCmd.put("to", shards.get(i));
        admindb.command(new BasicDBObject(moveCmd));

        i = (i + 1) % shards.size();
    }
}
From source file:org.apache.hadoop.contrib.mongoreduce.MongoStreamOutputFormat.java
License:Apache License
public void checkOutputSpecs(FileSystem ignored, JobConf conf) throws IOException {
    if (conf.getBoolean("mongo.output.skip_splitting", false))
        return;

    String database = conf.get("mongo.output.database", "");
    if (database.equals("")) {
        throw new IOException("must specify a value for mongo.output.database");
    }

    String collection = conf.get("mongo.output.collection", "");
    if (collection.equals("")) {
        throw new IOException("must supply a value for mongo.output.collection");
    }

    // connect to global db
    Mongo m = new Mongo("localhost");
    DB db = m.getDB(database);
    DB admindb = m.getDB("admin");
    DB configdb = m.getDB("config");

    // optionally drop the existing collection
    boolean drop = conf.getBoolean("mongo.output.drop", false);
    DBCollection coll = db.getCollection(collection);
    if (drop) {
        coll.drop();
    } else {
        if (coll.count() > 0) {
            // don't shard an existing collection - may already be sharded
            return;
        }
    }

    // get a list of shards
    ArrayList<String> shards = new ArrayList<String>();
    for (DBObject s : configdb.getCollection("shards").find()) {
        shards.add((String) s.get("_id"));
    }

    if (shards.size() < 2) {
        // nothing to pre-split: fewer than two shards
        return;
    }

    // shard the new output collection
    BasicDBObjectBuilder builder = new BasicDBObjectBuilder();
    builder.add("enableSharding", database);
    admindb.command(builder.get());

    builder = new BasicDBObjectBuilder();
    builder.add("shardCollection", database + "." + collection);
    // just shard on _id - but the user gets to decide what the _id is
    builder.add("key", new BasicDBObject("_id", 1));
    admindb.command(builder.get());

    // pre-split to get parallel writes;
    // http://www.mongodb.org/display/DOCS/Splitting+Chunks says the balancer
    // can take five minutes to move chunks, which is too long, so we move
    // them ourselves; jobs that can benefit will check for user-submitted split points
    String[] splits;
    String splitString = conf.get("mongo.output.split_points", "");

    // generate our own split points if necessary
    if (splitString.equals("")) {
        long max = (long) Math.pow(93.0, 5.0);
        long step = max / shards.size();
        splits = new String[shards.size() - 1];

        // assume human-readable keys
        for (int i = 0; i < shards.size() - 1; i++) {
            splits[i] = splitPointForLong(step * (i + 1));
        }
    } else {
        splits = splitString.split(",");
    }

    HashMap<String, Object> splitCmd = new HashMap<String, Object>();
    splitCmd.put("split", database + "." + collection);
    splitCmd.put("middle", "");

    HashMap<String, Object> moveCmd = new HashMap<String, Object>();
    moveCmd.put("moveChunk", database + "." + collection);
    moveCmd.put("find", "");
    moveCmd.put("to", "");

    // do the splitting and migrating;
    // we assign chunks to shards in a round-robin manner
    int i = 0;
    for (String split : splits) {
        splitCmd.remove("middle");
        splitCmd.put("middle", new BasicDBObject("_id", split));

        // create new chunk
        admindb.command(new BasicDBObject(splitCmd));

        // move to shard
        moveCmd.remove("find");
        moveCmd.put("find", new BasicDBObject("_id", split));
        moveCmd.put("to", shards.get(i));
        admindb.command(new BasicDBObject(moveCmd));

        i = (i + 1) % shards.size();
    }
}
From source file:org.apache.karaf.jaas.modules.mongo.internal.DefaultUserDetailService.java
License:Apache License
public java.util.List<String> getUserNames() throws Exception {
    List<String> result = new LinkedList<String>();
    DBCollection users = getDB().getCollection(configuration.getUserCollectionName());
    // project only the username field, suppressing _id
    DBObject userProjection = new BasicDBObjectBuilder().add("_id", 0).add("username", 1).get();
    DBCursor uc = users.find(null, userProjection);
    while (uc.hasNext()) {
        DBObject user = uc.next();
        result.add((String) user.get("username"));
    }
    uc.close();
    return result;
}
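The builder above produces the projection document { "_id" : 0, "username" : 1 }, which tells the server to return only the username field. A standalone sketch under assumed names (server, database, and collection are not from the source above):

import com.mongodb.*;

public class ProjectionSketch {
    public static void main(String[] args) throws Exception {
        Mongo m = new Mongo("localhost");
        DBCollection users = m.getDB("test").getCollection("users");
        DBObject onlyUsername = new BasicDBObjectBuilder().add("_id", 0).add("username", 1).get();
        DBCursor cursor = users.find(new BasicDBObject(), onlyUsername); // empty filter matches all
        try {
            while (cursor.hasNext()) {
                System.out.println(cursor.next().get("username"));
            }
        } finally {
            cursor.close();
        }
        m.close();
    }
}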
From source file:org.apache.rya.export.mongo.policy.TimestampPolicyMongoRyaStatementStore.java
License:Apache License
@Override
public Iterator<RyaStatement> fetchStatements() throws FetchStatementException {
    final DBObject timeObj = new BasicDBObjectBuilder().add(SimpleMongoDBStorageStrategy.TIMESTAMP,
            new BasicDBObjectBuilder().add("$gte", timestamp.getTime()).get()).get();
    final Cursor cur = db.getCollection(TRIPLES_COLLECTION).find(timeObj).sort(new BasicDBObject(TIMESTAMP, 1));
    final List<RyaStatement> statements = new ArrayList<>();
    while (cur.hasNext()) {
        final RyaStatement statement = adapter.deserializeDBObject(cur.next());
        statements.add(statement);
    }
    return statements.iterator();
}
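The nested builder above yields a range filter of the shape { timestamp : { "$gte" : <millis> } }. A minimal standalone sketch; the field name and cutoff value are illustrative:

import com.mongodb.BasicDBObjectBuilder;
import com.mongodb.DBObject;

public class RangeFilterSketch {
    public static void main(String[] args) {
        long since = System.currentTimeMillis() - 86400000L; // illustrative cutoff: 24 hours ago
        DBObject timeQuery = new BasicDBObjectBuilder()
                .add("timestamp", new BasicDBObjectBuilder().add("$gte", since).get())
                .get();
        System.out.println(timeQuery); // { "timestamp" : { "$gte" : <millis> } }
    }
}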
From source file:org.envirocar.server.mongo.dao.MongoMeasurementDao.java
License:Open Source License
private Measurements getMongo(MeasurementFilter request) {
    BasicDBObjectBuilder q = new BasicDBObjectBuilder();
    if (request.hasGeometry()) {
        q.add(MongoMeasurement.GEOMETRY, withinGeometry(request.getGeometry()));
    }
    if (request.hasTrack()) {
        q.add(MongoMeasurement.TRACK, ref(request.getTrack()));
    }
    if (request.hasUser()) {
        q.add(MongoMeasurement.USER, ref(request.getUser()));
    }
    if (request.hasTemporalFilter()) {
        q.add(MongoMeasurement.TIME, MongoUtils.temporalFilter(request.getTemporalFilter()));
    }
    return query(q.get(), request.getPagination());
}
From source file:org.envirocar.server.mongo.dao.MongoStatisticsDao.java
License:Open Source License
private DBObject matches(StatisticsFilter request) {
    BasicDBObjectBuilder b = new BasicDBObjectBuilder();
    BasicDBObjectBuilder match = b.push(Ops.MATCH);
    if (request.hasTrack()) {
        DBRef track = mongoDB.ref(request.getTrack());
        match.add(MongoMeasurement.TRACK, track);
    }
    if (request.hasUser()) {
        DBRef user = mongoDB.ref(request.getUser());
        match.add(MongoMeasurement.USER, user);
    }
    if (request.hasSensor()) {
        MongoSensor sensor = (MongoSensor) request.getSensor();
        match.add(SENSOR_ID_PATH, sensor.getId());
    }
    return b.get();
}
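The push(Ops.MATCH) call above descends into an embedded document, so the finished object is an aggregation stage of the form { "$match" : { ... } }. A standalone sketch of the same pattern; the stage and field names here are illustrative:

import com.mongodb.BasicDBObjectBuilder;
import com.mongodb.DBObject;

public class PushSketch {
    public static void main(String[] args) {
        BasicDBObjectBuilder b = new BasicDBObjectBuilder();
        b.push("$match")                 // subsequent adds land inside "$match"
                .add("track", "someTrackId")
                .add("sensor", "someSensorId");
        DBObject stage = b.get();        // get() returns the root document
        System.out.println(stage);       // { "$match" : { "track" : ..., "sensor" : ... } }
    }
}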