List of usage examples for com.mongodb DBObject toMap
Map toMap();
From source file:com.sangupta.dryrun.mongo.DryRunGridFSTemplate.java
License:Apache License
@SuppressWarnings("unchecked") private List<GridFSDBFile> findObjects(DBObject matchingObject, int max) { List<GridFSDBFile> found = new ArrayList<GridFSDBFile>(); Map<String, Object> query = null; if (matchingObject != null) { query = matchingObject.toMap(); }/*from w ww . j av a 2s . c om*/ boolean runMatch = AssertUtils.isNotEmpty(query); Collection<DryRunGridFSDBFile> currentFiles = this.files.values(); for (DryRunGridFSDBFile candidate : currentFiles) { if (runMatch) { // match candidate against matching map if (match(query, candidate.getMetadata())) { found.add(candidate); } } else { found.add(candidate); } if (found.size() == max) { // break the for-loop as we found number of items we needed break; } } return found; }
From source file:com.sfelf.connectors.mongoOplogCursorConnector.java
License:Open Source License
/**
 * <b>dbObjectToMap</b> - Convert a DBObject into Map.
 * <p/>
 * {@sample.xml ../../../doc/mongoOplogCursor-connector.xml.sample mongooplogcursor:dbobjectToMap}
 *
 * @param input the input for this transformer
 * @return the converted Map representation
 */
@SuppressWarnings("rawtypes")
@Transformer(sourceTypes = { DBObject.class })
public static Map dbobjectToMap(final DBObject input) {
    // Delegate directly to the driver's own map conversion.
    final Map converted = input.toMap();
    return converted;
}
From source file:com.socialsky.mods.MongoPersistor.java
License:Apache License
// Streams up to `max` documents from the cursor back to the requester as one
// JSON batch. When more documents remain, replies "more-exist" and waits (up
// to `timeout` ms) for the client's ack before recursing for the next batch;
// otherwise replies "ok" and closes the cursor.
private void sendBatch(Message<JsonObject> message, final DBCursor cursor, final int max, final int timeout) {
    int count = 0;
    JsonArray results = new JsonArray();
    // Drain at most `max` documents from the cursor into the result array.
    while (cursor.hasNext() && count < max) {
        DBObject obj = cursor.next();
        JsonObject m = new JsonObject(obj.toMap());
        results.add(m);
        count++;
    }
    if (cursor.hasNext()) {
        // More data remains: tell the client and arm a safety timer so an
        // unresponsive client cannot leak the server-side cursor.
        JsonObject reply = createBatchMessage("more-exist", results);
        // If the user doesn't reply within timeout, close the cursor
        final long timerID = vertx.setTimer(timeout, new Handler<Long>() {
            @Override
            public void handle(Long timerID) {
                container.logger().warn("Closing DB cursor on timeout");
                try {
                    cursor.close();
                } catch (Exception ignore) {
                    // best-effort close; cursor may already be closed
                }
            }
        });
        message.reply(reply, new Handler<Message<JsonObject>>() {
            @Override
            public void handle(Message<JsonObject> msg) {
                // Client acked in time: cancel the safety timer and continue.
                vertx.cancelTimer(timerID);
                // Get the next batch
                sendBatch(msg, cursor, max, timeout);
            }
        });
    } else {
        // Final batch: reply "ok" and release the cursor immediately.
        JsonObject reply = createBatchMessage("ok", results);
        message.reply(reply);
        cursor.close();
    }
}
From source file:com.socialsky.mods.MongoPersistor.java
License:Apache License
/**
 * Handles a "findone" request: looks up a single document in the requested
 * collection, optionally restricted by a "matcher" and projected through
 * "keys", and replies with the document (if any) under "result".
 */
private void doFindOne(Message<JsonObject> message) {
    String collection = getMandatoryString("collection", message);
    if (collection == null) {
        // error reply already sent by getMandatoryString
        return;
    }
    JsonObject matcher = message.body().getObject("matcher");
    JsonObject keys = message.body().getObject("keys");
    DBCollection coll = db.getCollection(collection);

    DBObject res;
    if (keys != null) {
        DBObject projection = jsonToDBObject(keys);
        if (matcher != null) {
            res = coll.findOne(jsonToDBObject(matcher), projection);
        } else {
            res = coll.findOne(null, projection);
        }
    } else {
        if (matcher != null) {
            res = coll.findOne(jsonToDBObject(matcher));
        } else {
            res = coll.findOne();
        }
    }

    JsonObject reply = new JsonObject();
    if (res != null) {
        reply.putObject("result", new JsonObject(res.toMap()));
    }
    sendOK(message, reply);
}
From source file:com.socialsky.mods.MongoPersistor.java
License:Apache License
private void doFindAndModify(Message<JsonObject> message) { String collectionName = getMandatoryString("collection", message); if (collectionName == null) { return;//from w ww .j a v a 2 s. com } JsonObject msgBody = message.body(); DBObject update = jsonToDBObjectNullSafe(msgBody.getObject("update")); DBObject query = jsonToDBObjectNullSafe(msgBody.getObject("matcher")); DBObject sort = jsonToDBObjectNullSafe(msgBody.getObject("sort")); DBObject fields = jsonToDBObjectNullSafe(msgBody.getObject("fields")); boolean remove = msgBody.getBoolean("remove", false); boolean returnNew = msgBody.getBoolean("new", false); boolean upsert = msgBody.getBoolean("upsert", false); DBCollection collection = db.getCollection(collectionName); DBObject result = collection.findAndModify(query, fields, sort, remove, update, returnNew, upsert); JsonObject reply = new JsonObject(); if (result != null) { JsonObject resultJson = new JsonObject(result.toMap()); reply.putObject("result", resultJson); } sendOK(message, reply); }
From source file:com.streamreduce.storm.MongoClient.java
License:Apache License
/** * Returns the event for a particular object and version. * * @param targetId the object id of the target we're intersted in * @param version the version of the object * @return the object representing the event or null *//*from w w w . ja v a 2 s .com*/ public Map<String, Object> getEventForTargetAndVersion(String targetId, int version) { // Quick return if (targetId == null) { return null; } DB connectionsDb = getDB("nodeablemsgdb"); BasicDBObject query = new BasicDBObject(); query.put("targetId", new ObjectId(targetId)); query.put("metadata.targetVersion", version); DBObject result = connectionsDb.getCollection("eventStream").findOne(query); mapMongoToPlainJavaTypes(result); return result != null ? result.toMap() : null; }
From source file:com.streamreduce.storm.MongoClient.java
License:Apache License
/** * Returns the last two metrics for a given account, metricName and granularity. * eg: db.Inbox_4f8c34d3cea02afbc4aa8ce8.find({"metricName":"INVENTORY_ITEM_RESOURCE_USAGE.4f8c3e50cea02afbc4aa8f49.NetworkOut.average","metricGranularity":86400000}).sort({"metricTimestamp":-1}).limit(2); * * @param metricAccount the metric account * @param metricName the metric name * @param metricGranularity the metric granularity * @return the last 2 metrics/*from www.j ava 2s .c o m*/ */ public List<Map<String, Object>> getLastTwoTuples(String metricAccount, String metricName, long metricGranularity) { DB metricsDB = getDB("nodeablemsgdb"); String collectionName = Constants.METRIC_COLLECTION_PREFIX + metricAccount; DBCollection metricsCollection = metricsDB.getCollection(collectionName); BasicDBObject query = new BasicDBObject(); query.put("metricName", metricName); query.put("metricGranularity", metricGranularity); List<Map<String, Object>> list = new ArrayList<>(); DBCursor cursor = metricsCollection.find(query).sort(new BasicDBObject("metricTimestamp", -1)).limit(2); while (cursor.hasNext()) { DBObject obj = cursor.next(); mapMongoToPlainJavaTypes(obj); list.add(obj.toMap()); } return list; }
From source file:com.timboudreau.netbeans.mongodb.CollectionChildFactory.java
License:Open Source License
@Override protected Node createNodeForKey(DBObject key) { String idOrName = key.get("_id") + ""; //NOI18N Object o = key.get("name"); //NOI18N if (o != null) { String s = o.toString();/* w ww.jav a 2 s .co m*/ if (!s.isEmpty()) { idOrName = s; } } return new GenericNode(lookup, idOrName, key.toMap()); }
From source file:com.zjy.mongo.splitter.StandaloneMongoSplitter.java
License:Apache License
// Computes Hadoop input splits for the configured collection by running the
// server-side `splitVector` command; on servers/deployments where that command
// is unavailable it falls back to running it directly against the primary
// shard looked up from config.shards.
@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    final DBObject splitKey = MongoConfigUtil.getInputSplitKey(getConfiguration());
    final DBObject splitKeyMax = MongoConfigUtil.getMaxSplitKey(getConfiguration());
    final DBObject splitKeyMin = MongoConfigUtil.getMinSplitKey(getConfiguration());
    final int splitSize = MongoConfigUtil.getSplitSize(getConfiguration());
    final MongoClientURI inputURI;
    DBCollection inputCollection = null;
    final ArrayList<InputSplit> returnVal;
    try {
        inputURI = MongoConfigUtil.getInputURI(getConfiguration());
        MongoClientURI authURI = MongoConfigUtil.getAuthURI(getConfiguration());
        if (authURI != null) {
            inputCollection = MongoConfigUtil.getCollectionWithAuth(inputURI, authURI);
        } else {
            inputCollection = MongoConfigUtil.getCollection(inputURI);
        }
        returnVal = new ArrayList<InputSplit>();
        final String ns = inputCollection.getFullName();
        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Running splitVector on namespace: %s.%s; hosts: %s",
                    inputURI.getDatabase(), inputURI.getCollection(), inputURI.getHosts()));
        }
        final DBObject cmd = BasicDBObjectBuilder.start("splitVector", ns).add("keyPattern", splitKey)
                .add("min", splitKeyMin).add("max", splitKeyMax)
                // force:True is misbehaving it seems
                .add("force", false).add("maxChunkSize", splitSize).get();
        CommandResult data;
        boolean ok = true;
        try {
            data = inputCollection.getDB().getSisterDB(inputURI.getDatabase()).command(cmd,
                    ReadPreference.primary());
        } catch (final MongoException e) {
            // 2.0 servers throw exceptions rather than info in a CommandResult
            data = null;
            LOG.info(e.getMessage(), e);
            if (e.getMessage().contains("unrecognized command: splitVector")) {
                // Old server: try the shard-fallback path below.
                ok = false;
            } else {
                throw e;
            }
        }
        if (data != null) {
            if (data.containsField("$err")) {
                throw new SplitFailedException("Error calculating splits: " + data);
            } else if (!data.get("ok").equals(1.0)) {
                ok = false;
            }
        }
        if (!ok) {
            // Fallback: find the collection's primary shard in the config DB
            // and run splitVector directly against that shard's host.
            final CommandResult stats = inputCollection.getStats();
            if (stats.containsField("primary")) {
                final DBCursor shards = inputCollection.getDB().getSisterDB("config").getCollection("shards")
                        .find(new BasicDBObject("_id", stats.getString("primary")));
                try {
                    if (shards.hasNext()) {
                        final DBObject shard = shards.next();
                        // Shard host strings look like "<shardId>/<host:port,...>";
                        // strip the replica-set prefix to get the host list.
                        final String host = ((String) shard.get("host")).replace(shard.get("_id") + "/", "");
                        final MongoClientURI shardHost;
                        if (authURI != null) {
                            shardHost = new MongoClientURIBuilder(authURI).host(host).build();
                        } else {
                            shardHost = new MongoClientURIBuilder(inputURI).host(host).build();
                        }
                        MongoClient shardClient = null;
                        try {
                            shardClient = new MongoClient(shardHost);
                            data = shardClient.getDB(shardHost.getDatabase()).command(cmd,
                                    ReadPreference.primary());
                        } catch (final Exception e) {
                            LOG.error(e.getMessage(), e);
                        } finally {
                            if (shardClient != null) {
                                shardClient.close();
                            }
                        }
                    }
                } finally {
                    shards.close();
                }
            }
            if (data != null && !data.get("ok").equals(1.0)) {
                throw new SplitFailedException("Unable to calculate input splits: " + data.get("errmsg"));
            }
        }
        // NOTE(review): if the fallback also failed, `data` may still be null
        // here, which would NPE below — confirm upstream guarantees.
        // Comes in a format where "min" and "max" are implicit
        // and each entry is just a boundary key; not ranged
        final BasicDBList splitData = (BasicDBList) data.get("splitKeys");
        if (splitData.size() == 0) {
            LOG.warn(
                    "WARNING: No Input Splits were calculated by the split code. Proceeding with a *single* split. Data may be too"
                            + " small, try lowering 'mongo.input.split_size' if this is undesirable.");
        }
        BasicDBObject lastKey = null;
        // Lower boundary of the first min split
        // If splitKeyMin was given, use it as first boundary.
        if (!splitKeyMin.toMap().isEmpty()) {
            lastKey = new BasicDBObject(splitKeyMin.toMap());
        }
        // Each returned boundary key closes the previous split and opens the next.
        for (final Object aSplitData : splitData) {
            final BasicDBObject currentKey = (BasicDBObject) aSplitData;
            returnVal.add(createSplitFromBounds(lastKey, currentKey));
            lastKey = currentKey;
        }
        BasicDBObject maxKey = null;
        // If splitKeyMax was given, use it as last boundary.
        if (!splitKeyMax.toMap().isEmpty()) {
            maxKey = new BasicDBObject(splitKeyMax.toMap());
        }
        // Last max split
        final MongoInputSplit lastSplit = createSplitFromBounds(lastKey, maxKey);
        returnVal.add(lastSplit);
    } finally {
        if (inputCollection != null) {
            MongoConfigUtil.close(inputCollection.getDB().getMongo());
        }
    }
    return returnVal;
}
From source file:de.fhg.igd.mongomvcc.impl.MongoDBVCollection.java
License:Open Source License
@SuppressWarnings("unchecked") @Override// ww w . j av a 2 s. c om public Map<String, Object> findOne(Map<String, Object> example) { DBObject o = new BasicDBObject(); o.putAll(_branch.getQueryObject()); o.putAll(example); OIDInIndexFilter filter = new OIDInIndexFilter(); DBCursor c = _delegate.find(o, EXCLUDEHIDDENATTRS); for (DBObject obj : c) { if (filter.filter(obj)) { if (obj instanceof Map) { return (Map<String, Object>) obj; } return obj.toMap(); } } return null; }