List of usage examples for com.mongodb BasicDBObjectBuilder start
public static BasicDBObjectBuilder start(final String key, final Object val)
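For orientation, here is a minimal sketch of the builder chain this page documents; the field names and values are invented for illustration:

// Hypothetical fields, for illustration only:
// start() seeds the builder with one key/value pair, add() appends
// further pairs in insertion order, and get() materializes the DBObject.
DBObject doc = BasicDBObjectBuilder.start("status", "active")
        .add("retries", 3)
        .get();
// doc represents { "status" : "active" , "retries" : 3 }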
From source file:com.zjy.mongo.splitter.StandaloneMongoSplitter.java
License:Apache License
@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    final DBObject splitKey = MongoConfigUtil.getInputSplitKey(getConfiguration());
    final DBObject splitKeyMax = MongoConfigUtil.getMaxSplitKey(getConfiguration());
    final DBObject splitKeyMin = MongoConfigUtil.getMinSplitKey(getConfiguration());
    final int splitSize = MongoConfigUtil.getSplitSize(getConfiguration());
    final MongoClientURI inputURI;
    DBCollection inputCollection = null;
    final ArrayList<InputSplit> returnVal;
    try {
        inputURI = MongoConfigUtil.getInputURI(getConfiguration());
        MongoClientURI authURI = MongoConfigUtil.getAuthURI(getConfiguration());
        if (authURI != null) {
            inputCollection = MongoConfigUtil.getCollectionWithAuth(inputURI, authURI);
        } else {
            inputCollection = MongoConfigUtil.getCollection(inputURI);
        }
        returnVal = new ArrayList<InputSplit>();
        final String ns = inputCollection.getFullName();

        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Running splitVector on namespace: %s.%s; hosts: %s",
                    inputURI.getDatabase(), inputURI.getCollection(), inputURI.getHosts()));
        }
        final DBObject cmd = BasicDBObjectBuilder.start("splitVector", ns)
                .add("keyPattern", splitKey)
                .add("min", splitKeyMin)
                .add("max", splitKeyMax)
                // force:True is misbehaving it seems
                .add("force", false)
                .add("maxChunkSize", splitSize)
                .get();

        CommandResult data;
        boolean ok = true;
        try {
            data = inputCollection.getDB().getSisterDB(inputURI.getDatabase())
                    .command(cmd, ReadPreference.primary());
        } catch (final MongoException e) {
            // 2.0 servers throw exceptions rather than info in a CommandResult
            data = null;
            LOG.info(e.getMessage(), e);
            if (e.getMessage().contains("unrecognized command: splitVector")) {
                ok = false;
            } else {
                throw e;
            }
        }

        if (data != null) {
            if (data.containsField("$err")) {
                throw new SplitFailedException("Error calculating splits: " + data);
            } else if (!data.get("ok").equals(1.0)) {
                ok = false;
            }
        }

        if (!ok) {
            final CommandResult stats = inputCollection.getStats();
            if (stats.containsField("primary")) {
                final DBCursor shards = inputCollection.getDB().getSisterDB("config")
                        .getCollection("shards")
                        .find(new BasicDBObject("_id", stats.getString("primary")));
                try {
                    if (shards.hasNext()) {
                        final DBObject shard = shards.next();
                        final String host = ((String) shard.get("host"))
                                .replace(shard.get("_id") + "/", "");
                        final MongoClientURI shardHost;
                        if (authURI != null) {
                            shardHost = new MongoClientURIBuilder(authURI).host(host).build();
                        } else {
                            shardHost = new MongoClientURIBuilder(inputURI).host(host).build();
                        }
                        MongoClient shardClient = null;
                        try {
                            shardClient = new MongoClient(shardHost);
                            data = shardClient.getDB(shardHost.getDatabase())
                                    .command(cmd, ReadPreference.primary());
                        } catch (final Exception e) {
                            LOG.error(e.getMessage(), e);
                        } finally {
                            if (shardClient != null) {
                                shardClient.close();
                            }
                        }
                    }
                } finally {
                    shards.close();
                }
            }
            if (data != null && !data.get("ok").equals(1.0)) {
                throw new SplitFailedException("Unable to calculate input splits: " + data.get("errmsg"));
            }
        }

        // Comes in a format where "min" and "max" are implicit
        // and each entry is just a boundary key; not ranged
        final BasicDBList splitData = (BasicDBList) data.get("splitKeys");
        if (splitData.size() == 0) {
            LOG.warn("WARNING: No Input Splits were calculated by the split code. Proceeding with a *single* split. "
                    + "Data may be too small, try lowering 'mongo.input.split_size' if this is undesirable.");
        }

        BasicDBObject lastKey = null; // Lower boundary of the first min split
        // If splitKeyMin was given, use it as first boundary.
        if (!splitKeyMin.toMap().isEmpty()) {
            lastKey = new BasicDBObject(splitKeyMin.toMap());
        }
        for (final Object aSplitData : splitData) {
            final BasicDBObject currentKey = (BasicDBObject) aSplitData;
            returnVal.add(createSplitFromBounds(lastKey, currentKey));
            lastKey = currentKey;
        }

        BasicDBObject maxKey = null;
        // If splitKeyMax was given, use it as last boundary.
        if (!splitKeyMax.toMap().isEmpty()) {
            maxKey = new BasicDBObject(splitKeyMax.toMap());
        }
        // Last max split
        final MongoInputSplit lastSplit = createSplitFromBounds(lastKey, maxKey);
        returnVal.add(lastSplit);
    } finally {
        if (inputCollection != null) {
            MongoConfigUtil.close(inputCollection.getDB().getMongo());
        }
    }
    return returnVal;
}
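For reference, the builder chain above assembles a splitVector command document of roughly the following shape (the key pattern and values here are illustrative):

{ "splitVector" : "db.collection" , "keyPattern" : { "_id" : 1 } , "min" : { } , "max" : { } , "force" : false , "maxChunkSize" : 8 }

BasicDBObjectBuilder preserves insertion order, which matters here because MongoDB identifies a command by its first key.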
From source file:de.flapdoodle.mongoom.datastore.Caps.java
License:Apache License
public static void ensureCaps(DB db, String collectionName, ICollectionCap capCollection) {
    if (capCollection != null) {
        long count = capCollection.count();
        long size = capCollection.size();
        if (size == 0) {
            throw new ObjectMapperException("Size == 0");
        }
        if ((size < Long.MAX_VALUE) && (count < Long.MAX_VALUE)) {
            try {
                db.requestStart();
                BasicDBObjectBuilder dbCapOpts = BasicDBObjectBuilder.start("capped", true);
                dbCapOpts.add("size", size);
                if (count > 0) {
                    dbCapOpts.add("max", count);
                }
                DBCollection dbColl = db.getCollection(collectionName);
                if (db.getCollectionNames().contains(collectionName)) {
                    DBObject dbResult = db.command(BasicDBObjectBuilder.start("collstats", collectionName).get());
                    if (dbResult.containsField("capped")) {
                        // TODO: check the cap options.
                        _logger.warning("DBCollection already exists and is capped already; doing nothing. " + dbResult);
                    } else {
                        _logger.warning("DBCollection already exists with the same name (" + collectionName
                                + ") and is not capped; not creating a capped version!");
                    }
                } else {
                    db.createCollection(collectionName, dbCapOpts.get());
                    _logger.info("Created capped DBCollection (" + collectionName + ") with opts " + capCollection);
                }
            } finally {
                Errors.checkError(db, Operation.Insert);
                db.requestDone();
            }
        }
    }
}
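The options document assembled above has the standard shape expected by createCollection for a capped collection (the numbers here are illustrative):

{ "capped" : true , "size" : 52428800 , "max" : 10000 }

where "size" is the cap in bytes and "max" is an optional upper bound on the number of documents.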
From source file:de.iew.imageread.Main.java
License:Apache License
protected DBObject queryFromOptions() throws Exception {
    BasicDBObject dbObject = null;
    if (this.queryFilters != null) {
        if (QUERY_OPTION.FROM.available(this.queryFilters) && QUERY_OPTION.TO.available(this.queryFilters)) {
            Date fromDate = parseDateString(this.queryFilters.getProperty(QUERY_OPTION.FROM.name()));
            Date toDate = parseDateString(this.queryFilters.getProperty(QUERY_OPTION.TO.name()));
            dbObject = new BasicDBObject();
            dbObject.put("uploadDate",
                    BasicDBObjectBuilder.start("$gte", dayBegin(fromDate)).add("$lte", dayEnd(toDate)).get());
        } else if (QUERY_OPTION.FROM.available(this.queryFilters)) {
            Date fromDate = parseDateString(QUERY_OPTION.FROM.getValue(this.queryFilters));
            dbObject = new BasicDBObject();
            dbObject.put("uploadDate", BasicDBObjectBuilder.start("$gte", dayBegin(fromDate)).get());
        } else if (QUERY_OPTION.TO.available(this.queryFilters)) {
            Date toDate = parseDateString(QUERY_OPTION.TO.getValue(this.queryFilters));
            dbObject = new BasicDBObject();
            dbObject.put("uploadDate", BasicDBObjectBuilder.start("$lte", dayEnd(toDate)).get());
        } else if (QUERY_OPTION.EXACT.available(this.queryFilters)) {
            Date exactDate = parseDateString(QUERY_OPTION.EXACT.getValue(this.queryFilters));
            dbObject = new BasicDBObject();
            dbObject.put("uploadDate",
                    BasicDBObjectBuilder.start("$gte", dayBegin(exactDate)).add("$lte", dayEnd(exactDate)).get());
        }
    }
    return dbObject;
}
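In the FROM/TO branch, for instance, the nested builder yields a standard range filter on the GridFS uploadDate field:

{ "uploadDate" : { "$gte" : <start of fromDate> , "$lte" : <end of toDate> } }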
From source file:de.otto.mongodb.profiler.op.DefaultOpProfiler.java
License:Apache License
@Override
public ProfilingLevel getProfilingLevel() {
    final CommandResult result = db.command(BasicDBObjectBuilder.start("profile", -1).get());
    result.throwOnError();
    return ProfilingLevel.forValue(result.getInt("was", Integer.MIN_VALUE));
}
From source file:de.otto.mongodb.profiler.op.DefaultOpProfiler.java
License:Apache License
@Override
public void setProfilingLevel(ProfilingLevel level) {
    db.command(BasicDBObjectBuilder.start("profile", level.value).get()).throwOnError();
}
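Both methods drive MongoDB's profile command: { "profile" : -1 } reads the current profiling level without changing it (the server returns the previous level in the "was" field), while { "profile" : 0|1|2 } sets it.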
From source file:edu.umass.cs.gnsserver.database.MongoRecords.java
License:Apache License
@Override
public void createIndex(String collectionName, String field, String index) {
    MongoCollectionSpec spec = mongoCollectionSpecs.getCollectionSpec(collectionName);
    // Prepend this because of the way we store the records.
    //DBObject index2d = BasicDBObjectBuilder.start(NameRecord.VALUES_MAP.getName() + "." + field, index).get();
    DBObject index2d = BasicDBObjectBuilder.start(NameRecord.VALUES_MAP.getName() + "." + field,
            isIndexInt(index) ? Integer.parseInt(index) : index).get();
    db.getCollection(spec.getName()).createIndex(index2d);
}
From source file:essex.bigessexnew.OplogListener.java
public void listen(boolean processHistory, String... logFields) {
    BasicDBObject query = prepareOplogListenQuery(processHistory);
    BasicDBObject fields = prepareOplogListenFields(logFields);
    DBCursor cur = collection.find(query, fields)
            .sort(BasicDBObjectBuilder.start("$natural", 1).get())
            .addOption(Bytes.QUERYOPTION_TAILABLE | Bytes.QUERYOPTION_AWAITDATA | Bytes.QUERYOPTION_NOTIMEOUT);
    performListenTask(cur);
}
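Sorting by { "$natural" : 1 } returns documents in on-disk insertion order; combined with the tailable, await-data, and no-timeout cursor options, this is the usual way to follow a capped collection such as the oplog.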
From source file:eu.cassandra.server.mongo.util.MongoDBQueries.java
License:Apache License
/**
 * curl -i --header "dbname:run_id" 'http://localhost:8080/cassandra/api/results?inst_id=instID_&aggr_unit=3&metric=1&from=3&to=100'
 *
 * @param installationId
 * @param metric
 * @param aggregationUnit
 * @param fromTick
 * @param toTick
 * @return
 */
public DBObject mongoResultQuery(HttpHeaders httpHeaders, String installationId, String metricS,
        String aggregationUnitS, String fromTickS, String toTickS) {
    try {
        String runId = getDbNameFromHTTPHeader(httpHeaders);
        if (runId == null && installationId == null) {
            throw new RestQueryParamMissingException(
                    "QueryParamMissing: Both run_id and installation_id are null");
        }
        String aggrUnit = " (Minute)";
        String defaultAggrUnit = " (Minute)";
        Integer aggregationUnit = null;
        Integer defaultAggregationUnit = null;
        if (aggregationUnitS != null) {
            aggregationUnit = Integer.parseInt(aggregationUnitS);
            aggrUnit = " (" + aggregationUnit + " Minute" + (aggregationUnit == 1 ? "" : "s") + ")";
        }
        int numberOfDays = Integer.parseInt(
                DBConn.getConn(runId).getCollection("sim_param").findOne().get("numberOfDays").toString());
        if (numberOfDays == 1) {
            defaultAggregationUnit = 5;
            defaultAggrUnit = " (5 Minutes)";
        } else if (numberOfDays <= 5) {
            defaultAggregationUnit = 15;
            defaultAggrUnit = " (15 Minutes)";
        } else if (numberOfDays <= 20) {
            defaultAggregationUnit = 60;
            defaultAggrUnit = " (1 Hour)";
        } else if (numberOfDays <= 60) {
            defaultAggregationUnit = 180;
            defaultAggrUnit = " (3 Hours)";
        } else if (numberOfDays <= 360) {
            defaultAggregationUnit = 720;
            defaultAggrUnit = " (12 Hours)";
        }
        if (aggregationUnit == null) {
            aggregationUnit = defaultAggregationUnit;
            aggrUnit = defaultAggrUnit;
        }
        Integer fromTick = null;
        if (fromTickS != null) {
            fromTick = Integer.parseInt(fromTickS);
        }
        Integer toTick = null;
        if (toTickS != null) {
            toTick = Integer.parseInt(toTickS);
        }
        String coll = MongoResults.COL_AGGRRESULTS;
        if (aggregationUnit == null || aggregationUnit <= 0) {
            aggregationUnit = 1;
        }
        if (installationId != null) {
            coll = MongoResults.COL_INSTRESULTS;
        }
        String yMetric = ACTIVE_POWER_P;
        if (metricS != null && metricS.equalsIgnoreCase(REACTIVE_POWER_Q)) {
            yMetric = REACTIVE_POWER_Q;
        }
        // db.inst_results.find({inst_id:"dszfs123",tick:{$gt:1}}).sort({tick:1}).pretty()
        // db.inst_results.group(
        //     {
        //         keyf: function(doc) { var key = new NumberInt(doc.tick/4); return {x: key}; },
        //         cond: {inst_id: "instID_"},
        //         reduce: function(obj, prev) { prev.csum += obj.p; },
        //         initial: {csum: 0}
        //     }
        // )
        BasicDBObject condition = null;
        if (installationId != null || fromTick != null || toTick != null) {
            condition = new BasicDBObject();
        }
        if (installationId != null) {
            condition.append("inst_id", installationId);
        }
        if (fromTick != null && toTick != null) {
            condition.append("tick", BasicDBObjectBuilder.start("$gte", fromTick).add("$lte", toTick).get());
        } else if (fromTick != null) {
            condition.append("tick", new BasicDBObject("$gte", fromTick));
        } else if (toTick != null) {
            condition.append("tick", new BasicDBObject("$lte", toTick));
        }
        BasicDBObject groupCmd = new BasicDBObject("ns", coll);
        groupCmd.append("$keyf",
                "function(doc){var key=new NumberInt(doc.tick/" + aggregationUnit + "); return {x:key}}");
        if (condition != null) {
            groupCmd.append("cond", condition);
        }
        groupCmd.append("$reduce", "function(obj,prev){prev.y+=obj." + yMetric + "}");
        groupCmd.append("initial", new BasicDBObject("y", 0));
        @SuppressWarnings("deprecation")
        BasicDBList dbList = (BasicDBList) DBConn.getConn(getDbNameFromHTTPHeader(httpHeaders))
                .getCollection(coll).group(groupCmd);
        if (aggregationUnit > 1) {
            for (int i = 0; i < dbList.size(); i++) {
                BasicDBObject obj = (BasicDBObject) dbList.get(i);
                obj.put("y", Double.parseDouble(obj.get("y").toString()) / aggregationUnit);
            }
        }
        return jSON2Rrn.createJSONPlot(dbList, "Data for plot retrieved successfully",
                "Consumption " + (yMetric.equalsIgnoreCase(REACTIVE_POWER_Q) ? "Reactive Power" : "Active Power"),
                "Time" + aggrUnit,
                yMetric.equalsIgnoreCase(REACTIVE_POWER_Q) ? "VAr" : "W",
                defaultAggregationUnit, numberOfDays);
    } catch (Exception e) {
        e.printStackTrace();
        return jSON2Rrn.createJSONError("Error in retrieving results", e.getMessage());
    }
}
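Two asides on this example: DBCollection.group (hence the @SuppressWarnings("deprecation")) was deprecated in the Java driver in favor of the aggregation framework, and the tick condition built above is the same range-filter shape seen elsewhere on this page, i.e. { "tick" : { "$gte" : fromTick , "$lte" : toTick } }.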
From source file:eu.trentorise.smartcampus.parcheggiausiliari.data.GeoStorage.java
License:Apache License
@Autowired
public GeoStorage(MongoOperations mongoTemplate) {
    super(mongoTemplate);
    mongoTemplate.getCollection(mongoTemplate.getCollectionName(GeoSyncObjectBean.class))
            .ensureIndex(BasicDBObjectBuilder.start("location", "2d").get());
}
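The builder here produces the index specification { "location" : "2d" }, i.e. a geospatial 2d index on the location field.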
From source file:fr.wseduc.gridfs.GridFSPersistor.java
License:Apache License
private void countChunks(Message<Buffer> message, JsonObject json) {
    String filesId = json.getString("files_id");
    if (filesId == null || filesId.trim().isEmpty()) {
        replyError(message, "Invalid file id.");
        return;
    }
    try {
        DBObject file = BasicDBObjectBuilder.start("files_id", filesId).get();
        DBCollection collection = db.getCollection(bucket + CHUNKS);
        long count = collection.count(file);
        message.reply(count);
    } catch (RuntimeException e) {
        logger.error(e.getMessage(), e);
        replyError(message, e.getMessage());
    }
}
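With a single key/value pair, BasicDBObjectBuilder.start("files_id", filesId).get() is equivalent to new BasicDBObject("files_id", filesId); the builder form mainly pays off when several add() calls are chained, as in the earlier examples.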