List of usage examples for com.mongodb DBCollection aggregate
public Cursor aggregate(final List<? extends DBObject> pipeline, final AggregationOptions options)
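A minimal sketch of this overload in isolation, before the full source-file examples below. The "test" database, "events" collection, and "type" field are hypothetical placeholders, and a mongod listening on localhost is assumed:

MongoClient mongoClient = new MongoClient("localhost");
DBCollection coll = mongoClient.getDB("test").getCollection("events");

// group by the hypothetical "type" field and count documents per group
DBObject group = new BasicDBObject("$group",
        new BasicDBObject("_id", "$type").append("count", new BasicDBObject("$sum", 1)));
List<DBObject> pipeline = Arrays.asList(group);

// request a cursor so large result sets are streamed rather than returned inline
AggregationOptions options = AggregationOptions.builder()
        .batchSize(100)
        .outputMode(AggregationOptions.OutputMode.CURSOR)
        .allowDiskUse(true)
        .build();

Cursor cursor = coll.aggregate(pipeline, options);
while (cursor.hasNext()) {
    System.out.println(cursor.next());
}
cursor.close();
mongoClient.close();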
From source file:datapreparation.MongoStatistics.java
public void topUrls() {
    int limit = 0;
    String filename = "Top_Urls_More.txt";
    // Connect directly to a single MongoDB server (this will not auto-discover
    // the primary even if it is a member of a replica set).
    MongoClient mongoClient;
    try {
        mongoClient = new MongoClient("localhost");
        // use database
        DB db = mongoClient.getDB("users");
        // get collection
        DBCollection coll = db.getCollection("urls");

        // build the $project operation
        DBObject fields = new BasicDBObject("url", 1);
        fields.put("_id", 0);
        BasicDBObject project = new BasicDBObject("$project", fields);

        // now the $group operation
        DBObject groupFields = new BasicDBObject("_id", "$url");
        groupFields.put("count", new BasicDBObject("$sum", 1));
        DBObject group = new BasicDBObject("$group", groupFields);

        // finally the $sort operation
        BasicDBObject sort = new BasicDBObject("$sort", new BasicDBObject("count", -1));

        // build the pipeline
        List<DBObject> pipeline;
        if (limit == 0) { // without limits
            pipeline = Arrays.asList(project, group, sort);
        } else {
            // limit the result to the requested number of documents
            DBObject limitRes = new BasicDBObject("$limit", limit);
            pipeline = Arrays.asList(project, group, sort, limitRes);
        }

        AggregationOptions aggregationOptions = AggregationOptions.builder().batchSize(100)
                .outputMode(AggregationOptions.OutputMode.CURSOR).allowDiskUse(true).build();

        // run aggregation
        Cursor cursor = coll.aggregate(pipeline, aggregationOptions);
        writeToFile2(cursor, filename, "URL\t Count");
        cursor.close();
        mongoClient.close();
    } catch (IOException ex) {
        System.out.println("Something's Wrong! " + ex);
    }
}
From source file:datapreparation.MongoStatistics.java
public void timeIntervals() {
    int limit = 0;
    String filename = "Times.txt";
    // Connect directly to a single MongoDB server (this will not auto-discover
    // the primary even if it is a member of a replica set).
    MongoClient mongoClient;
    try {
        mongoClient = new MongoClient("localhost");
        // use database
        DB db = mongoClient.getDB("users");
        // get collection
        DBCollection coll = db.getCollection("urls");

        // build the $project operation
        DBObject fields = new BasicDBObject("time", 1);
        fields.put("_id", 0);
        BasicDBObject project = new BasicDBObject("$project", fields);

        // now the $group operation
        DBObject groupFields = new BasicDBObject("_id", "$time");
        //groupFields.put("count", new BasicDBObject("$sum", 1));
        DBObject group = new BasicDBObject("$group", groupFields);

        // no $sort operation is used here
        //BasicDBObject sort = new BasicDBObject("$sort", new BasicDBObject("count", -1));

        // build the pipeline
        List<DBObject> pipeline;
        if (limit == 0) { // without limits
            pipeline = Arrays.asList(project, group);
        } else {
            // limit the result to the requested number of documents
            DBObject limitRes = new BasicDBObject("$limit", limit);
            pipeline = Arrays.asList(project, group, limitRes);
        }

        AggregationOptions aggregationOptions = AggregationOptions.builder().batchSize(100)
                .outputMode(AggregationOptions.OutputMode.CURSOR).allowDiskUse(true).build();

        // run aggregation
        Cursor cursor = coll.aggregate(pipeline, aggregationOptions);
        writeToFile3(cursor, filename, "Times");
        cursor.close();
        mongoClient.close();
    } catch (IOException ex) {
        System.out.println("Something's Wrong! " + ex);
    }
}
From source file:io.liveoak.mongo.MongoAggregationResource.java
License:Open Source License
private BasicDBList aggregate(RequestContext ctx) {
    BasicDBList queryObject = new BasicDBList();
    if (ctx.resourceParams() != null && ctx.resourceParams().contains("q")) {
        String queryString = ctx.resourceParams().value("q");
        DBObject paramObject = (DBObject) JSON.parse(queryString);
        if (paramObject instanceof BasicDBList) {
            queryObject = (BasicDBList) paramObject;
        } else {
            queryObject.add(paramObject);
        }
    }

    DBCollection dbCollection = parent().getDBCollection();

    try {
        BasicDBList result = new BasicDBList();
        AggregationOutput output = dbCollection.aggregate((DBObject) queryObject.remove(0),
                queryObject.toArray(new DBObject[queryObject.size()]));
        for (DBObject dbObject : output.results()) {
            result.add(dbObject);
        }
        return result;
    } catch (Exception e) {
        logger().error("", e);
        throw new RuntimeException("Aggregation query failed: ", e);
    }
}
From source file:kiaanfx.Kiaanfx.java
private static void getBuy() {
    try {
        MongoClient mongoClient = new MongoClient("localhost", 27017);
        DB db = mongoClient.getDB("kiaan");
        DBCollection coll = db.getCollection("buy");

        // $unwind the embedded items array
        DBObject unwind = new BasicDBObject("$unwind", "$items");

        // $group key
        DBObject group_id = new BasicDBObject("_id", "$_id");
        group_id.put("num", "$num");
        group_id.put("person_id", "$person_id");
        group_id.put("discount", "$discount");
        group_id.put("increase", "$increase");

        // $group -> $multiply (value * price per item)
        BasicDBList args = new BasicDBList();
        args.add("$items.value");
        args.add("$items.price");
        DBObject multiply = new BasicDBObject("$multiply", args);

        // $group -> $sum of the per-item products
        DBObject group_field = new BasicDBObject();
        group_field.put("_id", group_id);
        group_field.put("total", new BasicDBObject("$sum", multiply));
        DBObject group = new BasicDBObject("$group", group_field);

        // $project
        DBObject project_field = new BasicDBObject("_id", "$_id._id");
        project_field.put("person_id", "$_id.person_id");
        project_field.put("num", "$_id.num");
        BasicDBList arr = new BasicDBList();
        arr.add("$total");
        arr.add("$_id.discount");
        arr.add("$_id.increase");
        DBObject field_add = new BasicDBObject("$add", arr);
        project_field.put("sum", field_add);
        DBObject project = new BasicDBObject("$project", project_field);

        DBObject sort = new BasicDBObject("$sort", new BasicDBObject("_id", 1));

        List<DBObject> pipeline = Arrays.asList(unwind, group, project, sort);

        AggregationOptions aggregationOptions = AggregationOptions.builder().batchSize(100)
                .outputMode(AggregationOptions.OutputMode.CURSOR).allowDiskUse(true).build();

        BasicDBObject dbo = new BasicDBObject();
        Cursor cursor = coll.aggregate(pipeline, aggregationOptions);

        DBCollection person_col = db.getCollection("persons");
        BasicDBObject fields = new BasicDBObject("items.$", 1).append("_id", false);

        while (cursor.hasNext()) {
            dbo = (BasicDBObject) cursor.next();

            // look up the matching person for this purchase
            DBObject query = new BasicDBObject("items._id", (ObjectId) dbo.get("person_id"));
            BasicDBList lst_person = (BasicDBList) person_col.findOne(query, fields).get("items");
            BasicDBObject[] lightArr = lst_person.toArray(new BasicDBObject[0]);

            // derive the creation date from the ObjectId and convert to a Persian calendar date
            Date date = ((ObjectId) lightArr[0].get("_id")).getDate();
            Calendar calendar = Calendar.getInstance();
            calendar.setTime(date);
            persianCalendar persianCalendar = new persianCalendar(calendar);

            dbo.put("date", persianCalendar.getNumericDateFormatWithTime());
            dbo.put("personId", lightArr[0].get("personId").toString());
            dbo.put("first_name", lightArr[0].get("first_name").toString());
            dbo.put("last_name", lightArr[0].get("last_name").toString());

            data.add(new Person(dbo.get("num").toString(), dbo.get("date").toString(),
                    dbo.get("personId").toString(), dbo.get("first_name").toString(),
                    dbo.get("last_name").toString()));

            dbo.remove("person_id");
        }
        System.out.println(dbo.toString());
    } catch (Exception e) {
        System.err.println(e.getClass().getName() + ": " + e.getMessage());
    }
}
From source file:org.apache.calcite.adapter.mongodb.MongoTable.java
License:Apache License
/** Executes an "aggregate" operation for pre-2.6 mongo servers.
 *
 * <p>Return document is limited to 4M or 16M in size depending on
 * version of mongo.
 *
 * <p>Helper method for
 * {@link org.apache.calcite.adapter.mongodb.MongoTable#aggregate}.
 *
 * @param dbCollection Collection
 * @param first First aggregate action
 * @param rest Rest of the aggregate actions
 * @return Aggregation output
 */
private AggregationOutput aggregateOldWay(DBCollection dbCollection, DBObject first, List<DBObject> rest) {
    return dbCollection.aggregate(first, rest.toArray(new DBObject[rest.size()]));
}
From source file:org.apache.camel.component.mongodb.MongoDbProducer.java
License:Apache License
/**
 * All headers except collection and database are not available for this
 * operation.
 *
 * @param exchange
 * @throws Exception
 */
protected void doAggregate(Exchange exchange) throws Exception {
    DBCollection dbCol = calculateCollection(exchange);
    DBObject query = exchange.getIn().getMandatoryBody(DBObject.class);

    // Impossible with the java driver to get the batch size and number to skip
    Iterable<DBObject> dbIterator = null;
    AggregationOutput aggregationResult = null;

    // Allow body to be a pipeline
    // @see http://docs.mongodb.org/manual/core/aggregation/
    if (query instanceof BasicDBList) {
        BasicDBList queryList = (BasicDBList) query;
        aggregationResult = dbCol.aggregate((DBObject) queryList.get(0),
                queryList.subList(1, queryList.size()).toArray(new BasicDBObject[queryList.size() - 1]));
    } else {
        aggregationResult = dbCol.aggregate(query);
    }

    dbIterator = aggregationResult.results();
    Message resultMessage = prepareResponseMessage(exchange, MongoDbOperation.aggregate);
    resultMessage.setBody(dbIterator);
    // Mongo Driver does not allow to read size and to paginate aggregate result
}
From source file:org.eclipse.birt.data.oda.mongodb.internal.impl.MDbOperation.java
License:Open Source License
static Iterable<DBObject> callAggregateCmd(DBCollection dbCollection, QueryProperties queryProps)
        throws OdaException {
    if (!queryProps.hasAggregateCommand())
        return null;

    DBObject operationExprObj = queryProps.getOperationExprAsParsedObject(true);
    if (operationExprObj == null)
        return null;

    // convert user-specified operation expression to operation pipeline
    DBObject firstOp = QueryProperties.getFirstObjectSet(operationExprObj);
    if (firstOp == null)
        return null; // no valid DBObject operation

    DBObject[] addlOps = QueryProperties.getSecondaryObjectSets(operationExprObj);

    // aggregation $limit and $skip operators apply to the number of documents
    // in the *input* pipeline, and thus cannot be used to apply the searchLimit
    // and numSkipDocuments properties defined for the data set;
    // $match and $sort pipeline operators are built into an aggregate command

    // execute the aggregate command
    AggregationOutput output;
    try {
        output = addlOps != null ? dbCollection.aggregate(firstOp, addlOps)
                : dbCollection.aggregate(firstOp);
        output.getCommandResult().throwOnError();
        return output.results();
    } catch (RuntimeException ex) {
        OdaException odaEx = new OdaException(Messages.mDbOp_aggrCmdFailed);
        odaEx.initCause(ex);
        throw odaEx;
    }
}
From source file:org.forgerock.openidm.repo.mongodb.impl.query.Queries.java
License:Open Source License
protected List<DBObject> executeQuery(QueryInfo queryInfo, Map<String, Object> params, DBCollection collection)
        throws BadRequestException {
    List<DBObject> result = null;
    if (queryInfo.isGroupQuery()) {
        List<String> list = queryInfo.getAggregationParams();
        List<DBObject> dboList = new ArrayList<DBObject>();
        DBObject firstParam = new BasicDBObject();
        boolean first = true;
        for (String s : list) {
            DBObject query = resolveQuery(s, params);
            if (first) {
                firstParam = query;
                first = !first;
            } else {
                dboList.add(query);
            }
        }
        AggregationOutput output = collection.aggregate(firstParam,
                (DBObject[]) dboList.toArray(new BasicDBObject[0]));
        if (output.results().iterator().hasNext()) {
            result = new ArrayList<DBObject>();
        }
        for (DBObject obj : output.results()) {
            result.add(obj);
        }
    } else {
        String q = (queryInfo.getQuery() == null) ? "{}" : queryInfo.getQuery();
        DBObject query = resolveQuery(q, params);
        String f = (queryInfo.getFileds() == null) ? "{}" : queryInfo.getFileds();
        DBObject fields = resolveQuery(f, params);
        String s = (queryInfo.getSort() == null) ? "{}" : queryInfo.getSort();
        DBObject sort = resolveQuery(s, params);

        DBCursor cur = null;
        try {
            cur = collection.find(query, fields).sort(sort);
            result = cur.toArray();
        } catch (Exception ex) {
            throw new BadRequestException(ex.getMessage());
        } finally {
            if (cur != null) {
                cur.close();
            }
        }
    }
    return result;
}
From source file:org.jmingo.executor.AbstractQueryExecutor.java
License:Apache License
/**
 * Perform aggregation query.
 *
 * @param dbCollection db collection
 * @param operators    operators
 * @return {@link AggregationOutput}
 */
protected AggregationOutput performAggregationQuery(DBCollection dbCollection, BasicDBList operators) {
    Validate.notNull(dbCollection, "dbCollection cannot be null");
    Validate.notEmpty(operators, "operators cannot be null or empty");
    DBObject firstOperator = (DBObject) operators.remove(FIRST_ELEMENT);
    return dbCollection.aggregate(firstOperator, operators.toArray(new DBObject[FIRST_ELEMENT]));
}
From source file:org.kiaan.Main.java
public static void getBranchesWithProject() {
    try {
        MongoClient mongoClient = new MongoClient("localhost", 27017);
        DB db = mongoClient.getDB("kiaan");
        DBCollection coll = db.getCollection("banks");

        // $unwind the embedded branches array
        DBObject unwind = new BasicDBObject("$unwind", "$branches");

        // $project the bank name plus the branch id and name
        DBObject field = new BasicDBObject("_id", false);
        field.put("name", "$name");
        field.put("branch_id", "$branches.branch_id");
        field.put("branch_name", "$branches.name");
        DBObject project = new BasicDBObject("$project", field);

        // $sort by bank name
        DBObject sort = new BasicDBObject("$sort", new BasicDBObject("name", 1));

        List<DBObject> pipeline = Arrays.asList(unwind, project, sort);

        AggregationOptions aggregationOptions = AggregationOptions.builder().batchSize(100)
                .outputMode(AggregationOptions.OutputMode.CURSOR).allowDiskUse(true).build();

        BasicDBObject dbo;
        Cursor cursor = coll.aggregate(pipeline, aggregationOptions);
        while (cursor.hasNext()) {
            dbo = (BasicDBObject) cursor.next();
            System.out.println(dbo.toString());
        }
    } catch (Exception e) {
        System.err.println(e.getClass().getName() + ": " + e.getMessage());
    }
}