Example usage for com.mongodb DBCollection count

Introduction

This page collects example usages of com.mongodb DBCollection count from open source projects.

Prototype

public long count() 

Document

Same as #getCount(); returns the number of documents in the collection.
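
A minimal, self-contained sketch of the call (hedged: the host, database, and collection names below are placeholders, and this assumes the legacy 2.x Java driver API used throughout the examples on this page):

import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class CountExample {
    public static void main(String[] args) throws Exception {
        // "localhost", "mydb" and "mycollection" are placeholder names.
        MongoClient mongo = new MongoClient("localhost");
        try {
            DB db = mongo.getDB("mydb");
            DBCollection collection = db.getCollection("mycollection");
            // count() returns the total number of documents; same as getCount().
            long total = collection.count();
            System.out.println("documents: " + total);
        } finally {
            mongo.close();
        }
    }
}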

Usage

From source file:net.handle.server.MongoDBHandleStorage.java

License:Open Source License

/**
 * Drops the collection for the given naming authority and returns the
 * number of records it contained.
 *
 * @param na Naming authority
 * @return the number of records that were removed
 */
public long deleteAllRecords(String na) {

    final DBCollection collection = getCollection(Util.encodeString(na));
    long count = collection.count();
    collection.drop();
    return count;
}

From source file:net.kamradtfamily.mongorest.CollectionsServlet.java

License:GNU General Public License

@Override
protected void doGet(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException {

    log.fine("doGet()");

    String db_name = req.getParameter("dbname");
    String op = req.getParameter("op");
    if (db_name == null) {
        String names[] = req2mongonames(req);
        if (names != null) {
            db_name = names[0];
        }
        if (db_name == null) {
            error(res, SC_BAD_REQUEST, Status.get("param name missing"));
            return;
        }
    }

    if (op == null)
        op = "list";

    DB db = mongo.getDB(db_name);

    if ("list".equals(op)) {
        Set<String> cols = db.getCollectionNames();
        out_json(req, cols);
        return;
    }

    // requires colname
    String col_name = req.getParameter("colname");
    if (col_name == null) {
        error(res, SC_BAD_REQUEST, Status.get("param name missing"));
        return;
    }
    DBCollection col = db.getCollection(col_name);

    if ("count".equals(op)) {
        out_str(req, "{\"count\":" + col.count() + "}", "application/json");
    } else if ("stats".equals(op)) {
        BasicBSONObject o = col.getStats();
        out_str(req, o.toString(), "application/json");
    } else
        res.sendError(SC_BAD_REQUEST);

}

From source file:nl.knaw.huygens.timbuctoo.storage.mongo.MongoDB.java

License:Open Source License

/**
 * Returns the number of documents in the specified collection.
 */
public long count(DBCollection collection) throws StorageException {
    try {
        return collection.count();
    } catch (MongoException e) {
        throw new StorageException(e);
    }
}

From source file:org.alfresco.provision.Mongo.java

License:Open Source License

private boolean isEmpty(String alfrescoHost, String mirror) {
    boolean isEmpty = true;

    String collectionName = collectionName(alfrescoHost, mirror);
    if (db.collectionExists(collectionName)) {
        DBCollection collection = db.getCollection(collectionName);
        long count = collection.count();
        if (count > 0) {
            isEmpty = false;
        }
    }

    return isEmpty;
}

From source file:org.apache.camel.component.gridfs.GridFsConsumer.java

License:Apache License

@Override
public void run() {
    DBCursor c = null;
    java.util.Date fromDate = null;

    QueryStrategy s = endpoint.getQueryStrategy();
    boolean usesTimestamp = (s != QueryStrategy.FileAttribute);
    boolean persistsTimestamp = (s == QueryStrategy.PersistentTimestamp
            || s == QueryStrategy.PersistentTimestampAndFileAttribute);
    boolean usesAttribute = (s == QueryStrategy.FileAttribute || s == QueryStrategy.TimeStampAndFileAttribute
            || s == QueryStrategy.PersistentTimestampAndFileAttribute);

    DBCollection ptsCollection = null;
    DBObject persistentTimestamp = null;
    if (persistsTimestamp) {
        ptsCollection = endpoint.getDB().getCollection(endpoint.getPersistentTSCollection());
        // ensure standard indexes as long as collections are small
        try {
            if (ptsCollection.count() < 1000) {
                ptsCollection.createIndex(new BasicDBObject("id", 1));
            }
        } catch (MongoException e) {
            //TODO: Logging
        }
        persistentTimestamp = ptsCollection.findOne(new BasicDBObject("id", endpoint.getPersistentTSObject()));
        if (persistentTimestamp == null) {
            persistentTimestamp = new BasicDBObject("id", endpoint.getPersistentTSObject());
            fromDate = new java.util.Date();
            persistentTimestamp.put("timestamp", fromDate);
            ptsCollection.save(persistentTimestamp);
        }
        fromDate = (java.util.Date) persistentTimestamp.get("timestamp");
    } else if (usesTimestamp) {
        fromDate = new java.util.Date();
    }
    try {
        Thread.sleep(endpoint.getInitialDelay());
        while (isStarted()) {
            if (c == null || c.getCursorId() == 0) {
                if (c != null) {
                    c.close();
                }
                String queryString = endpoint.getQuery();
                DBObject query;
                if (queryString == null) {
                    query = new BasicDBObject();
                } else {
                    query = (DBObject) JSON.parse(queryString);
                }
                if (usesTimestamp) {
                    query.put("uploadDate", new BasicDBObject("$gt", fromDate));
                }
                if (usesAttribute) {
                    query.put(endpoint.getFileAttributeName(), null);
                }
                c = endpoint.getFilesCollection().find(query);
            }
            boolean dateModified = false;
            while (c.hasNext() && isStarted()) {
                GridFSDBFile file = (GridFSDBFile) c.next();
                GridFSDBFile forig = file;
                if (usesAttribute) {
                    file.put(endpoint.getFileAttributeName(), "processing");
                    DBObject q = BasicDBObjectBuilder.start("_id", file.getId()).append("camel-processed", null)
                            .get();
                    forig = (GridFSDBFile) endpoint.getFilesCollection().findAndModify(q, null, null, false,
                            file, true, false);
                }
                if (forig != null) {
                    file = endpoint.getGridFs().findOne(new BasicDBObject("_id", file.getId()));

                    Exchange exchange = endpoint.createExchange();
                    exchange.getIn().setHeader(GridFsEndpoint.GRIDFS_METADATA,
                            JSON.serialize(file.getMetaData()));
                    exchange.getIn().setHeader(Exchange.FILE_CONTENT_TYPE, file.getContentType());
                    exchange.getIn().setHeader(Exchange.FILE_LENGTH, file.getLength());
                    exchange.getIn().setHeader(Exchange.FILE_LAST_MODIFIED, file.getUploadDate());
                    exchange.getIn().setBody(file.getInputStream(), InputStream.class);
                    try {
                        getProcessor().process(exchange);
                        //System.out.println("Processing " + file.getFilename());
                        if (usesAttribute) {
                            forig.put(endpoint.getFileAttributeName(), "done");
                            endpoint.getFilesCollection().save(forig);
                        }
                        if (usesTimestamp) {
                            if (file.getUploadDate().compareTo(fromDate) > 0) {
                                fromDate = file.getUploadDate();
                                dateModified = true;
                            }
                        }
                    } catch (Exception e) {
                        // TODO Auto-generated catch block
                        e.printStackTrace();
                    }
                }
            }
            if (persistsTimestamp && dateModified) {
                persistentTimestamp.put("timestamp", fromDate);
                ptsCollection.save(persistentTimestamp);
            }
            Thread.sleep(endpoint.getDelay());
        }
    } catch (Throwable e1) {
        // TODO Auto-generated catch block
        e1.printStackTrace();
    }
    if (c != null) {
        c.close();
    }
}

From source file:org.apache.camel.component.mongodb.MongoDbProducer.java

License:Apache License

protected void doCount(Exchange exchange) throws Exception {
    DBCollection dbCol = calculateCollection(exchange);
    Long answer = Long.valueOf(dbCol.count());
    Message resultMessage = prepareResponseMessage(exchange, MongoDbOperation.count);
    resultMessage.setBody(answer);
}

From source file:org.apache.hadoop.contrib.mongoreduce.MongoOutputCommitter.java

License:Apache License

@Override
public void setupJob(JobContext jobContext) throws IOException {

    /**
     * note: we don't really have to do anything here -
     * MongoDB is one of the few systems that don't require you to
     * create a database or collection before writing to it
     * 
     * but in order to ingest a ton of data quickly we have to 
     * pre-split the output collection
     *
     */

    Configuration conf = jobContext.getConfiguration();
    if (conf.getBoolean("mongo.output.skip_splitting", false))
        return;

    String database = conf.get("mongo.output.database");
    String collection = conf.get("mongo.output.collection");

    // connect to global db
    Mongo m = new Mongo("localhost");
    DB db = m.getDB(database);
    DB admindb = m.getDB("admin");
    DB configdb = m.getDB("config");

    // optionally drop the existing collection
    boolean drop = conf.getBoolean("mongo.output.drop", false);
    DBCollection coll = db.getCollection(collection);
    if (drop) {
        coll.drop();
    } else {
        if (coll.count() > 0) {
            // don't shard an existing collection - may already be sharded ...
            return;
        }
    }

    // get a list of shards
    ArrayList<String> shards = new ArrayList<String>();
    for (DBObject s : configdb.getCollection("shards").find()) {
        shards.add((String) s.get("_id"));
    }

    if (shards.size() < 2) {
        // don't let's be silly - nice sharded cluster, no shard
        return;
    }

    // shard the new output collection
    BasicDBObjectBuilder builder = new BasicDBObjectBuilder();
    builder.add("enableSharding", database);
    admindb.command(builder.get());

    builder = new BasicDBObjectBuilder();
    builder.add("shardCollection", database + "." + collection);

    // just shard on _id - but user gets to decide what the _id is
    builder.add("key", new BasicDBObject("_id", 1));
    admindb.command(builder.get());

    // pre-split to get parallel writes
    // this http://www.mongodb.org/display/DOCS/Splitting+Chunks says 
    // balancer moving chunks should take 5 minutes ... too long
    // wonder if moveChunk command is faster
    // well we could do it anyway - the jobs that can benefit from it will

    // check for user-submitted splitPoints
    String[] splits;
    String splitString = conf.get("mongo.output.split_points", "");

    // generate our own split points if necessary
    if (splitString.equals("")) {
        long max = (long) Math.pow(93.0, 5.0);

        long step = max / shards.size();
        splits = new String[shards.size() - 1];

        // assume human readable keys
        for (int i = 0; i < shards.size() - 1; i++) {
            splits[i] = splitPointForLong(step * (i + 1));
        }
    } else {
        splits = splitString.split(",");
    }

    HashMap<String, Object> splitCmd = new HashMap<String, Object>();
    splitCmd.put("split", database + "." + collection);
    splitCmd.put("middle", "");

    HashMap<String, Object> moveCmd = new HashMap<String, Object>();
    moveCmd.put("moveChunk", database + "." + collection);
    moveCmd.put("find", "");
    moveCmd.put("to", "");

    // do the splitting and migrating
    // we assign chunks to shards in a round-robin manner
    int i = 0;
    for (String split : splits) {

        splitCmd.remove("middle");
        splitCmd.put("middle", new BasicDBObject("_id", split));

        // create new chunk
        admindb.command(new BasicDBObject(splitCmd));

        // move to shard
        moveCmd.remove("find");
        moveCmd.put("find", new BasicDBObject("_id", split));
        moveCmd.put("to", shards.get(i));

        admindb.command(new BasicDBObject(moveCmd));

        i = (i + 1) % shards.size();
    }
}

From source file:org.apache.hadoop.contrib.mongoreduce.MongoStreamOutputFormat.java

License:Apache License

public void checkOutputSpecs(FileSystem ignored, JobConf conf) throws IOException {

    if (conf.getBoolean("mongo.output.skip_splitting", false))
        return;

    String database = conf.get("mongo.output.database", "");
    if (database.equals("")) {
        throw new IOException("must specify a value for mongo.output.database");
    }

    String collection = conf.get("mongo.output.collection", "");
    if (collection.equals("")) {
        throw new IOException("must supply a value for mongo.output.collection");
    }

    // connect to global db
    Mongo m = new Mongo("localhost");
    DB db = m.getDB(database);
    DB admindb = m.getDB("admin");
    DB configdb = m.getDB("config");

    // optionally drop the existing collection
    boolean drop = conf.getBoolean("mongo.output.drop", false);
    DBCollection coll = db.getCollection(collection);
    if (drop) {
        coll.drop();
    } else {
        if (coll.count() > 0) {
            // don't shard an existing collection - may already be sharded ...
            return;
        }
    }

    // get a list of shards
    ArrayList<String> shards = new ArrayList<String>();
    for (DBObject s : configdb.getCollection("shards").find()) {
        shards.add((String) s.get("_id"));
    }

    if (shards.size() < 2) {
        // don't let's be silly
        return;
    }

    // shard the new output collection
    BasicDBObjectBuilder builder = new BasicDBObjectBuilder();
    builder.add("enableSharding", database);
    admindb.command(builder.get());

    builder = new BasicDBObjectBuilder();
    builder.add("shardCollection", database + "." + collection);

    // just shard on _id - but user gets to decide what the _id is
    builder.add("key", new BasicDBObject("_id", 1));
    admindb.command(builder.get());

    // pre-split to get parallel writes
    // this http://www.mongodb.org/display/DOCS/Splitting+Chunks says 
    // balancer moving chunks should take 5 minutes ... too long
    // wonder if moveChunk command is faster
    // well we could do it anyway - the jobs that can benefit from it will

    // check for user-submitted splitPoints
    String[] splits;
    String splitString = conf.get("mongo.output.split_points", "");

    // generate our own split points if necessary
    if (splitString.equals("")) {
        long max = (long) Math.pow(93.0, 5.0);

        long step = max / shards.size();
        splits = new String[shards.size() - 1];

        // assume human readable keys
        for (int i = 0; i < shards.size() - 1; i++) {
            splits[i] = splitPointForLong(step * (i + 1));
        }
    } else {
        splits = splitString.split(",");
    }

    HashMap<String, Object> splitCmd = new HashMap<String, Object>();
    splitCmd.put("split", database + "." + collection);
    splitCmd.put("middle", "");

    HashMap<String, Object> moveCmd = new HashMap<String, Object>();
    moveCmd.put("moveChunk", database + "." + collection);
    moveCmd.put("find", "");
    moveCmd.put("to", "");

    // do the splitting and migrating
    // we assign chunks to shards in a round-robin manner
    int i = 0;
    for (String split : splits) {

        splitCmd.remove("middle");
        splitCmd.put("middle", new BasicDBObject("_id", split));

        // create new chunk
        admindb.command(new BasicDBObject(splitCmd));

        // move to shard
        moveCmd.remove("find");
        moveCmd.put("find", new BasicDBObject("_id", split));
        moveCmd.put("to", shards.get(i));

        admindb.command(new BasicDBObject(moveCmd));

        i = (i + 1) % shards.size();
    }
}

From source file:org.apache.rya.mongodb.instance.MongoRyaInstanceDetailsRepository.java

License:Apache License

@Override
public boolean isInitialized() throws RyaDetailsRepositoryException {
    final DBCollection col = db.getCollection(INSTANCE_DETAILS_COLLECTION_NAME);
    return col.count() == 1;
}

From source file:org.aw20.mongoworkbench.command.CollectionCountMongoCommand.java

License:Open Source License

@Override
public void execute() throws Exception {
    MongoClient mdb = MongoFactory.getInst().getMongo(sName);
    if (mdb == null)
        throw new Exception("no server selected");

    if (sDb == null)
        throw new Exception("no database selected");

    MongoFactory.getInst().setActiveDB(sDb);

    DB db = mdb.getDB(sDb);

    DBCollection collection = db.getCollection(sColl);

    setMessage("count=" + collection.count());
}