Example usage for com.mongodb DBCollection find

List of usage examples for com.mongodb DBCollection find

Introduction

On this page you can find example usage for com.mongodb DBCollection find.

Prototype

public DBCursor find() 

Source Link

Document

Select all documents in collection and get a cursor to the selected documents.

Usage

From source file:com.tengen.Week3Hw3_1.java

License:Apache License

/**
 * Homework 3.1: for each student document in school.students, remove the single
 * lowest-scoring "homework" entry from its "scores" array.
 *
 * @param args unused command-line arguments
 * @throws UnknownHostException if the MongoDB server cannot be resolved
 */
public static void main(String[] args) throws UnknownHostException {
    MongoClient client = new MongoClient();

    DB database = client.getDB("school");
    DBCollection collection = database.getCollection("students");

    DBCursor cursor = collection.find();
    try {
        while (cursor.hasNext()) {
            DBObject student = cursor.next();
            BasicDBObject searchQuery = new BasicDBObject().append("_id", student.get("_id"));
            BasicDBList scores = (BasicDBList) student.get("scores");
            BasicDBObject[] scoresArr = scores.toArray(new BasicDBObject[0]);

            // Track the lowest homework score seen so far; null means "none found yet".
            double lowestScore = Double.MAX_VALUE;
            BasicDBObject lowestHomework = null;
            for (BasicDBObject dbObj : scoresArr) {
                String type = dbObj.get("type").toString();
                if (type.equals("homework")) {
                    double s = Double.parseDouble(dbObj.get("score").toString());
                    if (s < lowestScore) {
                        lowestScore = s;
                        lowestHomework = dbObj;
                    }
                }
            }

            // Only pull when a homework entry was actually found. Pulling with an
            // empty document ({}) would match EVERY array element and wipe all scores.
            if (lowestHomework != null) {
                BasicDBObject update = new BasicDBObject("scores", lowestHomework);
                collection.update(searchQuery, new BasicDBObject("$pull", update));
            }
        }
    } finally {
        cursor.close();
        client.close();
    }
}

From source file:com.timboudreau.netbeans.mongodb.CollectionChildFactory.java

License:Open Source License

@Override
protected boolean createKeys(final List<DBObject> list) {
    // Lazily pages documents out of the collection into 'list', at most
    // CollectionNode.maxCursorSize per invocation. Returns true once the
    // cursor is exhausted (or the collection is empty). The cursor is held
    // in a field between calls; errors are routed through ConnectionProblems.
    ConnectionProblems pblms = lookup.lookup(ConnectionProblems.class);
    Boolean done = pblms.invoke(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            if (cursor == null) {
                // First call (or previous page finished): open a fresh cursor.
                DBCollection coll = lookup.lookup(DBCollection.class);
                cursor = coll.find();
                int ct = cursor.count();
                if (ct > 0) {
                    cursor.batchSize(Math.min(CollectionNode.maxCursorSize, ct));
                } else {
                    // Empty collection: no cursor to page through.
                    cursor = null;
                }
            }
            boolean done = cursor == null;
            if (!done) {
                // Drain up to one page of documents into the key list.
                for (int i = 0; i < CollectionNode.maxCursorSize && cursor != null && cursor.hasNext(); i++) {
                    list.add(cursor.next());
                }
                done = !cursor.hasNext();
                if (done) {
                    // Exhausted: release server resources and reset for reuse.
                    cursor.close();
                    cursor = null;
                }
            }
            return done;
        }
    });
    // invoke() may return null if the callable failed; treat that as "done"
    // so the child factory does not loop forever on a broken connection.
    return done == null ? true : done;
}

From source file:com.tomtom.speedtools.mongodb.migratedb.MongoDBMigration.java

License:Apache License

/**
 * Used to modify top-level documents. Documents will be stored in the collection when modified.
 *
 * @param db             Database.
 * @param collectionName Collection to iterate over.
 * @return Iterable to loop over all documents.
 */
@Nonnull
protected Iterable<DBObject> migrateCollection(@Nonnull final MongoDB db,
        @Nonnull final String collectionName) {
    assert db != null;
    assert collectionName != null;

    rootContext.flush();
    final DBCollection collection = db.getCollection(collectionName);

    final long count = collection.count();
    if (count > Integer.MAX_VALUE) {
        addProblem("",
                "Collection has too many records (" + count + ", where " + Integer.MAX_VALUE + " is max)");
    }

    /**
     * This set is going to contain all records for sure, so make sure it is large enough not to get
     * re-allocated all the time.
     *
     * See HashMap's class description at [http://docs.oracle.com/javase/6/docs/api/java/util/HashMap.html],
     * specifically "The expected number of entries in the map and its load factor should be taken into account
     * when setting its initial capacity, so as to minimize the number of rehash operations. If the initial
     * capacity is greater than the maximum number of entries divided by the load factor, no rehash operations
     * will ever occur.".
     */
    @SuppressWarnings("NumericCastThatLosesPrecision")
    final Set<Object> recordIds = new HashSet<>((int) ((double) count / 0.75) + 1);

    return new IterableDelegate<DBObject, DBObject>(collection.find()) {

        // 1-based position of the current document, used for context paths.
        private int index = 1;

        @Nullable
        @Override
        public DBObject next(@Nonnull final DBObject value) {

            final Context context = rootContext.createChild(value, collectionName + ':' + index);
            index++;

            // Each document should have an _id field.
            final Object id = value.get("_id");
            if (id == null) {
                addProblem(context.path, "Document has no _id field: " + value);
                return null;
            }

            // Don't process records we have already processed. This can happen if a record
            // is modified.
            if (recordIds.contains(id)) {
                return null;
            }
            recordIds.add(id);

            // Keep original value in immutable string, referenced from 'flush()'.
            final String originalStringValue = value.toString();

            // Save object. The actual write is deferred to flush() so it runs after
            // all migration commands have mutated 'value'.
            context.add(new Command() {

                @Override
                public void flush() {

                    // If the new value differs from the old one, store it and print it.
                    // NOTE(review): comparison is via toString(), so changes invisible in
                    // the string form would not be saved — presumably acceptable here.
                    final String stringValue = value.toString();
                    if (!originalStringValue.equals(stringValue)) {
                        if (!dryRun) {
                            collection.save(value);
                        }
                        LOG.debug(context.path + " - original document: " + originalStringValue);
                        LOG.debug(context.path + " - migrated document: " + value);
                    }
                }

                @Override
                public int ranking() {
                    return Integer.MAX_VALUE; // Saves should be executed last.
                }
            });

            return value;
        }
    };
}

From source file:com.uquetignyadminapp.connection.ConnectionMongoDB.java

/**
 * Deletes the named collection ("table") and everything in it.
 *
 * <p>drop() removes the collection and all of its documents in a single
 * server call; the previous implementation first removed every document
 * one-by-one through an (unclosed) cursor, which was redundant O(n) work.
 *
 * @param table name of the collection to drop
 */
public void supprimerTable(String table) {
    DBCollection col = getRecordsOfSpecificCollection(table);
    col.drop();
}

From source file:com.uquetignyadminapp.visual.Admin_NewTuples.java

/**
 * Converts every document of the given collection into a Map.
 *
 * <p>Fix: the previous implementation called {@code find().toArray()} on
 * every loop iteration, re-querying the whole collection (O(n²)) and leaking
 * one unclosed cursor per document. We now consume the single cursor directly.
 *
 * @param ObjectsOfCollection collection to read
 * @return list of one Map per document, in cursor order
 */
public ArrayList<Map> fillSpecificCollection(DBCollection ObjectsOfCollection) {
    ArrayList<Map> objectData = new ArrayList<Map>();
    DBCursor dbcur = ObjectsOfCollection.find();
    try {
        while (dbcur.hasNext()) {
            objectData.add(dbcur.next().toMap());
        }
    } finally {
        dbcur.close();
    }
    return objectData;
}

From source file:com.uquetignywebapp.common.MainServlet.java

/**
 * Converts every document of the given collection into a Map.
 *
 * <p>Fix: the previous implementation called {@code find().toArray()} on
 * every loop iteration, re-querying the whole collection (O(n²)) and leaking
 * one unclosed cursor per document. We now consume the single
 * try-with-resources cursor directly.
 *
 * @param ObjectsOfCollection collection to read
 * @return list of one Map per document, in cursor order
 */
public ArrayList<Map> fillSpecificCollection(DBCollection ObjectsOfCollection) {
    ArrayList<Map> objectData = new ArrayList<>();
    try (DBCursor dbcur = ObjectsOfCollection.find()) {
        while (dbcur.hasNext()) {
            objectData.add(dbcur.next().toMap());
        }
    }
    return objectData;
}

From source file:com.xemsdoom.xeco.core.storage.mongodb.MongoDBStorage.java

License:Open Source License

@Override
public Set<Account> loadAccounts() {

    DBCollection coll = db.getCollection(collection);
    DBCursor cursor = coll.find();
    HashSet<Account> accounts = new HashSet<Account>();

    try {/*from   ww  w. java2  s  .  c  o m*/

        // Iterate over all documents
        while (cursor.hasNext()) {

            DBObject document = cursor.next();

            String accName = (String) document.get("_id");
            double amount = (Double) document.get("balance");
            boolean frozen = (Boolean) document.get("freezed");

            // Cleaner
            if (main.getConfig().getNode("Xeco.Cleaner.CleanUnusedAccounts").getBoolean()
                    && (amount == main.getConfig().getNode("Xeco.Account.DefaultBalance").getDouble()
                            && !frozen)) {
                accManager.removeAccount(accName);
                continue;
            }
            accounts.add(new Account(accName, amount, frozen));
        }
    } finally {
        if (cursor != null) {
            cursor.close();
        }
    }
    return accounts;
}

From source file:com.zjy.mongo.splitter.MongoCollectionSplitter.java

License:Apache License

/**
 * Contacts the config server and builds a map of each shard's name to its
 * host(s) by examining config.shards./*from  w  w w  .j  av  a  2 s .  c  om*/
 * @return a Map of shard name onto shard hostnames
 */
protected Map<String, String> getShardsMap() {
    DBCursor cur = null;
    HashMap<String, String> shardsMap = new HashMap<String, String>();
    DB configDB = null;
    try {
        configDB = getConfigDB();
        DBCollection shardsCollection = configDB.getCollection("shards");
        cur = shardsCollection.find();
        while (cur.hasNext()) {
            final BasicDBObject row = (BasicDBObject) cur.next();
            String host = row.getString("host");
            // for replica sets host will look like: "setname/localhost:20003,localhost:20004"
            int slashIndex = host.indexOf('/');
            if (slashIndex > 0) {
                host = host.substring(slashIndex + 1);
            }
            shardsMap.put((String) row.get("_id"), host);
        }
    } finally {
        if (cur != null) {
            cur.close();
        }
    }
    return shardsMap;
}

From source file:com.zousu.mongopresser.MongoHandler.java

License:Open Source License

/**
 * Loads every document of the named collection into a list.
 *
 * <p>Fix: the cursor obtained through {@code find().iterator()} was never
 * closed, leaking a server-side cursor on every call; it is now closed in a
 * finally block.
 *
 * @param collectionName name of the collection to read
 * @return all documents in the collection, in cursor order
 * @throws CollectionMissingException if the collection does not exist
 */
public List<DBObject> getObjectsFromCollection(String collectionName) throws CollectionMissingException {
    if (!mongoOperation.collectionExists(collectionName)) {
        throw new CollectionMissingException();
    }

    DBCollection dbCol = mongoOperation.getCollection(collectionName);
    List<DBObject> objList = new ArrayList<DBObject>();
    DBCursor cursor = dbCol.find();
    try {
        while (cursor.hasNext()) {
            objList.add(cursor.next());
        }
    } finally {
        cursor.close();
    }
    return objList;
}

From source file:controllers.FilterController.java

License:Apache License

/**
 * Builds a histogram graph for the given collection/property pair. If the
 * pre-computed histogram collection is empty, a map-reduce job is run to
 * produce it; otherwise the stored histogram documents are parsed.
 *
 * <p>Fixes: {@code dbc.find().count()} created a throwaway cursor just to
 * count ({@code dbc.count()} asks the server directly), and the iteration
 * cursor was never closed.
 *
 * @param collection source collection name
 * @param property   property the histogram was computed over
 * @return sorted graph of keys/values, long tail cut when over 100 keys
 */
public static Graph getGraph(String collection, String property) {
    final PersistenceLayer p = Configurator.getDefaultConfigurator().getPersistence();
    final List<String> keys = new ArrayList<String>();
    final List<String> values = new ArrayList<String>();
    final Graph result = new Graph(property, keys, values);

    DBCollection dbc = p.getDB().getCollection("histogram_" + collection + "_" + property);

    if (dbc.count() == 0) {
        // No cached histogram yet: compute it via map-reduce.
        final MapReduceJob job = new HistogramJob(collection, property);
        final MapReduceOutput output = job.execute();

        calculateHistogramResults(output, keys, values);

    } else {
        // Cached histogram exists: parse the stored documents.
        DBCursor cursor = dbc.find();
        try {
            while (cursor.hasNext()) {
                BasicDBObject dbo = (BasicDBObject) cursor.next();
                parseHistogram(dbo, keys, values);
            }
        } finally {
            cursor.close();
        }
    }

    result.sort();

    if (result.getKeys().size() > 100) {
        result.cutLongTail();
    }

    return result;
}