Example usage for com.mongodb DBCursor close

List of usage examples for com.mongodb DBCursor close

Introduction

On this page you can find example usage for com.mongodb DBCursor close.

Prototype

@Override
public void close()
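
A DBCursor holds resources on the server until it is exhausted or closed, so close() should be called once iteration is finished, even when an exception is thrown. A minimal sketch of the usual pattern (the database name, collection name, and filter below are placeholders, not taken from the examples that follow):

DB db = mongoClient.getDB("mydb");                                // hypothetical database
DBCollection users = db.getCollection("users");                   // hypothetical collection
DBCursor cursor = users.find(new BasicDBObject("active", true));  // hypothetical filter
try {
    while (cursor.hasNext()) {
        DBObject doc = cursor.next();
        // ... process doc ...
    }
} finally {
    cursor.close(); // release the server-side cursor even if iteration failed
}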

Source Link

Usage

From source file:com.ibm.bluemix.smartveggie.dao.SubOutletVendorAllocationDaoImpl.java

@Override
public BasicDBObject getVendorForSubOutlet(String vendorUserName) {
    try {
        System.out.println("starting object retrieve..");
        DB db = MongodbConnection.getMongoDB();
        BasicDBObject query = new BasicDBObject();
        query.append("vendorUsername", vendorUserName);
        System.out.println("Querying for getting vendor suboutlet: " + query);
        DBCollection col = db.getCollection(ICollectionName.COLLECTION_ALLOC_SUBOUTLETS);
        DBCursor cursor = col.find(query);
        BasicDBObject obj = null;
        while (cursor.hasNext()) {
            obj = (BasicDBObject) cursor.next();
            //Check the date
            Date currentDate = Calendar.getInstance().getTime();
            SimpleDateFormat dateFormat = new SimpleDateFormat("dd/MM/yyyy");
            String allocationTillString = (String) obj.get("suboutletAllocatedTo");
            if (allocationTillString != null) {
                Date allocEndDate = null;
                try {
                    allocEndDate = dateFormat.parse(allocationTillString);
                    if (allocEndDate.before(currentDate)) {
                        System.out.println("Suboutlet Allocation already ended....");
                        //subOutletAvailableList.add(allocation);
                    } else {
                        break;
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
                System.out.println("Retrieved Allocated Vendor suboutlet: " + obj);
            }
        }
        cursor.close();
        return obj;
    } catch (Exception e) {
        throw e;
    }
}

From source file:com.ibm.bluemix.smartveggie.dao.UserDaoImpl.java

@Override
public BasicDBObject getUser(String userName, String password) {
    DB db = MongodbConnection.getMongoDB();
    DBCollection col = db.getCollection(ICollectionName.COLLECTION_USERS);
    BasicDBObject obj = null;
    try {
        System.out.println("starting object read..");
        BasicDBObject query = new BasicDBObject();
        if ((userName != null) && (!userName.isEmpty()))
            query.append("userName", userName);
        if ((password != null) && (!password.isEmpty()))
            query.append("password", password);
        System.out.println("Querying for: " + query);
        DBCursor cursor = col.find(query);
        while (cursor.hasNext()) {
            obj = (BasicDBObject) cursor.next();
            System.out.println("Retrieved: " + obj);
        }
        cursor.close();
    } catch (Exception e) {
        throw e;
    }
    return obj;
}

From source file:com.ibm.bluemix.smartveggie.dao.UserDaoImpl.java

@Override
public List<BasicDBObject> retrieveAllUsersForType(String userTypeCode) {
    List<BasicDBObject> listDBObjects = null;
    try {
        System.out.println("Retrieving All User of Type..." + userTypeCode);
        listDBObjects = new ArrayList<BasicDBObject>();
        DB db = MongodbConnection.getMongoDB();
        DBCollection col = db.getCollection(ICollectionName.COLLECTION_USERS);

        BasicDBObject query = new BasicDBObject();
        if ((userTypeCode != null) && (!userTypeCode.isEmpty()))
            query.append("userType", userTypeCode);
        System.out.println("Querying for: " + query);

        DBCursor cursor = col.find(query);
        BasicDBObject obj = null;
        while (cursor.hasNext()) {
            obj = (BasicDBObject) cursor.next();
            System.out.println("Retrieved: " + obj);
            listDBObjects.add(obj);
        }
        cursor.close();
    } catch (Exception e) {
        throw e;
    }
    return listDBObjects;
}

From source file:com.ifactory.service.weather.photo.PhotoService.java

License:Apache License

public ArrayList<Photo> get() {
    DB db = mongoClient.getDB(this.dbName);
    DBCollection coll = db.getCollection(PHOTO_COLLECTION);
    BasicDBObject query = null;
    DBCursor cursor = null;
    ArrayList<Photo> photoList = new ArrayList<Photo>();
    int weatherClassMin = -1;
    int weatherClassMax = -1;
    double rad = this.radius;

    while (true) {
        // If latitude and longitude were given, append geo search query
        if (this.lat != UNAVAILABLE_LATITUDE && this.lng != UNAVAILABLE_LONGITUDE) {
            query = setGeoCoord(this.lat, this.lng, rad);
        } else {
            query = new BasicDBObject();
        }

        // If the weather ID was given, append a weather search query
        if (this.weatherId > 0) {
            if (weatherClassMin == -1 && weatherClassMax == -1) {
                query.append("weather", this.weatherId);
            } else {
                System.out.println("query with weatherClassMin(" + weatherClassMin + ") and weatherClassMax("
                        + weatherClassMax + ")");
                query.append("weather",
                        new BasicDBObject("$gte", weatherClassMin).append("$lte", weatherClassMax));
                System.out.println(query.toString());
            }
        }

        try {
            cursor = coll.find(query).limit(this.limit);
            if (cursor.count() > 0) {
                System.out.println(
                        "photo found(lat: " + this.lat + ", lng: " + this.lng + ", radius: " + rad + ")");
                break;
            } else if (this.growable == false || rad >= UNAVAILABLE_LATITUDE) {
                if (rad >= UNAVAILABLE_LATITUDE) {
                    rad = this.radius / 2;
                    if (weatherClassMin == -1 && weatherClassMax == -1) {
                        // In this case, there are no proper photos for the given weather.
                        // Let's find any photos bound for the same weather class.
                        weatherClassMin = ((int) this.weatherId / 100) * 100;
                        weatherClassMax = (((int) this.weatherId / 100) + 1) * 100;
                        System.out.println("weatherClassMin and weatherClassMax exist");
                    } else if (this.weatherId > 0) {
                        this.weatherId = 0;
                        System.out.println("weatherid goes to zero");
                        continue;
                    } else {
                        break;
                    }
                } else {
                    break;
                }
            }
        } catch (CommandFailureException e) {
            cursor = null;
            break;
        }

        rad = rad * 2;
    }

    try {
        while (cursor != null && cursor.hasNext()) {
            DBObject obj = cursor.next();
            Photo.Builder b = new Photo.Builder((String) obj.get("name"),
                    ((Number) obj.get("weather")).intValue());

            ArrayList<Double> coord = ((ArrayList<Double>) ((DBObject) obj.get("geo")).get("coordinates"));
            b.geoCoord(coord.get(0), coord.get(1)).day(((Boolean) obj.get("day")).booleanValue())
                    .timestamp(((Number) obj.get("timestamp")).longValue()).origin(this.origin);

            photoList.add(b.build());
        }
    } finally {
        if (cursor != null) {
            cursor.close();
        }
    }
    return photoList;
}

From source file:com.ikanow.infinit.e.harvest.extraction.document.file.InternalInfiniteFile.java

License:Open Source License

@Override
public InfiniteFile[] listFiles(Date optionalFilterDate, int maxDocsPerCycle) {
    if (_isDirectory) {
        if (_isShare) { // must be a zip file
            ArrayList<InfiniteFile> zipFiles = new ArrayList<InfiniteFile>();
            @SuppressWarnings("unchecked")
            Enumeration<net.sf.jazzlib.ZipEntry> entries = _zipView.entries();
            while (entries.hasMoreElements()) {
                net.sf.jazzlib.ZipEntry zipInfo = entries.nextElement();
                InternalInfiniteFile newFile = new InternalInfiniteFile(this, zipInfo.getName());
                zipFiles.add(newFile);
            }
            return zipFiles.toArray(new InfiniteFile[zipFiles.size()]);
        } //TESTED (3.2)
        else if (_isCustom) { // create some virtual directories eg at most 10K per "virtual directory"
            String outputDatabase = _resultObj.getString(CustomMapReduceJobPojo.outputDatabase_);
            String outputCollection = _resultObj.getString(CustomMapReduceJobPojo.outputCollection_);
            if (null == outputDatabase) {
                outputDatabase = "custommr";
            }
            DBCollection outColl = null;
            DBCursor dbc = null;
            if ((null == _virtualDirStartLimit) && (null == _virtualDirEndLimit)) { // Actual directory

                DBCollection chunks = MongoDbManager.getCollection("config", "chunks");
                StringBuffer ns = new StringBuffer(outputDatabase).append(".").append(outputCollection);
                dbc = chunks.find(new BasicDBObject("ns", ns.toString()));
                int splits = dbc.count();

                if (splits < 2) { // Nothing to do (unsharded or 1 chunk)
                    dbc.close();

                    outColl = MongoDbManager.getCollection(outputDatabase, outputCollection);
                    dbc = outColl.find();
                } //TESTED (4.2)
                else { // Create one virtual dir per split
                    InfiniteFile[] virtualDirs = new InfiniteFile[splits];
                    int added = 0;
                    for (DBObject splitObj : dbc) {
                        BasicDBObject minObj = (BasicDBObject) splitObj.get("min");
                        BasicDBObject maxObj = (BasicDBObject) splitObj.get("max");
                        ObjectId minId = null;
                        try {
                            minId = (ObjectId) minObj.get("_id");
                        } catch (Exception e) {
                        } // min key..
                        ObjectId maxId = null;
                        try {
                            maxId = (ObjectId) maxObj.get("_id");
                        } catch (Exception e) {
                        } // max key..

                        //Handle current case where custom jobs are all dumped in with the wrong _id type                     
                        if ((null != minId) || (null != maxId)) {
                            if ((null != maxId) && (null != optionalFilterDate)) { // (also used on the files below)

                                if (maxId.getTime() < optionalFilterDate.getTime()) {
                                    // (the "getTime()"s can overlap across chunks so we have to use minId
                                    //  and accept that we'll often deserialize 1+ extra chunk every harvest)
                                    continue;
                                }
                            } //TESTED (by hand)

                            InternalInfiniteFile split = new InternalInfiniteFile(this, minId, maxId);
                            virtualDirs[added] = split;
                            added++;
                        } //TESTED (5.2.2, 6.2.2) (chunk skipping by hand)
                    }
                    dbc.close();
                    return virtualDirs;
                } //TESTED (5.2.2, 6.2.2)
            } //TESTED
            else { // Virtual directory
                BasicDBObject query = new BasicDBObject();
                if (null != _virtualDirStartLimit) {
                    if (null != optionalFilterDate) {
                        ObjectId altStartId = new ObjectId((int) (optionalFilterDate.getTime() / 1000L), 0, 0);
                        //(zero out the inc/machine ids so this query is independent to calling service)

                        if (altStartId.compareTo(_virtualDirStartLimit) > 0) { // (altStartId > _virtualDirStartLimit)
                            query.put(MongoDbManager.gte_, altStartId);
                        } else {
                            query.put(MongoDbManager.gte_, _virtualDirStartLimit);
                        }
                    } //TESTED (by hand)
                    else { // normal case
                        query.put(MongoDbManager.gte_, _virtualDirStartLimit);
                    }
                } else if (null != optionalFilterDate) { // (first chunk so always overwrite with optionalFilter date if applicable)
                    ObjectId altStartId = new ObjectId((int) (optionalFilterDate.getTime() / 1000L), 0, 0);
                    query.put(MongoDbManager.gte_, altStartId);
                } //TESTED (by hand)
                if (null != _virtualDirEndLimit) {
                    query.put(MongoDbManager.lt_, _virtualDirEndLimit);
                }

                outColl = MongoDbManager.getCollection(outputDatabase, outputCollection);
                dbc = outColl.find(new BasicDBObject("_id", query)).limit(1 + maxDocsPerCycle);
            } //TESTED (6.2.2) (doc skipping by hand)

            if (null != outColl) { // has files, create the actual file objects
                //DEBUG
                //System.out.println("CHUNK: GOT " + dbc.count());

                int docCount = dbc.count();
                if (docCount > 1 + maxDocsPerCycle) {
                    docCount = 1 + maxDocsPerCycle; // (we're limiting it here anyway)
                }
                InfiniteFile[] docs = new InfiniteFile[docCount];
                int added = 0;
                for (DBObject docObj : dbc) {
                    // (if didn't use a query then apply internal filter date by hand)
                    if ((null == _virtualDirStartLimit) && (null == _virtualDirEndLimit)
                            && (null != optionalFilterDate)) {
                        ObjectId docId = (ObjectId) docObj.get("_id");
                        if (optionalFilterDate.getTime() > docId.getTime()) {
                            continue;
                        }
                    } //TESTED

                    if (added >= maxDocsPerCycle) { // (we've reached our limit so put the remaining docs in a new directory, will only be used if it has to)
                        docs[added] = new InternalInfiniteFile(this, (ObjectId) docObj.get("_id"),
                                _virtualDirEndLimit);
                        break;
                    } else {
                        InternalInfiniteFile doc = new InternalInfiniteFile(this, (BasicDBObject) docObj);
                        docs[added] = doc;
                    } //TESTED (both cases)
                    added++;
                }
                dbc.close();
                return docs;

            } //TESTED (4.2)
        }
    } else { // can just return myself
        InfiniteFile[] retVal = new InfiniteFile[1];
        retVal[0] = this;
        return retVal;
    } //TESTED (1.2, 2.2)
    return null;
}

From source file:com.impetus.client.mongodb.query.gfs.KunderaGridFS.java

License:Apache License

/**
 * Finds a list of files matching the given query.
 *
 * @param query
 *            the filter to apply
 * @param sort
 *            the fields to sort with
 * @param firstResult
 *            number of files to skip
 * @param maxResult
 *            number of files to return
 * @return list of gridfs files
 */
public List<GridFSDBFile> find(final DBObject query, final DBObject sort, final int firstResult,
        final int maxResult) {
    List<GridFSDBFile> files = new ArrayList<GridFSDBFile>();

    DBCursor c = null;
    try {
        c = _filesCollection.find(query);
        if (sort != null) {
            c.sort(sort);
        }
        c.skip(firstResult).limit(maxResult);
        while (c.hasNext()) {
            files.add(_fix(c.next()));
        }
    } finally {
        if (c != null) {
            c.close();
        }
    }
    return files;
}
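
Assuming a KunderaGridFS instance named gridFS (the instance name and field values below are hypothetical), a paged lookup through this overload might look like:

DBObject query = new BasicDBObject("metadata.owner", "alice"); // hypothetical filter
DBObject sort = new BasicDBObject("uploadDate", -1);           // newest first
List<GridFSDBFile> firstPage = gridFS.find(query, sort, 0, 20); // skip 0, return at most 20 files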

From source file:com.jagornet.dhcp.db.MongoLeaseManager.java

License:Open Source License

/**
 * Find dhcp leases for ia.
 *
 * @param duid the duid
 * @param iatype the iatype
 * @param iaid the iaid
 * @return the list
 */
protected List<DhcpLease> findDhcpLeasesForIA(final byte[] duid, final byte iatype, final long iaid) {
    List<DhcpLease> leases = null;
    DBObject query = new BasicDBObject("duid", duid).append("iatype", iatype).append("iaid", iaid);
    // SQL databases will store in ipAddress order because it is a primary key,
    // but Mongo is not so smart because it is just an index on the field
    DBCursor cursor = dhcpLeases.find(query).sort(new BasicDBObject("ipAddress", 1));
    try {
        if (cursor.count() > 0) {
            leases = new ArrayList<DhcpLease>();
            while (cursor.hasNext()) {
                leases.add(convertDBObject(cursor.next()));
            }
        }
    } finally {
        cursor.close();
    }
    return leases;
}

From source file:com.jagornet.dhcp.db.MongoLeaseManager.java

License:Open Source License

/**
 * Find dhcp lease for InetAddr.
 *
 * @param inetAddr the InetAddr
 * @return the DhcpLease
 */
protected DhcpLease findDhcpLeaseForInetAddr(final InetAddress inetAddr) {
    DhcpLease lease = null;
    DBObject query = ipAddressQuery(inetAddr);
    DBCursor cursor = dhcpLeases.find(query);
    try {
        if (cursor.count() > 0) {
            if (cursor.count() == 1) {
                lease = convertDBObject(cursor.next());
            } else {
                //TODO: ensure this is impossible with mongo's unique index?
                log.error("Found more than one lease for IP=" + inetAddr.getHostAddress());
            }
        }
    } finally {
        cursor.close();
    }
    return lease;
}
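
The TODO above concerns enforcing uniqueness at the database level. A unique index on ipAddress (a sketch, assuming the same dhcpLeases collection used above) would make more than one matching lease impossible:

dhcpLeases.createIndex(new BasicDBObject("ipAddress", 1), new BasicDBObject("unique", true));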

From source file:com.jagornet.dhcp.db.MongoLeaseManager.java

License:Open Source License

@Override
public List<InetAddress> findExistingIPs(final InetAddress startAddr, final InetAddress endAddr) {
    List<InetAddress> inetAddrs = new ArrayList<InetAddress>();

    BasicDBList ipBetw = new BasicDBList();
    ipBetw.add(new BasicDBObject("ipAddress", new BasicDBObject("$gte", startAddr.getAddress())));
    ipBetw.add(new BasicDBObject("ipAddress", new BasicDBObject("$lte", endAddr.getAddress())));

    DBObject query = new BasicDBObject("$and", ipBetw);

    DBCursor cursor = dhcpLeases.find(query).sort(new BasicDBObject("ipAddress", 1));
    try {
        if (cursor.count() > 0) {
            while (cursor.hasNext()) {
                inetAddrs.add(convertDBObject(cursor.next()).getIpAddress());
            }
        }
    } finally {
        cursor.close();
    }
    return inetAddrs;
}

From source file:com.jagornet.dhcp.db.MongoLeaseManager.java

License:Open Source License

@Override
public List<IaAddress> findUnusedIaAddresses(final InetAddress startAddr, final InetAddress endAddr) {
    long offerExpireMillis = DhcpServerPolicies.globalPolicyAsLong(Property.BINDING_MANAGER_OFFER_EXPIRATION);
    final Date offerExpiration = new Date(new Date().getTime() - offerExpireMillis);

    BasicDBList ipAdvBetw = new BasicDBList();
    ipAdvBetw.add(new BasicDBObject("state", IaAddress.ADVERTISED));
    ipAdvBetw.add(new BasicDBObject("startTime", new BasicDBObject("$lte", offerExpiration)));
    ipAdvBetw.add(new BasicDBObject("ipAddress", new BasicDBObject("$gte", startAddr.getAddress())));
    ipAdvBetw.add(new BasicDBObject("ipAddress", new BasicDBObject("$lte", endAddr.getAddress())));

    BasicDBList ipExpRel = new BasicDBList();
    ipExpRel.add(IaAddress.EXPIRED);
    ipExpRel.add(IaAddress.RELEASED);

    BasicDBList ipExpRelBetw = new BasicDBList();
    ipExpRelBetw.add(new BasicDBObject("state", new BasicDBObject("$in", ipExpRel)));
    ipExpRelBetw.add(new BasicDBObject("ipAddress", new BasicDBObject("$gte", startAddr.getAddress())));
    ipExpRelBetw.add(new BasicDBObject("ipAddress", new BasicDBObject("$lte", endAddr.getAddress())));

    BasicDBList ipBetw = new BasicDBList();
    ipBetw.add(new BasicDBObject("$and", ipAdvBetw));
    ipBetw.add(new BasicDBObject("$and", ipExpRelBetw));

    DBObject query = new BasicDBObject("$or", ipBetw);
    // A single compound sort object is needed: chained sort() calls replace the previous ordering.
    DBCursor cursor = dhcpLeases.find(query)
            .sort(new BasicDBObject("state", 1).append("validEndTime", 1).append("ipAddress", 1));
    try {
        if (cursor.count() > 0) {
            List<DhcpLease> leases = new ArrayList<DhcpLease>();
            while (cursor.hasNext()) {
                leases.add(convertDBObject(cursor.next()));
            }
            return toIaAddresses(leases);
        }
    } finally {
        cursor.close();
    }

    return null;
}