List of usage examples for com.mongodb.DBCollection.remove
public WriteResult remove(final DBObject query)
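Before the project examples, a minimal self-contained sketch of the call (assuming the legacy 2.x MongoClient API; the host, database, and collection names are placeholders):

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;
import com.mongodb.WriteResult;

public class RemoveExample {
    public static void main(String[] args) {
        // Placeholder connection details
        MongoClient client = new MongoClient("localhost", 27017);
        DBCollection users = client.getDB("testdb").getCollection("users");

        // remove(query) deletes every document matching the query
        WriteResult result = users.remove(new BasicDBObject("status", "inactive"));
        System.out.println("Documents removed: " + result.getN());

        client.close();
    }
}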
From source file:com.hangum.tadpole.mongodb.core.query.MongoDBQuery.java
License:Open Source License
/**
 * Delete a document.
 *
 * @param userDB
 * @param colName
 * @param dbObject
 * @throws Exception
 */
public static void deleteDocument(UserDBDAO userDB, String colName, DBObject dbObject) throws Exception {
    DBCollection collection = findCollection(userDB, colName);
    WriteResult wr = collection.remove(dbObject);
    if (logger.isDebugEnabled()) {
        logger.debug("[writer document]" + wr.toString());
        logger.debug(wr.getError());
        logger.debug("[n]" + wr.getN());
    }
    // treat a no-op delete that carries an error message as a failure
    if (wr.getN() == 0 && !"".equals(wr.getError())) {
        throw new Exception(wr.getError());
    }
}
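Note that WriteResult.getError() belongs to the old unacknowledged-write era and was removed in later driver versions; with an acknowledged write concern, write failures surface as exceptions instead. A hedged sketch of the equivalent check:

// Sketch: with WriteConcern.ACKNOWLEDGED, write failures throw MongoException,
// so only the matched-count check remains.
WriteResult wr = collection.remove(dbObject, WriteConcern.ACKNOWLEDGED);
if (wr.getN() == 0) {
    throw new Exception("delete matched no documents");
}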
From source file:com.hangum.tadpole.mongodb.core.query.MongoDBQuery.java
License:Open Source License
/**
 * Delete a user.
 *
 * @param userDB
 * @param id
 * @throws Exception
 */
public static void deleteUser(UserDBDAO userDB, String id) throws Exception {
    DBCollection collection = findCollection(userDB, "system.users");
    DBObject query = new BasicDBObject("user", id);
    collection.remove(query);
}
From source file:com.health.smart.util.MongoC.java
public static void remove(String collection, DBObject criteria) throws Exception {
    DBCollection coll = getClient().getDB(database).getCollection(collection);
    coll.remove(criteria);
}
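A possible call site for this helper (the collection and field names here are illustrative, not from the project):

// Remove all sessions that expired before now
MongoC.remove("sessions", new BasicDBObject("expiresAt", new BasicDBObject("$lt", new Date())));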
From source file:com.ibm.bluemix.smartveggie.dao.SubOutletVendorAllocationDaoImpl.java
@Override
public BasicDBObject deallocatedSubOutlet(SubOutletVendorAllocationDTO subOutletVendorAllocationDTO) {
    try {
        System.out.println("starting object delete..");
        DB db = MongodbConnection.getMongoDB();
        BasicDBObject query = new BasicDBObject();
        if (subOutletVendorAllocationDTO != null) {
            if (subOutletVendorAllocationDTO.getVendorUsername() != null
                    && !subOutletVendorAllocationDTO.getVendorUsername().equalsIgnoreCase("")) {
                query.append("vendorUsername", subOutletVendorAllocationDTO.getVendorUsername());
            }
            if (subOutletVendorAllocationDTO.getSmartCityCode() != null
                    && !subOutletVendorAllocationDTO.getSmartCityCode().equalsIgnoreCase("")) {
                query.append("smartCityCode", subOutletVendorAllocationDTO.getSmartCityCode());
            }
            if (subOutletVendorAllocationDTO.getSmartOutletCode() != null
                    && !subOutletVendorAllocationDTO.getSmartOutletCode().equalsIgnoreCase("")) {
                query.append("smartOutletCode", subOutletVendorAllocationDTO.getSmartOutletCode());
            }
            if (subOutletVendorAllocationDTO.getSuboutletCode() != null
                    && !subOutletVendorAllocationDTO.getSuboutletCode().equalsIgnoreCase("")) {
                query.append("suboutletCode", subOutletVendorAllocationDTO.getSuboutletCode());
            }
            if (subOutletVendorAllocationDTO.getSuboutletAllocatedFrom() != null
                    && !subOutletVendorAllocationDTO.getSuboutletAllocatedFrom().equalsIgnoreCase("")) {
                query.append("suboutletAllocatedFrom", subOutletVendorAllocationDTO.getSuboutletAllocatedFrom());
            }
            if (subOutletVendorAllocationDTO.getSuboutletAllocatedTo() != null
                    && !subOutletVendorAllocationDTO.getSuboutletAllocatedTo().equalsIgnoreCase("")) {
                query.append("suboutletAllocatedTo", subOutletVendorAllocationDTO.getSuboutletAllocatedTo());
            }
        }
        System.out.println("Querying for Delete: " + query);
        DBCollection col = db.getCollection(ICollectionName.COLLECTION_ALLOC_SUBOUTLETS);
        DBCursor cursor = col.find(query);
        BasicDBObject obj = null;
        while (cursor.hasNext()) {
            obj = (BasicDBObject) cursor.next();
            System.out.println("Retrieved Allocated Vendor manager outlet: " + obj);
        }
        col.remove(query);
        cursor.close();
        return obj;
    } catch (Exception e) {
        throw e;
    }
}
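Since the method only returns the last matching document anyway, the find/print/remove sequence could be condensed with DBCollection.findAndRemove, which atomically removes one matching document and returns it; note the semantic difference that remove(query) deletes every match while findAndRemove deletes a single one (a sketch, not the project's code):

// Atomically remove one matching allocation and get the removed document back
BasicDBObject removed = (BasicDBObject) col.findAndRemove(query);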
From source file:com.ibm.ws.lars.rest.PersistenceBean.java
License:Apache License
/**
 * Delete the asset with the specified id.
 */
@Override
public void deleteAsset(String assetId) {
    DBCollection coll = getAssetCollection();
    DBObject query = new BasicDBObject(ID, new ObjectId(assetId));
    coll.remove(query);
}
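One caveat: new ObjectId(assetId) throws IllegalArgumentException for anything but a valid 24-character hex string, so a defensive variant might validate first (a sketch under that assumption):

if (!ObjectId.isValid(assetId)) {
    throw new IllegalArgumentException("Not a valid asset id: " + assetId);
}
coll.remove(new BasicDBObject(ID, new ObjectId(assetId)));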
From source file:com.ibm.ws.lars.testutils.RepositoryFixture.java
License:Apache License
@Override
public Statement apply(Statement base, Description description) {
    final Statement baseStatement = base;
    return new Statement() {
        @Override
        public void evaluate() throws Throwable {
            // Before running the test, clear the database
            DB db = getDb();
            for (String collectionName : db.getCollectionNames()) {
                if (!collectionName.startsWith("system.")) {
                    DBCollection c = db.getCollection(collectionName);
                    c.remove(new BasicDBObject());
                }
            }
            // Now check that the repository contains no resources
            Collection<MassiveResource> resources = MassiveResource.getAllResources(adminLoginInfo);
            assertEquals("Wrong number of resources in the server", 0, resources.size());
            // Finally continue with executing the test
            baseStatement.evaluate();
        }
    };
}
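The empty BasicDBObject is a match-all filter, so this empties each collection while keeping it (and its indexes) in place, which is usually what a test fixture wants:

c.remove(new BasicDBObject()); // delete every document, keep the collection and its indexes
c.drop();                      // delete the collection itself, indexes included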
From source file:com.ikanow.infinit.e.api.knowledge.federated.SimpleFederatedQueryEngine.java
License:Open Source License
private BasicDBObject getCache(String url, SourceFederatedQueryConfigPojo endpoint) {
    if ((null != endpoint.cacheTime_days) && (endpoint.cacheTime_days <= 0)) { // cache disabled
        return null;
    }
    DBCollection endpointCacheCollection = getCacheCollection();
    if (!_staticInitializer) {
        _staticInitializer = true;
        endpointCacheCollection.ensureIndex(new BasicDBObject(SimpleFederatedCache.expiryDate_, 1));
    }
    BasicDBObject cacheObj = (BasicDBObject) endpointCacheCollection
            .findOne(new BasicDBObject(SimpleFederatedCache._id_, url));
    if (null == cacheObj) {
        return null;
    }
    // else found something, means there's stuff in the DB
    // so check it's not too big:
    Date now = new Date();
    if ((-1 == _lastChecked) || (now.getTime() > (_lastChecked + (600L * 1000L)))) {
        // (only check every 10 minutes)
        if (endpointCacheCollection.count() > SimpleFederatedCache.QUERY_FEDERATION_CACHE_CLEANSE_SIZE) {
            _lastChecked = now.getTime();
            // Remove everything with expiry date older than now
            endpointCacheCollection.remove(new BasicDBObject(SimpleFederatedCache.expiryDate_,
                    new BasicDBObject(DbManager.lt_, new Date())));
        }
    } //TESTED (4.3)
    Date expiryDate = cacheObj.getDate(SimpleFederatedCache.expiryDate_, now);
    if (now.getTime() < expiryDate.getTime()) {
        return cacheObj;
    } else {
        return null;
    } //TESTED (4.2)
}
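An alternative to the periodic $lt sweep above, where it fits the deployment: a MongoDB TTL index on the expiry-date field makes the server delete expired cache entries itself (a sketch reusing this example's names; expireAfterSeconds of 0 expires each document at its stored date):

// TTL index: MongoDB removes documents once their expiryDate has passed
endpointCacheCollection.ensureIndex(
        new BasicDBObject(SimpleFederatedCache.expiryDate_, 1),
        new BasicDBObject("expireAfterSeconds", 0));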
From source file:com.ikanow.infinit.e.api.knowledge.federated.SimpleFederatedQueryEngine.java
License:Open Source License
public void test_queryClear(boolean clearCacheAlso) {
    if (null != this._asyncRequestsPerQuery) {
        for (FederatedRequest req : this._asyncRequestsPerQuery) {
            try {
                req.responseFuture.get();
                if (null != req.asyncClient) {
                    req.asyncClient.close();
                }
            } catch (Exception e) {
            }
        }
        this._asyncRequestsPerQuery.clear();
    }
    if (clearCacheAlso) {
        DBCollection endpointCacheCollection = getCacheCollection();
        endpointCacheCollection.remove(new BasicDBObject());
    }
}
From source file:com.ikanow.infinit.e.api.utils.RESTTools.java
License:Open Source License
/**
 * Creates a new session for a user, adding an entry to our cookie table
 * (maps cookieid to userid) and starts the clock
 *
 * @param userid
 * @param bMulti if true lets you login from many sources
 * @param bOverride if false will fail if already logged in
 * @return
 */
public static ObjectId createSession(ObjectId userid, boolean bMulti, boolean bOverride) {
    try {
        DBCollection cookieColl = DbManager.getSocial().getCookies();
        if (!bMulti) { // Otherwise allow multiple cookies for this user
            // remove any old cookie for this user
            BasicDBObject dbQuery = new BasicDBObject();
            dbQuery.put("profileId", userid);
            dbQuery.put("apiKey", new BasicDBObject(DbManager.exists_, false));
            DBCursor dbc = cookieColl.find(dbQuery);
            if (bOverride) {
                while (dbc.hasNext()) {
                    cookieColl.remove(dbc.next());
                }
            } //TESTED
            else if (dbc.length() > 0) {
                return null;
            } //TESTED
        }
        // Find user
        // create a new entry
        CookiePojo cp = new CookiePojo();
        ObjectId randomObjectId = generateRandomId();
        cp.set_id(randomObjectId);
        cp.setCookieId(randomObjectId);
        cp.setLastActivity(new Date());
        cp.setProfileId(userid);
        cp.setStartDate(new Date());
        cookieColl.insert(cp.toDb());
        // return cookieid
        return cp.getCookieId();
    } catch (Exception e) {
        logger.error("Line: [" + e.getStackTrace()[2].getLineNumber() + "] " + e.getMessage());
        e.printStackTrace();
    }
    return null;
}
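In the bOverride branch, iterating the cursor and issuing one remove() per document is equivalent (races aside) to a single multi-document remove with the same query, saving a round trip per cookie (a sketch):

if (bOverride) {
    cookieColl.remove(dbQuery); // remove() deletes all documents matching dbQuery
}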
From source file:com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java
License:Open Source License
/**
 * Moves the output of a job from output_tmp to output and deletes
 * the tmp collection.
 *
 * @param cmr
 * @throws IOException
 * @throws ParserConfigurationException
 * @throws SAXException
 */
private void moveTempOutput(CustomMapReduceJobPojo cmr)
        throws IOException, SAXException, ParserConfigurationException {
    // If we are an export job then move files:
    bringTempOutputToFront(cmr);
    // (the rest of this will just do nothing)

    /**
     * Atomic plan:
     * If not append, move customlookup pointer to tmp collection, drop old collection.
     * If append, set sync flag (find/mod), move results from tmp to old, unset sync flag.
     */
    // step 1: build out any of the post proc arguments
    DBObject postProcObject = null;
    boolean limitAllData = true;
    boolean hasSort = false;
    int limit = 0;
    BasicDBObject sort = new BasicDBObject();
    try {
        postProcObject = (DBObject) com.mongodb.util.JSON
                .parse(getQueryOrProcessing(cmr.query, QuerySpec.POSTPROC));
        if (postProcObject != null) {
            if (postProcObject.containsField("limitAllData")) {
                limitAllData = (Boolean) postProcObject.get("limitAllData");
            }
            if (postProcObject.containsField("limit")) {
                limit = (Integer) postProcObject.get("limit");
                if (postProcObject.containsField("sortField")) {
                    String sfield = (String) postProcObject.get("sortField");
                    int sortDir = 1;
                    if (postProcObject.containsField("sortDirection")) {
                        sortDir = (Integer) postProcObject.get("sortDirection");
                    }
                    sort.put(sfield, sortDir);
                    hasSort = true;
                } else if (limit > 0) {
                    // set a default sort because the user posted a limit
                    sort.put("_id", -1);
                    hasSort = true;
                }
            }
        }
    } catch (Exception ex) {
        _logger.info("job_error_post_proc_title=" + cmr.jobtitle + " job_error_post_proc_id="
                + cmr._id.toString() + " job_error_post_proc_message="
                + HarvestExceptionUtils.createExceptionMessage(ex));
    }

    // step 2a: if not appending results then work on temp collection and swap to main
    if ((null == cmr.appendResults) || !cmr.appendResults) {
        // format temp then change lookup pointer to temp collection
        // transform all the results into necessary format:
        DBCursor dbc_tmp = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                .find(new BasicDBObject("key", null)).sort(sort).limit(limit);
        while (dbc_tmp.hasNext()) {
            DBObject dbo = dbc_tmp.next();
            Object key = dbo.get("_id");
            dbo.put("key", key);
            dbo.removeField("_id");
            DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp).insert(dbo);
        }
        DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                .remove(new BasicDBObject("key", null));
        // swap the output collections
        BasicDBObject notappendupdates = new BasicDBObject(CustomMapReduceJobPojo.outputCollection_,
                cmr.outputCollectionTemp);
        notappendupdates.append(CustomMapReduceJobPojo.outputCollectionTemp_, cmr.outputCollection);
        DbManager.getCustom().getLookup().findAndModify(
                new BasicDBObject(CustomMapReduceJobPojo._id_, cmr._id),
                new BasicDBObject(MongoDbManager.set_, notappendupdates));
        String temp = cmr.outputCollectionTemp;
        cmr.outputCollectionTemp = cmr.outputCollection;
        cmr.outputCollection = temp;
    } else {
        // step 2b: if appending results then drop modified results in output collection
        DbManager.getCustom().getLookup().findAndModify(
                new BasicDBObject(CustomMapReduceJobPojo._id_, cmr._id),
                new BasicDBObject(MongoDbManager.set_, new BasicDBObject("isUpdatingOutput", true)));
        // remove any results that have aged out
        if ((null != cmr.appendAgeOutInDays) && cmr.appendAgeOutInDays > 0) {
            long ageOutMS = (long) (cmr.appendAgeOutInDays * MS_IN_DAY);
            Date lastAgeOut = new Date(((new Date()).getTime() - ageOutMS));
            DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollection).remove(
                    new BasicDBObject("_id", new BasicDBObject(MongoDbManager.lt_, new ObjectId(lastAgeOut))));
        }
        DBCursor dbc_tmp;
        if (!limitAllData) {
            // sort and limit the temp data set because we only want to process it
            dbc_tmp = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                    .find(new BasicDBObject("key", null)).sort(sort).limit(limit);
            limit = 0; // reset limit so we get everything in a few steps (we only want to limit the new data)
        } else {
            dbc_tmp = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                    .find(new BasicDBObject("key", null));
        }
        DBCollection dbc = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollection);
        // transform temp results and dump into output collection
        while (dbc_tmp.hasNext()) {
            DBObject dbo = dbc_tmp.next();
            // transform the dbo to format {_id:ObjectId, key:(prev_id), value:value}
            Object key = dbo.get("_id");
            dbo.put("key", key);
            dbo.removeField("_id"); // _id field should be automatically set to objectid when inserting now
            dbc.insert(dbo);
        }
        // if there is a sort, we need to apply it to all the data now
        if (hasSort) {
            ObjectId OID = new ObjectId();
            BasicDBObject query = new BasicDBObject("_id", new BasicDBObject(MongoDbManager.lt_, OID));
            // find everything inserted before now and sort/limit the data
            DBCursor dbc_sort = dbc.find(query).sort(sort).limit(limit);
            while (dbc_sort.hasNext()) {
                // reinsert the data into db (it should be in sorted order naturally now)
                DBObject dbo = dbc_sort.next();
                dbo.removeField("_id");
                dbc.insert(dbo);
            }
            // remove everything inserted before we reorganized everything
            // (should leave only the new results in natural order)
            dbc.remove(query);
        }
        DbManager.getCustom().getLookup().findAndModify(
                new BasicDBObject(CustomMapReduceJobPojo._id_, cmr._id),
                new BasicDBObject(MongoDbManager.set_, new BasicDBObject("isUpdatingOutput", false)));
    }
    // step 3: clean up temp output collection so we can use it again
    // (drop it, removing chunks)
    try {
        DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp).drop();
    } catch (Exception e) {
        // That's fine, it probably just doesn't exist yet...
    }
}
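A detail worth spelling out in the remove(new BasicDBObject("key", null)) call above: the raw job output has no key field, and MongoDB's equality match on null also matches documents where the field is missing, so the filter selects exactly the untransformed originals and leaves only the reshaped copies behind.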