List of usage examples for com.mongodb.DBCursor.close()
@Override public void close()
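All of the examples on this page follow the same basic pattern: obtain a DBCursor from DBCollection.find(...), iterate it with hasNext()/next(), and close it in a finally block so the server-side cursor is released even if iteration throws. Below is a minimal, self-contained sketch of that pattern using the legacy driver API; the connection details and the "test"/"items" database and collection names are placeholders, not taken from any of the examples.

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;

public class CursorCloseExample {
    public static void main(String[] args) {
        MongoClient mongoClient = new MongoClient("localhost", 27017);
        DBCollection collection = mongoClient.getDB("test").getCollection("items");
        // Empty query: match every document in the collection
        DBCursor cursor = collection.find(new BasicDBObject());
        try {
            while (cursor.hasNext()) {
                DBObject doc = cursor.next();
                System.out.println(doc);
            }
        } finally {
            cursor.close(); // always release the server-side cursor
        }
        mongoClient.close();
    }
}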
From source file: com.owly.srv.RemoteServerMongoDAOImpl.java
License: Apache License
public ArrayList<RemoteServer> getRemoteServers() {
    logger.debug("Get all remote servers in the database");
    ArrayList<RemoteServer> lstSrv = new ArrayList<RemoteServer>();
    // create an empty query
    BasicDBObject query = new BasicDBObject();
    logger.info("MONGODB : Get list of all objects in the database");
    DBCursor cursor = remoteSrvCfgCollection.find(query);
    try {
        while (cursor.hasNext()) {
            DBObject cur = cursor.next();
            RemoteServer rmtSrv = new RemoteServer();
            rmtSrv.setName((String) cur.get("Name"));
            rmtSrv.setNodeIPAddress((String) cur.get("NodeIPAddress"));
            rmtSrv.setSrvType((String) cur.get("SrvType"));
            rmtSrv.setClientPort((Integer) cur.get("ClientPort"));
            rmtSrv.setListTypeOfStats((ArrayList<String>) cur.get("ListTypeOfStats"));
            rmtSrv.setServerStatus((Boolean) cur.get("ServerStatus"));
            rmtSrv.setEnabled((Boolean) cur.get("Enabled"));
            // Add this object into the list.
            lstSrv.add(rmtSrv);
        }
    } finally {
        cursor.close();
    }
    logger.info("MONGODB : List of remote servers obtained: " + lstSrv.toString());
    return lstSrv;
}
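Since DBCursor implements Closeable (via the Cursor interface in recent versions of the legacy driver), the try/finally above can also be written as try-with-resources; a sketch reusing remoteSrvCfgCollection from the example:

// try-with-resources closes the cursor automatically, even on exceptions
try (DBCursor cursor = remoteSrvCfgCollection.find(new BasicDBObject())) {
    while (cursor.hasNext()) {
        DBObject cur = cursor.next();
        // map fields into a RemoteServer as shown above
    }
}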
From source file: com.ratzia.pfc.webpageanalyticaltool.dbupdater.Watcher.java
public void processDatabase(LinkedList<String> pluginsCode, String currentVersion) throws UnknownHostException {
    /**** Plugins ****/
    LinkedList<SysPlugin> plugins = loadPlugins(pluginsCode);
    /*****************/
    /*SecurityManager oldSecurityManager = System.getSecurityManager();
    DBPluginSecurityManager dbPluginSecurityManager = new DBPluginSecurityManager();
    System.setSecurityManager(dbPluginSecurityManager);
    //Will open the security manager so we need to ensure it is closed afterwards
    try{*/
    MongoClient mongoClient = new MongoClient(serverAddress);
    DB local = mongoClient.getDB(db);
    DBCollection dbCol = local.getCollection(collection);
    BasicDBObject query = new BasicDBObject();
    query.put(PLUGIN_VERSION_FIELD, new BasicDBObject("$ne", currentVersion));
    DBCursor cursor = dbCol.find(query);
    long count = 0;
    while (cursor.hasNext()) {
        DBObject obj = cursor.next();
        // Copy contents
        BasicDBObject res = new BasicDBObject();
        for (String k : obj.keySet()) {
            res.put(k, obj.get(k));
        }
        // Plugin operations
        for (SysPlugin plugin : plugins) {
            try {
                plugin.run(res);
            } catch (Exception ex) {
                System.out.println("Error in " + plugin.getClass());
                Logger.getLogger(Watcher.class.getName()).log(Level.SEVERE, null, ex);
            }
        }
        // Put plugin-only fields into the original object
        for (String k : res.keySet()) {
            if ((k.substring(0, 2)).compareTo("p_") == 0) {
                obj.put(k, res.get(k));
            }
        }
        // Update version on object
        obj.put(PLUGIN_VERSION_FIELD, currentVersion);
        dbCol.save(obj);
        count++;
    }
    cursor.close();
    if (count > 0) {
        System.out.println(count + " updated");
    }
    /*}catch(Exception ex){
        Logger.getLogger(Watcher.class.getName()).log(Level.SEVERE, null, ex);
    }
    //close sandbox
    System.setSecurityManager(oldSecurityManager);*/
}
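Unlike the other examples on this page, this one calls cursor.close() only on the happy path: if cursor.next() or dbCol.save(obj) throws, the method exits without releasing the cursor. A sketch of the same loop with the close moved into a finally block (identifiers as in the example above):

DBCursor cursor = dbCol.find(query);
long count = 0;
try {
    while (cursor.hasNext()) {
        DBObject obj = cursor.next();
        // ... run plugins and save obj as in the example above ...
        count++;
    }
} finally {
    cursor.close(); // released even if next() or save(...) throws
}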
From source file: com.redhat.lightblue.crud.mongo.AtomicIterateUpdate.java
License: Open Source License
@Override
public void update(CRUDOperationContext ctx, DBCollection collection, EntityMetadata md,
        CRUDUpdateResponse response, DBObject query) {
    LOGGER.debug("atomicIterateUpdate: start");
    Set<Path> inaccessibleFields = roleEval.getInaccessibleFields(FieldAccessRoleEvaluator.Operation.update);
    for (Path x : inaccessibleFields) {
        if (updatedFields.contains(x)) {
            ctx.addError(Error.get("update", CrudConstants.ERR_NO_FIELD_UPDATE_ACCESS, x.toString()));
        }
    }
    int numFailed = 0;
    int numUpdated = 0;
    if (!ctx.hasErrors()) {
        LOGGER.debug("Computing the result set for {}", query);
        DBCursor cursor = null;
        int docIndex = 0;
        try {
            // Find docs
            cursor = new FindCommand(null, collection, query, null).execute();
            LOGGER.debug("Found {} documents", cursor.count());
            // read-update
            while (cursor.hasNext()) {
                DBObject document = cursor.next();
                // Add the doc to context
                DocCtx doc = ctx.addDocument(translator.toJson(document));
                try {
                    Object id = document.get("_id");
                    LOGGER.debug("Retrieved doc {} id={}", docIndex, id);
                    // Update doc
                    DBObject modifiedDoc = new FindAndModifyCommand(null, collection,
                            new BasicDBObject("_id", id), null, null, false, mongoUpdateExpr, true, false)
                                    .execute();
                    if (projector != null) {
                        LOGGER.debug("Projecting document {}", docIndex);
                        doc.setOutputDocument(projector.project(translator.toJson(modifiedDoc), nodeFactory));
                        doc.setOperationPerformed(Operation.UPDATE);
                    }
                    numUpdated++;
                } catch (MongoException e) {
                    LOGGER.warn("Update exception for document {}: {}", docIndex, e);
                    doc.addError(Error.get(MongoCrudConstants.ERR_UPDATE_ERROR, e.toString()));
                    numFailed++;
                }
                docIndex++;
            }
        } finally {
            if (cursor != null) {
                cursor.close();
            }
        }
    }
    response.setNumUpdated(numUpdated);
    response.setNumFailed(numFailed);
}
From source file: com.redhat.lightblue.crud.mongo.IterateAndUpdate.java
License: Open Source License
@Override
public void update(CRUDOperationContext ctx, DBCollection collection, EntityMetadata md,
        CRUDUpdateResponse response, DBObject query) {
    LOGGER.debug("iterateUpdate: start");
    LOGGER.debug("Computing the result set for {}", query);
    DBCursor cursor = null;
    int docIndex = 0;
    int numFailed = 0;
    try {
        cursor = new FindCommand(null, collection, query, null).execute();
        LOGGER.debug("Found {} documents", cursor.count());
        // read-update-write
        while (cursor.hasNext()) {
            DBObject document = cursor.next();
            boolean hasErrors = false;
            LOGGER.debug("Retrieved doc {}", docIndex);
            DocCtx doc = ctx.addDocument(translator.toJson(document));
            doc.setOutputDocument(doc.copy());
            // From now on: doc contains the old copy, and doc.getOutputDocument contains the new copy
            if (updater.update(doc.getOutputDocument(), md.getFieldTreeRoot(), Path.EMPTY)) {
                LOGGER.debug("Document {} modified, updating", docIndex);
                PredefinedFields.updateArraySizes(nodeFactory, doc.getOutputDocument());
                LOGGER.debug("Running constraint validations");
                validator.clearErrors();
                validator.validateDoc(doc.getOutputDocument());
                List<Error> errors = validator.getErrors();
                if (errors != null && !errors.isEmpty()) {
                    ctx.addErrors(errors);
                    hasErrors = true;
                    LOGGER.debug("Doc has errors");
                }
                errors = validator.getDocErrors().get(doc.getOutputDocument());
                if (errors != null && !errors.isEmpty()) {
                    doc.addErrors(errors);
                    hasErrors = true;
                    LOGGER.debug("Doc has data errors");
                }
                if (!hasErrors) {
                    List<Path> paths = roleEval.getInaccessibleFields_Update(doc.getOutputDocument(), doc);
                    LOGGER.debug("Inaccessible fields during update={}", paths);
                    if (paths != null && !paths.isEmpty()) {
                        doc.addError(Error.get("update", CrudConstants.ERR_NO_FIELD_UPDATE_ACCESS, paths.toString()));
                        hasErrors = true;
                    }
                }
                if (!hasErrors) {
                    try {
                        DBObject updatedObject = translator.toBson(doc.getOutputDocument());
                        translator.addInvisibleFields(document, updatedObject, md);
                        WriteResult result = new SaveCommand(null, collection, updatedObject).execute();
                        doc.setOperationPerformed(Operation.UPDATE);
                        LOGGER.debug("Number of rows affected: {}", result.getN());
                    } catch (Exception e) {
                        LOGGER.warn("Update exception for document {}: {}", docIndex, e);
                        doc.addError(Error.get(MongoCrudConstants.ERR_UPDATE_ERROR, e.toString()));
                        hasErrors = true;
                    }
                }
            } else {
                LOGGER.debug("Document {} was not modified", docIndex);
            }
            if (hasErrors) {
                LOGGER.debug("Document {} has errors", docIndex);
                numFailed++;
                doc.setOutputDocument(errorProjector.project(doc.getOutputDocument(), nodeFactory));
            } else {
                if (projector != null) {
                    LOGGER.debug("Projecting document {}", docIndex);
                    doc.setOutputDocument(projector.project(doc.getOutputDocument(), nodeFactory));
                }
            }
            docIndex++;
        }
    } finally {
        if (cursor != null) {
            cursor.close();
        }
    }
    response.setNumUpdated(docIndex);
    response.setNumFailed(numFailed);
}
From source file: com.redhat.lightblue.crud.mongo.IterateDeleter.java
License: Open Source License
@Override
public void delete(CRUDOperationContext ctx, DBCollection collection, DBObject mongoQuery,
        CRUDDeleteResponse response) {
    LOGGER.debug("Computing the result set for {}", mongoQuery);
    DBCursor cursor = null;
    int docIndex = 0;
    int numDeleted = 0;
    try {
        // Find docs
        cursor = new FindCommand(null, collection, mongoQuery, null).execute();
        LOGGER.debug("Found {} documents", cursor.count());
        // read-delete
        while (cursor.hasNext()) {
            DBObject document = cursor.next();
            LOGGER.debug("Retrieved doc {}", docIndex);
            Object id = document.get(MongoCRUDController.ID_STR);
            DocCtx doc = ctx.addDocument(translator.toJson(document));
            doc.setOriginalDocument(doc);
            WriteResult result = new RemoveCommand(null, collection, new BasicDBObject("_id", id),
                    WriteConcern.SAFE).execute();
            if (result.getN() == 1) {
                numDeleted++;
                doc.setOperationPerformed(Operation.DELETE);
            }
            docIndex++;
        }
    } finally {
        if (cursor != null) {
            cursor.close();
        }
    }
    response.setNumDeleted(numDeleted);
}
From source file: com.redhat.lightblue.mongo.crud.BasicDocFinder.java
License: Open Source License
@Override
public long find(CRUDOperationContext ctx, DBCollection coll, DBObject mongoQuery, DBObject mongoProjection,
        DBObject mongoSort, Long from, Long to) {
    LOGGER.debug("Submitting query {}", mongoQuery);
    long executionTime = System.currentTimeMillis();
    DBCursor cursor = null;
    boolean cursorInUse = false;
    try {
        cursor = coll.find(mongoQuery, mongoProjection);
        if (readPreference != null) {
            cursor.setReadPreference(readPreference);
        }
        if (ctx.isLimitQueryTime() && maxQueryTimeMS > 0) {
            cursor.maxTime(maxQueryTimeMS, TimeUnit.MILLISECONDS);
        }
        executionTime = System.currentTimeMillis() - executionTime;
        LOGGER.debug("Query evaluated");
        if (mongoSort != null) {
            cursor = cursor.sort(mongoSort);
            LOGGER.debug("Result set sorted");
        }
        LOGGER.debug("Applying limits: {} - {}", from, to);
        boolean retrieve = true;
        int nRetrieve = 0;
        int numMatched = 0;
        // f and t are from and to indexes, both inclusive
        int f = from == null ? 0 : from.intValue();
        if (f < 0) {
            f = 0;
        }
        cursor.skip(f);
        if (ctx.isComputeCounts()) {
            numMatched = cursor.count();
        }
        int t;
        if (to != null) {
            t = to.intValue();
            if (t < f) {
                retrieve = false;
            } else {
                cursor.limit(nRetrieve = t - f + 1);
            }
        } else {
            if (ctx.isComputeCounts()) {
                t = numMatched - 1;
                nRetrieve = numMatched - f;
            } else {
                t = Integer.MAX_VALUE;
            }
        }
        if (retrieve) {
            LOGGER.debug("Retrieving results");
            CursorStream stream = new CursorStream(cursor, translator, mongoQuery, executionTime, f, t);
            ctx.setDocumentStream(stream);
            cursorInUse = true;
        } else {
            ctx.setDocumentStream(new ListDocumentStream<DocCtx>(new ArrayList<>()));
        }
        if (RESULTSET_LOGGER.isDebugEnabled() && (executionTime > 100)) {
            RESULTSET_LOGGER.debug("execution_time={}, query={}, from={}, to={}", executionTime, mongoQuery, f, t);
        }
        return numMatched;
    } finally {
        if (cursor != null && !cursorInUse) {
            cursor.close();
        }
    }
}
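This finder deliberately skips the close when results are actually retrieved: ownership of the open cursor passes to the CursorStream, and the cursorInUse flag suppresses close() in the finally block, leaving the stream responsible for closing the cursor once it is drained. Below is a hypothetical, much-simplified sketch of that ownership-transfer pattern; SimpleCursorStream is illustrative only, not lightblue's actual CursorStream:

import com.mongodb.DBCursor;
import com.mongodb.DBObject;

// Illustrative only: a stream that takes ownership of an open DBCursor
// and closes it when iteration is exhausted or the caller closes the stream.
class SimpleCursorStream implements AutoCloseable {
    private final DBCursor cursor;

    SimpleCursorStream(DBCursor cursor) {
        this.cursor = cursor; // ownership transferred: the finder must not close it
    }

    /** Returns the next document, or null (and closes the cursor) when exhausted. */
    DBObject next() {
        if (cursor.hasNext()) {
            return cursor.next();
        }
        close();
        return null;
    }

    @Override
    public void close() {
        cursor.close(); // release the server-side cursor
    }
}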
From source file: com.redhat.lightblue.mongo.crud.IterateAndUpdate.java
License: Open Source License
@Override
public void update(CRUDOperationContext ctx, DBCollection collection, EntityMetadata md,
        CRUDUpdateResponse response, DBObject query) {
    LOGGER.debug("iterateUpdate: start");
    LOGGER.debug("Computing the result set for {}", query);
    Measure measure = new Measure();
    BatchUpdate sup = getUpdateProtocol(ctx, collection, query, md, measure);
    DBCursor cursor = null;
    int docIndex = 0;
    int numMatched = 0;
    int numUpdated = 0;
    int numFailed = 0;
    BsonMerge merge = new BsonMerge(md);
    List<DocCtx> docUpdateAttempts = new ArrayList<>();
    List<DocCtx> resultDocs = new ArrayList<>();
    ctx.setInputDocuments(resultDocs);
    try {
        ctx.getFactory().getInterceptors().callInterceptors(InterceptPoint.PRE_CRUD_UPDATE_RESULTSET, ctx);
        measure.begin("collection.find");
        cursor = collection.find(query, null);
        // Read from primary for read-for-update operations
        cursor.setReadPreference(ReadPreference.primary());
        measure.end("collection.find");
        LOGGER.debug("Found {} documents", cursor.count());
        // read-update-write
        measure.begin("iteration");
        int batchStartIndex = 0; // docUpdateAttempts[batchStartIndex] is the first doc in this batch
        // TODO: This code is very messy and probably has several logic bugs. I do not have time to fix it.
        // Things I noticed:
        // 1. numFailed is not updated consistently. Depending on where failure occurs, it may not be updated!
        // 2. resultDocs are not updated consistently. Depending on the branch, the document may not end up in the response.
        //    It is not clear from reading the code when it's expected to be in the response or not.
        //    I know from some failing tests in dependent services that at least some cases are bugged.
        // The amount of branching needs to be toned down, and low level state fiddling needs to be better abstracted
        // so it can be expressed in fewer places.
        while (cursor.hasNext()) {
            DBObject document = cursor.next();
            numMatched++;
            boolean hasErrors = false;
            LOGGER.debug("Retrieved doc {}", docIndex);
            measure.begin("ctx.addDocument");
            DocTranslator.TranslatedDoc translatedDoc = translator.toJson(document);
            DocCtx doc = new DocCtx(translatedDoc.doc, translatedDoc.rmd);
            doc.startModifications();
            measure.end("ctx.addDocument");
            // From now on: doc contains the working copy, and doc.originalDoc contains the original copy
            if (updateDoc(md, doc, measure)) {
                LOGGER.debug("Document {} modified, updating", docIndex);
                ctx.getFactory().getInterceptors()
                        .callInterceptors(InterceptPoint.PRE_CRUD_UPDATE_DOC_VALIDATION, ctx, doc);
                LOGGER.debug("Running constraint validations");
                measure.begin("validation");
                validator.clearErrors();
                validator.validateDoc(doc);
                measure.end("validation");
                List<Error> errors = validator.getErrors();
                if (errors != null && !errors.isEmpty()) {
                    ctx.addErrors(errors);
                    hasErrors = true;
                    LOGGER.debug("Doc has errors");
                }
                errors = validator.getDocErrors().get(doc);
                if (errors != null && !errors.isEmpty()) {
                    doc.addErrors(errors);
                    hasErrors = true;
                    LOGGER.debug("Doc has data errors");
                }
                if (!hasErrors) {
                    hasErrors = accessCheck(doc, measure);
                }
                if (!hasErrors) {
                    try {
                        ctx.getFactory().getInterceptors().callInterceptors(InterceptPoint.PRE_CRUD_UPDATE_DOC, ctx, doc);
                        DocTranslator.TranslatedBsonDoc updatedObject = translate(md, doc, document, merge, measure);
                        sup.addDoc(updatedObject.doc);
                        docUpdateAttempts.add(doc);
                        // update in batches
                        if (docUpdateAttempts.size() - batchStartIndex >= batchSize) {
                            preCommit();
                            measure.begin("bulkUpdate");
                            BatchUpdate.CommitInfo ci = sup.commit();
                            measure.end("bulkUpdate");
                            for (Map.Entry<Integer, Error> entry : ci.errors.entrySet()) {
                                docUpdateAttempts.get(entry.getKey() + batchStartIndex).addError(entry.getValue());
                            }
                            numFailed += ci.errors.size();
                            numUpdated += docUpdateAttempts.size() - batchStartIndex - ci.errors.size() - ci.lostDocs.size();
                            numMatched -= ci.lostDocs.size();
                            batchStartIndex = docUpdateAttempts.size();
                            int di = 0;
                            // Only add the docs that were not lost
                            for (DocCtx d : docUpdateAttempts) {
                                if (!ci.lostDocs.contains(di)) {
                                    enforceMemoryLimit(d);
                                    resultDocs.add(d);
                                }
                                di++;
                            }
                        }
                        doc.setCRUDOperationPerformed(CRUDOperation.UPDATE);
                        doc.setUpdatedDocument(doc);
                    } catch (Error e) {
                        if (MongoCrudConstants.ERROR_RESULT_SIZE_TOO_LARGE.equals(e.getErrorCode())) {
                            throw e;
                        } else {
                            LOGGER.warn("Update exception for document {}: {}", docIndex, e);
                            doc.addError(Error.get(MongoCrudConstants.ERR_UPDATE_ERROR, e.toString()));
                            hasErrors = true;
                        }
                    } catch (Exception e) {
                        LOGGER.warn("Update exception for document {}: {}", docIndex, e);
                        doc.addError(Error.get(MongoCrudConstants.ERR_UPDATE_ERROR, e.toString()));
                        hasErrors = true;
                    }
                } else {
                    numFailed++;
                    resultDocs.add(doc);
                }
            } else {
                LOGGER.debug("Document {} was not modified", docIndex);
                resultDocs.add(doc);
            }
            if (hasErrors) {
                LOGGER.debug("Document {} has errors", docIndex);
                doc.setOutputDocument(errorProjector.project(doc, nodeFactory));
            } else if (projector != null) {
                LOGGER.debug("Projecting document {}", docIndex);
                doc.setOutputDocument(projector.project(doc, nodeFactory));
            }
            docIndex++;
        }
        measure.end("iteration");
        // if we have any remaining items to update
        if (docUpdateAttempts.size() > batchStartIndex) {
            preCommit();
            BatchUpdate.CommitInfo ci = sup.commit();
            for (Map.Entry<Integer, Error> entry : ci.errors.entrySet()) {
                docUpdateAttempts.get(entry.getKey() + batchStartIndex).addError(entry.getValue());
            }
            numFailed += ci.errors.size();
            numUpdated += docUpdateAttempts.size() - batchStartIndex - ci.errors.size() - ci.lostDocs.size();
            numMatched -= ci.lostDocs.size();
            int di = 0;
            for (DocCtx d : docUpdateAttempts) {
                if (!ci.lostDocs.contains(di)) {
                    enforceMemoryLimit(d);
                    resultDocs.add(d);
                }
                di++;
            }
        }
    } finally {
        if (cursor != null) {
            cursor.close();
        }
    }
    ctx.setDocumentStream(new ListDocumentStream<DocCtx>(resultDocs));
    response.setNumUpdated(numUpdated);
    response.setNumFailed(numFailed);
    response.setNumMatched(numMatched);
    METRICS.debug("IterateAndUpdate:\n{}", measure);
}
From source file: com.redhat.lightblue.mongo.crud.MongoCRUDController.java
License: Open Source License
protected void populateHiddenFields(EntityInfo ei, Metadata md, String version, List<Path> fields,
        QueryExpression query) throws IOException {
    LOGGER.info("Starting population of hidden fields due to new or modified indexes.");
    MongoDataStore ds = (MongoDataStore) ei.getDataStore();
    DB entityDB = dbResolver.get(ds);
    DBCollection coll = entityDB.getCollection(ds.getCollectionName());
    DBCursor cursor = null;
    try {
        if (query != null) {
            MetadataResolver mdResolver = new MetadataResolver() {
                @Override
                public EntityMetadata getEntityMetadata(String entityName) {
                    String v = version == null ? ei.getDefaultVersion() : version;
                    return md.getEntityMetadata(entityName, v);
                }
            };
            ExpressionTranslator trans = new ExpressionTranslator(mdResolver, JsonNodeFactory.instance);
            DBObject mongoQuery = trans.translate(mdResolver.getEntityMetadata(ei.getName()), query);
            cursor = coll.find(mongoQuery);
        } else {
            cursor = coll.find();
        }
        while (cursor.hasNext()) {
            DBObject doc = cursor.next();
            DBObject original = (DBObject) ((BasicDBObject) doc).copy();
            try {
                DocTranslator.populateDocHiddenFields(doc, fields);
                LOGGER.debug("Original doc:{}, new doc:{}", original, doc);
                if (!doc.equals(original)) {
                    coll.save(doc);
                }
            } catch (Exception e) {
                // skip the doc if there's a problem, don't outright fail
                LOGGER.error(e.getMessage());
                LOGGER.debug("Original doc:\n{}", original);
                LOGGER.debug("Error saving doc:\n{}", doc);
            }
        }
    } catch (Exception e) {
        LOGGER.error("Error during reindexing");
        LOGGER.error(e.getMessage());
        throw new RuntimeException(e);
    } finally {
        // guard against an NPE when find() itself failed and cursor was never assigned
        if (cursor != null) {
            cursor.close();
        }
    }
    LOGGER.info("Finished population of hidden fields.");
}
From source file: com.redhat.lightblue.mongo.metadata.MongoMetadata.java
License: Open Source License
@Override
public VersionInfo[] getEntityVersions(String entityName) {
    if (entityName == null || entityName.length() == 0) {
        throw new IllegalArgumentException(LITERAL_ENTITY_NAME);
    }
    Error.push("getEntityVersions(" + entityName + ")");
    DBCursor cursor = null;
    try {
        // Get the default version
        BasicDBObject query = new BasicDBObject(LITERAL_ID, entityName + BSONParser.DELIMITER_ID);
        DBObject ei = collection.findOne(query);
        String defaultVersion = ei == null ? null : (String) ei.get("defaultVersion");
        // query by name but only return documents that have a version
        query = new BasicDBObject(LITERAL_NAME, entityName).append(LITERAL_VERSION,
                new BasicDBObject("$exists", 1));
        DBObject project = new BasicDBObject(LITERAL_VERSION, 1).append(LITERAL_STATUS, 1).append(LITERAL_ID, 0);
        cursor = collection.find(query, project);
        int n = cursor.count();
        VersionInfo[] ret = new VersionInfo[n];
        int i = 0;
        while (cursor.hasNext()) {
            DBObject object = cursor.next();
            ret[i] = new VersionInfo();
            Version v = mdParser.parseVersion((BSONObject) object.get(LITERAL_VERSION));
            ret[i].setValue(v.getValue());
            ret[i].setExtendsVersions(v.getExtendsVersions());
            ret[i].setChangelog(v.getChangelog());
            ret[i].setStatus(MetadataParser.statusFromString((String) ((DBObject) object.get(LITERAL_STATUS)).get("value")));
            if (defaultVersion != null && defaultVersion.equals(ret[i].getValue())) {
                ret[i].setDefault(true);
            }
            i++;
        }
        return ret;
    } catch (Error e) {
        // rethrow lightblue error
        throw e;
    } catch (Exception e) {
        throw analyzeException(e, MetadataConstants.ERR_ILL_FORMED_METADATA);
    } finally {
        if (cursor != null) {
            cursor.close();
        }
        Error.pop();
    }
}
From source file: com.redhat.lightblue.mongo.metadata.MongoMetadata.java
License: Open Source License
/**
 * When EntityInfo is updated, we have to make sure any active/deprecated
 * metadata is still valid
 */
protected void validateAllVersions(EntityInfo ei) {
    LOGGER.debug("Validating all versions of {}", ei.getName());
    String version = null;
    DBCursor cursor = null;
    try {
        cursor = collection.find(
                new BasicDBObject(LITERAL_NAME, ei.getName())
                        .append(LITERAL_VERSION, new BasicDBObject("$exists", 1))
                        .append(LITERAL_STATUS_VALUE,
                                new BasicDBObject("$ne", MetadataParser.toString(MetadataStatus.DISABLED))),
                null);
        while (cursor.hasNext()) {
            DBObject object = cursor.next();
            EntitySchema schema = mdParser.parseEntitySchema(object);
            version = schema.getVersion().getValue();
            LOGGER.debug("Validating {} {}", ei.getName(), version);
            EntityMetadata md = new EntityMetadata(ei, schema);
            md.validate();
        }
    } catch (Exception e) {
        String msg = ei.getName() + ":" + version + e.toString();
        throw analyzeException(e, MongoMetadataConstants.ERR_UPDATE_INVALIDATES_METADATA, msg);
    } finally {
        if (cursor != null) {
            cursor.close();
        }
    }
}