Example usage for com.mongodb DBCollection find

List of usage examples for com.mongodb DBCollection find

Introduction

On this page you can find example usage of com.mongodb DBCollection find.

Prototype

public DBCursor find(@Nullable final DBObject query, final DBCollectionFindOptions options) 

Document

Selects documents in a collection and returns a cursor to the selected documents.
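
The prototype above takes a query plus a DBCollectionFindOptions object that bundles projection, limit, read preference, and other cursor settings. The usage examples below all call the two-DBObject overload, so here is a minimal sketch of the options-based overload; the host, database, collection, and field names are placeholder assumptions rather than values taken from any of the source files.

import java.util.concurrent.TimeUnit;

import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;
import com.mongodb.ReadPreference;
import com.mongodb.client.model.DBCollectionFindOptions;

public class FindWithOptionsExample {
    public static void main(String[] args) {
        // Hypothetical host, database, and collection names
        MongoClient mongoClient = new MongoClient("localhost", 27017);
        try {
            DB db = mongoClient.getDB("mydb");
            DBCollection collection = db.getCollection("mycollection");

            // Match documents whose status field equals "active"
            DBObject query = new BasicDBObject("status", "active");

            // Bundle projection, limit, read preference, and a time limit into the options object
            DBCollectionFindOptions options = new DBCollectionFindOptions()
                    .projection(new BasicDBObject("name", 1).append("_id", 0))
                    .limit(10)
                    .readPreference(ReadPreference.secondaryPreferred())
                    .maxTime(5, TimeUnit.SECONDS);

            // find(query, options) returns a DBCursor over the matching documents
            try (DBCursor cursor = collection.find(query, options)) {
                while (cursor.hasNext()) {
                    System.out.println(cursor.next());
                }
            }
        } finally {
            mongoClient.close();
        }
    }
}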

Usage

From source file: com.redhat.lightblue.mongo.crud.BasicDocDeleter.java

License: Open Source License

@Override
public void delete(CRUDOperationContext ctx, DBCollection collection, DBObject mongoQuery,
        CRUDDeleteResponse response) {
    LOGGER.debug("Removing docs with {}", mongoQuery);

    int numDeleted = 0;

    if (!hookOptimization || ctx.getHookManager().hasHooks(ctx, CRUDOperation.DELETE)) {
        LOGGER.debug("There are hooks, retrieve-delete");
        try (DBCursor cursor = collection.find(mongoQuery, null)) {
            // Set read preference to primary for read-for-update operations
            cursor.setReadPreference(ReadPreference.primary());

            // All docs, to be put into the context
            ArrayList<DocCtx> contextDocs = new ArrayList<>();
            // ids to delete from the db
            List<Object> idsToDelete = new ArrayList<>(batchSize);
            while (cursor.hasNext()) {

                // We will use this index to access the documents deleted in this batch
                int thisBatchIndex = contextDocs.size();
                if (idsToDelete.size() < batchSize) {
                    // build batch
                    DBObject doc = cursor.next();
                    DocTranslator.TranslatedDoc tdoc = translator.toJson(doc);
                    DocCtx docCtx = new DocCtx(tdoc.doc, tdoc.rmd);
                    docCtx.setOriginalDocument(docCtx);
                    docCtx.setCRUDOperationPerformed(CRUDOperation.DELETE);
                    contextDocs.add(docCtx);
                    idsToDelete.add(doc.get(MongoCRUDController.ID_STR));
                }

                if (idsToDelete.size() == batchSize || !cursor.hasNext()) {
                    // batch built or run out of documents                        
                    BulkWriteOperation bw = collection.initializeUnorderedBulkOperation();

                    for (Object id : idsToDelete) {
                        // doing a bulk of single operations instead of removing by initial query
                        // that way we know which documents were not removed
                        bw.find(new BasicDBObject("_id", id)).remove();
                    }

                    BulkWriteResult result = null;
                    try {
                        if (writeConcern == null) {
                            LOGGER.debug("Bulk deleting docs");
                            result = bw.execute();
                        } else {
                            LOGGER.debug("Bulk deleting docs with writeConcern={} from execution",
                                    writeConcern);
                            result = bw.execute(writeConcern);
                        }
                        LOGGER.debug("Bulk deleted docs - attempted {}, deleted {}", idsToDelete.size(),
                                result.getRemovedCount());
                    } catch (BulkWriteException bwe) {
                        LOGGER.error("Bulk write exception", bwe);
                        handleBulkWriteError(bwe.getWriteErrors(),
                                contextDocs.subList(thisBatchIndex, contextDocs.size()));
                        result = bwe.getWriteResult();
                    } catch (RuntimeException e) {
                        LOGGER.error("Exception", e);
                        throw e;
                    } finally {

                        numDeleted += result.getRemovedCount();
                        // clear list before processing next batch
                        idsToDelete.clear();
                    }
                }
            }
            ctx.setDocumentStream(new ListDocumentStream<DocCtx>(contextDocs));
        }
    } else {
        LOGGER.debug("There are no hooks, deleting in bulk");
        try {
            if (writeConcern == null) {
                numDeleted = collection.remove(mongoQuery).getN();
            } else {
                numDeleted = collection.remove(mongoQuery, writeConcern).getN();
            }
        } catch (MongoException e) {
            LOGGER.error("Deletion error", e);
            throw e;
        }
        ctx.setDocumentStream(new ListDocumentStream<DocCtx>(new ArrayList<DocCtx>()));
    }

    response.setNumDeleted(numDeleted);
}

From source file: com.redhat.lightblue.mongo.crud.BasicDocFinder.java

License: Open Source License

@Override
public long find(CRUDOperationContext ctx, DBCollection coll, DBObject mongoQuery, DBObject mongoProjection,
        DBObject mongoSort, Long from, Long to) {
    LOGGER.debug("Submitting query {}", mongoQuery);

    long executionTime = System.currentTimeMillis();
    DBCursor cursor = null;
    boolean cursorInUse = false;
    try {
        cursor = coll.find(mongoQuery, mongoProjection);
        if (readPreference != null) {
            cursor.setReadPreference(readPreference);
        }

        if (ctx.isLimitQueryTime() && maxQueryTimeMS > 0) {
            cursor.maxTime(maxQueryTimeMS, TimeUnit.MILLISECONDS);
        }

        executionTime = System.currentTimeMillis() - executionTime;

        LOGGER.debug("Query evaluated");
        if (mongoSort != null) {
            cursor = cursor.sort(mongoSort);
            LOGGER.debug("Result set sorted");
        }

        LOGGER.debug("Applying limits: {} - {}", from, to);
        boolean retrieve = true;
        int nRetrieve = 0;
        int numMatched = 0;
        // f and t are from and to indexes, both inclusive
        int f = from == null ? 0 : from.intValue();
        if (f < 0) {
            f = 0;
        }
        cursor.skip(f);
        if (ctx.isComputeCounts()) {
            numMatched = cursor.count();
        }
        int t;
        if (to != null) {
            t = to.intValue();
            if (t < f) {
                retrieve = false;
            } else {
                cursor.limit(nRetrieve = t - f + 1);
            }
        } else {
            if (ctx.isComputeCounts()) {
                t = numMatched - 1;
                nRetrieve = numMatched - f;
            } else {
                t = Integer.MAX_VALUE;
            }
        }
        if (retrieve) {
            LOGGER.debug("Retrieving results");
            CursorStream stream = new CursorStream(cursor, translator, mongoQuery, executionTime, f, t);
            ctx.setDocumentStream(stream);
            cursorInUse = true;
        } else {
            ctx.setDocumentStream(new ListDocumentStream<DocCtx>(new ArrayList<>()));
        }
        if (RESULTSET_LOGGER.isDebugEnabled() && (executionTime > 100)) {
            RESULTSET_LOGGER.debug("execution_time={}, query={}, from={}, to={}", executionTime, mongoQuery, f,
                    t);
        }
        return numMatched;
    } finally {
        if (cursor != null && !cursorInUse) {
            cursor.close();
        }
    }
}

From source file: com.redhat.lightblue.mongo.crud.BasicDocSaver.java

License: Open Source License

private void saveDocs(CRUDOperationContext ctx, Op op, boolean upsert, DBCollection collection,
        List<DocInfo> batch) {
    // If this is a save operation, we have to load the existing DB objects
    if (op == DocSaver.Op.save) {
        LOGGER.debug("Retrieving existing {} documents for save operation", batch.size());
        List<BasicDBObject> idQueries = new ArrayList<>(batch.size());
        for (DocInfo doc : batch) {
            doc.id = getFieldValues(doc.newDoc, idPaths);
            if (!isNull(doc.id)) {
                idQueries.add(doc.getIdQuery());
            }
        }
        if (!idQueries.isEmpty()) {
            BasicDBObject retrievalq = new BasicDBObject("$or", idQueries);
            LOGGER.debug("Existing document retrieval query={}", retrievalq);
            try (DBCursor cursor = collection.find(retrievalq, null)) {
                // Make sure we read from primary, because that's where we'll write
                cursor.setReadPreference(ReadPreference.primary());
                List<DBObject> results = cursor.toArray();
                LOGGER.debug("Retrieved {} docs", results.size());

                // Now associate the docs in the retrieved results with the docs in the batch
                for (DBObject dbDoc : results) {
                    // Get the id from the doc
                    Object[] id = getFieldValues(dbDoc, idPaths);
                    // Find this doc in the batch
                    DocInfo doc = findDoc(batch, id);
                    if (doc != null) {
                        doc.oldDoc = dbDoc;
                    } else {
                        LOGGER.warn("Cannot find doc with id={}", id);
                    }
                }
            }
        }
    }
    // Some docs in the batch will be inserted, some saved, based on the operation. Lets decide that now
    List<DocInfo> saveList;
    List<DocInfo> insertList;
    if (op == DocSaver.Op.insert) {
        saveList = new ArrayList<>();
        insertList = batch;
    } else {
        // This is a save operation
        saveList = new ArrayList<>();
        insertList = new ArrayList<>();
        for (DocInfo doc : batch) {
            if (doc.oldDoc == null) {
                // there is no doc in the db
                if (upsert) {
                    // This is an insertion
                    insertList.add(doc);
                } else {
                    // This is an invalid  request
                    LOGGER.warn("Invalid request, cannot update or insert");
                    doc.inputDoc.addError(
                            Error.get(op.toString(), MongoCrudConstants.ERR_SAVE_ERROR_INS_WITH_NO_UPSERT,
                                    "New document, but upsert=false"));
                }
            } else {
                // There is a doc in the db
                saveList.add(doc);
            }
        }
    }
    LOGGER.debug("Save docs={}, insert docs={}, error docs={}", saveList.size(), insertList.size(),
            batch.size() - saveList.size() - insertList.size());
    insertDocs(ctx, collection, insertList);
    updateDocs(ctx, collection, saveList, upsert);
}

From source file: com.redhat.lightblue.mongo.crud.BatchUpdate.java

License: Open Source License

/**
 * Returns the set of document ids that were not updated with docver
 *
 * @param docver The current document version
 * @param documentIds The document ids to scan
 *
 * @return The set of document ids that were not updated with docver
 */
public static Set<Object> getFailedUpdates(DBCollection collection, ObjectId docver, List<Object> documentIds) {
    Set<Object> failedIds = new HashSet<>();
    if (!documentIds.isEmpty()) {
        // documents with the given _ids and whose docver contains our docVer are the ones we managed to update
        // others are failures
        BasicDBObject query = new BasicDBObject(DOCVER_FLD, new BasicDBObject("$ne", docver));
        query.append("_id", new BasicDBObject("$in", documentIds));
        try (DBCursor cursor = collection.find(query, new BasicDBObject("_id", 1))
                .setReadPreference(ReadPreference.primary())) {
            while (cursor.hasNext()) {
                failedIds.add(cursor.next().get("_id"));
            }
        }
    }
    return failedIds;
}

From source file: com.redhat.lightblue.mongo.crud.IterateAndUpdate.java

License: Open Source License

@Override
public void update(CRUDOperationContext ctx, DBCollection collection, EntityMetadata md,
        CRUDUpdateResponse response, DBObject query) {
    LOGGER.debug("iterateUpdate: start");
    LOGGER.debug("Computing the result set for {}", query);
    Measure measure = new Measure();
    BatchUpdate sup = getUpdateProtocol(ctx, collection, query, md, measure);
    DBCursor cursor = null;
    int docIndex = 0;
    int numMatched = 0;
    int numUpdated = 0;
    int numFailed = 0;
    BsonMerge merge = new BsonMerge(md);
    List<DocCtx> docUpdateAttempts = new ArrayList<>();
    List<DocCtx> resultDocs = new ArrayList<>();
    ctx.setInputDocuments(resultDocs);
    try {
        ctx.getFactory().getInterceptors().callInterceptors(InterceptPoint.PRE_CRUD_UPDATE_RESULTSET, ctx);
        measure.begin("collection.find");
        cursor = collection.find(query, null);
        // Read from primary for read-for-update operations
        cursor.setReadPreference(ReadPreference.primary());
        measure.end("collection.find");
        LOGGER.debug("Found {} documents", cursor.count());
        // read-update-write
        measure.begin("iteration");
        int batchStartIndex = 0; // docUpdateAttempts[batchStartIndex] is the first doc in this batch
        // TODO: This code is very messy and probably has several logic bugs. I do not have time to fix it.
        // Things I noticed:
        // 1. numFailed is not updated consistently. Depending on where failure occurs, it may not be updated!
        // 2. resultDocs are not updated consistently. Depending on the branch, the document may not end up in the response.
        //    It is not clear from reading the code when it's expected to be in the response or not.
        //    I know from some failing tests in dependent services that at least some cases are bugged.
        // The amount of branching needs to be toned down, and low level state fiddling needs to be better abstracted
        // so it can be expressed in fewer places.
        while (cursor.hasNext()) {
            DBObject document = cursor.next();
            numMatched++;
            boolean hasErrors = false;
            LOGGER.debug("Retrieved doc {}", docIndex);
            measure.begin("ctx.addDocument");
            DocTranslator.TranslatedDoc translatedDoc = translator.toJson(document);
            DocCtx doc = new DocCtx(translatedDoc.doc, translatedDoc.rmd);
            doc.startModifications();
            measure.end("ctx.addDocument");
            // From now on: doc contains the working copy, and doc.originalDoc contains the original copy
            if (updateDoc(md, doc, measure)) {
                LOGGER.debug("Document {} modified, updating", docIndex);
                ctx.getFactory().getInterceptors()
                        .callInterceptors(InterceptPoint.PRE_CRUD_UPDATE_DOC_VALIDATION, ctx, doc);
                LOGGER.debug("Running constraint validations");
                measure.begin("validation");
                validator.clearErrors();
                validator.validateDoc(doc);
                measure.end("validation");
                List<Error> errors = validator.getErrors();
                if (errors != null && !errors.isEmpty()) {
                    ctx.addErrors(errors);
                    hasErrors = true;
                    LOGGER.debug("Doc has errors");
                }
                errors = validator.getDocErrors().get(doc);
                if (errors != null && !errors.isEmpty()) {
                    doc.addErrors(errors);
                    hasErrors = true;
                    LOGGER.debug("Doc has data errors");
                }
                if (!hasErrors) {
                    hasErrors = accessCheck(doc, measure);
                }
                if (!hasErrors) {
                    try {
                        ctx.getFactory().getInterceptors().callInterceptors(InterceptPoint.PRE_CRUD_UPDATE_DOC,
                                ctx, doc);
                        DocTranslator.TranslatedBsonDoc updatedObject = translate(md, doc, document, merge,
                                measure);

                        sup.addDoc(updatedObject.doc);
                        docUpdateAttempts.add(doc);
                        // update in batches
                        if (docUpdateAttempts.size() - batchStartIndex >= batchSize) {
                            preCommit();
                            measure.begin("bulkUpdate");
                            BatchUpdate.CommitInfo ci = sup.commit();
                            measure.end("bulkUpdate");
                            for (Map.Entry<Integer, Error> entry : ci.errors.entrySet()) {
                                docUpdateAttempts.get(entry.getKey() + batchStartIndex)
                                        .addError(entry.getValue());
                            }
                            numFailed += ci.errors.size();
                            numUpdated += docUpdateAttempts.size() - batchStartIndex - ci.errors.size()
                                    - ci.lostDocs.size();
                            numMatched -= ci.lostDocs.size();
                            batchStartIndex = docUpdateAttempts.size();
                            int di = 0;
                            // Only add the docs that were not lost
                            for (DocCtx d : docUpdateAttempts) {
                                if (!ci.lostDocs.contains(di)) {
                                    enforceMemoryLimit(d);
                                    resultDocs.add(d);
                                }
                                di++;
                            }
                        }
                        doc.setCRUDOperationPerformed(CRUDOperation.UPDATE);
                        doc.setUpdatedDocument(doc);
                    } catch (Error e) {
                        if (MongoCrudConstants.ERROR_RESULT_SIZE_TOO_LARGE.equals(e.getErrorCode())) {
                            throw e;
                        } else {
                            LOGGER.warn("Update exception for document {}: {}", docIndex, e);
                            doc.addError(Error.get(MongoCrudConstants.ERR_UPDATE_ERROR, e.toString()));
                            hasErrors = true;
                        }
                    } catch (Exception e) {
                        LOGGER.warn("Update exception for document {}: {}", docIndex, e);
                        doc.addError(Error.get(MongoCrudConstants.ERR_UPDATE_ERROR, e.toString()));
                        hasErrors = true;
                    }
                } else {
                    numFailed++;
                    resultDocs.add(doc);
                }
            } else {
                LOGGER.debug("Document {} was not modified", docIndex);
                resultDocs.add(doc);
            }
            if (hasErrors) {
                LOGGER.debug("Document {} has errors", docIndex);
                doc.setOutputDocument(errorProjector.project(doc, nodeFactory));
            } else if (projector != null) {
                LOGGER.debug("Projecting document {}", docIndex);
                doc.setOutputDocument(projector.project(doc, nodeFactory));
            }
            docIndex++;
        }
        measure.end("iteration");
        // if we have any remaining items to update
        if (docUpdateAttempts.size() > batchStartIndex) {
            preCommit();
            BatchUpdate.CommitInfo ci = sup.commit();
            for (Map.Entry<Integer, Error> entry : ci.errors.entrySet()) {
                docUpdateAttempts.get(entry.getKey() + batchStartIndex).addError(entry.getValue());
            }
            numFailed += ci.errors.size();
            numUpdated += docUpdateAttempts.size() - batchStartIndex - ci.errors.size() - ci.lostDocs.size();
            numMatched -= ci.lostDocs.size();
            int di = 0;
            for (DocCtx d : docUpdateAttempts) {
                if (!ci.lostDocs.contains(di)) {
                    enforceMemoryLimit(d);
                    resultDocs.add(d);
                }
                di++;
            }
        }
    } finally {
        if (cursor != null) {
            cursor.close();
        }
    }

    ctx.setDocumentStream(new ListDocumentStream<DocCtx>(resultDocs));

    response.setNumUpdated(numUpdated);
    response.setNumFailed(numFailed);
    response.setNumMatched(numMatched);
    METRICS.debug("IterateAndUpdate:\n{}", measure);
}

From source file: com.redhat.lightblue.mongo.crud.MongoCRUDController.java

License: Open Source License

@Override
public void explain(CRUDOperationContext ctx, QueryExpression query, Projection projection, Sort sort,
        Long from, Long to, JsonDoc destDoc) {

    LOGGER.debug("explain start: q:{} p:{} sort:{} from:{} to:{}", query, projection, sort, from, to);
    Error.push("explain");
    ExpressionTranslator xtranslator = new ExpressionTranslator(ctx, ctx.getFactory().getNodeFactory());
    try {
        EntityMetadata md = ctx.getEntityMetadata(ctx.getEntityName());
        FieldAccessRoleEvaluator roleEval = new FieldAccessRoleEvaluator(md, ctx.getCallerRoles());
        LOGGER.debug("Translating query {}", query);
        DBObject mongoQuery = xtranslator.translate(md,
                ExpressionTranslator.appendObjectType(query, ctx.getEntityName()));
        LOGGER.debug("Translated query {}", mongoQuery);
        DBObject mongoProjection = xtranslator.translateProjection(md, getProjectionFields(projection, md),
                query, sort);
        LOGGER.debug("Translated projection {}", mongoProjection);
        DB db = dbResolver.get((MongoDataStore) md.getDataStore());
        DBCollection coll = db.getCollection(((MongoDataStore) md.getDataStore()).getCollectionName());
        LOGGER.debug("Retrieve db collection:" + coll);

        try (DBCursor cursor = coll.find(mongoQuery, mongoProjection)) {
            DBObject plan = cursor.explain();
            JsonNode jsonPlan = DocTranslator.rawObjectToJson(plan);
            if (mongoQuery != null)
                destDoc.modify(new Path("mongo.query"), DocTranslator.rawObjectToJson(mongoQuery), true);
            if (mongoProjection != null)
                destDoc.modify(new Path("mongo.projection"), DocTranslator.rawObjectToJson(mongoProjection),
                        true);
            destDoc.modify(new Path("mongo.plan"), jsonPlan, true);
        }

    } catch (Error e) {
        ctx.addError(e);
    } catch (Exception e) {
        LOGGER.error("Error during explain:", e);
        ctx.addError(analyzeException(e, CrudConstants.ERR_CRUD));
    } finally {
        Error.pop();
    }
    LOGGER.debug("explain end: query: {} ", query);
}

From source file: com.softlyinspired.jlw.menus.mongoMenuSet.java

License: Open Source License

int readAllCustomMenus() throws UnknownHostException {
    // get handle to "mydb"
    DB db = repoConnection.getConnection();
    DBCollection coll = db.getCollection("customMenus");

    // get all the documents in the collection and print them out
    BasicDBObject query = new BasicDBObject();
    BasicDBObject fields = new BasicDBObject();
    fields.put("menuTitle", 1);
    fields.put("menuId", 1);
    fields.put("concernId", 1);
    fields.put("_id", 0);

    DBCursor cursor = coll.find(query, fields);
    int menuIdFound;
    String menuTitleFound = null;
    String menuConcernFound = null;
    DBObject doc;

    try {

        while (cursor.hasNext()) {
            doc = cursor.next();
            String menuIdText = doc.get("menuId").toString();
            try {
                menuIdFound = Integer.parseInt(menuIdText);
            } catch (Exception e) {
                menuIdFound = 0;
            }
            menuTitleFound = doc.get("menuTitle").toString();
            menuConcernFound = doc.get("concernId").toString();
            boolean menuFound = false;

            int i = 0;
            while ((i < menuCount) && (menuFound == false)) {
                if (customMenu[i].menuId == menuIdFound) {
                    menuFound = true;
                    mongoMenu currentMenu = new mongoMenu();
                    currentMenu = customMenu[i];
                    currentMenu.menuItem[currentMenu.menuItemCount] = menuConcernFound;
                    currentMenu.menuItemCount += 1;
                    customMenu[i] = currentMenu;
                }
                i += 1;
            }
            if (!menuFound) {
                mongoMenu currentMenu = new mongoMenu();
                currentMenu.menuId = menuIdFound;
                currentMenu.menuTitle = menuTitleFound;
                currentMenu.menuItem[0] = menuConcernFound;
                currentMenu.menuItemCount = 1;
                customMenu[menuCount] = currentMenu;
                menuCount = menuCount + 1;

            }
        }
    } catch (Exception e) {
        System.out.println(e);
    } finally {
        cursor.close();
    }

    return menuCount;

}

From source file: com.softlyinspired.jlw.reports.ReportReference.java

License: Open Source License

/**
 * Lists all the reports available
 * @return  Two dimensional array
 */
public String[][] listall() {
    String reportList[][] = new String[100][2];
    String tempId = new String();
    String tempTitle = new String();

    try {
        DBCollection coll = repoConnection.getReportsCollection();

        DBObject doc;
        BasicDBObject query = new BasicDBObject();
        BasicDBObject fields = new BasicDBObject();
        BasicDBObject sort = new BasicDBObject();

        fields.put("name", 1);
        fields.put("reportId", 1);
        fields.put("_id", 0);

        sort.put("reportId", 1);

        try {
            DBCursor allReports = coll.find(query, fields);
            allReports.sort(sort);
            reportCount = -1;
            while (allReports.hasNext()) {
                doc = allReports.next();
                reportCount = reportCount + 1;
                tempId = doc.get("reportId").toString();
                try {
                    tempTitle = doc.get("name").toString();
                } catch (Exception e) {
                    tempTitle = "";
                }

                reportList[reportCount][0] = tempId;
                reportList[reportCount][1] = tempTitle;
            }
        } catch (Exception e) {
            System.out.println("Report Missing");
        }
    } catch (Exception e) {
        JLWUtilities.scriptErrorMessage(e.toString());
    }
    return reportList;
}

From source file: com.softlyinspired.jlw.script.validationScript.java

License: Open Source License

/**
 * Lists all the scripts available
 * @return  Two dimensional array
 */
public String[][] listall() {
    String scriptList[][] = new String[100][2];
    String tempId = new String();
    String tempTitle = new String();

    try {
        DBCollection coll = repoConnection.getScriptCollection();

        DBObject doc;
        BasicDBObject query = new BasicDBObject();
        BasicDBObject fields = new BasicDBObject();
        BasicDBObject sort = new BasicDBObject();

        fields.put("scriptText", 1);
        fields.put("scriptId", 1);
        fields.put("scriptTitle", 1);
        fields.put("_id", 0);

        sort.put("scriptId", 1);

        try {
            DBCursor allScripts = coll.find(query, fields); //.sort(sort);   
            allScripts.sort(sort);
            scriptCount = -1;
            while (allScripts.hasNext()) {
                doc = allScripts.next();
                scriptCount = scriptCount + 1;
                tempId = doc.get("scriptId").toString();
                doc.get("scriptText").toString();
                try {
                    tempTitle = doc.get("scriptTitle").toString();
                } catch (Exception e) {
                    tempTitle = "";
                }

                scriptList[scriptCount][0] = tempId;
                scriptList[scriptCount][1] = tempTitle;
            }

        } catch (Exception e) {
            System.out.println("Script Missing");
        }

    } catch (Exception e) {
        JLWUtilities.scriptErrorMessage(e.toString());

    }
    return scriptList;

}

From source file: com.stratio.connector.mongodb.core.engine.query.BasicLogicalWorkflowExecutor.java

License: Apache License

/**
 * Execute a standard query.
 *
 * @param mongoClient
 *            the MongoDB client.
 * @return the Crossdata ResultSet.
 * @throws MongoValidationException .
 * @throws ExecutionException
 *             if the execution fails or the query specified in the logical workflow is not supported.
 */
public ResultSet executeQuery(MongoClient mongoClient) throws ExecutionException {

    DB db = mongoClient.getDB(logicalWorkflowData.getProject().getCatalogName());
    DBCollection collection = db.getCollection(logicalWorkflowData.getProject().getTableName().getName());
    ResultSet resultSet = new ResultSet();
    resultSet.setColumnMetadata(
            MetaResultUtils.createMetadata(logicalWorkflowData.getProject(), logicalWorkflowData.getSelect()));

    if (logger.isDebugEnabled()) {
        logger.debug("Executing MongoQuery: " + query.get(0) + ", with fields: " + buildProject(false));
    }

    DBCursor cursor = collection.find(query.get(0), buildProject(false));

    if (logicalWorkflowData.getOrderBy() != null) {
        cursor = cursor.sort(buildOrderBy(false));
    }

    if (logicalWorkflowData.getLimit() != null) {
        cursor = cursor.limit(logicalWorkflowData.getLimit().getLimit());
    }

    DBObject rowDBObject;
    try {
        while (cursor.hasNext()) {
            rowDBObject = cursor.next();
            if (logger.isDebugEnabled()) {
                logger.debug("BResult: " + rowDBObject);
            }
            resultSet.add(MetaResultUtils.createRowWithAlias(rowDBObject, logicalWorkflowData.getSelect()));
        }
    } catch (MongoException e) {
        logger.error("Error executing a basic query :" + query.get(0) + ", with fields: " + buildProject(false)
                + "\n Error:" + e.getMessage());
        throw new MongoExecutionException(e.getMessage(), e);
    } finally {
        cursor.close();
    }

    return resultSet;
}