List of usage examples for com.mongodb.client.model.Projections#include
public static Bson include(final List<String> fieldNames)
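Before the examples from real projects, here is a minimal, self-contained sketch of this overload (connection URI, database, collection and field names are hypothetical): the Bson built by Projections.include(List<String>) is handed to find().projection(...) so each returned document carries only the listed fields plus _id.

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Projections;
import org.bson.Document;
import org.bson.conversions.Bson;
import java.util.Arrays;
import java.util.List;

public class IncludeListExample {
    public static void main(String[] args) {
        // Hypothetical connection details, database and collection names
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            MongoCollection<Document> collection = client.getDatabase("test").getCollection("users");
            List<String> fieldNames = Arrays.asList("name", "email");   // hypothetical field names
            Bson projection = Projections.include(fieldNames);          // the List<String> overload documented above
            for (Document doc : collection.find().projection(projection)) {
                System.out.println(doc.toJson());                        // each result keeps only _id, name and email
            }
        }
    }
}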
From source file:module.test.CustomExport.java
License:Open Source License
public CustomExport() {

    // ===== Connection =====
    MongoClient mongoClient = MongoUtil.buildMongoClient();
    MongoDatabase db = mongoClient.getDatabase("epimed_experiments");
    MongoCollection<Document> collection = db.getCollection("samples");

    // ===== Find exp_group in the database =====

    // === Query 1 ===
    /*
    String queryName = "breast_cancer_GPL570";
    List<Bson> filters = new ArrayList<Bson>();
    filters.add(Filters.eq("exp_group.id_platform", "GPL570"));
    filters.add(Filters.eq("exp_group.id_topology_group", "C50"));
    filters.add(Filters.eq("exp_group.id_tissue_status", 3)); // tumoral
    */

    // === Query 2 ===
    /*
    String queryName = "breast_normal_GPL570";
    List<Bson> filters = new ArrayList<Bson>();
    filters.add(Filters.eq("exp_group.id_platform", "GPL570"));
    filters.add(Filters.eq("exp_group.id_topology_group", "C50"));
    filters.add(Filters.eq("exp_group.id_tissue_status", 1)); // normal
    */

    // === Query 3 ===
    String queryName = "breast_cancer_with_survival_GPL570";
    List<Bson> filters = new ArrayList<Bson>();
    filters.add(Filters.eq("exp_group.id_platform", "GPL570"));
    filters.add(Filters.eq("exp_group.id_topology_group", "C50"));
    filters.add(Filters.eq("exp_group.id_tissue_status", 3)); // tumoral
    filters.add(Filters.or(Filters.ne("exp_group.os_months", null),
            Filters.ne("exp_group.dfss_months", null),
            Filters.ne("exp_group.relapsed", null),
            Filters.ne("exp_group.dead", null)));

    Bson filter = Filters.and(filters);
    Long nbSamples = collection.count(filter);
    List<String> listSeries = collection.distinct("exp_group.main_gse_number", filter, String.class)
            .into(new ArrayList<String>());
    queryName = queryName + "_" + nbSamples + "_samples_" + listSeries.size() + "_series";

    List<Document> docExpGroup = collection.find(filter)
            .projection(Projections.fields(Projections.include("exp_group"), Projections.excludeId()))
            .into(new ArrayList<Document>());

    List<Document> docParam = collection.find(filter)
            .projection(Projections.fields(Projections.include("parameters"), Projections.excludeId()))
            .into(new ArrayList<Document>());

    mongoClient.close();

    // ===== Load Exp Group into a matrix =====
    List<String> headerExpGroup = new ArrayList<String>();
    List<Object> dataExpGroup = new ArrayList<Object>();

    for (int i = 0; i < docExpGroup.size(); i++) {
        Map<String, String> expGroup = (Map<String, String>) docExpGroup.get(i).get("exp_group");
        if (i == 0) {
            headerExpGroup.addAll(expGroup.keySet());
        }
        Object[] dataLine = new Object[headerExpGroup.size()];
        for (int j = 0; j < headerExpGroup.size(); j++) {
            dataLine[j] = expGroup.get(headerExpGroup.get(j));
        }
        dataExpGroup.add(dataLine);
    }

    // ===== Load Params into a matrix =====
    Set<String> headerParamSet = new HashSet<String>();
    List<String> headerParam = new ArrayList<String>();
    List<Object> dataParam = new ArrayList<Object>();

    for (int i = 0; i < docParam.size(); i++) {
        Map<String, String> param = (Map<String, String>) docParam.get(i).get("parameters");
        headerParamSet.addAll(param.keySet());
    }
    headerParam.addAll(headerParamSet);
    Collections.sort(headerParam);

    for (int i = 0; i < docParam.size(); i++) {
        Map<String, String> param = (Map<String, String>) docParam.get(i).get("parameters");
        Object[] dataLine = new Object[headerParam.size()];
        for (int j = 0; j < headerParam.size(); j++) {
            dataLine[j] = param.get(headerParam.get(j));
        }
        // System.out.println(Arrays.toString(dataLine));
        dataParam.add(dataLine);
    }

    // === Output ===
    String fileName = this.getOutputDirectory() + this.getDirSeparator() + "EpiMed_database_" + queryName
            + "_" + dateFormat.format(new Date()) + ".xlsx";
    System.out.println(fileName);
    XSSFWorkbook workbook = fileService.createWorkbook();
    fileService.addSheet(workbook, "exp_group_" + dateFormat.format(new Date()), headerExpGroup, dataExpGroup);
    fileService.addSheet(workbook, "parameters_" + dateFormat.format(new Date()), headerParam, dataParam);
    fileService.writeWorkbook(workbook, fileName);
}
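The projection at the core of this example can be distilled as follows; a hedged sketch with the same imports as above, assuming a MongoCollection and a filter are already built. Projections.fields combines include with excludeId so only the embedded sub-document comes back, and into(...) collects the cursor into a list.

// Hedged sketch (hypothetical names), assuming 'collection' and 'filter' are prepared as in the example above:
// fields() combines include() with excludeId() so only the embedded "exp_group" document is returned.
static List<Document> findExpGroups(MongoCollection<Document> collection, Bson filter) {
    return collection.find(filter)
            .projection(Projections.fields(Projections.include("exp_group"), Projections.excludeId()))
            .into(new ArrayList<Document>());
}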
From source file:mongodb.clients.percunia.mongo.Projection.java
License:Apache License
public static Bson include(String... fields) { return Projections.include(fields); }
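A possible call site for this wrapper (field names are hypothetical); it simply forwards to the varargs overload Projections.include(String...), so the result can be used anywhere a Bson projection is expected.

// Hypothetical usage of the wrapper above; equivalent to calling Projections.include directly.
Bson projection = Projection.include("name", "address.city");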
From source file:org.apache.beam.sdk.io.mongodb.FindQuery.java
License:Apache License
@Override
public MongoCursor<Document> apply(MongoCollection<Document> collection) {
    return collection.find()
            .filter(filters())
            .limit(limit())
            .projection(Projections.include(projection()))
            .iterator();
}
From source file:org.apache.rya.mongodb.aggregation.AggregationPipelineQueryNode.java
License:Apache License
/**
 * Add a SPARQL projection or multi-projection operation to the pipeline.
 * The number of documents produced by the pipeline after this operation
 * will be the number of documents entering this stage (the number of
 * intermediate results) multiplied by the number of
 * {@link ProjectionElemList}s supplied here. Empty projections are
 * unsupported; if one or more of the given projections binds zero variables,
 * then the pipeline will be unchanged and the method will return false.
 * @param projections One or more projections, i.e. mappings from the result
 *  at this stage of the query into a set of variables.
 * @return true if the projection(s) were added to the pipeline.
 */
public boolean project(final Iterable<ProjectionElemList> projections) {
    if (projections == null || !projections.iterator().hasNext()) {
        return false;
    }
    final List<Bson> projectOpts = new LinkedList<>();
    final Set<String> bindingNamesUnion = new HashSet<>();
    Set<String> bindingNamesIntersection = null;
    for (final ProjectionElemList projection : projections) {
        if (projection.getElements().isEmpty()) {
            // Empty projections are unsupported -- fail when seen
            return false;
        }
        final Document valueDoc = new Document();
        final Document hashDoc = new Document();
        final Document typeDoc = new Document();
        final Set<String> projectionBindingNames = new HashSet<>();
        for (final ProjectionElem elem : projection.getElements()) {
            String to = elem.getTargetName();
            // If the 'to' name is invalid, replace it internally
            if (!isValidFieldName(to)) {
                to = replace(to);
            }
            String from = elem.getSourceName();
            // If the 'from' name is invalid, use the internal substitute
            if (varToOriginalName.containsValue(from)) {
                from = varToOriginalName.inverse().get(from);
            }
            projectionBindingNames.add(to);
            if (to.equals(from)) {
                valueDoc.append(to, 1);
                hashDoc.append(to, 1);
                typeDoc.append(to, 1);
            } else {
                valueDoc.append(to, valueFieldExpr(from));
                hashDoc.append(to, hashFieldExpr(from));
                typeDoc.append(to, typeFieldExpr(from));
            }
        }
        bindingNamesUnion.addAll(projectionBindingNames);
        if (bindingNamesIntersection == null) {
            bindingNamesIntersection = new HashSet<>(projectionBindingNames);
        } else {
            bindingNamesIntersection.retainAll(projectionBindingNames);
        }
        projectOpts.add(new Document()
                .append(VALUES, valueDoc)
                .append(HASHES, hashDoc)
                .append(TYPES, typeDoc)
                .append(LEVEL, "$" + LEVEL)
                .append(TIMESTAMP, "$" + TIMESTAMP));
    }
    if (projectOpts.size() == 1) {
        pipeline.add(Aggregates.project(projectOpts.get(0)));
    } else {
        final String listKey = "PROJECTIONS";
        final Bson projectIndividual = Projections.fields(
                Projections.computed(VALUES, "$" + listKey + "." + VALUES),
                Projections.computed(HASHES, "$" + listKey + "." + HASHES),
                Projections.computed(TYPES, "$" + listKey + "." + TYPES),
                Projections.include(LEVEL),
                Projections.include(TIMESTAMP));
        pipeline.add(Aggregates.project(Projections.computed(listKey, projectOpts)));
        pipeline.add(Aggregates.unwind("$" + listKey));
        pipeline.add(Aggregates.project(projectIndividual));
    }
    assuredBindingNames.clear();
    bindingNames.clear();
    assuredBindingNames.addAll(bindingNamesIntersection);
    bindingNames.addAll(bindingNamesUnion);
    return true;
}
From source file:org.apache.rya.mongodb.aggregation.AggregationPipelineQueryNode.java
License:Apache License
/**
 * Add a SPARQL extension to the pipeline, if possible. An extension adds
 * some number of variables to the result. Adds a "$project" step to the
 * pipeline, but differs from the SPARQL project operation in that
 * 1) pre-existing variables are always kept, and 2) values of new variables
 * are defined by expressions, which may be more complex than simply
 * variable names. Not all expressions are supported. If unsupported
 * expression types are used in the extension, the pipeline will remain
 * unchanged and this method will return false.
 * @param extensionElements A list of new variables and their expressions
 * @return True if the extension was successfully converted into a pipeline
 *  step, false otherwise.
 */
public boolean extend(final Iterable<ExtensionElem> extensionElements) {
    final List<Bson> valueFields = new LinkedList<>();
    final List<Bson> hashFields = new LinkedList<>();
    final List<Bson> typeFields = new LinkedList<>();
    for (final String varName : bindingNames) {
        valueFields.add(Projections.include(varName));
        hashFields.add(Projections.include(varName));
        typeFields.add(Projections.include(varName));
    }
    final Set<String> newVarNames = new HashSet<>();
    for (final ExtensionElem elem : extensionElements) {
        String name = elem.getName();
        if (!isValidFieldName(name)) {
            // If the field name is invalid, replace it internally
            name = replace(name);
        }
        // We can only handle certain kinds of value expressions; return
        // failure for any others.
        final ValueExpr expr = elem.getExpr();
        final Object valueField;
        final Object hashField;
        final Object typeField;
        if (expr instanceof Var) {
            final String varName = ((Var) expr).getName();
            valueField = "$" + varName;
            hashField = "$" + varName;
            typeField = "$" + varName;
        } else if (expr instanceof ValueConstant) {
            final Value val = ((ValueConstant) expr).getValue();
            valueField = new Document("$literal", val.stringValue());
            hashField = new Document("$literal", SimpleMongoDBStorageStrategy.hash(val.stringValue()));
            if (val instanceof Literal) {
                typeField = new Document("$literal", ((Literal) val).getDatatype().stringValue());
            } else {
                typeField = null;
            }
        } else {
            // if not understood, return failure
            return false;
        }
        valueFields.add(Projections.computed(name, valueField));
        hashFields.add(Projections.computed(name, hashField));
        if (typeField != null) {
            typeFields.add(Projections.computed(name, typeField));
        }
        newVarNames.add(name);
    }
    assuredBindingNames.addAll(newVarNames);
    bindingNames.addAll(newVarNames);
    final Bson projectOpts = Projections.fields(
            Projections.computed(VALUES, Projections.fields(valueFields)),
            Projections.computed(HASHES, Projections.fields(hashFields)),
            Projections.computed(TYPES, Projections.fields(typeFields)),
            Projections.include(LEVEL),
            Projections.include(TIMESTAMP));
    pipeline.add(Aggregates.project(projectOpts));
    return true;
}
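Both Rya methods above build their "$project" stage by mixing Projections.computed with Projections.include and handing the result to Aggregates.project. A stripped-down, hedged sketch of that combination, with hypothetical field names in place of the class's VALUES/LEVEL/TIMESTAMP constants:

// Hedged sketch (hypothetical field names): a $project stage that keeps "level" and "timestamp"
// as-is and adds a computed field copied from another document path.
static Bson buildProjectStage() {
    Bson projectOpts = Projections.fields(
            Projections.computed("value", "$source.value"),   // computed from an existing path
            Projections.include("level"),                     // kept unchanged
            Projections.include("timestamp"));
    return Aggregates.project(projectOpts);
}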
From source file:org.opencb.cellbase.lib.impl.GeneMongoDBAdaptor.java
License:Apache License
@Override
public QueryResult getTfbs(Query query, QueryOptions queryOptions) {
    Bson bsonQuery = parseQuery(query);
    Bson match = Aggregates.match(bsonQuery);

    // We parse user's exclude options, ONLY _id can be added if exists
    Bson includeAndExclude;
    Bson exclude = null;
    if (queryOptions != null && queryOptions.containsKey("exclude")) {
        List<String> stringList = queryOptions.getAsStringList("exclude");
        if (stringList.contains("_id")) {
            exclude = Aggregates.project(Projections.exclude("_id"));
        }
    }

    if (exclude != null) {
        includeAndExclude = Aggregates
                .project(Projections.fields(Projections.excludeId(), Projections.include("transcripts.tfbs")));
    } else {
        includeAndExclude = Aggregates.project(Projections.include("transcripts.tfbs"));
    }

    Bson unwind = Aggregates.unwind("$transcripts");
    Bson unwind2 = Aggregates.unwind("$transcripts.tfbs");

    // This projects the fields of the TFBS sub-document to the top of the object
    Document document = new Document("tfName", "$transcripts.tfbs.tfName");
    document.put("pwm", "$transcripts.tfbs.pwm");
    document.put("chromosome", "$transcripts.tfbs.chromosome");
    document.put("start", "$transcripts.tfbs.start");
    document.put("end", "$transcripts.tfbs.end");
    document.put("strand", "$transcripts.tfbs.strand");
    document.put("relativeStart", "$transcripts.tfbs.relativeStart");
    document.put("relativeEnd", "$transcripts.tfbs.relativeEnd");
    document.put("score", "$transcripts.tfbs.score");
    Bson project = Aggregates.project(document);

    return mongoDBCollection.aggregate(Arrays.asList(match, includeAndExclude, unwind, unwind2, project),
            queryOptions);
}
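The method above keeps only a nested array, unwinds it, and then re-projects its sub-fields to the top level. A reduced, hedged sketch of that unwind-then-project pattern using the plain driver API rather than the OpenCB collection wrapper (paths and collection are hypothetical):

// Hedged sketch (hypothetical collection/paths, plain driver API): keep only a nested array,
// unwind it twice, then promote one of its sub-fields to the top level of each result document.
static AggregateIterable<Document> promoteNestedField(MongoCollection<Document> collection) {
    Bson keepNested = Aggregates.project(Projections.include("transcripts.tfbs"));
    Bson unwindTranscripts = Aggregates.unwind("$transcripts");
    Bson unwindTfbs = Aggregates.unwind("$transcripts.tfbs");
    Bson promote = Aggregates.project(new Document("tfName", "$transcripts.tfbs.tfName"));
    return collection.aggregate(Arrays.asList(keepNested, unwindTranscripts, unwindTfbs, promote));
}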
From source file:org.opencb.cellbase.lib.impl.MongoDBAdaptor.java
License:Apache License
protected QueryResult groupBy(Bson query, List<String> groupByField, String featureIdField, QueryOptions options) {
    if (groupByField == null || groupByField.isEmpty()) {
        return new QueryResult();
    }

    if (groupByField.size() == 1) {
        // if only one field then we call to simple groupBy
        return groupBy(query, groupByField.get(0), featureIdField, options);
    } else {
        Bson match = Aggregates.match(query);

        // add all group-by fields to the projection together with the aggregation field name
        List<String> groupByFields = new ArrayList<>(groupByField);
        groupByFields.add(featureIdField);
        Bson project = Aggregates.project(Projections.include(groupByFields));

        // _id document creation to have the multiple id
        Document id = new Document();
        for (String s : groupByField) {
            id.append(s, "$" + s);
        }

        Bson group;
        if (options.getBoolean("count", false)) {
            group = Aggregates.group(id, Accumulators.sum("count", 1));
            return mongoDBCollection.aggregate(Arrays.asList(match, project, group), options);
        } else {
            // Limit the documents passed if count is false
            Bson limit = Aggregates.limit(options.getInt("limit", 10));
            group = Aggregates.group(id, Accumulators.addToSet("features", "$" + featureIdField));
            // TODO change the default "_id" returned by mongodb to id
            return mongoDBCollection.aggregate(Arrays.asList(match, limit, project, group), options);
        }
    }
}
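A hedged, minimal sketch of the multi-field group-by pipeline built above, using the plain driver API and hypothetical field names; Projections.include(List<String>) keeps only the group-by keys and the feature id before grouping on a compound _id.

// Hedged sketch (hypothetical fields, plain driver API): match, project the group-by keys,
// then group on a compound _id while counting documents per group.
static AggregateIterable<Document> countByFields(MongoCollection<Document> collection, Bson query) {
    List<String> keys = Arrays.asList("chromosome", "biotype");          // hypothetical group-by fields
    Bson match = Aggregates.match(query);
    Bson project = Aggregates.project(Projections.include(keys));        // List<String> overload
    Document id = new Document("chromosome", "$chromosome").append("biotype", "$biotype");
    Bson group = Aggregates.group(id, Accumulators.sum("count", 1));
    return collection.aggregate(Arrays.asList(match, project, group));
}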
From source file:org.opencb.cellbase.lib.impl.ProteinMongoDBAdaptor.java
License:Apache License
@Override
public QueryResult<Score> getSubstitutionScores(Query query, QueryOptions options) {
    QueryResult result = null;

    // Ensembl transcript id is needed for this collection
    if (query.getString("transcript") != null) {
        Bson transcript = Filters.eq("transcriptId", query.getString("transcript"));

        int position = -1;
        String aaShortName = null;
        // If position and aa change are provided we create a 'projection' to return only
        // the required data from the database
        if (query.get("position") != null && !query.getString("position").isEmpty()
                && query.getInt("position", 0) != 0) {
            position = query.getInt("position");
            String projectionString = "aaPositions." + position;

            // If aa change is provided we only return that information
            if (query.getString("aa") != null && !query.getString("aa").isEmpty()) {
                aaShortName = aaShortNameMap.get(query.getString("aa").toUpperCase());
                projectionString += "." + aaShortName;
            }

            // Projection is used to minimize the returned data
            Bson positionProjection = Projections.include(projectionString);
            result = proteinSubstitutionMongoDBCollection.find(transcript, positionProjection, options);
        } else {
            // Return the whole transcript data
            result = proteinSubstitutionMongoDBCollection.find(transcript, options);
        }

        if (result != null && !result.getResult().isEmpty()) {
            Document document = (Document) result.getResult().get(0);
            Document aaPositionsDocument = (Document) document.get("aaPositions");

            // Position or aa change were not provided, returning whole transcript data
            if (position == -1 || aaShortName == null) {
                // Return only the inner Document, not the whole document projected
                result.setResult(Collections.singletonList(aaPositionsDocument));
            // Position and aa were provided, return only corresponding Score objects
            } else {
                List<Score> scoreList = null;
                if (result.getNumResults() == 1 && aaPositionsDocument != null) {
                    scoreList = new ArrayList<>(NUM_PROTEIN_SUBSTITUTION_SCORE_METHODS);
                    Document positionDocument = (Document) aaPositionsDocument.get(Integer.toString(position));
                    Document aaDocument = (Document) positionDocument.get(aaShortName);
                    if (aaDocument.get("ss") != null) {
                        scoreList.add(new Score(Double.parseDouble("" + aaDocument.get("ss")), "sift",
                                VariantAnnotationUtils.SIFT_DESCRIPTIONS.get(aaDocument.get("se"))));
                    }
                    if (aaDocument.get("ps") != null) {
                        scoreList.add(new Score(Double.parseDouble("" + aaDocument.get("ps")), "polyphen",
                                VariantAnnotationUtils.POLYPHEN_DESCRIPTIONS.get(aaDocument.get("pe"))));
                    }
                }
                result.setResult(scoreList);
            }
        }
    }

    // Return null if no transcript id is provided
    return result;
}
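The projection above is built from a dotted path string ("aaPositions.<position>" or "aaPositions.<position>.<aa>"), which Projections.include accepts like any other field name. A hedged sketch with hypothetical values, expressed against the plain driver API instead of the OpenCB collection wrapper:

// Hedged sketch (hypothetical transcript id and position, plain driver API): include only one
// nested position entry of the aaPositions document via a dotted projection path.
static FindIterable<Document> findPositionScores(MongoCollection<Document> collection) {
    Bson transcript = Filters.eq("transcriptId", "ENST00000000001");    // hypothetical transcript id
    Bson positionProjection = Projections.include("aaPositions.100");   // dotted path to a single position
    return collection.find(transcript).projection(positionProjection);
}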
From source file:org.opencb.cellbase.lib.impl.XRefMongoDBAdaptor.java
License:Apache License
@Override
public QueryResult nativeGet(Query query, QueryOptions options) {
    Bson bson = parseQuery(query);
    Bson match = Aggregates.match(bson);
    Bson project = Aggregates.project(Projections.include("transcripts.xrefs"));
    Bson unwind = Aggregates.unwind("$transcripts");
    Bson unwind2 = Aggregates.unwind("$transcripts.xrefs");

    // This projects the three fields of Xref to the top of the object
    Document document = new Document("id", "$transcripts.xrefs.id");
    document.put("dbName", "$transcripts.xrefs.dbName");
    document.put("dbDisplayName", "$transcripts.xrefs.dbDisplayName");
    Bson project1 = Aggregates.project(document);

    return mongoDBCollection.aggregate(Arrays.asList(match, project, unwind, unwind2, project1), options);
}
From source file:org.opencb.cellbase.mongodb.impl.MongoDBAdaptor.java
License:Apache License
protected QueryResult groupBy(Bson query, List<String> groupByField, String featureIdField, QueryOptions options) {
    if (groupByField == null || groupByField.isEmpty()) {
        return new QueryResult();
    }

    if (groupByField.size() == 1) {
        // if only one field then we call to simple groupBy
        return groupBy(query, groupByField.get(0), featureIdField, options);
    } else {
        Bson match = Aggregates.match(query);

        // add all group-by fields to the projection together with the aggregation field name
        List<String> groupByFields = new ArrayList<>(groupByField);
        groupByFields.add(featureIdField);
        Bson project = Aggregates.project(Projections.include(groupByFields));

        // _id document creation to have the multiple id
        Document id = new Document();
        for (String s : groupByField) {
            id.append(s, "$" + s);
        }

        Bson group;
        if (options.getBoolean("count", false)) {
            group = Aggregates.group(id, Accumulators.sum("count", 1));
        } else {
            group = Aggregates.group(id, Accumulators.addToSet("features", "$" + featureIdField));
        }
        return mongoDBCollection.aggregate(Arrays.asList(match, project, group), options);
    }
}