List of usage examples for com.mongodb DBCollection getDB
public DB getDB()
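Before the examples, a minimal standalone sketch of what getDB() is for, assuming the legacy mongo-java-driver and hypothetical connection details, database, and collection names:

import com.mongodb.BasicDBObject;
import com.mongodb.CommandResult;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class GetDbExample {
    public static void main(String[] args) {
        // Hypothetical connection details; replace with your own.
        MongoClient client = new MongoClient("localhost", 27017);
        try {
            DBCollection collection = client.getDB("testdb").getCollection("testcoll");
            // getDB() recovers the owning database from the collection handle...
            DB db = collection.getDB();
            System.out.println("Database name: " + db.getName());
            // ...which lets code holding only a collection reference issue
            // database-level commands, as most of the examples below do.
            CommandResult stats = db.command(new BasicDBObject("collStats", collection.getName()));
            System.out.println("collStats ok: " + stats.ok());
        } finally {
            client.close();
        }
    }
}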
From source file:com.stratio.connector.mongodb.core.engine.metadata.DiscoverMetadataUtils.java
License:Apache License
/**
 * Discover the existing fields stored in the collection and their data types.
 *
 * @param collection the collection
 * @param sample_probability the probability with which each document is sampled
 * @return the list of fields including the _id
 */
public static HashMap<String, String> discoverFieldsWithType(DBCollection collection, String sample_probability) {
    String map = "function() { if(Math.random() <= sample_number) {for (var key in this) {var type = typeof(this[key]); if(type == \"object\"){type = \"string\";};emit(key, type);}} } ";
    String reduce = "function(key, values) { var result = \"\"; for (var i = 0; i < values.length; i++){ var v = values[i];if(v == \"string\"){result = \"string\"; break;} if(v == \"number\"){result = \"number\"} if(v == \"boolean\" && result == \"number\"){result = \"string\"; break;}if(v == \"number\" && result == \"boolean\"){result = \"string\"; break;} if(v==\"boolean\"){result = \"boolean\"}};return result; }";
    MapReduceCommand mapReduceCommand = new MapReduceCommand(collection, map, reduce, null, OutputType.INLINE, null);
    // Expose the sampling probability to the map function through the scope.
    HashMap<String, Object> scope = new HashMap<>();
    scope.put("sample_number", sample_probability);
    mapReduceCommand.setScope(scope);
    DBObject getFieldsCommand = mapReduceCommand.toDBObject();
    CommandResult command = collection.getDB().command(getFieldsCommand);
    BasicDBList results = (BasicDBList) command.get("results");
    HashMap<String, String> fields = new HashMap<>();
    if (results != null) {
        for (Object object : results) {
            DBObject bson = (DBObject) object;
            String nameField = (String) bson.get("_id");
            String type = (String) bson.get("value");
            fields.put(nameField, type);
        }
    }
    return fields;
}
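For reference, a hypothetical call to the method above, assuming a DBCollection handle named coll and a 10% sampling probability; the probability is passed as a string and compared to Math.random() inside the map function via JavaScript's implicit coercion:

// Hypothetical usage; client, "testdb" and "testcoll" are placeholders.
DBCollection coll = client.getDB("testdb").getCollection("testcoll");
HashMap<String, String> fields = DiscoverMetadataUtils.discoverFieldsWithType(coll, "0.1");
for (Map.Entry<String, String> field : fields.entrySet()) {
    System.out.println(field.getKey() + " -> " + field.getValue());
}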
From source file:com.stratio.connector.mongodb.core.engine.metadata.DiscoverMetadataUtils.java
License:Apache License
/**
 * Discover the existing indexes stored in the collection.
 *
 * @param collection the collection
 * @return the list of IndexMetadata
 */
public static List<IndexMetadata> discoverIndexes(DBCollection collection) {
    // TODO add TextIndex, Geospatial, etc.
    // TODO supported only simple, compound and hashed index
    // TODO remove _id?
    // TODO return options? e.g. sparse, unique?
    // TODO custom (asc and desc)
    List<DBObject> indexInfo = collection.getIndexInfo();
    String db = collection.getDB().getName();
    String collName = collection.getName();
    List<IndexMetadata> indexMetadataList = new ArrayList<>(indexInfo.size());
    for (DBObject dbObject : indexInfo) {
        BasicDBObject key = (BasicDBObject) dbObject.get("key");
        IndexMetadataBuilder indexMetadataBuilder = new IndexMetadataBuilder(db, collName,
                (String) dbObject.get("name"), getIndexType(key));
        for (String field : key.keySet()) {
            indexMetadataBuilder.addColumn(field, null);
        }
        indexMetadataList.add(indexMetadataBuilder.build());
    }
    return indexMetadataList;
}
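Similarly, a hypothetical call sketch; note that the builder above derives the database and collection names through collection.getDB().getName() and collection.getName():

// Hypothetical usage, assuming the same collection handle as above.
List<IndexMetadata> indexes = DiscoverMetadataUtils.discoverIndexes(coll);
System.out.println("Discovered " + indexes.size() + " indexes on " + coll.getFullName());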
From source file:com.stratio.deep.mongodb.extractor.MongoNativeExtractor.java
License:Apache License
/**
 * Calculate splits.
 *
 * @param collection the collection
 * @return the deep partition [ ]
 */
private DeepPartition[] calculateSplits(DBCollection collection) {
    BasicDBList splitData = getSplitData(collection);
    List<ServerAddress> serverAddressList = collection.getDB().getMongo().getServerAddressList();
    if (splitData == null) {
        Pair<BasicDBList, List<ServerAddress>> pair = getSplitDataCollectionShardEnviroment(
                getShards(collection), collection.getDB().getName(), collection.getName());
        splitData = pair.left;
        serverAddressList = pair.right;
    }
    Object lastKey = null; // Lower boundary of the first min split
    List<String> stringHosts = new ArrayList<>();
    for (ServerAddress serverAddress : serverAddressList) {
        stringHosts.add(serverAddress.toString());
    }
    int i = 0;
    MongoPartition[] partitions = new MongoPartition[splitData.size() + 1];
    for (Object aSplitData : splitData) {
        BasicDBObject currentKey = (BasicDBObject) aSplitData;
        Object currentO = currentKey.get(MONGO_DEFAULT_ID);
        partitions[i] = new MongoPartition(mongoDeepJobConfig.getRddId(), i,
                new DeepTokenRange(lastKey, currentO, stringHosts), MONGO_DEFAULT_ID);
        lastKey = currentO;
        i++;
    }
    QueryBuilder queryBuilder = QueryBuilder.start(MONGO_DEFAULT_ID);
    queryBuilder.greaterThanEquals(lastKey);
    partitions[i] = new MongoPartition(0, i, new DeepTokenRange(lastKey, null, stringHosts), MONGO_DEFAULT_ID);
    return partitions;
}
From source file:com.zjy.mongo.splitter.MongoCollectionSplitter.java
License:Apache License
protected DB getConfigDB() {
    Mongo mongo;
    MongoClientURI inputURI = MongoConfigUtil.getInputURI(getConfiguration());
    MongoClientURI authURI = MongoConfigUtil.getAuthURI(getConfiguration());
    final DBCollection inputCollection;
    if (authURI != null && authURI.getUsername() != null && authURI.getPassword() != null) {
        inputCollection = MongoConfigUtil.getCollectionWithAuth(inputURI, authURI);
    } else {
        inputCollection = MongoConfigUtil.getCollection(inputURI);
    }
    DB db = inputCollection.getDB();
    mongo = db.getMongo();
    if (authURI != null) {
        if (authURI.getUsername() != null && authURI.getPassword() != null) {
            authDB = mongo.getDB(authURI.getDatabase());
        }
    }
    return mongo.getDB("config");
}
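The "config" database returned by this method is where a sharded cluster stores its metadata. A minimal follow-up sketch, assuming a connection through mongos and a hypothetical namespace testdb.testcoll, listing the chunk documents the splitters rely on:

// Hypothetical caller: enumerate the chunks for one sharded namespace.
DB configDB = getConfigDB();
DBCursor chunks = configDB.getCollection("chunks").find(new BasicDBObject("ns", "testdb.testcoll"));
try {
    while (chunks.hasNext()) {
        System.out.println(chunks.next());
    }
} finally {
    chunks.close();
}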
From source file:com.zjy.mongo.splitter.MongoSplitterFactory.java
License:Apache License
public static MongoCollectionSplitter getSplitterByStats(final MongoClientURI uri, final Configuration config) {
    /* Looks at the collection in mongo.input.uri
     * and chooses an implementation based on what's in there. */
    MongoCollectionSplitter returnVal;
    // If the split calculation is totally disabled, just make one
    // big split for the whole collection.
    if (!MongoConfigUtil.createInputSplits(config)) {
        returnVal = new SingleMongoSplitter(config);
    } else {
        MongoClientURI authURI = MongoConfigUtil.getAuthURI(config);
        CommandResult stats;
        DBCollection coll = null;
        try {
            if (authURI != null) {
                coll = MongoConfigUtil.getCollectionWithAuth(uri, authURI);
                stats = coll.getStats();
                LOG.info("Retrieved Collection stats:" + stats);
            } else {
                coll = MongoConfigUtil.getCollection(uri);
                stats = coll.getStats();
            }
        } finally {
            if (coll != null) {
                MongoConfigUtil.close(coll.getDB().getMongo());
            }
        }
        if (!stats.getBoolean("ok", false)) {
            throw new RuntimeException(
                    "Unable to calculate input splits from collection stats: " + stats.getString("errmsg"));
        }
        if (!stats.getBoolean("sharded", false)) {
            returnVal = new StandaloneMongoSplitter(config);
        } else {
            // Collection is sharded
            if (MongoConfigUtil.isShardChunkedSplittingEnabled(config)) {
                // Creates one split per chunk.
                returnVal = new ShardChunkMongoSplitter(config);
            } else if (MongoConfigUtil.canReadSplitsFromShards(config)) {
                // Creates one split per shard, but ignores chunk bounds.
                // Reads from shards directly (bypassing mongos).
                // Not usually recommended.
                returnVal = new ShardMongoSplitter(config);
            } else {
                // Not configured to use chunks or shards, so treat this
                // the same as if it were an unsharded collection.
                returnVal = new StandaloneMongoSplitter(config);
            }
        }
    }
    return returnVal;
}
From source file:com.zjy.mongo.splitter.StandaloneMongoSplitter.java
License:Apache License
@Override
public List<InputSplit> calculateSplits() throws SplitFailedException {
    final DBObject splitKey = MongoConfigUtil.getInputSplitKey(getConfiguration());
    final DBObject splitKeyMax = MongoConfigUtil.getMaxSplitKey(getConfiguration());
    final DBObject splitKeyMin = MongoConfigUtil.getMinSplitKey(getConfiguration());
    final int splitSize = MongoConfigUtil.getSplitSize(getConfiguration());
    final MongoClientURI inputURI;
    DBCollection inputCollection = null;
    final ArrayList<InputSplit> returnVal;
    try {
        inputURI = MongoConfigUtil.getInputURI(getConfiguration());
        MongoClientURI authURI = MongoConfigUtil.getAuthURI(getConfiguration());
        if (authURI != null) {
            inputCollection = MongoConfigUtil.getCollectionWithAuth(inputURI, authURI);
        } else {
            inputCollection = MongoConfigUtil.getCollection(inputURI);
        }
        returnVal = new ArrayList<InputSplit>();
        final String ns = inputCollection.getFullName();
        if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("Running splitVector on namespace: %s.%s; hosts: %s",
                    inputURI.getDatabase(), inputURI.getCollection(), inputURI.getHosts()));
        }
        final DBObject cmd = BasicDBObjectBuilder.start("splitVector", ns).add("keyPattern", splitKey)
                .add("min", splitKeyMin).add("max", splitKeyMax)
                // force:True is misbehaving it seems
                .add("force", false).add("maxChunkSize", splitSize).get();
        CommandResult data;
        boolean ok = true;
        try {
            data = inputCollection.getDB().getSisterDB(inputURI.getDatabase())
                    .command(cmd, ReadPreference.primary());
        } catch (final MongoException e) {
            // 2.0 servers throw exceptions rather than info in a CommandResult
            data = null;
            LOG.info(e.getMessage(), e);
            if (e.getMessage().contains("unrecognized command: splitVector")) {
                ok = false;
            } else {
                throw e;
            }
        }
        if (data != null) {
            if (data.containsField("$err")) {
                throw new SplitFailedException("Error calculating splits: " + data);
            } else if (!data.get("ok").equals(1.0)) {
                ok = false;
            }
        }
        if (!ok) {
            final CommandResult stats = inputCollection.getStats();
            if (stats.containsField("primary")) {
                final DBCursor shards = inputCollection.getDB().getSisterDB("config").getCollection("shards")
                        .find(new BasicDBObject("_id", stats.getString("primary")));
                try {
                    if (shards.hasNext()) {
                        final DBObject shard = shards.next();
                        final String host = ((String) shard.get("host")).replace(shard.get("_id") + "/", "");
                        final MongoClientURI shardHost;
                        if (authURI != null) {
                            shardHost = new MongoClientURIBuilder(authURI).host(host).build();
                        } else {
                            shardHost = new MongoClientURIBuilder(inputURI).host(host).build();
                        }
                        MongoClient shardClient = null;
                        try {
                            shardClient = new MongoClient(shardHost);
                            data = shardClient.getDB(shardHost.getDatabase()).command(cmd,
                                    ReadPreference.primary());
                        } catch (final Exception e) {
                            LOG.error(e.getMessage(), e);
                        } finally {
                            if (shardClient != null) {
                                shardClient.close();
                            }
                        }
                    }
                } finally {
                    shards.close();
                }
            }
            if (data != null && !data.get("ok").equals(1.0)) {
                throw new SplitFailedException("Unable to calculate input splits: " + data.get("errmsg"));
            }
        }
        // Comes in a format where "min" and "max" are implicit
        // and each entry is just a boundary key; not ranged
        final BasicDBList splitData = (BasicDBList) data.get("splitKeys");
        if (splitData.size() == 0) {
            LOG.warn("WARNING: No Input Splits were calculated by the split code. Proceeding with a *single* split."
                    + " Data may be too small, try lowering 'mongo.input.split_size' if this is undesirable.");
        }
        BasicDBObject lastKey = null; // Lower boundary of the first min split
        // If splitKeyMin was given, use it as the first boundary.
        if (!splitKeyMin.toMap().isEmpty()) {
            lastKey = new BasicDBObject(splitKeyMin.toMap());
        }
        for (final Object aSplitData : splitData) {
            final BasicDBObject currentKey = (BasicDBObject) aSplitData;
            returnVal.add(createSplitFromBounds(lastKey, currentKey));
            lastKey = currentKey;
        }
        BasicDBObject maxKey = null;
        // If splitKeyMax was given, use it as the last boundary.
        if (!splitKeyMax.toMap().isEmpty()) {
            maxKey = new BasicDBObject(splitKeyMax.toMap());
        }
        // Last max split
        final MongoInputSplit lastSplit = createSplitFromBounds(lastKey, maxKey);
        returnVal.add(lastSplit);
    } finally {
        if (inputCollection != null) {
            MongoConfigUtil.close(inputCollection.getDB().getMongo());
        }
    }
    return returnVal;
}
From source file:org.apache.camel.component.mongodb.MongoDbProducer.java
License:Apache License
private DBCollection calculateCollection(Exchange exchange) throws Exception {
    // Dynamic calculation is an option. In most cases it won't be used, and we should not
    // penalise all users with running this resolution logic on every Exchange if they
    // won't be using this functionality at all.
    if (!endpoint.isDynamicity()) {
        return endpoint.getDbCollection();
    }
    String dynamicDB = exchange.getIn().getHeader(MongoDbConstants.DATABASE, String.class);
    String dynamicCollection = exchange.getIn().getHeader(MongoDbConstants.COLLECTION, String.class);
    @SuppressWarnings("unchecked")
    List<DBObject> dynamicIndex = exchange.getIn().getHeader(MongoDbConstants.COLLECTION_INDEX, List.class);
    DBCollection dbCol = null;
    if (dynamicDB == null && dynamicCollection == null) {
        dbCol = endpoint.getDbCollection();
    } else {
        DB db = null;
        if (dynamicDB == null) {
            db = endpoint.getDb();
        } else {
            db = endpoint.getMongoConnection().getDB(dynamicDB);
        }
        if (dynamicCollection == null) {
            dbCol = db.getCollection(endpoint.getCollection());
        } else {
            dbCol = db.getCollection(dynamicCollection);
            // Add the index on the fly.
            if (dynamicIndex == null) {
                endpoint.ensureIndex(dbCol, endpoint.createIndex());
            } else {
                endpoint.ensureIndex(dbCol, dynamicIndex);
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Dynamic database and/or collection selected: {}->{}", dbCol.getDB().getName(),
                dbCol.getName());
    }
    return dbCol;
}
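A hedged sketch of how a Camel route could drive the dynamic path above, assuming a RouteBuilder and a hypothetical connection bean named myConnectionBean; the dynamicity=true endpoint option is what enables the header-based resolution:

// Hypothetical RouteBuilder fragment: pick the database and collection per message.
from("direct:dynamicInsert")
    .setHeader(MongoDbConstants.DATABASE, constant("otherDb"))
    .setHeader(MongoDbConstants.COLLECTION, constant("otherColl"))
    .to("mongodb:myConnectionBean?database=defaultDb&collection=defaultColl"
        + "&operation=insert&dynamicity=true");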
From source file:org.eclipselabs.mongoemf.builders.EObjectBuilderImpl.java
License:Open Source License
/**
 * Builds an EMF proxy object from the reference DBObject.
 *
 * @param collection the collection containing the referencing object
 * @param dbReference the MongoDB reference - must be of the form { ECLASS_KEY : eClassURI, PROXY_KEY : proxyURI }
 * @param resourceSet the resource set to use for building the proxy
 * @param referenceResolvesProxies true if the reference resolves proxies; false otherwise
 * @return the proxy object when referenceResolvesProxies is true, the resolved object otherwise
 */
protected EObject buildProxy(DBCollection collection, DBObject dbReference, ResourceSet resourceSet,
        boolean referenceResolvesProxies) {
    EObject eObject;
    URI proxyURI = URI.createURI((String) dbReference.get(Keywords.PROXY_KEY));
    URI resolvedProxyURI = uriHandler.resolve(proxyURI);
    if (!referenceResolvesProxies) {
        // When referenceResolvesProxies is false, we must resolve the proxy in place and get the referenced object
        eObject = resourceSet.getEObject(resolvedProxyURI, true);
    } else {
        eObject = createEObject(resourceSet, dbReference);
        ((InternalEObject) eObject).eSetProxyURI(resolvedProxyURI);
        if (includeAttributesForProxyReferences && proxyURI.isRelative() && "/".equals(proxyURI.fragment())) {
            DBCollection referenceCollection = null;
            if (proxyURI.segmentCount() == 3 && proxyURI.segment(0).equals("..")) {
                referenceCollection = collection.getDB().getCollection(proxyURI.segment(1));
            } else if (proxyURI.segmentCount() == 1) {
                referenceCollection = collection;
            }
            if (referenceCollection != null) {
                DBObject referenceDBObject = new BasicDBObject(Keywords.ID_KEY,
                        new ObjectId(proxyURI.lastSegment()));
                DBObject referencedDBObject = referenceCollection.findOne(referenceDBObject);
                if (referencedDBObject != null) {
                    for (EAttribute attribute : eObject.eClass().getEAllAttributes()) {
                        if (!attribute.isTransient() && !FeatureMapUtil.isFeatureMap(attribute))
                            buildAttribute(referenceCollection, referencedDBObject, null, eObject, attribute);
                    }
                }
            }
        }
    }
    return eObject;
}
From source file:org.fornax.cartridges.sculptor.framework.accessimpl.mongodb.MongoDbSaveAccessImpl.java
License:Apache License
protected void updateWithOptimisticLocking(T obj, DBObject dbObj) {
    Long version = (Long) dbObj.get("version");
    DBObject q = new BasicDBObject();
    q.put("_id", dbObj.get("_id"));
    // version in db must be same as old version
    q.put("version", version);
    Long newVersion;
    if (version == null) {
        newVersion = 1L;
    } else {
        newVersion = version + 1;
    }
    dbObj.put("version", newVersion);
    DBCollection dbCollection = getDBCollection();
    dbCollection.update(q, dbObj);
    DBObject lastError = dbCollection.getDB().getLastError();
    if (lastError.containsField("updatedExisting") && Boolean.FALSE.equals(lastError.get("updatedExisting"))) {
        throw new OptimisticLockingException(
                "Optimistic locking violation. Object was updated by someone else.");
    }
    checkLastError();
    IdReflectionUtil.internalSetVersion(obj, newVersion);
}
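Note that DB.getLastError() was deprecated in later 2.x drivers and removed afterwards; with acknowledged writes the same check can be made on the WriteResult that update() returns. A minimal variant sketch under that assumption, still against the legacy DBCollection API:

// Hypothetical variant of the check above, using the returned WriteResult.
WriteResult result = dbCollection.update(q, dbObj);
if (result.getN() == 0) {
    // No document matched _id + expected version: someone else updated it first.
    throw new OptimisticLockingException(
            "Optimistic locking violation. Object was updated by someone else.");
}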
From source file:org.geotools.data.mongodb.MongoLayer.java
License:LGPL
/**
 * Generate a model of the collection records' data fields and types.
 *
 * @param coll mongo collection
 * @param buildRule which rule to apply if same-named fields with different types exist
 * @return JSON object describing a collection record
 */
private DBObject getCollectionModel(DBCollection coll, RecordBuilder buildRule) {
    // Call the map-reduce job to generate metadata. The mongo Java driver calls
    // mapReduce with the functions themselves rather than the function names.
    // Function prototypes are in scripts/mrscripts/MetaDataCompute.js
    // (do not include comments in the quoted JavaScript functions below - it gives a mongo error).
    coll.mapReduce(metaMapFunc, metaReduceFunc, metaResultsColl, new BasicDBObject());
    // Get the mapping of field names and types, and counts for the different types.
    DBCollection metaColl = coll.getDB().getCollection(metaResultsColl);
    HashMap<String, ClassCount> fieldMap = getFieldMap(metaColl);
    log.finest("fieldMap=" + fieldMap);
    // The resulting collection may have dupes for fields of different types;
    // use the build rule to determine the final type.
    HashMap<String, String> finalMap = finalizeMajorityRule(fieldMap, buildRule);
    log.finest("finalMap=" + finalMap);
    // Convert the map of field names with types and associated counts to a JSON DBObject.
    DBObject metaData = convertMapToJson(finalMap);
    log.finest("metaData=" + metaData);
    return metaData;
}