List of usage examples for com.mongodb.client MongoDatabase getCollection
MongoCollection<Document> getCollection(String collectionName);
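Before the project examples below, a minimal self-contained sketch of the call. The connection string, database name, and collection name are placeholders; it uses the same 3.x legacy MongoClient API that the examples below use:

import com.mongodb.MongoClient;
import com.mongodb.MongoClientURI;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoDatabase;
import org.bson.Document;

public class GetCollectionExample {
    public static void main(String[] args) {
        MongoClient mongoClient = new MongoClient(new MongoClientURI("mongodb://localhost:27017"));
        try {
            MongoDatabase db = mongoClient.getDatabase("test");
            // getCollection never returns null; the collection itself is created lazily on first write
            MongoCollection<Document> collection = db.getCollection("example");
            collection.insertOne(new Document("hello", "world"));
            System.out.println(collection.find().first().toJson());
        } finally {
            mongoClient.close();
        }
    }
}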
From source file:org.ang.streaming.SpeedStreaming.SpeedStreamingPerrosParques.java
private static void saveToMongo(String jsonData) throws Exception {
    try {
        MongoClient mongoClient = new MongoClient(new MongoClientURI(MONGODB));
        MongoDatabase db = mongoClient.getDatabase("kml_db");
        JSONArray array = JsonDecode(jsonData);
        JSONObject data = (JSONObject) array.get(0);
        Document key = new Document("id_collar", data.get("id"));
        Document doc = new Document();
        doc.putAll(data);
        doc.append("id_collar", data.get("id")).append("createdAt", System.currentTimeMillis());
        if (db.getCollection("perros_loc").find(key).first() == null) {
            db.getCollection("perros_loc").insertOne(doc);
        } else {
            db.getCollection("perros_loc").updateOne(key, new Document("$set", doc));
        }
    } catch (Exception e) {
        toFile("Method: saveToMongo, Exception: " + e.getMessage(), LOG_PATH);
    }
}
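A note on the find-then-insert/update pattern above: between the find and the write, another writer can slip in. If the goal is simply insert-or-update by key, a single upsert does the same work atomically. A minimal sketch, assuming the same key and doc documents as in the snippet (UpdateOptions comes from com.mongodb.client.model):

import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.UpdateOptions;
import org.bson.Document;

static void saveOrUpdate(MongoCollection<Document> collection, Document key, Document doc) {
    // upsert(true): insert when no document matches the filter, otherwise apply $set in place
    collection.updateOne(key, new Document("$set", doc), new UpdateOptions().upsert(true));
}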
From source file:org.apache.drill.exec.store.mongo.config.MongoPersistentStoreProvider.java
License:Apache License
@Override
public void start() throws IOException {
    MongoClientURI clientURI = new MongoClientURI(mongoURL);
    client = new MongoClient(clientURI);
    MongoDatabase db = client.getDatabase(clientURI.getDatabase());
    collection = db.getCollection(clientURI.getCollection()).withWriteConcern(WriteConcern.JOURNALED);
    Bson index = Indexes.ascending(pKey);
    collection.createIndex(index);
}
From source file:org.apache.drill.exec.store.mongo.MongoGroupScan.java
License:Apache License
@SuppressWarnings({ "rawtypes" })
private void init() throws IOException {
    List<String> h = storagePluginConfig.getHosts();
    List<ServerAddress> addresses = Lists.newArrayList();
    for (String host : h) {
        addresses.add(new ServerAddress(host));
    }
    MongoClient client = storagePlugin.getClient();
    chunksMapping = Maps.newHashMap();
    chunksInverseMapping = Maps.newLinkedHashMap();
    if (isShardedCluster(client)) {
        MongoDatabase db = client.getDatabase(CONFIG);
        MongoCollection<Document> chunksCollection = db.getCollection(CHUNKS);
        Document filter = new Document();
        filter.put(NS, this.scanSpec.getDbName() + "." + this.scanSpec.getCollectionName());
        Document projection = new Document();
        projection.put(SHARD, select);
        projection.put(MIN, select);
        projection.put(MAX, select);
        FindIterable<Document> chunkCursor = chunksCollection.find(filter).projection(projection);
        MongoCursor<Document> iterator = chunkCursor.iterator();
        MongoCollection<Document> shardsCollection = db.getCollection(SHARDS);
        projection = new Document();
        projection.put(HOST, select);
        boolean hasChunks = false;
        while (iterator.hasNext()) {
            Document chunkObj = iterator.next();
            String shardName = (String) chunkObj.get(SHARD);
            String chunkId = (String) chunkObj.get(ID);
            filter = new Document(ID, shardName);
            FindIterable<Document> hostCursor = shardsCollection.find(filter).projection(projection);
            MongoCursor<Document> hostIterator = hostCursor.iterator();
            while (hostIterator.hasNext()) {
                Document hostObj = hostIterator.next();
                String hostEntry = (String) hostObj.get(HOST);
                String[] tagAndHost = StringUtils.split(hostEntry, '/');
                String[] hosts = tagAndHost.length > 1 ? StringUtils.split(tagAndHost[1], ',')
                        : StringUtils.split(tagAndHost[0], ',');
                List<String> chunkHosts = Arrays.asList(hosts);
                Set<ServerAddress> addressList = getPreferredHosts(storagePlugin.getClient(addresses), chunkHosts);
                if (addressList == null) {
                    addressList = Sets.newHashSet();
                    for (String host : chunkHosts) {
                        addressList.add(new ServerAddress(host));
                    }
                }
                chunksMapping.put(chunkId, addressList);
                ServerAddress address = addressList.iterator().next();
                List<ChunkInfo> chunkList = chunksInverseMapping.get(address.getHost());
                if (chunkList == null) {
                    chunkList = Lists.newArrayList();
                    chunksInverseMapping.put(address.getHost(), chunkList);
                }
                List<String> chunkHostsList = new ArrayList<String>();
                for (ServerAddress serverAddr : addressList) {
                    chunkHostsList.add(serverAddr.toString());
                }
                ChunkInfo chunkInfo = new ChunkInfo(chunkHostsList, chunkId);
                Document minMap = (Document) chunkObj.get(MIN);
                Map<String, Object> minFilters = Maps.newHashMap();
                Set keySet = minMap.keySet();
                for (Object keyObj : keySet) {
                    Object object = minMap.get(keyObj);
                    if (!(object instanceof MinKey)) {
                        minFilters.put(keyObj.toString(), object);
                    }
                }
                chunkInfo.setMinFilters(minFilters);
                Map<String, Object> maxFilters = Maps.newHashMap();
                Map maxMap = (Document) chunkObj.get(MAX);
                keySet = maxMap.keySet();
                for (Object keyObj : keySet) {
                    Object object = maxMap.get(keyObj);
                    if (!(object instanceof MaxKey)) {
                        maxFilters.put(keyObj.toString(), object);
                    }
                }
                chunkInfo.setMaxFilters(maxFilters);
                chunkList.add(chunkInfo);
            }
            hasChunks = true;
        }
        // In a sharded environment, a collection without any chunks is treated as
        // unsharded and is stored in the primary shard of its database.
        if (!hasChunks) {
            handleUnshardedCollection(getPrimaryShardInfo(client));
        }
    } else {
        handleUnshardedCollection(storagePluginConfig.getHosts());
    }
}
From source file:org.apache.drill.exec.store.mongo.MongoGroupScan.java
License:Apache License
private List<String> getPrimaryShardInfo(MongoClient client) {
    MongoDatabase database = storagePlugin.getClient().getDatabase(CONFIG);
    // Identify the primary shard of the queried database.
    MongoCollection<Document> collection = database.getCollection(DATABASES);
    Bson filter = new Document(ID, this.scanSpec.getDbName());
    Bson projection = new Document(PRIMARY, select);
    Document document = collection.find(filter).projection(projection).first();
    Preconditions.checkNotNull(document);
    String shardName = document.getString(PRIMARY);
    Preconditions.checkNotNull(shardName);
    // Identify the host(s) on which this shard resides.
    MongoCollection<Document> shardsCol = database.getCollection(SHARDS);
    filter = new Document(ID, shardName);
    projection = new Document(HOST, select);
    Document hostInfo = shardsCol.find(filter).projection(projection).first();
    Preconditions.checkNotNull(hostInfo);
    String hostEntry = hostInfo.getString(HOST);
    Preconditions.checkNotNull(hostEntry);
    String[] tagAndHost = StringUtils.split(hostEntry, '/');
    String[] hosts = tagAndHost.length > 1 ? StringUtils.split(tagAndHost[1], ',')
            : StringUtils.split(tagAndHost[0], ',');
    return Lists.newArrayList(hosts);
}
From source file:org.apache.drill.exec.store.mongo.MongoGroupScan.java
License:Apache License
@Override
public ScanStats getScanStats() {
    try {
        MongoClient client = storagePlugin.getClient();
        MongoDatabase db = client.getDatabase(scanSpec.getDbName());
        MongoCollection<Document> collection = db.getCollection(scanSpec.getCollectionName());
        long numDocs = collection.count();
        float approxDiskCost = 0;
        if (numDocs != 0) {
            String json = collection.find().first().toJson();
            approxDiskCost = json.getBytes().length * numDocs;
        }
        return new ScanStats(GroupScanProperty.EXACT_ROW_COUNT, numDocs, 1, approxDiskCost);
    } catch (Exception e) {
        throw new DrillRuntimeException(e.getMessage(), e);
    }
}
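collection.count() works on the 3.x drivers these examples target, but it was deprecated in driver 3.8 in favor of two replacements. A sketch of the trade-off (method names are from the 3.8+ MongoCollection API):

import com.mongodb.client.MongoCollection;
import org.bson.Document;

static long rowCountEstimate(MongoCollection<Document> collection) {
    // countDocuments() is exact but may scan; estimatedDocumentCount() reads
    // collection metadata, which is usually enough for a planner cost estimate
    return collection.estimatedDocumentCount();
}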
From source file:org.apache.drill.exec.store.mongo.MongoTestSuit.java
License:Apache License
private static void createDbAndCollections(String dbName, String collectionName, String indexFieldName) {
    MongoDatabase db = mongoClient.getDatabase(dbName);
    MongoCollection<Document> mongoCollection = db.getCollection(collectionName);
    // Note: getCollection never actually returns null, so this branch is defensive only.
    if (mongoCollection == null) {
        db.createCollection(collectionName);
        mongoCollection = db.getCollection(collectionName);
    }
    IndexOptions indexOptions = new IndexOptions().unique(true).background(false).name(indexFieldName);
    Bson keys = Indexes.ascending(indexFieldName);
    mongoCollection.createIndex(keys, indexOptions);
}
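As the comment above notes, getCollection never returns null, so that branch cannot fire. If the intent is create-if-absent, the collection's presence has to be checked explicitly; a sketch using listCollectionNames():

import com.mongodb.client.MongoDatabase;

static void createCollectionIfAbsent(MongoDatabase db, String name) {
    // getCollection(name) always returns a handle, so scan the catalog instead
    boolean exists = false;
    for (String existing : db.listCollectionNames()) {
        if (existing.equals(name)) {
            exists = true;
            break;
        }
    }
    if (!exists) {
        db.createCollection(name);
    }
}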
From source file:org.apache.metamodel.mongodb.mongo3.MongoDbDataContext.java
License:Apache License
/**
 * Performs an analysis of an available collection in a Mongo {@link DB}
 * instance and tries to detect the table structure based on the first 1000
 * documents in the collection.
 *
 * @param mongoDb
 *            the mongo DB
 * @param collectionName
 *            the name of the collection
 * @return a table definition for mongo db.
 */
public static SimpleTableDef detectTable(MongoDatabase mongoDb, String collectionName) {
    final MongoCollection<Document> collection = mongoDb.getCollection(collectionName);
    final FindIterable<Document> iterable = collection.find().limit(1000);
    final SortedMap<String, Set<Class<?>>> columnsAndTypes = new TreeMap<String, Set<Class<?>>>();
    for (Document document : iterable) {
        Set<String> keysInObject = document.keySet();
        for (String key : keysInObject) {
            Set<Class<?>> types = columnsAndTypes.get(key);
            if (types == null) {
                types = new HashSet<Class<?>>();
                columnsAndTypes.put(key, types);
            }
            Object value = document.get(key);
            if (value != null) {
                types.add(value.getClass());
            }
        }
    }
    final String[] columnNames = new String[columnsAndTypes.size()];
    final ColumnType[] columnTypes = new ColumnType[columnsAndTypes.size()];
    int i = 0;
    for (Entry<String, Set<Class<?>>> columnAndTypes : columnsAndTypes.entrySet()) {
        final String columnName = columnAndTypes.getKey();
        final Set<Class<?>> columnTypeSet = columnAndTypes.getValue();
        final Class<?> columnType;
        if (columnTypeSet.size() == 1) {
            columnType = columnTypeSet.iterator().next();
        } else {
            columnType = Object.class;
        }
        columnNames[i] = columnName;
        if (columnType == ObjectId.class) {
            columnTypes[i] = ColumnType.ROWID;
        } else {
            columnTypes[i] = ColumnTypeImpl.convertColumnType(columnType);
        }
        i++;
    }
    return new SimpleTableDef(collectionName, columnNames, columnTypes);
}
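A hedged call-site sketch for the method above. The "orders" collection name and the surrounding handles are placeholders, and it assumes SimpleTableDef exposes getColumnNames(), as in Apache MetaModel:

import java.util.Arrays;
import com.mongodb.client.MongoDatabase;
import org.apache.metamodel.util.SimpleTableDef;

static void printDetectedSchema(MongoDatabase db, String collectionName) {
    SimpleTableDef tableDef = MongoDbDataContext.detectTable(db, collectionName);
    System.out.println(collectionName + ": " + Arrays.toString(tableDef.getColumnNames()));
}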
From source file:org.apache.metamodel.mongodb.mongo3.MongoDbUpdateCallback.java
License:Apache License
protected void createCollection(String name) {
    MongoDatabase mongoDb = _dataContext.getMongoDb();
    mongoDb.createCollection(name);
    MongoCollection<Document> collection = mongoDb.getCollection(name);
    _collections.put(name, collection);
}
From source file:org.apache.rya.forwardchain.strategy.MongoPipelineStrategy.java
License:Apache License
/**
 * Initialize based on a configuration.
 * @param mongoConf Should contain database information; cannot be null. If
 *        passed a stateful configuration, uses the existing mongo client,
 *        otherwise creates one.
 */
public MongoPipelineStrategy(MongoDBRdfConfiguration mongoConf) throws ForwardChainException {
    Preconditions.checkNotNull(mongoConf);
    final String mongoDBName = mongoConf.getMongoDBName();
    final String collectionName = mongoConf.getTriplesCollectionName();
    mongoConf.setFlush(false);
    final StatefulMongoDBRdfConfiguration statefulConf;
    try {
        if (mongoConf instanceof StatefulMongoDBRdfConfiguration) {
            statefulConf = (StatefulMongoDBRdfConfiguration) mongoConf;
            this.dao = new MongoDBRyaDAO();
            this.dao.setConf(statefulConf);
            this.dao.init();
        } else {
            this.dao = RyaSailFactory.getMongoDAO(mongoConf);
            statefulConf = this.dao.getConf();
        }
    } catch (RyaDAOException e) {
        throw new ForwardChainException("Can't connect to Rya.", e);
    }
    final MongoClient mongoClient = statefulConf.getMongoClient();
    final MongoDatabase mongoDB = mongoClient.getDatabase(mongoDBName);
    this.baseCollection = mongoDB.getCollection(collectionName);
    this.pipelineVisitor = new SparqlToPipelineTransformVisitor(this.baseCollection);
    this.engine = this.dao.getQueryEngine();
    this.backup = new SailExecutionStrategy(statefulConf);
    final MongoDbBatchWriterConfig writerConfig = MongoDbBatchWriterUtils.getMongoDbBatchWriterConfig(statefulConf);
    final CollectionType<Document> ct = new MongoCollectionType(baseCollection);
    this.batchWriter = new MongoDbBatchWriter<>(ct, writerConfig);
    try {
        this.batchWriter.start();
    } catch (final MongoDbBatchWriterException e) {
        throw new ForwardChainException("Error starting MongoDB batch writer", e);
    }
}
From source file:org.apache.rya.mongodb.aggregation.SparqlToPipelineTransformVisitor.java
License:Apache License
/**
 * Instantiate a visitor from a {@link MongoDBRdfConfiguration}.
 * @param conf Contains database connection information.
 */
public SparqlToPipelineTransformVisitor(StatefulMongoDBRdfConfiguration conf) {
    Preconditions.checkNotNull(conf);
    MongoClient mongo = conf.getMongoClient();
    MongoDatabase db = mongo.getDatabase(conf.getMongoDBName());
    this.inputCollection = db.getCollection(conf.getTriplesCollectionName());
}