// Java tutorial
/*
 * MongoDB.java
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Copyright 2012 Randy Nott
 */
package org.rnott.places;

import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;

import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.inject.Inject;

import org.bson.types.ObjectId;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.code.morphia.Datastore;
import com.google.code.morphia.Morphia;
import com.google.code.morphia.logging.MorphiaLoggerFactory;
import com.google.code.morphia.logging.slf4j.SLF4JLogrImplFactory;
import com.mongodb.BasicDBList;
import com.mongodb.DBObject;
import com.mongodb.Mongo;
import com.mongodb.MongoException;
import com.mongodb.MongoOptions;
import com.mongodb.ServerAddress;
import com.mongodb.util.JSON;

/**
 * MongoDB database access and initialization. This
 * should be a singleton instance.
 * <p>
 * @author Randy Nott
 */
@ApplicationScoped
public class MongoDB {

    private static final Logger logger = LoggerFactory.getLogger(MongoDB.class);

    static {
        // route Morphia's internal logging through SLF4J
        MorphiaLoggerFactory.registerLogger(SLF4JLogrImplFactory.class);
    }

    /*
     * Entity classes registered with the Morphia mapper.
     */
    private static final Class<?>[] MAPPED_CLASSES = {
        AdministrativePlace.class,
        CountryProfile.class,
        LanguageProfile.class,
        Place.class,
    };

    private final Mongo mongo;
    private final Morphia morphia;

    @Inject
    private Environment env;

    private Datastore datastore;

    /**
     * Connect to MongoDB using the runtime environment settings, authenticate
     * against the configured database, and register the entity mappings with
     * Morphia.
     *
     * @throws RuntimeException if the database host cannot be resolved, the
     *         driver fails to initialize, or authentication is rejected
     */
    public MongoDB() {
        // TODO: look into 'strict' write setting
        /*
         * Driver option rationale, summarized from:
         * http://stackoverflow.com/questions/6520439/how-to-configure-mongodb-java-driver-mongooptions-for-production-use
         *
         * autoConnectRetry - the driver automatically attempts to reconnect
         *     after unexpected disconnects; in production you usually want
         *     this enabled.
         *
         * safe - forces a getLastError() after every write so that problems
         *     (unique constraint violations, query issues, etc.) surface on
         *     the client side instead of being silently dropped.
         *
         * fsync - forces a flush to disk after each write; costs throughput,
         *     but durability is required here.
         *
         * slaveOk - would direct reads to non-primary replica members;
         *     deliberately left disabled in favor of ReadPreference.SECONDARY.
         *
         * connectionsPerHost / connectTimeout / maxWaitTime / socketTimeout /
         * threadsAllowedToBlockForConnectionMultiplier / w - left at driver
         *     defaults; tune only with production measurements in hand.
         */
        MongoOptions options = new MongoOptions();
        options.autoConnectRetry = true;
        //options.slaveOk = true; see ReadPreference.SECONDARY
        options.safe = true;
        options.fsync = true;  // durability is a must

        if (env == null) {
            //throw new RuntimeException( "Runtime environment is not injected" );
            // TODO: runtime environment bean is not being injected
            env = new Environment();
            env.initialize();
        }

        try {
            ServerAddress addr = new ServerAddress(env.getDatabaseHost(), env.getDatabasePort());
            mongo = new Mongo(addr, options);
        } catch (UnknownHostException e) {
            throw new RuntimeException("Failed to locate MongoDB", e);
        } catch (MongoException e) {
            throw new RuntimeException("Failed MongoDB initialization", e);
        }

        // authenticate
        logger.info("Initializing database: {}", env.getDatabaseName());
        logger.info(env.toString());
        // fail fast on bad credentials instead of failing on first use;
        // DB.authenticate() returns false when the credentials are rejected
        if (!mongo.getDB(env.getDatabaseName()).authenticate(
                env.getDatabaseUser(), env.getDatabasePassword().toCharArray())) {
            throw new RuntimeException("Failed to authenticate to MongoDB database: " + env.getDatabaseName());
        }

        // initialize Morphia
        morphia = new Morphia();
        // set up mapping for entity types
        for (Class<?> c : MAPPED_CLASSES) {
            morphia.map(c);
        }

        // enable JSR303 Validation
        // this is extended so that it can respond to dynamic validation groups
        // TODO: re-enable validation after figuring out how to deal with Address issues
        // (when embedded in Place, city and postal code may be null)
        //new DynamicValidationExtension( morphia );
    }

    /**
     * Complete initialization once dependency injection has run: create the
     * datastore and ensure the annotated indexes exist.
     * <p>
     * NOTE(review): the method name is a typo ("intitialize"); it is invoked
     * via the @PostConstruct annotation rather than by name, so renaming
     * should be safe — confirm there are no direct callers before doing so.
     */
    @PostConstruct
    void intitialize() {
        // need to do this after the instance is initialized due to the
        // dependency on platform settings

        // create the datastore if necessary
        datastore = morphia.createDatastore(mongo, env.getDatabaseName());
        // create annotated indexes if necessary
        datastore.ensureIndexes();
    }

    /**
     * @return the Morphia mapper instance
     */
    public Morphia getMorphia() {
        return morphia;
    }

    /**
     * @return the Morphia datastore; null until the @PostConstruct callback has run
     */
    public Datastore getDatastore() {
        return datastore;
    }

    /**
     * Render an object identifier in its Mongo string form.
     *
     * @param id the identifier, may be null
     * @return the string form, or null if the identifier is null
     */
    public static String toString(ObjectId id) {
        return id == null ? null : id.toStringMongod();
    }

    /**
     * Convert a JSON document to a managed (mapped) entity instance.
     *
     * @param json the JSON document text
     * @param type the mapped entity type
     * @return the populated entity instance
     */
    public <T> T toManagedInstance(String json, Class<T> type) {
        return morphia.fromDBObject(type, (DBObject) JSON.parse(json));
    }

    /**
     * Convert a JSON array to a list of managed (mapped) entity instances.
     *
     * @param json the JSON array text
     * @param type the mapped entity type
     * @return the populated entity instances, in document order
     */
    public <T> List<T> toManagedList(String json, Class<T> type) {
        // program to the List interface; document order is preserved
        List<T> list = new ArrayList<T>();
        for (Object o : (BasicDBList) JSON.parse(json)) {
            list.add(morphia.fromDBObject(type, (DBObject) o));
        }
        return list;
    }
}