List of usage examples for com.mongodb.DBCollection.drop()
public void drop()
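Before the project-specific examples, here is a minimal, self-contained sketch of a typical drop() call using the legacy com.mongodb driver. The class name, host, database, and collection names are placeholders, not taken from any of the projects below.

import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class DropExample {
    public static void main(String[] args) {
        // Placeholder connection settings - adjust to your deployment.
        MongoClient mongo = new MongoClient("localhost", 27017);
        try {
            DB db = mongo.getDB("exampleDb");
            DBCollection coll = db.getCollection("exampleCollection");

            // drop() removes the collection together with all of its documents and indexes.
            coll.drop();
        } finally {
            mongo.close();
        }
    }
}

Note that dropping a collection also discards its indexes, which is why several of the examples below repopulate the collection immediately after the call.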
From source file: net.handle.server.MongoDBHandleStorage.java
License: Open Source License

/**
 * Removes a collection.
 *
 * @param na Naming authority
 * @return the number of records in the collection before it was dropped
 */
public long deleteAllRecords(String na) {
    final DBCollection collection = getCollection(Util.encodeString(na));
    long count = collection.count();
    collection.drop();
    return count;
}
From source file: net.ion.framework.db.mongo.jdbc.Executor.java
License: Apache License

private int drop(Drop d) {
    DBCollection c = mdb.getCollection(d.getName());
    c.drop();
    return 1;
}
From source file: net.jurre.edutil.persistence.MongoDB.java
License: Open Source License

public void clearCollection(String collectionName) {
    DBCollection coll = this.db.getCollection(collectionName);
    coll.drop();
}
From source file: net.kamradtfamily.mongorest.CollectionsServlet.java
License: GNU General Public License

@Override
protected void doDelete(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException {
    log.fine("doDelete()");
    String db_name = req.getParameter("dbname");
    String col_name = req.getParameter("colname");
    if (db_name == null || col_name == null) {
        String names[] = req2mongonames(req);
        if (names != null) {
            db_name = names[0];
            col_name = names[1];
        }
        if (db_name == null || col_name == null) {
            error(res, SC_BAD_REQUEST, Status.get("param name missing"));
            return;
        }
    }
    DB db = mongo.getDB(db_name);
    DBCollection col = db.getCollection(col_name);
    col.drop();
    res.setStatus(SC_OK);
}
From source file: net.sf.okapi.lib.tmdb.mongodb.Repository.java
License: Open Source License

@Override
public void deleteTm(String tmName) {
    tm_coll = repository.getCollection(Repository.TM_COLL);
    BasicDBObject obj = new BasicDBObject();
    obj.put(Repository.TM_COL_NAME, tmName);
    tm_coll.remove(obj);
    DBCollection seg_col = repository.getCollection(tmName + "_SEG");
    seg_col.drop();
}
From source file: nl.telecats.customcid.CustomCallerIdManagerImpl.java
License: Open Source License

private void setRewrites(String collection, Collection<CustomCallerAlias> rewrites) {
    DBCollection col = m_db.getDb().getCollection(collection);
    BasicDBObject[] dbo = new BasicDBObject[rewrites.size()];
    Iterator<CustomCallerAlias> iRewrites = rewrites.iterator();
    for (int i = 0; i < rewrites.size(); i++) {
        dbo[i] = new BasicDBObject();
        CustomCallerAlias cca = iRewrites.next();
        dbo[i].put(FROM_ATTR, cca.getFrom());
        dbo[i].put(TO_ATTR, cca.getTo());
    }
    col.drop();
    col.insert(dbo);
}
From source file: no.nlf.avvik.melwinSOAPconnection.MongoOperations.java

/**
 * Replaces the contents of the "jumpers" collection with the given parachutists.
 *
 * @param parachutists the parachutists to store
 */
public void addJumpers(ArrayList<Parachutist> parachutists) {
    DBCollection dbCollectionParachutists = db.getCollection("jumpers");
    dbCollectionParachutists.drop();
    setParachutistCounter(1);
    int counter;
    for (Parachutist parachutist : parachutists) {
        ArrayList<Integer> memberLicenses = new ArrayList<>();
        for (License license : parachutist.getLicenses()) {
            memberLicenses.add(new Integer(license.getId()));
        }
        ArrayList<Integer> memberClubs = new ArrayList<>();
        for (Club club : parachutist.getMemberclubs()) {
            memberClubs.add(new Integer(club.getId()));
        }
        counter = getParachutistCounter();
        BasicDBObject parachutistMongoObject = new BasicDBObject("melwinId", parachutist.getMelwinId())
                .append("_class", "no.nlf.models.mongoclasses.MongoParachutist")
                .append("memberclubs", memberClubs)
                .append("licenses", memberLicenses)
                .append("firstname", parachutist.getFirstname())
                .append("lastname", parachutist.getLastname())
                .append("id", counter)
                .append("birthdate", parachutist.getBirthdate())
                .append("gender", parachutist.getGender())
                .append("street", parachutist.getStreet())
                .append("postnumber", parachutist.getPostnumber())
                .append("postplace", parachutist.getPostplace())
                .append("mail", parachutist.getMail())
                .append("phone", parachutist.getPhone());
        dbCollectionParachutists.save(parachutistMongoObject);
    }
}
From source file: nosqltools.DBConnection.java

protected List<String> dropCollections(List<String> collectionList) {
    DBCollection collection = null;
    String collectionName = "";
    for (int i = 0; i < collectionList.size(); i++) {
        try {
            collectionName = collectionList.get(i);
            collection = db.getCollection(collectionName);
            collection.drop();
            // If the drop succeeded, execution reaches this point; otherwise the catch block below records the error.
            collectionName += "--- Dropped";
            collectionList.set(i, collectionName);
        } catch (Exception exp) {
            collectionName += exp.getMessage();
            collectionList.set(i, collectionName);
        }
    }
    return collectionList;
}
From source file: org.alfresco.provision.Mongo.java
License: Open Source License

public void removeTestRunData(String testName, String testRunName) {
    {
        String collectionName = eventsCollectionName(testName, testRunName);
        if (db.collectionExists(collectionName)) {
            DBCollection collection = db.getCollection(collectionName);
            collection.drop();
        }
    }
    {
        String collectionName = resultsCollectionName(testName, testRunName);
        if (db.collectionExists(collectionName)) {
            DBCollection collection = db.getCollection(collectionName);
            collection.drop();
        }
    }
    {
        String collectionName = sessionsCollectionName(testName, testRunName);
        if (db.collectionExists(collectionName)) {
            DBCollection collection = db.getCollection(collectionName);
            collection.drop();
        }
    }
}
From source file: org.apache.hadoop.contrib.mongoreduce.MongoOutputCommitter.java
License: Apache License

@Override
public void setupJob(JobContext jobContext) throws IOException {

    /**
     * Note: we don't really have to do anything here - MongoDB is one of the
     * few systems that doesn't require you to create a database or collection
     * before writing to it. But in order to ingest a lot of data quickly we
     * have to pre-split the output collection.
     */

    Configuration conf = jobContext.getConfiguration();
    if (conf.getBoolean("mongo.output.skip_splitting", false))
        return;

    String database = conf.get("mongo.output.database");
    String collection = conf.get("mongo.output.collection");

    // connect to the global db
    Mongo m = new Mongo("localhost");
    DB db = m.getDB(database);
    DB admindb = m.getDB("admin");
    DB configdb = m.getDB("config");

    // optionally drop the existing collection
    boolean drop = conf.getBoolean("mongo.output.drop", false);
    DBCollection coll = db.getCollection(collection);
    if (drop) {
        coll.drop();
    } else {
        if (coll.count() > 0) {
            // don't shard an existing collection - it may already be sharded ...
            return;
        }
    }

    // get a list of shards
    ArrayList<String> shards = new ArrayList<String>();
    for (DBObject s : configdb.getCollection("shards").find()) {
        shards.add((String) s.get("_id"));
    }

    if (shards.size() < 2) {
        // fewer than two shards - nothing to pre-split
        return;
    }

    // shard the new output collection
    BasicDBObjectBuilder builder = new BasicDBObjectBuilder();
    builder.add("enableSharding", database);
    admindb.command(builder.get());

    builder = new BasicDBObjectBuilder();
    builder.add("shardCollection", database + "." + collection);
    // just shard on _id - but the user gets to decide what the _id is
    builder.add("key", new BasicDBObject("_id", 1));
    admindb.command(builder.get());

    // Pre-split to get parallel writes.
    // http://www.mongodb.org/display/DOCS/Splitting+Chunks says letting the
    // balancer move chunks can take about 5 minutes ... too long. The moveChunk
    // command may be faster; do it anyway - the jobs that can benefit from it will.

    // check for user-submitted split points
    String[] splits;
    String splitString = conf.get("mongo.output.split_points", "");

    // generate our own split points if necessary
    if (splitString.equals("")) {
        long max = (long) Math.pow(93.0, 5.0);
        long step = max / shards.size();
        splits = new String[shards.size() - 1];

        // assume human-readable keys
        for (int i = 0; i < shards.size() - 1; i++) {
            splits[i] = splitPointForLong(step * (i + 1));
        }
    } else {
        splits = splitString.split(",");
    }

    HashMap<String, Object> splitCmd = new HashMap<String, Object>();
    splitCmd.put("split", database + "." + collection);
    splitCmd.put("middle", "");

    HashMap<String, Object> moveCmd = new HashMap<String, Object>();
    moveCmd.put("moveChunk", database + "." + collection);
    moveCmd.put("find", "");
    moveCmd.put("to", "");

    // do the splitting and migrating; chunks are assigned to shards round-robin
    int i = 0;
    for (String split : splits) {
        splitCmd.remove("middle");
        splitCmd.put("middle", new BasicDBObject("_id", split));

        // create a new chunk
        admindb.command(new BasicDBObject(splitCmd));

        // move it to a shard
        moveCmd.remove("find");
        moveCmd.put("find", new BasicDBObject("_id", split));
        moveCmd.put("to", shards.get(i));

        admindb.command(new BasicDBObject(moveCmd));

        i = (i + 1) % shards.size();
    }
}