Example usage for com.mongodb DBCollection save

Introduction

On this page you can find usage examples for com.mongodb DBCollection save.

Prototype

public WriteResult save(final DBObject document) 

Document

Updates an existing document or inserts a new one, depending on the document passed in: if the document has no _id field, save performs an insert and the driver assigns a generated _id; if it does have an _id field, save performs an upsert that queries the collection on that _id.
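
A minimal sketch of this insert-vs-update behavior (the database and collection names here are hypothetical, using the legacy com.mongodb driver API that declares this method):

import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class SaveSketch {
    public static void main(String[] args) {
        MongoClient mongoClient = new MongoClient(); // assumes a mongod on localhost:27017
        DB db = mongoClient.getDB("exampleDb"); // hypothetical database name
        DBCollection users = db.getCollection("users"); // hypothetical collection name

        // The document has no _id yet, so save() performs an insert and
        // the driver fills in a generated ObjectId under "_id".
        BasicDBObject user = new BasicDBObject("name", "Alice").append("age", 30);
        users.save(user);

        // The same object now carries that _id, so this save() is an upsert
        // keyed on _id and replaces the stored document.
        user.put("age", 31);
        users.save(user);

        mongoClient.close();
    }
}

Several of the usage examples below lean on this semantic: they fetch a document with findOne, mutate it in place, and hand it back to save instead of issuing an explicit update.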

Usage

From source file:com.restfiddle.controller.rest.EntityDataController.java

License:Apache License

@RequestMapping(value = "/api/{projectId}/entities/{name}/{uuid}", method = RequestMethod.PUT, headers = "Accept=application/json", consumes = "application/json")
public @ResponseBody String updateEntityData(@PathVariable("projectId") String projectId,
        @PathVariable("name") String entityName, @PathVariable("uuid") String uuid,
        @RequestBody Object genericEntityDataDTO) {
    DBObject resultObject = new BasicDBObject();
    if (genericEntityDataDTO instanceof Map) {
        Map map = (Map) genericEntityDataDTO;
        if (map.get("id") != null && map.get("id") instanceof String) {
            String entityDataId = (String) map.get("id");
            logger.debug("Updating Entity Data with Id " + entityDataId);
        }
        JSONObject uiJson = createJsonFromMap(map);
        // ID is stored separately (in a different column).
        uiJson.remove("id");

        DBCollection dbCollection = mongoTemplate.getCollection(entityName);
        BasicDBObject queryObject = new BasicDBObject();
        queryObject.append("_id", new ObjectId(uuid));
        resultObject = dbCollection.findOne(queryObject);

        Set<String> keySet = uiJson.keySet();
        for (String key : keySet) {
            resultObject.put(key, uiJson.get(key));
        }
        dbCollection.save(resultObject);
    }
    String json = resultObject.toString();

    // Indentation
    JSONObject jsonObject = new JSONObject(json);
    return jsonObject.toString(4);
}

From source file:com.sanaldiyar.projects.nanohttpd.mongodbbasedsessionhandler.MongoDBBasedSessionHandler.java

/**
 * Parse the response for sending session information, especially cookies, to
 * the client.
 *
 * @param nanoSessionManager session manager
 * @param response the response
 */
@Override
public void parseResponse(NanoSessionManager nanoSessionManager, Response response) {
    if (!(nanoSessionManager instanceof MongoDBBasedSessionManager)) {
        return;
    }
    MongoDBBasedSessionManager mdbbsm = (MongoDBBasedSessionManager) nanoSessionManager;
    String sessionid = mdbbsm.getSessionID();
    Cookie sesscookie = new Cookie(SESSIONCOOKIEID, sessionid, mdbbasedsh_sesstimeout, TimeUnit.SECONDS,
            response.getRequestURL().getHost(), "/", false, true);
    mdbbsm.setExpires(new Date(new Date().getTime() + sesscookie.getMaxAge() * 1000));
    response.getCookies().add(sesscookie);
    DBCollection sessions = managers.getCollection("sessions");
    try {
        sessions.save(mdbbsm.getSession());
    } catch (Exception ex) {
        // Ignored: a failed save only means this session is not persisted.
    }
}

From source file:com.softinstigate.restheart.db.DocumentDAO.java

License:Open Source License

private static int optimisticCheckEtag(DBCollection coll, DBObject oldDocument, ObjectId requestEtag,
        int httpStatusIfOk) {
    if (requestEtag == null) {
        coll.save(oldDocument);
        return HttpStatus.SC_CONFLICT;
    }

    Object oldEtag = RequestHelper.getEtagAsObjectId(oldDocument.get("_etag"));

    if (oldEtag == null) { // the stored document had no etag, so the request is fine
        return HttpStatus.SC_NO_CONTENT;
    } else {
        if (oldEtag.equals(requestEtag)) {
            return httpStatusIfOk; // ok they match
        } else {
            // etags don't match, so restore the old document
            // (the optimistic locking strategy)
            coll.save(oldDocument);
            return HttpStatus.SC_PRECONDITION_FAILED;
        }
    }
}

From source file:com.sonyericsson.jenkins.plugins.bfa.db.MongoDBKnowledgeBase.java

License:Open Source License

/**
 * Copies all causes flagged as removed from the old database to this one.
 *
 * @param oldKnowledgeBase the old database.
 * @throws Exception if something goes wrong.
 */
protected void convertRemoved(MongoDBKnowledgeBase oldKnowledgeBase) throws Exception {
    List<DBObject> removed = oldKnowledgeBase.getRemovedCauses();
    DBCollection dbCollection = getJacksonCollection().getDbCollection();
    for (DBObject obj : removed) {
        dbCollection.save(obj);
    }
}

From source file:com.strategicgains.docussandra.controller.perf.remote.mongo.MongoLoader.java

License:Apache License

public static void loadMongoData(MongoClientURI uri, final int NUM_WORKERS, Database database,
        final int numDocs, final PerfTestParent clazz) {
    logger.info("------------Loading Data into: " + database.name() + " with MONGO!------------");
    try {
        try {
            MongoClient mongoClient = new MongoClient(uri);
            mongoClient.setWriteConcern(WriteConcern.MAJORITY);
            DB db = mongoClient.getDB(database.name());
            final DBCollection coll = db.getCollection(database.name());
            ArrayList<Thread> workers = new ArrayList<>(NUM_WORKERS + 1);
            int docsPerWorker = numDocs / NUM_WORKERS;
            try {
                List<Document> docs = clazz.getDocumentsFromFS();
                ArrayList<List<Document>> documentQueues = new ArrayList<>(NUM_WORKERS + 1);
                int numDocsAssigned = 0;
                while ((numDocsAssigned + 1) < numDocs) {
                    int start = numDocsAssigned;
                    int end = numDocsAssigned + docsPerWorker;
                    if (end > numDocs) {
                        end = numDocs - 1;
                    }
                    documentQueues.add(new ArrayList<>(docs.subList(start, end)));
                    numDocsAssigned = end;
                }
                for (final List<Document> queue : documentQueues) {
                    workers.add(new Thread() {
                        @Override
                        public void run() {
                            for (Document d : queue) {
                                DBObject o = (DBObject) JSON.parse(d.object());
                                coll.save(o);
                            }
                            logger.info("Thread " + Thread.currentThread().getName() + " is done. It processed "
                                    + queue.size() + " documents.");
                        }
                    });
                }
            } catch (UnsupportedOperationException e) {
                // We can't read everything in at once; all this block needs
                // to do is populate "workers" another way.
                for (int i = 0; i < NUM_WORKERS; i++) {
                    workers.add(new Thread() {
                        // Pick a random chunk size so the threads don't all go
                        // back to the FS at the same time and cause a bottleneck.
                        private final int chunk = (int) (Math.random() * 100) + 150;

                        @Override
                        public void run() {
                            ThreadLocal<Integer> counter = new ThreadLocal<>();
                            counter.set(0);
                            try {
                                List<Document> docs = clazz.getDocumentsFromFS(chunk);//grab a handful of documents
                                while (docs.size() > 0) {
                                    for (Document d : docs) { // process the documents we grabbed
                                        DBObject o = (DBObject) JSON.parse(d.object());
                                        coll.save(o);
                                        counter.set(counter.get() + 1);
                                    }
                                    docs = clazz.getDocumentsFromFS(chunk);//grab another handful of documents
                                }
                                logger.info("Thread " + Thread.currentThread().getName()
                                        + " is done. It processed " + counter.get() + " documents.");
                            } catch (IOException | ParseException e) {
                                logger.error("Couldn't read from document", e);
                            }
                        }
                    });
                }
            }

            long start = new Date().getTime();
            //start your threads!
            for (Thread t : workers) {
                t.start();
            }
            logger.info("All threads started, waiting for completion.");
            boolean allDone = false;
            boolean first = true;
            while (!allDone || first) {
                first = false;
                boolean done = true;
                for (Thread t : workers) {
                    if (t.isAlive()) {
                        done = false;
                        logger.info("Thread " + t.getName() + " is still running.");
                        break;
                    }
                }
                if (done) {
                    allDone = true;
                } else {
                    logger.info("We still have workers running...");
                    try {
                        Thread.sleep(10000);
                    } catch (InterruptedException e) {
                        // Ignored; we just poll the workers again.
                    }
                }
            }
            long end = new Date().getTime();
            long milliseconds = end - start;
            double seconds = (double) milliseconds / 1000d;
            output.info("Done loading data using " + NUM_WORKERS + " workers. Took: " + seconds + " seconds");
            double tpms = (double) numDocs / (double) milliseconds;
            double tps = tpms * 1000;
            double transactionTime = (double) milliseconds / (double) numDocs;
            output.info(database.name() + " Mongo Average Transactions Per Second: " + tps);
            output.info(
                    database.name() + " Mongo Average Transaction Time (in milliseconds): " + transactionTime);

        } catch (UnknownHostException e) {
            logger.error("Couldn't connect to Mongo Server", e);
        }
    } catch (IOException | ParseException e) {
        logger.error("Couldn't read data.", e);
    }
}

From source file:com.streamreduce.core.dao.GenericCollectionDAO.java

License:Apache License

public BasicDBObject updateCollectionEntry(DAODatasourceType datasourceType, String collectionName, ObjectId id,
        String json) throws CollectionObjectNotFoundException {
    DB db = getDatabase(datasourceType);
    DBCollection collection = db.getCollection(collectionName);
    BasicDBObject newPayloadObject = (BasicDBObject) JSON.parse(json);
    BasicDBObject oldPayloadObject = (BasicDBObject) collection.findOne(new BasicDBObject("_id", id));

    if (oldPayloadObject == null) {
        throw new CollectionObjectNotFoundException(datasourceType, collectionName, id);
    }

    newPayloadObject.put("_id", id);

    collection.save(newPayloadObject);

    return newPayloadObject;
}

From source file:com.streamreduce.core.dao.MetricDAO.java

License:Apache License

@Override
public Key<Metric> save(Metric entity) {
    DBCollection collection = getCollection(entity.getAccountId());
    DBObject dbObject = entity.toDBObject();
    collection.save(dbObject);
    entity.setId((ObjectId) dbObject.get("_id"));
    return new Key<>(Metric.class, entity.getId());
}

From source file:com.tensorwrench.testng.mongo.MongoTestDriver.java

License:Apache License

/**
 * Reads JSON from the input stream to populate the database.
 * @param db the database to populate
 * @param in the input stream containing the JSON to import
 * @throws IOException
 */
private void importStream(DB db, InputStream in) throws IOException {
    BufferedReader reader = new BufferedReader(new InputStreamReader(in));
    StringBuilder sb = new StringBuilder();
    String read = reader.readLine();
    while (read != null) {
        sb.append(read);
        read = reader.readLine();
    }

    BSONObject obj = (BSONObject) JSON.parse(sb.toString());
    for (String collection : obj.keySet()) {
        @SuppressWarnings("unchecked")
        List<DBObject> docs = (List<DBObject>) obj.get(collection);

        DBCollection col = db.getCollection(collection);
        for (DBObject o : docs) {
            col.save(o);
        }
    }
}

From source file:com.tomtom.speedtools.mongodb.migratedb.MongoDBMigration.java

License:Apache License

/**
 * Used to modify top-level documents. Documents will be stored in the collection when modified.
 *
 * @param db             Database.
 * @param collectionName Collection to iterate over.
 * @return Iterable to loop over all documents.
 */
@Nonnull
protected Iterable<DBObject> migrateCollection(@Nonnull final MongoDB db,
        @Nonnull final String collectionName) {
    assert db != null;
    assert collectionName != null;

    rootContext.flush();
    final DBCollection collection = db.getCollection(collectionName);

    final long count = collection.count();
    if (count > Integer.MAX_VALUE) {
        addProblem("",
                "Collection has too many records (" + count + ", where " + Integer.MAX_VALUE + " is max)");
    }

    /**
     * This set is going to contain all records for sure, so make sure it is large enough not to get
     * re-allocated all the time.
     *
     * See HashMap's class description at [http://docs.oracle.com/javase/6/docs/api/java/util/HashMap.html],
     * specifically "The expected number of entries in the map and its load factor should be taken into account
     * when setting its initial capacity, so as to minimize the number of rehash operations. If the initial
     * capacity is greater than the maximum number of entries divided by the load factor, no rehash operations
     * will ever occur.".
     */
    @SuppressWarnings("NumericCastThatLosesPrecision")
    final Set<Object> recordIds = new HashSet<>((int) ((double) count / 0.75) + 1);

    return new IterableDelegate<DBObject, DBObject>(collection.find()) {

        private int index = 1;

        @Nullable
        @Override
        public DBObject next(@Nonnull final DBObject value) {

            final Context context = rootContext.createChild(value, collectionName + ':' + index);
            index++;

            // Each document should have an _id field.
            final Object id = value.get("_id");
            if (id == null) {
                addProblem(context.path, "Document has no _id field: " + value);
                return null;
            }

            // Don't process records we have already processed. This can happen if a record
            // is modified.
            if (recordIds.contains(id)) {
                return null;
            }
            recordIds.add(id);

            // Keep original value in immutable string, referenced from 'flush()'.
            final String originalStringValue = value.toString();

            // Save object.
            context.add(new Command() {

                @Override
                public void flush() {

                    // If the new value differs from the old one, store it and print it.
                    final String stringValue = value.toString();
                    if (!originalStringValue.equals(stringValue)) {
                        if (!dryRun) {
                            collection.save(value);
                        }
                        LOG.debug(context.path + " - original document: " + originalStringValue);
                        LOG.debug(context.path + " - migrated document: " + value);
                    }
                }

                @Override
                public int ranking() {
                    return Integer.MAX_VALUE; // Saves should be executed last.
                }
            });

            return value;
        }
    };
}

From source file:com.tomtom.speedtools.mongodb.migratedb.MongoDBMigration.java

License:Apache License

/**
 * Used to add top-level documents.
 *
 * @param db             Database.
 * @param collectionName Collection to add to.
 * @param values         The values to add to the collection. Individual values must not be null.
 */
protected void addToCollection(@Nonnull final MongoDB db, @Nonnull final String collectionName,
        @Nonnull final DBObject... values) {
    assert db != null;
    assert collectionName != null;
    assert values != null;

    rootContext.flush();
    final DBCollection collection = db.getCollection(collectionName);

    int index = 1;
    for (final DBObject value : values) {
        if (value != null) {
            final Context context = rootContext.createChild(value, collectionName + '+' + index);
            index++;

            // Each document should have an _id field.
            final Object id = value.get("_id");
            if (id == null) {
                addProblem(context.path, "Document has no _id field: " + value);
                break;
            }

            // Save object.
            context.add(new Command() {

                @Override
                public void flush() {
                    if (!dryRun) {
                        collection.save(value);
                    }
                    LOG.debug(context.path + " - added document: " + value);
                }

                @Override
                public int ranking() {
                    return Integer.MAX_VALUE; // Saves should be executed last.
                }
            });
        } else {
            addProblem(rootContext.path, "Trying to add null document.");
        }
    }
}