Example usage for com.mongodb DBCollection save

List of usage examples for com.mongodb DBCollection save

Introduction

This page lists usage examples for com.mongodb DBCollection.save.

Prototype

public WriteResult save(final DBObject document) 

Document

Updates an existing document or inserts a new one, depending on the document passed in: if the document has no _id field, save performs an insert and assigns a generated ObjectId; if it has an _id, save performs an upsert on that _id.
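
For quick reference, here is a minimal, self-contained sketch of the call (assuming the legacy mongo-java-driver API; the host, database, and collection names are illustrative):

import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class SaveExample {
    public static void main(String[] args) {
        MongoClient client = new MongoClient("localhost");
        DB db = client.getDB("test");
        DBCollection people = db.getCollection("people");

        // No _id field yet: save performs an insert and the driver
        // assigns a generated ObjectId to the document.
        BasicDBObject doc = new BasicDBObject("name", "Alice").append("age", 30);
        people.save(doc);

        // _id is now set: save performs an upsert on that _id,
        // replacing the stored document.
        doc.put("age", 31);
        people.save(doc);

        client.close();
    }
}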

Usage

From source file:com.gigaspaces.persistency.MongoClientConnector.java

License:Open Source License

public void performBatch(List<BatchUnit> rows) {
    if (logger.isTraceEnabled()) {
        logger.trace("MongoClientWrapper.performBatch(" + rows + ")");
        logger.trace("Batch size to be performed is " + rows.size());
    }
    //List<Future<? extends Number>> pending = new ArrayList<Future<? extends Number>>();

    for (BatchUnit row : rows) {
        SpaceDocument spaceDoc = row.getSpaceDocument();
        SpaceTypeDescriptor typeDescriptor = types.get(row.getTypeName()).getTypeDescriptor();
        SpaceDocumentMapper<DBObject> mapper = getMapper(typeDescriptor);

        DBObject obj = mapper.toDBObject(spaceDoc);

        DBCollection col = getCollection(row.getTypeName());
        switch (row.getDataSyncOperationType()) {

        case WRITE:
        case UPDATE:
            col.save(obj);
            break;
        case PARTIAL_UPDATE:
            DBObject query = BasicDBObjectBuilder.start()
                    .add(Constants.ID_PROPERTY, obj.get(Constants.ID_PROPERTY)).get();

            DBObject update = normalize(obj);
            col.update(query, update);
            break;
        // case REMOVE_BY_UID: // Not supported by this implementation
        case REMOVE:
            col.remove(obj);
            break;
        default:
            throw new IllegalStateException(
                    "Unsupported data sync operation type: " + row.getDataSyncOperationType());
        }
    }

    /*long totalCount = waitFor(pending);
            
    if (logger.isTraceEnabled()) {
       logger.trace("total accepted replies is: " + totalCount);
    }*/
}

From source file:com.google.api.ads.adwords.jaxws.extensions.report.model.persistence.mongodb.MongoEntityPersister.java

License:Open Source License

/**
 * @see com.google.api.ads.adwords.jaxws.extensions.report.model.persistence.EntityPersister
 *      #save(java.util.List)
 */
@Override
public <T> void save(List<T> listT) {
    if (listT != null && listT.size() > 0) {
        DBCollection dbcollection;
        for (T t : listT) {
            dbcollection = getDBCollection(t.getClass(), true);

            String jsonObject = gson.toJson(t);

            DBObject dbObject = (DBObject) com.mongodb.util.JSON.parse(jsonObject);
            dbcollection.save(dbObject);
        }
    }
}

From source file:com.ikanow.infinit.e.api.knowledge.federated.SimpleFederatedQueryEngine.java

License:Open Source License

private void cacheApiResponse(String url, BasicDBObject toCacheJson, SourceFederatedQueryConfigPojo endpoint) {

    int cacheTime_days = DEFAULT_CACHE_TIME_DAYS;
    if (null != endpoint.cacheTime_days) {
        cacheTime_days = endpoint.cacheTime_days;
    }
    if (cacheTime_days <= 0) { // Disable the _request_ cache (to disable all caching, including doc caching, use -1)
        return;
    }

    DBCollection endpointCacheCollection = getCacheCollection();

    BasicDBObject toCacheObj = new BasicDBObject(SimpleFederatedCache._id_, url);
    toCacheObj.put(SimpleFederatedCache.cachedJson_, toCacheJson);
    toCacheObj.put(SimpleFederatedCache.expiryDate_,
            new Date(new Date().getTime() + cacheTime_days * 3600L * 24L * 1000L));
    toCacheObj.put(SimpleFederatedCache.created_, new Date());
    endpointCacheCollection.save(toCacheObj);
}

From source file:com.ikanow.infinit.e.api.knowledge.federated.SimpleFederatedQueryEngine.java

License:Open Source License

public void test_cacheExpire() {
    DBCollection endpointCacheCollection = getCacheCollection();

    DBCursor dbc = endpointCacheCollection.find();
    for (DBObject cacheEntryObj : dbc) {
        BasicDBObject cacheEntry = (BasicDBObject) cacheEntryObj;
        cacheEntry.put(SimpleFederatedCache.expiryDate_, new Date(new Date().getTime() - 3600L * 1000L)); // (ie expired an hour ago)
        endpointCacheCollection.save(cacheEntry);
    }
}

From source file:com.ikanow.infinit.e.api.knowledge.federated.SimpleFederatedQueryEngine.java

License:Open Source License

public void test_cacheFill(String testName, boolean fill, boolean shouldBeFull) {
    DBCollection endpointCacheCollection = getCacheCollection();
    if (fill) {
        for (long i = 0; i < (1 + SimpleFederatedCache.QUERY_FEDERATION_CACHE_CLEANSE_SIZE); ++i) {
            SimpleFederatedCache fakeCacheElement = new SimpleFederatedCache();
            fakeCacheElement.expiryDate = new Date(new Date().getTime() - 3600L * 1000L); // (ie expired an hour ago)
            fakeCacheElement._id = testName + "_" + i;
            fakeCacheElement.cachedJson = new BasicDBObject();
            endpointCacheCollection.save(fakeCacheElement.toDb());
        }
        _lastChecked = new Date(new Date().getTime() - 602L * 1000L).getTime();
    }
    long count = endpointCacheCollection.count();
    if (shouldBeFull) {
        if (count < SimpleFederatedCache.QUERY_FEDERATION_CACHE_CLEANSE_SIZE) {
            System.out.println("*** " + testName + ": cache should just contain many elements, not: " + count);
            System.exit(-1);
        }
    } else {
        if (1 != count) {
            System.out.println("*** " + testName + ": cache should just contain one element, not: " + count);
            System.exit(-1);
        }
    }
}

From source file:com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java

License:Open Source License

private void runSavedQuery(CustomMapReduceJobPojo savedQuery) {

    // Run saved query:

    QueryHandler queryHandler = new QueryHandler();

    // Create query object

    ResponsePojo rp = null;
    StringBuffer errorString = new StringBuffer("Saved query error");
    try {
        String queryString = getQueryOrProcessing(savedQuery.query, QuerySpec.QUERY);
        AdvancedQueryPojo query = QueryHandler.createQueryPojo(queryString);
        StringBuffer communityIdStrList = new StringBuffer();
        for (ObjectId commId : savedQuery.communityIds) {
            if (communityIdStrList.length() > 0) {
                communityIdStrList.append(',');
            }
            communityIdStrList.append(commId.toString());
        }
        rp = queryHandler.doQuery(savedQuery.submitterID.toString(), query, communityIdStrList.toString(),
                errorString);
    } catch (Exception e) {
        //DEBUG
        e.printStackTrace();
        errorString.append(": " + e.getMessage());
    }
    if ((null == rp) || (null == rp.getResponse())) { // (this is likely some sort of internal error)
        if (null == rp) {
            rp = new ResponsePojo();
        }
        rp.setResponse(new ResponseObject("Query", false, "Unknown error"));
    }
    if (!rp.getResponse().isSuccess()) {
        setJobComplete(savedQuery, true, true, -1, -1,
                errorString.append('/').append(rp.getResponse().getMessage()).toString());
        return;
    }

    try {
        // Write to the temp output collection:

        DBCollection dbTemp = DbManager.getCollection(savedQuery.getOutputDatabase(),
                savedQuery.outputCollectionTemp);
        BasicDBObject outObj = new BasicDBObject();
        outObj.put("_id", new Date()); // (this gets renamed to "key")
        outObj.put("value", com.mongodb.util.JSON.parse(BaseDbPojo.getDefaultBuilder().create().toJson(rp)));
        dbTemp.save(outObj);
    } catch (Exception e) { // Any sort of error, just make sure we set the job to complete         
        setJobComplete(savedQuery, true, true, 1, 1, e.getMessage());
        return;
    }
    // Update job status

    setJobComplete(savedQuery, true, false, 1, 1, ApiManager.mapToApi(rp.getStats(), null));
}

From source file:com.ikanow.infinit.e.data_model.store.MongoDbManager.java

License:Apache License

@SuppressWarnings("deprecation")
public static void main(String[] args) throws UnknownHostException {
    MongoClient mc = new MongoClient(args[0]);
    long tnow = 0;
    DB db = mc.getDB("test");
    DBCollection test = db.getCollection("test123");
    BasicDBObject outObj = new BasicDBObject();
    int ITS = 1000;
    test.drop();

    boolean checkPerformance = false;
    boolean checkFunctionality = false;
    boolean checkErrors = false;

    // 1] Performance

    if (checkPerformance) {

        // ack'd
        db.setWriteConcern(WriteConcern.ACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj);
        }
        tnow = new Date().getTime() - tnow;
        System.out.println("1: Ack'd: " + tnow);

        // un ack'd
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        outObj = new BasicDBObject();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj);
        }
        tnow = new Date().getTime() - tnow;
        System.out.println("2: unAck'd: " + tnow);

        // un ack'd but call getLastError
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        outObj = new BasicDBObject();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj);
            db.getLastError();
        }
        tnow = new Date().getTime() - tnow;
        test.drop();
        System.out.println("3: unAck'd but GLEd: " + tnow);

        // ack'd override
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        outObj = new BasicDBObject();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj, WriteConcern.ACKNOWLEDGED);
            db.getLastError();
        }
        tnow = new Date().getTime() - tnow;
        System.out.println("4: unAck'd but ACKd: " + tnow);

        // Performance Results:
        // 2.6) (unack'd 100ms ... ack'd 27000)
        // 2.4) (same)
    }

    // 2] Functionality

    if (checkFunctionality) {

        // Unack:
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        WriteResult wr = test.update(new BasicDBObject(),
                new BasicDBObject(DbManager.set_, new BasicDBObject("val2", "x")), false, true);
        CommandResult cr = db.getLastError();
        System.out.println("UNACK: wr: " + wr);
        System.out.println("UNACK: cr: " + cr);

        // bonus, check that we get N==0 when insert dup object
        WriteResult wr2 = test.insert(outObj);
        System.out.println("ACK wr2 = " + wr2.getN() + " all = " + wr2);
        CommandResult cr2 = db.getLastError();
        System.out.println("ACK cr2 = " + cr2);

        // Ack1:
        db.setWriteConcern(WriteConcern.ACKNOWLEDGED);
        wr = test.update(new BasicDBObject(), new BasicDBObject(DbManager.set_, new BasicDBObject("val3", "x")),
                false, true);
        cr = db.getLastError();
        System.out.println("ACK1: wr: " + wr);
        System.out.println("ACK1: cr: " + cr);

        // Ack2:
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        wr = test.update(new BasicDBObject(), new BasicDBObject(DbManager.set_, new BasicDBObject("val4", "x")),
                false, true, WriteConcern.ACKNOWLEDGED);
        cr = db.getLastError();
        System.out.println("ACK2: wr: " + wr);
        System.out.println("ACK2: cr: " + cr);

        // bonus, check that we get N==0 when insert dup object
        wr2 = test.insert(outObj);
        System.out.println("ACK wr2 = " + wr2.getN() + " all = " + wr2);

        // Functionality results:
        // 2.6: unack wr == N/A, otherwise both have "n", "ok"
        // 2.4: unack wr == N/A all other wrs + crs identical 
    }

    if (checkErrors) {

        //set up sharding
        DbManager.getDB("admin").command(new BasicDBObject("enablesharding", "test"));
        // Ack:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.ACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")));
            System.out.println("ACK wr = " + wr);
        } catch (Exception e) {
            System.out.println("ACK err = " + e.toString());
        }

        // UnAck:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")), false, false,
                    WriteConcern.ACKNOWLEDGED);
            System.out.println("ACK override wr = " + wr);
        } catch (Exception e) {
            System.out.println("ACK override  err = " + e.toString());
        }

        // UnAck:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")));
            System.out.println("UNACK wr = " + wr);
        } catch (Exception e) {
            System.out.println("UNACK err = " + e.toString());
        }

        // UnAck + GLE:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")));
            CommandResult cr = db.getLastError();
            System.out.println("UNACK GLE wr = " + wr);
            System.out.println("UNACK GLE cr = " + cr);
        } catch (Exception e) {
            System.out.println("UNACK GLE err = " + e.toString());
        }

        // Error handling:

        // 2.6:
        // Ack - exception
        // Ack override - exception
        // UnAck - no error given
        // UnAck + GLE  - gle error

        // 2.4:
        // Ack - exception
        // Ack override - exception
        // UnAck - no error given
        // UnAck + GLE  - gle error

    }
}

From source file:com.ikanow.infinit.e.processing.custom.CustomProcessingController.java

License:Open Source License

public void initializeJob(CustomMapReduceJobPojo job) {
    long time_start_setup = new Date().getTime();
    long time_setup = 0;
    try {
        CustomOutputManager.prepareOutputCollection(job);

        // This may be a saved query, if so handle that separately
        if (null == job.jarURL) {
            ResponsePojo rp = new CustomSavedQueryTaskLauncher().runSavedQuery(job);
            if (!rp.getResponse().isSuccess()) {
                _statusManager.setJobComplete(job, true, true, -1, -1, rp.getResponse().getMessage());
            } else { // Success, write to output
                try {
                    // Write to the temp output collection:

                    String outCollection = job.outputCollectionTemp;
                    if ((job.appendResults != null) && job.appendResults)
                        outCollection = job.outputCollection;
                    //TESTED

                    DBCollection dbTemp = DbManager.getCollection(job.getOutputDatabase(), outCollection);
                    BasicDBObject outObj = new BasicDBObject();
                    outObj.put("key", new Date());
                    outObj.put("value",
                            com.mongodb.util.JSON.parse(BaseDbPojo.getDefaultBuilder().create().toJson(rp)));
                    dbTemp.save(outObj);

                    _statusManager.setJobComplete(job, true, false, 1, 1,
                            ApiManager.mapToApi(rp.getStats(), null));
                    job.jobidS = null;
                } catch (Exception e) { // Any sort of error, just make sure we set the job to complete         
                    _statusManager.setJobComplete(job, true, true, 1, 1, e.getMessage());
                    job.jobidS = null;
                }
            }
        } else {

            List<ObjectId> communityIds = InfiniteHadoopUtils.getUserCommunities(job.submitterID);
            job.tempJarLocation = InfiniteHadoopUtils.downloadJarFile(job.jarURL, communityIds, prop_custom,
                    job.submitterID);

            // Programmatic code:
            String jobid = new CustomHadoopTaskLauncher(_bLocalMode, _nDebugLimit, prop_custom)
                    .runHadoopJob(job, job.tempJarLocation);
            // OLD "COMMAND LINE" CODE:
            //add job to hadoop
            //String jobid = new CustomHadoopTaskLauncher().runHadoopJob_commandLine(job, job.tempJarLocation);

            if (jobid.startsWith("local_done")) { // (run locally)
                String statusMessage = null;
                if (jobid.length() > 12) {
                    statusMessage = jobid.substring(12);
                }
                _statusManager.setJobComplete(job, true, false, -1, -1, statusMessage);
                job.jobidS = null;
            } else if (jobid != null && !jobid.startsWith("Error")) {
                time_setup = new Date().getTime() - time_start_setup;
                _logger.info("job_setup_title=" + job.jobtitle + " job_setup_id=" + job._id.toString()
                        + " job_setup_time=" + time_setup + " job_setup_success=true job_hadoop_id=" + jobid);
                //write jobid back to lookup
                String[] jobParts = jobid.split("_");
                String jobS = jobParts[1];
                int jobN = Integer.parseInt(jobParts[2]);
                job.jobidS = jobS;
                job.jobidN = jobN;
                _statusManager.updateJobPojo(job._id, jobS, jobN, job.tempConfigXMLLocation,
                        job.tempJarLocation, job);
            } else {
                time_setup = new Date().getTime() - time_start_setup;
                _logger.info("job_setup_title=" + job.jobtitle + " job_setup_id=" + job._id.toString()
                        + " job_setup_time=" + time_setup + " job_setup_success=false  job_setup_message="
                        + jobid);
                //job failed, send off the error message
                _statusManager.setJobComplete(job, true, true, -1, -1, jobid);
                job.jobidS = null;
            }
        }
    } catch (Exception ex) {
        //job failed, send off the error message
        time_setup = new Date().getTime() - time_start_setup;
        _logger.info("job_setup_title=" + job.jobtitle + " job_setup_id=" + job._id.toString()
                + " job_setup_time=" + time_setup + " job_setup_success=false job_setup_message="
                + InfiniteHadoopUtils.createExceptionMessage(ex));
        _statusManager.setJobComplete(job, true, true, -1, -1, ex.getMessage());
        job.jobidS = null;
    }
}

From source file:com.ikanow.infinit.e.processing.generic.store_and_index.StoreAndIndexManager.java

License:Open Source License

/**
 * Add a single doc document to the datastore
 * @param col
 * @param doc
 */
private void addToDatastore(DBCollection col, DocumentPojo doc) {
    if (!_diagnosticMode) {
        if (!docHasExternalContent(doc.getUrl(), doc.getSourceUrl(), doc.getTempSource())) {
            doc.makeFullTextNonTransient(); // (ie store full text in this case)
        }
        col.save(doc.toDb());
    } else {
        System.out.println("StoreAndIndexManager.addToDatastore: " + ((BasicDBObject) doc.toDb()).toString());
    }
}

From source file:com.jhkt.playgroundArena.shared.tasks.MongoDBTaskRunner.java

License:Apache License

public void save(AbstractDocument document) {
    DBCollection collection = getDBCollection(document);
    WriteResult wr = collection.save(document.resolveToBasicDBObject());
    processWriteResult(wr);
}