Example usage for com.mongodb BasicDBObject append

List of usage examples for com.mongodb BasicDBObject append

Introduction

On this page you can find example usage of com.mongodb BasicDBObject append.

Prototype

@Override
public BasicDBObject append(final String key, final Object val) 

Document

Add a key/value pair to this object
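
Since append returns the BasicDBObject itself (see the prototype above), key/value pairs can be added either fluently or one statement at a time. Below is a minimal sketch, assuming the legacy MongoDB Java driver is on the classpath; the field names and values are illustrative only and do not come from the examples that follow.

import com.mongodb.BasicDBObject;

public class BasicDBObjectAppendExample {
    public static void main(String[] args) {
        // fluent style: append returns this, so calls can be chained
        BasicDBObject doc = new BasicDBObject("title", "example")
                .append("count", 42)
                .append("nested", new BasicDBObject("flag", true));

        // statement-by-statement style, as used in most of the examples below
        BasicDBObject query = new BasicDBObject();
        query.append("status", null);                          // as a query, matches missing or null fields
        query.append("age", new BasicDBObject("$lt", 30));     // operator sub-document

        System.out.println(doc);    // e.g. { "title" : "example" , "count" : 42 , "nested" : { "flag" : true } }
        System.out.println(query);  // e.g. { "status" : null , "age" : { "$lt" : 30 } }
    }
}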

Usage

From source file:com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java

License:Open Source License

/**
 * Attempt to shard the output collection.  If the collection
 * is already sharded it will just spit back an error which
 * is fine.
 * 
 * @param outputCollection
 */
private void shardOutputCollection(CustomMapReduceJobPojo job) {
    //enable sharding for the custommr db in case it hasn't been
    DbManager.getDB("admin").command(new BasicDBObject("enablesharding", job.getOutputDatabase()));
    //enable sharding for the output collection
    if (job.outputCollection != null) {
        BasicDBObject command = new BasicDBObject("shardcollection",
                job.getOutputDatabase() + "." + job.outputCollection);
        command.append("key", new BasicDBObject("_id", 1));
        DbManager.getDB("admin").command(command);
    }
    //enable sharding on temp output collection
    if (job.outputCollectionTemp != null) {
        BasicDBObject command1 = new BasicDBObject("shardcollection",
                job.getOutputDatabase() + "." + job.outputCollectionTemp);
        command1.append("key", new BasicDBObject("_id", 1));
        DbManager.getDB("admin").command(command1);
    }
}
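
For reference, the two append calls in the example above simply build the standard shardcollection command document. The following is a minimal sketch of the equivalent chained form, not part of the source file above; the namespace string is a placeholder.

import com.mongodb.BasicDBObject;

public class ShardCommandSketch {
    public static void main(String[] args) {
        // equivalent to the command object built above, using append's fluent return value
        BasicDBObject shardCmd = new BasicDBObject("shardcollection", "myDb.myOutputCollection")
                .append("key", new BasicDBObject("_id", 1));
        System.out.println(shardCmd);
        // roughly: { "shardcollection" : "myDb.myOutputCollection" , "key" : { "_id" : 1 } }
        // the source above then sends it with DbManager.getDB("admin").command(...)
    }
}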

From source file:com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java

License:Open Source License

private void updateJobPojo(ObjectId _id, String jobids, int jobidn, String xmlLocation, String jarLocation) {
    try {
        BasicDBObject set = new BasicDBObject();
        set.append(CustomMapReduceJobPojo.jobidS_, jobids);
        set.append(CustomMapReduceJobPojo.jobidN_, jobidn);
        set.append(CustomMapReduceJobPojo.tempConfigXMLLocation_, xmlLocation);
        set.append(CustomMapReduceJobPojo.tempJarLocation_, jarLocation);
        set.append(CustomMapReduceJobPojo.errorMessage_, null);
        BasicDBObject updateObject = new BasicDBObject(MongoDbManager.set_, set);
        DbManager.getCustom().getLookup().update(new BasicDBObject(CustomMapReduceJobPojo._id_, _id),
                updateObject);
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}

From source file:com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java

License:Open Source License

/**
 * Queries mongo to see if any jobs need to be run now (i.e. their nextRunTime is
 * less than the current time).
 * 5/23/2012 Burch - Updated to only return 1 job atomically; sets that job's jobidS to
 * a blank so other core servers won't attempt to run it.
 * 
 * @return a job that needs to run, or null if there is none
 */
private CustomMapReduceJobPojo getJobsToRun() {
    try {
        // First off, check the number of running jobs - don't exceed the max
        // (seems to run into memory problems if this isn't limited)
        int nMaxConcurrent = prop_custom.getHadoopMaxConcurrent();
        if (Integer.MAX_VALUE != nMaxConcurrent) {
            BasicDBObject maxQuery = new BasicDBObject(CustomMapReduceJobPojo.jobidS_,
                    new BasicDBObject(DbManager.ne_, null));
            int nCurrRunningJobs = (int) DbManager.getCustom().getLookup().count(maxQuery);
            if (nCurrRunningJobs >= nMaxConcurrent) {
                return null;
            }
        }
        //TESTED

        BasicDBObject query = new BasicDBObject();
        query.append(CustomMapReduceJobPojo.jobidS_, null);
        query.append(CustomMapReduceJobPojo.waitingOn_, new BasicDBObject(MongoDbManager.size_, 0));
        query.append(CustomMapReduceJobPojo.nextRunTime_,
                new BasicDBObject(MongoDbManager.lt_, new Date().getTime()));
        if (!bHadoopEnabled && !bLocalMode) {
            // Can only get shared queries:
            query.append("jarURL", null);
        }
        BasicDBObject updates = new BasicDBObject(CustomMapReduceJobPojo.jobidS_, "");
        updates.append("lastRunTime", new Date());
        BasicDBObject update = new BasicDBObject(MongoDbManager.set_, updates);
        DBObject dbo = DbManager.getCustom().getLookup().findAndModify(query, null, null, false, update, true,
                false);

        if (dbo != null) {
            return CustomMapReduceJobPojo.fromDb(dbo, CustomMapReduceJobPojo.class);
        }
    } catch (Exception ex) {
        //oh noes!
        ex.printStackTrace();
    }

    return null;
}

From source file:com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java

License:Open Source License

private CustomMapReduceJobPojo getJobsToMakeComplete() {
    try {
        BasicDBObject query = new BasicDBObject();
        BasicDBObject nors[] = new BasicDBObject[3];
        nors[0] = new BasicDBObject(CustomMapReduceJobPojo.jobidS_, null);
        nors[1] = new BasicDBObject(CustomMapReduceJobPojo.jobidS_, "CHECKING_COMPLETION");
        nors[2] = new BasicDBObject(CustomMapReduceJobPojo.jobidS_, "");
        query.put(MongoDbManager.nor_, Arrays.asList(nors));
        BasicDBObject updates = new BasicDBObject(CustomMapReduceJobPojo.jobidS_, "CHECKING_COMPLETION");
        BasicDBObject update = new BasicDBObject(MongoDbManager.set_, updates);
        if (!bHadoopEnabled) {
            // Can only get shared queries:
            query.append(CustomMapReduceJobPojo.jarURL_, null);
        }
        DBObject dbo = DbManager.getCustom().getLookup().findAndModify(query, update);

        if (dbo != null) {
            return CustomMapReduceJobPojo.fromDb(dbo, CustomMapReduceJobPojo.class);
        }
    } catch (Exception ex) {
        //oh noes!
        ex.printStackTrace();
    }

    return null;
}

From source file:com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java

License:Open Source License

/**
 * Sets the custom mr pojo to be complete for the
 * current job.  Currently this is done by removing the
 * jobid and updating the next run time; it also increments the
 * timesRan counter so we can calculate nextRunTime.
 * 
 * Also sets lastCompletionTime to now (the best we can approximate).
 * 
 * @param cmr
 */
private void setJobComplete(CustomMapReduceJobPojo cmr, boolean isComplete, boolean isError, float mapProgress,
        float reduceProgress, String errorMessage) {
    BasicDBObject updates = new BasicDBObject();
    BasicDBObject update = new BasicDBObject();
    try {
        long nNew = 0;
        long nTotal = 0;
        if (isComplete) {
            updates.append(CustomMapReduceJobPojo.jobidS_, null);
            updates.append(CustomMapReduceJobPojo.jobidN_, 0);
            try {
                long nextRunTime = getNextRunTime(cmr.scheduleFreq, cmr.firstSchedule, cmr.nextRunTime,
                        cmr.timesRan + 1);
                //if next run time reschedules to run before now, keep rescheduling until it's later
                //the server could have been turned off for days and would try to rerun all jobs once a day
                while (nextRunTime < new Date().getTime()) {
                    Date firstSchedule = new Date(nextRunTime);
                    cmr.firstSchedule = firstSchedule;
                    updates.append(CustomMapReduceJobPojo.firstSchedule_, firstSchedule);
                    nextRunTime = getNextRunTime(cmr.scheduleFreq, cmr.firstSchedule, cmr.nextRunTime,
                            cmr.timesRan + 1);
                }
                updates.append(CustomMapReduceJobPojo.nextRunTime_, nextRunTime);
            } catch (Exception e) {
            } // just carry on, we'll live...

            updates.append(CustomMapReduceJobPojo.lastCompletionTime_, new Date());
            updates.append(CustomMapReduceJobPojo.tempConfigXMLLocation_, null);
            updates.append(CustomMapReduceJobPojo.tempJarLocation_, null);
            try {
                removeTempFile(cmr.tempConfigXMLLocation);
                removeTempFile(cmr.tempJarLocation);
            } catch (Exception e) {
                _logger.info("job_error_removing_tempfiles=" + HarvestExceptionUtils.createExceptionMessage(e));
            }

            BasicDBObject incs = new BasicDBObject(CustomMapReduceJobPojo.timesRan_, 1);
            //copy dependencies to waitingOn
            updates.append(CustomMapReduceJobPojo.waitingOn_, cmr.jobDependencies);
            if (!isError) {
                nNew = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp).count();

                updates.append(CustomMapReduceJobPojo.errorMessage_, errorMessage); // (will often be null)
                moveTempOutput(cmr);
                //if job was successful, mark off dependencies
                removeJobFromChildren(cmr._id);

                nTotal = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollection).count();
            } else {
                //failed, just append error message                              
                updates.append(CustomMapReduceJobPojo.errorMessage_, errorMessage);
                incs.append(CustomMapReduceJobPojo.timesFailed_, 1);
            }
            update.append(MongoDbManager.inc_, incs);
            long runtime = new Date().getTime() - cmr.lastRunTime.getTime();
            long timeFromSchedule = cmr.lastRunTime.getTime() - cmr.nextRunTime;

            if (null != cmr.jobidS) {
                _logger.info("job_completion_title=" + cmr.jobtitle + " job_completion_id=" + cmr._id.toString()
                        + " job_completion_time=" + runtime + " job_schedule_delta=" + timeFromSchedule
                        + " job_completion_success=" + !isError + " job_hadoop_id=" + cmr.jobidS + "_"
                        + cmr.jobidN + " job_new_records=" + nNew + " job_total_records=" + nTotal);
            } else {
                _logger.info("job_completion_title=" + cmr.jobtitle + " job_completion_id=" + cmr._id.toString()
                        + " job_completion_time=" + runtime + " job_schedule_delta=" + timeFromSchedule
                        + " job_completion_success=" + !isError + " job_new_records=" + nNew
                        + " job_total_records=" + nTotal);
            }
        }
        updates.append(CustomMapReduceJobPojo.mapProgress_, mapProgress);
        updates.append(CustomMapReduceJobPojo.reduceProgress_, reduceProgress);
    } catch (Exception ex) {
        _logger.info("job_error_updating_status_title=" + cmr.jobtitle + " job_error_updating_status_id="
                + cmr._id.toString() + " job_error_updating_status_message="
                + HarvestExceptionUtils.createExceptionMessage(ex));
    } finally { // It's really bad if this doesn't happen, so do it here so that it always gets called
        if (!updates.isEmpty()) {
            update.append(MongoDbManager.set_, updates);
            // (if isComplete, should always include resetting jobidS and jobidN)
            DbManager.getCustom().getLookup().update(new BasicDBObject(CustomMapReduceJobPojo._id_, cmr._id),
                    update);
        }
    }
}
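
Worth noting in the example above: a $inc sub-document and a $set sub-document are appended to the same update object so that a single update call applies both. Below is a minimal sketch of the resulting document shape, using plain field names instead of the pojo/manager constants from the source.

import com.mongodb.BasicDBObject;

public class CompoundUpdateSketch {
    public static void main(String[] args) {
        BasicDBObject sets = new BasicDBObject("jobidS", null).append("jobidN", 0);
        BasicDBObject incs = new BasicDBObject("timesRan", 1);

        BasicDBObject update = new BasicDBObject();
        update.append("$inc", incs);
        update.append("$set", sets);
        System.out.println(update);
        // roughly: { "$inc" : { "timesRan" : 1 } , "$set" : { "jobidS" : null , "jobidN" : 0 } }
        // passing this to collection.update(query, update) applies both operators in one call
    }
}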

From source file:com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java

License:Open Source License

/**
 * Moves the output of a job from output_tmp to output and deletes
 * the tmp collection.
 * 
 * @param cmr
 * @throws IOException 
 * @throws ParserConfigurationException 
 * @throws SAXException 
 */
private void moveTempOutput(CustomMapReduceJobPojo cmr)
        throws IOException, SAXException, ParserConfigurationException {
    // If we are an export job then move files:
    bringTempOutputToFront(cmr);
    // (the rest of this will just do nothing) 

    /**
     * Atomic plan:
     * If not append, move customlookup pointer to tmp collection, drop old collection.
     * If append, set sync flag (find/mod), move results from tmp to old, unset sync flag.
     * 
     */
    //step1 build out any of the post proc arguments
    DBObject postProcObject = null;
    boolean limitAllData = true;
    boolean hasSort = false;
    int limit = 0;
    BasicDBObject sort = new BasicDBObject();
    try {
        postProcObject = (DBObject) com.mongodb.util.JSON
                .parse(getQueryOrProcessing(cmr.query, QuerySpec.POSTPROC));
        if (postProcObject != null) {
            if (postProcObject.containsField("limitAllData")) {
                limitAllData = (Boolean) postProcObject.get("limitAllData");
            }
            if (postProcObject.containsField("limit")) {
                limit = (Integer) postProcObject.get("limit");
                if (postProcObject.containsField("sortField")) {
                    String sfield = (String) postProcObject.get("sortField");
                    int sortDir = 1;
                    if (postProcObject.containsField("sortDirection")) {
                        sortDir = (Integer) postProcObject.get("sortDirection");
                    }
                    sort.put(sfield, sortDir);
                    hasSort = true;
                } else if (limit > 0) {
                    //set a default sort because the user posted a limit
                    sort.put("_id", -1);
                    hasSort = true;
                }
            }
        }
    } catch (Exception ex) {
        _logger.info(
                "job_error_post_proc_title=" + cmr.jobtitle + " job_error_post_proc_id=" + cmr._id.toString()
                        + " job_error_post_proc_message=" + HarvestExceptionUtils.createExceptionMessage(ex));
    }

    //step 2a if not appending results then work on temp collection and swap to main
    if ((null == cmr.appendResults) || !cmr.appendResults) //format temp then change lookup pointer to temp collection
    {
        //transform all the results into necessary format:         
        DBCursor dbc_tmp = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                .find(new BasicDBObject("key", null)).sort(sort).limit(limit);
        while (dbc_tmp.hasNext()) {
            DBObject dbo = dbc_tmp.next();
            Object key = dbo.get("_id");
            dbo.put("key", key);
            dbo.removeField("_id");
            DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp).insert(dbo);
        }
        DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                .remove(new BasicDBObject("key", null));

        //swap the output collections
        BasicDBObject notappendupdates = new BasicDBObject(CustomMapReduceJobPojo.outputCollection_,
                cmr.outputCollectionTemp);
        notappendupdates.append(CustomMapReduceJobPojo.outputCollectionTemp_, cmr.outputCollection);
        DbManager.getCustom().getLookup().findAndModify(new BasicDBObject(CustomMapReduceJobPojo._id_, cmr._id),
                new BasicDBObject(MongoDbManager.set_, notappendupdates));
        String temp = cmr.outputCollectionTemp;
        cmr.outputCollectionTemp = cmr.outputCollection;
        cmr.outputCollection = temp;
    } else //step 2b if appending results then drop modified results in output collection
    {
        DbManager.getCustom().getLookup().findAndModify(new BasicDBObject(CustomMapReduceJobPojo._id_, cmr._id),
                new BasicDBObject(MongoDbManager.set_, new BasicDBObject("isUpdatingOutput", true)));
        //remove any aged out results
        if ((null != cmr.appendAgeOutInDays) && cmr.appendAgeOutInDays > 0) {
            //remove any results that have aged out
            long ageOutMS = (long) (cmr.appendAgeOutInDays * MS_IN_DAY);
            Date lastAgeOut = new Date(((new Date()).getTime() - ageOutMS));
            DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollection).remove(
                    new BasicDBObject("_id", new BasicDBObject(MongoDbManager.lt_, new ObjectId(lastAgeOut))));
        }
        DBCursor dbc_tmp;
        if (!limitAllData) {
            //sort and limit the temp data set because we only want to process it
            dbc_tmp = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                    .find(new BasicDBObject("key", null)).sort(sort).limit(limit);
            limit = 0; //reset limit so we get everything in a few steps (we only want to limit the new data)
        } else {
            dbc_tmp = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp)
                    .find(new BasicDBObject("key", null));
        }

        DBCollection dbc = DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollection);
        //transform temp results and dump into output collection
        while (dbc_tmp.hasNext()) {
            DBObject dbo = dbc_tmp.next();
            //transform the dbo to format {_id:ObjectId, key:(prev_id), value:value}
            Object key = dbo.get("_id");
            dbo.put("key", key);
            dbo.removeField("_id");
            //_id field should be automatically set to objectid when inserting now
            dbc.insert(dbo);
        }
        //if there is a sort, we need to apply it to all the data now
        if (hasSort) {
            ObjectId OID = new ObjectId();
            BasicDBObject query = new BasicDBObject("_id", new BasicDBObject(MongoDbManager.lt_, OID));
            //find everything inserted before now and sort/limit the data
            DBCursor dbc_sort = dbc.find(query).sort(sort).limit(limit);
            while (dbc_sort.hasNext()) {
                //reinsert the data into db (it should be in sorted order naturally now)
                DBObject dbo = dbc_sort.next();
                dbo.removeField("_id");
                dbc.insert(dbo);
            }
            //remove everything inserted before we reorganized everything (should leave only the new results in natural order)
            dbc.remove(query);
        }
        DbManager.getCustom().getLookup().findAndModify(new BasicDBObject(CustomMapReduceJobPojo._id_, cmr._id),
                new BasicDBObject(MongoDbManager.set_, new BasicDBObject("isUpdatingOutput", false)));
    }
    //step3 clean up temp output collection so we can use it again
    // (drop it, removing chunks)
    try {
        DbManager.getCollection(cmr.getOutputDatabase(), cmr.outputCollectionTemp).drop();
    } catch (Exception e) {
    } // That's fine, it probably just doesn't exist yet...
}

From source file:com.ikanow.infinit.e.core.utils.SourceUtils.java

License:Open Source License

private static void addSearchCycleClause(BasicDBObject currQuery, Date now) {
    BasicDBObject subclause1 = new BasicDBObject(SourcePojo.searchCycle_secs_,
            new BasicDBObject(MongoDbManager.exists_, false));
    StringBuffer js = new StringBuffer();
    js.append(
            "(null == this.harvest) || ('success_iteration'== this.harvest.harvest_status) || (null == this.harvest.harvested) || (null == this.searchCycle_secs) || ((this.searchCycle_secs >= 0) && ((this.harvest.harvested.getTime() + 1000*this.searchCycle_secs) <= ");
    js.append(now.getTime());
    js.append("))");
    BasicDBObject subclause2 = new BasicDBObject(MongoDbManager.where_, js.toString());
    currQuery.append(MongoDbManager.or_, Arrays.asList(subclause1, subclause2));
}
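
append also accepts array values, which is how the $or clause above ends up in the query. Below is a minimal sketch, assuming plain string field names and operators rather than the SourcePojo/MongoDbManager constants used in the source.

import com.mongodb.BasicDBObject;
import java.util.Arrays;

public class OrClauseSketch {
    public static void main(String[] args) {
        BasicDBObject noCycleField = new BasicDBObject("searchCycle_secs", new BasicDBObject("$exists", false));
        BasicDBObject dueForHarvest = new BasicDBObject("$where", "this.searchCycle_secs >= 0");

        BasicDBObject query = new BasicDBObject();
        query.append("$or", Arrays.asList(noCycleField, dueForHarvest));
        System.out.println(query);
        // roughly: { "$or" : [ { "searchCycle_secs" : { "$exists" : false } } ,
        //            { "$where" : "this.searchCycle_secs >= 0" } ] }
    }
}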

From source file:com.ikanow.infinit.e.core.utils.SourceUtils.java

License:Open Source License

private static int pruneSource(SourcePojo source, int nToPrune, int ttl_days) {
    int nTotalDocsDeleted = 0;
    int nDocsDeleted = 0;

    // (code taken mostly from SourceHandler.deleteSource)
    if (null != source.getKey()) { // or may delete everything!
        BasicDBObject docQuery = new BasicDBObject(DocumentPojo.sourceKey_,
                source.getDistributedKeyQueryTerm());
        if (ttl_days > 0) {
            Date ageOut = new Date(new Date().getTime() - ttl_days * 24L * 3600L * 1000L);
            ObjectId oldestAllowedId = new ObjectId(ageOut);
            docQuery.put(DocumentPojo._id_, new BasicDBObject(DbManager.lt_, oldestAllowedId));
        } //TODO: TOTEST
        docQuery.put(DocumentPojo.index_, new BasicDBObject(DbManager.ne_, "?DEL?")); // (robustness)
        BasicDBObject sortField = new BasicDBObject(DocumentPojo._id_, 1);
        BasicDBObject docFields = new BasicDBObject();
        docFields.append(DocumentPojo.url_, 1);
        docFields.append(DocumentPojo.sourceUrl_, 1);
        docFields.append(DocumentPojo.index_, 1);
        docFields.append(DocumentPojo.sourceKey_, 1);

        StoreAndIndexManager dataStore = new StoreAndIndexManager();
        ObjectId nextId = null;
        while (nToPrune > 0) {
            int nToDelete = nToPrune;
            if (nToDelete > 10000) {
                nToDelete = 10000;
            }
            if (null != nextId) {
                docQuery.put(DocumentPojo._id_, new BasicDBObject(DbManager.gt_, nextId));
            } //TESTED (by hand)

            DBCursor dbc = DbManager.getDocument().getMetadata().find(docQuery, docFields).sort(sortField)
                    .limit(nToDelete);
            // (ie batches of 10K, ascending ordered by _id)

            nToPrune -= nToDelete;
            if (0 == nDocsDeleted) {
                nDocsDeleted = dbc.count();
                nTotalDocsDeleted += nDocsDeleted;
            }
            if (0 == dbc.size()) {
                break;
            }
            List<DocumentPojo> docs = DocumentPojo.listFromDb(dbc, DocumentPojo.listType());

            nextId = dataStore.removeFromDatastore_byURL(docs, source);
        }
    }
    // No need to do anything related to soft deletion, this is all handled when the harvest ends 
    return nTotalDocsDeleted;
}
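
The docFields object in this example is an include-only projection built entirely with append calls. Below is a minimal sketch of the same pattern with plain field names, showing how it pairs with DBCollection.find(query, fields); it is not part of the source file above.

import com.mongodb.BasicDBObject;

public class ProjectionSketch {
    public static void main(String[] args) {
        // include-only projection: 1 means "return this field"
        BasicDBObject docFields = new BasicDBObject();
        docFields.append("url", 1);
        docFields.append("sourceUrl", 1);
        docFields.append("index", 1);
        docFields.append("sourceKey", 1);
        System.out.println(docFields);
        // roughly: { "url" : 1 , "sourceUrl" : 1 , "index" : 1 , "sourceKey" : 1 }
        // used as the second argument to DBCollection.find(query, docFields), as in the source above
    }
}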

From source file:com.ikanow.infinit.e.data_model.store.document.CompressedFullTextPojo.java

License:Apache License

public BasicDBObject getUpdate() {
    BasicDBObject update = new BasicDBObject();
    update.append(url_, url);
    update.append(sourceKey_, sourceKey);
    update.append(communityId_, communityId);

    update.append(gzip_content_, gzip_content);
    update.append(gzip_len_, gzip_len);
    if (null != gzip_raw_content) {
        update.append(gzip_raw_content_, gzip_raw_content);
        update.append(gzip_raw_len_, gzip_raw_len);
    }
    if (null != gzip_md_content) {
        update.append(gzip_md_content_, gzip_md_content);
        update.append(gzip_md_len_, gzip_md_len);
    }
    return update;

}

From source file:com.ikanow.infinit.e.data_model.store.MongoDbManager.java

License:Apache License

@SuppressWarnings("deprecation")
public static void main(String[] args) throws UnknownHostException {
    MongoClient mc = new MongoClient(args[0]);
    long tnow = 0;
    DB db = mc.getDB("test");
    DBCollection test = db.getCollection("test123");
    BasicDBObject outObj = new BasicDBObject();
    int ITS = 1000;
    test.drop();

    boolean checkPerformance = false;
    boolean checkFunctionality = false;
    boolean checkErrors = false;

    // 1] Performance

    if (checkPerformance) {

        // ack'd
        db.setWriteConcern(WriteConcern.ACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj);
        }
        tnow = new Date().getTime() - tnow;
        System.out.println("1: Ack'd: " + tnow);

        // un ack'd
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        outObj = new BasicDBObject();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj);
        }
        tnow = new Date().getTime() - tnow;
        System.out.println("2: unAck'd: " + tnow);

        // un ack'd but call getLastError
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        outObj = new BasicDBObject();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj);
            db.getLastError();
        }
        tnow = new Date().getTime() - tnow;
        test.drop();
        System.out.println("3: unAck'd but GLEd: " + tnow);

        // ack'd override
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        outObj = new BasicDBObject();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj, WriteConcern.ACKNOWLEDGED);
            db.getLastError();
        }
        tnow = new Date().getTime() - tnow;
        System.out.println("4: unAck'd but ACKd: " + tnow);

        // Performance Results:
        // 2.6) (unack'd 100ms ... ack'd 27000)
        // 2.4) (same)
    }

    // 2] Functionality

    if (checkFunctionality) {

        // Unack:
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        WriteResult wr = test.update(new BasicDBObject(),
                new BasicDBObject(DbManager.set_, new BasicDBObject("val2", "x")), false, true);
        CommandResult cr = db.getLastError();
        System.out.println("UNACK: wr: " + wr);
        System.out.println("UNACK: cr: " + cr);

        // bonus, check that we get N==0 when insert dup object
        WriteResult wr2 = test.insert(outObj);
        System.out.println("ACK wr2 = " + wr2.getN() + " all = " + wr2);
        CommandResult cr2 = db.getLastError();
        System.out.println("ACK cr2 = " + cr2);

        // Ack1:
        db.setWriteConcern(WriteConcern.ACKNOWLEDGED);
        wr = test.update(new BasicDBObject(), new BasicDBObject(DbManager.set_, new BasicDBObject("val3", "x")),
                false, true);
        cr = db.getLastError();
        System.out.println("ACK1: wr: " + wr);
        System.out.println("ACK1: cr: " + cr);

        // Ack2:
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        wr = test.update(new BasicDBObject(), new BasicDBObject(DbManager.set_, new BasicDBObject("val4", "x")),
                false, true, WriteConcern.ACKNOWLEDGED);
        cr = db.getLastError();
        System.out.println("ACK2: wr: " + wr);
        System.out.println("ACK2: cr: " + cr);

        // bonus, check that we get N==0 when insert dup object
        wr2 = test.insert(outObj);
        System.out.println("ACK wr2 = " + wr2.getN() + " all = " + wr2);

        // Functionality results:
        // 2.6: unack wr == N/A, otherwise both have "n", "ok"
        // 2.4: unack wr == N/A all other wrs + crs identical 
    }

    if (checkErrors) {

        //set up sharding
        DbManager.getDB("admin").command(new BasicDBObject("enablesharding", "test"));
        // Ack:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.ACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")));
            System.out.println("ACK wr = " + wr);
        } catch (Exception e) {
            System.out.println("ACK err = " + e.toString());
        }

        // UnAck:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")), false, false,
                    WriteConcern.ACKNOWLEDGED);
            System.out.println("ACK override wr = " + wr);
        } catch (Exception e) {
            System.out.println("ACK override  err = " + e.toString());
        }

        // UnAck:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")));
            System.out.println("UNACK wr = " + wr);
        } catch (Exception e) {
            System.out.println("UNACK err = " + e.toString());
        }

        // UnAck + GLE:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")));
            CommandResult cr = db.getLastError();
            System.out.println("UNACK GLE wr = " + wr);
            System.out.println("UNACK GLE cr = " + cr);
        } catch (Exception e) {
            System.out.println("UNACK GLE err = " + e.toString());
        }

        // Error handling:

        // 2.6:
        // Ack - exception
        // Ack override - exception
        // UnAck - no error given
        // UnAck + GLE  - gle error

        // 2.4:
        // Ack - exception
        // Ack override - exception
        // UnAck - no error given
        // UnAck + GLE  - gle error

    }
}