Example usage for com.mongodb DBCollection update

List of usage examples for com.mongodb DBCollection update

Introduction

On this page you can find example usage for com.mongodb DBCollection update.

Prototype

public WriteResult update(final DBObject query, final DBObject update, final boolean upsert,
        final boolean multi) 

Document

Modify an existing document or documents in a collection.
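
A minimal, self-contained sketch of the four-argument overload before the examples gathered from real projects. The connection settings, database, collection, and field names below are placeholders for illustration only; they do not come from the examples that follow.

MongoClient mongo = new MongoClient("localhost", 27017);
DBCollection users = mongo.getDB("testdb").getCollection("users");

DBObject query = new BasicDBObject("name", "alice");
DBObject change = new BasicDBObject("$set", new BasicDBObject("status", "active"));

// upsert = true: insert a new document if nothing matches the query
// multi  = false: modify at most one matching document
WriteResult result = users.update(query, change, true, false);
System.out.println("Documents affected: " + result.getN());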

Usage

From source file:com.edgytech.umongo.DocumentMenu.java

License:Apache License

public void update(ButtonBase button) {
    final DocView dv = (DocView) (UMongo.instance.getTabbedResult().getSelectedUnit());
    TreeNodeDocument node = (TreeNodeDocument) dv.getSelectedNode().getUserObject();
    final DBObject doc = node.getDBObject();

    ((DocBuilderField) getBoundUnit(Item.upUpdate)).setDBObject((BasicDBObject) doc);
    if (!((MenuItem) getBoundUnit(Item.update)).getDialog().show()) {
        return;
    }

    final DBObject query = doc.containsField("_id") ? new BasicDBObject("_id", doc.get("_id")) : doc;
    final DBObject update = ((DocBuilderField) getBoundUnit(Item.upUpdate)).getDBObject();

    if (dv.getDBCursor() == null) {
        // local data
        Tree tree = dv.getTree();
        tree.removeChild(node);
        dv.addDocument(update, null);
        tree.structureComponent();
        tree.expandNode(tree.getTreeNode());
        return;
    }

    final DBCollection col = dv.getDBCursor().getCollection();
    new DbJob() {

        @Override
        public Object doRun() {
            return col.update(query, update, false, false);
        }

        @Override
        public String getNS() {
            return col.getFullName();
        }

        @Override
        public String getShortName() {
            return "Update";
        }

        @Override
        public DBObject getRoot(Object result) {
            BasicDBObject obj = new BasicDBObject("query", query);
            obj.put("update", update);
            return obj;
        }

        @Override
        public void wrapUp(Object res) {
            super.wrapUp(res);
            dv.refresh(null);
        }
    }.addJob();
}

From source file:com.edgytech.umongo.RouterPanel.java

License:Apache License

public void balancer(ButtonBase button) {
    final MongoClient mongo = getRouterNode().getMongoClient();
    final DB config = mongo.getDB("config");
    final DBCollection settings = config.getCollection("settings");

    FormDialog diag = (FormDialog) ((MenuItem) getBoundUnit(Item.balancer)).getDialog();
    diag.xmlLoadCheckpoint();

    final BasicDBObject query = new BasicDBObject("_id", "balancer");
    BasicDBObject balDoc = (BasicDBObject) settings.findOne(query);
    if (balDoc != null) {
        if (balDoc.containsField("stopped"))
            setIntFieldValue(Item.balStopped, balDoc.getBoolean("stopped") ? 1 : 2);
        if (balDoc.containsField("_secondaryThrottle"))
            setIntFieldValue(Item.balSecThrottle, balDoc.getBoolean("_secondaryThrottle") ? 1 : 2);
        BasicDBObject window = (BasicDBObject) balDoc.get("activeWindow");
        if (window != null) {
            setStringFieldValue(Item.balStartTime, window.getString("start"));
            setStringFieldValue(Item.balStopTime, window.getString("stop"));
        }
    }

    if (!diag.show())
        return;

    if (balDoc == null)
        balDoc = new BasicDBObject("_id", "balancer");
    int stopped = getIntFieldValue(Item.balStopped);
    if (stopped > 0)
        balDoc.put("stopped", stopped == 1 ? true : false);
    else
        balDoc.removeField("stopped");
    int throttle = getIntFieldValue(Item.balSecThrottle);
    if (throttle > 0)
        balDoc.put("_secondaryThrottle", throttle == 1 ? true : false);
    else
        balDoc.removeField("_secondaryThrottle");

    if (!getStringFieldValue(Item.balStartTime).trim().isEmpty()) {
        BasicDBObject aw = new BasicDBObject();
        aw.put("start", getStringFieldValue(Item.balStartTime).trim());
        aw.put("stop", getStringFieldValue(Item.balStopTime).trim());
        balDoc.put("activeWindow", aw);
    }
    final BasicDBObject newDoc = balDoc;

    new DbJob() {

        @Override
        public Object doRun() throws IOException {
            return settings.update(query, newDoc, true, false);
        }

        @Override
        public String getNS() {
            return settings.getFullName();
        }

        @Override
        public String getShortName() {
            return "Balancer";
        }

        @Override
        public void wrapUp(Object res) {
            updateComponent();
            super.wrapUp(res);
        }
    }.addJob();
}

From source file:com.github.lsiu.vaadin.mongocontainer.GenerateSampleData.java

private void processData(InputStream in) throws ParserConfigurationException, SAXException, IOException {
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder documentBuilder = dbf.newDocumentBuilder();
    InputSource inSource = new InputSource(in);
    Document doc = documentBuilder.parse(inSource);

    Element root = doc.getDocumentElement();
    root.normalize();
    NodeList nl = root.getElementsByTagName("LP");
    Gson gson = new Gson();
    DBCollection col = db.getCollection("restaurants");

    for (int i = 0; i < nl.getLength(); i++) {
        Node n = nl.item(i);

        if (n.getNodeType() == Node.ELEMENT_NODE) {
            BasicDBObject o = new BasicDBObject();

            Element e = (Element) n;

            String licenseNo = getTagValue("LICNO", e);
            o.put("_id", licenseNo);
            o.put("type", getTagValue("TYPE", e));
            o.put("districtCode", getTagValue("DIST", e));
            o.put("name", getTagValue("SS", e));
            o.put("address", getTagValue("ADR", e));
            o.put("info", getTagValue("INFO", e));

            System.out.println(gson.toJson(o));
            col.update(new BasicDBObject("_id", licenseNo), o, true, false);
        }
    }

}
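
Because each document above carries its own _id, the upsert-by-_id replacement can also be expressed with save(). A hypothetical sketch of that equivalence, reusing the col collection from the method above (the _id and field values are illustrative only):

BasicDBObject doc = new BasicDBObject("_id", "LIC-0001").append("name", "Example Cafe");
col.update(new BasicDBObject("_id", doc.get("_id")), doc, true, false);
col.save(doc); // same effect with the legacy driver: replace if present, insert otherwise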

From source file:com.github.nlloyd.hornofmongo.adaptor.Mongo.java

License:Open Source License

@JSFunction
public void update(final String ns, Object query, Object obj, final Boolean upsert, final Boolean multi) {
    Object rawQuery = BSONizer.convertJStoBSON(query, false);
    Object rawObj = BSONizer.convertJStoBSON(obj, true);
    DBObject bsonQuery = null;
    DBObject bsonObj = null;
    if (rawQuery instanceof DBObject)
        bsonQuery = (DBObject) rawQuery;
    if (rawObj instanceof DBObject)
        bsonObj = (DBObject) rawObj;

    boolean upsertOp = (upsert != null) ? upsert : false;
    boolean multiOp = (multi != null) ? multi : false;

    com.mongodb.DB db = innerMongo.getDB(ns.substring(0, ns.indexOf('.')));
    DBCollection collection = db.getCollection(ns.substring(ns.indexOf('.') + 1));
    collection.setDBEncoderFactory(HornOfMongoBSONEncoder.FACTORY);

    try {
        collection.update(bsonQuery, bsonObj, upsertOp, multiOp);
        saveLastCalledDB(db);
    } catch (MongoException me) {
        handleMongoException(me);
    }
}

From source file:com.hangum.tadpole.mongodb.core.test.UpdateEx.java

License:Open Source License

public static void exam04(DBCollection collection) throws Exception {
    // find type = vps, update all matched documents, set "clients" value to
    // 888
    BasicDBObject updateQuery = new BasicDBObject().append("$set",
            new BasicDBObject().append("clients", "888"));

    // both methods are doing the same thing.
    // collection.updateMulti(new BasicDBObject().append("type", "vps"),
    // updateQuery);
    collection.update(new BasicDBObject().append("type", "vps"), updateQuery, false, true);
}
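
The commented-out updateMulti line hints at a distinction worth spelling out: only operator updates such as $set may be applied with multi = true, whereas an update document without operators replaces the whole matched document. A sketch with hypothetical collection and field names:

public static void updateVsReplace(DBCollection collection) {
    BasicDBObject query = new BasicDBObject("type", "vps");

    // $set changes only the named field; multi = true applies it to every match,
    // which is what collection.updateMulti(query, change) does as well.
    BasicDBObject change = new BasicDBObject("$set", new BasicDBObject("clients", "888"));
    collection.update(query, change, false, true);

    // Without update operators the first matched document is replaced wholesale;
    // the server rejects multi = true for replacement-style updates.
    BasicDBObject replacement = new BasicDBObject("type", "vps").append("clients", "888");
    collection.update(query, replacement, false, false);
}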

From source file:com.ikanow.infinit.e.data_model.store.MongoDbManager.java

License:Apache License

@SuppressWarnings("deprecation")
public static void main(String[] args) throws UnknownHostException {
    MongoClient mc = new MongoClient(args[0]);
    long tnow = 0;
    DB db = mc.getDB("test");
    DBCollection test = db.getCollection("test123");
    BasicDBObject outObj = new BasicDBObject();
    int ITS = 1000;
    test.drop();

    boolean checkPerformance = false;
    boolean checkFunctionality = false;
    boolean checkErrors = false;

    // 1] Performance

    if (checkPerformance) {

        // ack'd
        db.setWriteConcern(WriteConcern.ACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj);
        }
        tnow = new Date().getTime() - tnow;
        System.out.println("1: Ack'd: " + tnow);

        // un ack'd
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        outObj = new BasicDBObject();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj);
        }
        tnow = new Date().getTime() - tnow;
        System.out.println("2: unAck'd: " + tnow);

        // un ack'd but call getLastError
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        outObj = new BasicDBObject();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj);
            db.getLastError();
        }
        tnow = new Date().getTime() - tnow;
        test.drop();
        System.out.println("3: unAck'd but GLEd: " + tnow);

        // ack'd override
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        test.drop();
        tnow = new Date().getTime();
        outObj = new BasicDBObject();
        for (int i = 0; i < ITS; ++i) {
            outObj.remove("_id");
            outObj.put("val", i);
            test.save(outObj, WriteConcern.ACKNOWLEDGED);
            db.getLastError();
        }
        tnow = new Date().getTime() - tnow;
        System.out.println("4: unAck'd but ACKd: " + tnow);

        // Performance Results:
        // 2.6) (unack'd 100ms ... ack'd 27000)
        // 2.4) (same)
    }

    // 2] Functionality

    if (checkFunctionality) {

        // Unack:
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        WriteResult wr = test.update(new BasicDBObject(),
                new BasicDBObject(DbManager.set_, new BasicDBObject("val2", "x")), false, true);
        CommandResult cr = db.getLastError();
        System.out.println("UNACK: wr: " + wr);
        System.out.println("UNACK: cr: " + cr);

        // bonus, check that we get N==0 when insert dup object
        WriteResult wr2 = test.insert(outObj);
        System.out.println("ACK wr2 = " + wr2.getN() + " all = " + wr2);
        CommandResult cr2 = db.getLastError();
        System.out.println("ACK cr2 = " + cr2);

        // Ack1:
        db.setWriteConcern(WriteConcern.ACKNOWLEDGED);
        wr = test.update(new BasicDBObject(), new BasicDBObject(DbManager.set_, new BasicDBObject("val3", "x")),
                false, true);
        cr = db.getLastError();
        System.out.println("ACK1: wr: " + wr);
        System.out.println("ACK1: cr: " + cr);

        // Ack2:
        db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
        wr = test.update(new BasicDBObject(), new BasicDBObject(DbManager.set_, new BasicDBObject("val4", "x")),
                false, true, WriteConcern.ACKNOWLEDGED);
        cr = db.getLastError();
        System.out.println("ACK2: wr: " + wr);
        System.out.println("ACK2: cr: " + cr);

        // bonus, check that we get N==0 when insert dup object
        wr2 = test.insert(outObj);
        System.out.println("ACK wr2 = " + wr2.getN() + " all = " + wr2);

        // Functionality results:
        // 2.6: unack wr == N/A, otherwise both have "n", "ok"
        // 2.4: unack wr == N/A all other wrs + crs identical 
    }

    if (checkErrors) {

        //set up sharding
        DbManager.getDB("admin").command(new BasicDBObject("enablesharding", "test"));
        // Ack:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.ACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")));
            System.out.println("ACK wr = " + wr);
        } catch (Exception e) {
            System.out.println("ACK err = " + e.toString());
        }

        // UnAck:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")), false, false,
                    WriteConcern.ACKNOWLEDGED);
            System.out.println("ACK override wr = " + wr);
        } catch (Exception e) {
            System.out.println("ACK override  err = " + e.toString());
        }

        // UnAck:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")));
            System.out.println("UNACK wr = " + wr);
        } catch (Exception e) {
            System.out.println("UNACK err = " + e.toString());
        }

        // UnAck + GLE:
        try {
            test.drop();
            test.createIndex(new BasicDBObject("key", 1));
            BasicDBObject command1 = new BasicDBObject("shardcollection", "test.test123");
            command1.append("key", new BasicDBObject("key", 1));
            DbManager.getDB("admin").command(command1);

            db.setWriteConcern(WriteConcern.UNACKNOWLEDGED);
            outObj = new BasicDBObject("key", "test");
            test.save(outObj);
            WriteResult wr = test.update(new BasicDBObject(),
                    new BasicDBObject(DbManager.set_, new BasicDBObject("key", "test2")));
            CommandResult cr = db.getLastError();
            System.out.println("UNACK GLE wr = " + wr);
            System.out.println("UNACK GLE cr = " + cr);
        } catch (Exception e) {
            System.out.println("UNACK GLE err = " + e.toString());
        }

        // Error handling:

        // 2.6:
        // Ack - exception
        // Ack override - exception
        // UnAck - no error given
        // UnAck + GLE  - gle error

        // 2.4:
        // Ack - exception
        // Ack override - exception
        // UnAck - no error given
        // UnAck + GLE  - gle error

    }
}

From source file:com.ikanow.infinit.e.data_model.utils.MongoTransactionLock.java

License:Apache License

protected synchronized boolean updateToken(boolean bForce) {
    if (_bHaveControl || bForce) {
        DBCollection cachedCollection = _collections.get();
        BasicDBObject lockObj = new BasicDBObject();

        long nOneUp = Long.parseLong(_savedOneUp);
        lockObj.put(hostname_, getHostname());
        String newOneUp = Long.toString(nOneUp + 1);
        lockObj.put(oneUp_, newOneUp);
        lockObj.put(lastUpdated_, new Date());
        BasicDBObject queryObj = new BasicDBObject();
        queryObj.put(hostname_, _savedHostname);
        queryObj.put(oneUp_, _savedOneUp);
        WriteResult wr = cachedCollection.update(queryObj, new BasicDBObject(MongoDbManager.set_, lockObj),
                false, true);
        // (need the true in case the db is sharded)

        if (wr.getN() > 0) {
            _savedOneUp = newOneUp;
            _bHaveControl = true;
            _nLastCheck = 0;
            return true;
        } else {
            return false;
        }
    }
    return false;
}
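
The query above embeds the previously observed hostname and one-up counter, so the update only matches when nobody else has changed the lock; WriteResult.getN() then reports whether this process won. A reduced sketch of that compare-and-swap idiom, with hypothetical collection and field names:

public static boolean tryTakeLock(DBCollection locks, String host, long expectedVersion) {
    BasicDBObject query = new BasicDBObject("_id", "lock").append("version", expectedVersion);
    BasicDBObject change = new BasicDBObject("$set",
            new BasicDBObject("owner", host).append("version", expectedVersion + 1));
    WriteResult wr = locks.update(query, change, false, true);
    return wr.getN() > 0; // zero matches: another process changed the lock first
}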

From source file:com.ikanow.infinit.e.processing.generic.aggregation.AssociationAggregationUtils.java

License:Open Source License

/**
 * Add events to the elastic search index for events
 * and the mongodb collection
 * so they are searchable for searchsuggest
 * 
 * Step 1.a, try to just update aliases
 * Step 1.b, if fail, create new entry
 * 
 * Step 2, Update totalfreq and doccount
 * 
 * Step 3, After updating totalfreq and doccount, write to ES for every group
 * 
 * @param events
 */
public static void updateEventFeatures(Map<String, Map<ObjectId, AssociationFeaturePojo>> eventFeatures) {
    // Some diagnostic counters:
    int numCacheMisses = 0;
    int numCacheHits = 0;
    int numNewAssocs = 0;
    long entityAggregationTime = new Date().getTime();

    DBCollection col = DbManager.getFeature().getAssociation();

    // (This fn is normally run for a single community id)
    CommunityFeatureCaches.CommunityFeatureCache currCache = null;

    String savedSyncTime = null;
    for (Map<ObjectId, AssociationFeaturePojo> evtCommunity : eventFeatures.values()) {

        Iterator<Map.Entry<ObjectId, AssociationFeaturePojo>> it = evtCommunity.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<ObjectId, AssociationFeaturePojo> evtFeatureKV = it.next();
            try {
                AssociationFeaturePojo evtFeature = evtFeatureKV.getValue();
                long nSavedDocCount = evtFeature.getDoccount();

                ObjectId communityID = evtFeature.getCommunityId();

                if ((null == currCache) || !currCache.getCommunityId().equals(evtFeatureKV.getKey())) {
                    currCache = CommunityFeatureCaches.getCommunityFeatureCache(evtFeatureKV.getKey());
                    if (_diagnosticMode) {
                        if (_logInDiagnosticMode)
                            System.out.println(
                                    "AssociationAggregationUtils.updateEventFeatures, Opened cache for community: "
                                            + evtFeatureKV.getKey());
                    }
                } //TESTED (by hand)               

                // Is this in our cache? If so can short cut a bunch of the DB interaction:
                AssociationFeaturePojo cachedAssoc = currCache.getCachedAssocFeature(evtFeature);
                if (null != cachedAssoc) {
                    if (_incrementalMode) {
                        if (_diagnosticMode) {
                            if (_logInDiagnosticMode)
                                System.out.println(
                                        "AssociationAggregationUtils.updateEventFeatures, skip cached: "
                                                + cachedAssoc.toDb());
                            //TODO (INF-2825): should be continue-ing here so can use delta more efficiently...
                        }
                    } else if (_diagnosticMode) {
                        if (_logInDiagnosticMode)
                            System.out
                                    .println("AssociationAggregationUtils.updateEventFeatures, grabbed cached: "
                                            + cachedAssoc.toDb());
                    }
                    numCacheHits++;
                } //TESTED (by hand)         
                else {
                    numCacheMisses++;
                }

                //try to update
                BasicDBObject query = new BasicDBObject(AssociationFeaturePojo.index_, evtFeature.getIndex());
                query.put(AssociationFeaturePojo.communityId_, communityID);

                //Step1 try to update alias
                //update arrays
                BasicDBObject multiopAliasArrays = new BasicDBObject();
                // Entity1 Alias:
                if (null != evtFeature.getEntity1_index()) {
                    evtFeature.addEntity1(evtFeature.getEntity1_index());
                }
                if (null != evtFeature.getEntity1()) {
                    if ((null == cachedAssoc) || (null == cachedAssoc.getEntity1())
                            || !cachedAssoc.getEntity1().containsAll(evtFeature.getEntity1())) {
                        BasicDBObject multiopE = new BasicDBObject(MongoDbManager.each_,
                                evtFeature.getEntity1());
                        multiopAliasArrays.put(AssociationFeaturePojo.entity1_, multiopE);
                    }
                } //TESTED (by hand)

                // Entity2 Alias:
                if (null != evtFeature.getEntity2_index()) {
                    evtFeature.addEntity2(evtFeature.getEntity2_index());
                }
                if (null != evtFeature.getEntity2()) {
                    if ((null == cachedAssoc) || (null == cachedAssoc.getEntity2())
                            || !cachedAssoc.getEntity2().containsAll(evtFeature.getEntity2())) {
                        BasicDBObject multiopE = new BasicDBObject(MongoDbManager.each_,
                                evtFeature.getEntity2());
                        multiopAliasArrays.put(AssociationFeaturePojo.entity2_, multiopE);
                    }
                } //TESTED (by hand)

                // verb/verb cat alias:
                if (null != evtFeature.getVerb_category()) {
                    evtFeature.addVerb(evtFeature.getVerb_category());
                }
                if (null != evtFeature.getVerb()) {
                    if ((null == cachedAssoc) || (null == cachedAssoc.getVerb())
                            || !cachedAssoc.getVerb().containsAll(evtFeature.getVerb())) {
                        BasicDBObject multiopE = new BasicDBObject(MongoDbManager.each_, evtFeature.getVerb());
                        multiopAliasArrays.put(AssociationFeaturePojo.verb_, multiopE);
                    }
                } //TESTED (by hand)

                // OK - now we can copy across the fields into the cache:
                if (null != cachedAssoc) {
                    currCache.updateCachedAssocFeatureStatistics(cachedAssoc, evtFeature); //(evtFeature is now fully up to date)
                } //TESTED (by hand)

                BasicDBObject updateOp = new BasicDBObject();
                if (!multiopAliasArrays.isEmpty()) {
                    updateOp.put(MongoDbManager.addToSet_, multiopAliasArrays);
                }
                // Document count for this event
                BasicDBObject updateFreqDocCount = new BasicDBObject(AssociationFeaturePojo.doccount_,
                        nSavedDocCount);
                updateOp.put(MongoDbManager.inc_, updateFreqDocCount);

                BasicDBObject fields = new BasicDBObject(AssociationFeaturePojo.doccount_, 1);
                fields.put(AssociationFeaturePojo.entity1_, 1);
                fields.put(AssociationFeaturePojo.entity2_, 1);
                fields.put(AssociationFeaturePojo.verb_, 1);
                //(slightly annoying, since only want these if updating dc but won't know
                // until after i've got this object)

                fields.put(AssociationFeaturePojo.db_sync_time_, 1);
                fields.put(AssociationFeaturePojo.db_sync_doccount_, 1);

                DBObject dboUpdate = null;
                if (_diagnosticMode) {
                    if (null == cachedAssoc) {
                        dboUpdate = col.findOne(query, fields);
                    }
                } else {
                    if (null != cachedAssoc) {
                        col.update(query, updateOp, false, false);
                    } else { // Not cached - so have to grab the feature we're either getting or creating
                        dboUpdate = col.findAndModify(query, fields, new BasicDBObject(), false, updateOp,
                                false, true);
                        // (can use findAndModify because specify index, ie the shard key)
                        // (returns event before the changes above, update the feature object below)
                        // (also atomically creates the object if it doesn't exist so is "distributed-safe")
                    }
                }
                if ((null != cachedAssoc) || ((dboUpdate != null) && !dboUpdate.keySet().isEmpty())) // (feature already exists)
                {
                    AssociationFeaturePojo egp = cachedAssoc;

                    if (null == egp) {
                        egp = AssociationFeaturePojo.fromDb(dboUpdate, AssociationFeaturePojo.class);
                        evtFeature.setDoccount(egp.getDoccount() + nSavedDocCount);
                        evtFeature.setDb_sync_doccount(egp.getDb_sync_doccount());
                        evtFeature.setDb_sync_time(egp.getDb_sync_time());
                        if (null != egp.getEntity1()) {
                            for (String ent : egp.getEntity1())
                                evtFeature.addEntity1(ent);
                        }
                        if (null != egp.getEntity2()) {
                            for (String ent : egp.getEntity2())
                                evtFeature.addEntity2(ent);
                        }
                        if (null != egp.getVerb()) {
                            for (String verb : egp.getVerb())
                                evtFeature.addVerb(verb);
                        }
                    } //TESTED (cached and non-cached cases)
                      // (in the cached case, evtFeature has already been updated by updateCachedAssocFeatureStatistics)

                    if (_diagnosticMode) {
                        if (_logInDiagnosticMode)
                            System.out.println("AssociationAggregationUtils.updateEventFeatures, found: "
                                    + ((BasicDBObject) egp.toDb()).toString());
                        if (_logInDiagnosticMode)
                            System.out.println(
                                    "AssociationAggregationUtils.updateEventFeatures, ^^^ found from query: "
                                            + query.toString() + " / " + updateOp.toString());
                    }
                    // (In background aggregation mode we update db_sync_prio when checking the -otherwise unused, unlike entities- document update schedule) 
                } else // (the object in memory is now an accurate representation of the database, minus some fields we'll now add)
                {
                    numNewAssocs++;

                    // Synchronization settings for the newly created object
                    evtFeature.setDb_sync_doccount(nSavedDocCount);
                    if (null == savedSyncTime) {
                        savedSyncTime = Long.toString(System.currentTimeMillis());
                    }
                    evtFeature.setDb_sync_time(savedSyncTime);

                    // This is all "distributed safe" (apart from the db_syc_xxx and it doesn't matter if that is 
                    // out of date, the update will just be slightly out-of-date at worst) since (otherwise) these fields are 
                    // only set here, and the findAndModify is atomic

                    BasicDBObject baseFields = new BasicDBObject();
                    if (null != evtFeature.getEntity1_index()) {
                        baseFields.put(AssociationFeaturePojo.entity1_index_, evtFeature.getEntity1_index());
                    }
                    if (null != evtFeature.getEntity2_index()) {
                        baseFields.put(AssociationFeaturePojo.entity2_index_, evtFeature.getEntity2_index());
                    }
                    if (null != evtFeature.getVerb_category()) {
                        baseFields.put(AssociationFeaturePojo.verb_category_, evtFeature.getVerb_category());
                    }
                    baseFields.put(AssociationFeaturePojo.assoc_type_, evtFeature.getAssociation_type());
                    baseFields.put(AssociationFeaturePojo.db_sync_doccount_, evtFeature.getDb_sync_doccount());
                    baseFields.put(AssociationFeaturePojo.db_sync_time_, evtFeature.getDb_sync_time());
                    baseFields.put(AssociationFeaturePojo.db_sync_prio_, 1000.0); // (ensures new objects are quickly index-synchronized)

                    if (!_diagnosticMode) {
                        // Store the object
                        col.update(query, new BasicDBObject(MongoDbManager.set_, baseFields));
                    } else {
                        if (_logInDiagnosticMode)
                            System.out.println("AssociationAggregationUtils.updateEventFeatures, not found: "
                                    + query.toString() + " / " + baseFields.toString() + "/ orig_update= "
                                    + updateOp.toString());
                    }

                    // (Note even in background aggregation mode we still perform the feature synchronization
                    //  for new entities - and it has to be right at the end because it "corrupts" the objects)

                } //(end if first time seen)

                if (null == cachedAssoc) { // First time we've seen this locally, so add to cache
                    currCache.addCachedAssocFeature(evtFeature);
                    if (_diagnosticMode) {
                        if (_logInDiagnosticMode)
                            System.out
                                    .println("AssociationAggregationUtils.updateEventFeatures, added to cache: "
                                            + evtFeature.toDb());
                    }
                } //TESTED (by hand)                           
            } catch (Exception e) {
                // Exception, remove from feature list
                it.remove();

                // If an exception occurs log the error
                logger.error("Exception Message: " + e.getMessage(), e);
            }

        } // (end loop over all communities for the set of features sharing an index)
    } // (end loop over indexes) 

    if ((numCacheHits > 0) || (numCacheMisses > 0)) { // ie some assocs were grabbed
        int cacheSize = 0;
        if (null != currCache) {
            cacheSize = currCache.getAssocCacheSize();
        }
        StringBuffer logMsg = new StringBuffer() // (should append key, but don't have that...)
                .append(" assoc_agg_time_ms=").append(new Date().getTime() - entityAggregationTime)
                .append(" total_assocs=").append(eventFeatures.size()).append(" new_assocs=")
                .append(numNewAssocs).append(" cache_misses=").append(numCacheMisses).append(" cache_hits=")
                .append(numCacheHits).append(" cache_size=").append(cacheSize);

        logger.info(logMsg.toString());
    }

}

From source file:com.ikanow.infinit.e.processing.generic.aggregation.AssociationAggregationUtils.java

License:Open Source License

public static void synchronizeEventFeature(AssociationFeaturePojo eventFeature, ObjectId communityId) {
    DBCollection eventFeatureDb = DbManager.getFeature().getAssociation();

    // NOTE: Important that feeds update occurs before synchronization, since the sync "corrupts" the event      

    if (_diagnosticMode || (null != eventFeature.getDb_sync_time())
            || (null != eventFeature.getDb_sync_prio())) {
        // Else this is a new feature so don't need to update the feature DB, only the index (if db_sync_prio null then have to update to avoid b/g aggregation loop)
        // (note that db_sync_prio will in practice not be set when this is a new feature because it will have same sync_doccount as doc_count)

        long nCurrTime = System.currentTimeMillis();
        //(query from top of the function, basically lookup on gaz_index)
        BasicDBObject update2 = new BasicDBObject();
        update2.put(AssociationFeaturePojo.db_sync_time_, Long.toString(nCurrTime));
        update2.put(AssociationFeaturePojo.db_sync_doccount_, eventFeature.getDoccount());
        BasicDBObject update = new BasicDBObject(MongoDbManager.set_, update2);
        // (also can be added to below)
        BasicDBObject update3 = new BasicDBObject(EntityFeaturePojo.db_sync_prio_, 1);
        update.put(MongoDbManager.unset_, update3);
        BasicDBObject query = new BasicDBObject(AssociationFeaturePojo.index_, eventFeature.getIndex());
        query.put(AssociationFeaturePojo.communityId_, communityId);

        // Keep the number of entity1 and entity2 sets down to a reasonable number
        // (In the end would like to be able to do this based on date rather than (essentially) completely randomly)
        int nSize;
        BasicDBObject toPull = null;
        if (null != eventFeature.getEntity1()) {
            if ((nSize = eventFeature.getEntity1().size()) > AssociationFeaturePojo.entity_MAXFIELDS) {
                if (null == toPull)
                    toPull = new BasicDBObject();
                ArrayList<String> ent1ToRemove = new ArrayList<String>(
                        eventFeature.getEntity1().size() - AssociationFeaturePojo.entity_MAXFIELDS);
                Iterator<String> it = eventFeature.getEntity1().iterator();
                while (it.hasNext() && (nSize > AssociationFeaturePojo.entity_MAXFIELDS)) {
                    String ent = it.next();
                    if (-1 == ent.indexOf('/')) { // (ie don't remove the index)
                        nSize--;
                        it.remove(); // (this removes from the index)
                        ent1ToRemove.add(ent);
                    }
                }
                toPull.put(AssociationFeaturePojo.entity1_, ent1ToRemove);
                // (this removes from the database)
            }
        }
        if (null != eventFeature.getEntity2()) {
            if ((nSize = eventFeature.getEntity2().size()) > AssociationFeaturePojo.entity_MAXFIELDS) {
                if (null == toPull)
                    toPull = new BasicDBObject();
                ArrayList<String> ent2ToRemove = new ArrayList<String>(
                        eventFeature.getEntity2().size() - AssociationFeaturePojo.entity_MAXFIELDS);
                Iterator<String> it = eventFeature.getEntity2().iterator();
                while (it.hasNext() && (nSize > AssociationFeaturePojo.entity_MAXFIELDS)) {
                    String ent = it.next();
                    if (-1 == ent.indexOf('/')) { // (ie don't remove the index)
                        nSize--;
                        it.remove(); // (this removes from the index)
                        ent2ToRemove.add(ent);
                    }
                }
                toPull.put(AssociationFeaturePojo.entity2_, ent2ToRemove);
                // (this removes from the database)
            }
        }
        if (null != toPull) {
            update.put(MongoDbManager.pullAll_, toPull);
            // (this removes from the database)
        }
        //TESTED (2.1.4.3b, including no index removal clause)

        if (_diagnosticMode) {
            if ((null != eventFeature.getDb_sync_time()) || (null != eventFeature.getDb_sync_prio())) {
                if (_logInDiagnosticMode)
                    System.out.println("AssociationAggregationUtils.synchronizeEventFeature, featureDB: "
                            + query.toString() + " / " + update.toString());
            } else {
                if (_logInDiagnosticMode)
                    System.out.println(
                            "(WOULD NOT RUN) EventAggregationUtils.synchronizeEventFeature, featureDB: "
                                    + query.toString() + " / " + update.toString());
            }
        } else {
            eventFeatureDb.update(query, update, false, true);
        }
    }

    if (_diagnosticMode) {
        if (_logInDiagnosticMode)
            System.out.println("AssociationAggregationUtils.synchronizeEventFeature, synchronize: "
                    + new StringBuffer(eventFeature.getIndex()).append(':').append(communityId).toString()
                    + " = " + IndexManager.mapToIndex(eventFeature, new AssociationFeaturePojoIndexMap()));
    } else {
        ElasticSearchManager esm = IndexManager.getIndex(AssociationFeaturePojoIndexMap.indexName_);
        esm.addDocument(eventFeature, new AssociationFeaturePojoIndexMap(), null, true);
    }
}

From source file:com.ikanow.infinit.e.processing.generic.aggregation.AssociationAggregationUtils.java

License:Open Source License

public static void markAssociationFeatureForSync(AssociationFeaturePojo assocFeature, ObjectId communityId) {
    DBCollection assocFeatureDb = DbManager.getFeature().getAssociation();
    double dPrio = 100.0 * (double) assocFeature.getDoccount()
            / (0.01 + (double) assocFeature.getDb_sync_doccount());
    assocFeature.setDb_sync_prio(dPrio);
    BasicDBObject query = new BasicDBObject(AssociationFeaturePojo.index_, assocFeature.getIndex());
    query.put(AssociationFeaturePojo.communityId_, communityId);
    BasicDBObject update = new BasicDBObject(MongoDbManager.set_,
            new BasicDBObject(AssociationFeaturePojo.db_sync_prio_, dPrio));
    if (_diagnosticMode) {
        if (_logInDiagnosticMode)
            System.out.println("EntityAggregationUtils.markAssociationFeatureForSync, featureDB: "
                    + query.toString() + " / " + update.toString());
    } else {
        assocFeatureDb.update(query, update, false, true);
    }
}