Example usage for com.mongodb BasicDBObject getLong

Introduction

This page shows example usage of the com.mongodb BasicDBObject getLong method.

Prototype

public long getLong(final String key) 

Document

Returns the value of a field as a long.
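
Note that getLong reads the stored value as a java.lang.Number and widens it, so an Integer field comes back as a long; a missing or null field, however, causes a NullPointerException when the primitive is produced. A minimal defensive sketch (the method and field name below are hypothetical):

public long readCount(DBCollection collection) {
    BasicDBObject doc = (BasicDBObject) collection.findOne();
    // guard with containsField, since getLong returns a primitive long and cannot signal "absent" via null
    if ((null != doc) && doc.containsField("count")) {
        return doc.getLong("count"); // widens any stored Number to long
    }
    return 0L; // no document, or field absent - fall back to a default
}

Where a fallback value is acceptable, the two-argument overload getLong(final String key, final long def), inherited from BasicBSONObject, returns def for an absent field and makes the explicit containsField check unnecessary.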

Usage

From source file:com.ikanow.infinit.e.api.knowledge.processing.ScoringUtils.java

License:Open Source License

@SuppressWarnings("unchecked")
private void stage1_initialCountingLoop(DBCursor docs, AdvancedQueryPojo.QueryScorePojo scoreParams,
        int toReturn, StatisticsPojo scores, LinkedList<BasicDBObject> standaloneEventsReturn,
        int nCommunities) {
    double s0_nQuerySubsetDocCountInv = 1.0 / (double) _s0_nQuerySubsetDocCount;

    // Some memory management:
    DBCollection dbc = MongoDbManager.getDocument().getMetadata();
    DBDecoderFactory defaultDecoder = dbc.getDBDecoderFactory();

    try {
        SizeReportingBasicBSONDecoder sizeReportingDecoder = new SizeReportingBasicBSONDecoder();
        dbc.setDBDecoderFactory(sizeReportingDecoder);
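        // (getSize() on the size-reporting decoder appears to report the cumulative bytes decoded; used below to bound memory use)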

        long currMemUsage = 0;
        int ndocs = 0;
        long lastBatch = 0L;

        long initialUnusedMemory = Runtime.getRuntime().maxMemory() - Runtime.getRuntime().totalMemory();
        long initialFreeMemory = Runtime.getRuntime().freeMemory();

        for (DBObject f0 : docs) {
            BasicDBObject f = (BasicDBObject) f0;
            long newMemUsage = sizeReportingDecoder.getSize();
            if ((newMemUsage - currMemUsage) > 0) { // check every batch               
                long now = new Date().getTime();

                //DEBUG
                //logger.warn(ndocs + " : " + (now - lastBatch) + " : " + newMemUsage + " VS " + Runtime.getRuntime().maxMemory() + " UNUSED " + (Runtime.getRuntime().maxMemory() - Runtime.getRuntime().totalMemory()) + " FREE " + Runtime.getRuntime().freeMemory());

                // Check vs total memory:
                long runtimeMem = Runtime.getRuntime().maxMemory();
                // note newMemUsage is the input memory ... gets expanded ~6x by the BSON-ification, allowed at most 1/4 of memory...
                // Also if we're taking more than 20s for a batch then limp over the limit and exit...
                if (((newMemUsage * 24) > runtimeMem)
                        || (((now - lastBatch) > 20000L) && (ndocs >= toReturn))) {
                    long finalUnusedMemory = Runtime.getRuntime().maxMemory()
                            - Runtime.getRuntime().totalMemory();
                    long finalFreeMemory = Runtime.getRuntime().freeMemory();

                    logger.error("Query truncated memUsage=" + newMemUsage + ", memory=" + runtimeMem
                            + ", docs=" + ndocs + ", totaldocs=" + scores.found + ", init_free_mem="
                            + initialFreeMemory + ", end_free_mem=" + finalFreeMemory + ", init_unused_mem="
                            + initialUnusedMemory + ", end_unused_mem=" + finalUnusedMemory);
                    break;
                } //TESTED
                currMemUsage = newMemUsage;
                lastBatch = now;
            } //TESTED
            ndocs++;

            // Simple handling for standalone events
            if ((null != _s0_standaloneEventAggregator) && !_s0_bNeedToCalcSig) {
                //if _s0_bNeedToCalcSig then do this elsewhere
                ScoringUtils_Associations.addStandaloneEvents(f, 0.0, 0, _s0_standaloneEventAggregator,
                        _s0_bEntityTypeFilterPositive, _s0_bAssocVerbFilterPositive, _s0_entityTypeFilter,
                        _s0_assocVerbFilter, _s0_bEvents, _s0_bSummaries, _s0_bFacts);
            } //TESTED

            if (!_s0_bNeedToCalcSig) {
                continue;
            } //TESTED

            if (nCommunities > 1) { // (could have pan-community entities)
                ObjectId communityId = (ObjectId) f.get(DocumentPojo.communityId_);
                if (null != communityId) { // (have big problems if so, but anyway!)
                    int retval = _s0_multiCommunityHandler.community_getIdAndInitialize(communityId,
                            _s1_entitiesInDataset);
                    // (returns an int community id but also sets it into the cache, so just use that below)
                    if (Integer.MIN_VALUE == retval) {
                        //this document cannot be viewed from within this set of communities
                        continue;
                    }
                }
            } //TESTED      

            TempDocBucket docBucket = new TempDocBucket();
            docBucket.dbo = f;
            ObjectId id = (ObjectId) f.get(DocumentPojo._id_);

            // If we're going to weight relevance in, or we need the geo temporal decay:
            if ((0 != scoreParams.relWeight) || (null != scoreParams.timeProx)
                    || (null != scoreParams.geoProx)) {
                StatisticsPojo.Score scoreObj = scores.getScore().get(id);
                if (null != scoreObj) {
                    docBucket.explain = scoreObj.explain; // (will normally be null)
                    docBucket.luceneScore = scoreObj.score;
                    if ((null != scoreParams.timeProx) || (null != scoreParams.geoProx)) {
                        if (scoreObj.decay >= 0.0) {
                            docBucket.geoTemporalDecay = scoreObj.decay;
                        }
                        // (see also below for low accuracy geo scoring)
                    }
                } else {
                    docBucket.luceneScore = 1.0;
                }
            } //TESTED
            else if (this._s0_sortingByDate) {
                StatisticsPojo.Score scoreObj = scores.getScore().get(id);
                if (null != scoreObj) {
                    docBucket.nLuceneIndex = scoreObj.nIndex;
                }
            }
            docBucket.manualWeighting = this.getManualScoreWeights(scoreParams, f);

            BasicDBList l = (BasicDBList) (f.get(DocumentPojo.entities_));
            if (null != l) {

                long nEntsInDoc = l.size();
                double dBestGeoScore = 0.0; // (for low accuracy geo only)
                for (Iterator<?> e0 = l.iterator(); e0.hasNext();) {
                    BasicDBObject e = (BasicDBObject) e0.next();
                    BasicDBObject tmpGeotag = null;
                    if (_s3_bLowAccuracyGeo || (null != _s1_dManualGeoDecay_latLonInvdecay)) {
                        // low accuracy geo, need to look for geotag
                        tmpGeotag = (BasicDBObject) e.get(EntityPojo.geotag_);
                    }

                    // Get attributes

                    double freq = -1.0;
                    long ntotaldoccount = -1;
                    String entity_index;
                    Double sentiment = null;
                    try {
                        sentiment = (Double) e.get(EntityPojo.sentiment_);
                        ntotaldoccount = e.getLong(EntityPojo.doccount_);
                        freq = e.getDouble(EntityPojo.frequency_);
                        entity_index = e.getString(EntityPojo.index_);
                        if (null == entity_index) {
                            // Just bypass the entity 
                            e.put(EntityPojo.significance_, 0.0);
                            nEntsInDoc--;
                            continue;
                        }
                    } catch (Exception ex) {
                        try {
                            String sfreq;
                            if (ntotaldoccount < 0) {
                                sfreq = e.getString(EntityPojo.doccount_);
                                ntotaldoccount = Long.valueOf(sfreq);
                            }
                            if (freq < -0.5) {
                                sfreq = e.getString(EntityPojo.frequency_);
                                freq = Long.valueOf(sfreq).doubleValue();
                            }
                            entity_index = e.getString(EntityPojo.index_);
                            if (null == entity_index) {
                                // Just bypass the entity 
                                e.put(EntityPojo.significance_, 0.0);
                                nEntsInDoc--;
                                continue;
                            }
                        } catch (Exception e2) {
                            // Just bypass the entity 
                            e.put(EntityPojo.significance_, 0.0);
                            nEntsInDoc--;
                            continue;
                        }
                    } //TESTED

                    // First loop through is just counting

                    // Retrieve entity (create/initialize if necessary)
                    EntSigHolder shp = _s1_entitiesInDataset.get(entity_index);
                    if (null == shp) {
                        if (ntotaldoccount > (long) _s0_globalDocCount) { // obviously can't have more entities-in-docs than docs...
                            ntotaldoccount = (long) _s0_globalDocCount;
                        }
                        shp = new EntSigHolder(entity_index, ntotaldoccount, _s0_multiCommunityHandler);

                        // Stage 1a alias handling: set up infrastructure, calculate doc overlap
                        if (null != _s1_aliasLookup) {
                            stage1_initAlias(shp);
                        }
                        if ((null != shp.aliasInfo) && (null == shp.masterAliasSH)) { // this is the discard alias
                            nEntsInDoc--;
                            continue;
                        } //TESTED

                        // Check if entity is in type filter list
                        if (null != _s0_entityTypeFilter) {
                            String entType = null;
                            if (null != shp.aliasInfo) {
                                entType = shp.aliasInfo.getType();
                            } else {
                                entType = e.getString(EntityPojo.type_);
                            }
                            if (_s0_bEntityTypeFilterPositive) {
                                if ((null != entType)
                                        && !_s0_entityTypeFilter.contains(entType.toLowerCase())) {
                                    nEntsInDoc--;
                                    continue;
                                }
                            } else if ((null != entType)
                                    && _s0_entityTypeFilter.contains(entType.toLowerCase())) {
                                //(negative filter)
                                nEntsInDoc--;
                                continue;
                            }

                        } //TESTED (end entity filter)

                        // Geo:
                        if (null != shp.aliasInfo) {
                            if (null != shp.aliasInfo.getGeotag()) { //Geo, overwrite/create tmpGeotag
                                if (_s3_bLowAccuracyGeo || _s3_bExtraAliasGeo
                                        || (null != _s1_dManualGeoDecay_latLonInvdecay)) {
                                    // Always capture alias geo, even if not in low accuracy mode because we add it to the 
                                    // legitimate geo:
                                    if ((_s3_bLowAccuracyGeo || _s3_bExtraAliasGeo)
                                            && (null == _s3_geoBuckets)) {
                                        // Initialize the buckets if this is for aggregation not just decay
                                        _s3_geoBuckets = (LinkedList<EntSigHolder>[]) new LinkedList[_s3_nGEO_BUCKETS];
                                    }

                                    if (null == tmpGeotag) {
                                        tmpGeotag = new BasicDBObject();
                                    }
                                    tmpGeotag.put(GeoPojo.lat_, shp.aliasInfo.getGeotag().lat);
                                    tmpGeotag.put(GeoPojo.lon_, shp.aliasInfo.getGeotag().lon);

                                    if (null != shp.aliasInfo.getOntology_type()) {
                                        e.put(EntityPojo.ontology_type_, shp.aliasInfo.getOntology_type());
                                    }
                                }
                            }
                        } //TESTED (end geo for aggregation or decay)

                        _s1_entitiesInDataset.put(entity_index, shp);
                        // end Stage 1a alias handling
                    } //(end if is alias)

                    // Stage 1b alias handling: calculate document counts (taking overlaps into account)
                    if (null != shp.masterAliasSH) {
                        // Counts:
                        shp.masterAliasSH.nTotalDocCount++;
                        // docs including overlaps
                        shp.masterAliasSH.avgFreqOverQuerySubset += freq;

                        // Keep track of overlaps:
                        if (f != shp.masterAliasSH.unusedDbo) {
                            shp.masterAliasSH.unusedDbo = f;
                            // (note this is only used in stage 1, alias.unusedDbo is re-used differently in stage 3/4)
                            shp.masterAliasSH.nDocCountInQuerySubset++;
                            // non-overlapping docs ie < shp.nDocCountInQuerySubset
                        }

                        // Sentiment:
                        shp.masterAliasSH.positiveSentiment += shp.positiveSentiment;
                        shp.masterAliasSH.negativeSentiment += shp.negativeSentiment;
                        if (null != sentiment) {
                            shp.masterAliasSH.nTotalSentimentValues++;
                        }

                    } //TESTED (end if is alias)
                      // end Stage 1b

                    // Pan-community logic (this needs to be before the entity object is updated)
                    if (_s0_multiCommunityHandler.isActive()) {
                        _s0_multiCommunityHandler.community_updateCorrelations(shp, ntotaldoccount,
                                entity_index);
                    } else { // (Once we've started multi-community logic, this is no longer desirable)
                        if ((ntotaldoccount > shp.nTotalDocCount) && (ntotaldoccount <= _s0_globalDocCount)) {
                            shp.nTotalDocCount = ntotaldoccount;
                        }
                        //(note there used to be some cases where we adjusted for dc/tf==0, but the 
                        // underlying issue in the data model that caused this has been fixed, so it's 
                        // now a pathological case that can be ignored)
                    } //(TESTED)

                    // Update counts:
                    _s1_sumFreqInQuerySubset += freq;
                    shp.avgFreqOverQuerySubset += freq;
                    shp.nDocCountInQuerySubset++;
                    shp.decayedDocCountInQuerySubset += docBucket.geoTemporalDecay;
                    // (note this doesn't handle low accuracy geo-decay ... we'll address that via a separate term)

                    TempEntityInDocBucket entBucket = new TempEntityInDocBucket();
                    entBucket.dbo = e;
                    entBucket.freq = freq;
                    entBucket.doc = docBucket;
                    shp.entityInstances.add(entBucket);
                    if (null != tmpGeotag) { // (only needed for low accuracy geo aggregation)

                        if ((_s3_bLowAccuracyGeo || _s3_bExtraAliasGeo) && (null == shp.geotag)) { // (first time for shp only)
                            shp.geotag = tmpGeotag;
                            shp.geotaggedEntity = e; // (ie for onto type, which has been overwritten in the alias case...)
                        }
                        if (null != _s1_dManualGeoDecay_latLonInvdecay) {
                            // Emulate scripted Lucene calculations
                            double minlat = tmpGeotag.getDouble(GeoPojo.lat_);
                            double minlon = tmpGeotag.getDouble(GeoPojo.lon_);
                            double paramlat = _s1_dManualGeoDecay_latLonInvdecay[0];
                            double paramlon = _s1_dManualGeoDecay_latLonInvdecay[1];
                            double gdecay = _s1_dManualGeoDecay_latLonInvdecay[2];
                            char ontCode = GeoOntologyMapping
                                    .encodeOntologyCode(e.getString(EntityPojo.ontology_type_));
                            double dDecay = QueryDecayScript.getGeoDecay(minlat, minlon, paramlat, paramlon,
                                    gdecay, ontCode);
                            if (dDecay > dBestGeoScore) {
                                dBestGeoScore = dDecay;
                            }
                        } //TESTED
                    } //(end if entity has geo and need to process entity geo)

                    if (freq > shp.maxFreq) {
                        shp.maxFreq = freq;
                    }
                    // Sentiment:
                    if ((null != sentiment) && (Math.abs(sentiment) <= 1.1)) { // (actually 1.0)
                        shp.nTotalSentimentValues++;
                        if (sentiment > 0.0) {
                            shp.positiveSentiment += sentiment;
                        } else {
                            shp.negativeSentiment += sentiment;
                        }
                    } else if (null != sentiment) { // corrupt sentiment for some reason?!
                        e.put(EntityPojo.sentiment_, null);
                    }
                    docBucket.docLength += freq;

                } //(end loop over entities)

                docBucket.nLeftToProcess = nEntsInDoc;
                docBucket.nEntsInDoc = (int) nEntsInDoc;

                if (null != this._s1_dManualGeoDecay_latLonInvdecay) { // Low accuracy geo-calculations
                    docBucket.geoTemporalDecay *= dBestGeoScore;
                    docBucket.luceneScore *= dBestGeoScore;
                    _s2_dAvgLowAccuracyGeoDecay += dBestGeoScore * s0_nQuerySubsetDocCountInv;
                } //TESTED            

            } // (end if feed has entities)

            // Handle documents with no entities - can still promote them
            if (0 == docBucket.nLeftToProcess) { // (use this rather than doc length in case all the entities had freq 0)
                _s1_noEntityBuckets.add(docBucket);
            }

        } // (end loop over feeds)
          //TESTED
    } finally {
        dbc.setDBDecoderFactory(defaultDecoder);
    }
}

From source file:com.ikanow.infinit.e.core.utils.SourceUtils.java

License:Open Source License

public static void updateHarvestStatus(SourcePojo source, HarvestEnum harvestStatus, List<DocumentPojo> added,
        long nDocsDeleted, String extraMessage) {
    // Handle successful harvests where the max docs were reached, so don't want to respect the searchCycle
    if ((harvestStatus == HarvestEnum.success) && (source.reachedMaxDocs())) {
        harvestStatus = HarvestEnum.success_iteration;
    }
    // Always update status object in order to release the "in_progress" lock
    // (make really really sure we don't exception out before doing this!)

    BasicDBObject query = new BasicDBObject(SourcePojo._id_, source.getId());
    BasicDBObject setClause = new BasicDBObject(SourceHarvestStatusPojo.sourceQuery_harvest_status_,
            harvestStatus.toString());
    if ((null != added) && !added.isEmpty()) {
        setClause.put(SourceHarvestStatusPojo.sourceQuery_extracted_, new Date());
    }
    if (null != extraMessage) {
        if ((null == source.getHarvestStatus()) || (null == source.getHarvestStatus().getHarvest_message())) {
            setClause.put(SourceHarvestStatusPojo.sourceQuery_harvest_message_, extraMessage);
        } else {
            source.getHarvestStatus()
                    .setHarvest_message(source.getHarvestStatus().getHarvest_message() + "\n" + extraMessage);
            setClause.put(SourceHarvestStatusPojo.sourceQuery_harvest_message_,
                    source.getHarvestStatus().getHarvest_message());
        }
    }
    BasicDBObject update = new BasicDBObject(MongoDbManager.set_, setClause);

    int docsAdded = 0;
    if (null != added) {
        docsAdded = added.size();
    }
    BasicDBObject incClause = new BasicDBObject(SourceHarvestStatusPojo.sourceQuery_doccount_,
            docsAdded - nDocsDeleted);
    update.put(MongoDbManager.inc_, incClause);

    // Special case, if searchCycle_secs == 0 and not success_iteration, then suspend:
    if ((harvestStatus != HarvestEnum.success_iteration) && (null != source.getSearchCycle_secs())
            && (0 == source.getSearchCycle_secs())) {
        setClause.put(SourcePojo.searchCycle_secs_, -1);
    }

    if (null != source.getDistributionTokens()) { // Distribution logic (specified and also enabled - eg ignore Feed/DB)
        updateHarvestDistributionState_tokenComplete(source, harvestStatus, incClause, setClause);
    }
    if (setClause.isEmpty()) { // (ie got removed by the distribution logic above)
        update.remove(MongoDbManager.set_);
    } //TESTED

    long nTotalDocsAfterInsert = 0;
    BasicDBObject fieldsToReturn = new BasicDBObject(SourceHarvestStatusPojo.sourceQuery_doccount_, 1);
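    // findAndModify(query, fields, sort, remove, update, returnNew, upsert):
    // returnNew is true here, so the returned source reflects the $inc applied above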
    BasicDBObject updatedSource = (BasicDBObject) DbManager.getIngest().getSource().findAndModify(query,
            fieldsToReturn, null, false, update, true, false);
    BasicDBObject harvestStatusObj = (BasicDBObject) updatedSource.get(SourcePojo.harvest_);
    if (null != harvestStatusObj) {
        // (getLong returns a primitive long and throws if the field is absent,
        //  so guard with containsField rather than checking for null)
        if (harvestStatusObj.containsField(SourceHarvestStatusPojo.doccount_)) {
            nTotalDocsAfterInsert = harvestStatusObj.getLong(SourceHarvestStatusPojo.doccount_);
        }
    }
    //TESTED

    // Prune documents if necessary
    if ((null != source.getMaxDocs()) && (nTotalDocsAfterInsert > source.getMaxDocs())) {
        long nToPrune = (nTotalDocsAfterInsert - source.getMaxDocs());
        SourceUtils.pruneSource(source, (int) nToPrune, -1);
        nDocsDeleted += nToPrune;

        // And update to reflect that it now has max docs...
        BasicDBObject update2_1 = new BasicDBObject(SourceHarvestStatusPojo.sourceQuery_doccount_,
                source.getMaxDocs());
        BasicDBObject update2 = new BasicDBObject(DbManager.set_, update2_1);
        DbManager.getIngest().getSource().update(query, update2);
    }
    //TESTED      

    if ((null != source.getTimeToLive_days())) {
        nDocsDeleted += SourceUtils.pruneSource(source, Integer.MAX_VALUE, source.getTimeToLive_days());
    } //TODO: TOTEST

    // (OK now the only thing we really had to do is complete, add some handy metadata)

    // Also update the document count table in doc_metadata:
    if (docsAdded > 0) {
        if (1 == source.getCommunityIds().size()) { // (simple/usual case, just 1 community)
            query = new BasicDBObject(DocCountPojo._id_, source.getCommunityIds().iterator().next());
            update = new BasicDBObject(MongoDbManager.inc_,
                    new BasicDBObject(DocCountPojo.doccount_, docsAdded - nDocsDeleted));
            if ((docsAdded != 0) || (nDocsDeleted != 0)) {
                update.put(DbManager.set_, new BasicDBObject(DocCountPojo.extracted_, new Date()));
            }
            DbManager.getDocument().getCounts().update(query, update, true, false);
        } else if (!source.getCommunityIds().isEmpty()) { // Complex case since docs can belong to diff communities (but they're usually somewhat grouped)
            Map<ObjectId, Integer> communityMap = new HashMap<ObjectId, Integer>();
            for (DocumentPojo doc : added) {
                ObjectId communityId = doc.getCommunityId();
                Integer count = communityMap.get(communityId);
                communityMap.put(communityId, (count == null ? 1 : count + 1));
            } //end loop over added documents (updating the separate community counts)
            long nDocsDeleted_byCommunity = nDocsDeleted / source.getCommunityIds().size();
            // (can't do better than assume a uniform distribution - the whole thing gets recalculated weekly anyway...)

            for (Map.Entry<ObjectId, Integer> communityInfo : communityMap.entrySet()) {
                query = new BasicDBObject(DocCountPojo._id_, communityInfo.getKey());
                update = new BasicDBObject(MongoDbManager.inc_, new BasicDBObject(DocCountPojo.doccount_,
                        communityInfo.getValue() - nDocsDeleted_byCommunity));
                if ((communityInfo.getValue() != 0) || (nDocsDeleted_byCommunity != 0)) {
                    update.put(DbManager.set_, new BasicDBObject(DocCountPojo.extracted_, new Date()));
                }
                DbManager.getDocument().getCounts().update(query, update, true, false);
                // (true for upsert, false for multi add)
            }
        } //(never called in practice - tested up until 5/2/2014)
    }
}

From source file:com.ikanow.infinit.e.processing.generic.aggregation.AggregationManager.java

License:Open Source License

public static void updateDocEntitiesFromDeletedDocuments(String uuid) {
    String outCollection = new StringBuilder(uuid).append("_AggregationUtils").toString();
    try {
        PropertiesManager props = new PropertiesManager();
        if (props.getAggregationDisabled()) { // (no need to do this)
            return;
        }
        DBCollection outColl = DbManager.getDB("doc_metadata").getCollection(outCollection);

        DBCursor dbc = outColl.find();
        for (DBObject dbo : dbc) {
            BasicDBObject entityEl = (BasicDBObject) dbo;
            BasicDBObject entityVal = (BasicDBObject) entityEl.get("value");

            long nDocDecrement = entityVal.getLong("dc");
            long nFreqDecrement = entityVal.getLong("f");
            long nCurrFreq = entityVal.getLong("tf");
            long nCurrDocCount = entityVal.getLong("tdc");
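
            // (getLong unboxes via Number and throws a NullPointerException if any of these fields is absent)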

            // (These are by construction the lowest values so this will provide some defence against going -ve)
            if (nDocDecrement > nCurrDocCount) {
                nDocDecrement = nCurrDocCount;
            }
            if (nFreqDecrement > nCurrFreq) {
                nFreqDecrement = nCurrFreq;
            }

            BasicDBObject entityId = (BasicDBObject) entityEl.get("_id");
            ObjectId commId = null;
            Object commObj = entityId.get("comm");
            if (commObj instanceof ObjectId) {
                commId = entityId.getObjectId("comm");
            }
            String index = (String) entityId.get("index");
            if ((null == index) || (null == commId)) {
                continue; // random error
            }

            BasicDBObject updateQuery = new BasicDBObject(EntityFeaturePojo.index_, index);
            updateQuery.put(EntityFeaturePojo.communityId_, commId);
            BasicDBObject entityUpdate1 = new BasicDBObject(EntityFeaturePojo.doccount_, -nDocDecrement);
            entityUpdate1.put(EntityFeaturePojo.totalfreq_, -nFreqDecrement);
            BasicDBObject entityUpdate = new BasicDBObject(DbManager.inc_, entityUpdate1);

            if (_diagnosticMode) {
                if (_logInDiagnosticMode)
                    System.out.println("UPDATE FEATURE DATABASE: " + updateQuery.toString() + "/"
                            + entityUpdate.toString());
            } else {
                DbManager.getFeature().getEntity().update(updateQuery, entityUpdate);
                // (can be a single query because the query is on index, the shard)
            }
            //TESTED

            if ((nDocDecrement < nCurrDocCount) && (nDocDecrement * 10 > nCurrDocCount)) {
                // ie there are some documents left
                // and the doc count has shifted by more than 10%
                BasicDBObject updateQuery2 = new BasicDBObject(EntityPojo.docQuery_index_, index);
                updateQuery2.put(DocumentPojo.communityId_, commId);
                BasicDBObject entityUpdate2_1 = new BasicDBObject(EntityPojo.docUpdate_doccount_,
                        nCurrDocCount - nDocDecrement);
                entityUpdate2_1.put(EntityPojo.docUpdate_totalfrequency_, nCurrFreq - nFreqDecrement);
                BasicDBObject entityUpdate2 = new BasicDBObject(DbManager.set_, entityUpdate2_1);

                if (_diagnosticMode) {
                    if (_logInDiagnosticMode)
                        System.out.println("UPDATE DOC DATABASE: " + updateQuery2.toString() + "/"
                                + entityUpdate2.toString());
                } else {
                    DbManager.getDocument().getMetadata().update(updateQuery2, entityUpdate2, false, true);
                }
            }
        } //TESTED (including when to update logic above)
    } catch (Exception e) {
        e.printStackTrace();
    }

    // Tidy up
    DbManager.getDB("doc_metadata").getCollection(outCollection).drop();
}

From source file:com.images3.data.impl.ImageMetricsServiceImplMongoDB.java

License:Apache License

@Override
public long calculateNumber(String imagePlantId, ImageMetricsType type) {
    long counts = 0;
    List<DBObject> objects = selectMetricsByImagePlantId(imagePlantId);
    for (DBObject obj : objects) {
        BasicDBObject object = (BasicDBObject) obj;
        counts += object.getLong(type.toString());
    }
    return counts;
}

From source file:com.images3.data.impl.ImageMetricsServiceImplMongoDB.java

License:Apache License

@Override
public long calculateNumber(TemplateIdentity templateId, ImageMetricsType type) {
    BasicDBObject object = (BasicDBObject) selectMetricsByTempalteId(templateId);
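    // (throws a NullPointerException if no metrics record matches or the field is absent)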
    return object.getLong(type.toString());
}

From source file:com.images3.data.impl.MongoDBObjectMapper.java

License:Apache License

public PageCursor mapToPageCursor(BasicDBObject source) {
    Page page = new Page(source.getInt("start"), source.getInt("size"));
    return new PageCursor(source.getString("id"), source.getString("previousPageCursorId"), page,
            new Date(source.getLong("creationTime")));
}

From source file:com.images3.data.impl.MongoDBObjectMapper.java

License:Apache License

public ImagePlantOS mapToImagePlantOS(BasicDBObject source) {
    int maximumImageSize = MaximumImageSize.UNLIMITED;
    if (source.containsField("maximumImageSize")) { // (containsField, not Map.containsValue, checks for the key)
        maximumImageSize = source.getInt("maximumImageSize");
    }
    return new ImagePlantOS(source.getString("id"), source.getString("name"),
            new Date(source.getLong("creationTime")), mapToAmazonS3Bucket((BasicDBObject) source.get("bucket")),
            source.getString("masterTemplateName"), source.getLong("numberOfTemplates"), maximumImageSize);
}

From source file:com.images3.data.impl.MongoDBObjectMapper.java

License:Apache License

public ImageOS mapToImageOS(BasicDBObject source) {
    return new ImageOS(new ImageIdentity(source.getString("imagePlantId"), source.getString("id")),
            new Date(source.getLong("dateTime")), mapToImageMetadata((BasicDBObject) source.get("metadata")),
            mapToImageVersion((BasicDBObject) source.get("version")));
}

From source file:com.images3.data.impl.MongoDBObjectMapper.java

License:Apache License

public ImageMetadata mapToImageMetadata(BasicDBObject source) {
    return new ImageMetadata(mapToImageDimension((BasicDBObject) source.get("dimension")),
            ImageFormat.valueOf(source.getString("format")), source.getLong("size"));
}

From source file:com.images3.data.impl.MongoDBObjectMapper.java

License:Apache License

public ImageMetricsOS mapToImageMetricsOS(BasicDBObject source) {
    Map<ImageMetricsType, Long> numbers = new HashMap<ImageMetricsType, Long>();
    numbers.put(ImageMetricsType.COUNTS_INBOUND,
            (source.containsField(ImageMetricsType.COUNTS_INBOUND.toString())
                    ? source.getLong(ImageMetricsType.COUNTS_INBOUND.toString())
                    : 0L));
    numbers.put(ImageMetricsType.COUNTS_OUTBOUND,
            (source.containsField(ImageMetricsType.COUNTS_OUTBOUND.toString())
                    ? source.getLong(ImageMetricsType.COUNTS_OUTBOUND.toString())
                    : 0L));
    numbers.put(ImageMetricsType.SIZE_INBOUND,
            (source.containsField(ImageMetricsType.SIZE_INBOUND.toString())
                    ? source.getLong(ImageMetricsType.SIZE_INBOUND.toString())
                    : 0L));
    numbers.put(ImageMetricsType.SIZE_OUTBOUND,
            (source.containsField(ImageMetricsType.SIZE_OUTBOUND.toString())
                    ? source.getLong(ImageMetricsType.SIZE_OUTBOUND.toString())
                    : 0L));
    return new ImageMetricsOS(source.getString("imagePlantId"), source.getString("templateName"),
            source.getLong("second"), numbers);
}
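
The containsField checks in this last mapper replicate what the two-argument overload already provides; assuming a driver version whose BasicBSONObject defines getLong(final String key, final long def), each entry can be written more compactly:

numbers.put(ImageMetricsType.COUNTS_INBOUND,
        source.getLong(ImageMetricsType.COUNTS_INBOUND.toString(), 0L));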