Example usage for com.mongodb QueryBuilder start

Introduction

This page collects example usages of com.mongodb QueryBuilder.start from open source projects.

Prototype

public static QueryBuilder start(final String key) 

Document

Creates a new query with a document key
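
Before the project examples, here is a minimal sketch of how QueryBuilder.start is typically chained with and/is/greaterThan and finished with get(). The database handle, the collection name "users", and the field names "age" and "name" are assumptions made for illustration only, not taken from the examples below.

import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.mongodb.QueryBuilder;

// Hypothetical helper: builds { "age" : { "$gt" : 21 }, "name" : "alice" } and runs it.
static DBCursor findAdultsNamedAlice(final DB db) {
    DBCollection users = db.getCollection("users"); // collection name assumed
    DBObject query = QueryBuilder.start("age").greaterThan(21) // age > 21 ...
            .and("name").is("alice")                           // ... and name equals "alice"
            .get();                                            // materialize the DBObject
    return users.find(query);
}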

Usage

From source file:org.apache.gora.mongodb.filters.DefaultFactory.java

License:Apache License

protected DBObject transformMapFilter(final MapFieldValueFilter<K, T> mapFilter, final MongoStore<K, T> store) {
    MongoMapping mapping = store.getMapping();
    String dbFieldName = mapping.getDocumentField(mapFilter.getFieldName()) + "."
            + store.encodeFieldKey(mapFilter.getMapKey().toString());

    FilterOp filterOp = mapFilter.getFilterOp();
    List<Object> operands = mapFilter.getOperands();

    QueryBuilder builder = QueryBuilder.start(dbFieldName);
    builder = appendToBuilder(builder, filterOp, operands);
    if (!mapFilter.isFilterIfMissing()) {
        // If false, the find query will pass if the column is not found.
        DBObject notExist = QueryBuilder.start(dbFieldName).exists(false).get();
        builder = QueryBuilder.start().or(notExist, builder.get());
    }
    return builder.get();
}

From source file:org.apache.jackrabbit.oak.plugins.document.DocumentNodeStoreHelper.java

License:Apache License

private static Iterable<NodeDocument> getDocuments(DocumentStore store) {
    if (store instanceof MongoDocumentStore) {
        // optimized implementation for MongoDocumentStore
        final MongoDocumentStore mds = (MongoDocumentStore) store;
        DBCollection dbCol = MongoDocumentStoreHelper.getDBCollection(mds, Collection.NODES);
        DBObject query = QueryBuilder.start(NodeDocument.HAS_BINARY_FLAG).is(NodeDocument.HAS_BINARY_VAL).get();
        DBCursor cursor = dbCol.find(query);
        return Iterables.transform(cursor, new Function<DBObject, NodeDocument>() {
            @Nullable
            @Override
            public NodeDocument apply(DBObject input) {
                return convertFromDBObject(mds, Collection.NODES, input);
            }
        });
    } else {
        return Utils.getSelectedDocuments(store, NodeDocument.HAS_BINARY_FLAG, NodeDocument.HAS_BINARY_VAL);
    }
}

From source file:org.apache.jackrabbit.oak.plugins.document.mongo.MongoBlobReferenceIterator.java

License:Apache License

private void initializeCursor() {
    if (cursor == null) {
        DBObject query = QueryBuilder.start(NodeDocument.HAS_BINARY_FLAG).is(NodeDocument.HAS_BINARY_VAL).get();
        //TODO It currently prefers secondary. Would that be Ok?
        cursor = getNodeCollection().find(query)
                .setReadPreference(documentStore.getConfiguredReadPreference(Collection.NODES));
    }
}

From source file:org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.java

License:Apache License

@SuppressWarnings("unchecked")
@Nonnull
<T extends Document> List<T> queryInternal(Collection<T> collection, String fromKey, String toKey,
        String indexedProperty, long startValue, int limit, long maxQueryTime) {
    log("query", fromKey, toKey, indexedProperty, startValue, limit);
    DBCollection dbCollection = getDBCollection(collection);
    QueryBuilder queryBuilder = QueryBuilder.start(Document.ID);
    queryBuilder.greaterThan(fromKey);
    queryBuilder.lessThan(toKey);

    DBObject hint = new BasicDBObject(NodeDocument.ID, 1);

    if (indexedProperty != null) {
        if (NodeDocument.DELETED_ONCE.equals(indexedProperty)) {
            if (startValue != 1) {
                throw new DocumentStoreException("unsupported value for property " + NodeDocument.DELETED_ONCE);
            }
            queryBuilder.and(indexedProperty);
            queryBuilder.is(true);
        } else {
            queryBuilder.and(indexedProperty);
            queryBuilder.greaterThanEquals(startValue);

            if (NodeDocument.MODIFIED_IN_SECS.equals(indexedProperty) && canUseModifiedTimeIdx(startValue)) {
                hint = new BasicDBObject(NodeDocument.MODIFIED_IN_SECS, -1);
            }
        }
    }
    DBObject query = queryBuilder.get();
    String parentId = Utils.getParentIdFromLowerLimit(fromKey);
    long lockTime = -1;
    final Stopwatch watch = startWatch();

    boolean isSlaveOk = false;
    int resultSize = 0;
    CacheChangesTracker cacheChangesTracker = null;
    if (parentId != null && collection == Collection.NODES) {
        cacheChangesTracker = nodesCache.registerTracker(fromKey, toKey);
    }
    try {
        DBCursor cursor = dbCollection.find(query).sort(BY_ID_ASC);
        if (!disableIndexHint && !hasModifiedIdCompoundIndex) {
            cursor.hint(hint);
        }
        if (maxQueryTime > 0) {
            // OAK-2614: set maxTime if maxQueryTimeMS > 0
            cursor.maxTime(maxQueryTime, TimeUnit.MILLISECONDS);
        }
        ReadPreference readPreference = getMongoReadPreference(collection, parentId, null,
                getDefaultReadPreference(collection));

        if (readPreference.isSlaveOk()) {
            isSlaveOk = true;
            LOG.trace("Routing call to secondary for fetching children from [{}] to [{}]", fromKey, toKey);
        }

        cursor.setReadPreference(readPreference);

        List<T> list;
        try {
            list = new ArrayList<T>();
            for (int i = 0; i < limit && cursor.hasNext(); i++) {
                DBObject o = cursor.next();
                T doc = convertFromDBObject(collection, o);
                list.add(doc);
            }
            resultSize = list.size();
        } finally {
            cursor.close();
        }

        if (cacheChangesTracker != null) {
            nodesCache.putNonConflictingDocs(cacheChangesTracker, (List<NodeDocument>) list);
        }

        return list;
    } finally {
        if (cacheChangesTracker != null) {
            cacheChangesTracker.close();
        }
        stats.doneQuery(watch.elapsed(TimeUnit.NANOSECONDS), collection, fromKey, toKey,
                indexedProperty != null, resultSize, lockTime, isSlaveOk);
    }
}

From source file:org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.java

License:Apache License

@Override
public <T extends Document> void remove(Collection<T> collection, List<String> keys) {
    log("remove", keys);
    DBCollection dbCollection = getDBCollection(collection);
    long start = PERFLOG.start();
    try {
        for (List<String> keyBatch : Lists.partition(keys, IN_CLAUSE_BATCH_SIZE)) {
            DBObject query = QueryBuilder.start(Document.ID).in(keyBatch).get();
            try {
                dbCollection.remove(query);
            } catch (Exception e) {
                throw DocumentStoreException.convert(e, "Remove failed for " + keyBatch);
            } finally {
                if (collection == Collection.NODES) {
                    for (String key : keyBatch) {
                        invalidateCache(collection, key);
                    }
                }
            }
        }
    } finally {
        PERFLOG.end(start, 1, "remove keys={}", keys);
    }
}

From source file:org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.java

License:Apache License

@Override
public <T extends Document> void update(Collection<T> collection, List<String> keys, UpdateOp updateOp) {
    log("update", keys, updateOp);
    UpdateUtils.assertUnconditional(updateOp);
    DBCollection dbCollection = getDBCollection(collection);
    QueryBuilder query = QueryBuilder.start(Document.ID).in(keys);
    // make sure we don't modify the original updateOp
    updateOp = updateOp.copy();
    DBObject update = createUpdate(updateOp, false);
    final Stopwatch watch = startWatch();
    try {
        Map<String, NodeDocument> cachedDocs = Collections.emptyMap();
        if (collection == Collection.NODES) {
            cachedDocs = Maps.newHashMap();
            for (String key : keys) {
                cachedDocs.put(key, nodesCache.getIfPresent(key));
            }
        }
        try {
            dbCollection.update(query.get(), update, false, true);
            if (collection == Collection.NODES) {
                Map<String, ModificationStamp> modCounts = getModStamps(
                        filterValues(cachedDocs, notNull()).keySet());
                // update cache
                for (Entry<String, NodeDocument> entry : cachedDocs.entrySet()) {
                    // the cachedDocs is not empty, so the collection = NODES
                    Lock lock = nodeLocks.acquire(entry.getKey());
                    try {
                        ModificationStamp postUpdateModStamp = modCounts.get(entry.getKey());
                        if (postUpdateModStamp != null && entry.getValue() != null
                                && entry.getValue() != NodeDocument.NULL
                                && Long.valueOf(postUpdateModStamp.modCount - 1)
                                        .equals(entry.getValue().getModCount())) {
                            // post update modCount is one higher than
                            // what we currently see in the cache. we can
                            // replace the cached document
                            NodeDocument newDoc = applyChanges(Collection.NODES, entry.getValue(),
                                    updateOp.shallowCopy(entry.getKey()));
                            nodesCache.replaceCachedDocument(entry.getValue(), newDoc);
                        } else {
                            // make sure concurrently loaded document is
                            // invalidated
                            nodesCache.invalidate(entry.getKey());
                        }
                    } finally {
                        lock.unlock();
                    }
                }
            }
        } catch (MongoException e) {
            // some documents may still have been updated
            // invalidate all documents affected by this update call
            for (String k : keys) {
                nodesCache.invalidate(k);
            }
            throw DocumentStoreException.convert(e);
        }
    } finally {
        stats.doneUpdate(watch.elapsed(TimeUnit.NANOSECONDS), collection, keys.size());
    }
}

From source file:org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.java

License:Apache License

/**
 * Returns the {@link Document#MOD_COUNT} and
 * {@link NodeDocument#MODIFIED_IN_SECS} values of the documents with the
 * given {@code keys}. The returned map will only contain entries for
 * existing documents. The default value is -1 if the document does not have
 * a modCount field. The same applies to the modified field.
 *
 * @param keys the keys of the documents.
 * @return map with key to modification stamp mapping.
 * @throws MongoException if the call fails
 */
@Nonnull
private Map<String, ModificationStamp> getModStamps(Iterable<String> keys) throws MongoException {
    QueryBuilder query = QueryBuilder.start(Document.ID).in(keys);
    // Fetch only the modCount and id
    final BasicDBObject fields = new BasicDBObject(Document.ID, 1);
    fields.put(Document.MOD_COUNT, 1);
    fields.put(NodeDocument.MODIFIED_IN_SECS, 1);

    DBCursor cursor = nodes.find(query.get(), fields);
    cursor.setReadPreference(ReadPreference.primary());

    Map<String, ModificationStamp> modCounts = Maps.newHashMap();
    for (DBObject obj : cursor) {
        String id = (String) obj.get(Document.ID);
        Long modCount = Utils.asLong((Number) obj.get(Document.MOD_COUNT));
        if (modCount == null) {
            modCount = -1L;
        }
        Long modified = Utils.asLong((Number) obj.get(NodeDocument.MODIFIED_IN_SECS));
        if (modified == null) {
            modified = -1L;
        }
        modCounts.put(id, new ModificationStamp(modCount, modified));
    }
    return modCounts;
}

From source file:org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore.java

License:Apache License

private static QueryBuilder getByKeyQuery(String key) {
    return QueryBuilder.start(Document.ID).is(key);
}

From source file:org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStoreHelper.java

License:Apache License

public static void repair(MongoDocumentStore store, String path) {
    DBCollection col = store.getDBCollection(NODES);
    String id = Utils.getIdFromPath(path);

    NodeDocument doc = store.find(NODES, id);
    if (doc == null) {
        System.out.println("No document for path " + path);
        return;
    }

    Set<Revision> changes = Sets.newHashSet();
    for (String key : doc.keySet()) {
        if (Utils.isPropertyName(key) || NodeDocument.isDeletedEntry(key)) {
            changes.addAll(getLocalMap(doc, key).keySet());
        }
    }

    SortedMap<Revision, String> commitRoot = newTreeMap(getLocalCommitRoot(doc));
    if (!commitRoot.keySet().retainAll(changes)) {
        System.out.println("Nothing to repair on " + path);
        return;
    }

    Number modCount = doc.getModCount();
    if (modCount == null) {
        System.err.println("Document does not have a modCount " + path);
        return;
    }
    DBObject query = QueryBuilder.start(Document.ID).is(id).and(Document.MOD_COUNT).is(modCount).get();
    DBObject cr = new BasicDBObject();
    for (Map.Entry<Revision, String> entry : commitRoot.entrySet()) {
        cr.put(entry.getKey().toString(), entry.getValue());
    }

    DBObject update = new BasicDBObject();
    update.put("$set", new BasicDBObject(commitRoot(), cr));
    update.put("$inc", new BasicDBObject(Document.MOD_COUNT, 1L));

    WriteResult result = col.update(query, update);
    if (result.getN() == 1) {
        int num = getLocalCommitRoot(doc).size() - commitRoot.size();
        System.out.println("Removed " + num + " _commitRoot entries on " + path);
    } else {
        System.out.println("Unable to repair " + path + " (concurrent update).");
    }

}

From source file:org.apache.rya.indexing.geotemporal.mongo.GeoTemporalMongoDBStorageStrategy.java

License:Apache License

private DBObject getTemporalObject(final TemporalInstant instant, final TemporalPolicy policy) {
    final DBObject temporalObj;
    switch (policy) {
    case INSTANT_AFTER_INSTANT:
        temporalObj = QueryBuilder.start(INSTANT).greaterThan(instant.getAsDateTime().toDate()).get();
        break;
    case INSTANT_BEFORE_INSTANT:
        temporalObj = QueryBuilder.start(INSTANT).lessThan(instant.getAsDateTime().toDate()).get();
        break;
    case INSTANT_EQUALS_INSTANT:
        temporalObj = QueryBuilder.start(INSTANT).is(instant.getAsDateTime().toDate()).get();
        break;
    default:
        temporalObj = new BasicDBObject();
    }
    return temporalObj;
}