Example usage for com.mongodb BasicDBObjectBuilder get

List of usage examples for com.mongodb BasicDBObjectBuilder get

Introduction

On this page you can find example usage for com.mongodb BasicDBObjectBuilder get.

Prototype

public DBObject get() 

Source Link

Document

Gets the top level document.
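
Before the examples, here is a minimal, self-contained sketch of the pattern they all share: accumulate fields on a BasicDBObjectBuilder and call get() to obtain the finished top-level DBObject. The class name, field names, and values below are illustrative only and are not taken from any of the source files listed under Usage.

import com.mongodb.BasicDBObjectBuilder;
import com.mongodb.DBObject;

public class BasicDBObjectBuilderGetExample {
    public static void main(String[] args) {
        // Accumulate fields; push()/pop() open and close a nested document
        BasicDBObjectBuilder builder = BasicDBObjectBuilder.start()
                .add("name", "alice")
                .add("age", 30)
                .push("address")
                .add("city", "Springfield")
                .pop();

        // get() always returns the top-level document, even after push()/pop()
        DBObject doc = builder.get();
        System.out.println(doc); // prints the document as JSON
    }
}

The examples below use the same call, typically passing the result of get() to a driver method (a query projection or an admin command) or serializing it with toString().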

Usage

From source file:org.alfresco.service.common.elasticsearch.ElasticSearchMonitoringIndexer.java

License:Open Source License

public void indexMonitoring(HistogramMonitoringEvent event) {
    long timestampMS = event.getTimestamp();
    String type = event.getType();
    Histogram data = event.getData();

    BasicDBObjectBuilder builder = BasicDBObjectBuilder.start("mt", type).add("tim", timestampMS)
            .add("ti", timestampMS).add("max", data.getMax()).add("min", data.getMin())
            .add("mean", data.getMean()).add("p50", data.getP50()).add("p75", data.getP75())
            .add("p95", data.getP95()).add("p98", data.getP98()).add("p99", data.getP99())
            .add("p999", data.getP999()).add("stdev", data.getStddev());

    String id = event.getId();
    String json = builder.get().toString();

    IndexResponse response = elasticSearchClient.index(indexName, id, IndexType.monitoring, json, true);

    logger.debug("Indexed monitoring " + id + ", " + builder.get() + "response " + response.getId() + ", "
            + response.getType() + ", " + response.getIndex() + ", " + response.getVersion());
}

From source file:org.alfresco.service.common.elasticsearch.ElasticSearchMonitoringIndexer.java

License:Open Source License

public void indexMonitoring(TimerMonitoringEvent event) {
    long timestampMS = event.getTimestamp();
    String type = event.getType();
    Timer data = event.getData();

    BasicDBObjectBuilder builder = BasicDBObjectBuilder.start("mt", type).add("tim", timestampMS)
            .add("ti", timestampMS).add("max", data.getMax()).add("min", data.getMin())
            .add("mean", data.getMean()).add("p50", data.getP50()).add("p75", data.getP75())
            .add("p95", data.getP95()).add("p98", data.getP98()).add("p99", data.getP99())
            .add("p999", data.getP999()).add("stdev", data.getStddev()).add("m1_rate", data.getM1_rate())
            .add("m5_rate", data.getM5_rate()).add("m15_rate", data.getM15_rate())
            .add("mean_rate", data.getMean_rate());

    String id = event.getId();
    String json = builder.get().toString();

    IndexResponse response = elasticSearchClient.index(indexName, id, IndexType.monitoring, json, true);

    logger.debug("Indexed monitoring " + id + ", " + builder.get() + "response " + response.getId() + ", "
            + response.getType() + ", " + response.getIndex() + ", " + response.getVersion());
}

From source file:org.alfresco.service.common.elasticsearch.ElasticSearchMonitoringIndexer.java

License:Open Source License

public void indexMonitoring(GaugeMonitoringEvent event) {
    long timestampMS = event.getTimestamp();
    String type = event.getType();
    long data = event.getData();

    BasicDBObjectBuilder builder = BasicDBObjectBuilder.start("mt", type).add("tim", timestampMS)
            .add("ti", timestampMS).add("value", data);

    String id = event.getId();
    String json = builder.get().toString();

    IndexResponse response = elasticSearchClient.index(indexName, id, IndexType.monitoring, json, true);

    logger.debug("Indexed monitoring " + id + ", " + builder.get() + "response " + response.getId() + ", "
            + response.getType() + ", " + response.getIndex() + ", " + response.getVersion());
}

From source file:org.apache.hadoop.contrib.mongoreduce.MongoOutputCommitter.java

License:Apache License

@Override
public void setupJob(JobContext jobContext) throws IOException {

    /**
     * note: we don't really have to do anything here -
     * MongoDB is one of the few systems that don't require you to
     * create a database or collection before writing to it
     * 
     * but in order to ingest a ton of data quickly we have to 
     * pre-split the output collection
     *
     */

    Configuration conf = jobContext.getConfiguration();
    if (conf.getBoolean("mongo.output.skip_splitting", false))
        return;

    String database = conf.get("mongo.output.database");
    String collection = conf.get("mongo.output.collection");

    // connect to global db
    Mongo m = new Mongo("localhost");
    DB db = m.getDB(database);
    DB admindb = m.getDB("admin");
    DB configdb = m.getDB("config");

    // optionally drop the existing collection
    boolean drop = conf.getBoolean("mongo.output.drop", false);
    DBCollection coll = db.getCollection(collection);
    if (drop) {
        coll.drop();
    } else {
        if (coll.count() > 0) {
            // don't shard an existing collection - may already be sharded ...
            return;
        }
    }

    // get a list of shards
    ArrayList<String> shards = new ArrayList<String>();
    for (DBObject s : configdb.getCollection("shards").find()) {
        shards.add((String) s.get("_id"));
    }

    if (shards.size() < 2) {
        // fewer than two shards - nothing to gain from pre-splitting
        return;
    }

    // shard the new output collection
    BasicDBObjectBuilder builder = new BasicDBObjectBuilder();
    builder.add("enableSharding", database);
    admindb.command(builder.get());

    builder = new BasicDBObjectBuilder();
    builder.add("shardCollection", database + "." + collection);

    // just shard on _id - but user gets to decide what the _id is
    builder.add("key", new BasicDBObject("_id", 1));
    admindb.command(builder.get());

    // pre-split to get parallel writes
    // this http://www.mongodb.org/display/DOCS/Splitting+Chunks says 
    // balancer moving chunks should take 5 minutes ... too long
    // wonder if moveChunk command is faster
    // well we could do it anyway - the jobs that can benefit from it will

    // check for user-submitted splitPoints
    String[] splits;
    String splitString = conf.get("mongo.output.split_points", "");

    // generate our own split points if necessary
    if (splitString.equals("")) {
        long max = (long) Math.pow(93.0, 5.0);

        long step = max / shards.size();
        splits = new String[shards.size() - 1];

        // assume human readable keys
        for (int i = 0; i < shards.size() - 1; i++) {
            splits[i] = splitPointForLong(step * (i + 1));
        }
    } else {
        splits = splitString.split(",");
    }

    HashMap<String, Object> splitCmd = new HashMap<String, Object>();
    splitCmd.put("split", database + "." + collection);
    splitCmd.put("middle", "");

    HashMap<String, Object> moveCmd = new HashMap<String, Object>();
    moveCmd.put("moveChunk", database + "." + collection);
    moveCmd.put("find", "");
    moveCmd.put("to", "");

    // do the splitting and migrating
    // we assign chunks to shards in a round-robin manner
    int i = 0;
    for (String split : splits) {

        splitCmd.remove("middle");
        splitCmd.put("middle", new BasicDBObject("_id", split));

        // create new chunk
        admindb.command(new BasicDBObject(splitCmd));

        // move to shard
        moveCmd.remove("find");
        moveCmd.put("find", new BasicDBObject("_id", split));
        moveCmd.put("to", shards.get(i));

        admindb.command(new BasicDBObject(moveCmd));

        i = (i + 1) % shards.size();
    }
}

From source file:org.apache.hadoop.contrib.mongoreduce.MongoStreamOutputFormat.java

License:Apache License

public void checkOutputSpecs(FileSystem ignored, JobConf conf) throws IOException {

    if (conf.getBoolean("mongo.output.skip_splitting", false))
        return;

    String database = conf.get("mongo.output.database", "");
    if (database.equals("")) {
        throw new IOException("must specify a value for mongo.output.database");
    }

    String collection = conf.get("mongo.output.collection", "");
    if (collection.equals("")) {
        throw new IOException("must supply a value for mongo.output.collection");
    }

    // connect to global db
    Mongo m = new Mongo("localhost");
    DB db = m.getDB(database);
    DB admindb = m.getDB("admin");
    DB configdb = m.getDB("config");

    // optionally drop the existing collection
    boolean drop = conf.getBoolean("mongo.output.drop", false);
    DBCollection coll = db.getCollection(collection);
    if (drop) {
        coll.drop();
    } else {
        if (coll.count() > 0) {
            // don't shard an existing collection - may already be sharded ...
            return;
        }
    }

    // get a list of shards
    ArrayList<String> shards = new ArrayList<String>();
    for (DBObject s : configdb.getCollection("shards").find()) {
        shards.add((String) s.get("_id"));
    }

    if (shards.size() < 2) {
        // fewer than two shards - nothing to gain from pre-splitting
        return;
    }

    // shard the new output collection
    BasicDBObjectBuilder builder = new BasicDBObjectBuilder();
    builder.add("enableSharding", database);
    admindb.command(builder.get());

    builder = new BasicDBObjectBuilder();
    builder.add("shardCollection", database + "." + collection);

    // just shard on _id - but user gets to decide what the _id is
    builder.add("key", new BasicDBObject("_id", 1));
    admindb.command(builder.get());

    // pre-split to get parallel writes
    // this http://www.mongodb.org/display/DOCS/Splitting+Chunks says 
    // balancer moving chunks should take 5 minutes ... too long
    // wonder if moveChunk command is faster
    // well we could do it anyway - the jobs that can benefit from it will

    // check for user-submitted splitPoints
    String[] splits;
    String splitString = conf.get("mongo.output.split_points", "");

    // generate our own split points if necessary
    if (splitString.equals("")) {
        long max = (long) Math.pow(93.0, 5.0);

        long step = max / shards.size();
        splits = new String[shards.size() - 1];

        // assume human readable keys
        for (int i = 0; i < shards.size() - 1; i++) {
            splits[i] = splitPointForLong(step * (i + 1));
        }
    } else {
        splits = splitString.split(",");
    }

    HashMap<String, Object> splitCmd = new HashMap<String, Object>();
    splitCmd.put("split", database + "." + collection);
    splitCmd.put("middle", "");

    HashMap<String, Object> moveCmd = new HashMap<String, Object>();
    moveCmd.put("moveChunk", database + "." + collection);
    moveCmd.put("find", "");
    moveCmd.put("to", "");

    // do the splitting and migrating
    // we assign chunks to shards in a round-robin manner
    int i = 0;
    for (String split : splits) {

        splitCmd.remove("middle");
        splitCmd.put("middle", new BasicDBObject("_id", split));

        // create new chunk
        admindb.command(new BasicDBObject(splitCmd));

        // move to shard
        moveCmd.remove("find");
        moveCmd.put("find", new BasicDBObject("_id", split));
        moveCmd.put("to", shards.get(i));

        admindb.command(new BasicDBObject(moveCmd));

        i = (i + 1) % shards.size();
    }
}

From source file:org.apache.karaf.jaas.modules.mongo.internal.DefaultUserDetailService.java

License:Apache License

@Override
public UserInfo getUserInfo(String username) throws Exception {

    DB db = getDB();

    DBCollection users = db.getCollection(configuration.getUserCollectionName());

    // populate user
    DBObject userQuery = new BasicDBObject("username", username);

    BasicDBObjectBuilder userProjectionBuilder = BasicDBObjectBuilder.start().add("_id", 0).add("username", 1)
            .add("passwordHash", 1);

    // also add all custom user fields
    for (String prop : configuration.getAdditionalAttributes()) {
        userProjectionBuilder.add(prop, 1);
    }

    DBObject user = users.findOne(userQuery, userProjectionBuilder.get());
    // if nothing comes back just return empty handed
    if (user == null) {
        return null;
    }

    UserInfo userInfo = new UserInfo().withName((String) user.get("username"))
            .withPassword((String) user.get("passwordHash"));

    for (String prop : configuration.getAdditionalAttributes()) {

        // only add if property is actually present in the database
        if (user.containsField(prop)) {
            Object val = user.get(prop);
            userInfo.addProperty(prop, val != null ? val.toString() : "");
        }

    }

    // populate group
    DBCollection groups = db.getCollection(configuration.getGroupCollectionName());

    DBObject groupQuery = new BasicDBObject("members", username);

    DBCursor gc = groups.find(groupQuery,
            BasicDBObjectBuilder.start().append("_id", 0).append("name", 1).get());

    while (gc.hasNext()) {
        DBObject group = gc.next();
        userInfo.addGroup((String) group.get("name"));
    }
    gc.close();

    return userInfo;

}

From source file:org.apache.rya.export.mongo.parent.ParentMetadataRepositoryAdapter.java

License:Apache License

/**
 * Serializes the {@link MergeParentMetadata} into a mongoDB object.
 * @param metadata - The {@link MergeParentMetadata} to serialize.
 * @return The MongoDB object
 */
public DBObject serialize(final MergeParentMetadata metadata) {
    final BasicDBObjectBuilder builder = BasicDBObjectBuilder.start()
            .add(RYANAME_KEY, metadata.getRyaInstanceName()).add(TIMESTAMP_KEY, metadata.getTimestamp())
            .add(FILTER_TIMESTAMP_KEY, metadata.getFilterTimestamp())
            .add(PARENT_TIME_OFFSET_KEY, metadata.getParentTimeOffset());
    return builder.get();
}

From source file:org.apache.rya.indexing.geotemporal.mongo.GeoTemporalMongoDBStorageStrategy.java

License:Apache License

@Override
public DBObject serialize(final RyaStatement ryaStatement) {
    final BasicDBObjectBuilder builder = BasicDBObjectBuilder.start("_id",
            ryaStatement.getSubject().hashCode());
    final URI obj = ryaStatement.getObject().getDataType();

    if (obj.equals(GeoConstants.GEO_AS_WKT) || obj.equals(GeoConstants.GEO_AS_GML)
            || obj.equals(GeoConstants.XMLSCHEMA_OGC_GML) || obj.equals(GeoConstants.XMLSCHEMA_OGC_WKT)) {
        try {
            final Statement statement = RyaToRdfConversions.convertStatement(ryaStatement);
            final Geometry geo = GeoParseUtils.getGeometry(statement, new GmlParser());
            if (geo.getNumPoints() > 1) {
                builder.add(GEO_KEY, geoStrategy.getCorrespondingPoints(geo));
            } else {
                builder.add(GEO_KEY, geoStrategy.getDBPoint(geo));
            }
        } catch (final ParseException e) {
            LOG.error("Could not create geometry for statement " + ryaStatement, e);
            return null;
        }
    } else {
        builder.add(TIME_KEY, temporalStrategy.getTimeValue(ryaStatement.getObject().getData()));
    }
    return builder.get();
}

From source file:org.apache.rya.indexing.geotemporal.mongo.MongoEventStorage.java

License:Apache License

@Override
public Collection<Event> search(final Optional<RyaURI> subject,
        final Optional<Collection<IndexingExpr>> geoFilters,
        final Optional<Collection<IndexingExpr>> temporalFilters) throws EventStorageException {
    requireNonNull(subject);

    try {
        final Collection<IndexingExpr> geos = (geoFilters.isPresent() ? geoFilters.get() : new ArrayList<>());
        final Collection<IndexingExpr> tempos = (temporalFilters.isPresent() ? temporalFilters.get()
                : new ArrayList<>());
        final DBObject filterObj = queryAdapter.getFilterQuery(geos, tempos);

        final BasicDBObjectBuilder builder = BasicDBObjectBuilder.start(filterObj.toMap());
        if (subject.isPresent()) {
            builder.append(EventDocumentConverter.SUBJECT, subject.get().getData());
        }
        final MongoCursor<Document> results = mongo.getDatabase(ryaInstanceName).getCollection(COLLECTION_NAME)
                .find(BsonDocument.parse(builder.get().toString())).iterator();

        final List<Event> events = new ArrayList<>();
        while (results.hasNext()) {
            events.add(EVENT_CONVERTER.fromDocument(results.next()));
        }
        return events;
    } catch (final MongoException | DocumentConverterException | GeoTemporalIndexException e) {
        throw new EventStorageException("Could not get the Event.", e);
    }
}

From source file:org.apache.rya.indexing.mongodb.temporal.TemporalMongoDBStorageStrategy.java

License:Apache License

public DBObject getTimeValue(final String timeData) {
    final Matcher match = TemporalInstantRfc3339.PATTERN.matcher(timeData);
    final BasicDBObjectBuilder builder = BasicDBObjectBuilder.start();
    if (match.find()) {
        final TemporalInterval date = TemporalInstantRfc3339.parseInterval(timeData);
        builder.add(INTERVAL_START, date.getHasBeginning().getAsDateTime().toDate());
        builder.add(INTERVAL_END, date.getHasEnd().getAsDateTime().toDate());
    } else {
        builder.add(INSTANT, TemporalInstantRfc3339.FORMATTER.parseDateTime(timeData).toDate());
    }
    return builder.get();
}