Example usage for com.mongodb BasicDBObject getInt

Introduction

This page lists usage examples for com.mongodb BasicDBObject.getInt, collected from open-source projects.

Prototype

public int getInt(final String key) 

Document

Returns the value of a field as an int.
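
Note that the single-argument form throws if the field is absent; the driver also offers a two-argument overload, getInt(key, default), for optional fields. A minimal sketch of both (field names and values are illustrative):

import com.mongodb.BasicDBObject;

BasicDBObject doc = new BasicDBObject("count", 42).append("ratio", 3.7);
int count = doc.getInt("count");          // 42
int truncated = doc.getInt("ratio");      // non-integer numerics are converted, yielding 3
int fallback = doc.getInt("missing", -1); // the overload returns the default instead of throwing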

Usage

From source file:com.ibm.bluemix.smartveggie.service.UserServiceImpl.java

private UserDTO setObjectToDTOMapping(BasicDBObject dbObj) {
    UserDTO userDTO = new UserDTO();
    userDTO.setFirstName(dbObj.getString("firstName"));
    userDTO.setLastName(dbObj.getString("lastName"));
    userDTO.setAddressLine1(dbObj.getString("addressLine1"));
    userDTO.setAddressLine2(dbObj.getString("addressLine2"));
    userDTO.setSex(dbObj.getString("sex"));
    userDTO.setPinCode(dbObj.getString("pin"));
    userDTO.setAge(dbObj.getInt("age"));
    userDTO.setCity(dbObj.getString("city"));
    userDTO.setUserName(dbObj.getString("userName"));
    userDTO.setRegulatingCityCode(dbObj.getString("regulatingCityCode"));
    userDTO.setRegulatingCityName(dbObj.getString("regulatingCityName"));
    userDTO.setUserTypeCode(dbObj.getString("userType"));
    userDTO.setLicenseNo(dbObj.getString("licenseNo"));
    return userDTO;
}

From source file:com.ijuru.ijambo.dao.WordDAO.java

License:Open Source License

/**
 * Gets a random word
 * @param difficulty the difficulty (may be null)
 * @return the word
 */
public Word getRandomWord(Difficulty difficulty) {
    DBCollection words = db.getCollection("words");
    BasicDBObject obj;

    if (difficulty != null) {
        // Get count of words of this difficulty
        BasicDBObject query = new BasicDBObject();
        query.put("difficulty", difficulty.ordinal());
        int count = words.find(query).count();

        // Pick random one
        int randOffset = (int) (Math.random() * count);
        obj = (BasicDBObject) words.find(query).limit(-1).skip(randOffset).next();
    } else {
        int randOffset = (int) (Math.random() * words.find().count());
        obj = (BasicDBObject) words.find().limit(-1).skip(randOffset).next();
    }

    return new Word(obj.getString("word"), obj.getString("meaning"),
            Difficulty.fromInt(obj.getInt("difficulty")));
}

From source file:com.ikanow.aleph2.v1.document_db.utils.LegacyV1HadoopUtils.java

License:Open Source License

/** parse the V1 query string 
 * @param query
 * @return the required objects embedded in various tuples
 */
public static Tuple4<String, Tuple2<Integer, Integer>, BasicDBObject, DBObject> parseQueryObject(
        final String query, final List<String> community_ids) {
    // Some fixed variables just to avoid changing the guts of the (tested in v1) code
    final boolean isCustomTable = false;
    @SuppressWarnings("unused")
    Integer nDebugLimit = null;
    final boolean bLocalMode = false;
    @SuppressWarnings("unused")
    final Boolean incrementalMode = null;
    final String input = "doc_metadata.metadata";

    // Output objects
    final String out_query;
    int nSplits = 8;
    int nDocsPerSplit = 12500;

    List<ObjectId> communityIds = community_ids.stream().map(s -> new ObjectId(s)).collect(Collectors.toList());

    //C/P code:

    //add communities to query if this is not a custom table
    BasicDBObject oldQueryObj = null;
    BasicDBObject srcTags = null;
    // Start with the old query:
    if (query.startsWith("{")) {
        oldQueryObj = (BasicDBObject) com.mongodb.util.JSON.parse(query);
    } else {
        oldQueryObj = new BasicDBObject();
    }
    boolean elasticsearchQuery = oldQueryObj.containsField("qt") && !isCustomTable;
    @SuppressWarnings("unused")
    int nLimit = 0;
    if (oldQueryObj.containsField(":limit")) {
        nLimit = oldQueryObj.getInt(":limit");
        oldQueryObj.remove(":limit");
    }
    if (oldQueryObj.containsField(":splits")) {
        nSplits = oldQueryObj.getInt(":splits");
        oldQueryObj.remove(":splits");
    }
    if (oldQueryObj.containsField(":srctags")) {
        srcTags = new BasicDBObject(SourcePojo.tags_, oldQueryObj.get(":srctags"));
        oldQueryObj.remove(":srctags");
    }
    if (bLocalMode) { // If in local mode, then set this to a large number so we always run inside our limit/split version
        // (since for some reason MongoInputFormat seems to fail on large collections)
        nSplits = InfiniteMongoSplitter.MAX_SPLITS;
    }
    if (oldQueryObj.containsField(":docsPerSplit")) {
        nDocsPerSplit = oldQueryObj.getInt(":docsPerSplit");
        oldQueryObj.remove(":docsPerSplit");
    }
    final DBObject fields = (DBObject) oldQueryObj.remove(":fields");
    oldQueryObj.remove(":output");
    oldQueryObj.remove(":reducers");
    @SuppressWarnings("unused")
    String mapperKeyClass = oldQueryObj.getString(":mapper_key_class", "");
    @SuppressWarnings("unused")
    String mapperValueClass = oldQueryObj.getString(":mapper_value_class", "");
    oldQueryObj.remove(":mapper_key_class");
    oldQueryObj.remove(":mapper_value_class");
    String cacheList = null;
    Object cacheObj = oldQueryObj.get(":caches");
    if (null != cacheObj) {
        cacheList = cacheObj.toString(); // (either array of strings, or single string)
        if (!cacheList.startsWith("[")) {
            cacheList = "[" + cacheList + "]"; // ("must" now be valid array)
        }
        oldQueryObj.remove(":caches");
    } //TESTED

    //      if (null != nDebugLimit) { // (debug mode override)
    //         nLimit = nDebugLimit;
    //      }
    //      boolean tmpIncMode = ( null != incrementalMode) && incrementalMode; 

    @SuppressWarnings("unused")
    String otherCollections = null;
    Date fromOverride = null;
    Date toOverride = null;
    Object fromOverrideObj = oldQueryObj.remove(":tmin");
    Object toOverrideObj = oldQueryObj.remove(":tmax");
    if (null != fromOverrideObj) {
        fromOverride = dateStringFromObject(fromOverrideObj, true);
    }
    if (null != toOverrideObj) {
        toOverride = dateStringFromObject(toOverrideObj, false);
    }

    if (!isCustomTable) {
        if (elasticsearchQuery) {
            oldQueryObj.put("communityIds", communityIds);
            //tmin/tmax not supported - already have that capability as part of the query
        } else {
            if (input.equals("feature.temporal")) {
                if ((null != fromOverride) || (null != toOverride)) {
                    oldQueryObj.put("value.maxTime", createDateRange(fromOverride, toOverride, true));
                } //TESTED
                oldQueryObj.put("_id.c", new BasicDBObject(DbManager.in_, communityIds));
            } else {
                oldQueryObj.put(DocumentPojo.communityId_, new BasicDBObject(DbManager.in_, communityIds));
                if ((null != fromOverride) || (null != toOverride)) {
                    oldQueryObj.put(JsonUtils._ID, createDateRange(fromOverride, toOverride, false));
                } //TESTED         
                if (input.equals("doc_metadata.metadata")) {
                    oldQueryObj.put(DocumentPojo.index_, new BasicDBObject(DbManager.ne_, "?DEL?")); // (ensures not soft-deleted)
                }
            }
        }
    } else {
        throw new RuntimeException("Custom Tables not currently supported (no plans to)");
        //         if ((null != fromOverride) || (null != toOverride)) {
        //            oldQueryObj.put(JsonUtils._ID, createDateRange(fromOverride, toOverride, false));
        //         }//TESTED
        //         //get the custom table (and database)
        //
        //         String[] candidateInputs = input.split("\\s*,\\s*");
        //         input = CustomOutputManager.getCustomDbAndCollection(candidateInputs[0]);
        //         if (candidateInputs.length > 1) {            
        //            otherCollections = Arrays.stream(candidateInputs)
        //                  .skip(1L)
        //                  .map(i -> CustomOutputManager.getCustomDbAndCollection(i))
        //                  .map(i -> "mongodb://"+dbserver+"/"+i).collect(Collectors.joining("|"));
        //         }
    }
    out_query = oldQueryObj.toString();

    return Tuples._4T(out_query, Tuples._2T(nSplits, nDocsPerSplit), srcTags, fields);
}
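
The recurring pattern above — read an int control field out of the parsed query, then strip it so only real criteria remain — shown in isolation (a minimal sketch; the query string is illustrative):

BasicDBObject q = (BasicDBObject) com.mongodb.util.JSON.parse("{\":splits\": 4, \"sourceKey\": \"abc\"}");
int nSplits = q.containsField(":splits") ? q.getInt(":splits") : 8; // default mirrors the code above
q.remove(":splits"); // the control field must not reach MongoDB as a query criterion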

From source file:com.ikanow.infinit.e.core.mapreduce.HadoopJobRunner.java

License:Open Source License

private void createConfigXML(Writer out, String title, String input, String fields, boolean isCustomTable,
        String outputDatabase, String output, String tempOutputCollection, String mapper, String reducer,
        String combiner, String query, List<ObjectId> communityIds, String outputKey, String outputValue,
        String arguments) throws IOException {
    String dbserver = prop_general.getDatabaseServer();
    output = outputDatabase + "." + tempOutputCollection;

    int nSplits = 8;
    int nDocsPerSplit = 12500;

    //add communities to query if this is not a custom table
    if (!isCustomTable) {
        // Start with the old query:
        BasicDBObject oldQueryObj = null;
        if (query.startsWith("{")) {
            oldQueryObj = (BasicDBObject) com.mongodb.util.JSON.parse(query);
        } else {
            oldQueryObj = new BasicDBObject();
        }

        // Community Ids aren't indexed in the metadata collection, but source keys are, so we need to transform to that
        BasicDBObject keyQuery = new BasicDBObject(SourcePojo.communityIds_,
                new BasicDBObject(DbManager.in_, communityIds));
        boolean bAdminOverride = false;
        if (oldQueryObj.containsField("admin")) { // For testing only...
            if (1 == communityIds.size()) {
                ObjectId communityId = communityIds.get(0);
                if (RESTTools.adminLookup(communityId.toString())) {
                    bAdminOverride = true;
                    if (oldQueryObj.containsField("max.splits")) {
                        nSplits = oldQueryObj.getInt("max.splits");
                    }
                    if (oldQueryObj.containsField("max.docs.per.split")) {
                        nDocsPerSplit = oldQueryObj.getInt("max.docs.per.split");
                    }
                }
            }
        } //(end diagnostic/benchmarking/test code for admins only part 1)
        if (bAdminOverride) {
            oldQueryObj = (BasicDBObject) oldQueryObj.get("admin");
            //(end diagnostic/benchmarking/test code for admins only part 2)
        } else if (oldQueryObj.containsField(DocumentPojo.sourceKey_) || input.startsWith("feature.")) {
            // Source Key specified by user, stick communityIds check in for security
            oldQueryObj.put(DocumentPojo.communityId_, new BasicDBObject(DbManager.in_, communityIds));
        } else { // Source key not specified by user, transform communities->sourcekeys
            BasicDBObject keyFields = new BasicDBObject(SourcePojo.key_, 1);
            DBCursor dbc = MongoDbManager.getIngest().getSource().find(keyQuery, keyFields);
            if (dbc.count() > 500) {
                // (too many source keys let's keep the query size sensible...)
                oldQueryObj.put(DocumentPojo.communityId_, new BasicDBObject(DbManager.in_, communityIds));
            } else {
                HashSet<String> sourceKeys = new HashSet<String>();
                while (dbc.hasNext()) {
                    DBObject dbo = dbc.next();
                    String sourceKey = (String) dbo.get(SourcePojo.key_);
                    if (null != sourceKey) {
                        sourceKeys.add(sourceKey);
                    }
                }
                if (sourceKeys.isEmpty()) { // query returns empty
                    throw new RuntimeException("Communities contain no sources");
                }
                BasicDBObject newQueryClauseObj = new BasicDBObject(DbManager.in_, sourceKeys);
                // Now combine the queries...
                oldQueryObj.put(DocumentPojo.sourceKey_, newQueryClauseObj);

            } // (end if too many source keys across the communities)
        } //(end if need to break source keys down into communities)
        query = oldQueryObj.toString();
    } else {
        //get the custom table (and database)
        input = getCustomDbAndCollection(input);
    }
    if (arguments == null)
        arguments = "";

    // Generic configuration
    out.write("<?xml version=\"1.0\"?>\n<configuration>");

    // Mongo specific configuration

    out.write("\n\t<property><!-- name of job shown in jobtracker --><name>mongo.job.name</name><value>" + title
            + "</value></property>"
            + "\n\t<property><!-- run the job verbosely ? --><name>mongo.job.verbose</name><value>true</value></property>"
            + "\n\t<property><!-- Run the job in the foreground and wait for response, or background it? --><name>mongo.job.background</name><value>false</value></property>"
            + "\n\t<property><!-- If you are reading from mongo, the URI --><name>mongo.input.uri</name><value>mongodb://"
            + dbserver + "/" + input + "</value></property>"
            + "\n\t<property><!-- If you are writing to mongo, the URI --><name>mongo.output.uri</name><value>mongodb://"
            + dbserver + "/" + output + "</value>  </property>"
            + "\n\t<property><!-- The query, in JSON, to execute [OPTIONAL] --><name>mongo.input.query</name><value>"
            + query + "</value></property>"
            + "\n\t<property><!-- The fields, in JSON, to read [OPTIONAL] --><name>mongo.input.fields</name><value>"
            + ((fields == null) ? ("") : fields) + "</value></property>"
            + "\n\t<property><!-- A JSON sort specification for read [OPTIONAL] --><name>mongo.input.sort</name><value></value></property>"
            + "\n\t<property><!-- The number of documents to limit to for read [OPTIONAL] --><name>mongo.input.limit</name><value>0</value><!-- 0 == no limit --></property>"
            + "\n\t<property><!-- The number of documents to skip in read [OPTIONAL] --><!-- TODO - Are we running limit() or skip() first? --><name>mongo.input.skip</name><value>0</value> <!-- 0 == no skip --></property>"
            + "\n\t<property><!-- Class for the mapper --><name>mongo.job.mapper</name><value>" + mapper
            + "</value></property>"
            + "\n\t<property><!-- Reducer class --><name>mongo.job.reducer</name><value>" + reducer
            + "</value></property>"
            + "\n\t<property><!-- InputFormat Class --><name>mongo.job.input.format</name><value>com.ikanow.infinit.e.data_model.custom.InfiniteMongoInputFormat</value></property>"
            + "\n\t<property><!-- OutputFormat Class --><name>mongo.job.output.format</name><value>com.mongodb.hadoop.MongoOutputFormat</value></property>"
            + "\n\t<property><!-- Output key class for the output format --><name>mongo.job.output.key</name><value>"
            + outputKey + "</value></property>"
            + "\n\t<property><!-- Output value class for the output format --><name>mongo.job.output.value</name><value>"
            + outputValue + "</value></property>"
            + "\n\t<property><!-- Output key class for the mapper [optional] --><name>mongo.job.mapper.output.key</name><value></value></property>"
            + "\n\t<property><!-- Output value class for the mapper [optional] --><name>mongo.job.mapper.output.value</name><value></value></property>"
            + "\n\t<property><!-- Class for the combiner [optional] --><name>mongo.job.combiner</name><value>"
            + combiner + "</value></property>"
            + "\n\t<property><!-- Partitioner class [optional] --><name>mongo.job.partitioner</name><value></value></property>"
            + "\n\t<property><!-- Sort Comparator class [optional] --><name>mongo.job.sort_comparator</name><value></value></property>"
            + "\n\t<property><!-- Split Size [optional] --><name>mongo.input.split_size</name><value>32</value></property>");

    // Infinit.e specific configuration

    out.write("\n\t<property><!-- User Arguments [optional] --><name>arguments</name><value>"
            + StringEscapeUtils.escapeXml(arguments) + "</value></property>"
            + "\n\t<property><!-- Maximum number of splits [optional] --><name>max.splits</name><value>"
            + nSplits + "</value></property>"
            + "\n\t<property><!-- Maximum number of docs per split [optional] --><name>max.docs.per.split</name><value>"
            + nDocsPerSplit + "</value></property>");

    // Closing thoughts:
    out.write("\n</configuration>");

    out.flush();
    out.close();
}

From source file:com.ikanow.infinit.e.processing.custom.launcher.CustomHadoopTaskLauncher.java

License:Open Source License

private void createConfigXML(Writer out, String title, String input, String fields, boolean isCustomTable,
        String outputDatabase, String output, String tempOutputCollection, String mapper, String reducer,
        String combiner, String query, List<ObjectId> communityIds, String outputKey, String outputValue,
        String arguments, Boolean incrementalMode, ObjectId userId, Boolean selfMerge,
        String originalOutputCollection, Boolean appendResults) throws IOException {
    String dbserver = prop_general.getDatabaseServer();
    output = outputDatabase + "." + tempOutputCollection;

    boolean isAdmin = AuthUtils.isAdmin(userId);

    int nSplits = 8;
    int nDocsPerSplit = 12500;

    //add communities to query if this is not a custom table
    BasicDBObject oldQueryObj = null;
    BasicDBObject srcTags = null;
    // Start with the old query:
    if (query.startsWith("{")) {
        oldQueryObj = (BasicDBObject) com.mongodb.util.JSON.parse(query);
    } else {
        oldQueryObj = new BasicDBObject();
    }
    boolean elasticsearchQuery = oldQueryObj.containsField("qt") && !isCustomTable;
    int nLimit = 0;
    if (oldQueryObj.containsField("$limit")) {
        nLimit = oldQueryObj.getInt("$limit");
        oldQueryObj.remove("$limit");
    }
    if (oldQueryObj.containsField("$splits")) {
        nSplits = oldQueryObj.getInt("$splits");
        oldQueryObj.remove("$splits");
    }
    if (oldQueryObj.containsField("$srctags")) {
        srcTags = new BasicDBObject(SourcePojo.tags_, oldQueryObj.get("$srctags"));
        oldQueryObj.remove("$srctags");
    }
    if (bLocalMode) { // If in local mode, then set this to a large number so we always run inside our limit/split version
        // (since for some reason MongoInputFormat seems to fail on large collections)
        nSplits = InfiniteMongoSplitter.MAX_SPLITS;
    }
    if (oldQueryObj.containsField("$docsPerSplit")) {
        nDocsPerSplit = oldQueryObj.getInt("$docsPerSplit");
        oldQueryObj.remove("$docsPerSplit");
    }
    oldQueryObj.remove("$fields");
    oldQueryObj.remove("$output");
    oldQueryObj.remove("$reducers");
    String mapperKeyClass = oldQueryObj.getString("$mapper_key_class", "");
    String mapperValueClass = oldQueryObj.getString("$mapper_value_class", "");
    oldQueryObj.remove("$mapper_key_class");
    oldQueryObj.remove("$mapper_value_class");
    String cacheList = null;
    Object cacheObj = oldQueryObj.get("$caches");
    if (null != cacheObj) {
        cacheList = cacheObj.toString(); // (either array of strings, or single string)
        if (!cacheList.startsWith("[")) {
            cacheList = "[" + cacheList + "]"; // ("must" now be valid array)
        }
        oldQueryObj.remove("$caches");
    } //TESTED

    if (null != nDebugLimit) { // (debug mode override)
        nLimit = nDebugLimit;
    }
    boolean tmpIncMode = (null != incrementalMode) && incrementalMode;

    Date fromOverride = null;
    Date toOverride = null;
    Object fromOverrideObj = oldQueryObj.remove("$tmin");
    Object toOverrideObj = oldQueryObj.remove("$tmax");
    if (null != fromOverrideObj) {
        fromOverride = InfiniteHadoopUtils.dateStringFromObject(fromOverrideObj, true);
    }
    if (null != toOverrideObj) {
        toOverride = InfiniteHadoopUtils.dateStringFromObject(toOverrideObj, false);
    }

    if (!isCustomTable) {
        if (elasticsearchQuery) {
            oldQueryObj.put("communityIds", communityIds);
            //tmin/tmax not supported - already have that capability as part of the query
        } else {
            if (input.equals("feature.temporal")) {
                if ((null != fromOverride) || (null != toOverride)) {
                    oldQueryObj.put("value.maxTime",
                            InfiniteHadoopUtils.createDateRange(fromOverride, toOverride, true));
                } //TESTED
                oldQueryObj.put("_id.c", new BasicDBObject(DbManager.in_, communityIds));
            } else {
                oldQueryObj.put(DocumentPojo.communityId_, new BasicDBObject(DbManager.in_, communityIds));
                if ((null != fromOverride) || (null != toOverride)) {
                    oldQueryObj.put("_id",
                            InfiniteHadoopUtils.createDateRange(fromOverride, toOverride, false));
                } //TESTED         
                if (input.equals("doc_metadata.metadata")) {
                    oldQueryObj.put(DocumentPojo.index_, new BasicDBObject(DbManager.ne_, "?DEL?")); // (ensures not soft-deleted)
                }
            }
        }
    } else {
        if ((null != fromOverride) || (null != toOverride)) {
            oldQueryObj.put("_id", InfiniteHadoopUtils.createDateRange(fromOverride, toOverride, false));
        } //TESTED
          //get the custom table (and database)
        input = CustomOutputManager.getCustomDbAndCollection(input);
    }
    query = oldQueryObj.toString();

    if (arguments == null)
        arguments = "";

    // Generic configuration
    out.write("<?xml version=\"1.0\"?>\n<configuration>");

    // Mongo specific configuration
    out.write("\n\t<property><!-- name of job shown in jobtracker --><name>mongo.job.name</name><value>" + title
            + "</value></property>"
            + "\n\t<property><!-- run the job verbosely ? --><name>mongo.job.verbose</name><value>true</value></property>"
            + "\n\t<property><!-- Run the job in the foreground and wait for response, or background it? --><name>mongo.job.background</name><value>false</value></property>"
            + "\n\t<property><!-- If you are reading from mongo, the URI --><name>mongo.input.uri</name><value>mongodb://"
            + dbserver + "/" + input + "</value></property>"
            + "\n\t<property><!-- If you are writing to mongo, the URI --><name>mongo.output.uri</name><value>mongodb://"
            + dbserver + "/" + output + "</value>  </property>"
            + "\n\t<property><!-- The query, in JSON, to execute [OPTIONAL] --><name>mongo.input.query</name><value>"
            + StringEscapeUtils.escapeXml(query) + "</value></property>"
            + "\n\t<property><!-- The fields, in JSON, to read [OPTIONAL] --><name>mongo.input.fields</name><value>"
            + ((fields == null) ? ("") : fields) + "</value></property>"
            + "\n\t<property><!-- A JSON sort specification for read [OPTIONAL] --><name>mongo.input.sort</name><value></value></property>"
            + "\n\t<property><!-- The number of documents to limit to for read [OPTIONAL] --><name>mongo.input.limit</name><value>"
            + nLimit + "</value><!-- 0 == no limit --></property>"
            + "\n\t<property><!-- The number of documents to skip in read [OPTIONAL] --><!-- TODO - Are we running limit() or skip() first? --><name>mongo.input.skip</name><value>0</value> <!-- 0 == no skip --></property>"
            + "\n\t<property><!-- Class for the mapper --><name>mongo.job.mapper</name><value>" + mapper
            + "</value></property>"
            + "\n\t<property><!-- Reducer class --><name>mongo.job.reducer</name><value>" + reducer
            + "</value></property>"
            + "\n\t<property><!-- InputFormat Class --><name>mongo.job.input.format</name><value>com.ikanow.infinit.e.data_model.custom.InfiniteMongoInputFormat</value></property>"
            + "\n\t<property><!-- OutputFormat Class --><name>mongo.job.output.format</name><value>com.ikanow.infinit.e.data_model.custom.InfiniteMongoOutputFormat</value></property>"
            + "\n\t<property><!-- Output key class for the output format --><name>mongo.job.output.key</name><value>"
            + outputKey + "</value></property>"
            + "\n\t<property><!-- Output value class for the output format --><name>mongo.job.output.value</name><value>"
            + outputValue + "</value></property>"
            + "\n\t<property><!-- Output key class for the mapper [optional] --><name>mongo.job.mapper.output.key</name><value>"
            + mapperKeyClass + "</value></property>"
            + "\n\t<property><!-- Output value class for the mapper [optional] --><name>mongo.job.mapper.output.value</name><value>"
            + mapperValueClass + "</value></property>"
            + "\n\t<property><!-- Class for the combiner [optional] --><name>mongo.job.combiner</name><value>"
            + combiner + "</value></property>"
            + "\n\t<property><!-- Partitioner class [optional] --><name>mongo.job.partitioner</name><value></value></property>"
            + "\n\t<property><!-- Sort Comparator class [optional] --><name>mongo.job.sort_comparator</name><value></value></property>"
            + "\n\t<property><!-- Split Size [optional] --><name>mongo.input.split_size</name><value>32</value></property>");

    // Infinit.e specific configuration

    out.write("\n\t<property><!-- User Arguments [optional] --><name>infinit.e.userid</name><value>"
            + StringEscapeUtils.escapeXml(userId.toString()) + "</value></property>"
            + "\n\t<property><!-- User Arguments [optional] --><name>arguments</name><value>"
            + StringEscapeUtils.escapeXml(arguments) + "</value></property>"
            + "\n\t<property><!-- Maximum number of splits [optional] --><name>max.splits</name><value>"
            + nSplits + "</value></property>"
            + "\n\t<property><!-- Maximum number of docs per split [optional] --><name>max.docs.per.split</name><value>"
            + nDocsPerSplit + "</value></property>"
            + "\n\t<property><!-- Infinit.e incremental mode [optional] --><name>update.incremental</name><value>"
            + tmpIncMode + "</value></property>"
            + "\n\t<property><!-- Infinit.e quick admin check [optional] --><name>infinit.e.is.admin</name><value>"
            + isAdmin + "</value></property>"
            + "\n\t<property><!-- Infinit.e userid [optional] --><name>infinit.e.userid</name><value>" + userId
            + "</value></property>");
    if (null != cacheList) {
        out.write(
                "\n\t<property><!-- Infinit.e cache list [optional] --><name>infinit.e.cache.list</name><value>"
                        + cacheList + "</value></property>");
    } //TESTED
    if (null != srcTags) {
        out.write(
                "\n\t<property><!-- Infinit.e src tags filter [optional] --><name>infinit.e.source.tags.filter</name><value>"
                        + srcTags.toString() + "</value></property>");
    }

    if (null != selfMerge && selfMerge && originalOutputCollection != null) {
        originalOutputCollection = "mongodb://" + dbserver + "/" + outputDatabase + "."
                + originalOutputCollection;
        out.write(
                "\n\t<property><!-- This jobs output collection for passing into the mapper along with input collection [optional] --><name>infinit.e.selfMerge</name><value>"
                        + originalOutputCollection + "</value></property>");
    }

    // Closing thoughts:
    out.write("\n</configuration>");

    out.flush();
    out.close();
}

From source file:com.images3.data.impl.MongoDBObjectMapper.java

License:Apache License

public PageCursor mapToPageCursor(BasicDBObject source) {
    Page page = new Page(source.getInt("start"), source.getInt("size"));
    return new PageCursor(source.getString("id"), source.getString("previousPageCursorId"), page,
            new Date(source.getLong("creationTime")));
}

From source file:com.images3.data.impl.MongoDBObjectMapper.java

License:Apache License

public ImagePlantOS mapToImagePlantOS(BasicDBObject source) {
    int maximumImageSize = MaximumImageSize.UNLIMITED;
    if (source.containsField("maximumImageSize")) { // containsField: test for the key's presence, not its value
        maximumImageSize = source.getInt("maximumImageSize");
    }
    return new ImagePlantOS(source.getString("id"), source.getString("name"),
            new Date(source.getLong("creationTime")), mapToAmazonS3Bucket((BasicDBObject) source.get("bucket")),
            source.getString("masterTemplateName"), source.getLong("numberOfTemplates"), maximumImageSize);
}

From source file:com.images3.data.impl.MongoDBObjectMapper.java

License:Apache License

public ResizingConfig mapToResizingConfig(BasicDBObject source) {
    return new ResizingConfig(ResizingUnit.valueOf(source.getString("unit")), source.getInt("width"),
            source.getInt("height"), source.getBoolean("isKeepProportions"));
}

From source file:com.images3.data.impl.MongoDBObjectMapper.java

License:Apache License

public ImageDimension mapToImageDimension(BasicDBObject source) {
    return new ImageDimension(source.getInt("width"), source.getInt("height"));
}

From source file:com.sonyericsson.jenkins.plugins.bfa.db.MongoDBKnowledgeBase.java

License:Open Source License

/**
 * Generates a {@link TimePeriod} based on a MongoDB grouping aggregation result.
 * @param result the result to interpret
 * @param intervalSize the interval size, should be set to Calendar.HOUR_OF_DAY,
 * Calendar.DATE or Calendar.MONTH.
 * @return TimePeriod
 */
private TimePeriod generateTimePeriodFromResult(DBObject result, int intervalSize) {
    BasicDBObject groupedAttrs = (BasicDBObject) result.get("_id");
    int month = groupedAttrs.getInt("month");
    int year = groupedAttrs.getInt("year");

    Calendar c = Calendar.getInstance();
    c.set(Calendar.YEAR, year);
    c.set(Calendar.MONTH, month - 1);
    // MongoDB timezone is UTC:
    c.setTimeZone(new SimpleTimeZone(0, "UTC"));

    TimePeriod period = null;
    if (intervalSize == Calendar.HOUR_OF_DAY) {
        int dayOfMonth = groupedAttrs.getInt("dayOfMonth");
        c.set(Calendar.DAY_OF_MONTH, dayOfMonth);
        int hour = groupedAttrs.getInt("hour");
        c.set(Calendar.HOUR_OF_DAY, hour);

        period = new Hour(c.getTime());
    } else if (intervalSize == Calendar.DATE) {
        int dayOfMonth = groupedAttrs.getInt("dayOfMonth");
        c.set(Calendar.DAY_OF_MONTH, dayOfMonth);

        period = new Day(c.getTime());
    } else {
        period = new Month(c.getTime());
    }
    return period;
}
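
For context, a sketch of the kind of grouped aggregation result this method consumes, assuming a grouping stage keyed on year/month/dayOfMonth/hour as read above (the values are illustrative):

BasicDBObject groupedId = new BasicDBObject("year", 2014).append("month", 6)
        .append("dayOfMonth", 12).append("hour", 9);
DBObject result = new BasicDBObject("_id", groupedId);
// generateTimePeriodFromResult(result, Calendar.HOUR_OF_DAY) then yields the Hour for 2014-06-12 09:00 UTC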