Example usage for com.mongodb.util JSON parse

List of usage examples for com.mongodb.util JSON parse

Introduction

On this page you can find example usage for com.mongodb.util.JSON.parse.

Prototype

public static Object parse(final String jsonString) 

Document

Parses a JSON string and returns a corresponding Java object.
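
Before the project examples below, here is a minimal, self-contained sketch of the call; the file name and field values are invented for illustration. A JSON object string parses to a BasicDBObject, which can be cast to DBObject, and malformed input fails at parse time with a JSONParseException. Note that later drivers deprecate com.mongodb.util.JSON in favour of org.bson.Document.parse.

import com.mongodb.DBObject;
import com.mongodb.util.JSON;
import com.mongodb.util.JSONParseException;

public class JsonParseExample {
    public static void main(String[] args) {
        // A JSON object string parses to a BasicDBObject, which implements DBObject
        String json = "{ \"filename\" : \"grid.dat\", \"length\" : 1024 }";
        DBObject doc = (DBObject) JSON.parse(json);
        System.out.println("filename = " + doc.get("filename"));

        // Malformed input (here a truncated document) raises a JSONParseException
        try {
            JSON.parse("{ \"length\" : 1024");
        } catch (JSONParseException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}

The examples that follow all use the same cast-to-DBObject pattern, differing mainly in where the JSON string comes from and how parse failures are handled.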

Usage

From source file: org.apache.camel.component.gridfs.GridFsConsumer.java

License: Apache License

@Override
public void run() {
    DBCursor c = null;
    java.util.Date fromDate = null;

    QueryStrategy s = endpoint.getQueryStrategy();
    boolean usesTimestamp = (s != QueryStrategy.FileAttribute);
    boolean persistsTimestamp = (s == QueryStrategy.PersistentTimestamp
            || s == QueryStrategy.PersistentTimestampAndFileAttribute);
    boolean usesAttribute = (s == QueryStrategy.FileAttribute || s == QueryStrategy.TimeStampAndFileAttribute
            || s == QueryStrategy.PersistentTimestampAndFileAttribute);

    DBCollection ptsCollection = null;
    DBObject persistentTimestamp = null;
    if (persistsTimestamp) {
        ptsCollection = endpoint.getDB().getCollection(endpoint.getPersistentTSCollection());
        // ensure standard indexes as long as collections are small
        try {
            if (ptsCollection.count() < 1000) {
                ptsCollection.createIndex(new BasicDBObject("id", 1));
            }
        } catch (MongoException e) {
            //TODO: Logging
        }
        persistentTimestamp = ptsCollection.findOne(new BasicDBObject("id", endpoint.getPersistentTSObject()));
        if (persistentTimestamp == null) {
            persistentTimestamp = new BasicDBObject("id", endpoint.getPersistentTSObject());
            fromDate = new java.util.Date();
            persistentTimestamp.put("timestamp", fromDate);
            ptsCollection.save(persistentTimestamp);
        }
        fromDate = (java.util.Date) persistentTimestamp.get("timestamp");
    } else if (usesTimestamp) {
        fromDate = new java.util.Date();
    }
    try {
        Thread.sleep(endpoint.getInitialDelay());
        while (isStarted()) {
            if (c == null || c.getCursorId() == 0) {
                if (c != null) {
                    c.close();
                }
                String queryString = endpoint.getQuery();
                DBObject query;
                if (queryString == null) {
                    query = new BasicDBObject();
                } else {
                    query = (DBObject) JSON.parse(queryString);
                }
                if (usesTimestamp) {
                    query.put("uploadDate", new BasicDBObject("$gt", fromDate));
                }
                if (usesAttribute) {
                    query.put(endpoint.getFileAttributeName(), null);
                }
                c = endpoint.getFilesCollection().find(query);
            }
            boolean dateModified = false;
            while (c.hasNext() && isStarted()) {
                GridFSDBFile file = (GridFSDBFile) c.next();
                GridFSDBFile forig = file;
                if (usesAttribute) {
                    file.put(endpoint.getFileAttributeName(), "processing");
                    DBObject q = BasicDBObjectBuilder.start("_id", file.getId()).append("camel-processed", null)
                            .get();
                    forig = (GridFSDBFile) endpoint.getFilesCollection().findAndModify(q, null, null, false,
                            file, true, false);
                }
                if (forig != null) {
                    file = endpoint.getGridFs().findOne(new BasicDBObject("_id", file.getId()));

                    Exchange exchange = endpoint.createExchange();
                    exchange.getIn().setHeader(GridFsEndpoint.GRIDFS_METADATA,
                            JSON.serialize(file.getMetaData()));
                    exchange.getIn().setHeader(Exchange.FILE_CONTENT_TYPE, file.getContentType());
                    exchange.getIn().setHeader(Exchange.FILE_LENGTH, file.getLength());
                    exchange.getIn().setHeader(Exchange.FILE_LAST_MODIFIED, file.getUploadDate());
                    exchange.getIn().setBody(file.getInputStream(), InputStream.class);
                    try {
                        getProcessor().process(exchange);
                        //System.out.println("Processing " + file.getFilename());
                        if (usesAttribute) {
                            forig.put(endpoint.getFileAttributeName(), "done");
                            endpoint.getFilesCollection().save(forig);
                        }
                        if (usesTimestamp) {
                            if (file.getUploadDate().compareTo(fromDate) > 0) {
                                fromDate = file.getUploadDate();
                                dateModified = true;
                            }
                        }
                    } catch (Exception e) {
                        // TODO Auto-generated catch block
                        e.printStackTrace();
                    }
                }
            }
            if (persistsTimestamp && dateModified) {
                persistentTimestamp.put("timestamp", fromDate);
                ptsCollection.save(persistentTimestamp);
            }
            Thread.sleep(endpoint.getDelay());
        }
    } catch (Throwable e1) {
        // TODO Auto-generated catch block
        e1.printStackTrace();
    }
    if (c != null) {
        c.close();
    }
}

From source file: org.apache.camel.component.gridfs.GridFsProducer.java

License: Apache License

public void process(Exchange exchange) throws Exception {
    String operation = endpoint.getOperation();
    if (operation == null) {
        operation = exchange.getIn().getHeader(GridFsEndpoint.GRIDFS_OPERATION, String.class);
    }
    if (operation == null || "create".equals(operation)) {
        final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class);
        Long chunkSize = exchange.getIn().getHeader(GridFsEndpoint.GRIDFS_CHUNKSIZE, Long.class);

        InputStream ins = exchange.getIn().getMandatoryBody(InputStream.class);
        GridFSInputFile gfsFile = endpoint.getGridFs().createFile(ins, filename, true);
        if (chunkSize != null && chunkSize > 0) {
            gfsFile.setChunkSize(chunkSize);
        }
        final String ct = exchange.getIn().getHeader(Exchange.CONTENT_TYPE, String.class);
        if (ct != null) {
            gfsFile.setContentType(ct);
        }
        String metaData = exchange.getIn().getHeader(GridFsEndpoint.GRIDFS_METADATA, String.class);
        DBObject dbObject = (DBObject) JSON.parse(metaData);
        gfsFile.setMetaData(dbObject);
        gfsFile.save();
        exchange.getIn().setHeader(Exchange.FILE_NAME_PRODUCED, gfsFile.getFilename());
    } else if ("remove".equals(operation)) {
        final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class);
        endpoint.getGridFs().remove(filename);
    } else if ("findOne".equals(operation)) {
        final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class);
        GridFSDBFile file = endpoint.getGridFs().findOne(filename);
        if (file != null) {
            exchange.getIn().setHeader(GridFsEndpoint.GRIDFS_METADATA, JSON.serialize(file.getMetaData()));
            exchange.getIn().setHeader(Exchange.FILE_CONTENT_TYPE, file.getContentType());
            exchange.getIn().setHeader(Exchange.FILE_LENGTH, file.getLength());
            exchange.getIn().setHeader(Exchange.FILE_LAST_MODIFIED, file.getUploadDate());
            exchange.getIn().setBody(file.getInputStream(), InputStream.class);
        } else {
            throw new FileNotFoundException("No GridFS file for " + filename);
        }
    } else if ("listAll".equals(operation)) {
        final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class);
        DBCursor cursor;
        if (filename == null) {
            cursor = endpoint.getGridFs().getFileList();
        } else {
            cursor = endpoint.getGridFs().getFileList(new BasicDBObject("filename", filename));
        }
        exchange.getIn().setBody(new DBCursorFilenameReader(cursor), Reader.class);
    } else if ("count".equals(operation)) {
        final String filename = exchange.getIn().getHeader(Exchange.FILE_NAME, String.class);
        DBCursor cursor;
        if (filename == null) {
            cursor = endpoint.getGridFs().getFileList();
        } else {
            cursor = endpoint.getGridFs().getFileList(new BasicDBObject("filename", filename));
        }
        exchange.getIn().setBody(cursor.count(), Integer.class);
    }

}

From source file: org.apache.camel.component.mongodb.converters.MongoDbBasicConverters.java

License: Apache License

@Converter
public static DBObject fromStringToDBObject(String s) {
    DBObject answer = null;
    try {
        answer = (DBObject) JSON.parse(s);
    } catch (Exception e) {
        LOG.warn(
                "String -> DBObject conversion selected, but the following exception occurred. Returning null.",
                e);
    }

    return answer;
}

From source file: org.apache.camel.component.mongodb.converters.MongoDbBasicConverters.java

License: Apache License

@Converter
public static DBObject fromInputStreamToDBObject(InputStream is, Exchange exchange) {
    DBObject answer = null;
    try {
        byte[] input = IOConverter.toBytes(is);

        if (isBson(input)) {
            BSONCallback callback = new JSONCallback();
            new BasicBSONDecoder().decode(input, callback);
            answer = (DBObject) callback.get();
        } else {
            answer = (DBObject) JSON.parse(IOConverter.toString(input, exchange));
        }
    } catch (Exception e) {
        LOG.warn(
                "String -> DBObject conversion selected, but the following exception occurred. Returning null.",
                e);
    } finally {
        // we need to make sure to close the input stream
        IOHelper.close(is, "InputStream", LOG);
    }
    return answer;
}

From source file: org.apache.hadoop.contrib.mongoreduce.MongoInputFormat.java

License: Apache License

/**
 * A JSON string representing a MongoDB query to be performed by each 
 * MongoDB server. The matching documents are passed to Map()
 *
 * @param job
 * @param query
 */
public static void setQuery(Job job, String query) {
    // quickly validate query
    JSON.parse(query);
    job.getConfiguration().set("mongo.input.query", query);
}

From source file: org.apache.hadoop.contrib.mongoreduce.MongoInputFormat.java

License: Apache License

/**
 * A JSON string representing the fields selected from documents
 * returned from MongoDB
 * 
 * MongoDB performs this selection before they are passed to Map()
 * 
 * @param job
 * @param select
 */
public static void setSelect(Job job, String select) {
    JSON.parse(select);
    job.getConfiguration().set("mongo.input.select", select);
}
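
To put the two helpers above in context, here is a hedged sketch of how a job might be configured with them. The database, collection, and field names are invented for illustration, and the surrounding job setup is an assumption rather than code taken from the contrib project.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.contrib.mongoreduce.MongoInputFormat;
import org.apache.hadoop.mapreduce.Job;

public class MongoInputSetup {
    public static Job newJob() throws Exception {
        Configuration conf = new Configuration();
        conf.set("mongo.input.database", "logs");     // hypothetical database name
        conf.set("mongo.input.collection", "events"); // hypothetical collection name

        Job job = Job.getInstance(conf, "mongoreduce-example");
        // Register the input format (assumed to extend the new-API InputFormat)
        job.setInputFormatClass(MongoInputFormat.class);

        // Both helpers run JSON.parse() on their argument first, so a malformed
        // query or projection fails fast here instead of on the MongoDB servers.
        MongoInputFormat.setQuery(job, "{ \"level\" : \"ERROR\" }");
        MongoInputFormat.setSelect(job, "{ \"message\" : 1, \"timestamp\" : 1 }");
        return job;
    }
}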

From source file: org.apache.hadoop.contrib.mongoreduce.MongoRecordReader.java

License: Apache License

private void connect(String location, Configuration conf) throws IOException {

    String[] parts = location.split(":");

    // default port for sharded server
    int port = 27018;
    if (parts.length > 1)
        port = Integer.parseInt(parts[1]);

    Mongo mongo = new Mongo(parts[0], port);

    // figure out if we can read from this server

    // allow reading from secondaries
    mongo.slaveOk();

    String database = conf.get("mongo.input.database");
    String collection = conf.get("mongo.input.collection");
    String query = conf.get("mongo.input.query", "");
    String select = conf.get("mongo.input.select", "");

    if (!query.equals("")) {
        DBObject q = (DBObject) JSON.parse(query);

        if (!select.equals("")) {
            DBObject s = (DBObject) JSON.parse(select);
            cursor = mongo.getDB(database).getCollection(collection).find(q, s);
        } else {
            cursor = mongo.getDB(database).getCollection(collection).find(q);
        }
    } else {
        if (!select.equals("")) {
            DBObject s = (DBObject) JSON.parse(select);
            cursor = mongo.getDB(database).getCollection(collection).find(new BasicDBObject(), s);
        } else {
            cursor = mongo.getDB(database).getCollection(collection).find();
        }
    }

    cursor.addOption(Bytes.QUERYOPTION_NOTIMEOUT);

    // thanks mongo, for this handy method
    totalResults = cursor.count();
    resultsRead = 0.0f;

}

From source file: org.apache.hadoop.contrib.mongoreduce.MongoStreamRecordWriter.java

License: Apache License

public void write(Text key, Text value) {

    DBObject objValue = (DBObject) JSON.parse(value.toString());

    objValue.put("_id", key.toString());

    coll.save(objValue);
}

From source file: org.apache.nifi.processors.mongodb.PutMongo.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();

    final Charset charset = Charset.forName(context.getProperty(CHARACTER_SET).getValue());
    final String mode = context.getProperty(MODE).getValue();
    final String updateMode = context.getProperty(UPDATE_MODE).getValue();
    final WriteConcern writeConcern = getWriteConcern(context);

    final MongoCollection<Document> collection = getCollection(context, flowFile)
            .withWriteConcern(writeConcern);

    try {
        // Read the contents of the FlowFile into a byte array
        final byte[] content = new byte[(int) flowFile.getSize()];
        session.read(flowFile, in -> StreamUtils.fillBuffer(in, content, true));

        // parse
        final Object doc = (mode.equals(MODE_INSERT)
                || (mode.equals(MODE_UPDATE) && updateMode.equals(UPDATE_WITH_DOC.getValue())))
                        ? Document.parse(new String(content, charset))
                        : JSON.parse(new String(content, charset));

        if (MODE_INSERT.equalsIgnoreCase(mode)) {
            collection.insertOne((Document) doc);
            logger.info("inserted {} into MongoDB", new Object[] { flowFile });
        } else {
            // update
            final boolean upsert = context.getProperty(UPSERT).asBoolean();
            final String updateKey = context.getProperty(UPDATE_QUERY_KEY).getValue();

            Object keyVal = ((Map) doc).get(updateKey);
            if (updateKey.equals("_id") && ObjectId.isValid(((String) keyVal))) {
                keyVal = new ObjectId((String) keyVal);
            }

            final Document query = new Document(updateKey, keyVal);

            if (updateMode.equals(UPDATE_WITH_DOC.getValue())) {
                collection.replaceOne(query, (Document) doc, new UpdateOptions().upsert(upsert));
            } else {
                BasicDBObject update = (BasicDBObject) doc;
                update.remove(updateKey);
                collection.updateOne(query, update, new UpdateOptions().upsert(upsert));
            }
            logger.info("updated {} into MongoDB", new Object[] { flowFile });
        }

        session.getProvenanceReporter().send(flowFile, getURI(context));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (Exception e) {
        logger.error("Failed to insert {} into MongoDB due to {}", new Object[] { flowFile, e }, e);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}

From source file: org.apache.rya.forwardchain.strategy.MongoPipelineStrategy.java

License: Apache License

/**
 * Execute a CONSTRUCT rule by converting it into a pipeline, iterating
 * through the resulting documents, and inserting them back to the data
 * store as new triples. If pipeline conversion fails, falls back on
 * default execution strategy.
 * @param rule A construct query rule; not null.
 * @param metadata StatementMetadata to attach to new triples; not null.
 * @return The number of new triples inferred.
 * @throws ForwardChainException if execution fails.
 */
@Override
public long executeConstructRule(AbstractConstructRule rule, StatementMetadata metadata)
        throws ForwardChainException {
    Preconditions.checkNotNull(rule);
    logger.info("Applying inference rule " + rule + "...");
    long timestamp = System.currentTimeMillis();
    // Get a pipeline that turns individual matches into triples
    List<Bson> pipeline = null;
    try {
        int requireSourceLevel = 0;
        if (!usedBackup) {
            // If we can assume derivation levels are set properly, we can optimize by
            // pruning any derived fact whose sources are all old information. (i.e. we can
            // infer that the pruned fact would have already been derived in a previous
            // step.) But if the backup strategy has ever been used, the source triples aren't
            // guaranteed to have derivation level set.
            requireSourceLevel = requiredLevel;
        }
        pipeline = toPipeline(rule, requireSourceLevel, timestamp);
    } catch (ForwardChainException e) {
        logger.error(e);
    }
    if (pipeline == null) {
        if (backup == null) {
            logger.error("Couldn't convert " + rule + " to pipeline:");
            for (String line : rule.getQuery().toString().split("\n")) {
                logger.error("\t" + line);
            }
            throw new UnsupportedOperationException("Couldn't convert query to pipeline.");
        } else {
            logger.debug("Couldn't convert " + rule + " to pipeline:");
            for (String line : rule.getQuery().toString().split("\n")) {
                logger.debug("\t" + line);
            }
            logger.debug("Using fallback strategy.");
            usedBackup = true;
            return backup.executeConstructRule(rule, metadata);
        }
    }
    // Execute the pipeline
    for (Bson step : pipeline) {
        logger.debug("\t" + step.toString());
    }
    LongAdder count = new LongAdder();
    baseCollection.aggregate(pipeline).allowDiskUse(true).batchSize(PIPELINE_BATCH_SIZE)
            .forEach(new Block<Document>() {
                @Override
                public void apply(Document doc) {
                    final DBObject dbo = (DBObject) JSON.parse(doc.toJson());
                    RyaStatement rstmt = storageStrategy.deserializeDBObject(dbo);
                    if (!statementExists(rstmt)) {
                        count.increment();
                        doc.replace(SimpleMongoDBStorageStrategy.STATEMENT_METADATA, metadata.toString());
                        try {
                            batchWriter.addObjectToQueue(doc);
                        } catch (MongoDbBatchWriterException e) {
                            logger.error("Couldn't insert " + rstmt, e);
                        }
                    }
                }
            });
    try {
        batchWriter.flush();
    } catch (MongoDbBatchWriterException e) {
        throw new ForwardChainException("Error writing to Mongo", e);
    }
    logger.info("Added " + count + " new statements.");
    executionTimes.compute(rule, (r, previous) -> {
        if (previous != null && previous > timestamp) {
            return previous;
        } else {
            return timestamp;
        }
    });
    return count.longValue();
}