Example usage for com.mongodb BulkWriteOperation insert

List of usage examples for com.mongodb BulkWriteOperation insert

Introduction

In this page you can find the example usage for com.mongodb BulkWriteOperation insert.

Prototype

public void insert(final DBObject document) 

Source Link

Document

Adds an insert request to the bulk operation.

Usage

From source file:fr.gouv.vitam.mdbes.MainIngestMDBESFromFile.java

License:Open Source License

@Override
public void run() {
    if (file == null) {
        // ES-only mode: no MongoDB input file was assigned to this worker,
        // so (re)build the ElasticSearch index from the file list instead.
        try {
            if (files == null || files.length == 0) {
                // Nothing to index; also guards files[files.length - 1] below.
                return;
            }
            // Every file except the last: plain ES indexing through the
            // shared esIndex map, flushed per file.
            for (int i = 0; i < files.length - 1; i++) {
                System.out.println("ESFile: " + files[i]);
                final HashMap<String, String> esIndex = new HashMap<>();
                // try-with-resources closes the reader (and underlying
                // stream) even if JSON parsing or indexing throws.
                try (final BufferedReader br = new BufferedReader(
                        new InputStreamReader(new FileInputStream(files[i])))) {
                    String strLine;
                    // One JSON document per line.
                    while ((strLine = br.readLine()) != null) {
                        final BSONObject bson = (BSONObject) JSON.parse(strLine);
                        ElasticSearchAccess.addEsIndex(original, model, esIndex, bson);
                    }
                }
                if (!esIndex.isEmpty()) {
                    System.out.println("Last bulk ES");
                    original.addEsEntryIndex(true, esIndex, model);
                    esIndex.clear();
                }
            }
            // Last file might contain already inserted but to-be-updated DAip:
            // use the addEsIndex variant without the shared index map.
            final int last = files.length - 1;
            System.out.println("ESFile: " + files[last]);
            try (final BufferedReader br = new BufferedReader(
                    new InputStreamReader(new FileInputStream(files[last])))) {
                String strLine;
                while ((strLine = br.readLine()) != null) {
                    final BSONObject bson = (BSONObject) JSON.parse(strLine);
                    ElasticSearchAccess.addEsIndex(original, model, bson);
                }
            }
        } catch (final IOException e) {
            e.printStackTrace();
        }
        return;
    }
    // MongoDB bulk-ingest mode: stream one JSON-per-line file into the DAip
    // collection using unordered bulk writes of LIMIT_MDB_NEW_INDEX documents.
    // (The former dead `if (false)` Tokumx variant using collection.insert()
    // was removed.)
    MongoDbAccess dbvitam = null;
    try (final BufferedReader br = new BufferedReader(
            new InputStreamReader(new FileInputStream(file)))) {
        System.out.println("MDFile: " + file);
        dbvitam = new MongoDbAccess(mongoClient, database, esbase, unicast, false);
        // now ingest metaaip/metafield/data
        final long date11 = System.currentTimeMillis();
        String strLine;
        int nb = 0;
        BulkWriteOperation bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
        while ((strLine = br.readLine()) != null) {
            final DBObject bson = (DBObject) JSON.parse(strLine);
            bulk.insert(bson);
            nb++;
            if (nb % GlobalDatas.LIMIT_MDB_NEW_INDEX == 0) {
                final BulkWriteResult result = bulk.execute();
                // A BulkWriteOperation is one-shot: re-initialize after execute().
                bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
                if (result.getInsertedCount() != nb) {
                    LOGGER.error("Wrong bulk op: " + result);
                }
                MainIngestFile.cptMaip.addAndGet(nb);
                nb = 0;
                System.out.print(".");
            }
        }
        if (nb != 0) {
            // Flush the remaining partial batch.
            final BulkWriteResult result = bulk.execute();
            if (result.getInsertedCount() != nb) {
                LOGGER.error("Wrong bulk op: " + result);
            }
            MainIngestFile.cptMaip.addAndGet(nb);
        }
        final long date12 = System.currentTimeMillis();
        loadt.addAndGet(date12 - date11);
    } catch (final InvalidUuidOperationException | IOException e) {
        // FileNotFoundException is an IOException, so it is covered here.
        e.printStackTrace();
    } finally {
        // Streams are closed by try-with-resources; only the DB handle
        // needs explicit release (the old code could NPE here when the
        // FileInputStream constructor had thrown and `in` was still null).
        if (dbvitam != null) {
            dbvitam.close();
        }
    }
}

From source file:fr.gouv.vitam.mdbes.MainIngestMDBFromFile.java

License:Open Source License

/**
 * Runs one ingest pass: a single file is ingested inline via unordered bulk
 * writes; multiple files are dispatched to one worker thread each, then the
 * aggregate timings are printed.
 *
 * @param dbvitam database access used for bulk inserts and size reporting
 */
private static final void runOnce(final MongoDbAccess dbvitam)
        throws InterruptedException, InstantiationException, IllegalAccessException, IOException {
    System.out.println("Load starting... ");
    final int nbThread = ingest.length;

    final long date11 = System.currentTimeMillis();
    if (ingest.length == 1) {
        // Single file: ingest inline on this thread. try-with-resources
        // closes the reader even on exception (the old code leaked it).
        try (final BufferedReader br = new BufferedReader(
                new InputStreamReader(new FileInputStream(ingest[0])))) {
            String strLine;
            int nb = 0;
            BulkWriteOperation bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
            while ((strLine = br.readLine()) != null) {
                final DBObject bson = (DBObject) JSON.parse(strLine);
                bulk.insert(bson);
                nb++;
                if (nb % GlobalDatas.LIMIT_MDB_NEW_INDEX == 0) {
                    final BulkWriteResult result = bulk.execute();
                    final int check = result.getInsertedCount();
                    // "x" marks a batch whose inserted count did not match.
                    if (check != nb) {
                        System.out.print("x");
                    } else {
                        System.out.print(".");
                    }
                    // Bulk ops are one-shot: re-initialize after execute().
                    bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
                    MainIngestFile.cptMaip.addAndGet(check);
                    nb = 0;
                }
            }
            if (nb != 0) {
                // Flush the last partial batch; count what was actually
                // inserted, consistent with the in-loop accounting above.
                final BulkWriteResult result = bulk.execute();
                MainIngestFile.cptMaip.addAndGet(result.getInsertedCount());
            }
        }
    } else {
        // Multiple files: one worker per file.
        ExecutorService executorService = Executors.newFixedThreadPool(ingest.length);
        for (int i = 0; i < ingest.length; i++) {
            MainIngestMDBFromFile ingestrun = new MainIngestMDBFromFile();
            ingestrun.file = ingest[i];
            executorService.execute(ingestrun);
            Thread.sleep(200);
        }
        Thread.sleep(1000);
        executorService.shutdown();
        while (!executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)) {
            ;
        }
        System.out.println("Load ended");
        final long nbBigM = dbvitam.getDaipSize();
        final long nbBigD = dbvitam.getPaipSize();
        System.out.println("\n Big Test (" + nbThread + " nb MAIP: " + MainIngestFile.cptMaip.get()
                + ") with MAIP: " + nbBigM + " DATA: " + nbBigD + " => Load:"
                + (loadt.get()) / ((float) MainIngestFile.cptMaip.get() * nbThread));

        System.out.println("\nThread;nbLoad;nbTotal;Load");
        System.out.println(nbThread + ";" + MainIngestFile.cptMaip.get() + ";" + nbBigM + ";"
                + (loadt.get()) / ((float) MainIngestFile.cptMaip.get() * nbThread));
    }
    final long date12 = System.currentTimeMillis();
    MainIngestMDBFromFile.loadt.set(date12 - date11);

    System.out.println("Load ended");
    /*
     * System.out.println("All elements\n================================================================");
     * DbVitam.printStructure(dbvitam);
     */
    // NOTE(review): divides by cptMaip — zero documents would yield NaN/Inf
    // in the printed averages; confirm inputs are never empty.
    final long nbBigM = dbvitam.getDaipSize();
    final long nbBigD = dbvitam.getPaipSize();
    System.out.println("\n Big Test (" + nbThread + " Threads chacune " + MainIngestFile.nb + " nb MAIP: "
            + MainIngestFile.cptMaip.get() + ") with MAIP: " + nbBigM + " DATA: " + nbBigD + " => Load:"
            + (MainIngestMDBFromFile.loadt.get()) / ((float) MainIngestFile.cptMaip.get()));

    System.out.println("\nThread;nbLoad;nbTotal;Load");
    System.out.println(nbThread + ";" + MainIngestFile.cptMaip.get() + ";" + nbBigM + ";"
            + (MainIngestMDBFromFile.loadt.get()) / ((float) MainIngestFile.cptMaip.get()));
}

From source file:fr.gouv.vitam.mdbes.MainIngestMDBFromFile.java

License:Open Source License

@Override
public void run() {
    // Worker entry point: stream one JSON-per-line file into the DAip
    // collection using unordered bulk writes of LIMIT_MDB_NEW_INDEX
    // documents. (The former dead `if (false)` Tokumx variant using
    // collection.insert() was removed.)
    MongoDbAccess dbvitam = null;
    try (final BufferedReader br = new BufferedReader(
            new InputStreamReader(new FileInputStream(file)))) {
        dbvitam = new MongoDbAccess(mongoClient, database, esbase, unicast, false);
        // now ingest metaaip/metafield/data
        final long date11 = System.currentTimeMillis();
        String strLine;
        int nb = 0;
        BulkWriteOperation bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
        while ((strLine = br.readLine()) != null) {
            final DBObject bson = (DBObject) JSON.parse(strLine);
            bulk.insert(bson);
            nb++;
            if (nb % GlobalDatas.LIMIT_MDB_NEW_INDEX == 0) {
                final BulkWriteResult result = bulk.execute();
                // Bulk ops are one-shot: re-initialize after execute().
                bulk = dbvitam.daips.collection.initializeUnorderedBulkOperation();
                if (result.getInsertedCount() != nb) {
                    LOGGER.error("Wrong bulk op: " + result);
                }
                MainIngestFile.cptMaip.addAndGet(nb);
                nb = 0;
                System.out.print(".");
            }
        }
        if (nb != 0) {
            // Flush the remaining partial batch.
            final BulkWriteResult result = bulk.execute();
            if (result.getInsertedCount() != nb) {
                LOGGER.error("Wrong bulk op: " + result);
            }
            MainIngestFile.cptMaip.addAndGet(nb);
        }
        final long date12 = System.currentTimeMillis();
        loadt.addAndGet(date12 - date11);
    } catch (final InvalidUuidOperationException | IOException e) {
        // FileNotFoundException is an IOException, so it is covered here.
        e.printStackTrace();
    } finally {
        // Streams are closed by try-with-resources; only the DB handle
        // needs explicit release (the old code could NPE on in.close()
        // when the FileInputStream constructor had thrown).
        if (dbvitam != null) {
            dbvitam.close();
        }
    }
}

From source file:io.hipstogram.trident.mongodb.operation.Insert.java

License:Apache License

@Override
public void addToBulkOperation(BulkWriteOperation bulk) {
    // Queue this operation's document as an insert on the given bulk op.
    bulk.insert(this.dbObject);
}

From source file:mongodb.performance.MongoDBPerformance.java

/**
 * MongoDB CRUD micro-benchmark: bulk-inserts the documents from an input
 * file, then times a multi-document update, a sorted projection scan, and
 * a full delete on the "ads" collection.
 *
 * @param args the command line arguments; args[0] selects the input file
 *             {@code .\resources\MongoDB<args[0]>.txt}
 */
public static void main(String[] args) throws UnknownHostException, FileNotFoundException, IOException {
    if (args.length == 0) {
        System.out.println("Parmetro no informado!");
        System.exit(-1);
    }
    System.out.println("Parmetro: " + args[0]);

    MongoClient mongoClient = new MongoClient();
    //MongoClient mongoClient = new MongoClient( "54.172.218.64" , 27017 );
    DB db = mongoClient.getDB("myDatabase");

    DBCollection collection = db.getCollection("ads");
    collection.drop();

    BulkWriteOperation builder = collection.initializeUnorderedBulkOperation();

    // Insert
    // NOTE(review): Windows-specific relative path separator kept as-is.
    // Time start
    long start = System.currentTimeMillis();

    int queued = 0;
    // try-with-resources closes the reader even if parsing throws
    // (the old code leaked it on any exception).
    try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(
            new FileInputStream(".\\resources\\MongoDB" + args[0] + ".txt")))) {
        String line;
        while ((line = bufferedReader.readLine()) != null) {
            DBObject bson = (DBObject) JSON.parse(line);
            builder.insert(bson);
            queued++;
        }
    }
    // Executing an empty bulk operation throws in the driver; an empty
    // input file should simply insert nothing.
    if (queued > 0) {
        builder.execute();
    }
    //Time end
    long elapsed = System.currentTimeMillis() - start;
    System.out.println("[insert] Time elapsed: " + elapsed + " ms");

    // Update
    // Time start    
    start = System.currentTimeMillis();
    collection.updateMulti(new BasicDBObject(),
            new BasicDBObject("$set", new BasicDBObject().append("ano", 2006)));
    // Time end
    elapsed = System.currentTimeMillis() - start;
    System.out.println("[update] Time elapsed: " + elapsed + " ms");

    // Select
    // Time start    
    start = System.currentTimeMillis();
    BasicDBObject keys = new BasicDBObject();
    keys.put("_id", 1);
    keys.put("modeloCarro.marca", 1);
    keys.put("modeloCarro.nome", 1);
    keys.put("uf", 1);
    keys.put("placa_carro", 1);
    keys.put("qtd_portas", 1);
    keys.put("cambio", 1);
    keys.put("combustivel", 1);
    keys.put("cor", 1);
    keys.put("km", 1);
    keys.put("valor", 1);
    keys.put("detalhe", 1);
    BasicDBObject sort = new BasicDBObject("_id", 1);

    DBCursor cursor = collection.find(new BasicDBObject(), keys).sort(sort);
    try {
        while (cursor.hasNext()) {
            cursor.next();
        }
    } finally {
        // Release the server-side cursor even if iteration fails.
        cursor.close();
    }
    // Time end
    elapsed = System.currentTimeMillis() - start;
    System.out.println("[select] Time elapsed: " + elapsed + " ms");

    // Delete
    // Time start    
    start = System.currentTimeMillis();
    collection.remove(new BasicDBObject());
    // Time end
    elapsed = System.currentTimeMillis() - start;
    System.out.println("[delete] Time elapsed: " + elapsed + " ms");

    // Release the client's connection pool.
    mongoClient.close();
}

From source file:org.easybatch.extensions.mongodb.MongoDBBatchWriter.java

License:Open Source License

@Override
public Batch processRecord(final Batch batch) throws RecordWritingException {
    // Writes the whole batch to MongoDB as one ordered bulk insert.
    List<Record> records = batch.getPayload();
    Collection<DBObject> documents = asDocuments(records);

    // Executing an empty bulk operation throws in the Mongo driver;
    // an empty batch is a no-op, not a write error.
    if (documents.isEmpty()) {
        return batch;
    }

    BulkWriteOperation bulkWriteOperation = collection.initializeOrderedBulkOperation();
    for (DBObject document : documents) {
        bulkWriteOperation.insert(document);
    }

    try {
        bulkWriteOperation.execute();
        return batch;
    } catch (Exception e) {
        throw new RecordWritingException(format("Unable to write documents [%s] to Mongo DB server", documents),
                e);
    }
}

From source file:org.easybatch.integration.mongodb.MongoDBMultiRecordWriter.java

License:Open Source License

@Override
protected void writeRecord(List<DBObject> documents) throws RecordProcessingException {
    // Writes all documents to MongoDB as one ordered bulk insert.
    // Executing an empty bulk operation throws in the Mongo driver;
    // an empty list is a no-op, not a processing error.
    if (documents.isEmpty()) {
        return;
    }

    BulkWriteOperation bulkWriteOperation = collection.initializeOrderedBulkOperation();
    for (DBObject document : documents) {
        bulkWriteOperation.insert(document);
    }

    try {
        bulkWriteOperation.execute();
    } catch (Exception e) {
        throw new RecordProcessingException(
                format("Unable to write documents [%s] to Mongo DB server", documents), e);
    }
}

From source file:org.geogit.storage.mongo.MongoObjectDatabase.java

License:Open Source License

@Override
public void putAll(Iterator<? extends RevObject> objects, BulkOpListener listener) {
    // Bulk-inserts all objects in batches of bulkSize documents, submitting
    // each batch to the executor as an InsertTask and throttling to at most
    // maxRunningTasks batches in flight at a time.
    Preconditions.checkNotNull(executor, "executor service not set");
    if (!objects.hasNext()) {
        return; // nothing to insert
    }

    final int bulkSize = 1000;
    final int maxRunningTasks = 10;

    // Shared flag: a failure here tells in-flight InsertTasks to abort.
    final AtomicBoolean cancelCondition = new AtomicBoolean();

    List<ObjectId> ids = Lists.newArrayListWithCapacity(bulkSize);
    List<Future<?>> runningTasks = new ArrayList<Future<?>>(maxRunningTasks);

    BulkWriteOperation bulkOperation = collection.initializeOrderedBulkOperation();
    try {
        while (objects.hasNext()) {
            RevObject object = objects.next();
            bulkOperation.insert(toDocument(object));

            // Track ids alongside the bulk op so the InsertTask can report
            // per-object results to the listener.
            ids.add(object.getId());

            // Dispatch when the batch is full, or on the very last object.
            if (ids.size() == bulkSize || !objects.hasNext()) {
                InsertTask task = new InsertTask(bulkOperation, listener, ids, cancelCondition);
                runningTasks.add(executor.submit(task));

                if (objects.hasNext()) {
                    // Start a fresh bulk op and id list for the next batch
                    // (bulk ops are one-shot once handed to a task).
                    bulkOperation = collection.initializeOrderedBulkOperation();
                    ids = Lists.newArrayListWithCapacity(bulkSize);
                }
            }
            if (runningTasks.size() == maxRunningTasks) {
                // Throttle: block until the current window of tasks drains.
                waitForTasks(runningTasks);
            }
        }
        // Wait for any remaining tasks before returning.
        waitForTasks(runningTasks);
    } catch (RuntimeException e) {
        // Signal running tasks to cancel before propagating the failure.
        cancelCondition.set(true);
        throw e;
    }
}

From source file:org.lucee.mongodb.DBCollectionImpl.java

License:Open Source License

@Override
public Object call(PageContext pc, Key methodName, Object[] args) throws PageException {

    // aggregate//  www  . j  a  v  a2  s .  com
    if (methodName.equals("aggregate")) {
        boolean hasOptions = false;
        AggregationOptions options = null;
        int len = checkArgLength("aggregate", args, 1, -1); // no length limitation
        List<DBObject> pipeline = new ArrayList<DBObject>();
        // Pipeline array as single argument
        if (len == 1 && decision.isArray(args[0])) {
            Array arr = caster.toArray(args[0]);
            if (arr.size() == 0)
                throw exp.createApplicationException(
                        "the array passed to the function aggregate needs at least 1 element");

            Iterator<Object> it = arr.valueIterator();
            while (it.hasNext()) {
                pipeline.add(toDBObject(it.next()));
            }
        } else {
            // First argument is pipeline of operations, second argument is struct of options --> returns cursor!
            if (len == 2 && decision.isArray(args[0]) && decision.isStruct(args[1])) {
                Array arr = caster.toArray(args[0]);
                Iterator<Object> it = arr.valueIterator();
                while (it.hasNext()) {
                    pipeline.add(toDBObject(it.next()));
                }

                hasOptions = true;
                // options builder
                AggregationOptions.Builder optbuilder = AggregationOptions.builder()
                        .outputMode(AggregationOptions.OutputMode.CURSOR);

                DBObject dboOpts = toDBObject(args[1]);
                if (dboOpts.containsField("allowDiskUse")) {
                    if (!decision.isBoolean(dboOpts.get("allowDiskUse")))
                        throw exp.createApplicationException("allowDiskUse in options must be boolean value");

                    optbuilder = optbuilder.allowDiskUse(caster.toBooleanValue(dboOpts.get("allowDiskUse")));
                }
                if (dboOpts.containsField("cursor")) {
                    if (!decision.isStruct(dboOpts.get("cursor")))
                        throw exp.createApplicationException(
                                "cursor in options must be struct with optional key batchSize");

                    DBObject cursoropts = toDBObject(dboOpts.get("cursor"));
                    if (cursoropts.containsField("batchSize")) {
                        if (!decision.isNumeric(cursoropts.get("batchSize")))
                            throw exp.createApplicationException("cursor.batchSize in options must be integer");

                        optbuilder = optbuilder.batchSize(caster.toIntValue(cursoropts.get("batchSize")));
                    }
                }

                options = optbuilder.build();
            }
            // First argument is first operation, second argument is array of additional operations
            else if (len == 2 && decision.isArray(args[1])) {
                Array arr = caster.toArray(args[1]);
                pipeline.add(toDBObject(args[0]));
                Iterator<Object> it = arr.valueIterator();
                while (it.hasNext()) {
                    pipeline.add(toDBObject(it.next()));
                }
            }
            // N arguments of pipeline operations
            else {
                for (int i = 0; i < len; i++) {
                    pipeline.add(toDBObject(args[i]));
                }
            }
        }

        if (hasOptions) {
            // returns Cursor - requires >= MongoDB 2.6
            return toCFML(coll.aggregate(pipeline, options));
        } else {
            // returns AggregationOutput
            return toCFML(coll.aggregate(pipeline));
        }
    }
    // count
    if (methodName.equals("count")) {
        int len = checkArgLength("count", args, 0, 1);
        if (len == 0) {
            return toCFML(coll.count());
        } else if (len == 1) {
            return toCFML(coll.count(toDBObject(args[0])));
        }
    }
    // dataSize
    if (methodName.equals("dataSize")) {
        checkArgLength("dataSize", args, 0, 0);
        return toCFML(coll.getStats().get("size"));
    }

    // distinct
    if (methodName.equals("distinct")) {
        int len = checkArgLength("distinct", args, 1, 2);
        if (len == 1) {
            return toCFML(coll.distinct(caster.toString(args[0])));
        } else if (len == 2) {
            return toCFML(coll.distinct(caster.toString(args[0]), toDBObject(args[1])));
        }
    }
    // drop
    if (methodName.equals("drop")) {
        checkArgLength("drop", args, 0, 0);
        coll.drop();
        return null;
    }

    // dropIndex
    if (methodName.equals("dropIndex")) {
        checkArgLength("dropIndex", args, 1, 1);
        DBObject dbo = toDBObject(args[0], null);
        if (dbo != null)
            coll.dropIndex(dbo);
        else
            coll.dropIndex(caster.toString(args[0]));

        return null;
    }
    // dropIndexes
    if (methodName.equals("dropIndexes")) {
        int len = checkArgLength("dropIndexes", args, 0, 1);
        if (len == 0) {
            coll.dropIndexes();
            return null;
        } else if (len == 1) {
            coll.dropIndexes(caster.toString(args[0]));
            return null;
        }
    }

    // createIndex
    if (methodName.equals("createIndex") || methodName.equals("ensureIndex")) {
        int len = checkArgLength("createIndex", args, 1, 3);
        if (len == 1) {
            DBObject dbo = toDBObject(args[0], null);
            if (dbo != null)
                coll.createIndex(dbo);
            else
                coll.createIndex(caster.toString(args[0]));
            return null;
        }
        if (len == 2) {
            DBObject p1 = toDBObject(args[0]);
            DBObject p2 = toDBObject(args[1], null);
            if (p2 != null)
                coll.createIndex(p1, p2);
            else
                coll.createIndex(p1, caster.toString(args[1]));
            return null;
        } else if (len == 3) {
            coll.createIndex(toDBObject(args[0]), caster.toString(args[1]), caster.toBooleanValue(args[2]));
            return null;
        }
    }

    // getStats
    if (methodName.equals("getStats") || methodName.equals("stats")) {
        checkArgLength("getStats", args, 0, 0);
        return toCFML(coll.getStats());
    }

    // getIndexes
    if (methodName.equals("getIndexes") || methodName.equals("getIndexInfo")) {
        checkArgLength(methodName.getString(), args, 0, 0);
        return toCFML(coll.getIndexInfo());
    }

    // getWriteConcern
    if (methodName.equals("getWriteConcern")) {
        checkArgLength("getWriteConcern", args, 0, 0);
        return toCFML(coll.getWriteConcern());
    }

    // find
    if (methodName.equals("find")) {
        int len = checkArgLength("find", args, 0, 3);
        DBCursor cursor = null;
        if (len == 0) {
            cursor = coll.find();
        } else if (len == 1) {
            cursor = coll.find(toDBObject(args[0]));
        } else if (len == 2) {
            cursor = coll.find(toDBObject(args[0]), toDBObject(args[1]));
        } else if (len == 3) {
            cursor = coll.find(toDBObject(args[0]), toDBObject(args[1])).skip(caster.toIntValue(args[2]));
        }

        return toCFML(cursor);
    }
    // findOne
    else if (methodName.equals("findOne")) {
        int len = checkArgLength("findOne", args, 0, 3);
        DBObject obj = null;
        if (len == 0) {
            obj = coll.findOne();
        } else if (len == 1) {
            DBObject arg1 = toDBObject(args[0], null);
            if (arg1 != null)
                obj = coll.findOne(arg1);
            else
                obj = coll.findOne(args[0]);

        } else if (len == 2) {
            DBObject arg1 = toDBObject(args[0], null);
            if (arg1 != null)
                obj = coll.findOne(arg1, toDBObject(args[1]));
            else
                obj = coll.findOne(args[0], toDBObject(args[1]));
        } else if (len == 3) {
            obj = coll.findOne(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]));
        }
        return toCFML(obj);
    }
    // findAndRemove
    if (methodName.equals("findAndRemove")) {
        checkArgLength("findAndRemove", args, 1, 1);
        DBObject obj = coll.findAndRemove(toDBObject(args[0]));
        return toCFML(obj);
    }
    // findAndModify
    if (methodName.equals("findAndModify")) {
        int len = args == null ? 0 : args.length;
        if (len != 2 && len != 3 && len != 7) {
            throw exp.createApplicationException(
                    "the function findAndModify needs 2, 3 or 7 arguments, but you have defined only " + len);
        }
        DBObject obj = null;
        if (len == 2) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]));
        } else if (len == 3) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]));
        } else if (len == 7) {
            obj = coll.findAndModify(toDBObject(args[0]), toDBObject(args[1]), toDBObject(args[2]),
                    caster.toBooleanValue(args[3]), toDBObject(args[4]), caster.toBooleanValue(args[5]),
                    caster.toBooleanValue(args[6]));
        }

        return toCFML(obj);
    }

    //group
    /*
       TODO: needs GroupCommand
    if(methodName.equals("group")) {
       int len=checkArgLength("group",args,1,1);
       if(len==1){
    return toCFML(coll.group(
       toDBObject(args[0])
    ));
       }
    }*/

    // insert
    if (methodName.equals("insert")) {
        checkArgLength("insert", args, 1, 1);
        return toCFML(coll.insert(toDBObjectArray(args[0])));
    }

    // insertMany(required array documents, struct options)
    // Valid option keys: string "writeconcern", boolean "ordered" (defaults to true).
    // Returns a struct mirroring the mongo shell: { acknowledged, nInserted, writeErrors }.
    if (methodName.equals("insertMany")) {
        int len = checkArgLength("insertMany", args, 1, 2);
        // Ordered bulk by default; swapped for an unordered one below if requested.
        BulkWriteOperation bulk = coll.initializeOrderedBulkOperation();
        WriteConcern wc = coll.getWriteConcern();

        if (len == 2) {
            DBObject dboOpts = toDBObject(args[1]);
            if (dboOpts.containsField("ordered")) {
                if (!decision.isBoolean(dboOpts.get("ordered")))
                    throw exp.createApplicationException("ordered in options must be boolean value");

                if (!caster.toBooleanValue(dboOpts.get("ordered"))) {
                    bulk = coll.initializeUnorderedBulkOperation();
                }
            }

            if (dboOpts.containsField("writeconcern")) {
                // WriteConcern.valueOf returns null for unknown names; keep the collection default then.
                WriteConcern newWc = WriteConcern.valueOf(caster.toString(dboOpts.get("writeconcern")));
                if (newWc != null) {
                    wc = newWc;
                }
            }
        }
        Map<String, Object> result = new LinkedHashMap<String, Object>();
        BulkWriteResult bulkResult;
        // Parameterized element type (was raw List<Map>) to avoid unchecked warnings.
        List<Map<String, Object>> writeErrors = new ArrayList<Map<String, Object>>();

        Array arr = caster.toArray(args[0]);
        if (arr.size() == 0) {
            // Nothing to insert; executing an empty bulk operation would fail, so short-circuit.
            result.put("nInserted", 0);
            result.put("writeErrors", writeErrors);
            result.put("acknowledged", true);
            return toCFML(result);
        }

        Iterator<Object> it = arr.valueIterator();
        while (it.hasNext()) {
            bulk.insert(toDBObject(it.next()));
        }

        try {
            bulkResult = bulk.execute(wc);
        } catch (BulkWriteException e) {
            // Partial failure: collect per-document errors but still surface the counts
            // from the writes that did succeed (available on e.getWriteResult()).
            bulkResult = e.getWriteResult();
            for (BulkWriteError bulkError : e.getWriteErrors()) {
                Map<String, Object> bulkErrorItem = new LinkedHashMap<String, Object>();
                bulkErrorItem.put("index", (bulkError.getIndex() + 1)); // +1 so we get index of item in CFML array
                bulkErrorItem.put("code", bulkError.getCode());
                bulkErrorItem.put("errmsg", bulkError.getMessage());
                bulkErrorItem.put("op", bulkError.getDetails());
                writeErrors.add(bulkErrorItem);
            }
        }

        result.put("acknowledged", bulkResult.isAcknowledged());
        if (bulkResult.isAcknowledged()) {
            result.put("nInserted", bulkResult.getInsertedCount());
            result.put("writeErrors", writeErrors);
        }

        return toCFML(result);
    }

    // bulkWrite(required array operations, struct options) valid options keys are string "writeconcern", boolean "ordered", boolean "bypassDocumentValidation"
    // an operation is a struct with the following keys: { "operation":[insert|update|updateOne|remove|removeOne], "document":[(required if operation is insert) - a doc to insert], "query":[(optional) - the query to find for remove/update operations], "update":[(required for update/updateOne) - the update document] }
    // i.e. dbCollection.bulkWrite([
    //       {"operation":"insert", "document":{"test":"insert"}}
    //       ,{"operation":"updateOne", "query":{"_id":"foo"}, "update":{"$set":{"updated":true}}}
    //       ,{"operation":"removeOne", "query":{"_id":"goaway"}}
    // ],{"ordered":false})
    if (methodName.equals("bulkWrite")) {
        int len = checkArgLength("bulkWrite", args, 1, 2);
        // Ordered bulk by default; swapped for an unordered one below if requested.
        BulkWriteOperation bulk = coll.initializeOrderedBulkOperation();
        WriteConcern wc = coll.getWriteConcern();

        if (len == 2) {
            DBObject dboOpts = toDBObject(args[1]);
            if (dboOpts.containsField("ordered")) {
                if (!decision.isBoolean(dboOpts.get("ordered")))
                    throw exp.createApplicationException("ordered in options must be boolean value");

                if (!caster.toBooleanValue(dboOpts.get("ordered"))) {
                    bulk = coll.initializeUnorderedBulkOperation();
                }
            }

            if (dboOpts.containsField("bypassDocumentValidation")) {
                if (!decision.isBoolean(dboOpts.get("bypassDocumentValidation")))
                    throw exp.createApplicationException(
                            "bypassDocumentValidation in options must be boolean value");

                bulk.setBypassDocumentValidation(
                        caster.toBooleanValue(dboOpts.get("bypassDocumentValidation")));
            }

            if (dboOpts.containsField("writeconcern")) {
                // WriteConcern.valueOf returns null for unknown names; keep the collection default then.
                WriteConcern newWc = WriteConcern.valueOf(caster.toString(dboOpts.get("writeconcern")));
                if (newWc != null) {
                    wc = newWc;
                }
            }
        }
        Map<String, Object> result = new LinkedHashMap<String, Object>();
        BulkWriteResult bulkResult;
        // Parameterized element type (was raw List<Map>) to avoid unchecked warnings.
        List<Map<String, Object>> writeErrors = new ArrayList<Map<String, Object>>();

        Array arr = caster.toArray(args[0]);
        if (arr.size() == 0) {
            // Nothing to do; executing an empty bulk operation would fail, so short-circuit.
            result.put("nInserted", 0);
            result.put("nMatched", 0);
            result.put("nModified", 0);
            result.put("nRemoved", 0);
            result.put("writeErrors", writeErrors);
            result.put("acknowledged", true);
            return toCFML(result);
        }

        Iterator<Object> it = arr.valueIterator();
        while (it.hasNext()) {

            DBObject operation = toDBObject(it.next());
            // BUGFIX: the operation name was previously compared with ==, which tests
            // object identity, not string equality, so every operation was silently
            // skipped. Use String.equals (literal first, so a null/non-String value
            // is simply treated as "unknown operation", matching the old fall-through).
            Object op = operation.get("operation");

            if ("update".equals(op)) {
                // update all documents matching "query" with the "update" document
                bulk.find(toDBObject(operation.get("query"))).update(toDBObject(operation.get("update")));
            } else if ("updateOne".equals(op)) {
                // update the first document matching "query"
                bulk.find(toDBObject(operation.get("query"))).updateOne(toDBObject(operation.get("update")));
            } else if ("remove".equals(op)) {
                // remove all documents matching "query"
                bulk.find(toDBObject(operation.get("query"))).remove();
            } else if ("removeOne".equals(op)) {
                // remove the first document matching "query"
                bulk.find(toDBObject(operation.get("query"))).removeOne();
            } else if ("insert".equals(op)) {
                bulk.insert(toDBObject(operation.get("document")));
            }

        }

        try {
            bulkResult = bulk.execute(wc);
        } catch (BulkWriteException e) {
            // Partial failure: collect per-document errors but still surface the counts
            // from the writes that did succeed (available on e.getWriteResult()).
            bulkResult = e.getWriteResult();
            for (BulkWriteError bulkError : e.getWriteErrors()) {
                Map<String, Object> bulkErrorItem = new LinkedHashMap<String, Object>();
                bulkErrorItem.put("index", (bulkError.getIndex() + 1)); // +1 so we get index of item in CFML array
                bulkErrorItem.put("code", bulkError.getCode());
                bulkErrorItem.put("errmsg", bulkError.getMessage());
                bulkErrorItem.put("op", bulkError.getDetails());
                writeErrors.add(bulkErrorItem);
            }
        }

        result.put("acknowledged", bulkResult.isAcknowledged());
        if (bulkResult.isAcknowledged()) {
            result.put("nInserted", bulkResult.getInsertedCount());
            result.put("nMatched", bulkResult.getMatchedCount());
            result.put("nModified", bulkResult.getModifiedCount());
            result.put("nRemoved", bulkResult.getRemovedCount());
            result.put("writeErrors", writeErrors);
        }

        return toCFML(result);
    }

    // mapReduce(required string map, required string reduce, required string finalize, required struct options)
    if (methodName.equals("mapReduce")) {
        int len = checkArgLength("mapReduce", args, 4, 4);
        // len is always 4 here (checkArgLength enforces min == max == 4); the guard is defensive.
        if (len == 4) {
            return toCFML(coll.mapReduce(caster.toString(args[0]), caster.toString(args[1]),
                    caster.toString(args[2]), toDBObject(args[3])));
        }
    }

    // remove(required struct query) - removes all documents matching the query
    if (methodName.equals("remove")) {
        checkArgLength("remove", args, 1, 1);
        return toCFML(coll.remove(toDBObject(args[0])));

    }

    // rename(required string newName, boolean dropTarget) - "renameCollection" is an accepted alias
    if (methodName.equals("rename") || methodName.equals("renameCollection")) {
        int len = checkArgLength(methodName.getString(), args, 1, 2);
        if (len == 1) {
            return toCFML(coll.rename(caster.toString(args[0])));
        } else if (len == 2) {
            // second argument: drop the target collection first if it already exists
            return toCFML(coll.rename(caster.toString(args[0]), caster.toBooleanValue(args[1])));
        }
    }

    // save(required struct document) - insert, or update if the document already has an _id
    if (methodName.equals("save")) {
        checkArgLength("save", args, 1, 1);
        return toCFML(coll.save(toDBObject(args[0])));
    }

    // setWriteConcern(required string concern) - unknown names are silently ignored
    // (WriteConcern.valueOf returns null for unrecognized values)
    if (methodName.equals("setWriteConcern")) {
        checkArgLength("setWriteConcern", args, 1, 1);
        WriteConcern wc = WriteConcern.valueOf(caster.toString(args[0]));
        if (wc != null) {
            coll.setWriteConcern(wc);
        }
        return null;
    }

    // storageSize() - read from the collection stats document
    if (methodName.equals("storageSize")) {
        checkArgLength("storageSize", args, 0, 0);
        return toCFML(coll.getStats().get("storageSize"));
    }

    // totalIndexSize() - read from the collection stats document
    if (methodName.equals("totalIndexSize")) {
        checkArgLength("totalIndexSize", args, 0, 0);
        return toCFML(coll.getStats().get("totalIndexSize"));
    }

    // update(required struct query, required struct update, boolean upsert, boolean multi)
    if (methodName.equals("update")) {
        int len = checkArgLength("update", args, 2, 4);
        if (len == 2) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1])));
        } else if (len == 3) {
            // three-arg form: explicit upsert flag, multi defaults to false
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1]), caster.toBooleanValue(args[2]),
                    false));
        } else if (len == 4) {
            return toCFML(coll.update(toDBObject(args[0]), toDBObject(args[1]), caster.toBooleanValue(args[2]),
                    caster.toBooleanValue(args[3])));
        }
    }

    // Known method names, used only to build the error message below.
    String functionNames = "aggregate,count,dataSize,distinct,drop,dropIndex,dropIndexes,createIndex,stats,getIndexes,getWriteConcern,find,findOne,findAndRemove,findAndModify,"
            + "group,insert,insertMany,bulkWrite,mapReduce,remove,rename,save,setWriteConcern,storageSize,totalIndexSize,update";

    // No handler matched: unknown method name.
    throw exp.createApplicationException(
            "function " + methodName + " does not exist existing functions are [" + functionNames + "]");
}

From source file:org.sglover.entities.dao.mongo.MongoEntitiesDAO.java

License:Open Source License

/**
 * Bulk-inserts one document per entity into the entities collection, tagging each
 * with the node's id/internal-id/version, the entity {@code type}, and its flattened
 * locations. Throws if Mongo reports fewer inserts than entities supplied.
 *
 * @param node     the node the entities were extracted from
 * @param type     entity type stored under the "t" field
 * @param key      field name under which the entity value itself is stored
 * @param entities entities to persist; an empty collection is a no-op
 */
private void addEntities(Node node, String type, String key, Collection<Entity<String>> entities) {
    // Bail out before touching the node or allocating a bulk operation:
    // executing an empty bulk op is pointless (the old code skipped execute() too).
    if (entities.isEmpty()) {
        return;
    }

    String nodeId = node.getNodeId();
    long nodeInternalId = node.getNodeInternalId();
    String nodeVersion = node.getVersionLabel();

    BulkWriteOperation bulk = entitiesData.initializeUnorderedBulkOperation();
    int expected = entities.size();

    for (Entity<String> nameEntity : entities) {
        // Flatten each occurrence of the entity into a compact sub-document:
        // s=begin offset, e=end offset, p=probability, c=surrounding context.
        List<DBObject> locs = new LinkedList<>();
        for (EntityLocation location : nameEntity.getLocations()) {
            DBObject locDBObject = BasicDBObjectBuilder.start("s", location.getBeginOffset())
                    .add("e", location.getEndOffset()).add("p", location.getProbability())
                    .add("c", location.getContext()).get();
            locs.add(locDBObject);
        }

        DBObject dbObject = BasicDBObjectBuilder.start("n", nodeId).add("ni", nodeInternalId)
                .add("v", nodeVersion).add("t", type).add(key, nameEntity.getEntity())
                .add("c", nameEntity.getCount()).add("locs", locs).get();
        bulk.insert(dbObject);
    }

    BulkWriteResult result = bulk.execute();
    int inserted = result.getInsertedCount();

    if (expected != inserted) {
        // Include the counts so a partial write is diagnosable from the message alone.
        throw new RuntimeException(
                "Mongo write failed: expected " + expected + " inserts but " + inserted + " succeeded");
    }
}