Example usage for com.mongodb MongoClient getDatabase

List of usage examples for com.mongodb MongoClient getDatabase

Introduction

On this page you can find example usage for com.mongodb MongoClient getDatabase.

Prototype

public MongoDatabase getDatabase(final String databaseName) 

Source Link

Usage

From source file:gridfs.GridFSTour.java

License:Apache License

/**
 * Run this main method to see the output of this quick example.
 *
 * <p>Walks through the GridFS API: uploading from a stream, opening an upload
 * stream, finding files (with and without a filter), downloading by id and by
 * name, renaming, and deleting.
 *
 * @param args takes an optional single argument for the connection string
 * @throws FileNotFoundException if the sample file cannot be found
 * @throws IOException if there was an exception closing an input stream
 */
public static void main(final String[] args) throws FileNotFoundException, IOException {
    MongoClient mongoClient;

    if (args.length == 0) {
        // connect to the local database server
        mongoClient = new MongoClient();
    } else {
        mongoClient = new MongoClient(new MongoClientURI(args[0]));
    }

    // get handle to "mydb" database; drop it so the example starts from a clean slate
    MongoDatabase database = mongoClient.getDatabase("mydb");
    database.drop();

    GridFSBucket gridFSBucket = GridFSBuckets.create(database);

    /*
     * UploadFromStream Example
     */
    // Get the input stream
    InputStream streamToUploadFrom = new ByteArrayInputStream("Hello World".getBytes(StandardCharsets.UTF_8));

    // Create some custom options
    GridFSUploadOptions options = new GridFSUploadOptions().chunkSizeBytes(1024)
            .metadata(new Document("type", "presentation"));

    ObjectId fileId = gridFSBucket.uploadFromStream("mongodb-tutorial", streamToUploadFrom, options);
    streamToUploadFrom.close();
    System.out.println("The fileId of the uploaded file is: " + fileId.toHexString());

    /*
     * OpenUploadStream Example
     */

    // Get some data to write
    byte[] data = "Data to upload into GridFS".getBytes(StandardCharsets.UTF_8);

    GridFSUploadStream uploadStream = gridFSBucket.openUploadStream("sampleData");
    uploadStream.write(data);
    uploadStream.close();
    System.out.println("The fileId of the uploaded file is: " + uploadStream.getFileId().toHexString());

    /*
     * Find documents
     */
    gridFSBucket.find().forEach(new Block<GridFSFile>() {
        @Override
        public void apply(final GridFSFile gridFSFile) {
            System.out.println(gridFSFile.getFilename());
        }
    });

    /*
     * Find documents with a filter
     */
    gridFSBucket.find(eq("metadata.contentType", "image/png")).forEach(new Block<GridFSFile>() {
        @Override
        public void apply(final GridFSFile gridFSFile) {
            System.out.println(gridFSFile.getFilename());
        }
    });

    /*
     * DownloadToStream
     */
    FileOutputStream streamToDownloadTo = new FileOutputStream("/tmp/mongodb-tutorial.txt");
    gridFSBucket.downloadToStream(fileId, streamToDownloadTo);
    streamToDownloadTo.close();

    /*
     * DownloadToStreamByName
     */
    streamToDownloadTo = new FileOutputStream("/tmp/mongodb-tutorial.txt");
    GridFSDownloadByNameOptions downloadOptions = new GridFSDownloadByNameOptions().revision(0);
    gridFSBucket.downloadToStreamByName("mongodb-tutorial", streamToDownloadTo, downloadOptions);
    streamToDownloadTo.close();

    /*
     * OpenDownloadStream
     */
    GridFSDownloadStream downloadStream = gridFSBucket.openDownloadStream(fileId);
    int fileLength = (int) downloadStream.getGridFSFile().getLength();
    byte[] bytesToWriteTo = new byte[fileLength];
    downloadStream.read(bytesToWriteTo);
    downloadStream.close();

    System.out.println(new String(bytesToWriteTo, StandardCharsets.UTF_8));

    /*
     * OpenDownloadStreamByName
     */

    downloadStream = gridFSBucket.openDownloadStreamByName("sampleData");
    fileLength = (int) downloadStream.getGridFSFile().getLength();
    bytesToWriteTo = new byte[fileLength];
    downloadStream.read(bytesToWriteTo);
    downloadStream.close();

    System.out.println(new String(bytesToWriteTo, StandardCharsets.UTF_8));

    /*
     * Rename
     */
    gridFSBucket.rename(fileId, "mongodbTutorial");

    /*
     * Delete
     */
    gridFSBucket.delete(fileId);

    database.drop();
    // Close the client so its connection pool and monitor threads are released
    // (the original example never closed it).
    mongoClient.close();
}

From source file:henu.util.NosqlDB.java

/**
 * Obtains a handle to the configured MongoDB database over an
 * unauthenticated connection to {@code ADDRESS:PORT}.
 *
 * @return the {@code MongoDatabase} handle, or {@code null} if the
 *         connection attempt failed
 */
public static MongoDatabase getMongoDataBase() {
    try {
        // Connect to the MongoDB server without credentials.
        final MongoClient client = new MongoClient(ADDRESS, PORT);

        // Resolve the database handle by its configured name.
        final MongoDatabase db = client.getDatabase(DBNAME);
        System.out.println("Connect to database successfully");
        return db;
    } catch (Exception e) {
        // Best-effort: report the failure and fall through to null.
        System.err.println(e.getClass().getName() + ": " + e.getMessage());
        return null;
    }
}

From source file:henu.util.NosqlDB.java

/**
 * Obtains a handle to the configured MongoDB database using an
 * authenticated (SCRAM-SHA-1) connection to {@code ADDRESS:PORT}.
 *
 * @return the {@code MongoDatabase} handle, or {@code null} if the
 *         connection attempt failed
 */
public static MongoDatabase getMongoDataBaseByAuth() {
    try {
        // Seed list with the single configured server address.
        final List<ServerAddress> seeds = new ArrayList<ServerAddress>();
        seeds.add(new ServerAddress(ADDRESS, PORT));

        // SCRAM-SHA-1 credential for USERNAME authenticating against DBNAME.
        final List<MongoCredential> creds = new ArrayList<MongoCredential>();
        creds.add(MongoCredential.createScramSha1Credential(USERNAME, DBNAME, PASSWORD.toCharArray()));

        // Connect with credentials and resolve the database handle.
        final MongoClient client = new MongoClient(seeds, creds);
        final MongoDatabase db = client.getDatabase(DBNAME);
        System.out.println("Connect to database successfully");
        return db;
    } catch (Exception e) {
        // Best-effort: report the failure and fall through to null.
        System.err.println(e.getClass().getName() + ": " + e.getMessage());
        return null;
    }
}

From source file:io.debezium.connector.mongodb.MongoUtil.java

License:Apache License

/**
 * Perform the given operation on each of the collection names in the named database.
 *
 * @param client the MongoDB client; may not be null
 * @param databaseName the name of the database; may not be null
 * @param operation the operation to perform; may not be null
 */
public static void forEachCollectionNameInDatabase(MongoClient client, String databaseName,
        Consumer<String> operation) {
    // Resolve the database and feed each of its collection names to the consumer.
    forEach(client.getDatabase(databaseName).listCollectionNames(), operation);
}

From source file:io.debezium.connector.mongodb.MongoUtil.java

License:Apache License

/**
 * Perform the given operation on the database with the given name, only if that database exists.
 *
 * @param client the MongoDB client; may not be null
 * @param dbName the name of the database; may not be null
 * @param dbOperation the operation to perform; may not be null
 */
public static void onDatabase(MongoClient client, String dbName, Consumer<MongoDatabase> dbOperation) {
    if (!contains(client.listDatabaseNames(), dbName)) {
        return; // database does not exist on the server: nothing to do
    }
    dbOperation.accept(client.getDatabase(dbName));
}

From source file:io.debezium.connector.mongodb.Replicator.java

License:Apache License

/**
 * Copy the collection, sending to the recorder a record for each document.
 *
 * @param primary the connection to the replica set's primary node; may not be null
 * @param collectionId the identifier of the collection to be copied; may not be null
 * @param timestamp the timestamp in milliseconds at which the copy operation was started
 * @return number of documents that were copied
 * @throws InterruptedException if the thread was interrupted while the copy operation was running
 */
protected long copyCollection(MongoClient primary, CollectionId collectionId, long timestamp)
        throws InterruptedException {
    RecordsForCollection factory = recordMakers.forCollection(collectionId);
    MongoCollection<Document> collection = primary.getDatabase(collectionId.dbName())
            .getCollection(collectionId.name());
    long copied = 0;
    // Iterate every document currently in the collection, closing the cursor on exit.
    try (MongoCursor<Document> docs = collection.find().iterator()) {
        while (docs.hasNext()) {
            Document document = docs.next();
            logger.trace("Found existing doc in {}: {}", collectionId, document);
            copied += factory.recordObject(collectionId, document, timestamp);
        }
    }
    return copied;
}

From source file:io.debezium.connector.mongodb.Replicator.java

License:Apache License

/**
 * Use the given primary to read the oplog.
 * /*from  w w  w  . j  a v a  2 s .c  o  m*/
 * @param primary the connection to the replica set's primary node; may not be null
 */
protected void readOplog(MongoClient primary) {
    BsonTimestamp oplogStart = source.lastOffsetTimestamp(replicaSet.replicaSetName());
    logger.info("Reading oplog for '{}' primary {} starting at {}", replicaSet, primary.getAddress(),
            oplogStart);

    // Include none of the cluster-internal operations and only those events since the previous timestamp ...
    MongoCollection<Document> oplog = primary.getDatabase("local").getCollection("oplog.rs");
    Bson filter = Filters.and(Filters.gt("ts", oplogStart), // start just after our last position
            Filters.exists("fromMigrate", false)); // skip internal movements across shards
    FindIterable<Document> results = oplog.find(filter).sort(new Document("$natural", 1)) // force forwards collection scan
            .oplogReplay(true) // tells Mongo to not rely on indexes
            .noCursorTimeout(true) // don't timeout waiting for events
            .cursorType(CursorType.TailableAwait); // tail and await new data
    // Read as much of the oplog as we can ...
    ServerAddress primaryAddress = primary.getAddress();
    try (MongoCursor<Document> cursor = results.iterator()) {
        while (running.get() && cursor.hasNext()) {
            if (!handleOplogEvent(primaryAddress, cursor.next())) {
                // Something happened, and we're supposed to stop reading
                return;
            }
        }
    }
}

From source file:io.gravitee.gateway.registry.mongodb.MongoDBRegistry.java

License:Apache License

public MongoDBRegistry(final String configurationPath) {
    try {//from  www.j  a v a2  s.com
        final InputStream input = new FileInputStream(configurationPath);
        properties.load(input);
        final String host = PropertiesUtils.getProperty(properties, "gravitee.io.mongodb.host");
        final int port = PropertiesUtils.getPropertyAsInteger(properties, "gravitee.io.mongodb.port");
        final String database = PropertiesUtils.getProperty(properties, "gravitee.io.mongodb.database");
        final MongoClient mongoClient = new MongoClient(host, port);
        mongoDatabase = mongoClient.getDatabase(database);
        readConfiguration();
    } catch (final IOException e) {
        LOGGER.error("No MongoDB configuration can be read from {}", configurationPath, e);
    }
}

From source file:io.mandrel.blob.impl.MongoBlobStore.java

License:Apache License

public MongoBlobStore(TaskContext context, MongoClient mongoClient, String databaseName, String bucketName,
        int batchSize) {
    super(context);
    this.mongoClient = mongoClient;
    this.databaseName = databaseName;
    this.bucket = GridFSBuckets.create(mongoClient.getDatabase(databaseName), bucketName);
    this.batchSize = batchSize;
    this.mapper = new ObjectMapper();
}

From source file:io.mandrel.document.impl.MongoDocumentStore.java

License:Apache License

public MongoDocumentStore(TaskContext context, DataExtractor metadataExtractor, MongoClient mongoClient,
        String databaseName, String collectionName, int batchSize) {
    super(context, metadataExtractor);
    this.mongoClient = mongoClient;
    this.collection = mongoClient.getDatabase(databaseName).getCollection(collectionName);
    this.batchSize = batchSize;
}