Example usage for com.mongodb CommandResult ok

List of usage examples for com.mongodb CommandResult ok

Introduction

On this page you can find example usages of com.mongodb CommandResult ok.

Prototype

public boolean ok() 

Document

Gets the "ok" field, which indicates whether the command executed correctly.
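
Below is a minimal, self-contained sketch of the typical pattern: run a command through the legacy driver API and check CommandResult.ok(). The connection details, the "ping" command, and the class name are illustrative assumptions, not taken from the examples on this page.

import com.mongodb.CommandResult;
import com.mongodb.DB;
import com.mongodb.MongoClient;

public class CommandResultOkExample {
    public static void main(String[] args) {
        // Sketch only: assumes a MongoDB instance reachable at localhost:27017
        // and the legacy (pre-MongoDatabase) driver API.
        MongoClient mongoClient = new MongoClient("localhost", 27017);
        try {
            DB adminDb = mongoClient.getDB("admin");

            // Run a server command and inspect the "ok" field via CommandResult.ok().
            CommandResult result = adminDb.command("ping");
            if (result.ok()) {
                System.out.println("ping succeeded: " + result);
            } else {
                System.out.println("ping failed: " + result.getErrorMessage());
            }
        } finally {
            mongoClient.close();
        }
    }
}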

Usage

From source file:org.eclipse.linuxtools.tmf.totalads.dbms.DBMS.java

License:Open Source License

/**
 * Updates the values of individual fields--specified by the replacementFieldsAndValues object--
 * in the documents matched by the searchKeyAndItsValue object. Pass two objects whose classes have
 * only primitive-typed fields and no methods; each object's field names, values, and data types are
 * extracted automatically and used in the update. If no document matches the criteria, a new
 * document is inserted.
 * @param searchKeyAndItsValue Search fields
 * @param replacementFieldsAndValues Replacement fields
 * @param database Database name
 * @param collection Collection name
 * @throws IllegalArgumentException
 * @throws IllegalAccessException
 * @throws TotalADSDBMSException if the update reports an error
 */
public void replaceFields(Object searchKeyAndItsValue, Object replacementFieldsAndValues, String database,
        String collection) throws IllegalArgumentException, IllegalAccessException, TotalADSDBMSException {

    DB db = mongoClient.getDB(database);
    DBCollection coll = db.getCollection(collection);

    BasicDBObject replacementDocument = new BasicDBObject();
    BasicDBObject setFieldValDocument = new BasicDBObject();

    extractKeysAndValuesfromTheObject(replacementFieldsAndValues, setFieldValDocument);

    replacementDocument.append("$set", setFieldValDocument);

    BasicDBObject searchQueryDocument = new BasicDBObject();
    //.append("hosting", "hostB");
    extractKeysAndValuesfromTheObject(searchKeyAndItsValue, searchQueryDocument);

    WriteResult writeRes = coll.update(searchQueryDocument, replacementDocument, true, false);

    CommandResult cmdResult = writeRes.getLastError();
    if (!cmdResult.ok()) {
        throw new TotalADSDBMSException("Error : " + cmdResult.getErrorMessage());
    }

}
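
For illustration, a caller of replaceFields could pass two flat value objects such as the hypothetical classes below. The class names, field names, and the dbms, database, and collection values are assumptions for this sketch, not part of the original source.

// Hypothetical flat value classes: public primitive/String fields only, no methods,
// which is what the javadoc above requires for automatic key/value extraction.
class SearchKey {
    public String hosting = "hostB";   // criteria for matching documents
}

class Replacement {
    public String status = "active";   // fields placed under $set
    public int retryCount = 3;
}

// Hypothetical usage; 'dbms' is assumed to be an already-initialized DBMS instance.
dbms.replaceFields(new SearchKey(), new Replacement(), "totalads", "settings");
// Because update(..., upsert=true, multi=false) is used, a new document is inserted when
// nothing matches, and the write outcome is then checked via CommandResult.ok().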

From source file:org.eclipse.tracecompass.totalads.dbms.MongoDBMS.java

License:Open Source License

@Override
public void insertOrUpdateUsingJSON(String database, JsonObject keytoSearch, JsonObject jsonObjectToUpdate,
        String collection) throws TotalADSDBMSException {
    DB db = mongoClient.getDB(database);
    DBCollection coll = db.getCollection(collection);

    BasicDBObject docToUpdate = (BasicDBObject) JSON.parse(jsonObjectToUpdate.toString());

    BasicDBObject keyToSearch = (BasicDBObject) JSON.parse(keytoSearch.toString());

    WriteResult writeRes = coll.update(keyToSearch, docToUpdate, true, false);

    CommandResult cmdResult = writeRes.getLastError();
    if (!cmdResult.ok()) {
        throw new TotalADSDBMSException("Error : " + cmdResult.getErrorMessage()); //$NON-NLS-1$
    }

}

From source file:org.grails.datastore.mapping.mongo.engine.MongoEntityPersister.java

License:Apache License

@Override
protected Object generateIdentifier(final PersistentEntity persistentEntity, final DBObject nativeEntry) {
    return mongoTemplate.execute(new DbCallback<Object>() {
        public Object doInDB(DB con) throws MongoException, DataAccessException {

            @SuppressWarnings("hiding")
            String collectionName = getCollectionName(persistentEntity, nativeEntry);

            DBCollection dbCollection = con.getCollection(collectionName + NEXT_ID_SUFFIX);

            // If there is a numeric identifier then we need to rely on optimistic concurrency controls to obtain a unique identifier
            // sequence. If the identifier is not numeric then we assume BSON ObjectIds.
            if (hasNumericalIdentifier) {
                while (true) {
                    DBCursor result = dbCollection.find().sort(new BasicDBObject(MONGO_ID_FIELD, -1)).limit(1);

                    long nextId;
                    if (result.hasNext()) {
                        final Long current = getMappingContext().getConversionService()
                                .convert(result.next().get(MONGO_ID_FIELD), Long.class);
                        nextId = current + 1;
                    } else {
                        nextId = 1;
                    }

                    nativeEntry.put(MONGO_ID_FIELD, nextId);
                    final WriteResult writeResult = dbCollection.insert(nativeEntry);
                    final CommandResult lastError = writeResult.getLastError();
                    if (lastError.ok()) {
                        break;
                    }

                    final Object code = lastError.get("code");
                    // duplicate key error, try again
                    if (code != null && code.equals(11000)) {
                        continue;
                    }
                    break;
                }

                return nativeEntry.get(MONGO_ID_FIELD);
            }

            ObjectId objectId = ObjectId.get();
            if (ObjectId.class.isAssignableFrom(persistentEntity.getIdentity().getType())) {
                nativeEntry.put(MONGO_ID_FIELD, objectId);
                return objectId;
            }

            String stringId = objectId.toString();
            nativeEntry.put(MONGO_ID_FIELD, stringId);
            return stringId;
        }
    });
}

From source file:org.graylog2.system.stats.mongo.MongoProbe.java

License:Open Source License

private HostInfo createHostInfo() {
    final HostInfo hostInfo;
    final CommandResult hostInfoResult = adminDb.command("hostInfo");
    if (hostInfoResult.ok()) {
        final BasicDBObject systemMap = (BasicDBObject) hostInfoResult.get("system");
        final HostInfo.System system = HostInfo.System.create(new DateTime(systemMap.getDate("currentTime")),
                systemMap.getString("hostname"), systemMap.getInt("cpuAddrSize"),
                systemMap.getLong("memSizeMB"), systemMap.getInt("numCores"), systemMap.getString("cpuArch"),
                systemMap.getBoolean("numaEnabled"));
        final BasicDBObject osMap = (BasicDBObject) hostInfoResult.get("os");
        final HostInfo.Os os = HostInfo.Os.create(osMap.getString("type"), osMap.getString("name"),
                osMap.getString("version"));

        final BasicDBObject extraMap = (BasicDBObject) hostInfoResult.get("extra");
        final HostInfo.Extra extra = HostInfo.Extra.create(extraMap.getString("versionString"),
                extraMap.getString("libcVersion"), extraMap.getString("kernelVersion"),
                extraMap.getString("cpuFrequencyMHz"), extraMap.getString("cpuFeatures"),
                extraMap.getString("scheduler"), extraMap.getLong("pageSize", -1l),
                extraMap.getLong("numPages", -1l), extraMap.getLong("maxOpenFiles", -1l));

        hostInfo = HostInfo.create(system, os, extra);
    } else {
        hostInfo = null;
    }

    return hostInfo;
}

From source file:org.graylog2.system.stats.mongo.MongoProbe.java

License:Open Source License

private BuildInfo createBuildInfo() {
    final BuildInfo buildInfo;
    final CommandResult buildInfoResult = adminDb.command("buildInfo");
    if (buildInfoResult.ok()) {
        buildInfo = BuildInfo.create(buildInfoResult.getString("version"),
                buildInfoResult.getString("gitVersion"), buildInfoResult.getString("sysInfo"),
                buildInfoResult.getString("loaderFlags"), buildInfoResult.getString("compilerFlags"),
                buildInfoResult.getString("allocator"), (List<Integer>) buildInfoResult.get("versionArray"),
                buildInfoResult.getString("javascriptEngine"), buildInfoResult.getInt("bits"),
                buildInfoResult.getBoolean("debug"), buildInfoResult.getLong("maxBsonObjectSize"));
    } else {
        buildInfo = null;
    }

    return buildInfo;
}

From source file:org.graylog2.system.stats.mongo.MongoProbe.java

License:Open Source License

public MongoStats mongoStats() {
    final List<ServerAddress> serverAddresses = mongoClient.getServerAddressList();
    final List<HostAndPort> servers = Lists.newArrayListWithCapacity(serverAddresses.size());
    for (ServerAddress serverAddress : serverAddresses) {
        servers.add(HostAndPort.fromParts(serverAddress.getHost(), serverAddress.getPort()));
    }

    final DatabaseStats dbStats;
    final CommandResult dbStatsResult = db.command("dbStats");
    if (dbStatsResult.ok()) {
        final BasicDBObject extentFreeListMap = (BasicDBObject) dbStatsResult.get("extentFreeList");
        final DatabaseStats.ExtentFreeList extentFreeList = DatabaseStats.ExtentFreeList
                .create(extentFreeListMap.getInt("num"), extentFreeListMap.getInt("totalSize"));

        final BasicDBObject dataFileVersionMap = (BasicDBObject) dbStatsResult.get("dataFileVersion");
        final DatabaseStats.DataFileVersion dataFileVersion = DatabaseStats.DataFileVersion
                .create(dataFileVersionMap.getInt("major"), dataFileVersionMap.getInt("minor"));

        dbStats = DatabaseStats.create(dbStatsResult.getString("db"), dbStatsResult.getLong("collections"),
                dbStatsResult.getLong("objects"), dbStatsResult.getDouble("avgObjSize"),
                dbStatsResult.getLong("dataSize"), dbStatsResult.getLong("storageSize"),
                dbStatsResult.getLong("numExtents"), dbStatsResult.getLong("indexes"),
                dbStatsResult.getLong("indexSize"), dbStatsResult.getLong("fileSize"),
                dbStatsResult.getLong("nsSizeMB"), extentFreeList, dataFileVersion);
    } else {
        dbStats = null;
    }

    final ServerStatus serverStatus;
    final CommandResult serverStatusResult = adminDb.command("serverStatus");
    if (serverStatusResult.ok()) {
        final BasicDBObject connectionsMap = (BasicDBObject) serverStatusResult.get("connections");
        final ServerStatus.Connections connections = ServerStatus.Connections.create(
                connectionsMap.getInt("current"), connectionsMap.getInt("available"),
                connectionsMap.getLong("totalCreated"));

        final BasicDBObject networkMap = (BasicDBObject) serverStatusResult.get("network");
        final ServerStatus.Network network = ServerStatus.Network.create(networkMap.getInt("bytesIn"),
                networkMap.getInt("bytesOut"), networkMap.getInt("numRequests"));

        final BasicDBObject memoryMap = (BasicDBObject) serverStatusResult.get("mem");
        final ServerStatus.Memory memory = ServerStatus.Memory.create(memoryMap.getInt("bits"),
                memoryMap.getInt("resident"), memoryMap.getInt("virtual"), memoryMap.getBoolean("supported"),
                memoryMap.getInt("mapped"), memoryMap.getInt("mappedWithJournal"));

        serverStatus = ServerStatus.create(serverStatusResult.getString("host"),
                serverStatusResult.getString("version"), serverStatusResult.getString("process"),
                serverStatusResult.getLong("pid"), serverStatusResult.getInt("uptime"),
                serverStatusResult.getLong("uptimeMillis"), serverStatusResult.getInt("uptimeEstimate"),
                new DateTime(serverStatusResult.getDate("localTime")), connections, network, memory);
    } else {
        serverStatus = null;
    }

    // TODO Collection stats? http://docs.mongodb.org/manual/reference/command/collStats/

    return MongoStats.create(servers, buildInfo, hostInfo, serverStatus, dbStats);
}

From source file:org.mule.modules.morphia.MorphiaConnector.java

License:Open Source License

/**
 * Calculates aggregates values without the need for complex map-reduce operations
 *
 * <p/>
 * {@sample.xml ../../../doc/mule-module-morphia.xml.sample morphia:aggregate}
 *
 * @param collection collection name
 * @param pipeline list of pipeline operators
 * @param exception The exception that needs to be thrown if there is an error executing the aggregation query
 * @param username the username to use in case authentication is required
 * @param password the password to use in case authentication is required, null
 *                 if no authentication is desired
 * @param host     The host of the Mongo server. If the host is part of a replica set then you can specify all the hosts
 *                 separated by comma.
 * @param port     The port of the Mongo server
 * @param database The database name of the Mongo server
 * @return the aggregation result
 * @throws Exception if there is an exception while aggregating
 */
@Processor
public BasicDBList aggregate(String collection, List<Pipeline> pipeline, @Optional String exception,
        @Optional String username, @Optional @Password String password, @Optional String host,
        @Optional Integer port, @Optional String database) throws Exception {
    if (!pipeline.isEmpty()) {
        Datastore datastore = getDatastore(username, password, database, host, port);
        List<DBObject> dbObjects = new ArrayList<DBObject>();
        for (Pipeline pipelineOperator : pipeline) {
            Object dbObject = JSON.parse(pipelineOperator.toJson());
            if (dbObject == null || !(dbObject instanceof DBObject)) {
                throw new IllegalArgumentException("Illegal pipeline operator '" + pipelineOperator + "'");
            }
            dbObjects.add((DBObject) dbObject);
        }
        BasicDBObjectBuilder builder = BasicDBObjectBuilder.start().add("aggregate", collection);
        builder.append("pipeline", dbObjects.toArray());
        CommandResult result = datastore.getDB().command(builder.get());
        if (result.ok()) {
            return (BasicDBList) result.get("result");
        }
        if (exception != null) {
            throw getExceptionFromClassName(exception);
        }
    }
    // Return an empty list
    return new BasicDBList();
}
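
The same aggregation can be issued directly through the legacy driver, checking CommandResult.ok() on the raw command result. In this stand-alone sketch the connection details, database, collection, and $match stage are assumptions; note that older servers return the matched documents under "result" (as in the Morphia example above), while newer servers require a cursor option.

import com.mongodb.BasicDBList;
import com.mongodb.BasicDBObject;
import com.mongodb.BasicDBObjectBuilder;
import com.mongodb.CommandResult;
import com.mongodb.DB;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;

public class AggregateCommandExample {
    public static void main(String[] args) {
        MongoClient mongoClient = new MongoClient("localhost", 27017);
        try {
            DB db = mongoClient.getDB("test");

            // Build an "aggregate" command document with a single $match stage.
            BasicDBObject match = new BasicDBObject("$match", new BasicDBObject("status", "shipped"));
            DBObject command = BasicDBObjectBuilder.start()
                    .add("aggregate", "orders")
                    .add("pipeline", new Object[] { match })
                    .get();

            CommandResult result = db.command(command);
            if (result.ok()) {
                BasicDBList docs = (BasicDBList) result.get("result");
                System.out.println("Matched documents: " + docs);
            } else {
                System.out.println("Aggregation failed: " + result.getErrorMessage());
            }
        } finally {
            mongoClient.close();
        }
    }
}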

From source file:org.pentaho.di.trans.steps.mongodboutput.MongoDbOutput.java

License:Open Source License

protected void commitUpdate(DBObject updateQuery, DBObject insertUpdate, Object[] row) throws KettleException {

    int retrys = 0;
    MongoException lastEx = null;

    while (retrys <= m_writeRetries && !isStopped()) {
        WriteResult result = null;
        CommandResult cmd = null;
        try {
            // TODO It seems that doing an update() via a secondary node does not
            // generate any sort of exception or error result! (at least via
            // driver version 2.11.1). Transformation completes successfully
            // but no updates are made to the collection.
            // This is unlike doing an insert(), which generates
            // a MongoException if you are not talking to the primary. So we need
            // some logic to check whether or not the connection configuration
            // contains the primary in the replica set and give feedback if it
            // doesn't
            try {
                result = m_data.getCollection().update(updateQuery, insertUpdate, m_meta.getUpsert(),
                        m_meta.getMulti());
            } catch (MongoDbException e) {
                throw new MongoException(e.getMessage(), e);
            }

            cmd = result.getLastError();
            if (cmd != null && !cmd.ok()) {
                String message = cmd.getErrorMessage();
                logError(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.MongoReported", message)); //$NON-NLS-1$

                cmd.throwOnError();
            }
        } catch (MongoException me) {
            lastEx = me;
            retrys++;
            if (retrys <= m_writeRetries) {
                logError(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.ErrorWritingToMongo", //$NON-NLS-1$
                        me.toString()));
                logBasic(
                        BaseMessages.getString(PKG, "MongoDbOutput.Messages.Message.Retry", m_writeRetryDelay)); //$NON-NLS-1$
                try {
                    Thread.sleep(m_writeRetryDelay * 1000);
                    // CHECKSTYLE:OFF
                } catch (InterruptedException e) {
                    // CHECKSTYLE:ON
                }
            }
        }

        if (cmd != null && cmd.ok()) {
            break;
        }
    }

    if ((retrys > m_writeRetries || isStopped()) && lastEx != null) {

        // Send this one to the error stream if doing error handling
        if (getStepMeta().isDoingErrorHandling()) {
            putError(getInputRowMeta(), row, 1, lastEx.getMessage(), "", "MongoDbOutput");
        } else {
            throw new KettleException(lastEx);
        }
    }
}

From source file:org.pentaho.di.trans.steps.mongodboutput.MongoDbOutput.java

License:Open Source License

protected CommandResult batchRetryUsingSave(boolean lastRetry)
        throws MongoException, KettleException, MongoDbException {
    WriteResult result = null;
    CommandResult cmd = null;
    int count = 0;
    logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.CurrentBatchSize", m_batch.size()));
    for (int i = 0, len = m_batch.size(); i < len; i++) {
        DBObject toTry = m_batch.get(i);
        Object[] correspondingRow = m_batchRows.get(i);
        try {
            result = m_data.getCollection().save(toTry);
            cmd = result.getLastError();

            if (cmd != null && !cmd.ok()) {
                String message = cmd.getErrorMessage();
                logError(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.MongoReported", message)); //$NON-NLS-1$

                cmd.throwOnError();
            }

            count++;
        } catch (MongoException ex) {
            if (!lastRetry) {
                logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.SuccessfullySavedXDocuments",
                        count));
                m_batch = copyExceptFirst(count, m_batch);
                m_batchRows = copyExceptFirst(count, m_batchRows);
                throw ex;
            }

            // Send this one to the error stream if doing error handling
            if (getStepMeta().isDoingErrorHandling()) {
                putError(getInputRowMeta(), correspondingRow, 1, ex.getMessage(), "", "MongoDbOutput");
            } else {
                m_batch = copyExceptFirst(i + 1, m_batch);
                m_batchRows = copyExceptFirst(i + 1, m_batchRows);
                throw ex;
            }
        }
    }

    m_batch.clear();
    m_batchRows.clear();

    logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.SuccessfullySavedXDocuments", count));

    return cmd;
}

From source file:org.pentaho.di.trans.steps.mongodboutput.MongoDbOutput.java

License:Open Source License

protected void doBatch() throws KettleException, MongoDbException {
    int retries = 0;
    MongoException lastEx = null;

    while (retries <= m_writeRetries && !isStopped()) {
        WriteResult result = null;
        CommandResult cmd = null;
        try {
            if (retries == 0) {
                result = m_data.getCollection().insert(m_batch);
                cmd = result.getLastError();

                if (cmd != null && !cmd.ok()) {
                    String message = cmd.getErrorMessage();
                    logError(
                            BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.MongoReported", message)); //$NON-NLS-1$

                    cmd.throwOnError();
                }
            } else {
                // fall back to save
                logBasic(BaseMessages.getString(PKG,
                        "MongoDbOutput.Messages.SavingIndividualDocsInCurrentBatch"));
                cmd = batchRetryUsingSave(retries == m_writeRetries);
            }
        } catch (MongoException me) {
            // avoid exception if a timeout issue occurred and it was exactly the first attempt
            boolean shouldNotBeAvoided = !isTimeoutException(me) && (retries == 0);
            if (shouldNotBeAvoided) {
                lastEx = me;
            }
            retries++;
            if (retries <= m_writeRetries) {
                if (shouldNotBeAvoided) {
                    // skip logging error
                    // however do not skip saving elements separately during next attempt to prevent losing data
                    logError(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Error.ErrorWritingToMongo", //$NON-NLS-1$
                            me.toString()));
                    logBasic(BaseMessages.getString(PKG, "MongoDbOutput.Messages.Message.Retry", //$NON-NLS-1$
                            m_writeRetryDelay));
                }
                try {
                    Thread.sleep(m_writeRetryDelay * 1000);
                    // CHECKSTYLE:OFF
                } catch (InterruptedException e) {
                    // CHECKSTYLE:ON
                }
            }
            // throw new KettleException(me.getMessage(), me);
        }

        if (cmd != null) {
            ServerAddress s = cmd.getServerUsed();
            if (s != null) {
                logDetailed(
                        BaseMessages.getString(PKG, "MongoDbOutput.Messages.WroteBatchToServer", s.toString())); //$NON-NLS-1$
            }
        }

        if (cmd != null && cmd.ok()) {
            break;
        }
    }

    if ((retries > m_writeRetries || isStopped()) && lastEx != null) {
        throw new KettleException(lastEx);
    }

    m_batch.clear();
    m_batchRows.clear();
}