List of usage examples for com.mongodb WriteConcern SAFE
WriteConcern SAFE
Write operations that use this write concern wait for acknowledgement from the primary server before returning. In newer versions of the Java driver, WriteConcern.SAFE is deprecated in favor of the equivalent WriteConcern.ACKNOWLEDGED.
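For reference, here is a minimal, self-contained sketch of the pattern the examples below share. The host, port, database, and collection names are placeholders, not taken from any of the source files:

import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;
import com.mongodb.WriteConcern;

public class SafeWriteConcernExample {
    public static void main(String[] args) {
        MongoClient client = new MongoClient("localhost", 27017); // placeholder host/port
        try {
            DB db = client.getDB("exampleDb");                 // placeholder database name
            DBCollection col = db.getCollection("exampleCol"); // placeholder collection name
            // Writes through this collection now block until the primary acknowledges them.
            col.setWriteConcern(WriteConcern.SAFE);
            col.insert(new BasicDBObject("status", "ok"));
        } finally {
            client.close();
        }
    }
}

The write concern can also be supplied per operation rather than per collection, as several examples below do by passing WriteConcern.SAFE directly to insert, save, or remove.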
From source file:cn.cnic.bigdatalab.flume.sink.mongodb.MongoSink.java
License:Apache License
/**
 * {@inheritDoc}
 *
 * @param context
 */
@Override
public void configure(Context context) {
    try {
        if (!"INJECTED".equals(context.getString(CONF_URI))) {
            // Build the client with WriteConcern.SAFE so every write is acknowledged by the primary.
            this.mongoClientURI = new MongoClientURI(context.getString(CONF_URI),
                    MongoClientOptions.builder().writeConcern(WriteConcern.SAFE));
            this.mongoClient = new MongoClient(mongoClientURI);
            if (mongoClientURI.getDatabase() != null) {
                this.mongoDefaultDb = mongoClient.getDB(mongoClientURI.getDatabase());
            }
            if (mongoClientURI.getCollection() != null) {
                this.mongoDefaultCollection = mongoDefaultDb.getCollection(mongoClientURI.getCollection());
            }
        }
        final String mappingFilename = context.getString(CONF_MAPPING_FILE);
        this.eventParser = (mappingFilename == null) ? new EventParser()
                : new EventParser(MappingDefinition.load(mappingFilename));
        this.isDynamicMode = context.getBoolean(CONF_DYNAMIC, DEFAULT_DYNAMIC);
        if (!isDynamicMode && mongoDefaultCollection == null) {
            throw new MongoSinkException(
                    "Default MongoDB collection must be specified unless dynamic mode is enabled");
        }
        this.dynamicDBField = context.getString(CONF_DYNAMIC_DB_FIELD, DEFAULT_DYNAMIC_DB_FIELD);
        this.dynamicCollectionField = context.getString(CONF_DYNAMIC_COLLECTION_FIELD,
                DEFAULT_DYNAMIC_COLLECTION_FIELD);
        this.sinkCounter = new SinkCounter(this.getName());
        this.batchSize = context.getInteger(CONF_BATCH_SIZE, DEFAULT_BATCH_SIZE);
        this.updateInsteadReplace = context.getBoolean(CONF_UPDATE_INSTEAD_REPLACE,
                DEFAULT_UPDATE_INSTEAD_REPLACE);
    } catch (IOException ex) {
        throw new MongoSinkException(ex);
    }
}
From source file:cn.vlabs.clb.server.storage.mongo.MongoStorageService.java
License:Apache License
protected void writeFile(InputStream ins, MFile mf, String tableName) {
    DB db = options.getCollection(TABLE_TEMP_KEY).getDB();
    db.requestStart();
    GridFSInputFile gfsInput;
    gfsInput = new GridFS(db, tableName).createFile(ins);
    DBCollection col = db.getCollection(tableName + ".files");
    col.setWriteConcern(WriteConcern.SAFE);
    gfsInput.setFilename(mf.getFilename());
    gfsInput.put(FIELD_STORAGE_KEY, mf.getStorageKey());
    gfsInput.put(FIELD_DOC_ID, mf.getDocid());
    gfsInput.put(FILED_VERSION_ID, mf.getVid());
    gfsInput.put(FIELD_APP_ID, mf.getAppid());
    gfsInput.put(FILED_IS_PUB, getIsPubStatus(mf.getIsPub()));
    gfsInput.setContentType(mf.getContentType());
    gfsInput.save();
    db.requestDone();
}
From source file:cn.vlabs.clb.server.storage.mongo.MongoStorageService.java
License:Apache License
protected void writeTrivialFile(InputStream ins, MTrivialFile mtf, String tableName) {
    DB db = options.getCollection(TABLE_TEMP_KEY).getDB();
    db.requestStart();
    GridFSInputFile gfsInput;
    DBCollection col = db.getCollection(tableName + ".files");
    col.setWriteConcern(WriteConcern.SAFE);
    gfsInput = new GridFS(db, tableName).createFile(ins);
    gfsInput.setFilename(mtf.getFileName());
    gfsInput.put(FIELD_STORAGE_KEY, mtf.getStorageKey());
    gfsInput.put(FIELD_SPACE_NAME, mtf.getSpaceName());
    gfsInput.setContentType(mtf.getContentType());
    gfsInput.save();
    db.requestDone();
}
From source file:cn.vlabs.clb.server.storage.mongo.MongoStorageService.java
License:Apache License
private void chunkedUpload(int chunkIndex, int chunkCount, InputStream data, int dataLength, MFile mf,
        String tableName, int chunkSize) {
    DB db = options.getCollection(TABLE_TEMP_KEY).getDB();
    db.requestStart();
    MyGridFS gfs = new MyGridFS(db, tableName);
    DBCollection col = db.getCollection(tableName + ".files");
    col.setWriteConcern(WriteConcern.SAFE); // acknowledge every chunk write
    MyGridFSInputFile inf = new MyGridFSInputFile(gfs);
    inf.setFilename(mf.getFilename());
    inf.setId(mf.getStorageKey());
    inf.put(FIELD_STORAGE_KEY, mf.getStorageKey());
    inf.put(FIELD_DOC_ID, mf.getDocid());
    inf.put(FILED_VERSION_ID, mf.getVid());
    inf.put(FIELD_APP_ID, mf.getAppid());
    inf.put(FILED_IS_PUB, getIsPubStatus(mf.getIsPub()));
    inf.setContentType(mf.getContentType());
    inf.setChunkSize(chunkSize);
    inf.setMD5(mf.getMd5());
    inf.setLength(mf.getLength());
    // Buffer the entire chunk into memory before handing it to GridFS.
    int numBytes = 0;
    int tmpRead = 0;
    byte[] buf = new byte[BUF_SIZE];
    byte[] bigBuf = new byte[dataLength];
    try {
        while (EOF != (tmpRead = data.read(buf))) {
            System.arraycopy(buf, 0, bigBuf, numBytes, tmpRead);
            numBytes += tmpRead;
        }
        if (dataLength != numBytes) {
            System.out.println("Missing data: expected data-size=" + dataLength
                    + ", actual read data-size=" + numBytes);
            return;
        }
        inf.setInputStream(new ByteArrayInputStream(bigBuf, 0, numBytes));
        inf.setSavedChunk(false);
        inf.setCurrentChunkNumber(chunkIndex);
        inf.saveChunks(chunkSize);
        System.out.println("Chunk " + chunkIndex + " upload finished, bytes " + numBytes);
    } catch (IOException e) {
        e.printStackTrace();
    } catch (MongoException e) {
        e.printStackTrace();
    }
    // Only finalize the GridFS file once the last chunk has been written.
    if (chunkIndex == (chunkCount - 1)) {
        inf.setSavedChunk(true);
        inf.save();
    }
    db.requestDone();
}
From source file:com.clavain.utils.Database.java
License:Apache License
public static void removeOldPackageTrack(int p_nodeid) {
    try {
        logger.info("Purging Package Logs for NodeID: " + p_nodeid);
        DB db;
        String dbName = com.clavain.muninmxcd.p.getProperty("mongo.dbessentials");
        db = m.getDB(dbName);
        // Use an acknowledged write concern for the remove, then drop back to fire-and-forget.
        db.setWriteConcern(WriteConcern.SAFE);
        DBCollection col = db.getCollection("trackpkg");
        BasicDBObject query = new BasicDBObject();
        query.append("node", p_nodeid);
        col.remove(query);
        db.setWriteConcern(WriteConcern.NONE);
    } catch (Exception ex) {
        logger.error("Error in removeOldPackageTrack: " + ex.getLocalizedMessage());
    }
}
From source file:com.cloudbees.demo.beesshop.domain.ProductRepository.java
License:Apache License
public void update(@Nonnull Product product) {
    collection.save(new Product.ToDBObjectFunction().apply(product), WriteConcern.SAFE);
}
From source file:com.cloudbees.demo.beesshop.domain.ProductRepository.java
License:Apache License
public void insert(@Nonnull Product product) {
    DBObject dbObject = new Product.ToDBObjectFunction().apply(product);
    collection.insert(WriteConcern.SAFE, dbObject);
    // The driver populates the generated _id on the inserted DBObject.
    product.setId((ObjectId) dbObject.get("_id"));
}
From source file:com.cloudbees.gasp.model.MongoConnection.java
License:Apache License
public void connect() throws Exception {
    try {
        // Connect to Mongo and authenticate
        MongoURI mongoURI = new MongoURI(mongoURL);
        mongo = new Mongo(mongoURI);
        mongoDB = mongo.getDB(mongoURI.getDatabase());
        mongoDB.authenticate(mongoURI.getUsername(), mongoURI.getPassword());

        // Get Mongo collections and set WriteConcern
        String mongoLocations = "locations";
        locations = getMongoDB().getCollection(mongoLocations);
        mongoDB.setWriteConcern(WriteConcern.SAFE);
    } catch (Exception e) {
        e.printStackTrace();
        throw e;
    }
}
From source file:com.deafgoat.ml.prognosticator.MongoImport.java
License:Apache License
/**
 * Removes a JSON configuration matching the specified configuration name.
 *
 * @param configName
 *            The name of the JSON configuration to find.
 * @return True if the remove reported no error.
 * @throws IOException
 *             If it cannot read the configuration file.
 * @throws FileNotFoundException
 *             If it cannot find the configuration file.
 * @throws JSONException
 *             If the configuration file(s) cannot be converted to a JSONObject.
 * @throws MongoException
 *             If it is unable to connect to a running mongod.
 */
public boolean removeConfiguration(String configName)
        throws FileNotFoundException, IOException, MongoException, JSONException {
    BasicDBObject document = new BasicDBObject();
    document.put("name", configName);
    WriteResult result = _collection.remove(document, WriteConcern.SAFE);
    return result.getError() == null;
}
From source file:com.deftlabs.lock.mongo.impl.LockHistoryDao.java
License:Apache License
/**
 * Insert an entry.
 */
static void insert(final MongoClient pMongo, final String pLockName, final DistributedLockSvcOptions pSvcOptions,
        final int pInactiveLockTimeout, long pServerTime, final LockState pLockState, final Object pLockId,
        final boolean pTimedOut) {
    final Thread currentThread = Thread.currentThread();

    long serverTime = pServerTime;
    if (serverTime == 0)
        serverTime = getServerTime(pMongo, pSvcOptions);

    final Date now = new Date(serverTime);

    final BasicDBObject lockDoc = new BasicDBObject(LockHistoryDef.LOCK_NAME.field, pLockName);
    lockDoc.put(LockHistoryDef.LIBRARY_VERSION.field, pSvcOptions.getLibVersion());
    lockDoc.put(LockHistoryDef.CREATED.field, now);
    lockDoc.put(LockHistoryDef.LOCK_ID.field, pLockId);
    lockDoc.put(LockHistoryDef.STATE.field, pLockState.code());
    lockDoc.put(LockHistoryDef.OWNER_APP_NAME.field, pSvcOptions.getAppName());
    lockDoc.put(LockHistoryDef.OWNER_ADDRESS.field, pSvcOptions.getHostAddress());
    lockDoc.put(LockHistoryDef.OWNER_HOSTNAME.field, pSvcOptions.getHostname());
    lockDoc.put(LockHistoryDef.OWNER_THREAD_ID.field, currentThread.getId());
    lockDoc.put(LockHistoryDef.OWNER_THREAD_NAME.field, currentThread.getName());
    lockDoc.put(LockHistoryDef.OWNER_THREAD_GROUP_NAME.field, currentThread.getThreadGroup().getName());
    lockDoc.put(LockHistoryDef.INACTIVE_LOCK_TIMEOUT.field, pInactiveLockTimeout);
    lockDoc.put(LockHistoryDef.TIMED_OUT.field, pTimedOut);

    getDbCollection(pMongo, pSvcOptions).insert(lockDoc, WriteConcern.SAFE);
}