List of usage examples for com.mongodb WriteConcern FSYNCED
WriteConcern FSYNCED
To view the source code for com.mongodb WriteConcern FSYNCED, click the Source Link below.
From source file:com.novemberain.quartz.mongodb.MongoDBJobStore.java
License:Open Source License
private void doAcquireNextTriggers(Map<TriggerKey, OperableTrigger> triggers, Date noLaterThanDate, int maxCount) throws JobPersistenceException { BasicDBObject query = new BasicDBObject(); query.put(TRIGGER_NEXT_FIRE_TIME, new BasicDBObject("$lte", noLaterThanDate)); DBCursor cursor = triggerCollection.find(query); BasicDBObject sort = new BasicDBObject(); sort.put(TRIGGER_NEXT_FIRE_TIME, Integer.valueOf(1)); cursor.sort(sort);// w w w . j a v a 2s . c om log.debug("Found {} triggers which are eligible to be run.", cursor.count()); while (cursor.hasNext() && maxCount > triggers.size()) { DBObject dbObj = cursor.next(); OperableTrigger trigger = toTrigger(dbObj); try { if (trigger == null) { continue; } if (triggers.containsKey(trigger.getKey())) { log.debug("Skipping trigger {} as we have already acquired it.", trigger.getKey()); continue; } if (trigger.getNextFireTime() == null) { log.debug("Skipping trigger {} as it has no next fire time.", trigger.getKey()); // No next fire time, so delete it removeTrigger(trigger.getKey()); continue; } // deal with misfires if (applyMisfire(trigger)) { log.debug("Misfire trigger {}.", trigger.getKey()); Date nextFireTime = trigger.getNextFireTime(); if (nextFireTime == null) { log.debug("Removing trigger {} as it has no next fire time after the misfire was applied.", trigger.getKey()); // No next fire time, so delete it removeTrigger(trigger.getKey()); continue; } // The trigger has misfired and was rescheduled, its firetime may be too far in the future // and we don't want to hang the quartz scheduler thread up on <code>sigLock.wait(timeUntilTrigger);</code> // so, check again that the trigger is due to fire if (nextFireTime.after(noLaterThanDate)) { log.debug("Skipping trigger {} as it misfired and was scheduled for {}.", trigger.getKey(), trigger.getNextFireTime()); continue; } } log.debug("Inserting lock for trigger {}", trigger.getKey()); BasicDBObject lock = new BasicDBObject(); lock.put(KEY_NAME, dbObj.get(KEY_NAME)); 
lock.put(KEY_GROUP, dbObj.get(KEY_GROUP)); lock.put(LOCK_INSTANCE_ID, instanceId); lock.put(LOCK_TIME, new Date()); // A lock needs to be written with FSYNCED to be 100% effective across multiple servers locksCollection.insert(lock, WriteConcern.FSYNCED); log.debug("Aquired trigger {}", trigger.getKey()); triggers.put(trigger.getKey(), trigger); } catch (DuplicateKey e) { // someone else acquired this lock. Move on. log.debug("Failed to acquire trigger {} due to a lock", trigger.getKey()); BasicDBObject lock = new BasicDBObject(); lock.put(KEY_NAME, dbObj.get(KEY_NAME)); lock.put(KEY_GROUP, dbObj.get(KEY_GROUP)); DBObject existingLock; DBCursor lockCursor = locksCollection.find(lock); if (lockCursor.hasNext()) { existingLock = lockCursor.next(); // support for trigger lock expirations if (isTriggerLockExpired(existingLock)) { log.warn("Lock for trigger {} is expired - removing lock and retrying trigger acquisition", trigger.getKey()); removeTriggerLock(trigger); doAcquireNextTriggers(triggers, noLaterThanDate, maxCount - triggers.size()); } } else { log.warn("Error retrieving expired lock from the database. Maybe it was deleted"); doAcquireNextTriggers(triggers, noLaterThanDate, maxCount - triggers.size()); } } } }
From source file:com.novemberain.quartz.mongodb.MongoDBJobStore.java
License:Open Source License
public List<TriggerFiredResult> triggersFired(List<OperableTrigger> triggers) throws JobPersistenceException { List<TriggerFiredResult> results = new ArrayList<TriggerFiredResult>(); for (OperableTrigger trigger : triggers) { log.debug("Fired trigger {}", trigger.getKey()); Calendar cal = null;/* w ww .ja va 2s . co m*/ if (trigger.getCalendarName() != null) { cal = retrieveCalendar(trigger.getCalendarName()); if (cal == null) continue; } Date prevFireTime = trigger.getPreviousFireTime(); TriggerFiredBundle bndle = new TriggerFiredBundle(retrieveJob(trigger), trigger, cal, false, new Date(), trigger.getPreviousFireTime(), prevFireTime, trigger.getNextFireTime()); JobDetail job = bndle.getJobDetail(); if (job != null) { try { if (job.isConcurrentExectionDisallowed()) { log.debug("Inserting lock for job {}", job.getKey()); BasicDBObject lock = new BasicDBObject(); lock.put(KEY_NAME, "jobconcurrentlock:" + job.getKey().getName()); lock.put(KEY_GROUP, job.getKey().getGroup()); lock.put(LOCK_INSTANCE_ID, instanceId); lock.put(LOCK_TIME, new Date()); // A lock needs to be written with FSYNCED to be 100% effective across multiple servers locksCollection.insert(lock, WriteConcern.FSYNCED); } results.add(new TriggerFiredResult(bndle)); trigger.triggered(cal); storeTrigger(trigger, true); } catch (DuplicateKey dk) { log.debug("Job disallows concurrent execution and is already running {}", job.getKey()); // Remove the trigger lock removeTriggerLock(trigger); // Find the existing lock and if still present, and expired, then remove it. 
BasicDBObject lock = new BasicDBObject(); lock.put(KEY_NAME, "jobconcurrentlock:" + job.getKey().getName()); lock.put(KEY_GROUP, job.getKey().getGroup()); DBObject existingLock; DBCursor lockCursor = locksCollection.find(lock); if (lockCursor.hasNext()) { existingLock = lockCursor.next(); if (isJobLockExpired(existingLock)) { log.debug("Removing expired lock for job {}", job.getKey()); locksCollection.remove(existingLock); } } } } } return results; }
From source file:net.acesinc.nifi.processors.mongodb.PartialUpdateMongo.java
protected WriteConcern getWriteConcern(final ProcessContext context) { final String writeConcernProperty = context.getProperty(WRITE_CONCERN).getValue(); WriteConcern writeConcern = null;// w ww. j a va 2 s. c om switch (writeConcernProperty) { case WRITE_CONCERN_ACKNOWLEDGED: writeConcern = WriteConcern.ACKNOWLEDGED; break; case WRITE_CONCERN_UNACKNOWLEDGED: writeConcern = WriteConcern.UNACKNOWLEDGED; break; case WRITE_CONCERN_FSYNCED: writeConcern = WriteConcern.FSYNCED; break; case WRITE_CONCERN_JOURNALED: writeConcern = WriteConcern.JOURNALED; break; case WRITE_CONCERN_REPLICA_ACKNOWLEDGED: writeConcern = WriteConcern.REPLICA_ACKNOWLEDGED; break; case WRITE_CONCERN_MAJORITY: writeConcern = WriteConcern.MAJORITY; break; default: writeConcern = WriteConcern.ACKNOWLEDGED; } return writeConcern; }
From source file:org.apache.nifi.mongodb.AbstractMongoDBControllerService.java
License:Apache License
/**
 * Maps the service's configured Write Concern property name onto the corresponding
 * MongoDB driver constant, defaulting to {@link WriteConcern#ACKNOWLEDGED} for any
 * value that is not recognized.
 *
 * @param context the configuration context holding the WRITE_CONCERN property
 * @return the selected write concern, never null
 */
protected WriteConcern getWriteConcern(final ConfigurationContext context) {
    final String configured = context.getProperty(WRITE_CONCERN).getValue();
    if (WRITE_CONCERN_UNACKNOWLEDGED.equals(configured)) {
        return WriteConcern.UNACKNOWLEDGED;
    }
    if (WRITE_CONCERN_FSYNCED.equals(configured)) {
        return WriteConcern.FSYNCED;
    }
    if (WRITE_CONCERN_JOURNALED.equals(configured)) {
        return WriteConcern.JOURNALED;
    }
    if (WRITE_CONCERN_REPLICA_ACKNOWLEDGED.equals(configured)) {
        return WriteConcern.REPLICA_ACKNOWLEDGED;
    }
    if (WRITE_CONCERN_MAJORITY.equals(configured)) {
        return WriteConcern.MAJORITY;
    }
    // WRITE_CONCERN_ACKNOWLEDGED and every unknown value resolve to the safe default.
    return WriteConcern.ACKNOWLEDGED;
}
From source file:org.graylog2.cluster.ClusterConfigServiceImpl.java
License:Open Source License
@VisibleForTesting static DBCollection prepareCollection(final MongoConnection mongoConnection) { DBCollection coll = mongoConnection.getDatabase().getCollection(COLLECTION_NAME); coll.createIndex(DBSort.asc("type"), "unique_type", true); coll.setWriteConcern(WriteConcern.FSYNCED); return coll;// w w w . j a va 2 s . c o m }
From source file:org.graylog2.cluster.ClusterConfigServiceImpl.java
License:Open Source License
@Override public <T> void write(T payload) { if (payload == null) { LOG.debug("Payload was null. Skipping."); return;//w w w . j a v a 2 s . c om } String canonicalClassName = AutoValueUtils.getCanonicalName(payload.getClass()); ClusterConfig clusterConfig = ClusterConfig.create(canonicalClassName, payload, nodeId.toString()); dbCollection.update(DBQuery.is("type", canonicalClassName), clusterConfig, true, false, WriteConcern.FSYNCED); ClusterConfigChangedEvent event = ClusterConfigChangedEvent.create(DateTime.now(DateTimeZone.UTC), nodeId.toString(), canonicalClassName); clusterEventBus.post(event); }
From source file:org.graylog2.events.ClusterEventCleanupPeriodical.java
License:Open Source License
/**
 * Deletes cluster events older than the configured maximum age from the shared
 * collection. Any failure is logged and swallowed so the periodical keeps running.
 */
@Override
public void doRun() {
    try {
        LOG.debug("Removing stale events from MongoDB collection \"{}\"", COLLECTION_NAME);
        // Everything with a timestamp before this cutoff is considered stale.
        final long cutoff = DateTime.now(DateTimeZone.UTC).getMillis() - maxEventAge;
        final DBQuery.Query staleQuery = DBQuery.lessThan("timestamp", cutoff);
        final WriteResult<ClusterEvent, String> result = dbCollection.remove(staleQuery, WriteConcern.FSYNCED);
        LOG.debug("Removed {} stale events from \"{}\"", result.getN(), COLLECTION_NAME);
    } catch (Exception e) {
        LOG.warn("Error while removing stale cluster events from MongoDB", e);
    }
}
From source file:org.graylog2.events.ClusterEventPeriodical.java
License:Open Source License
/**
 * Prepares the cluster-event collection: warns if it is (incorrectly) capped, ensures
 * the compound lookup index, and configures {@link WriteConcern#FSYNCED} writes so
 * published events are durable across nodes.
 *
 * @param mongoConnection connection to the MongoDB database
 * @return the fully prepared collection
 */
@VisibleForTesting
static DBCollection prepareCollection(final MongoConnection mongoConnection) {
    final DB db = mongoConnection.getDatabase();
    final DBCollection collection = db.getCollection(COLLECTION_NAME);
    if (collection.isCapped()) {
        LOG.warn(
                "The \"{}\" collection in MongoDB is capped which will cause problems. Please drop the collection.",
                COLLECTION_NAME);
    }
    collection.createIndex(DBSort.asc("timestamp").asc("producer").asc("consumers"));
    collection.setWriteConcern(WriteConcern.FSYNCED);
    return collection;
}
From source file:org.graylog2.events.ClusterEventPeriodical.java
License:Open Source License
@Subscribe public void publishClusterEvent(Object event) { if (event instanceof DeadEvent) { LOG.debug("Skipping DeadEvent on cluster event bus"); return;// w w w . ja v a 2 s.c om } final String className = AutoValueUtils.getCanonicalName(event.getClass()); final ClusterEvent clusterEvent = ClusterEvent.create(nodeId.toString(), className, event); try { final String id = dbCollection.save(clusterEvent, WriteConcern.FSYNCED).getSavedId(); LOG.debug("Published cluster event with ID <{}> and type <{}>", id, className); } catch (MongoException e) { LOG.error("Couldn't publish cluster event of type <" + className + ">", e); } }
From source file:org.lucee.mongodb.support.ObjectSupport.java
License:Open Source License
/**
 * Coerces an arbitrary value to a {@link WriteConcern}. A WriteConcern instance is
 * returned as-is; a simple value is converted to a string and matched
 * (case-insensitively) against the names of the driver's predefined concerns.
 * Anything else yields {@code defaultValue}.
 *
 * <p>Fixes over the previous revision: the duplicated, unreachable second
 * {@code "ACKNOWLEDGED"} branch is removed; the literal string {@code "FSYNCED"} is now
 * recognized (it previously matched nothing), alongside the legacy FSYNC_SAFE spellings
 * which already mapped to {@code WriteConcern.FSYNCED}; and upper-casing uses an
 * explicit English locale so matching is not broken by locale-specific case rules
 * (e.g. the Turkish dotted I).
 *
 * @param obj          the value to coerce (WriteConcern, string, or other simple value)
 * @param defaultValue returned when no mapping applies
 * @return the resolved write concern, or {@code defaultValue}
 */
public WriteConcern toWriteConcern(Object obj, WriteConcern defaultValue) {
    if (obj instanceof WriteConcern)
        return (WriteConcern) obj;
    if (decision.isSimpleValue(obj)) {
        String str = caster.toString(obj, "");
        // Locale-independent normalization; default-locale toUpperCase() can mangle 'i'.
        str = str.trim().toUpperCase(java.util.Locale.ENGLISH);
        if ("ACKNOWLEDGED".equals(str))
            return WriteConcern.ACKNOWLEDGED;
        else if ("FSYNCED".equals(str) || "FSYNC_SAFE".equals(str) || "FSYNCSAFE".equals(str))
            return WriteConcern.FSYNCED;
        else if ("JOURNAL_SAFE".equals(str) || "JOURNALSAFE".equals(str))
            return WriteConcern.JOURNAL_SAFE;
        else if ("JOURNALED".equals(str))
            return WriteConcern.JOURNALED;
        else if ("MAJORITY".equals(str))
            return WriteConcern.MAJORITY;
        else if ("NORMAL".equals(str))
            return WriteConcern.NORMAL;
        else if ("REPLICA_ACKNOWLEDGED".equals(str) || "REPLICAACKNOWLEDGED".equals(str))
            return WriteConcern.REPLICA_ACKNOWLEDGED;
        else if ("REPLICAS_SAFE".equals(str) || "REPLICASSAFE".equals(str))
            return WriteConcern.REPLICAS_SAFE;
        else if ("SAFE".equals(str))
            return WriteConcern.SAFE;
        else if ("UNACKNOWLEDGED".equals(str))
            return WriteConcern.UNACKNOWLEDGED;
    }
    return defaultValue;
}