List of usage examples for com.mongodb.client.MongoCursor#hasNext(), which returns true if the cursor has more elements (it overrides java.util.Iterator#hasNext).
@Override
boolean hasNext();
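MongoCursor extends both java.util.Iterator and java.io.Closeable, so hasNext() drives the familiar iterate-then-close pattern used in every example below. A minimal, self-contained sketch of that pattern; the connection string, database name, and collection name are placeholders:

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoCursor;
import org.bson.Document;

public class HasNextExample {
    public static void main(String[] args) {
        // Placeholder connection string, database, and collection names.
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            MongoCollection<Document> collection = client.getDatabase("test").getCollection("example");
            // MongoCursor implements Closeable, so try-with-resources closes it.
            try (MongoCursor<Document> cursor = collection.find().iterator()) {
                while (cursor.hasNext()) {
                    System.out.println(cursor.next().toJson());
                }
            }
        }
    }
}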
From source file: org.apache.nifi.processors.mongodb.gridfs.DeleteGridFS.java
License: Apache License
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile input = session.get();
    if (input == null) {
        return;
    }

    final String deleteQuery = getQuery(context, input);
    final String queryAttribute = context.getProperty(QUERY_ATTRIBUTE).isSet()
            ? context.getProperty(QUERY_ATTRIBUTE).evaluateAttributeExpressions(input).getValue()
            : null;
    GridFSBucket bucket = getBucket(input, context);

    try {
        Document query = Document.parse(deleteQuery);
        // try-with-resources guarantees the cursor is closed even if the delete throws
        try (MongoCursor<GridFSFile> cursor = bucket.find(query).iterator()) {
            if (cursor.hasNext()) {
                GridFSFile file = cursor.next();
                bucket.delete(file.getObjectId());

                if (!StringUtils.isEmpty(queryAttribute)) {
                    input = session.putAttribute(input, queryAttribute, deleteQuery);
                }
                session.transfer(input, REL_SUCCESS);
            } else {
                getLogger().error(String.format("Query %s did not delete anything in %s", deleteQuery,
                        bucket.getBucketName()));
                session.transfer(input, REL_FAILURE);
            }
        }
    } catch (Exception ex) {
        getLogger().error(String.format("Error deleting using query: %s", deleteQuery), ex);
        session.transfer(input, REL_FAILURE);
    }
}
From source file: org.apache.nifi.processors.mongodb.gridfs.FetchGridFS.java
License: Apache License
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile input = session.get();
    if (input == null) {
        return;
    }

    final String operatingMode = context.getProperty(OPERATION_MODE).getValue();
    final Map<String, String> originalAttributes = input.getAttributes();

    String queryStr;
    try {
        queryStr = getQuery(session, context, input);
        if (StringUtils.isEmpty(queryStr)) {
            getLogger().error("No query could be found or built from the supplied input.");
            session.transfer(input, REL_FAILURE);
            return;
        }
    } catch (IOException ex) {
        getLogger().error("No query could be found from supplied input", ex);
        session.transfer(input, REL_FAILURE);
        return;
    }

    Document query = Document.parse(queryStr);

    try {
        final GridFSBucket bucket = getBucket(input, context);
        final String queryPtr = queryStr;
        final FlowFile parent = operatingMode.equals(MODE_ONE_COMMIT.getValue()) ? input : null;

        // try-with-resources ensures the cursor is closed when iteration finishes or fails
        try (MongoCursor<GridFSFile> it = bucket.find(query).iterator()) {
            if (operatingMode.equals(MODE_MANY_COMMITS.getValue())) {
                session.transfer(input, REL_ORIGINAL);
                input = null;
            }

            while (it.hasNext()) {
                GridFSFile gridFSFile = it.next();
                handleFile(bucket, session, context, parent, gridFSFile, queryPtr);

                if (operatingMode.equals(MODE_MANY_COMMITS.getValue())) {
                    session.commit();
                }
            }
        }

        if (input != null) {
            session.transfer(input, REL_ORIGINAL);
        }
    } catch (Exception ex) {
        getLogger().error("An error occurred while trying to run the query.", ex);
        if (input != null && operatingMode.equals(MODE_ONE_COMMIT.getValue())) {
            session.transfer(input, REL_FAILURE);
        } else if (input != null && operatingMode.equals(MODE_MANY_COMMITS.getValue())) {
            final String queryPtr = queryStr;
            FlowFile cloned = session.create();
            cloned = session.putAllAttributes(cloned, originalAttributes);
            cloned = session.write(cloned, out -> out.write(queryPtr.getBytes()));
            session.transfer(cloned, REL_FAILURE);
        }
    }
}
From source file: org.apache.nifi.processors.mongodb.gridfs.GridFSITTestBase.java
License: Apache License
public boolean fileExists(String name, String bucketName) {
    GridFSBucket bucket = GridFSBuckets.create(client.getDatabase(DB), bucketName);
    MongoCursor<GridFSFile> it = bucket
            .find(Document.parse(String.format("{ \"filename\": \"%s\" }", name))).iterator();
    boolean retVal = it.hasNext();
    it.close();
    return retVal;
}
From source file: org.apache.nifi.processors.mongodb.gridfs.GridFSITTestBase.java
License: Apache License
public boolean fileHasProperties(String name, String bucketName, Map<String, String> attrs) {
    GridFSBucket bucket = GridFSBuckets.create(client.getDatabase(DB), bucketName);
    MongoCursor<GridFSFile> it = bucket
            .find(Document.parse(String.format("{ \"filename\": \"%s\" }", name))).iterator();
    boolean retVal = false;
    if (it.hasNext()) {
        GridFSFile file = it.next();
        Document metadata = file.getMetadata();
        if (metadata != null && metadata.size() == attrs.size()) {
            retVal = true;
            for (Map.Entry<String, Object> entry : metadata.entrySet()) {
                Object val = attrs.get(entry.getKey());
                if (val == null || !entry.getValue().equals(val)) {
                    retVal = false;
                    break;
                }
            }
        }
    }
    it.close();
    return retVal;
}
From source file: org.apache.nifi.processors.mongodb.RunMongoAggregation.java
License: Apache License
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = null;
    if (context.hasIncomingConnection()) {
        flowFile = session.get();
        if (flowFile == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final String query = context.getProperty(QUERY).evaluateAttributeExpressions(flowFile).getValue();
    final Boolean allowDiskUse = context.getProperty(ALLOW_DISK_USE).asBoolean();
    final String queryAttr = context.getProperty(QUERY_ATTRIBUTE).evaluateAttributeExpressions(flowFile)
            .getValue();
    final Integer batchSize = context.getProperty(BATCH_SIZE).asInteger();
    final Integer resultsPerFlowfile = context.getProperty(RESULTS_PER_FLOWFILE).asInteger();
    final String jsonTypeSetting = context.getProperty(JSON_TYPE).getValue();
    final String dateFormat = context.getProperty(DATE_FORMAT).evaluateAttributeExpressions(flowFile)
            .getValue();

    configureMapper(jsonTypeSetting, dateFormat);

    Map<String, String> attrs = new HashMap<>();
    if (queryAttr != null && queryAttr.trim().length() > 0) {
        attrs.put(queryAttr, query);
    }

    MongoCursor<Document> iter = null;
    try {
        MongoCollection<Document> collection = getCollection(context, flowFile);
        List<Bson> aggQuery = buildAggregationQuery(query);
        AggregateIterable<Document> it = collection.aggregate(aggQuery).allowDiskUse(allowDiskUse);
        it.batchSize(batchSize != null ? batchSize : 1);

        iter = it.iterator();
        List<Document> batch = new ArrayList<>();
        while (iter.hasNext()) {
            batch.add(iter.next());
            if (batch.size() == resultsPerFlowfile) {
                writeBatch(buildBatch(batch), flowFile, context, session, attrs, REL_RESULTS);
                batch = new ArrayList<>();
            }
        }

        if (batch.size() > 0) {
            writeBatch(buildBatch(batch), flowFile, context, session, attrs, REL_RESULTS);
        }

        if (flowFile != null) {
            session.transfer(flowFile, REL_ORIGINAL);
        }
    } catch (Exception e) {
        getLogger().error("Error running MongoDB aggregation query.", e);
        if (flowFile != null) {
            session.transfer(flowFile, REL_FAILURE);
        }
    } finally {
        if (iter != null) {
            iter.close();
        }
    }
}
From source file: org.apache.rya.indexing.geotemporal.mongo.MongoEventStorage.java
License: Apache License
@Override
public Collection<Event> search(final Optional<RyaURI> subject,
        final Optional<Collection<IndexingExpr>> geoFilters,
        final Optional<Collection<IndexingExpr>> temporalFilters) throws EventStorageException {
    requireNonNull(subject);
    try {
        final Collection<IndexingExpr> geos = (geoFilters.isPresent() ? geoFilters.get() : new ArrayList<>());
        final Collection<IndexingExpr> tempos = (temporalFilters.isPresent() ? temporalFilters.get()
                : new ArrayList<>());
        final DBObject filterObj = queryAdapter.getFilterQuery(geos, tempos);

        final BasicDBObjectBuilder builder = BasicDBObjectBuilder.start(filterObj.toMap());
        if (subject.isPresent()) {
            builder.append(EventDocumentConverter.SUBJECT, subject.get().getData());
        }

        final MongoCursor<Document> results = mongo.getDatabase(ryaInstanceName).getCollection(COLLECTION_NAME)
                .find(BsonDocument.parse(builder.get().toString())).iterator();

        final List<Event> events = new ArrayList<>();
        while (results.hasNext()) {
            events.add(EVENT_CONVERTER.fromDocument(results.next()));
        }
        return events;
    } catch (final MongoException | DocumentConverterException | GeoTemporalIndexException e) {
        throw new EventStorageException("Could not get the Event.", e);
    }
}
From source file: org.axonframework.mongo.eventhandling.saga.repository.MongoSagaStore.java
License: Apache License
@Override
public Set<String> findSagas(Class<?> sagaType, AssociationValue associationValue) {
    final BasicDBObject value = associationValueQuery(sagaType, associationValue);
    MongoCursor<Document> dbCursor = mongoTemplate.sagaCollection().find(value)
            .projection(include("sagaIdentifier")).iterator();
    Set<String> found = new TreeSet<>();
    while (dbCursor.hasNext()) {
        found.add((String) dbCursor.next().get("sagaIdentifier"));
    }
    return found;
}
From source file: org.bananaforscale.cormac.dao.AbstractDataService.java
License: Apache License
/**
 * Returns all unique database names in a MongoDB data source.
 *
 * @return a {@link Set} of database names
 */
protected Set<String> getDatabaseNames() {
    final Set<String> dbSet = new HashSet<>();
    final MongoCursor<String> cursor = mongoClient.listDatabaseNames().iterator();
    while (cursor.hasNext()) {
        dbSet.add(cursor.next());
    }
    return dbSet;
}
From source file: org.bananaforscale.cormac.dao.AbstractDataService.java
License: Apache License
/**
 * Retrieves the names of the collections in a database.
 *
 * @param databaseName the name of the database
 * @return a {@link Set} of collection names
 */
protected Set<String> getCollectionNames(final String databaseName) {
    final MongoDatabase mongoDatabase = mongoClient.getDatabase(databaseName);
    final Set<String> collectionSet = new HashSet<>();
    final MongoCursor<String> cursor = mongoDatabase.listCollectionNames().iterator();
    while (cursor.hasNext()) {
        collectionSet.add(cursor.next());
    }
    return collectionSet;
}
From source file: org.cleaner.mongo.FindDuplicatesByMd5.java
License: Open Source License
private void writeOutDuplicates(AggregateIterable<Document> aggregate) {
    MongoCursor<Document> iterator = aggregate.iterator();
    while (iterator.hasNext()) {
        duplicateStrategy.handleDuplicates(iterator.next(), gridFS);
    }
}
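When a result set is consumed in a single pass with no early exit, the explicit hasNext()/next() loop can also be replaced by MongoIterable.forEach, which manages the underlying cursor internally. A sketch of the method above rewritten that way, assuming the 4.x driver where forEach accepts a java.util.function.Consumer:

private void writeOutDuplicates(AggregateIterable<Document> aggregate) {
    // forEach consumes every result and handles cursor cleanup internally,
    // so no explicit hasNext()/next()/close() calls are needed.
    aggregate.forEach(document -> duplicateStrategy.handleDuplicates(document, gridFS));
}

By contrast, hasNext() remains the right tool when iteration may stop early or when each element needs surrounding control flow, as in the NiFi processors above.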