List of usage examples for com.mongodb.bulk.BulkWriteResult.getInsertedCount()
public abstract int getInsertedCount();
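The signature above is the driver's abstract accessor; the count is only defined for acknowledged writes. Before the examples from real projects, here is a minimal, self-contained sketch of the typical call pattern. The connection string, database, and collection names are placeholders, not taken from any example on this page:

import com.mongodb.bulk.BulkWriteResult;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.InsertOneModel;
import org.bson.Document;

import java.util.Arrays;

public class BulkInsertCountExample {
    public static void main(String[] args) {
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            MongoCollection<Document> collection = client.getDatabase("test").getCollection("items");
            // Queue two plain inserts as a single bulk operation
            BulkWriteResult result = collection.bulkWrite(Arrays.asList(
                    new InsertOneModel<>(new Document("name", "a")),
                    new InsertOneModel<>(new Document("name", "b"))));
            // getInsertedCount() counts documents added via InsertOneModel;
            // upserts are reported separately via getUpserts()
            if (result.wasAcknowledged()) {
                System.out.println("Inserted: " + result.getInsertedCount());
            }
        }
    }
}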
From source file: com.streamsets.pipeline.stage.destination.mongodb.MongoDBTarget.java
License: Apache License
@Override
public void write(Batch batch) throws StageException {
    Iterator<Record> records = batch.getRecords();
    List<WriteModel<Document>> documentList = new ArrayList<>();
    List<Record> recordList = new ArrayList<>();
    while (records.hasNext()) {
        Record record = records.next();
        try {
            ByteArrayOutputStream baos = new ByteArrayOutputStream(DEFAULT_CAPACITY);
            DataGenerator generator = generatorFactory.getGenerator(baos);
            generator.write(record);
            generator.close();
            Document document = Document.parse(new String(baos.toByteArray()));

            // create a write model based on the record header
            if (!record.getHeader().getAttributeNames().contains(OPERATION_KEY)) {
                LOG.error(Errors.MONGODB_15.getMessage(), record.getHeader().getSourceId());
                throw new OnRecordErrorException(Errors.MONGODB_15, record.getHeader().getSourceId());
            }

            String operation = record.getHeader().getAttribute(OPERATION_KEY);
            switch (operation) {
            case INSERT:
                documentList.add(new InsertOneModel<>(document));
                recordList.add(record);
                break;
            case UPSERT:
                validateUniqueKey(operation, record);
                recordList.add(record);
                documentList.add(new ReplaceOneModel<>(
                        new Document(removeLeadingSlash(mongoTargetConfigBean.uniqueKeyField),
                                record.get(mongoTargetConfigBean.uniqueKeyField).getValueAsString()),
                        document,
                        new UpdateOptions().upsert(true)));
                break;
            case DELETE:
                recordList.add(record);
                documentList.add(new DeleteOneModel<Document>(document));
                break;
            default:
                LOG.error(Errors.MONGODB_14.getMessage(), operation, record.getHeader().getSourceId());
                throw new StageException(Errors.MONGODB_14, operation, record.getHeader().getSourceId());
            }
        } catch (IOException | StageException e) {
            errorRecordHandler.onError(new OnRecordErrorException(record, Errors.MONGODB_13, e.toString(), e));
        }
    }

    if (!documentList.isEmpty()) {
        try {
            BulkWriteResult bulkWriteResult = coll.bulkWrite(documentList);
            if (bulkWriteResult.wasAcknowledged()) {
                LOG.trace("Wrote batch with {} inserts, {} updates and {} deletes",
                        bulkWriteResult.getInsertedCount(),
                        bulkWriteResult.getModifiedCount(),
                        bulkWriteResult.getDeletedCount());
            }
        } catch (MongoException e) {
            for (Record record : recordList) {
                errorRecordHandler
                        .onError(new OnRecordErrorException(record, Errors.MONGODB_17, e.toString(), e));
            }
        }
    }
}
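Note the wasAcknowledged() guard before the counters are read: with an unacknowledged write concern the driver has no server response to count from, and the count getters on BulkWriteResult throw UnsupportedOperationException rather than returning zeros.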
From source file: org.eclipse.ditto.services.thingsearch.persistence.write.streaming.SearchUpdaterStream.java
License: Open Source License
private static String logResult(final BulkWriteResult bulkWriteResult) {
    return String.format("BulkWriteResult[matched=%d,upserts=%d,inserted=%d,modified=%d,deleted=%d]",
            bulkWriteResult.getMatchedCount(),
            bulkWriteResult.getUpserts().size(),
            bulkWriteResult.getInsertedCount(),
            bulkWriteResult.getModifiedCount(),
            bulkWriteResult.getDeletedCount());
}
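This helper folds getInsertedCount() into a one-line log summary alongside the other counters. The upsert count comes from getUpserts().size() because upserted documents are reported separately and are not included in the inserted or matched counts.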
From source file: org.opencb.opencga.storage.mongodb.variant.load.stage.MongoDBVariantStageLoader.java
License: Apache License
/**
 * Given a map of id -> binary[], inserts the binary objects in the stage collection.
 *
 * {
 *     <studyId> : {
 *         <fileId> : [ BinData(), BinData() ]
 *     }
 * }
 *
 * The field <fileId> is an array to detect duplicated variants within the same file.
 *
 * It may happen that an update with upsert:true fails if two different threads try to
 * update the same non-existing document.
 * See https://jira.mongodb.org/browse/SERVER-14322
 *
 * In that case, the non-inserted values will be returned.
 *
 * @param values   Map with all the values to insert
 * @param result   MongoDBVariantWriteResult to fill
 * @param retryIds List of IDs to retry. If not null, only update those documents within this set
 * @return List of non-updated documents
 * @throws MongoBulkWriteException if the exception was not a duplicate key exception (error code 11000)
 */
private Set<String> updateMongo(ListMultimap<Document, Binary> values, MongoDBVariantWriteResult result,
        Set<String> retryIds) {
    Set<String> nonInsertedIds = Collections.emptySet();
    if (values.isEmpty()) {
        return nonInsertedIds;
    }

    List<Bson> queries = new LinkedList<>();
    List<Bson> updates = new LinkedList<>();
    for (Document id : values.keySet()) {
        if (retryIds == null || retryIds.contains(id.getString("_id"))) {
            List<Binary> binaryList = values.get(id);
            queries.add(eq("_id", id.getString("_id")));
            if (binaryList.size() == 1) {
                updates.add(combine(
                        resumeStageLoad ? addToSet(fieldName, binaryList.get(0))
                                : push(fieldName, binaryList.get(0)),
                        setOnInsert(END_FIELD, id.get(END_FIELD)),
                        setOnInsert(REF_FIELD, id.get(REF_FIELD)),
                        setOnInsert(ALT_FIELD, id.get(ALT_FIELD))));
            } else {
                updates.add(combine(
                        resumeStageLoad ? addEachToSet(fieldName, binaryList)
                                : pushEach(fieldName, binaryList),
                        setOnInsert(END_FIELD, id.get(END_FIELD)),
                        setOnInsert(REF_FIELD, id.get(REF_FIELD)),
                        setOnInsert(ALT_FIELD, id.get(ALT_FIELD))));
            }
        }
    }

    try {
        final BulkWriteResult mongoResult = collection.update(queries, updates, QUERY_OPTIONS).first();
        result.setNewVariants(mongoResult.getInsertedCount())
                .setUpdatedVariants(mongoResult.getModifiedCount());
    } catch (MongoBulkWriteException e) {
        result.setNewVariants(e.getWriteResult().getInsertedCount())
                .setUpdatedVariants(e.getWriteResult().getModifiedCount());
        if (retryIds != null) {
            // If retryIds != null, this was the second attempt to update. In this case, fail.
            LOGGER.error("BulkWriteErrors when retrying the updates");
            throw e;
        }
        nonInsertedIds = new HashSet<>();
        for (BulkWriteError writeError : e.getWriteErrors()) {
            if (ErrorCategory.fromErrorCode(writeError.getCode()).equals(ErrorCategory.DUPLICATE_KEY)) {
                // Duplicate key error code
                Matcher matcher = DUP_KEY_WRITE_RESULT_ERROR_PATTERN.matcher(writeError.getMessage());
                if (matcher.find()) {
                    String id = matcher.group(1);
                    nonInsertedIds.add(id);
                    LOGGER.warn("Caught error: {}", writeError.toString());
                    LOGGER.warn("DupKey exception inserting '{}'. Retry!", id);
                } else {
                    LOGGER.error("WriteError with code {} does not match the pattern {}",
                            writeError.getCode(), DUP_KEY_WRITE_RESULT_ERROR_PATTERN.pattern());
                    throw e;
                }
            } else {
                throw e;
            }
        }
    }
    return nonInsertedIds;
}
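Two details are worth noting. First, collection appears to be a project-specific wrapper rather than a raw driver MongoCollection: its update(...) returns a container whose first() element is the driver's BulkWriteResult. Second, the catch block still harvests getInsertedCount() from e.getWriteResult(), so partial progress is recorded even when concurrent upserts collide on the same _id (the SERVER-14322 case the Javadoc mentions), and only the colliding documents are retried.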
From source file: uk.ac.ebi.eva.dbmigration.mongodb.ExtractAnnotationFromVariant.java
License: Apache License
@ChangeSet(order = "001", id = "migrateAnnotation", author = "EVA")
public void migrateAnnotation(MongoDatabase mongoDatabase) {
    final MongoCollection<Document> variantsCollection = mongoDatabase
            .getCollection(databaseParameters.getDbCollectionsVariantsName());
    final MongoCollection<Document> annotationCollection = mongoDatabase
            .getCollection(databaseParameters.getDbCollectionsAnnotationsName());
    logger.info("1) migrate annotation from collection {}", variantsCollection.getNamespace());

    long annotationsReadCount = 0;
    long annotationsWrittenCount = 0;
    BulkWriteOptions unorderedBulk = new BulkWriteOptions().ordered(false);
    Document onlyAnnotatedVariants = new Document(ANNOT_FIELD, EXISTS);
    try (MongoCursor<Document> cursor = variantsCollection.find(onlyAnnotatedVariants).iterator()) {
        while (true) {
            List<InsertOneModel<Document>> annotationsToInsert = getBatch(cursor, BULK_SIZE).stream()
                    .map(this::buildInsertionDocument)
                    .collect(toList());
            if (annotationsToInsert.isEmpty()) {
                break;
            }
            annotationsReadCount += annotationsToInsert.size();
            BulkWriteResult bulkInsert = annotationCollection.bulkWrite(annotationsToInsert, unorderedBulk);
            annotationsWrittenCount += bulkInsert.getInsertedCount();
        }
    }

    // Before executing the next changeSet, check that the counts of read and written annotation documents match
    if (annotationsReadCount != annotationsWrittenCount) {
        throw new RuntimeException(
                "The number of processed Variants (" + annotationsReadCount
                        + ") is different from the number of new annotations inserted (" + annotationsWrittenCount
                        + "). The '" + ANNOT_FIELD + "' field will not be removed from the "
                        + variantsCollection.getNamespace() + " collection.");
    }
}
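Summing getInsertedCount() across batches gives this migration a cheap integrity check: because the bulk is unordered, it compares the total of acknowledged inserts against the number of documents read and refuses to proceed, and to let a later step remove the annotation field from the variants collection, if the totals disagree.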