Example usage for com.mongodb ErrorCategory DUPLICATE_KEY

List of usage examples for com.mongodb ErrorCategory DUPLICATE_KEY

Introduction

In this page you can find the example usage for com.mongodb ErrorCategory DUPLICATE_KEY.

Prototype

ErrorCategory DUPLICATE_KEY

To view the source code for com.mongodb ErrorCategory DUPLICATE_KEY, click the Source Link below.

Click Source Link

Document

A duplicate key error

Usage

From source file:org.apache.rya.indexing.geotemporal.mongo.MongoEventStorage.java

License:Apache License

/**
 * Stores a new {@link Event} in the Rya instance's event collection.
 *
 * @throws EventAlreadyExistsException if an Event with the same Subject is already stored
 *         (detected via a duplicate-key error).
 * @throws EventStorageException for any other Mongo failure while inserting.
 */
@Override
public void create(final Event event) throws EventStorageException {
    requireNonNull(event);

    try {
        // Convert the Event into its document form and insert it.
        mongo.getDatabase(ryaInstanceName)
                .getCollection(COLLECTION_NAME)
                .insertOne(EVENT_CONVERTER.toDocument(event));
    } catch (final MongoException e) {
        final String message =
                "Failed to create Event with Subject '" + event.getSubject().getData() + "'.";
        // A duplicate key means an Event with this Subject already exists.
        if (ErrorCategory.fromErrorCode(e.getCode()) == ErrorCategory.DUPLICATE_KEY) {
            throw new EventAlreadyExistsException(message, e);
        }
        throw new EventStorageException(message, e);
    }
}

From source file:org.axonframework.mongo.eventsourcing.tokenstore.MongoTokenStore.java

License:Apache License

/**
 * Claims the tracking token for the given processor/segment: first tries to update a
 * claimable existing entry; if none was modified, inserts a freshly claimed entry.
 *
 * @param token         the token to store (may carry serialized position data)
 * @param processorName name of the processor owning the token
 * @param segment       segment index of the token
 * @throws UnableToClaimTokenException if another node inserted the entry first
 *         (duplicate-key on insert means the token is already claimed).
 */
private void updateOrInsertTokenEntry(TrackingToken token, String processorName, int segment) {
    AbstractTokenEntry<?> tokenEntry = new GenericTokenEntry<>(token, serializer, contentType, processorName,
            segment);
    tokenEntry.claim(nodeId, claimTimeout);
    Bson update = combine(set("owner", nodeId), set("timestamp", tokenEntry.timestamp().toEpochMilli()),
            set("token", tokenEntry.getSerializedToken().getData()),
            set("tokenType", tokenEntry.getSerializedToken().getType().getName()));
    UpdateResult updateResult = mongoTemplate.trackingTokensCollection()
            .updateOne(claimableTokenEntryFilter(processorName, segment), update);

    if (updateResult.getModifiedCount() == 0) {
        try {
            mongoTemplate.trackingTokensCollection().insertOne(tokenEntryToDocument(tokenEntry));
        } catch (MongoWriteException exception) {
            if (ErrorCategory.fromErrorCode(exception.getError().getCode()) == ErrorCategory.DUPLICATE_KEY) {
                throw new UnableToClaimTokenException(
                        format("Unable to claim token '%s[%s]'", processorName, segment));
            }
            // FIX: any other write error was previously swallowed, silently leaving the
            // token neither updated nor inserted. Propagate it to the caller instead.
            throw exception;
        }
    }
}

From source file:org.axonframework.mongo.eventsourcing.tokenstore.MongoTokenStore.java

License:Apache License

/**
 * Loads (and claims) the token entry for the given processor/segment, inserting a new
 * null-token entry when none exists yet.
 *
 * @param processorName name of the processor owning the token
 * @param segment       segment index of the token
 * @return the claimed token entry, freshly inserted when no document existed
 * @throws UnableToClaimTokenException if the entry exists but is claimed by another node
 *         (duplicate-key on the racing insert).
 */
private AbstractTokenEntry<?> loadOrInsertTokenEntry(String processorName, int segment) {
    Document document = mongoTemplate.trackingTokensCollection().findOneAndUpdate(
            claimableTokenEntryFilter(processorName, segment),
            combine(set("owner", nodeId), set("timestamp", clock.millis())),
            new FindOneAndUpdateOptions().returnDocument(ReturnDocument.AFTER));

    if (document == null) {
        try {
            AbstractTokenEntry<?> tokenEntry = new GenericTokenEntry<>(null, serializer, contentType,
                    processorName, segment);
            tokenEntry.claim(nodeId, claimTimeout);

            mongoTemplate.trackingTokensCollection().insertOne(tokenEntryToDocument(tokenEntry));

            return tokenEntry;
        } catch (MongoWriteException exception) {
            if (ErrorCategory.fromErrorCode(exception.getError().getCode()) == ErrorCategory.DUPLICATE_KEY) {
                throw new UnableToClaimTokenException(
                        format("Unable to claim token '%s[%s]'", processorName, segment));
            }
            // FIX: other write errors were previously swallowed, after which execution fell
            // through to documentToTokenEntry(null) and threw an opaque NullPointerException.
            // Rethrow the real cause instead.
            throw exception;
        }
    }
    return documentToTokenEntry(document);
}

From source file:org.jaqpot.core.service.filter.excmappers.MongoWriteExceptionMapper.java

License:Open Source License

/**
 * Maps a {@link MongoWriteException} to an HTTP response: duplicate-key violations become
 * 400 (the entity is already in the database); everything else becomes 500.
 */
@Override
public Response toResponse(MongoWriteException exception) {
    LOG.log(Level.INFO, "MongoWriteExceptionMapper exception caught", exception);

    final boolean duplicateKey =
            ErrorCategory.DUPLICATE_KEY.equals(exception.getError().getCategory());

    final ErrorReport error;
    final Response.Status status;
    if (duplicateKey) {
        error = ErrorReportFactory.alreadyInDatabase(exception.getMessage());
        status = Response.Status.BAD_REQUEST;
    } else {
        error = ErrorReportFactory.internalServerError(exception, "MongoWriteException");
        status = Response.Status.INTERNAL_SERVER_ERROR;
    }

    return Response.ok(error, MediaType.APPLICATION_JSON).status(status).build();
}

From source file:org.opencb.opencga.catalog.db.mongodb.MongoDBUtils.java

License:Apache License

/**
 * Tells whether the given write exception was caused by a duplicate-key violation.
 */
static boolean isDuplicateKeyException(MongoWriteException e) {
    final ErrorCategory category = ErrorCategory.fromErrorCode(e.getCode());
    return ErrorCategory.DUPLICATE_KEY.equals(category);
}

From source file:org.opencb.opencga.storage.mongodb.variant.load.stage.MongoDBVariantStageLoader.java

License:Apache License

/**
 * Given a map of id -> binary[], inserts the binary objects in the stage collection.
 *
 * Document layout produced:
 * {
 *     <studyId> : {
 *         <fileId> : [ BinData(), BinData() ]
 *     }
 * }
 *
 * The field <fileId> is an array to detect duplicated variants within the same file.
 *
 * It may happen that an update with upsert:true fail if two different threads try to
 * update the same non existing document.
 * See https://jira.mongodb.org/browse/SERVER-14322
 *
 * In that case, the non inserted values will be returned.
 *
 * @param values        Map with all the values to insert
 * @param result        MongoDBVariantWriteResult to fill
 * @param retryIds      List of IDs to retry. If not null, only will update those documents within this set
 * @return              List of non updated documents.
 * @throws MongoBulkWriteException if the exception was not a DuplicatedKeyException (e:11000)
 */
private Set<String> updateMongo(ListMultimap<Document, Binary> values, MongoDBVariantWriteResult result,
        Set<String> retryIds) {

    // Nothing to do for an empty batch.
    Set<String> nonInsertedIds = Collections.emptySet();
    if (values.isEmpty()) {
        return nonInsertedIds;
    }
    // Build one (query, update) pair per variant id. When retrying, only the ids that
    // previously failed are included.
    List<Bson> queries = new LinkedList<>();
    List<Bson> updates = new LinkedList<>();
    for (Document id : values.keySet()) {
        if (retryIds == null || retryIds.contains(id.getString("_id"))) {
            List<Binary> binaryList = values.get(id);
            queries.add(eq("_id", id.getString("_id")));
            // addToSet (resume mode) avoids re-adding data already staged by a previous,
            // interrupted run; push appends unconditionally.
            if (binaryList.size() == 1) {
                updates.add(combine(
                        resumeStageLoad ? addToSet(fieldName, binaryList.get(0))
                                : push(fieldName, binaryList.get(0)),
                        setOnInsert(END_FIELD, id.get(END_FIELD)), setOnInsert(REF_FIELD, id.get(REF_FIELD)),
                        setOnInsert(ALT_FIELD, id.get(ALT_FIELD))));
            } else {
                updates.add(combine(
                        resumeStageLoad ? addEachToSet(fieldName, binaryList) : pushEach(fieldName, binaryList),
                        setOnInsert(END_FIELD, id.get(END_FIELD)), setOnInsert(REF_FIELD, id.get(REF_FIELD)),
                        setOnInsert(ALT_FIELD, id.get(ALT_FIELD))));
            }
        }
    }

    try {
        final BulkWriteResult mongoResult = collection.update(queries, updates, QUERY_OPTIONS).first();
        result.setNewVariants(mongoResult.getInsertedCount())
                .setUpdatedVariants(mongoResult.getModifiedCount());
    } catch (MongoBulkWriteException e) {
        // Record whatever part of the bulk write did succeed before the failure.
        result.setNewVariants(e.getWriteResult().getInsertedCount())
                .setUpdatedVariants(e.getWriteResult().getModifiedCount());

        if (retryIds != null) {
            // If retryIds != null, this was already the second attempt to update. In this case, do fail.
            LOGGER.error("BulkWriteErrors when retrying the updates");
            throw e;
        }

        nonInsertedIds = new HashSet<>();
        for (BulkWriteError writeError : e.getWriteErrors()) {
            // Only duplicate-key errors (the SERVER-14322 upsert race) are retriable;
            // any other write error is fatal.
            if (ErrorCategory.fromErrorCode(writeError.getCode()).equals(ErrorCategory.DUPLICATE_KEY)) { //Dup Key error code
                // Extract the failing "_id" from the error message so it can be retried.
                Matcher matcher = DUP_KEY_WRITE_RESULT_ERROR_PATTERN.matcher(writeError.getMessage());
                if (matcher.find()) {
                    String id = matcher.group(1);
                    nonInsertedIds.add(id);
                    LOGGER.warn("Catch error : {}", writeError.toString());
                    LOGGER.warn("DupKey exception inserting '{}'. Retry!", id);
                } else {
                    LOGGER.error("WriteError with code {} does not match with the pattern {}",
                            writeError.getCode(), DUP_KEY_WRITE_RESULT_ERROR_PATTERN.pattern());
                    throw e;
                }
            } else {
                throw e;
            }
        }
    }
    return nonInsertedIds;
}

From source file:week2.UserDAO.java

License:Apache License

/**
 * Registers a new user document keyed by username.
 *
 * The document stores the salted password hash, and the email address when one was
 * provided. Returns false (without throwing) when the username is already taken,
 * which surfaces as a duplicate-key error on the "_id" field.
 *
 * @param username unique username, used as the document "_id"
 * @param password plaintext password; only its salted hash is stored
 * @param email    optional email address; ignored when null or empty
 * @return true when the user was created, false when the username already exists
 */
public boolean addUser(String username, String password, String email) {

    String passwordHash = makePasswordHash(password, Integer.toString(random.nextInt()));

    Document doc = new Document("_id", username).append("password", passwordHash);
    if (email != null && !email.equals("")) {
        doc.append("email", email);
    }

    try {
        usersCollection.insertOne(doc);
    } catch (MongoWriteException e) {
        // Any error other than a duplicate key is unexpected — propagate it.
        if (!e.getError().getCategory().equals(ErrorCategory.DUPLICATE_KEY)) {
            throw e;
        }
        System.out.println("Username already in use: " + username);
        return false;
    }
    return true;
}