/**
 * Add an error result
 *
 * @param exception the exception
 * @param indexMap  the index map
 */
public void addErrorResult(final MongoBulkWriteException exception, final IndexMap indexMap) {
    // Fold in the partial result first, then merge the per-write errors and any write concern error.
    BulkWriteResult partialResult = exception.getWriteResult();
    addResult(partialResult, indexMap);
    mergeWriteErrors(exception.getWriteErrors(), indexMap);
    mergeWriteConcernError(exception.getWriteConcernError());
}
/**
 * Gets the combined result.
 *
 * @return the result
 */
public BulkWriteResult getResult() {
    // Surface any accumulated errors before handing back the combined result.
    throwOnError();
    return createResult();
}
/**
 * Gets whether there are errors in the combined result.
 *
 * @return whether there are errors in the combined result
 */
public boolean hasErrors() {
    if (hasWriteErrors()) {
        return true;
    }
    return hasWriteConcernErrors();
}
/**
 * Adds a server response document to the combiner, as either an error result or a success result.
 *
 * @param result the server response document for this batch
 */
public void addResult(final BsonDocument result) {
    // Unacknowledged writes carry no usable server response, so there is nothing to combine.
    if (!writeConcern.isAcknowledged()) {
        return;
    }
    if (hasError(result)) {
        bulkWriteBatchCombiner.addErrorResult(getBulkWriteException(result), indexMap);
    } else {
        bulkWriteBatchCombiner.addResult(getBulkWriteResult(result), indexMap);
    }
}
/**
 * Gets the combined errors as an exception
 *
 * @return the bulk write exception, or null if there were no errors
 */
public MongoBulkWriteException getError() {
    if (!hasErrors()) {
        return null;
    }
    // Only the most recently merged write concern error is reported.
    return new MongoBulkWriteException(createResult(), new ArrayList<BulkWriteError>(writeErrors),
            writeConcernErrors.isEmpty() ? null : writeConcernErrors.get(writeConcernErrors.size() - 1),
            serverAddress);
}
/**
 * True if ordered and has write errors.
 *
 * @return true if no more batches should be sent
 */
public boolean shouldStopSendingMoreBatches() {
    // An unordered bulk write continues past failures; an ordered one must stop at the first write error.
    if (!ordered) {
        return false;
    }
    return hasWriteErrors();
}
/**
 * Gets whether any errors have been accumulated for this batch.
 *
 * @return true if the underlying combiner holds any errors
 */
public boolean hasErrors() {
    return bulkWriteBatchCombiner.hasErrors();
}
/**
 * Gets the accumulated errors as an exception, delegating to the underlying combiner.
 *
 * @return the bulk write exception, or null if there were no errors
 */
public MongoBulkWriteException getError() {
    return bulkWriteBatchCombiner.getError();
}
/**
 * Gets the combined result from the underlying combiner.
 *
 * @return the combined bulk write result
 */
public BulkWriteResult getResult() {
    return bulkWriteBatchCombiner.getResult();
}
/**
 * Add a result
 *
 * @param result   the result
 * @param indexMap the index map
 */
public void addResult(final BulkWriteResult result, final IndexMap indexMap) {
    insertedCount += result.getInsertedCount();
    matchedCount += result.getMatchedCount();
    deletedCount += result.getDeletedCount();
    // Once any contributing result lacks a modified count, the combined count is permanently unknown.
    if (modifiedCount != null && result.isModifiedCountAvailable()) {
        modifiedCount += result.getModifiedCount();
    } else {
        modifiedCount = null;
    }
    mergeUpserts(result.getUpserts(), indexMap);
}
/**
 * Creates a bulk write batch for the given write requests, determining whether the batch as a whole
 * can be retried.
 *
 * @param namespace                the namespace to write to
 * @param serverDescription        the description of the server
 * @param connectionDescription    the description of the connection
 * @param ordered                  whether the writes are ordered
 * @param writeConcern             the write concern to apply
 * @param bypassDocumentValidation whether to bypass document validation, may be null
 * @param retryWrites              whether retryable writes were requested
 * @param writeRequests            the write requests to batch
 * @param sessionContext           the session context
 * @return the new bulk write batch
 * @throws MongoClientException if an unacknowledged write is attempted with an explicit session
 */
public static BulkWriteBatch createBulkWriteBatch(final MongoNamespace namespace, final ServerDescription serverDescription,
                                                  final ConnectionDescription connectionDescription, final boolean ordered,
                                                  final WriteConcern writeConcern, final Boolean bypassDocumentValidation,
                                                  final boolean retryWrites, final List<? extends WriteRequest> writeRequests,
                                                  final SessionContext sessionContext) {
    // An explicit (non-implicit, non-transactional) session cannot be combined with an unacknowledged write concern.
    if (sessionContext.hasSession() && !sessionContext.isImplicitSession() && !sessionContext.hasActiveTransaction()
            && !writeConcern.isAcknowledged()) {
        throw new MongoClientException("Unacknowledged writes are not supported when using an explicit session");
    }
    boolean retryable = isRetryableWrite(retryWrites, writeConcern, serverDescription, connectionDescription, sessionContext);
    List<WriteRequestWithIndex> indexedRequests = new ArrayList<WriteRequestWithIndex>();
    boolean everyRequestRetryable = true;
    for (int index = 0; index < writeRequests.size(); index++) {
        WriteRequest request = writeRequests.get(index);
        everyRequestRetryable = everyRequestRetryable && isRetryable(request);
        indexedRequests.add(new WriteRequestWithIndex(request, index));
    }
    // Retryability requires every individual request to be retryable.
    if (retryable && !everyRequestRetryable) {
        retryable = false;
        LOGGER.debug("retryWrites set but one or more writeRequests do not support retryable writes");
    }
    return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation,
            retryable, new BulkWriteBatchCombiner(connectionDescription.getServerAddress(), ordered, writeConcern),
            indexedRequests, sessionContext);
}
/**
 * Gets whether there are errors in the combined result.
 *
 * @return whether there are errors in the combined result
 */
public boolean hasErrors() {
    // Either kind of error — per-write or write concern — makes the combined result erroneous.
    return hasWriteErrors() || hasWriteConcernErrors();
}
/**
 * Adds a server response document to the combiner as either an error or a success result.
 *
 * @param result the server response document for this batch
 */
public void addResult(final BsonDocument result) {
    // Nothing to record for unacknowledged writes: the server sends no meaningful response.
    if (!writeConcern.isAcknowledged()) {
        return;
    }
    if (hasError(result)) {
        MongoBulkWriteException bulkWriteException = getBulkWriteException(result);
        bulkWriteBatchCombiner.addErrorResult(bulkWriteException, indexMap);
    } else {
        bulkWriteBatchCombiner.addResult(getBulkWriteResult(result), indexMap);
    }
}
/**
 * Gets the combined errors as an exception
 *
 * @return the bulk write exception, or null if there were no errors
 */
public MongoBulkWriteException getError() {
    if (!hasErrors()) {
        return null;
    }
    // Report only the last write concern error that was merged in.
    return new MongoBulkWriteException(createResult(), new ArrayList<BulkWriteError>(writeErrors),
            writeConcernErrors.isEmpty() ? null : writeConcernErrors.get(writeConcernErrors.size() - 1),
            serverAddress);
}
/**
 * True if ordered and has write errors.
 *
 * @return true if no more batches should be sent
 */
public boolean shouldStopSendingMoreBatches() {
    // Ordered bulk writes stop at the first write error; unordered ones always continue.
    if (!ordered) {
        return false;
    }
    return hasWriteErrors();
}
/**
 * Gets whether any errors have been accumulated for this batch.
 *
 * @return true if the underlying combiner holds any errors
 */
public boolean hasErrors() {
    return bulkWriteBatchCombiner.hasErrors();
}
/**
 * Gets the accumulated errors as an exception, delegating to the underlying combiner.
 *
 * @return the bulk write exception, or null if there were no errors
 */
public MongoBulkWriteException getError() {
    return bulkWriteBatchCombiner.getError();
}
/**
 * Gets the combined result from the underlying combiner.
 *
 * @return the combined bulk write result
 */
public BulkWriteResult getResult() {
    return bulkWriteBatchCombiner.getResult();
}
/**
 * Add a result
 *
 * @param result   the result
 * @param indexMap the index map
 */
public void addResult(final BulkWriteResult result, final IndexMap indexMap) {
    insertedCount += result.getInsertedCount();
    matchedCount += result.getMatchedCount();
    deletedCount += result.getDeletedCount();
    // A single result without a modified count poisons the combined modified count for good.
    if (modifiedCount != null && result.isModifiedCountAvailable()) {
        modifiedCount += result.getModifiedCount();
    } else {
        modifiedCount = null;
    }
    mergeUpserts(result.getUpserts(), indexMap);
}