@Override
public void beforeBulk(long l, BulkRequest bulkRequest) {
    // Use parameterized logging (as the rest of the codebase does) instead of string
    // concatenation, so the message is only built when INFO is actually enabled.
    logger.info("bulk request numberOfActions:{}", bulkRequest.numberOfActions());
}
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    checkBulkProcessorAvailability();
    logger.trace("beforeBulk - new bulk [{}] of items [{}]", executionId, request.numberOfActions());
    if (flushBulkProcessor.get()) {
        logger.trace("About to flush bulk request index[{}] - type[{}]", index, type);
        // Discard every queued request up to and including the last drop-collection
        // marker: those operations target data that is about to be dropped anyway.
        // (Renamed from the original typo "dropDollectionIndex".)
        int dropCollectionIndex = findLastDropCollection(request.requests());
        request.requests().subList(0, dropCollectionIndex + 1).clear();
        try {
            dropRecreateMapping();
            // Reset the per-collection counters now that the index was recreated.
            deletedDocuments.set(0);
            updatedDocuments.set(0);
            insertedDocuments.set(0);
            flushBulkProcessor.set(false);
        } catch (Throwable t) {
            // Recreating the mapping failed: mark the river as failed and shut
            // everything down so no further (now inconsistent) bulks are sent.
            logger.error("Drop collection operation failed", t);
            MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
            request.requests().clear();
            bulkProcessor.close();
            river.close();
        }
    }
}
/**
 * The number of actions currently in the bulk.
 *
 * @return the pending action count, delegated to the underlying bulk {@code request}
 */
public int numberOfActions() {
    return request.numberOfActions();
}
@Override
public void run() {
    synchronized (BulkProcessor.this) {
        // Execute only while the processor is still open and actions are queued;
        // otherwise this periodic flush is a no-op.
        if (!closed && bulkRequest.numberOfActions() > 0) {
            execute();
        }
    }
}
}
@Override public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
    // Mocked flush(): drain the accumulated requests bulk-by-bulk, replaying the
    // beforeBulk/afterBulk listener callbacks the way the real BulkProcessor would.
    while (nextBulkRequest.numberOfActions() > 0) {
        // wait until we are allowed to continue with the flushing
        flushLatch.await();
        // create a copy of the accumulated mock requests, so that
        // re-added requests from the failure handler are included in the next bulk
        BulkRequest currentBulkRequest = nextBulkRequest;
        nextBulkRequest = new BulkRequest();
        listener.beforeBulk(123L, currentBulkRequest);
        if (nextBulkFailure == null) {
            // Build one per-item response for each request in the current bulk.
            BulkItemResponse[] mockResponses = new BulkItemResponse[currentBulkRequest.requests().size()];
            for (int i = 0; i < currentBulkRequest.requests().size(); i++) {
                Throwable mockItemFailure = mockItemFailuresList.get(i);
                if (mockItemFailure == null) {
                    // the mock response for the item is success
                    mockResponses[i] = new BulkItemResponse(i, "opType", mock(ActionResponse.class));
                } else {
                    // the mock response for the item is failure
                    mockResponses[i] = new BulkItemResponse(i, "opType", new BulkItemResponse.Failure("index", "type", "id", mockItemFailure));
                }
            }
            listener.afterBulk(123L, currentBulkRequest, new BulkResponse(mockResponses, 1000L));
        } else {
            // Simulate a whole-bulk failure (e.g. a transport error) instead of per-item results.
            listener.afterBulk(123L, currentBulkRequest, nextBulkFailure);
        }
    }
    return null;
}
}).when(mockBulkProcessor).flush();
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    // Record the size of the outgoing bulk before it is executed.
    final int actionCount = request.numberOfActions();
    logger.trace("Sending a bulk request of [{}] requests", actionCount);
}
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    // Trace how many requests this bulk carries before sending it.
    final int pendingActions = request.numberOfActions();
    logger.trace("Sending a bulk request of [{}] requests", pendingActions);
}
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    // Log the bulk size at TRACE level just before execution.
    final int bulkSize = request.numberOfActions();
    logger.trace("Sending a bulk request of [{}] requests", bulkSize);
}
// Only issue the bulk call when at least one action is queued.
if (request.numberOfActions() > 0) {
    BulkResponse response;
    try {
/**
 * Whether the buffered bulk has hit either configured flush threshold.
 * A threshold value of -1 disables that particular check.
 */
private boolean isOverTheLimit() {
    return (bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions)
            || (bulkSize != -1 && bulkRequest.estimatedSizeInBytes() >= bulkSize);
}
// Walk every action by index so the position is available alongside the request.
for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
    DocWriteRequest<?> action = bulkRequest.requests().get(i);
/**
 * Flush pending delete or index requests.
 */
public synchronized void flush() {
    ensureOpen();
    // Nothing buffered: skip the round-trip entirely.
    if (bulkRequest.numberOfActions() == 0) {
        return;
    }
    execute();
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    logger.trace("Executed bulk request with [{}] requests", request.numberOfActions());
    if (!response.hasFailures()) {
        return;
    }
    // Single-element array lets the lambda below mutate the counter it captures.
    final int[] failureCount = {0};
    response.iterator().forEachRemaining(item -> {
        if (!item.isFailed()) {
            return;
        }
        failureCount[0]++;
        logger.debug("Error caught for [{}]/[{}]/[{}]: {}", item.getIndex(), item.getType(), item.getId(), item.getFailureMessage());
    });
    logger.warn("Got [{}] failures of [{}] requests", failureCount[0], request.numberOfActions());
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    logger.trace("Executed bulk request with [{}] requests", request.numberOfActions());
    if (!response.hasFailures()) {
        return;
    }
    // Array-wrapped counter so it can be incremented from inside the lambda.
    final int[] failedItems = {0};
    response.iterator().forEachRemaining(itemResponse -> {
        if (!itemResponse.isFailed()) {
            return;
        }
        failedItems[0]++;
        logger.debug("Error caught for [{}]/[{}]/[{}]: {}", itemResponse.getIndex(), itemResponse.getType(), itemResponse.getId(), itemResponse.getFailureMessage());
    });
    logger.warn("Got [{}] failures of [{}] requests", failedItems[0], request.numberOfActions());
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    logger.trace("Executed bulk request with [{}] requests", request.numberOfActions());
    if (!response.hasFailures()) {
        return;
    }
    // Mutable holder for the failure count captured by the lambda.
    final int[] errorCount = {0};
    response.iterator().forEachRemaining(result -> {
        if (!result.isFailed()) {
            return;
        }
        errorCount[0]++;
        logger.debug("Error caught for [{}]/[{}]/[{}]: {}", result.getIndex(), result.getType(), result.getId(), result.getFailureMessage());
    });
    logger.warn("Got [{}] failures of [{}] requests", errorCount[0], request.numberOfActions());
}
/**
 * Bulk-indexes the given documents into this index under {@code type},
 * flushing to Elasticsearch in batches of 1000.
 *
 * @param type    the document type
 * @param sources map of document id to document source
 */
public void bulkDocument(String type, Map<String, Map<String, Object>> sources) {
    try {
        if (this.instance() == null) {
            return;
        }
        BulkRequest requests = new BulkRequest();
        int count = 0;
        // Iterate entries directly instead of keySet() + get() — avoids a second
        // map lookup per document.
        for (Map.Entry<String, Map<String, Object>> entry : sources.entrySet()) {
            count++;
            IndexRequest request = new IndexRequest(name, type, entry.getKey());
            request.source(entry.getValue());
            requests.add(request);
            // Send in batches of 1000 to bound the size of a single bulk call.
            if (count % 1000 == 0) {
                client.bulk(requests, RequestOptions.DEFAULT);
                requests.requests().clear();
                count = 0;
            }
        }
        // Flush the final partial batch, if any.
        if (requests.numberOfActions() > 0) {
            client.bulk(requests, RequestOptions.DEFAULT);
        }
    } catch (IOException e) {
        // Pass the exception itself so the stack trace is preserved (the original
        // logged only e.getMessage() and lost it).
        log.error(e.getMessage(), e);
    }
}
/**
 * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are flushed.
 * <p>
 * If concurrent requests are not enabled, returns {@code true} immediately.
 * If concurrent requests are enabled, waits for up to the specified timeout for all bulk requests to complete then returns {@code true}
 * If the specified waiting time elapses before all bulk requests complete, {@code false} is returned.
 *
 * @param timeout The maximum time to wait for the bulk requests to complete
 * @param unit The time unit of the {@code timeout} argument
 * @return {@code true} if all bulk requests completed and {@code false} if the waiting time elapsed before all the bulk requests
 * completed
 * @throws InterruptedException If the current thread is interrupted
 */
public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
    // Idempotent: a second close is a no-op that reports success.
    if (closed) {
        return true;
    }
    closed = true;
    // Stop the periodic flush task before draining what is left.
    this.cancellableFlushTask.cancel();
    // Send any still-buffered actions as one final bulk.
    if (bulkRequest.numberOfActions() > 0) {
        execute();
    }
    try {
        return this.bulkRequestHandler.awaitClose(timeout, unit);
    } finally {
        // Run the close hook regardless of whether the in-flight bulks finished in time.
        onClose.run();
    }
}
/**
 * Bulk-deletes the documents with the given ids from this index under {@code type},
 * flushing to Elasticsearch in batches of 1000.
 *
 * @param type the document type
 * @param ids  the numeric document ids to delete
 */
public void bulkDeleteDocument(String type, List<Integer> ids) {
    try {
        if (this.instance() == null) {
            return;
        }
        BulkRequest requests = new BulkRequest();
        int count = 0;
        for (Integer id : ids) {
            count++;
            requests.add(new DeleteRequest(name, type, String.valueOf(id)));
            // Send in batches of 1000 to bound the size of a single bulk call.
            if (count % 1000 == 0) {
                client.bulk(requests, RequestOptions.DEFAULT);
                requests.requests().clear();
                count = 0;
            }
        }
        // Flush the final partial batch, if any.
        if (requests.numberOfActions() > 0) {
            client.bulk(requests, RequestOptions.DEFAULT);
        }
    } catch (IOException e) {
        // Pass the exception itself so the stack trace is preserved (the original
        // logged only e.getMessage() and lost it).
        log.error(e.getMessage(), e);
    }
}
/**
 * The number of actions currently in the bulk.
 *
 * @return the pending action count, delegated to the underlying bulk {@code request}
 */
public int numberOfActions() {
    return request.numberOfActions();
}
}
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    // Count the incoming bulk in the metrics, then trace its size.
    eventCounter.scope("bulks_received").incrBy(1);
    LOG.debug("beforeBulk {} with {} actions", executionId, request.numberOfActions());
}