/**
 * Indexes all given queries against the cluster in a single bulk round trip.
 * Item-level failures are surfaced via {@code checkForBulkUpdateFailure}.
 */
@Override
public void bulkIndex(List<IndexQuery> queries) {
    // accumulate every query into one request instead of one call per document
    final BulkRequest bulk = new BulkRequest();
    queries.forEach(query -> bulk.add(prepareIndex(query)));
    try {
        checkForBulkUpdateFailure(client.bulk(bulk));
    } catch (IOException e) {
        throw new ElasticsearchException("Error while bulk for request: " + bulk.toString(), e);
    }
}
/**
 * Whether the pending bulk request has reached either configured flush
 * threshold. A threshold of -1 disables that particular limit.
 */
private boolean isOverTheLimit() {
    final boolean tooManyActions =
            bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions;
    final boolean tooManyBytes =
            bulkSize != -1 && bulkRequest.estimatedSizeInBytes() >= bulkSize;
    return tooManyActions || tooManyBytes;
}
// Mockito stub: simulates BulkProcessor.flush() by draining the accumulated
// mock requests and invoking the listener callbacks synchronously on the
// caller's thread.
@Override
public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
    while (nextBulkRequest.numberOfActions() > 0) {
        // wait until we are allowed to continue with the flushing
        flushLatch.await();

        // create a copy of the accumulated mock requests, so that
        // re-added requests from the failure handler are included in the next bulk
        BulkRequest currentBulkRequest = nextBulkRequest;
        nextBulkRequest = new BulkRequest();

        listener.beforeBulk(123L, currentBulkRequest);

        if (nextBulkFailure == null) {
            // per-item outcome path: build one mock response per queued request
            BulkItemResponse[] mockResponses = new BulkItemResponse[currentBulkRequest.requests().size()];
            for (int i = 0; i < currentBulkRequest.requests().size(); i++) {
                Throwable mockItemFailure = mockItemFailuresList.get(i);
                if (mockItemFailure == null) {
                    // the mock response for the item is success
                    mockResponses[i] = new BulkItemResponse(i, "opType", mock(ActionResponse.class));
                } else {
                    // the mock response for the item is failure
                    mockResponses[i] = new BulkItemResponse(i, "opType", new BulkItemResponse.Failure("index", "type", "id", mockItemFailure));
                }
            }
            listener.afterBulk(123L, currentBulkRequest, new BulkResponse(mockResponses, 1000L));
        } else {
            // whole-bulk failure path: report the configured Throwable instead
            listener.afterBulk(123L, currentBulkRequest, nextBulkFailure);
        }
    }
    return null;
}
// closes the anonymous Answer and attaches it to the mocked processor's
// flush() — the doAnswer(...) chain begins outside this view
}).when(mockBulkProcessor).flush();
/**
 * Hook invoked before each bulk execution. When a flush has been requested
 * (collection drop), discards every queued request up to and including the
 * last drop-collection marker, recreates the mapping and resets the
 * per-collection counters.
 */
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    checkBulkProcessorAvailability();
    logger.trace("beforeBulk - new bulk [{}] of items [{}]", executionId, request.numberOfActions());
    if (flushBulkProcessor.get()) {
        logger.trace("About to flush bulk request index[{}] - type[{}]", index, type);
        // fix: local variable was previously misspelled "dropDollectionIndex"
        int dropCollectionIndex = findLastDropCollection(request.requests());
        // remove everything queued before (and including) the drop marker —
        // those documents belong to the collection being recreated
        request.requests().subList(0, dropCollectionIndex + 1).clear();
        try {
            dropRecreateMapping();
            // fresh index: reset the statistics counters
            deletedDocuments.set(0);
            updatedDocuments.set(0);
            insertedDocuments.set(0);
            flushBulkProcessor.set(false);
        } catch (Throwable t) {
            // deliberately broad catch: any failure here must mark the river
            // as failed and shut the pipeline down
            logger.error("Drop collection operation failed", t);
            MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
            request.requests().clear();
            bulkProcessor.close();
            river.close();
        }
    }
}
/**
 * Submits the given index operations as one bulk request with an IMMEDIATE
 * refresh policy, so the documents are searchable when this method returns.
 *
 * @param operations the documents to index
 * @return timing information reported by the cluster for the bulk call
 * @throws IOException if the bulk request fails at the transport level
 */
@Override
public IndexOperationResponse add(List<IndexOperationRequest> operations) throws IOException {
    BulkRequest bulkRequest = new BulkRequest();
    // for-each instead of an index loop: the position was never used
    for (IndexOperationRequest op : operations) {
        bulkRequest.add(new IndexRequest(op.getIndex(), op.getType(), op.getId())
                .source(op.getFields()));
    }
    bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
    BulkResponse response = highLevelClient.bulk(bulkRequest);
    return new IndexOperationResponse(response.getTookInMillis(), response.getIngestTookInMillis());
}
// Wraps one replicated write request in a single-item bulk request, copying
// refresh policy, timeout and wait-for-active-shards onto the bulk wrapper.
public static BulkRequest toSingleItemBulkRequest(ReplicatedWriteRequest request) {
    BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.add(((DocWriteRequest) request)); // raw cast: the request is also a DocWriteRequest
    bulkRequest.setRefreshPolicy(request.getRefreshPolicy());
    bulkRequest.timeout(request.timeout());
    bulkRequest.waitForActiveShards(request.waitForActiveShards());
    // the bulk wrapper now owns the refresh; the inner request must not refresh again
    request.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE);
    return bulkRequest;
}
// closes the enclosing class (its header is outside this view)
}
/**
 * Deletes the documents with the given ids from this index in bulk, flushing
 * every 1000 deletes so individual bulk requests stay bounded.
 *
 * @param type the document type
 * @param ids  the numeric document ids to delete
 */
public void bulkDeleteDocument(String type, List<Integer> ids) {
    try {
        if (this.instance() == null) return;
        BulkRequest requests = new BulkRequest();
        int count = 0;
        for (Integer id : ids) {
            count++;
            requests.add(new DeleteRequest(name, type, String.valueOf(id)));
            if (count % 1000 == 0) {
                // flush a full batch, then reuse the request object for the next one
                // NOTE(review): the BulkResponse is not inspected for item failures — confirm intended
                client.bulk(requests, RequestOptions.DEFAULT);
                requests.requests().clear();
                count = 0;
            }
        }
        // flush the final partial batch, if any
        if (requests.numberOfActions() > 0)
            client.bulk(requests, RequestOptions.DEFAULT);
    } catch (IOException e) {
        // fix: keep the stack trace instead of logging only the message
        log.error(e.getMessage(), e);
    }
}
/**
 * Builds a new bulk request containing only the items that failed in the
 * given response, pulled by position from {@code currentBulkRequest}
 * (item order matches the order of the originally submitted requests).
 */
private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) {
    BulkRequest retryRequest = new BulkRequest();
    BulkItemResponse[] items = bulkItemResponses.getItems();
    for (int slot = 0; slot < items.length; slot++) {
        if (items[slot].isFailed()) {
            retryRequest.add(currentBulkRequest.requests().get(slot));
        }
    }
    return retryRequest;
}
// Mockito stub: instead of sending to Elasticsearch, capture each added
// IndexRequest into the mock bulk accumulator for later inspection.
@Override
public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
    // intercept the request and add it to our mock bulk request
    nextBulkRequest.add((IndexRequest) invocationOnMock.getArgument(0));
    return null;
}
// closes the anonymous Answer and the surrounding stubbing call (starts outside this view)
});
BulkRequest getBulkRequest() { if (itemResponses.isEmpty()) { return bulkRequest; } else { BulkRequest modifiedBulkRequest = new BulkRequest(); modifiedBulkRequest.setRefreshPolicy(bulkRequest.getRefreshPolicy()); modifiedBulkRequest.waitForActiveShards(bulkRequest.waitForActiveShards()); modifiedBulkRequest.timeout(bulkRequest.timeout()); int slot = 0; List<DocWriteRequest<?>> requests = bulkRequest.requests(); originalSlots = new int[requests.size()]; // oversize, but that's ok for (int i = 0; i < requests.size(); i++) { DocWriteRequest request = requests.get(i); if (failedSlots.get(i) == false) { modifiedBulkRequest.add(request); originalSlots[slot++] = i; } } return modifiedBulkRequest; } }
/** Logs the size of each bulk request just before it is executed. */
@Override
public void beforeBulk(long l, BulkRequest bulkRequest) {
    // fix: parameterized logging instead of string concatenation — the
    // argument is only rendered when INFO is enabled; the resulting
    // message text is identical to the previous concatenation.
    logger.info("bulk request numberOfActions:{}", bulkRequest.numberOfActions());
}
// Single-item bulk delete: remove the document with the given id.
BulkRequest request = new BulkRequest();
// NOTE(review): 'ids' is never populated or read in this visible span — looks like dead code
List<String> ids = new ArrayList<String>();
request.add(new DeleteRequest(indexName, typeName, id));
// always true at this point (one delete was just added); guards the bulk call below
if (request.numberOfActions() > 0) {
    BulkResponse response;
    try {
// start a fresh accumulator for the next intercepted bulk
nextBulkRequest = new BulkRequest();
// Copy timeout and refresh policy from the incoming bulk request.
parameters.withTimeout(bulkRequest.timeout());
parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy());
// NOTE(review): the two loop headers below both declare 'action' over the
// same requests list — this looks like a merge artifact and will not compile
// as written; one of the two loops should be removed.
for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
    DocWriteRequest<?> action = bulkRequest.requests().get(i);
    for (DocWriteRequest<?> action : bulkRequest.requests()) {
        DocWriteRequest.OpType opType = action.opType();
/**
 * Supplies bulk requests pre-populated with the configured global defaults
 * (index, type, pipeline and routing).
 */
private Supplier<BulkRequest> createBulkRequestWithGlobalDefaults() {
    return () -> {
        BulkRequest request = new BulkRequest(globalIndex, globalType);
        request.pipeline(globalPipeline);
        return request.routing(globalRouting);
    };
}
// closes the enclosing class (its header is outside this view)
}
// Log the failure, then rethrow as the converted exception type.
// NOTE(review): the '{}' placeholder is filled with the request timeout,
// not the query itself — confirm this is intended.
log.error("Failed to execute ES query {}", brb.request().timeout(), e);
throw convert(e);
// Build the per-shard bulk request for this group of items, propagating
// refresh policy, active-shard wait and timeout from the top-level bulk.
final ShardId shardId = entry.getKey();
final List<BulkItemRequest> requests = entry.getValue();
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, bulkRequest.getRefreshPolicy(),
        requests.toArray(new BulkItemRequest[requests.size()]));
bulkShardRequest.waitForActiveShards(bulkRequest.waitForActiveShards());
bulkShardRequest.timeout(bulkRequest.timeout());
// link the shard-level request to the parent task for task-management visibility
if (task != null) {
    bulkShardRequest.setParentTask(nodeId, task.getId());
/**
 * REST entry point for the _bulk endpoint: reads per-request defaults from
 * the URL parameters, parses the NDJSON request body into a bulk request and
 * dispatches it on the node client.
 */
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    final BulkRequest bulk = Requests.bulkRequest();

    // URL-level defaults applied to every action in the body
    final String defaultIndex = request.param("index");
    final String defaultType = request.param("type");
    final String defaultRouting = request.param("routing");
    final FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request);

    // the legacy [fields] parameter is still honored but deprecated in favor of _source
    final String fieldsParam = request.param("fields");
    if (fieldsParam != null) {
        DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
    }
    final String[] defaultFields =
            fieldsParam != null ? Strings.commaDelimitedListToStringArray(fieldsParam) : null;

    final String defaultPipeline = request.param("pipeline");
    final String waitForActiveShards = request.param("wait_for_active_shards");
    if (waitForActiveShards != null) {
        bulk.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
    }
    bulk.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
    bulk.setRefreshPolicy(request.param("refresh"));

    // parse the body into individual actions using the defaults gathered above
    bulk.add(request.requiredContent(), defaultIndex, defaultType, defaultRouting, defaultFields,
            defaultFetchSourceContext, defaultPipeline, null, allowExplicitIndex, request.getXContentType());

    return channel -> client.bulk(bulk, new RestStatusToXContentListener<>(channel));
}
/**
 * Indexes the given id-to-source documents in bulk, flushing every 1000
 * documents so individual bulk requests stay bounded.
 *
 * @param type    the document type
 * @param sources map of document id to document source fields
 */
public void bulkDocument(String type, Map<String, Map<String, Object>> sources) {
    try {
        if (this.instance() == null) return;
        BulkRequest requests = new BulkRequest();
        int count = 0;
        // iterate entries directly instead of keySet()+get() (one map lookup instead of two)
        for (Map.Entry<String, Map<String, Object>> entry : sources.entrySet()) {
            count++;
            IndexRequest request = new IndexRequest(name, type, entry.getKey());
            request.source(entry.getValue());
            requests.add(request);
            if (count % 1000 == 0) {
                // flush a full batch, then reuse the request object for the next one
                // NOTE(review): the BulkResponse is not inspected for item failures — confirm intended
                client.bulk(requests, RequestOptions.DEFAULT);
                requests.requests().clear();
                count = 0;
            }
        }
        // flush the final partial batch, if any
        if (requests.numberOfActions() > 0)
            client.bulk(requests, RequestOptions.DEFAULT);
    } catch (IOException e) {
        // fix: keep the stack trace instead of logging only the message
        log.error(e.getMessage(), e);
    }
}