public void run() { while (running) { try { IndexRequest req = queue.take(); // Send data BulkRequest bulk = new BulkRequest(); int nb = 0; while (req != null && (nb == 0 || nb < max)) { bulk.add(req); nb++; req = queue.poll(); } if (bulk.numberOfActions() > 0) { BulkResponse rep = node.client().bulk(bulk).actionGet(); for (BulkItemResponse bir : rep.items()) { if (bir.failed()) { LOGGER.warn("Error indexing item: {}", bir.getFailureMessage()); } } } } catch (Exception e) { if (running) { LOGGER.warn("Error while sending indexes", e); } } } }
/**
 * Indexes every query in a single synchronous bulk request.
 * If any item in the bulk response failed, throws an {@code ElasticsearchException}
 * carrying a map of failed document id -> failure message.
 */
@Override
public void bulkIndex(List<IndexQuery> queries) {
    // Stage all queries into one bulk request.
    BulkRequestBuilder builder = client.prepareBulk();
    for (IndexQuery query : queries) {
        builder.add(prepareIndex(query));
    }
    BulkResponse response = builder.execute().actionGet();
    // Fast path: everything indexed cleanly.
    if (!response.hasFailures()) {
        return;
    }
    // Collect id -> failure message for each failed item so callers can inspect them.
    Map<String, String> failedDocuments = new HashMap<String, String>();
    for (BulkItemResponse item : response.items()) {
        if (item.failed()) {
            failedDocuments.put(item.getId(), item.failureMessage());
        }
    }
    throw new ElasticsearchException(
            "Bulk indexing has failures. Use ElasticsearchException.getFailedDocuments() for detailed messages ["
                    + failedDocuments + "]",
            failedDocuments);
}
// NOTE(review): fragment — the enclosing method begins before and ends after this
// chunk (four braces are left open here); code tokens left byte-identical.
// Advance the running total of indexed entries by this bulk response's item count.
this.indexCounter += response.items().length;
this.logger.info("HBase river has indexed {} entries so far", this.indexCounter);
// Keys of rows whose index attempt failed. Each failed id is also removed from
// keyMapForDeletion — presumably so failed rows are not purged from HBase and
// can be retried; TODO confirm against the remainder of the method (not visible).
final List<byte[]> failedKeys = new ArrayList<byte[]>();
if (response.hasFailures()) {
    for (BulkItemResponse r : response.items()) {
        if (r.failed()) {
            failedKeys.add(keyMapForDeletion.remove(r.getId()));