    public MongoDBRiverBulkProcessor build() {
        return new MongoDBRiverBulkProcessor(river, definition, client, index, type);
    }
}
public void dropIndex() {
    // Enqueue the DROP_INDEX marker request and flag the bulk processor for an immediate flush.
    addBulkRequest(null, DROP_INDEX, null, null);
    flushBulkProcessor.set(true);
}
private void releaseProcessors() {
    for (MongoDBRiverBulkProcessor processor : processors.values()) {
        processor.getBulkProcessor().close();
    }
    processors.clear();
}
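// getBulkProcessor(index, type) is used throughout this listing but its body is not shown.
// A minimal hypothetical sketch, assuming `processors` is a Map<String, MongoDBRiverBulkProcessor>
// keyed by "index/type" and that the Builder accepts the five values build() forwards; the real
// implementation may differ (e.g. use putIfAbsent or other synchronization for thread safety).
private MongoDBRiverBulkProcessor getBulkProcessor(String index, String type) {
    String key = index + "/" + type;
    MongoDBRiverBulkProcessor processor = processors.get(key);
    if (processor == null) {
        processor = new MongoDBRiverBulkProcessor.Builder(river, definition, esClient, index, type).build();
        processors.put(key, processor);
    }
    return processor;
}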
logger.trace("Insert operation - id: {} - contains attachment: {}", objectId, (data instanceof GridFSDBFile)); getBulkProcessor(index, type).addBulkRequest(objectId, build(data, objectId), routing, parent); getBulkProcessor(index, type).addBulkRequest(objectId, build(data, objectId), routing, parent); if (definition.isDropCollection()) { MongoDBRiverBulkProcessor processor = getBulkProcessor(index, type); processor.dropIndex(); } else { logger.info("Ignore drop collection request [{}], [{}]. The option has been disabled.", index, type);
private void checkBulkProcessorAvailability() {
    while (!isBulkProcessorAvailable()) {
        try {
            if (logger.isDebugEnabled()) {
                logger.debug("Waiting for bulk queue to empty...");
            }
            Thread.sleep(2000);
        } catch (InterruptedException e) {
            logger.warn("checkBulkProcessorAvailability interrupted", e);
        }
    }
}
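// isBulkProcessorAvailable() is not shown in this listing. A minimal hypothetical sketch of such a
// check: compare the number of actions currently buffered against the bulkQueueSize captured in the
// constructor. The counter below is purely illustrative; the real implementation may instead inspect
// the node's bulk thread-pool statistics.
// (requires: import java.util.concurrent.atomic.AtomicInteger)
private final AtomicInteger pendingBulkActions = new AtomicInteger(); // hypothetical counter, maintained wherever requests are added/executed

private boolean isBulkProcessorAvailable() {
    return pendingBulkActions.get() < bulkQueueSize;
}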
private void deleteBulkRequest(String objectId, String index, String type, String routing, String parent) {
    if (logger.isTraceEnabled()) {
        logger.trace("bulkDeleteRequest - objectId: {} - index: {} - type: {} - routing: {} - parent: {}", objectId, index, type,
                routing, parent);
    }

    // If this type is configured as a parent type, first delete the child documents that reference this parent.
    if (definition.getParentTypes() != null && definition.getParentTypes().contains(type)) {
        QueryBuilder builder = QueryBuilders.hasParentQuery(type, QueryBuilders.termQuery(MongoDBRiver.MONGODB_ID_FIELD, objectId));
        SearchResponse response = esClient.prepareSearch(index).setQuery(builder).setRouting(routing)
                .addField(MongoDBRiver.MONGODB_ID_FIELD).execute().actionGet();
        for (SearchHit hit : response.getHits().getHits()) {
            getBulkProcessor(index, hit.getType()).deleteBulkRequest(hit.getId(), routing, objectId);
        }
    }
    getBulkProcessor(index, type).deleteBulkRequest(objectId, routing, parent);
}
MongoDBRiverBulkProcessor(MongoDBRiver river, MongoDBRiverDefinition definition, Client client, String index, String type) {
    super(river);
    this.river = river;
    this.bulkProcessor = BulkProcessor.builder(client, listener)
            .setBulkActions(definition.getBulk().getBulkActions())
            .setConcurrentRequests(definition.getBulk().getConcurrentRequests())
            .setFlushInterval(definition.getBulk().getFlushInterval())
            .setBulkSize(definition.getBulk().getBulkSize())
            .build();
    this.definition = definition;
    this.client = client;
    this.index = index;
    this.type = type;
    this.bulkQueueSize = getBulkQueueSize();
}
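// The constructor above passes a `listener` field that is not shown in this listing. Below is a
// minimal illustrative sketch of such a listener using the standard Elasticsearch
// BulkProcessor.Listener callbacks (beforeBulk/afterBulk); the river's actual listener also tracks
// document counters and the drop-index flag, which is not reproduced here.
// (requires: import org.elasticsearch.action.bulk.BulkProcessor, BulkRequest, BulkResponse)
private final BulkProcessor.Listener listener = new BulkProcessor.Listener() {

    @Override
    public void beforeBulk(long executionId, BulkRequest request) {
        if (logger.isTraceEnabled()) {
            logger.trace("beforeBulk - bulk [{}] with {} actions", executionId, request.numberOfActions());
        }
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
        if (response.hasFailures()) {
            logger.warn("afterBulk - bulk [{}] completed with failures: {}", executionId, response.buildFailureMessage());
        }
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
        logger.error("afterBulk - bulk [" + executionId + "] failed", failure);
    }
};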
@Override
public void run() {
    while (context.getStatus() == Status.RUNNING) {
        try {
            Timestamp<?> lastTimestamp = null;

            // 1. Attempt to fill as much of the bulk request as possible
            QueueEntry entry = context.getStream().take();
            lastTimestamp = processBlockingQueue(entry);
            while ((entry = context.getStream().poll(definition.getBulk().getFlushInterval().millis(), MILLISECONDS)) != null) {
                lastTimestamp = processBlockingQueue(entry);
            }

            // 2. Update the timestamp
            if (lastTimestamp != null) {
                river.setLastTimestamp(lastTimestamp,
                        getBulkProcessor(definition.getIndexName(), definition.getTypeName()).getBulkProcessor());
            }
        } catch (InterruptedException e) {
            logger.info("river-mongodb indexer interrupted");
            releaseProcessors();
            Thread.currentThread().interrupt();
            break;
        }
    }
}