public Indexer(MongoDBRiver river) {
    super(river);
    this.river = river;
    this.definition = river.definition;
    this.context = river.context;
    this.esClient = river.esClient;
    this.scriptService = river.scriptService;
    logger.debug(
            "Create bulk processor with parameters - bulk actions: {} - concurrent requests: {} - flush interval: {} - bulk size: {}",
            definition.getBulk().getBulkActions(), definition.getBulk().getConcurrentRequests(),
            definition.getBulk().getFlushInterval(), definition.getBulk().getBulkSize());
    // Eagerly create the bulk processor for the river's default index/type so it is ready before indexing starts.
    getBulkProcessor(definition.getIndexName(), definition.getTypeName());
}
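// The getBulkProcessor(index, type) helper is not shown in this excerpt. A minimal sketch of
// what such a lookup might look like, assuming a per-(index, type) cache of
// MongoDBRiverBulkProcessor instances; the "processors" field name and the "index/type" key
// format are hypothetical, and java.util.concurrent.ConcurrentHashMap/ConcurrentMap imports
// are assumed:
private final ConcurrentMap<String, MongoDBRiverBulkProcessor> processors = new ConcurrentHashMap<>();

private MongoDBRiverBulkProcessor getBulkProcessor(String index, String type) {
    String key = index + "/" + type; // hypothetical cache key
    MongoDBRiverBulkProcessor processor = processors.get(key);
    if (processor == null) {
        // Create lazily on first use; putIfAbsent keeps a single instance per index/type under concurrency.
        MongoDBRiverBulkProcessor created = new MongoDBRiverBulkProcessor(river, definition, esClient, index, type);
        processor = processors.putIfAbsent(key, created);
        if (processor == null) {
            processor = created;
        }
    }
    return processor;
}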
MongoDBRiverBulkProcessor(MongoDBRiver river, MongoDBRiverDefinition definition, Client client, String index, String type) {
    super(river);
    this.river = river;
    // Build the Elasticsearch BulkProcessor from the river's bulk settings; flushes happen when
    // the action count, the accumulated size, or the flush interval threshold is reached first.
    this.bulkProcessor = BulkProcessor.builder(client, listener)
            .setBulkActions(definition.getBulk().getBulkActions())
            .setConcurrentRequests(definition.getBulk().getConcurrentRequests())
            .setFlushInterval(definition.getBulk().getFlushInterval())
            .setBulkSize(definition.getBulk().getBulkSize())
            .build();
    this.definition = definition;
    this.client = client;
    this.index = index;
    this.type = type;
    this.bulkQueueSize = getBulkQueueSize();
}
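// The "listener" field referenced by BulkProcessor.builder(client, listener) is not shown in
// this excerpt. A minimal sketch of a BulkProcessor.Listener, the callback interface the
// Elasticsearch client invokes around each flushed bulk request (imports from
// org.elasticsearch.action.bulk are assumed; the log messages are illustrative, not the
// river's actual ones):
private final BulkProcessor.Listener listener = new BulkProcessor.Listener() {
    @Override
    public void beforeBulk(long executionId, BulkRequest request) {
        // Called just before a bulk request is executed.
        logger.debug("Executing bulk [{}] with {} actions", executionId, request.numberOfActions());
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
        // Called after the bulk request completes; per-item failures must be checked explicitly.
        if (response.hasFailures()) {
            logger.warn("Bulk [{}] had failures: {}", executionId, response.buildFailureMessage());
        }
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
        // Called when the whole bulk request fails (e.g. the cluster is unreachable).
        logger.error("Bulk [" + executionId + "] failed", failure);
    }
};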
@Override
public void run() {
    while (context.getStatus() == Status.RUNNING) {
        try {
            Timestamp<?> lastTimestamp = null;

            // 1. Attempt to fill as much of the bulk request as possible: block for the first
            //    entry, then drain further entries until the stream stays empty for one flush interval.
            QueueEntry entry = context.getStream().take();
            lastTimestamp = processBlockingQueue(entry);

            while ((entry = context.getStream().poll(definition.getBulk().getFlushInterval().millis(), MILLISECONDS)) != null) {
                lastTimestamp = processBlockingQueue(entry);
            }

            // 2. Update the last-seen timestamp so the river can resume from this point.
            if (lastTimestamp != null) {
                river.setLastTimestamp(lastTimestamp,
                        getBulkProcessor(definition.getIndexName(), definition.getTypeName()).getBulkProcessor());
            }
        } catch (InterruptedException e) {
            logger.info("river-mongodb indexer interrupted");
            releaseProcessors();
            Thread.currentThread().interrupt();
            break;
        }
    }
}
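// The take()-then-poll() loop above is a common "drain with timeout" batching pattern: block
// until at least one entry arrives, then keep consuming until the queue has been idle for one
// flush interval, so each batch is as full as possible without unbounded latency. A
// self-contained sketch of the pattern (the queue contents and interval value here are
// illustrative):
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class DrainWithTimeout {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> stream = new LinkedBlockingQueue<>();
        stream.add("op1");
        stream.add("op2");

        long flushIntervalMillis = 100; // analogous to definition.getBulk().getFlushInterval()

        List<String> batch = new ArrayList<>();
        // Block until the first entry is available...
        batch.add(stream.take());
        // ...then drain until the queue stays empty for one flush interval.
        String entry;
        while ((entry = stream.poll(flushIntervalMillis, TimeUnit.MILLISECONDS)) != null) {
            batch.add(entry);
        }
        System.out.println("Flushing batch of " + batch.size() + " entries: " + batch);
    }
}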