@Override
public void run() {
    while (true) {
        try {
            // Poll the river status stored in Elasticsearch once per second and
            // reconcile it with the in-memory state.
            Status status = MongoDBRiverHelper.getRiverStatus(this.mongoDBRiver.esClient, this.definition.getRiverName());
            if (status != this.context.getStatus()) {
                if (status == Status.RUNNING && this.context.getStatus() != Status.STARTING) {
                    logger.trace("About to start river: {}", this.definition.getRiverName());
                    mongoDBRiver.internalStartRiver();
                } else if (status == Status.STOPPED) {
                    logger.info("About to stop river: {}", this.definition.getRiverName());
                    mongoDBRiver.internalStopRiver();
                }
            }
            Thread.sleep(1000L);
        } catch (InterruptedException e) {
            logger.debug("Status thread interrupted", e);
            Thread.currentThread().interrupt();
            break;
        }
    }
}
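// Hypothetical wiring for the status checker above (names assumed for
// illustration, not taken from the river's own startup code): run it as a
// daemon thread so the once-per-second poll does not keep the JVM alive.
Thread statusThread = new Thread(statusChecker, "mongodb_river_status");
statusThread.setDaemon(true);
statusThread.start();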
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
    // Validation failures (e.g. an empty bulk) are benign and only traced;
    // any other failure marks the import as failed and shuts the river down.
    if (failure instanceof ActionRequestValidationException) {
        if (logger.isTraceEnabled()) {
            logger.trace("Ignore ActionRequestValidationException : {}", failure);
        }
    } else {
        logger.error("afterBulk - Bulk request failed: {} - {} - {}", executionId, request, failure);
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    }
}
private void isRiverStale(DBCursor cursor, Timestamp<?> time) throws SlurperException {
    if (cursor == null || time == null) {
        return;
    }
    // A configured initial timestamp will not appear in the oplog, so skip the check.
    if (definition.getInitialTimestamp() != null && time.equals(definition.getInitialTimestamp())) {
        return;
    }
    // The cursor starts at the last processed position; if the first entry's
    // timestamp no longer matches, the oplog has rolled over past our saved state.
    DBObject entry = cursor.next();
    Timestamp<?> oplogTimestamp = Timestamp.on(entry);
    if (!time.equals(oplogTimestamp)) {
        MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.RIVER_STALE);
        throw new SlurperException("River out of sync with oplog.rs collection");
    }
}
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    checkBulkProcessorAvailability();
    logger.trace("beforeBulk - new bulk [{}] of items [{}]", executionId, request.numberOfActions());
    if (flushBulkProcessor.get()) {
        logger.trace("About to flush bulk request index[{}] - type[{}]", index, type);
        // Discard everything queued up to and including the last drop_collection
        // marker; those requests target the mapping that is about to be recreated.
        int dropCollectionIndex = findLastDropCollection(request.requests());
        request.requests().subList(0, dropCollectionIndex + 1).clear();
        try {
            dropRecreateMapping();
            deletedDocuments.set(0);
            updatedDocuments.set(0);
            insertedDocuments.set(0);
            flushBulkProcessor.set(false);
        } catch (Throwable t) {
            logger.error("Drop collection operation failed", t);
            MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
            request.requests().clear();
            bulkProcessor.close();
            river.close();
        }
    }
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    if (response.hasFailures()) {
        logger.error("Bulk processor failed. {}", response.buildFailureMessage());
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    } else {
        documentCount.addAndGet(response.getItems().length);
        logStatistics(response.getTookInMillis());
        deletedDocuments.set(0);
        updatedDocuments.set(0);
        insertedDocuments.set(0);
        if (logger.isTraceEnabled()) {
            logger.trace("afterBulk - bulk [{}] success [{} items] [{} ms] total [{}]", executionId,
                    response.getItems().length, response.getTookInMillis(), documentCount.get());
        }
    }
}
};
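// For context, a listener like the anonymous one above is handed to the
// BulkProcessor at construction time. A minimal wiring sketch, assuming an
// Elasticsearch 1.x `Client` named `client` and the listener above bound to
// `listener`; the threshold values are illustrative, not the river's settings:
BulkProcessor bulkProcessor = BulkProcessor.builder(client, listener)
        .setBulkActions(1000)                            // flush after 1000 queued actions
        .setConcurrentRequests(1)                        // at most one bulk in flight
        .setFlushInterval(TimeValue.timeValueSeconds(5)) // or flush every 5 seconds
        .build();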
MongoDBRiverHelper.setRiverStatus(
        esClient, definition.getRiverName(), Status.INITIAL_IMPORT_FAILED);
return;
} catch (MongoSocketException | MongoTimeoutException | MongoCursorNotFoundException e) {
    // Transient connectivity loss or a dropped cursor: log and retry after a delay.
    logger.info("Oplog tailing - {} - {}. Will retry.", e.getClass().getSimpleName(), e.getMessage());
    logger.debug("Total documents inserted so far by river {}: {}", definition.getRiverName(), totalDocuments.get());
    try {
        Thread.sleep(MongoDBRiver.MONGODB_RETRY_ERROR_DELAY_MS);
/**
 * Get the latest timestamp for a given namespace.
 */
@SuppressWarnings("unchecked")
public static Timestamp<?> getLastTimestamp(Client client, MongoDBRiverDefinition definition) {
    client.admin().indices().prepareRefresh(definition.getRiverIndexName()).get();
    GetResponse lastTimestampResponse = client.prepareGet(definition.getRiverIndexName(), definition.getRiverName(),
            definition.getMongoOplogNamespace()).get();
    if (lastTimestampResponse.isExists()) {
        Map<String, Object> mongodbState = (Map<String, Object>) lastTimestampResponse.getSourceAsMap().get(TYPE);
        if (mongodbState != null) {
            Timestamp<?> lastTimestamp = Timestamp.on(mongodbState);
            if (lastTimestamp != null) {
                return lastTimestamp;
            }
        }
    } else if (definition.getInitialTimestamp() != null) {
        return definition.getInitialTimestamp();
    }
    return null;
}
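// Hypothetical startup usage of getLastTimestamp (variable names assumed for
// illustration): a null return means neither saved state nor a configured
// initial timestamp exists, so a full initial import is required; otherwise
// the slurper can resume tailing the oplog from the saved position.
Timestamp<?> lastTimestamp = getLastTimestamp(client, definition);
if (lastTimestamp == null) {
    // run the initial import, then begin tailing the oplog
} else {
    // resume oplog tailing from lastTimestamp
}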
} catch (Exception e) {
    logger.error("failed to script process {}, ignoring", e, ctx);
    MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.SCRIPT_IMPORT_FAILED);
/**
 * Adds an index request operation to a bulk request, updating the last
 * timestamp for a given namespace (i.e. host:dbName.collectionName).
 *
 * @param time the new last timestamp
 * @param bulkProcessor the processor the index request is added to
 */
void setLastTimestamp(final Timestamp<?> time, final BulkProcessor bulkProcessor) {
    try {
        if (logger.isTraceEnabled()) {
            logger.trace("setLastTimestamp [{}] [{}]", definition.getMongoOplogNamespace(), time);
        }
        bulkProcessor.add(indexRequest(definition.getRiverIndexName()).type(definition.getRiverName())
                .id(definition.getMongoOplogNamespace()).source(source(time)));
    } catch (IOException e) {
        logger.error("error updating last timestamp for namespace {}", e, definition.getMongoOplogNamespace());
    }
}
} catch (Exception e) {
    logger.warn("failed to script process {}, ignoring", e, ctx);
    MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.SCRIPT_IMPORT_FAILED);
} catch (Throwable t) {
    logger.warn("Failed to start", t);
    MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.START_FAILED);
    context.setStatus(Status.START_FAILED);
} finally {