/**
 * REST handler that disables a river.
 * Persists {@link Status#STOPPED} for the named river; the status polling
 * thread observes the flag and performs the actual shutdown.
 */
private void stop(RestRequest request, RestChannel channel, Client esClient) {
    final String riverName = request.param("river");
    final boolean missing = (riverName == null) || riverName.isEmpty();
    if (missing) {
        respondError(request, channel, "Parameter 'river' is required", RestStatus.BAD_REQUEST);
        return;
    }
    // Only the persisted flag is written here; shutdown happens asynchronously.
    MongoDBRiverHelper.setRiverStatus(esClient, riverName, Status.STOPPED);
    respondSuccess(request, channel, RestStatus.OK);
}
@Override public void start() { // http://stackoverflow.com/questions/5270611/read-maven-properties-file-inside-jar-war-file logger.info("{} - {}", DESCRIPTION, MongoDBHelper.getRiverVersion()); Status status = MongoDBRiverHelper.getRiverStatus(esClient, riverName.getName()); if (status == Status.IMPORT_FAILED || status == Status.INITIAL_IMPORT_FAILED || status == Status.SCRIPT_IMPORT_FAILED || status == Status.START_FAILED) { logger.error("Cannot start. Current status is {}", status); return; } if (status == Status.STOPPED) { // Leave the current status of the river alone, but set the context status to 'stopped'. // Enabling the river via REST will trigger the actual start. context.setStatus(Status.STOPPED); logger.info("River is currently disabled and will not be started"); } else { // Mark the current status as "waiting for full start" context.setStatus(Status.START_PENDING); // Request start of the river in the next iteration of the status thread MongoDBRiverHelper.setRiverStatus(esClient, riverName.getName(), Status.RUNNING); logger.info("Startup pending"); } statusThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mongodb_river_status:" + definition.getIndexName()).newThread( new StatusChecker(this, definition, context)); statusThread.start(); }
// Assemble one river entry for the response: persisted status flag, the
// river's stored settings document, and its timestamp marker.
source.put("status", MongoDBRiverHelper.getRiverStatus(esClient, riverName));
source.put("settings", hit.getSource());
// NOTE(review): presumably the last processed oplog timestamp — confirm
// against where 'lastTimestamp' is computed.
source.put("lastTimestamp", lastTimestamp);
@Override public void run() { while (true) { try { Status status = MongoDBRiverHelper.getRiverStatus(this.mongoDBRiver.esClient, this.definition.getRiverName()); if (status != this.context.getStatus()) { if (status == Status.RUNNING && this.context.getStatus() != Status.STARTING) { logger.trace("About to start river: {}", this.definition.getRiverName()); mongoDBRiver.internalStartRiver(); } else if (status == Status.STOPPED) { logger.info("About to stop river: {}", this.definition.getRiverName()); mongoDBRiver.internalStopRiver(); } } Thread.sleep(1000L); } catch (InterruptedException e) { logger.debug("Status thread interrupted", e, (Object) null); Thread.currentThread().interrupt(); break; } } } }
/**
 * REST handler that enables a river.
 * Persists {@link Status#RUNNING} for the named river; the status polling
 * thread observes the flag and performs the actual startup.
 */
private void start(RestRequest request, RestChannel channel, Client esClient) {
    final String riverName = request.param("river");
    final boolean missing = (riverName == null) || riverName.isEmpty();
    if (missing) {
        respondError(request, channel, "Parameter 'river' is required", RestStatus.BAD_REQUEST);
        return;
    }
    // Only the persisted flag is written here; startup happens asynchronously.
    MongoDBRiverHelper.setRiverStatus(esClient, riverName, Status.RUNNING);
    respondSuccess(request, channel, RestStatus.OK);
}
/**
 * Whole-bulk failure callback. Validation exceptions are benign and ignored;
 * any other failure flags the river as IMPORT_FAILED and shuts it down.
 */
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
    // Fix: use instanceof instead of getClass().equals() so subclasses of
    // ActionRequestValidationException match too and a null failure cannot NPE.
    if (failure instanceof ActionRequestValidationException) {
        if (logger.isTraceEnabled()) {
            logger.trace("Ignore ActionRequestValidationException : {}", failure);
        }
    } else {
        // Fix: pass the Throwable as the logger cause (same overload shape as the
        // existing logger.debug(msg, e, ...) call) so the stack trace is retained.
        logger.error("afterBulk - Bulk request failed: {} - {} - {}", failure, executionId, request, failure);
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        // Drop pending requests and tear the river down — the import is unrecoverable.
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    }
}
/**
 * Verifies that the river is still in sync with the oplog.rs collection.
 * The first entry yielded by the cursor must carry the last processed
 * timestamp; otherwise the river has fallen off the capped oplog.
 *
 * @throws SlurperException when the oplog no longer contains the timestamp
 */
private void isRiverStale(DBCursor cursor, Timestamp<?> time) throws SlurperException {
    // Nothing to compare — trivially not stale.
    if (cursor == null || time == null) {
        return;
    }
    // The configured initial timestamp is exempt from the staleness check.
    final Timestamp<?> initialTimestamp = definition.getInitialTimestamp();
    if (initialTimestamp != null && time.equals(initialTimestamp)) {
        return;
    }
    final Timestamp<?> oplogTimestamp = Timestamp.on(cursor.next());
    if (time.equals(oplogTimestamp)) {
        return;
    }
    // Persist the stale flag before aborting so operators can see why it died.
    MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.RIVER_STALE);
    throw new SlurperException("River out of sync with oplog.rs collection");
}
/**
 * Pre-bulk hook. When a flush has been requested (drop-collection seen),
 * discards every queued request up to and including the last drop-collection
 * marker and recreates the mapping before the bulk executes.
 */
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    checkBulkProcessorAvailability();
    logger.trace("beforeBulk - new bulk [{}] of items [{}]", executionId, request.numberOfActions());
    if (!flushBulkProcessor.get()) {
        return;
    }
    logger.trace("About to flush bulk request index[{}] - type[{}]", index, type);
    // Everything up to and including the last drop-collection entry is obsolete.
    final int lastDropIndex = findLastDropCollection(request.requests());
    request.requests().subList(0, lastDropIndex + 1).clear();
    try {
        dropRecreateMapping();
        deletedDocuments.set(0);
        updatedDocuments.set(0);
        insertedDocuments.set(0);
        flushBulkProcessor.set(false);
    } catch (Throwable t) {
        // Mapping recreation failed: flag the river and shut everything down.
        logger.error("Drop collection operation failed", t);
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    }
}
/**
 * Per-bulk completion callback. Any item-level failure is treated as fatal
 * for the import: the river is flagged IMPORT_FAILED and torn down. On
 * success the document counters and per-bulk statistics are updated.
 */
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    if (response.hasFailures()) {
        logger.error("Bulk processor failed. {}", response.buildFailureMessage());
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        // Discard pending requests and shut the river down — import is aborted.
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    } else {
        // Account for the indexed items, then reset the per-bulk counters.
        documentCount.addAndGet(response.getItems().length);
        logStatistics(response.getTookInMillis());
        deletedDocuments.set(0);
        updatedDocuments.set(0);
        insertedDocuments.set(0);
        if (logger.isTraceEnabled()) {
            logger.trace("afterBulk - bulk [{}] success [{} items] [{} ms] total [{}]", executionId, response.getItems().length,
                    response.getTookInMillis(), documentCount.get());
        }
    }
} // end of anonymous BulkProcessor.Listener
};
// Initial import failed: persist the failure flag (start() refuses to run a
// river in any *_FAILED state, so it will not restart automatically) and abort.
MongoDBRiverHelper.setRiverStatus(
        esClient, definition.getRiverName(), Status.INITIAL_IMPORT_FAILED);
return;
} catch (Exception e) {
    // Script transformation failed for this document: record the error, flag
    // the river SCRIPT_IMPORT_FAILED, and continue ("ignoring" the document).
    logger.error("failed to script process {}, ignoring", e, ctx);
    MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.SCRIPT_IMPORT_FAILED);
} catch (Exception e) {
    // Script transformation failed for this document: flag the river and move on.
    // NOTE(review): a sibling handler logs the same condition at error level —
    // confirm whether warn vs error here is intentional.
    logger.warn("failed to script process {}, ignoring", e, ctx);
    MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.SCRIPT_IMPORT_FAILED);
} catch (Throwable t) {
    // Startup failed: record START_FAILED both in the persisted river status and
    // in the in-memory context so the river is not restarted until repaired.
    logger.warn("Failed to start", t);
    MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.START_FAILED);
    context.setStatus(Status.START_FAILED);
} finally {