private String getCollectionFromNamespace(String namespace) {
    if (namespace.startsWith(definition.getMongoDb() + '.')) {
        return namespace.substring(definition.getMongoDb().length() + 1);
    }
    logger.error("Cannot get collection from namespace [{}]", namespace);
    return null;
}
public static void setRiverStatus(Client client, String riverName, Status status) {
    logger.info("setRiverStatus called with {} - {}", riverName, status);
    XContentBuilder xb;
    try {
        xb = jsonBuilder().startObject().startObject(MongoDBRiver.TYPE)
                .field(MongoDBRiver.STATUS_FIELD, status)
                .endObject().endObject();
        client.prepareIndex("_river", riverName, MongoDBRiver.STATUS_ID).setSource(xb).get();
    } catch (IOException ioEx) {
        logger.error("setRiverStatus failed for river {}", ioEx, riverName);
    }
}
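For context, the status document written above can be read back with the same constants. A minimal sketch, assuming an already-connected Client; this is what MongoDBRiverHelper.getRiverStatus presumably does, not its verified implementation, and the round-trip of Status via its enum name is an assumption:

import java.util.Map;

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;

static Status readRiverStatus(Client client, String riverName) {
    // Fetch the document that setRiverStatus indexed under _river/{riverName}.
    GetResponse response = client.prepareGet("_river", riverName, MongoDBRiver.STATUS_ID).get();
    if (!response.isExists()) {
        return null;
    }
    @SuppressWarnings("unchecked")
    Map<String, Object> typeObject = (Map<String, Object>) response.getSourceAsMap().get(MongoDBRiver.TYPE);
    // Assumption: Status values serialize to their enum name under STATUS_FIELD.
    return Status.valueOf(typeObject.get(MongoDBRiver.STATUS_FIELD).toString());
}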
/**
 * Execute actions to stop this river.
 *
 * The status thread will not be touched, and the river can be restarted by setting its status again
 * to {@link Status#RUNNING}.
 */
void internalStopRiver() {
    logger.info("Stopping");
    try {
        if (startupThread != null) {
            startupThread.interrupt();
            startupThread = null;
        }
        for (Thread thread : tailerThreads) {
            thread.interrupt();
        }
        tailerThreads.clear();
        if (indexerThread != null) {
            indexerThread.interrupt();
            indexerThread = null;
        }
        logger.info("Stopped");
    } catch (Throwable t) {
        logger.error("Failed to stop", t);
    } finally {
        this.context.setStatus(Status.STOPPED);
    }
}
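Per the Javadoc above, a stopped river can be brought back by writing Status.RUNNING to its status document; the status thread, which internalStopRiver deliberately leaves alive, picks the change up on its next poll. A minimal sketch, assuming a connected Client and an illustrative river name:

// Restart a stopped river by flipping its persisted status back to RUNNING.
// "mongodb-river" is a hypothetical name, not one defined by the project.
MongoDBRiverHelper.setRiverStatus(client, "mongodb-river", Status.RUNNING);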
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
    if (failure instanceof ActionRequestValidationException) {
        if (logger.isTraceEnabled()) {
            logger.trace("Ignore ActionRequestValidationException : {}", failure);
        }
    } else {
        logger.error("afterBulk - Bulk request failed: {} - {} - {}", executionId, request, failure);
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    }
}
logger.error("Cannot import collection {} into existing index", definition.getMongoCollection()); MongoDBRiverHelper.setRiverStatus( esClient, definition.getRiverName(), Status.INITIAL_IMPORT_FAILED); logger.error("river-mongodb slurper interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { logger.error("Exception in initial import", e); logger.debug("Total documents inserted so far: {}", totalDocuments.get()); Thread.currentThread().interrupt();
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    checkBulkProcessorAvailability();
    logger.trace("beforeBulk - new bulk [{}] of items [{}]", executionId, request.numberOfActions());
    if (flushBulkProcessor.get()) {
        logger.trace("About to flush bulk request index[{}] - type[{}]", index, type);
        // Discard everything queued up to and including the last drop-collection
        // request before recreating the mapping.
        int dropCollectionIndex = findLastDropCollection(request.requests());
        request.requests().subList(0, dropCollectionIndex + 1).clear();
        try {
            dropRecreateMapping();
            deletedDocuments.set(0);
            updatedDocuments.set(0);
            insertedDocuments.set(0);
            flushBulkProcessor.set(false);
        } catch (Throwable t) {
            logger.error("Drop collection operation failed", t);
            MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
            request.requests().clear();
            bulkProcessor.close();
            river.close();
        }
    }
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    if (response.hasFailures()) {
        logger.error("Bulk processor failed. {}", response.buildFailureMessage());
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    } else {
        documentCount.addAndGet(response.getItems().length);
        logStatistics(response.getTookInMillis());
        deletedDocuments.set(0);
        updatedDocuments.set(0);
        insertedDocuments.set(0);
        if (logger.isTraceEnabled()) {
            logger.trace("afterBulk - bulk [{}] success [{} items] [{} ms] total [{}]", executionId,
                    response.getItems().length, response.getTookInMillis(), documentCount.get());
        }
    }
}
};
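The beforeBulk and two afterBulk callbacks above together form a BulkProcessor.Listener. For orientation, a sketch of how such a listener is wired into a BulkProcessor with the ES 1.x builder API; the thresholds are illustrative, not the river's actual settings:

import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.common.unit.TimeValue;

// Illustrative wiring; "listener" is the anonymous BulkProcessor.Listener above.
BulkProcessor bulkProcessor = BulkProcessor.builder(client, listener)
        .setBulkActions(1000)                             // flush after 1000 queued actions
        .setConcurrentRequests(1)                         // allow one in-flight bulk request
        .setFlushInterval(TimeValue.timeValueSeconds(5))  // or every 5 seconds, whichever comes first
        .build();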
@SuppressWarnings("unchecked") private void flattenOps(DBObject entry) { Object ref = entry.removeField(MongoDBRiver.OPLOG_REF); Object ops = ref == null ? entry.removeField(MongoDBRiver.OPLOG_OPS) : getRefOps(ref); if (ops != null) { try { for (DBObject op : (List<DBObject>) ops) { String operation = (String) op.get(MongoDBRiver.OPLOG_OPERATION); if (operation.equals(MongoDBRiver.OPLOG_COMMAND_OPERATION)) { DBObject object = (DBObject) op.get(MongoDBRiver.OPLOG_OBJECT); if (object.containsField(MongoDBRiver.OPLOG_CREATE_COMMAND)) { continue; } } entry.putAll(op); } } catch (ClassCastException e) { logger.error(e.toString(), e); } } }
        .setSource(mapping.getSourceAsMap()).get();
if (!pmr.isAcknowledged()) {
    logger.error("Failed to put mapping {} / {} / {}.", index, type, mapping.source());
} else {
    logger.info("Delete and recreate for index / type [{}] [{}] successfully executed.", index, type);
@Override
public void start() {
    // http://stackoverflow.com/questions/5270611/read-maven-properties-file-inside-jar-war-file
    logger.info("{} - {}", DESCRIPTION, MongoDBHelper.getRiverVersion());
    Status status = MongoDBRiverHelper.getRiverStatus(esClient, riverName.getName());
    if (status == Status.IMPORT_FAILED || status == Status.INITIAL_IMPORT_FAILED
            || status == Status.SCRIPT_IMPORT_FAILED || status == Status.START_FAILED) {
        logger.error("Cannot start. Current status is {}", status);
        return;
    }

    if (status == Status.STOPPED) {
        // Leave the current status of the river alone, but set the context status to 'stopped'.
        // Enabling the river via REST will trigger the actual start.
        context.setStatus(Status.STOPPED);
        logger.info("River is currently disabled and will not be started");
    } else {
        // Mark the current status as "waiting for full start".
        context.setStatus(Status.START_PENDING);
        // Request start of the river in the next iteration of the status thread.
        MongoDBRiverHelper.setRiverStatus(esClient, riverName.getName(), Status.RUNNING);
        logger.info("Startup pending");
    }

    statusThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
            "mongodb_river_status:" + definition.getIndexName())
            .newThread(new StatusChecker(this, definition, context));
    statusThread.start();
}
logger.error("Could not set initial timestamp", t); } finally { builder.initialTimestamp(timeStamp);
logger.error("failed to script process {}, ignoring", e, ctx); MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.SCRIPT_IMPORT_FAILED); updateBulkRequest(new BasicDBObject(data), objectId, operation, index, type, routing, parent); } catch (IOException ioEx) { logger.error("Update bulk failed.", ioEx);
    object = file;
} else {
    logger.error("Cannot find file from id: {}", objectId);
Timestamp<?> oplogTimestamp = Timestamp.on(entry);
if (Timestamp.compare(oplogTimestamp, startTimestamp) < 0) {
    logger.error("[Invalid Oplog Entry] - entry timestamp [{}] before startTimestamp [{}]",
            JSONSerializers.getStrict().serialize(entry), startTimestamp);
    return false;
/**
 * Adds an index request operation to a bulk request, updating the last
 * timestamp for a given namespace (i.e. host:dbName.collectionName).
 *
 * @param time the timestamp to record for the namespace
 * @param bulkProcessor the bulk processor the index request is added to
 */
void setLastTimestamp(final Timestamp<?> time, final BulkProcessor bulkProcessor) {
    try {
        if (logger.isTraceEnabled()) {
            logger.trace("setLastTimestamp [{}] [{}]", definition.getMongoOplogNamespace(), time);
        }
        bulkProcessor.add(indexRequest(definition.getRiverIndexName()).type(definition.getRiverName())
                .id(definition.getMongoOplogNamespace()).source(source(time)));
    } catch (IOException e) {
        logger.error("error updating last timestamp for namespace {}", e, definition.getMongoOplogNamespace());
    }
}
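The saved timestamp can later be read back from the river's own index, keyed by the same namespace id. A minimal sketch, assuming a connected Client; the river's actual read-back helper may differ:

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;

// Fetch the last-timestamp document written by setLastTimestamp above.
GetResponse response = client.prepareGet(definition.getRiverIndexName(),
        definition.getRiverName(), definition.getMongoOplogNamespace()).get();
if (response.isExists()) {
    // The exact source layout comes from source(time); printing it raw avoids
    // guessing at field names.
    System.out.println("last timestamp doc: " + response.getSourceAsString());
}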
    updateBulkRequest(entry.getData(), null, operation, definition.getIndexName(), type, null, null);
} catch (IOException ioEx) {
    logger.error("Update bulk failed.", ioEx);

    updateBulkRequest(entry.getData(), objectId, operation, definition.getIndexName(), type, null, null);
} catch (IOException ioEx) {
    logger.error("Update bulk failed.", ioEx);
logger.error("failed to create index [{}], disabling river...", e, definition.getIndexName()); return;
logger.error("Unknown operation for id[{}] - entry [{}] - index[{}] - type[{}]", objectId, data, index, type); context.setStatus(Status.IMPORT_FAILED); return;