private void logStatistics(long duration) {
    if (definition.isStoreStatistics()) {
        long totalDocuments = insertedDocuments.get() + updatedDocuments.get() + deletedDocuments.get();
        logger.trace("Indexed {} documents: {} insertions, {} updates, {} deletions", totalDocuments, insertedDocuments.get(),
                updatedDocuments.get(), deletedDocuments.get());
        Map<String, Object> source = new HashMap<String, Object>();
        Map<String, Object> statistics = Maps.newHashMap();
        statistics.put("duration", duration);
        statistics.put("date", new Date());
        statistics.put("index", index);
        statistics.put("type", type);
        statistics.put("documents.inserted", insertedDocuments.get());
        statistics.put("documents.updated", updatedDocuments.get());
        statistics.put("documents.deleted", deletedDocuments.get());
        statistics.put("documents.total", documentCount.get());
        source.put("statistics", statistics);
        client.prepareIndex(definition.getStatisticsIndexName(), definition.getStatisticsTypeName()).setSource(source).get();
    }
}
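// Example (assumed) shape of the statistics document indexed above, for a bulk
// that inserted 120, updated 30, and deleted 5 documents in 250 ms:
//
//   { "statistics": {
//       "duration": 250, "date": "2014-01-01T00:00:00Z",
//       "index": "myindex", "type": "mytype",
//       "documents.inserted": 120, "documents.updated": 30,
//       "documents.deleted": 5, "documents.total": 155 } }
//
// Values and the index/type names are illustrative only; the field names match
// the map built above.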
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    checkBulkProcessorAvailability();
    logger.trace("beforeBulk - new bulk [{}] of items [{}]", executionId, request.numberOfActions());
    if (flushBulkProcessor.get()) {
        logger.trace("About to flush bulk request index[{}] - type[{}]", index, type);
        int dropCollectionIndex = findLastDropCollection(request.requests());
        request.requests().subList(0, dropCollectionIndex + 1).clear();
        try {
            dropRecreateMapping();
            deletedDocuments.set(0);
            updatedDocuments.set(0);
            insertedDocuments.set(0);
            flushBulkProcessor.set(false);
        } catch (Throwable t) {
            logger.error("Drop collection operation failed", t);
            MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
            request.requests().clear();
            bulkProcessor.close();
            river.close();
        }
    }
}
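// A minimal sketch of what findLastDropCollection (called above) is assumed to
// do: scan the pending actions for the last one that corresponds to the
// drop-collection event, so that it and everything queued before it can be
// discarded before the mapping is dropped and recreated. The marker check is
// hypothetical; the real criterion depends on how the river tags these entries.
private int findLastDropCollection(List<ActionRequest> requests) {
    int lastIndex = -1;
    for (int i = 0; i < requests.size(); i++) {
        if (isDropCollectionMarker(requests.get(i))) { // hypothetical helper
            lastIndex = i;
        }
    }
    return lastIndex;
}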
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
    if (failure.getClass().equals(ActionRequestValidationException.class)) {
        if (logger.isTraceEnabled()) {
            logger.trace("Ignore ActionRequestValidationException: {}", failure);
        }
    } else {
        logger.error("afterBulk - Bulk request failed: {} - {} - {}", executionId, request, failure);
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    }
}
@Inject
public RestMongoDBRiverAction(Settings settings, Client esClient, RestController controller,
        @RiverIndexName String riverIndexName) {
    super(settings, controller, esClient);
    this.riverIndexName = riverIndexName;
    String baseUrl = "/" + riverIndexName + "/" + MongoDBRiver.TYPE;
    logger.trace("RestMongoDBRiverAction - baseUrl: {}", baseUrl);
    controller.registerHandler(RestRequest.Method.GET, baseUrl + "/{action}", this);
    controller.registerHandler(RestRequest.Method.POST, baseUrl + "/{river}/{action}", this);
}
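// With a riverIndexName of "_river", the registrations above expose (assumed):
//   GET  /_river/mongodb/{action}          e.g. GET  /_river/mongodb/list
//   POST /_river/mongodb/{river}/{action}  e.g. POST /_river/mongodb/myriver/start
// The concrete action and river names are illustrative assumptions; only the
// URL templates come from the registerHandler calls above.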
private boolean isBulkProcessorAvailable() {
    NodesStatsResponse response = client.admin().cluster().prepareNodesStats().setThreadPool(true).get();
    for (NodeStats nodeStats : response.getNodes()) {
        Iterator<Stats> iterator = nodeStats.getThreadPool().iterator();
        while (iterator.hasNext()) {
            Stats stats = iterator.next();
            if ("bulk".equals(stats.getName())) {
                int queue = stats.getQueue();
                // Cast to double: with integer division, queue / bulkQueueSize is 0
                // whenever queue < bulkQueueSize, so the check would never trip.
                double availability = 1 - (double) queue / bulkQueueSize;
                logger.trace("bulkQueueSize [{}] - queue [{}] - availability [{}]", bulkQueueSize, queue, availability);
                return availability > 0.1;
            }
        }
    }
    return true;
}
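// Worked example of the availability test above, assuming bulkQueueSize = 50:
//   queue = 10  ->  1 - 10.0 / 50 = 0.80  -> available (> 0.1)
//   queue = 46  ->  1 - 46.0 / 50 = 0.08  -> not available
// In other words, the bulk processor is considered available while more than
// 10% of the node's bulk thread-pool queue is still free.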
public void deleteBulkRequest(String id, String routing, String parent) {
    logger.trace("deleteBulkRequest - id: {} - index: {} - type: {} - routing: {} - parent: {}", id, index, type, routing, parent);
    bulkProcessor.add(deleteRequest(index).type(type).id(id).routing(routing).parent(parent));
    deletedDocuments.incrementAndGet();
}
        .setQuery(QueryBuilders.queryString(MongoDBRiver.TYPE).defaultField("type"))
        .setFrom(from).setSize(count).get();
long totalHits = searchResponse.getHits().totalHits();
logger.trace("totalHits: {}", totalHits);
Map<String, Object> data = new HashMap<String, Object>();
data.put("hits", totalHits);
source.put("indexCount", MongoDBRiver.getIndexCount(esClient, definition));
if (logger.isTraceEnabled()) {
    logger.trace("source: {}", hit.getSourceAsString());
private void addToStream(final Operation operation, final Timestamp<?> currentTimestamp, final DBObject data,
        final String collection) throws InterruptedException {
    if (logger.isTraceEnabled()) {
        String dataString = data.toString();
        if (dataString.length() > 400) {
            logger.trace("addToStream - operation [{}], currentTimestamp [{}], data (_id:[{}], serialized length:{}), collection [{}]",
                    operation, currentTimestamp, data.get("_id"), dataString.length(), collection);
        } else {
            logger.trace("addToStream - operation [{}], currentTimestamp [{}], data [{}], collection [{}]", operation,
                    currentTimestamp, dataString, collection);
        }
    }
    if (operation == Operation.DROP_DATABASE) {
        logger.info("addToStream - Operation.DROP_DATABASE, currentTimestamp [{}], data [{}], collection [{}]", currentTimestamp,
                data, collection);
        if (definition.isImportAllCollections()) {
            for (String name : slurpedDb.getCollectionNames()) {
                logger.info("addToStream - isImportAllCollections - Operation.DROP_DATABASE, currentTimestamp [{}], data [{}], collection [{}]",
                        currentTimestamp, data, name);
                context.getStream().put(new MongoDBRiver.QueueEntry(currentTimestamp, Operation.DROP_COLLECTION, data, name));
            }
        } else {
            context.getStream().put(new MongoDBRiver.QueueEntry(currentTimestamp, Operation.DROP_COLLECTION, data, collection));
        }
    } else {
        context.getStream().put(new MongoDBRiver.QueueEntry(currentTimestamp, operation, data, collection));
    }
}
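// Illustrative effect of the DROP_DATABASE branch above: with
// importAllCollections enabled and collections ["users", "orders"] in the
// slurped database, one DROP_DATABASE entry fans out into one DROP_COLLECTION
// queue entry per collection:
//   QueueEntry(ts, Operation.DROP_COLLECTION, data, "users")
//   QueueEntry(ts, Operation.DROP_COLLECTION, data, "orders")
// The collection names are examples only.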
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    if (response.hasFailures()) {
        logger.error("Bulk processor failed. {}", response.buildFailureMessage());
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    } else {
        documentCount.addAndGet(response.getItems().length);
        logStatistics(response.getTookInMillis());
        deletedDocuments.set(0);
        updatedDocuments.set(0);
        insertedDocuments.set(0);
        if (logger.isTraceEnabled()) {
            logger.trace("afterBulk - bulk [{}] success [{} items] [{} ms] total [{}]", executionId,
                    response.getItems().length, response.getTookInMillis(), documentCount.get());
        }
    }
}
};
@Override
public void run() {
    while (true) {
        try {
            Status status = MongoDBRiverHelper.getRiverStatus(this.mongoDBRiver.esClient, this.definition.getRiverName());
            if (status != this.context.getStatus()) {
                if (status == Status.RUNNING && this.context.getStatus() != Status.STARTING) {
                    logger.trace("About to start river: {}", this.definition.getRiverName());
                    mongoDBRiver.internalStartRiver();
                } else if (status == Status.STOPPED) {
                    logger.info("About to stop river: {}", this.definition.getRiverName());
                    mongoDBRiver.internalStopRiver();
                }
            }
            Thread.sleep(1000L);
        } catch (InterruptedException e) {
            logger.debug("Status thread interrupted", e);
            Thread.currentThread().interrupt();
            break;
        }
    }
}
private void addQueryToStream(final Operation operation, final Timestamp<?> currentTimestamp, final DBObject update,
        final String collection) throws InterruptedException {
    if (logger.isTraceEnabled()) {
        logger.trace("addQueryToStream - operation [{}], currentTimestamp [{}], update [{}]", operation, currentTimestamp, update);
    }
    if (collection == null) {
        for (String name : slurpedDb.getCollectionNames()) {
            DBCollection slurpedCollection = slurpedDb.getCollection(name);
            addQueryToStream(operation, currentTimestamp, update, name, slurpedCollection);
        }
    } else {
        DBCollection slurpedCollection = slurpedDb.getCollection(collection);
        addQueryToStream(operation, currentTimestamp, update, collection, slurpedCollection);
    }
}
} finally {
    if (cursor != null) {
        logger.trace("Closing oplog cursor");
        cursor.close();
    }
}
private void updateBulkRequest(DBObject data, String objectId, Operation operation, String index, String type, String routing,
        String parent) throws IOException {
    if (logger.isTraceEnabled()) {
        logger.trace("Operation: {} - index: {} - type: {} - routing: {} - parent: {}", operation, index, type, routing, parent);
    }
    if (operation == Operation.INSERT) {
        if (logger.isTraceEnabled()) {
            logger.trace("Insert operation - id: {} - contains attachment: {}", objectId, (data instanceof GridFSDBFile));
        }
        // ...
    } else if (operation == Operation.UPDATE) {
        if (logger.isTraceEnabled()) {
            logger.trace("Update operation - id: {} - contains attachment: {}", objectId, (data instanceof GridFSDBFile));
        }
        // ...
    } else if (operation == Operation.DELETE) {
        if (logger.isTraceEnabled()) {
            logger.trace("Delete request [{}], [{}], [{}]", index, type, objectId);
        }
        deleteBulkRequest(objectId, index, type, routing, parent);
    }
}
@Inject
public MongoDBRiver(RiverName riverName, RiverSettings settings, @RiverIndexName String riverIndexName, Client esClient,
        ScriptService scriptService, MongoClientService mongoClientService) {
    super(riverName, settings);
    if (logger.isTraceEnabled()) {
        logger.trace("Initializing");
    }
    this.esClient = esClient;
    this.scriptService = scriptService;
    this.mongoClientService = mongoClientService;
    this.definition = MongoDBRiverDefinition.parseSettings(riverName.name(), riverIndexName, settings, scriptService);
    BlockingQueue<QueueEntry> stream = definition.getThrottleSize() == -1
            ? new LinkedTransferQueue<QueueEntry>()
            : new ArrayBlockingQueue<QueueEntry>(definition.getThrottleSize());
    this.context = new SharedContext(stream, Status.STOPPED);
}
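// A minimal sketch of the queue selection above: a throttle size of -1 means
// "do not throttle", so an unbounded LinkedTransferQueue is used; otherwise a
// bounded ArrayBlockingQueue makes stream.put(...) block once throttleSize
// entries are pending, applying back-pressure to the oplog reader. The helper
// name is hypothetical, for illustration only.
static BlockingQueue<QueueEntry> newStream(int throttleSize) {
    return throttleSize == -1
            ? new LinkedTransferQueue<QueueEntry>()
            : new ArrayBlockingQueue<QueueEntry>(throttleSize);
}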
private void processAdminCommandOplogEntry(final DBObject entry, final Timestamp<?> startTimestamp) throws InterruptedException {
    if (logger.isTraceEnabled()) {
        logger.trace("processAdminCommandOplogEntry - [{}]", entry);
    }
    DBObject object = (DBObject) entry.get(MongoDBRiver.OPLOG_OBJECT);
    if (definition.isImportAllCollections()) {
        if (object.containsField(MongoDBRiver.OPLOG_RENAME_COLLECTION_COMMAND_OPERATION)
                && object.containsField(MongoDBRiver.OPLOG_TO)) {
            String to = object.get(MongoDBRiver.OPLOG_TO).toString();
            if (to.startsWith(definition.getMongoDb())) {
                String newCollection = getCollectionFromNamespace(to);
                DBCollection coll = slurpedDb.getCollection(newCollection);
                CollectionSlurper importer = new CollectionSlurper(river, mongoClusterClient);
                importer.importCollection(coll, startTimestamp);
            }
        }
    }
}
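// A plausible sketch of getCollectionFromNamespace (used above), assuming the
// namespace has the usual MongoDB "dbName.collectionName" form; the real
// implementation may validate the database prefix.
private String getCollectionFromNamespace(String namespace) {
    // e.g. "mydb.users" -> "users"
    return namespace.substring(namespace.indexOf('.') + 1);
}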
private void dropRecreateMapping() throws IOException, InterruptedException {
    try {
        semaphore.acquire();
        logger.trace("dropRecreateMapping index[{}] - type[{}]", index, type);
        client.admin().indices().prepareRefresh(index).get();
        ImmutableOpenMap<String, MappingMetaData> mappings = client.admin().cluster().prepareState().get().getState()
                .getMetaData().index(index).mappings();
        logger.trace("mappings contains type {}: {}", type, mappings.containsKey(type));
        if (mappings.containsKey(type)) {
private void deleteBulkRequest(String objectId, String index, String type, String routing, String parent) {
    if (logger.isTraceEnabled()) {
        logger.trace("deleteBulkRequest - objectId: {} - index: {} - type: {} - routing: {} - parent: {}", objectId, index, type,
                routing, parent);
    }
    if (definition.getParentTypes() != null && definition.getParentTypes().contains(type)) {
        QueryBuilder builder = QueryBuilders.hasParentQuery(type, QueryBuilders.termQuery(MongoDBRiver.MONGODB_ID_FIELD, objectId));
        SearchResponse response = esClient.prepareSearch(index).setQuery(builder).setRouting(routing)
                .addField(MongoDBRiver.MONGODB_ID_FIELD).execute().actionGet();
        for (SearchHit hit : response.getHits().getHits()) {
            getBulkProcessor(index, hit.getType()).deleteBulkRequest(hit.getId(), routing, objectId);
        }
    }
    getBulkProcessor(index, type).deleteBulkRequest(objectId, routing, parent);
}
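// The hasParentQuery above finds child documents whose parent carries the
// deleted MongoDB _id, so deleting a parent also queues deletes for its
// children; the parent's objectId is passed as the child delete's parent value
// so the requests route to the same shard as the original documents.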
    return false;
logger.trace("Found {} database", MongoDBRiver.MONGODB_ADMIN_DATABASE);
DBObject command = BasicDBObjectBuilder.start(
        ImmutableMap.builder().put("serverStatus", 1).put("asserts", 0).put("backgroundFlushing", 0).put("connections", 0)
                .put("locks", 0).put("metrics", 0).put("network", 0).put("opcounters", 0).put("opcountersRepl", 0)
                .put("recordStats", 0).put("repl", 0).build()).get();
logger.trace("About to execute: {}", command);
CommandResult cr = adminDb.command(command, ReadPreference.primary());
logger.trace("Command executed, returned: {}", cr);
logger.trace("serverStatus: {}", cr);
logger.trace("process: {}", process);
/**
 * Adds an index request operation to a bulk request, updating the last
 * timestamp for a given namespace (i.e. host:dbName.collectionName).
 *
 * @param time the new last timestamp
 * @param bulkProcessor the bulk processor the index request is added to
 */
void setLastTimestamp(final Timestamp<?> time, final BulkProcessor bulkProcessor) {
    try {
        if (logger.isTraceEnabled()) {
            logger.trace("setLastTimestamp [{}] [{}]", definition.getMongoOplogNamespace(), time);
        }
        bulkProcessor.add(indexRequest(definition.getRiverIndexName()).type(definition.getRiverName())
                .id(definition.getMongoOplogNamespace()).source(source(time)));
    } catch (IOException e) {
        logger.error("error updating last timestamp for namespace {}", e, definition.getMongoOplogNamespace());
    }
}
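// Assumed shape of the document produced by source(time) and indexed above,
// keyed in the river index by the oplog namespace (id values illustrative):
//   _index: "_river", _type: "myriver", _id: "host:mydb.users"
//   { "mongodb": { "_last_ts": "<serialized oplog timestamp>" } }
// The field names are assumptions; only the index, type, and id expressions
// are taken from the request construction above.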