public static DBObject applyFieldFilter(DBObject object, final Set<String> includeFields, final Set<String> excludeFields) {
    if (object instanceof GridFSFile) {
        GridFSFile file = (GridFSFile) object;
        DBObject metadata = file.getMetaData();
        if (metadata != null) {
            file.setMetaData(applyFieldFilter(metadata, includeFields, excludeFields));
        }
    } else {
        object = MongoDBHelper.applyExcludeFields(object, excludeFields);
        object = MongoDBHelper.applyIncludeFields(object, includeFields);
    }
    return object;
}
private void stop(RestRequest request, RestChannel channel, Client esClient) {
    String river = request.param("river");
    if (river == null || river.isEmpty()) {
        respondError(request, channel, "Parameter 'river' is required", RestStatus.BAD_REQUEST);
        return;
    }
    MongoDBRiverHelper.setRiverStatus(esClient, river, Status.STOPPED);
    respondSuccess(request, channel, RestStatus.OK);
}
private DBObject applyFieldFilter(DBObject object) {
    if (object instanceof GridFSFile) {
        GridFSFile file = (GridFSFile) object;
        DBObject metadata = file.getMetaData();
        if (metadata != null) {
            file.setMetaData(applyFieldFilter(metadata));
        }
    } else {
        object = MongoDBHelper.applyExcludeFields(object, definition.getExcludeFields());
        object = MongoDBHelper.applyIncludeFields(object, definition.getIncludeFields());
    }
    return object;
}
@Override
public void start() {
    // http://stackoverflow.com/questions/5270611/read-maven-properties-file-inside-jar-war-file
    logger.info("{} - {}", DESCRIPTION, MongoDBHelper.getRiverVersion());
    Status status = MongoDBRiverHelper.getRiverStatus(esClient, riverName.getName());
    if (status == Status.IMPORT_FAILED || status == Status.INITIAL_IMPORT_FAILED || status == Status.SCRIPT_IMPORT_FAILED
            || status == Status.START_FAILED) {
        logger.error("Cannot start. Current status is {}", status);
        return;
    }

    if (status == Status.STOPPED) {
        // Leave the current status of the river alone, but set the context status to 'stopped'.
        // Enabling the river via REST will trigger the actual start.
        context.setStatus(Status.STOPPED);
        logger.info("River is currently disabled and will not be started");
    } else {
        // Mark the current status as "waiting for full start".
        context.setStatus(Status.START_PENDING);
        // Request start of the river in the next iteration of the status thread.
        MongoDBRiverHelper.setRiverStatus(esClient, riverName.getName(), Status.RUNNING);
        logger.info("Startup pending");
    }

    statusThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mongodb_river_status:" + definition.getIndexName())
            .newThread(new StatusChecker(this, definition, context));
    statusThread.start();
}
public static DBObject applyIncludeFields(DBObject bsonObject, final Set<String> includeFields) {
    if (includeFields == null) {
        return bsonObject;
    }
    DBObject filteredObject = new BasicDBObject();

    // Copy the top-level fields that are explicitly included.
    for (String field : bsonObject.keySet()) {
        if (includeFields.contains(field)) {
            filteredObject.put(field, bsonObject.get(field));
        }
    }

    // Handle dotted paths ("parent.child") by filtering the nested object recursively.
    for (String field : includeFields) {
        if (field.contains(".")) {
            String rootObject = field.substring(0, field.indexOf("."));
            Object object = bsonObject.get(rootObject);
            if (object instanceof DBObject) {
                DBObject object2 = (DBObject) object;
                object2 = applyIncludeFields(object2, getChildItems(rootObject, includeFields));
                filteredObject.put(rootObject, object2);
            }
        }
    }
    return filteredObject;
}
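// Illustrative usage of applyIncludeFields (document shape and field names are made up):
// with includeFields = {"name", "address.city"}, only the matching top-level field and the
// matching nested field survive in the result.
DBObject doc = new BasicDBObject("name", "Alice")
        .append("age", 30)
        .append("address", new BasicDBObject("city", "Paris").append("zip", "75001"));
Set<String> include = new HashSet<String>(Arrays.asList("name", "address.city"));
DBObject filtered = MongoDBHelper.applyIncludeFields(doc, include);
// filtered -> { "name" : "Alice" , "address" : { "city" : "Paris" } }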
@Override
public void run() {
    while (true) {
        try {
            Status status = MongoDBRiverHelper.getRiverStatus(this.mongoDBRiver.esClient, this.definition.getRiverName());
            if (status != this.context.getStatus()) {
                if (status == Status.RUNNING && this.context.getStatus() != Status.STARTING) {
                    logger.trace("About to start river: {}", this.definition.getRiverName());
                    mongoDBRiver.internalStartRiver();
                } else if (status == Status.STOPPED) {
                    logger.info("About to stop river: {}", this.definition.getRiverName());
                    mongoDBRiver.internalStopRiver();
                }
            }
            Thread.sleep(1000L);
        } catch (InterruptedException e) {
            logger.debug("Status thread interrupted", e, (Object) null);
            Thread.currentThread().interrupt();
            break;
        }
    }
}
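// A plausible skeleton for the StatusChecker that start() constructs and whose run() method is
// shown above. The field and constructor shapes are inferred from these snippets, and the type
// names (MongoDBRiver, MongoDBRiverDefinition, SharedContext) are assumptions, not copied from
// the source.
private static class StatusChecker implements Runnable {

    private final MongoDBRiver mongoDBRiver;
    private final MongoDBRiverDefinition definition;
    private final SharedContext context;

    StatusChecker(MongoDBRiver mongoDBRiver, MongoDBRiverDefinition definition, SharedContext context) {
        this.mongoDBRiver = mongoDBRiver;
        this.definition = definition;
        this.context = context;
    }

    // run() polls the persisted river status once per second and starts or stops the river
    // when it diverges from the in-memory context status -- see the run() method above.
}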
public static DBObject applyExcludeFields(DBObject bsonObject, Set<String> excludeFields) {
    if (excludeFields == null) {
        return bsonObject;
    }
    DBObject filteredObject = bsonObject;
    for (String field : excludeFields) {
        if (field.contains(".")) {
            String rootObject = field.substring(0, field.indexOf("."));
            String childObject = field.substring(field.indexOf(".") + 1);
            if (filteredObject.containsField(rootObject)) {
                Object object = filteredObject.get(rootObject);
                if (object instanceof DBObject) {
                    // The nested object is modified in place by the recursive call.
                    applyExcludeFields((DBObject) object, new HashSet<String>(Arrays.asList(childObject)));
                }
            }
        } else if (filteredObject.containsField(field)) {
            filteredObject.removeField(field);
        }
    }
    return filteredObject;
}
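// Illustrative usage of applyExcludeFields (field names are made up): excludeFields of
// {"password", "address.zip"} removes the named top-level and nested fields in place and
// leaves everything else untouched.
DBObject doc = new BasicDBObject("name", "Alice")
        .append("password", "secret")
        .append("address", new BasicDBObject("city", "Paris").append("zip", "75001"));
Set<String> exclude = new HashSet<String>(Arrays.asList("password", "address.zip"));
DBObject filtered = MongoDBHelper.applyExcludeFields(doc, exclude);
// filtered -> { "name" : "Alice" , "address" : { "city" : "Paris" } }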
private XContentBuilder build(final DBObject data, final String objectId) throws IOException {
    if (data instanceof GridFSDBFile) {
        logger.info("Add Attachment: {} to index {} / type {}", objectId, definition.getIndexName(), definition.getTypeName());
        return MongoDBHelper.serialize((GridFSDBFile) data);
    } else {
        Map<String, Object> mapData = this.createObjectMap(data);
        return XContentFactory.jsonBuilder().map(mapData);
    }
}
private void start(RestRequest request, RestChannel channel, Client esClient) {
    String river = request.param("river");
    if (river == null || river.isEmpty()) {
        respondError(request, channel, "Parameter 'river' is required", RestStatus.BAD_REQUEST);
        return;
    }
    MongoDBRiverHelper.setRiverStatus(esClient, river, Status.RUNNING);
    respondSuccess(request, channel, RestStatus.OK);
}
source.put("status", MongoDBRiverHelper.getRiverStatus(esClient, riverName)); source.put("settings", hit.getSource()); source.put("lastTimestamp", lastTimestamp);
private void isRiverStale(DBCursor cursor, Timestamp<?> time) throws SlurperException {
    if (cursor == null || time == null) {
        return;
    }
    if (definition.getInitialTimestamp() != null && time.equals(definition.getInitialTimestamp())) {
        return;
    }
    DBObject entry = cursor.next();
    Timestamp<?> oplogTimestamp = Timestamp.on(entry);
    if (!time.equals(oplogTimestamp)) {
        MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.RIVER_STALE);
        throw new SlurperException("River out of sync with oplog.rs collection");
    }
}
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
    if (failure.getClass().equals(ActionRequestValidationException.class)) {
        if (logger.isTraceEnabled()) {
            logger.trace("Ignore ActionRequestValidationException : {}", failure);
        }
    } else {
        logger.error("afterBulk - Bulk request failed: {} - {} - {}", executionId, request, failure);
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    }
}
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    checkBulkProcessorAvailability();
    logger.trace("beforeBulk - new bulk [{}] of items [{}]", executionId, request.numberOfActions());
    if (flushBulkProcessor.get()) {
        logger.trace("About to flush bulk request index[{}] - type[{}]", index, type);
        // Drop everything queued up to and including the last "drop collection" request,
        // then recreate the mapping before the remaining requests are indexed.
        int dropCollectionIndex = findLastDropCollection(request.requests());
        request.requests().subList(0, dropCollectionIndex + 1).clear();
        try {
            dropRecreateMapping();
            deletedDocuments.set(0);
            updatedDocuments.set(0);
            insertedDocuments.set(0);
            flushBulkProcessor.set(false);
        } catch (Throwable t) {
            logger.error("Drop collection operation failed", t);
            MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
            request.requests().clear();
            bulkProcessor.close();
            river.close();
        }
    }
}
MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.INITIAL_IMPORT_FAILED);
return;
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    if (response.hasFailures()) {
        logger.error("Bulk processor failed. {}", response.buildFailureMessage());
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    } else {
        documentCount.addAndGet(response.getItems().length);
        logStatistics(response.getTookInMillis());
        deletedDocuments.set(0);
        updatedDocuments.set(0);
        insertedDocuments.set(0);
        if (logger.isTraceEnabled()) {
            logger.trace("afterBulk - bulk [{}] success [{} items] [{} ms] total [{}]", executionId, response.getItems().length,
                    response.getTookInMillis(), documentCount.get());
        }
    }
}
};
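// Sketch of how a BulkProcessor.Listener like the anonymous one above is typically attached to
// a BulkProcessor. Here "listener" stands for that anonymous listener instance, and the builder
// values are illustrative rather than the river's actual settings.
BulkProcessor bulkProcessor = BulkProcessor.builder(client, listener)
        .setBulkActions(1000)
        .setConcurrentRequests(1)
        .setFlushInterval(TimeValue.timeValueSeconds(5))
        .build();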
} catch (Exception e) {
    logger.error("failed to script process {}, ignoring", e, ctx);
    MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.SCRIPT_IMPORT_FAILED);
} catch (Exception e) {
    logger.warn("failed to script process {}, ignoring", e, ctx);
    MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.SCRIPT_IMPORT_FAILED);
} catch (Throwable t) {
    logger.warn("Failed to start", t);
    MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.START_FAILED);
    context.setStatus(Status.START_FAILED);
} finally {