private String extractIndex(Map<String, Object> ctx) {
    String index = (String) ctx.get("_index");
    if (index == null) {
        index = definition.getIndexName();
    }
    return index;
}
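// A minimal usage sketch with hypothetical values: a user transformation script can
// route a document elsewhere by setting ctx._index; extractIndex falls back to the
// river's configured index when no override is present.
Map<String, Object> ctx = new HashMap<String, Object>();
ctx.put("_index", "customers-archive"); // hypothetical override set by a script
String target = extractIndex(ctx);      // -> "customers-archive"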
public static long getIndexCount(Client client, MongoDBRiverDefinition definition) {
    if (client.admin().indices().prepareExists(definition.getIndexName()).get().isExists()) {
        if (definition.isImportAllCollections()) {
            return client.prepareCount(definition.getIndexName()).execute().actionGet().getCount();
        } else {
            if (client.admin().indices().prepareTypesExists(definition.getIndexName())
                    .setTypes(definition.getTypeName()).get().isExists()) {
                return client.prepareCount(definition.getIndexName()).setTypes(definition.getTypeName()).get().getCount();
            }
        }
    }
    return 0;
}
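// A minimal usage sketch, assuming a connected Client and a parsed MongoDBRiverDefinition:
// returns 0 when the index (or, for single-collection rivers, the type) does not exist yet.
long alreadyIndexed = getIndexCount(client, definition);
logger.info("{} documents already indexed in [{}]", alreadyIndexed, definition.getIndexName());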
startupThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
        "mongodb_river_startup:" + definition.getIndexName()).newThread(startupRunnable);
startupThread.start();
private XContentBuilder build(final DBObject data, final String objectId) throws IOException {
    if (data instanceof GridFSDBFile) {
        logger.info("Add Attachment: {} to index {} / type {}", objectId, definition.getIndexName(), definition.getTypeName());
        return MongoDBHelper.serialize((GridFSDBFile) data);
    } else {
        Map<String, Object> mapData = this.createObjectMap(data);
        return XContentFactory.jsonBuilder().map(mapData);
    }
}
@Override
public void run() {
    while (context.getStatus() == Status.RUNNING) {
        try {
            Timestamp<?> lastTimestamp = null;

            // 1. Attempt to fill as much of the bulk request as possible
            QueueEntry entry = context.getStream().take();
            lastTimestamp = processBlockingQueue(entry);
            while ((entry = context.getStream().poll(definition.getBulk().getFlushInterval().millis(), MILLISECONDS)) != null) {
                lastTimestamp = processBlockingQueue(entry);
            }

            // 2. Update the timestamp
            if (lastTimestamp != null) {
                river.setLastTimestamp(lastTimestamp,
                        getBulkProcessor(definition.getIndexName(), definition.getTypeName()).getBulkProcessor());
            }
        } catch (InterruptedException e) {
            logger.info("river-mongodb indexer interrupted");
            releaseProcessors();
            Thread.currentThread().interrupt();
            break;
        }
    }
}
@Override
public void start() {
    // http://stackoverflow.com/questions/5270611/read-maven-properties-file-inside-jar-war-file
    logger.info("{} - {}", DESCRIPTION, MongoDBHelper.getRiverVersion());
    Status status = MongoDBRiverHelper.getRiverStatus(esClient, riverName.getName());
    if (status == Status.IMPORT_FAILED || status == Status.INITIAL_IMPORT_FAILED || status == Status.SCRIPT_IMPORT_FAILED
            || status == Status.START_FAILED) {
        logger.error("Cannot start. Current status is {}", status);
        return;
    }

    if (status == Status.STOPPED) {
        // Leave the current status of the river alone, but set the context status to 'stopped'.
        // Enabling the river via REST will trigger the actual start.
        context.setStatus(Status.STOPPED);
        logger.info("River is currently disabled and will not be started");
    } else {
        // Mark the current status as "waiting for full start"
        context.setStatus(Status.START_PENDING);
        // Request start of the river in the next iteration of the status thread
        MongoDBRiverHelper.setRiverStatus(esClient, riverName.getName(), Status.RUNNING);
        logger.info("Startup pending");
    }

    statusThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
            "mongodb_river_status:" + definition.getIndexName()).newThread(new StatusChecker(this, definition, context));
    statusThread.start();
}
public Indexer(MongoDBRiver river) {
    super(river);
    this.river = river;
    this.definition = river.definition;
    this.context = river.context;
    this.esClient = river.esClient;
    this.scriptService = river.scriptService;
    logger.debug(
            "Create bulk processor with parameters - bulk actions: {} - concurrent request: {} - flush interval: {} - bulk size: {}",
            definition.getBulk().getBulkActions(), definition.getBulk().getConcurrentRequests(),
            definition.getBulk().getFlushInterval(), definition.getBulk().getBulkSize());
    getBulkProcessor(definition.getIndexName(), definition.getTypeName());
}
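// getBulkProcessor(...) is not shown in this excerpt. A plausible sketch of how the
// logged parameters map onto Elasticsearch's stock BulkProcessor builder; this is an
// assumption for illustration, not necessarily the plugin's actual implementation
// ('listener' is a hypothetical BulkProcessor.Listener for bulk lifecycle callbacks):
BulkProcessor processor = BulkProcessor.builder(esClient, listener)
        .setBulkActions(definition.getBulk().getBulkActions())
        .setConcurrentRequests(definition.getBulk().getConcurrentRequests())
        .setFlushInterval(definition.getBulk().getFlushInterval())
        .setBulkSize(definition.getBulk().getBulkSize())
        .build();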
document.put("id", objectId); document.put("_index", definition.getIndexName()); document.put("_type", type); document.put("operation", operation.getValue());
// Reconstructed fragment: the original try/catch structure is inferred from the
// orphaned braces in this excerpt.

// Tail of the startup logger.info(...) call; its format string is truncated here.
        definition.isMongoSecondaryReadPreference(), definition.isDropCollection(), definition.getIncludeCollection(),
        definition.getThrottleSize(), definition.isMongoGridFS(), definition.getMongoOplogFilter(), definition.getMongoDb(),
        definition.getMongoCollection(), definition.getScript(), definition.getIndexName(), definition.getTypeName());

// Create the target index if it does not already exist.
try {
    if (!esClient.admin().indices().prepareExists(definition.getIndexName()).get().isExists()) {
        esClient.admin().indices().prepareCreate(definition.getIndexName()).get();
    }
} catch (Exception e) {
    logger.error("failed to create index [{}], disabling river...", e, definition.getIndexName());
    return;
}

// For GridFS rivers, install the attachment mapping explicitly.
try {
    logger.debug("Set explicit attachment mapping.");
    esClient.admin().indices().preparePutMapping(definition.getIndexName()).setType(definition.getTypeName())
            .setSource(getGridFSMapping()).get();
} catch (Exception e) {
    // Handler body truncated in this excerpt; mapping failures are presumably logged and non-fatal.
}

// Start the single indexer thread that drains the stream into bulk requests.
indexerThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
        "mongodb_river_indexer:" + definition.getIndexName()).newThread(new Indexer(MongoDBRiver.this));
indexerThread.start();

// One oplog tailer ("slurper") per shard, named after the shard and the target index.
MongoClient mongoClient = mongoClientService.getMongoShardClient(definition, shard.getReplicas());
Thread tailerThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
        "mongodb_river_slurper_" + shard.getName() + ":" + definition.getIndexName())
        .newThread(new OplogSlurper(MongoDBRiver.this, shardSlurperStartTimestamp, mongoClusterClient, mongoClient));
tailerThreads.add(tailerThread);
// Reconstructed fragment: the body between the two refresh updates is elided in this excerpt.
try {
    if (definition.isDisableIndexRefresh()) {
        // Disable refresh (-1) while the bulk import runs, to speed up indexing...
        updateIndexRefresh(definition.getIndexName(), -1L);
    }
    // ... (import work and error handling elided) ...
    // ...then restore the default 1s refresh interval afterwards.
    updateIndexRefresh(definition.getIndexName(), TimeValue.timeValueSeconds(1));
    // (catch/finally truncated in this excerpt)
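// updateIndexRefresh(...) is likewise not shown. A minimal sketch, assuming it wraps
// the standard index-settings update API; the implementation below is an assumption,
// only the helper's name and call sites come from this excerpt:
private void updateIndexRefresh(String name, Object refreshInterval) {
    esClient.admin().indices().prepareUpdateSettings(name)
            .setSettings(ImmutableSettings.settingsBuilder()
                    .put("index.refresh_interval", String.valueOf(refreshInterval))
                    .build())
            .get();
}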
// Two call sites of updateBulkRequest; the opening try blocks are inferred from the
// orphaned catch clauses in this excerpt.
try {
    // Variant without a document id (no _id could be extracted from the entry).
    updateBulkRequest(entry.getData(), null, operation, definition.getIndexName(), type, null, null);
} catch (IOException ioEx) {
    logger.error("Update bulk failed.", ioEx);
}

try {
    // Variant with an explicit document id.
    updateBulkRequest(entry.getData(), objectId, operation, definition.getIndexName(), type, null, null);
} catch (IOException ioEx) {
    logger.error("Update bulk failed.", ioEx);
}