// NOTE(review): fragment — the braces opened here are closed outside this view.
int mongoPort;
// Pull the river-specific settings sub-map keyed by the river type ("mongodb").
if (settings.settings().containsKey(MongoDBRiver.TYPE)) {
    Map<String, Object> mongoSettings = (Map<String, Object>) settings.settings().get(MongoDBRiver.TYPE);
    if (mongoSettings.containsKey(SERVERS_FIELD)) {
        // Raw servers entry; presumably parsed into host/port pairs further down — not visible here.
        Object mongoServersSettings = mongoSettings.get(SERVERS_FIELD);
        // Optional "index" section overrides the destination index/type;
        // both default to the configured MongoDB database name.
        if (settings.settings().containsKey(INDEX_OBJECT)) {
            Map<String, Object> indexSettings = (Map<String, Object>) settings.settings().get(INDEX_OBJECT);
            builder.indexName(XContentMapValues.nodeStringValue(indexSettings.get(NAME_FIELD), builder.mongoDb));
            builder.typeName(XContentMapValues.nodeStringValue(indexSettings.get(TYPE_FIELD), builder.mongoDb));
// Run the river bootstrap on a named daemon thread so it cannot block node shutdown;
// the thread name embeds the target index for easier thread-dump diagnostics.
startupThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
        "mongodb_river_startup:" + definition.getIndexName()).newThread(startupRunnable);
startupThread.start();
// Rebuild a river definition from a stored river document fetched from the river index.
Map<String, Object> source = new HashMap<String, Object>();
// The river's name doubles as the document type within the river index.
String riverName = hit.getType();
RiverSettings riverSettings = new RiverSettings(null, hit.getSource());
// NOTE(review): "source" is never used in this visible span — confirm it is consumed later,
// otherwise it is dead code.
MongoDBRiverDefinition definition = MongoDBRiverDefinition.parseSettings(riverName, riverIndexName, riverSettings, null);
/**
 * Reads one key out of the river's "mysql" settings section.
 *
 * @param config       name of the setting to look up
 * @param defaultValue value returned when the section or the key is absent
 * @return the configured value, or {@code defaultValue}
 */
@SuppressWarnings({ "unchecked" })
private String readConfig(final String config, final String defaultValue) {
    // Guard clause: no "mysql" section at all means every lookup falls back.
    if (!this.settings.settings().containsKey("mysql")) {
        return defaultValue;
    }
    final Map<String, Object> section = (Map<String, Object>) this.settings.settings().get("mysql");
    return XContentMapValues.nodeStringValue(section.get(config), defaultValue);
}
@Override public void start() { // http://stackoverflow.com/questions/5270611/read-maven-properties-file-inside-jar-war-file logger.info("{} - {}", DESCRIPTION, MongoDBHelper.getRiverVersion()); Status status = MongoDBRiverHelper.getRiverStatus(esClient, riverName.getName()); if (status == Status.IMPORT_FAILED || status == Status.INITIAL_IMPORT_FAILED || status == Status.SCRIPT_IMPORT_FAILED || status == Status.START_FAILED) { logger.error("Cannot start. Current status is {}", status); return; } if (status == Status.STOPPED) { // Leave the current status of the river alone, but set the context status to 'stopped'. // Enabling the river via REST will trigger the actual start. context.setStatus(Status.STOPPED); logger.info("River is currently disabled and will not be started"); } else { // Mark the current status as "waiting for full start" context.setStatus(Status.START_PENDING); // Request start of the river in the next iteration of the status thread MongoDBRiverHelper.setRiverStatus(esClient, riverName.getName(), Status.RUNNING); logger.info("Startup pending"); } statusThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mongodb_river_status:" + definition.getIndexName()).newThread( new StatusChecker(this, definition, context)); statusThread.start(); }
/**
 * Fetches the value of an optional configuration entry from this river's
 * settings section ({@code CONFIG_SPACE}).
 *
 * @param config       key of the configuration entry to fetch
 * @param defaultValue value used when the section or the key is missing
 * @return the configured value, or {@code defaultValue} when absent
 */
@SuppressWarnings({ "unchecked" })
private String readConfig(final String config, final String defaultValue) {
    // No river-specific section configured at all: everything falls back.
    if (!this.settings.settings().containsKey(CONFIG_SPACE)) {
        return defaultValue;
    }
    final Map<String, Object> section = (Map<String, Object>) this.settings.settings().get(CONFIG_SPACE);
    return XContentMapValues.nodeStringValue(section.get(config), defaultValue);
}
// One indexer thread drains the shared stream for the whole river.
indexerThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
        "mongodb_river_indexer:" + definition.getIndexName()).newThread(new Indexer(MongoDBRiver.this));
indexerThread.start();
// Per-shard client: each shard's oplog is tailed independently.
MongoClient mongoClient = mongoClientService.getMongoShardClient(definition, shard.getReplicas());
// Thread name encodes the shard for thread-dump diagnostics. Note the thread is
// collected here but not started in this visible span — presumably started later.
Thread tailerThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
        "mongodb_river_slurper_" + shard.getName() + ":" + definition.getIndexName())
        .newThread(new OplogSlurper(MongoDBRiver.this, shardSlurperStartTimestamp, mongoClusterClient, mongoClient));
tailerThreads.add(tailerThread);
/**
 * Resolves a (presumably dot-separated — confirm with XContentMapValues.extractValue)
 * path against the raw river settings map.
 *
 * @param path settings path to resolve
 * @return the value at {@code path}, or whatever extractValue returns when absent
 */
public Object extract(String path) {
    return extractValue(path, rs.settings());
}
}
/**
 * Creates a logger for {@code clazz}, scoped with the node-wide settings
 * and tagged with this river's name.
 */
@Override
public ESLogger createLogger(Class<?> clazz) {
    return Loggers.getLogger(clazz, this.settings.globalSettings(), this.riverName);
}
/**
 * Public constructor used by ElasticSearch.
 *
 * @param riverName name assigned to this river instance
 * @param settings  raw river settings as stored in the river index
 * @param client    client used to talk to the ElasticSearch cluster
 * @throws MalformedURLException declared by {@link #configure} — presumably
 *         thrown when the configured JIRA URL is malformed; confirm there.
 */
@Inject
public JiraRiver(RiverName riverName, RiverSettings settings, Client client) throws MalformedURLException {
    super(riverName, settings);
    this.client = client;
    // Parse the river-specific configuration out of the raw settings map.
    configure(settings.settings());
}
/**
 * Creates — but does not start — a daemon thread named {@code threadName}
 * that will run the given indexing task.
 *
 * @param threadName name to assign to the new thread
 * @param runnable   indexing task the thread will execute
 * @return the newly created, unstarted daemon thread
 */
@Override
public Thread acquireIndexingThread(String threadName, Runnable runnable) {
    return EsExecutors
            .daemonThreadFactory(this.settings.globalSettings(), threadName)
            .newThread(runnable);
}
/**
 * Constructor invoked by ElasticSearch's river mechanism.
 *
 * Required settings (section "github"): "owner" and "repository".
 * Optional: "interval" (minutes between polls — TODO confirm unit; default 60),
 * "authentication" with "username"/"password", and "endpoint"
 * (defaults to the public GitHub API).
 *
 * @param riverName name assigned to this river instance
 * @param settings  raw river settings as stored in the river index
 * @param client    client used to talk to the ElasticSearch cluster
 * @throws IllegalArgumentException when the "github" section, the owner,
 *         or the repository is missing
 */
@SuppressWarnings({"unchecked"})
@Inject
public GitHubRiver(RiverName riverName, RiverSettings settings, Client client) {
    super(riverName, settings);
    this.client = client;

    if (!settings.settings().containsKey("github")) {
        throw new IllegalArgumentException("Need river settings - owner and repository.");
    }

    // get settings
    Map<String, Object> githubSettings = (Map<String, Object>) settings.settings().get("github");
    owner = XContentMapValues.nodeStringValue(githubSettings.get("owner"), null);
    repository = XContentMapValues.nodeStringValue(githubSettings.get("repository"), null);
    // Fail fast: the original only checked that the "github" section exists, so a
    // section missing either key silently produced the index name "null&null".
    if (owner == null || repository == null) {
        throw new IllegalArgumentException("Need river settings - owner and repository.");
    }

    // NOTE(review): '&' is not a legal character in index names on newer
    // ElasticSearch versions — confirm against the targeted ES release.
    index = String.format("%s&%s", owner, repository);
    userRequestedInterval = XContentMapValues.nodeIntegerValue(githubSettings.get("interval"), 60);

    // auth (optional)
    username = null;
    password = null;
    if (githubSettings.containsKey("authentication")) {
        Map<String, Object> auth = (Map<String, Object>) githubSettings.get("authentication");
        username = XContentMapValues.nodeStringValue(auth.get("username"), null);
        password = XContentMapValues.nodeStringValue(auth.get("password"), null);
    }

    // endpoint (optional - default to github.com)
    endpoint = XContentMapValues.nodeStringValue(githubSettings.get("endpoint"), "https://api.github.com");

    logger.info("Created GitHub river.");
}
/**
 * Starts the Kafka river: spins up a single daemon worker thread that
 * pipes messages from the Kafka consumer into the ElasticSearch producer.
 * Any startup failure is logged and rethrown as a RuntimeException.
 */
@Override
public void start() {
    try {
        logger.debug("Index: {}: Starting Kafka River...", riverConfig.getIndexName());
        final KafkaWorker worker = new KafkaWorker(kafkaConsumer, elasticsearchProducer, riverConfig, stats);
        thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "Kafka River Worker").newThread(worker);
        thread.start();
    } catch (Exception ex) {
        logger.error("Index: {}: Unexpected Error occurred", ex, riverConfig.getIndexName());
        throw new RuntimeException(ex);
    }
}
this.client = client;
// Read the "cassandra" section of the river settings.
if (riverSettings.settings().containsKey("cassandra")) {
    // NOTE(review): local is named couchSettings but holds cassandra settings —
    // looks like a copy/paste from the CouchDB river; consider renaming.
    // NOTE(review): the guard checks riverSettings but the read goes through
    // `settings` — confirm both reference the same RiverSettings instance.
    @SuppressWarnings("unchecked")
    Map<String, Object> couchSettings = (Map<String, Object>) settings.settings().get("cassandra");
    this.clusterName = XContentMapValues.nodeStringValue(couchSettings.get("cluster_name"), "DEFAULT_CLUSTER");
    this.keyspace = XContentMapValues.nodeStringValue(couchSettings.get("keyspace"), "DEFAULT_KS");
    // Optional "index" section overrides the destination index/type names.
    if (riverSettings.settings().containsKey("index")) {
        // NOTE(review): as flattened here this re-declares couchSettings inside a
        // scope that already defines it, which does not compile in Java — confirm
        // the actual block nesting in the full file.
        @SuppressWarnings("unchecked")
        Map<String, Object> couchSettings = (Map<String, Object>) settings.settings().get("index");
        this.indexName = XContentMapValues.nodeStringValue(couchSettings.get("index"), "DEFAULT_INDEX_NAME");
        this.typeName = XContentMapValues.nodeStringValue(couchSettings.get("type"), "DEFAULT_TYPE_NAME");
@Override public void start() { logger.info("starting mysql stream: {}://{}:{} (mysql://{}:{}@{}:{}/{}) indexing to [{}]/[{}]", mysqlStreamProtocol, mysqlStreamHost, mysqlStreamPort, mysqlUser, mysqlPassword, mysqlHost, mysqlPort, mysqlDb, indexName, typeName); try { client.admin().indices().prepareCreate(indexName).execute().actionGet(); } catch (Exception e) { if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) { // that's fine } else if (ExceptionsHelper.unwrapCause(e) instanceof ClusterBlockException) { // ok, not recovered yet..., lets start indexing and hope we recover by the first bulk // TODO: a smarter logic can be to register for cluster event listener here, and only start sampling when the block is removed... } else { logger.warn("failed to create index [{}], disabling river...", e, indexName); return; } } slurperThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mysql_river_slurper").newThread(new Slurper()); indexerThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mysql_river_indexer").newThread(new Indexer()); indexerThread.start(); slurperThread.start(); }
// Connection settings for the Redis source (section "redis").
if (settings.settings().containsKey("redis")) {
    Map<String, Object> redisSettings = (Map<String, Object>) settings.settings().get("redis");
    redisHost = XContentMapValues.nodeStringValue(redisSettings.get("host"), "localhost");
    redisPort = XContentMapValues.nodeIntegerValue(redisSettings.get("port"), 6379);
    // Optional bulk-indexing tunables (section "index").
    if (settings.settings().containsKey("index")) {
        Map<String, Object> indexSettings = (Map<String, Object>) settings.settings().get("index");
        bulkSize = XContentMapValues.nodeIntegerValue(indexSettings.get("bulk_size"), 100);
        // NOTE(review): unit of bulk_timeout is not visible here (seconds?) — confirm where it is consumed.
        bulkTimeout = XContentMapValues.nodeIntegerValue(indexSettings.get("bulk_timeout"), 5);
@Override public void start() { if(logger.isInfoEnabled()) logger.info("Starting Redis River stream"); // Next, we'll try to connect our redis pool try { this.jedisPool = new JedisPool(new JedisPoolConfig(), this.redisHost, this.redisPort, 0, this.redisPsw, this.redisDB); } catch (Exception e) { // We can't connect to redis for some reason, so // let's not even try to finish this. logger.error("Unable to allocate redis pool. Disabling River."); return; } currentRequest = client.prepareBulk(); if(redisMode.equalsIgnoreCase("list")){ thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "redis_listener").newThread(new RedisListRunner()); } else if (redisMode.equalsIgnoreCase("pubsub")){ //thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "redis_listener").newThread(new RedisPubSubRunner()); logger.error("PubSub mode not implemented yet. Switch to list mode."); return; } else { logger.error("Invalid redis river mode specified. Please check your river settings and try again."); return; } thread.start(); }
/**
 * Provides the singleton daemon-thread factory used to run the ArangoDB
 * WAL-reader ("slurper") task.
 *
 * @param settings river settings supplying the node-wide configuration
 * @return a daemon ThreadFactory whose threads are named "arangodb_river_walreader"
 */
@Provides
@Singleton
@Named("arangodb_river_walReaderRunnable_threadfactory")
public ThreadFactory getSlurperThreadFactory(RiverSettings settings) {
    final String threadName = "arangodb_river_walreader";
    return EsExecutors.daemonThreadFactory(settings.globalSettings(), threadName);
}
/**
 * Builds the Kafka river configuration from the raw river settings.
 * NOTE(review): fragment — the closing braces of these blocks lie outside this view.
 */
public RiverConfig(RiverName riverName, RiverSettings riverSettings) {
    // Kafka source settings (section "kafka").
    if (riverSettings.settings().containsKey("kafka")) {
        // NOTE(review): kafkaSettings is not consumed in this visible span —
        // confirm it is read further down in the full constructor.
        Map<String, Object> kafkaSettings = (Map<String, Object>) riverSettings.settings().get("kafka");
        // Destination settings: index defaults to the river's name, type to "status".
        if (riverSettings.settings().containsKey("index")) {
            Map<String, Object> indexSettings = (Map<String, Object>) riverSettings.settings().get("index");
            indexName = XContentMapValues.nodeStringValue(indexSettings.get(INDEX_NAME), riverName.name());
            typeName = XContentMapValues.nodeStringValue(indexSettings.get(MAPPING_TYPE), "status");
            // Optional statsd metrics reporting (section "statsd").
            if (riverSettings.settings().containsKey("statsd")) {
                Map<String, Object> statsdSettings = (Map<String, Object>) riverSettings.settings().get("statsd");
                statsdHost = XContentMapValues.nodeStringValue(statsdSettings.get(STATSD_HOST), "localhost");
                statsdPrefix = XContentMapValues.nodeStringValue(statsdSettings.get(STATSD_PREFIX), "kafka_river");