// Launch the river's startup logic on a dedicated daemon thread, named after the
// target index so it is identifiable in thread dumps. Daemon so it never blocks
// JVM shutdown.
startupThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
        "mongodb_river_startup:" + definition.getIndexName()).newThread(startupRunnable);
startupThread.start();
@Override public void start() { // http://stackoverflow.com/questions/5270611/read-maven-properties-file-inside-jar-war-file logger.info("{} - {}", DESCRIPTION, MongoDBHelper.getRiverVersion()); Status status = MongoDBRiverHelper.getRiverStatus(esClient, riverName.getName()); if (status == Status.IMPORT_FAILED || status == Status.INITIAL_IMPORT_FAILED || status == Status.SCRIPT_IMPORT_FAILED || status == Status.START_FAILED) { logger.error("Cannot start. Current status is {}", status); return; } if (status == Status.STOPPED) { // Leave the current status of the river alone, but set the context status to 'stopped'. // Enabling the river via REST will trigger the actual start. context.setStatus(Status.STOPPED); logger.info("River is currently disabled and will not be started"); } else { // Mark the current status as "waiting for full start" context.setStatus(Status.START_PENDING); // Request start of the river in the next iteration of the status thread MongoDBRiverHelper.setRiverStatus(esClient, riverName.getName(), Status.RUNNING); logger.info("Startup pending"); } statusThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mongodb_river_status:" + definition.getIndexName()).newThread( new StatusChecker(this, definition, context)); statusThread.start(); }
// Start the indexer on its own daemon thread, then create (but do not yet start)
// one oplog-tailing thread for the current shard. NOTE(review): this span appears
// to sit inside a per-shard loop — confirm tailerThreads are started later.
indexerThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
        "mongodb_river_indexer:" + definition.getIndexName()).newThread(new Indexer(MongoDBRiver.this));
indexerThread.start();
// Dedicated client for this shard's replica set.
MongoClient mongoClient = mongoClientService.getMongoShardClient(definition, shard.getReplicas());
// Thread name embeds the shard name so each slurper is identifiable in dumps.
Thread tailerThread = EsExecutors.daemonThreadFactory(
        settings.globalSettings(),
        "mongodb_river_slurper_" + shard.getName() + ":" + definition.getIndexName()
).newThread(new OplogSlurper(MongoDBRiver.this, shardSlurperStartTimestamp, mongoClusterClient, mongoClient));
tailerThreads.add(tailerThread);
/**
 * Creates (but does not start) a daemon thread for indexing work.
 * Daemon status ensures pending indexing never blocks JVM shutdown.
 */
@Override
public Thread acquireIndexingThread(String threadName, Runnable runnable) {
    Thread indexingThread = EsExecutors
            .daemonThreadFactory(settings.globalSettings(), threadName)
            .newThread(runnable);
    return indexingThread;
}
/**
 * Builds a logger scoped to the given class and tagged with this river's name,
 * so log lines from multiple river instances can be told apart.
 */
@Override
public ESLogger createLogger(Class<?> clazz) {
    ESLogger riverLogger = Loggers.getLogger(clazz, settings.globalSettings(), riverName);
    return riverLogger;
}
/**
 * Starts the Kafka river: builds the worker that shuttles messages from the
 * Kafka consumer to the Elasticsearch producer and runs it on a daemon thread.
 * Any startup failure is logged and rethrown as a RuntimeException.
 */
@Override
public void start() {
    try {
        logger.debug("Index: {}: Starting Kafka River...", riverConfig.getIndexName());
        final KafkaWorker worker =
                new KafkaWorker(kafkaConsumer, elasticsearchProducer, riverConfig, stats);
        // Daemon thread so a running worker cannot prevent JVM shutdown.
        thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "Kafka River Worker")
                .newThread(worker);
        thread.start();
    } catch (Exception ex) {
        logger.error("Index: {}: Unexpected Error occurred", ex, riverConfig.getIndexName());
        throw new RuntimeException(ex);
    }
}
@Override public void start() { logger.info("starting mysql stream: {}://{}:{} (mysql://{}:{}@{}:{}/{}) indexing to [{}]/[{}]", mysqlStreamProtocol, mysqlStreamHost, mysqlStreamPort, mysqlUser, mysqlPassword, mysqlHost, mysqlPort, mysqlDb, indexName, typeName); try { client.admin().indices().prepareCreate(indexName).execute().actionGet(); } catch (Exception e) { if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) { // that's fine } else if (ExceptionsHelper.unwrapCause(e) instanceof ClusterBlockException) { // ok, not recovered yet..., lets start indexing and hope we recover by the first bulk // TODO: a smarter logic can be to register for cluster event listener here, and only start sampling when the block is removed... } else { logger.warn("failed to create index [{}], disabling river...", e, indexName); return; } } slurperThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mysql_river_slurper").newThread(new Slurper()); indexerThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mysql_river_indexer").newThread(new Indexer()); indexerThread.start(); slurperThread.start(); }
@Override public void start() { if(logger.isInfoEnabled()) logger.info("Starting Redis River stream"); // Next, we'll try to connect our redis pool try { this.jedisPool = new JedisPool(new JedisPoolConfig(), this.redisHost, this.redisPort, 0, this.redisPsw, this.redisDB); } catch (Exception e) { // We can't connect to redis for some reason, so // let's not even try to finish this. logger.error("Unable to allocate redis pool. Disabling River."); return; } currentRequest = client.prepareBulk(); if(redisMode.equalsIgnoreCase("list")){ thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "redis_listener").newThread(new RedisListRunner()); } else if (redisMode.equalsIgnoreCase("pubsub")){ //thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "redis_listener").newThread(new RedisPubSubRunner()); logger.error("PubSub mode not implemented yet. Switch to list mode."); return; } else { logger.error("Invalid redis river mode specified. Please check your river settings and try again."); return; } thread.start(); }
/**
 * Guice provider for the WAL-reader thread factory. Singleton so every injection
 * point shares one factory; threads are daemons and carry the river-specific name.
 */
@Provides
@Singleton
@Named("arangodb_river_walReaderRunnable_threadfactory")
public ThreadFactory getSlurperThreadFactory(RiverSettings settings) {
    ThreadFactory walReaderFactory =
            EsExecutors.daemonThreadFactory(settings.globalSettings(), "arangodb_river_walreader");
    return walReaderFactory;
}
/**
 * Guice provider for the index-writer thread factory. Singleton so every injection
 * point shares one factory; threads are daemons and carry the river-specific name.
 */
@Provides
@Singleton
@Named("arangodb_river_indexWriterRunnable_threadfactory")
public ThreadFactory getIndexerThreadFactory(RiverSettings settings) {
    ThreadFactory indexerFactory =
            EsExecutors.daemonThreadFactory(settings.globalSettings(), "arangodb_river_indexer");
    return indexerFactory;
}
}
// One daemon slurper thread per configured mailbox, numbered so each is
// identifiable in thread dumps.
// NOTE(review): threadNumber is never incremented in this visible span — confirm
// it is bumped later in the loop body, otherwise every thread is named "email_slurper_0".
int threadNumber = 0;
for (EmailRiverConfig riverConfig : riverConfigs) {
    Thread thread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "email_slurper_" + threadNumber)
            .newThread(new EmailParser(riverConfig));
    thread.start();
// Run the MySQL parser on a named daemon thread so it never blocks JVM shutdown.
this.thread = EsExecutors.daemonThreadFactory(this.settings.globalSettings(), "mysql_slurper")
        .newThread(new Parser());
this.thread.start();
// Run the HBase parser on a named daemon thread; the enclosing object is installed
// as the uncaught-exception handler so parser crashes are reported rather than lost.
final Thread t = EsExecutors.daemonThreadFactory(this.settings.globalSettings(), "hbase_slurper")
        .newThread(this.parser);
t.setUncaughtExceptionHandler(this);
t.start();
// Launch the S3 bucket scanner for this feed on a named daemon thread.
feedThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "fs_slurper")
        .newThread(new S3Scanner(feedDefinition));
feedThread.start();