startupThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mongodb_river_startup:" + definition.getIndexName())
        .newThread(startupRunnable);
startupThread.start();

@Override
public void start() {
    // http://stackoverflow.com/questions/5270611/read-maven-properties-file-inside-jar-war-file
    logger.info("{} - {}", DESCRIPTION, MongoDBHelper.getRiverVersion());
    Status status = MongoDBRiverHelper.getRiverStatus(esClient, riverName.getName());
    if (status == Status.IMPORT_FAILED || status == Status.INITIAL_IMPORT_FAILED || status == Status.SCRIPT_IMPORT_FAILED
            || status == Status.START_FAILED) {
        logger.error("Cannot start. Current status is {}", status);
        return;
    }

    if (status == Status.STOPPED) {
        // Leave the current status of the river alone, but set the context status to 'stopped'.
        // Enabling the river via REST will trigger the actual start.
        context.setStatus(Status.STOPPED);
        logger.info("River is currently disabled and will not be started");
    } else {
        // Mark the current status as "waiting for full start".
        context.setStatus(Status.START_PENDING);
        // Request start of the river in the next iteration of the status thread.
        MongoDBRiverHelper.setRiverStatus(esClient, riverName.getName(), Status.RUNNING);
        logger.info("Startup pending");
    }

    statusThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mongodb_river_status:" + definition.getIndexName())
            .newThread(new StatusChecker(this, definition, context));
    statusThread.start();
}

public static ThreadFactory daemonThreadFactory(String nodeName, String namePrefix) {
    assert nodeName != null && false == nodeName.isEmpty();
    return daemonThreadFactory(threadName(nodeName, namePrefix));
}

public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) {
    return daemonThreadFactory(threadName(settings, namePrefix));
}

public static ThreadFactory daemonThreadFactory(Settings settings, String... names) {
    return daemonThreadFactory(threadName(settings, names));
}

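// The overloads above differ only in how the thread-name prefix is derived. Below is a minimal,
// self-contained usage sketch (not from the source), assuming the elasticsearch core jar is on the
// classpath; the class name DaemonFactoryDemo and the "demo_worker" prefix are illustrative, and the
// exact printed thread-name format varies by Elasticsearch version.
import java.util.concurrent.ThreadFactory;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;

public class DaemonFactoryDemo {
    public static void main(String[] args) throws InterruptedException {
        // Every thread produced by this factory is a daemon and carries the ES-style name prefix.
        ThreadFactory factory = EsExecutors.daemonThreadFactory(Settings.EMPTY, "demo_worker");
        Thread t = factory.newThread(() ->
                System.out.println(Thread.currentThread().getName() + " daemon=" + Thread.currentThread().isDaemon()));
        t.start();
        t.join();
    }
}
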
indexerThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mongodb_river_indexer:" + definition.getIndexName())
        .newThread(new Indexer(MongoDBRiver.this));
indexerThread.start();

Timestamp shardSlurperStartTimestamp = slurperStartTimestamp != null ? slurperStartTimestamp : shard.getLatestOplogTimestamp();
MongoClient mongoClient = mongoClientService.getMongoShardClient(definition, shard.getReplicas());
Thread tailerThread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
        "mongodb_river_slurper_" + shard.getName() + ":" + definition.getIndexName())
        .newThread(new OplogSlurper(MongoDBRiver.this, shardSlurperStartTimestamp, mongoClusterClient, mongoClient));

@Override
protected synchronized void doStart() {
    Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
    Objects.requireNonNull(state.get(), "please set initial state before starting");
    addListener(localNodeMasterListeners);
    threadPoolExecutor = EsExecutors.newSinglePrioritizing(
            nodeName + "/" + CLUSTER_UPDATE_THREAD_NAME,
            daemonThreadFactory(nodeName, CLUSTER_UPDATE_THREAD_NAME),
            threadPool.getThreadContext(),
            threadPool.scheduler());
}

@Override
protected void doStop() {
    ExecutorService indicesStopExecutor =
            Executors.newFixedThreadPool(5, EsExecutors.daemonThreadFactory(settings, "indices_shutdown"));

    // Copy indices because we modify it asynchronously in the body of the loop
    final Set<Index> indices = this.indices.values().stream().map(s -> s.index()).collect(Collectors.toSet());
    final CountDownLatch latch = new CountDownLatch(indices.size());
    for (final Index index : indices) {
        indicesStopExecutor.execute(() -> {
            try {
                removeIndex(index, IndexRemovalReason.NO_LONGER_ASSIGNED, "shutdown");
            } finally {
                latch.countDown();
            }
        });
    }
    try {
        if (latch.await(shardsClosedTimeout.seconds(), TimeUnit.SECONDS) == false) {
            logger.warn("Not all shards are closed yet, waited {}sec - stopping service", shardsClosedTimeout.seconds());
        }
    } catch (InterruptedException e) {
        // ignore
    } finally {
        indicesStopExecutor.shutdown();
    }
}

@Override
protected synchronized void doStart() {
    Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
    Objects.requireNonNull(clusterStateSupplier, "please set a cluster state supplier before starting");
    threadPoolExecutor = EsExecutors.newSinglePrioritizing(
            nodeName + "/" + MASTER_UPDATE_THREAD_NAME,
            daemonThreadFactory(nodeName, MASTER_UPDATE_THREAD_NAME),
            threadPool.getThreadContext(),
            threadPool.scheduler());
    taskBatcher = new Batcher(logger, threadPoolExecutor);
}

static ScheduledThreadPoolExecutor initScheduler(Settings settings) {
    ScheduledThreadPoolExecutor scheduler =
            new ScheduledThreadPoolExecutor(1, EsExecutors.daemonThreadFactory(settings, "scheduler"), new EsAbortPolicy());
    scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
    scheduler.setRemoveOnCancelPolicy(true);
    return scheduler;
}

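// A usage sketch (not from the source) that builds a scheduler the same way initScheduler does and
// drives it like any ScheduledThreadPoolExecutor. The delays and task body are illustrative.
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsAbortPolicy;
import org.elasticsearch.common.util.concurrent.EsExecutors;

public class SchedulerDemo {
    public static void main(String[] args) throws InterruptedException {
        // Same construction as initScheduler above: one daemon worker, ES abort policy on saturation.
        ScheduledThreadPoolExecutor scheduler = new ScheduledThreadPoolExecutor(1,
                EsExecutors.daemonThreadFactory(Settings.EMPTY, "scheduler"), new EsAbortPolicy());
        scheduler.setRemoveOnCancelPolicy(true);

        scheduler.schedule(() -> System.out.println("tick"), 100, TimeUnit.MILLISECONDS);
        Thread.sleep(200);   // give the daemon worker time to fire before the JVM exits
        scheduler.shutdown();
    }
}
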
@Override
ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final ThreadContext threadContext) {
    int size = settings.size;
    int queueSize = settings.queueSize;
    final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
    final ExecutorService executor =
            EsExecutors.newFixed(settings.nodeName + "/" + name(), size, queueSize, threadFactory, threadContext);
    final String name;
    if ("write".equals(name()) && Booleans.parseBoolean(System.getProperty("es.thread_pool.write.use_bulk_as_display_name", "false"))) {
        name = "bulk";
    } else {
        name = name();
    }
    final ThreadPool.Info info = new ThreadPool.Info(name, ThreadPool.ThreadPoolType.FIXED, size, size, null,
            queueSize < 0 ? null : new SizeValue(queueSize));
    return new ThreadPool.ExecutorHolder(executor, info);
}

public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
                      UnicastHostsProvider unicastHostsProvider, PingContextProvider contextProvider) {
    this.threadPool = threadPool;
    this.transportService = transportService;
    this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
    this.hostsProvider = unicastHostsProvider;
    this.contextProvider = contextProvider;
    final int concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);
    resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings);
    nodeName = Node.NODE_NAME_SETTING.get(settings);
    logger.debug("using concurrent_connects [{}], resolve_timeout [{}]", concurrentConnects, resolveTimeout);

    transportService.registerRequestHandler(ACTION_NAME, ThreadPool.Names.SAME, UnicastPingRequest::new,
            new UnicastPingRequestHandler());

    final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]");
    unicastZenPingExecutorService = EsExecutors.newScaling(
            nodeName + "/" + "unicast_connect", 0, concurrentConnects, 60, TimeUnit.SECONDS, threadFactory,
            threadPool.getThreadContext());
}

ThreadPool.ExecutorHolder build(final ScalingExecutorSettings settings, final ThreadContext threadContext) {
    TimeValue keepAlive = settings.keepAlive;
    int core = settings.core;
    int max = settings.max;
    final ThreadPool.Info info = new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.SCALING, core, max, keepAlive, null);
    final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
    final ExecutorService executor = EsExecutors.newScaling(settings.nodeName + "/" + name(), core, max,
            keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory, threadContext);
    return new ThreadPool.ExecutorHolder(executor, info);
}

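// A sketch (not from the source) of a scaling pool built with the same EsExecutors.newScaling call
// used above and in UnicastZenPing, outside of any builder; the bounds, keep-alive, and the
// "demo_scaling" name are illustrative.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;

public class ScalingPoolDemo {
    public static void main(String[] args) throws InterruptedException {
        // Scales between 0 and 4 daemon workers; idle threads are reclaimed after 30s.
        ExecutorService executor = EsExecutors.newScaling("demo_scaling", 0, 4, 30, TimeUnit.SECONDS,
                EsExecutors.daemonThreadFactory("demo_scaling"), new ThreadContext(Settings.EMPTY));
        executor.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}
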
@Override
ThreadPool.ExecutorHolder build(final AutoExecutorSettings settings, final ThreadContext threadContext) {
    int size = settings.size;
    int initialQueueSize = settings.initialQueueSize;
    int minQueueSize = settings.minQueueSize;
    int maxQueueSize = settings.maxQueueSize;
    int frameSize = settings.frameSize;
    TimeValue targetedResponseTime = settings.targetedResponseTime;
    final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
    final ExecutorService executor = EsExecutors.newAutoQueueFixed(settings.nodeName + "/" + name(), size,
            initialQueueSize, minQueueSize, maxQueueSize, frameSize, targetedResponseTime, threadFactory, threadContext);
    // TODO: in a subsequent change we hope to extend ThreadPool.Info to be more specific for the thread pool type
    final ThreadPool.Info info = new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.FIXED_AUTO_QUEUE_SIZE,
            size, size, null, new SizeValue(initialQueueSize));
    return new ThreadPool.ExecutorHolder(executor, info);
}

public MockTcpTransport(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
                        CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry,
                        NetworkService networkService, Version mockVersion) {
    super("mock-tcp-transport", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService);
    // We use our own cached thread pool, which is not bounded at all. Using the ES thread factory
    // here is crucial for tests; otherwise disruption tests won't block that thread.
    executor = Executors.newCachedThreadPool(EsExecutors.daemonThreadFactory(settings, Transports.TEST_MOCK_TRANSPORT_THREAD_PREFIX));
    this.mockVersion = mockVersion;
}

@Override
protected synchronized void doStart() {
    Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
    Objects.requireNonNull(clusterStateSupplier, "please set a cluster state supplier before starting");
    threadPoolExecutor = EsExecutors.newSinglePrioritizing(
            nodeName() + "/" + MASTER_UPDATE_THREAD_NAME,
            daemonThreadFactory(settings, MASTER_UPDATE_THREAD_NAME),
            threadPool.getThreadContext(),
            threadPool.scheduler());
    taskBatcher = new Batcher(logger, threadPoolExecutor);
}

@Provides @Singleton @Named("arangodb_river_walReaderRunnable_threadfactory") public ThreadFactory getSlurperThreadFactory(RiverSettings settings) { return EsExecutors.daemonThreadFactory(settings.globalSettings(), "arangodb_river_walreader"); }
@Provides @Singleton @Named("arangodb_river_indexWriterRunnable_threadfactory") public ThreadFactory getIndexerThreadFactory(RiverSettings settings) { return EsExecutors.daemonThreadFactory(settings.globalSettings(), "arangodb_river_indexer"); } }
@Override
ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final ThreadContext threadContext) {
    int size = settings.size;
    int queueSize = settings.queueSize;
    final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
    final ExecutorService executor = EsExecutors.newFixed(name(), size, queueSize, threadFactory, threadContext);
    final ThreadPool.Info info = new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.FIXED, size, size, null,
            queueSize < 0 ? null : new SizeValue(queueSize));
    return new ThreadPool.ExecutorHolder(executor, info);
}

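// For completeness, a sketch (not from the source) of exercising a fixed pool built with the same
// EsExecutors.newFixed call, outside of ThreadPool; the pool size, queue size, and the "demo_fixed"
// name are illustrative.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;

public class FixedPoolDemo {
    public static void main(String[] args) throws InterruptedException {
        ThreadFactory threadFactory = EsExecutors.daemonThreadFactory("demo_fixed");
        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        // Two workers with a bounded queue of 10; newFixed rejects work once the queue is full.
        ExecutorService executor = EsExecutors.newFixed("demo_fixed", 2, 10, threadFactory, threadContext);
        for (int i = 0; i < 4; i++) {
            final int task = i;
            executor.execute(() -> System.out.println(Thread.currentThread().getName() + " ran task " + task));
        }
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}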