// NOTE(review): grep-style extract, truncated mid-statement — the filter lambda and the
// if/try bodies are cut off here; only the two concrete defects below were fixed.
// Build the node-level cluster settings registry, applying any registered setting upgraders.
this.clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()), clusterSettingUpgraders);
// Collect "index."-prefixed settings that are not known cluster settings.
// FIXED: the original contained a duplicated "&&" ("&& &&"), which does not compile.
Settings indexSettings = settings.filter((s) -> (s.startsWith("index.") && clusterSettings.get(s) == null);
if (indexSettings.isEmpty() == false) {
    try {
        // Fail fast on invalid node settings before constructing dependent services
        // (true presumably enables dependency validation — confirm against validate()).
        clusterSettings.validate(settings, true);
        // FIXED: SettingsFilter is constructed as (settings, pattern) elsewhere in this
        // codebase; the settings argument was missing here.
        this.settingsFilter = new SettingsFilter(settings, settingsFilterPattern);
/**
 * Subscribes this instance to dynamic updates of the script-related cluster settings.
 *
 * @param clusterSettings the cluster-wide settings registry to register the consumers on
 */
void registerClusterSettingsListeners(ClusterSettings clusterSettings) {
    // Keep the local limits in sync with dynamic cluster-setting changes.
    clusterSettings.addSettingsUpdateConsumer(SCRIPT_MAX_SIZE_IN_BYTES, this::setMaxSizeInBytes);
    clusterSettings.addSettingsUpdateConsumer(SCRIPT_MAX_COMPILATIONS_RATE, this::setMaxCompilationRate);
}
/**
 * Creates the cluster service: wires up the master service, operation routing and the
 * cluster applier service, and registers consumers for dynamic setting updates.
 * NOTE: {@code this.clusterSettings} is assigned before the update consumers are added
 * below — keep that order.
 *
 * @param settings                  the node settings
 * @param clusterSettings           the cluster-wide settings registry
 * @param threadPool                thread pool shared with the master/applier services
 * @param initialClusterStateCustoms suppliers for the initial cluster-state customs
 */
public ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool,
                      Map<String, Supplier<ClusterState.Custom>> initialClusterStateCustoms) {
    super(settings);
    this.settings = settings;
    this.nodeName = Node.NODE_NAME_SETTING.get(settings);
    this.masterService = new MasterService(nodeName, settings, threadPool);
    this.operationRouting = new OperationRouting(settings, clusterSettings);
    this.clusterSettings = clusterSettings;
    this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
    // React to dynamic changes of the slow-task logging threshold.
    this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
        this::setSlowTaskLoggingThreshold);
    // Add a no-op update consumer so changes are logged
    this.clusterSettings.addAffixUpdateConsumer(USER_DEFINED_META_DATA, (first, second) -> {}, (first, second) -> {});
    this.initialClusterStateCustoms = initialClusterStateCustoms;
    this.clusterApplierService = new ClusterApplierService(nodeName, settings, clusterSettings, threadPool,
        this::newClusterStateBuilder);
}
// NOTE(review): extract from a larger settings-update method — partitionedTransientSettings,
// knownAndValid*Settings, *ToApply, *Updates, *FinalSettings, "changed" and "settings" are
// all defined outside this view.
// The v2() partition holds the unknown/invalid settings; unused within this extract
// (presumably archived by surrounding code — confirm against the caller).
final Settings unknownOrInvalidTransientSettings = partitionedTransientSettings.v2();
// Start from the known-and-valid transient settings and layer the dynamic updates on top.
final Settings.Builder transientSettings = Settings.builder().put(knownAndValidTransientSettings);
changed |= clusterSettings.updateDynamicSettings(transientToApply, transientSettings, transientUpdates, "transient");
// Same treatment for the persistent scope.
final Settings unknownOrInvalidPersistentSettings = partitionedPersistentSettings.v2();
final Settings.Builder persistentSettings = Settings.builder().put(knownAndValidPersistentSettings);
changed |= clusterSettings.updateDynamicSettings(persistentToApply, persistentSettings, persistentUpdates, "persistent");
// Validate the merged results, then cross-check the combined update.
clusterSettings.validate(transientFinalSettings, true);
clusterSettings.validate(persistentFinalSettings, true);
clusterSettings.validateUpdate(settings);
/**
 * Convenience constructor that backs the allocator with a {@link ClusterSettings}
 * registry built from the built-in cluster settings.
 * The registry is constructed inline because {@code this(...)} must be the first statement.
 *
 * @param settings the node settings
 */
public BalancedShardsAllocator(Settings settings) {
    this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
}
// NOTE(review): grep-style extract — the filter lambda below is truncated mid-expression
// (the "&&" has no right-hand operand here) and the enclosing constructor is not visible.
// Build the node-level cluster settings registry from all registered node settings.
this.clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()));
Settings indexSettings = settings.filter((s) -> (s.startsWith("index.") &&
// Fail fast: validate the node settings against the registered definitions.
clusterSettings.validate(settings, true);
this.settingsFilter = new SettingsFilter(settings, settingsFilterPattern);
/**
 * Applies a freshly computed cluster state on this node: logs node-level changes,
 * connects to the nodes of the new state, applies incoming dynamic settings,
 * runs the state appliers, disconnects from removed nodes, publishes the state
 * locally and finally notifies the state listeners and the task's own listener.
 * NOTE: the statement order is deliberate — connect before applying, disconnect
 * after, and {@code state.set(...)} before the listeners are notified.
 */
private void applyChanges(UpdateTask task, ClusterState previousClusterState, ClusterState newClusterState) {
    ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(task.source, newClusterState, previousClusterState);
    // new cluster state, notify all listeners
    final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
    if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
        String summary = nodesDelta.shortSummary();
        if (summary.length() > 0) {
            // Only log when the delta summary is non-empty.
            logger.info("{}, reason: {}", summary, task.source);
        }
    }
    // Connect to the nodes of the incoming state before the appliers run.
    nodeConnectionsService.connectToNodes(newClusterState.nodes());
    logger.debug("applying cluster state version {}", newClusterState.version());
    try {
        // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency
        if (clusterChangedEvent.state().blocks().disableStatePersistence() == false && clusterChangedEvent.metaDataChanged()) {
            final Settings incomingSettings = clusterChangedEvent.state().metaData().settings();
            clusterSettings.applySettings(incomingSettings);
        }
    } catch (Exception ex) {
        // Best effort: a failure to apply dynamic settings is logged but must not
        // prevent the cluster state itself from being applied.
        logger.warn("failed to apply cluster settings", ex);
    }
    logger.debug("apply cluster state with version {}", newClusterState.version());
    callClusterStateAppliers(clusterChangedEvent);
    // Drop connections to nodes that are no longer part of the new state.
    nodeConnectionsService.disconnectFromNodesExcept(newClusterState.nodes());
    logger.debug("set locally applied cluster state to version {}", newClusterState.version());
    state.set(newClusterState);
    callClusterStateListeners(clusterChangedEvent);
    task.listener.onSuccess(task.source);
}
/**
 * Registers this instance for per-cluster "skip unavailable" setting updates,
 * in addition to the registrations performed by the superclass.
 */
@Override
public void listenForUpdates(ClusterSettings clusterSettings) {
    super.listenForUpdates(clusterSettings);
    clusterSettings.addAffixUpdateConsumer(REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {});
    // NOTE(review): SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE appears to be a legacy
    // ("search."-prefixed) alias of the setting above — confirm against its declaration.
    clusterSettings.addAffixUpdateConsumer(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {});
}
// NOTE(review): extract from a settings-update method — currentState, transientToApply,
// persistentSettings, *Updates, "changed", "settings" and "build" are defined outside
// this view (the persistent builder setup is elided here).
// Seed the transient builder with the currently applied transient settings.
Settings.Builder transientSettings = Settings.builder();
transientSettings.put(currentState.metaData().transientSettings());
// Merge the requested dynamic updates; "changed" records whether anything differs.
changed |= clusterSettings.updateDynamicSettings(transientToApply, transientSettings, transientUpdates, "transient");
changed |= clusterSettings.updateDynamicSettings(persistentToApply, persistentSettings, persistentUpdates, "persistent");
// Cross-check the combined update before returning the rebuilt result.
clusterSettings.validateUpdate(settings);
return build;
/**
 * Rejects a search request that would query more shards than the dynamic
 * shard-count limit allows.
 *
 * @param clusterService source of the current cluster settings
 * @param shardCount     the number of shards the request would query
 * @throws IllegalArgumentException when {@code shardCount} exceeds the limit
 */
private static void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) {
    final long limit = clusterService.getClusterSettings().get(SHARD_COUNT_LIMIT_SETTING);
    if (shardCount <= limit) {
        return;
    }
    throw new IllegalArgumentException("Trying to query " + shardCount
            + " shards, which is over the limit of " + limit
            + ". This limit exists because querying many shards at the same time can make the "
            + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to "
            + "have a smaller number of larger shards. Update [" + SHARD_COUNT_LIMIT_SETTING.getKey()
            + "] to a greater value if you really want to query that many shards at the same time.");
}
// Closing brace of the enclosing class, carried over from the original extract.
}
/**
 * Creates the node-scoped cluster settings registry.
 *
 * @param nodeSettings     the settings of this node
 * @param settingsSet      the set of registered cluster-wide settings
 * @param settingUpgraders upgraders applied to registered settings
 */
public ClusterSettings(
        final Settings nodeSettings,
        final Set<Setting<?>> settingsSet,
        final Set<SettingUpgrader<?>> settingUpgraders) {
    super(nodeSettings, settingsSet, settingUpgraders, Property.NodeScope);
    // NOTE(review): presumably logs applied settings changes — confirm in LoggingSettingUpdater.
    addSettingsUpdater(new LoggingSettingUpdater(nodeSettings));
}
/**
 * Builds the cluster-get-settings response from the given state, filtering every
 * section through the settings filter. Default values are only computed when
 * {@code renderDefaults} is set.
 *
 * @param state          the cluster state to read persistent/transient settings from
 * @param renderDefaults whether to include the (filtered) default settings
 * @param settingsFilter filter applied to every returned settings section
 * @param clusterSettings registry used to compute the defaults diff
 * @param settings       the node settings used for the defaults diff
 * @return the assembled response
 */
static ClusterGetSettingsResponse response(
        final ClusterState state,
        final boolean renderDefaults,
        final SettingsFilter settingsFilter,
        final ClusterSettings clusterSettings,
        final Settings settings) {
    final Settings persistent = settingsFilter.filter(state.metaData().persistentSettings());
    final Settings transientSection = settingsFilter.filter(state.metaData().transientSettings());
    final Settings defaults = renderDefaults
            ? settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), settings))
            : Settings.EMPTY;
    return new ClusterGetSettingsResponse(persistent, transientSection, defaults);
}
// NOTE(review): truncated extract — the closing brace of this "if" lies outside the view.
if (metaData.persistentSettings() != null) {
    Settings settings = metaData.persistentSettings();
    // Ensure the persisted settings still pass update validation before restoring them.
    clusterSettings.validateUpdate(settings);
    mdBuilder.persistentSettings(settings);
/**
 * Validates every setting of every configured tribe against the cluster-wide
 * setting definitions, rethrowing validation failures with the offending tribe
 * name prefixed to the message.
 *
 * @param settings        the full node settings to extract tribe groups from
 * @param clusterSettings registry used to validate each individual setting
 * @throws IllegalArgumentException when any tribe setting fails validation
 */
private void validateTribeSettings(Settings settings, ClusterSettings clusterSettings) {
    final Map<String, Settings> tribeGroups =
            settings.filter(TRIBE_CLIENT_NODE_SETTINGS_PREDICATE).getGroups("tribe.", true);
    for (Map.Entry<String, Settings> tribe : tribeGroups.entrySet()) {
        final Settings singleTribeSettings = tribe.getValue();
        for (Map.Entry<String, String> setting : singleTribeSettings.getAsMap().entrySet()) {
            try {
                clusterSettings.validate(setting.getKey(), singleTribeSettings);
            } catch (IllegalArgumentException ex) {
                // Re-wrap so the failure names the tribe it belongs to.
                throw new IllegalArgumentException(
                        "tribe." + tribe.getKey() + " validation failed: " + ex.getMessage(), ex);
            }
        }
    }
}
// NOTE(review): fragment of a larger settings-update method; the partitioned* tuples,
// knownAndValid* settings, *ToApply/*Updates arguments, *FinalSettings and the
// "changed"/"settings" locals all come from code outside this extract.
// v2() carries the unknown-or-invalid partition; it is not consumed in this extract.
final Settings unknownOrInvalidTransientSettings = partitionedTransientSettings.v2();
// Apply the transient dynamic updates on top of the valid transient settings.
final Settings.Builder transientSettings = Settings.builder().put(knownAndValidTransientSettings);
changed |= clusterSettings.updateDynamicSettings(transientToApply, transientSettings, transientUpdates, "transient");
// Mirror of the above for persistent settings.
final Settings unknownOrInvalidPersistentSettings = partitionedPersistentSettings.v2();
final Settings.Builder persistentSettings = Settings.builder().put(knownAndValidPersistentSettings);
changed |= clusterSettings.updateDynamicSettings(persistentToApply, persistentSettings, persistentUpdates, "persistent");
// Validate both final settings sets, then the combined update as a whole.
clusterSettings.validate(transientFinalSettings, true);
clusterSettings.validate(persistentFinalSettings, true);
clusterSettings.validateUpdate(settings);
/**
 * Creates a new {@link org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider} instance from
 * given settings, backed by a {@link ClusterSettings} registry built from the
 * built-in cluster settings.
 *
 * @param settings {@link org.elasticsearch.common.settings.Settings} to use
 */
public SnapshotInProgressAllocationDecider(Settings settings) {
    // The registry is built inline because this(...) must be the first statement.
    this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
}
// NOTE(review): truncated test extract — the ProfileSettings constructor call is cut off
// mid-argument-list, and the enclosing test method is not visible.
ClusterSettings clusterSettings = new ClusterSettings(randomSettings, ClusterSettings
    .BUILT_IN_CLUSTER_SETTINGS);
// Validate the randomized settings (the second argument's meaning is not visible
// in this extract — elsewhere in this file it is passed as true).
clusterSettings.validate(randomSettings, false);
TcpTransport.ProfileSettings settings = new TcpTransport.ProfileSettings(
    Settings.builder().put(randomSettings).put("transport.profiles.some_profile.port", "9700-9800").build(), // port is required
// NOTE(review): truncated test extract — the builder chain is cut off before .build()
// and applySettings' closing parenthesis; the surrounding method is not visible.
excludeSettings = "DOESN'T_MATCH";
// Dynamically reconfigure the trace-log include/exclude patterns via a settings update
// (presumably exercising transport request tracing — confirm against TransportService).
clusterSettings.applySettings(Settings.builder()
    .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), includeSettings)
    .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), excludeSettings)
/**
 * Registers this instance to listen to updates on the cluster settings.
 * Both the current remote-cluster proxy/seeds settings and their
 * "search."-prefixed counterparts are wired to {@code updateRemoteCluster};
 * the validation callback is a no-op in both cases.
 */
public void listenForUpdates(ClusterSettings clusterSettings) {
    clusterSettings.addAffixUpdateConsumer(
            RemoteClusterAware.REMOTE_CLUSTERS_PROXY,
            RemoteClusterAware.REMOTE_CLUSTERS_SEEDS,
            (clusterAlias, proxyAndSeeds) -> updateRemoteCluster(clusterAlias, proxyAndSeeds.v2(), proxyAndSeeds.v1()),
            (ns, proxyAndSeeds) -> {});
    clusterSettings.addAffixUpdateConsumer(
            RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY,
            RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS,
            (clusterAlias, proxyAndSeeds) -> updateRemoteCluster(clusterAlias, proxyAndSeeds.v2(), proxyAndSeeds.v1()),
            (ns, proxyAndSeeds) -> {});
}
/**
 * Fails the request when it would query more shards than allowed by
 * {@code SHARD_COUNT_LIMIT_SETTING}.
 *
 * @param clusterService used to read the current dynamic shard-count limit
 * @param shardCount     the number of shards the request would query
 * @throws IllegalArgumentException when {@code shardCount} exceeds the limit
 */
private static void failIfOverShardCountLimit(ClusterService clusterService, int shardCount) {
    final long shardCountLimit = clusterService.getClusterSettings().get(SHARD_COUNT_LIMIT_SETTING);
    if (shardCount > shardCountLimit) {
        throw new IllegalArgumentException("Trying to query " + shardCount
            + " shards, which is over the limit of " + shardCountLimit
            + ". This limit exists because querying many shards at the same time can make the "
            + "job of the coordinating node very CPU and/or memory intensive. It is usually a better idea to "
            + "have a smaller number of larger shards. Update [" + SHARD_COUNT_LIMIT_SETTING.getKey()
            + "] to a greater value if you really want to query that many shards at the same time.");
    }
}
// Closing brace of the enclosing class, carried over from the original extract.
}