/**
 * Returns the logger for the given component name, lazily creating and
 * caching it on first use. The component logger is a child of
 * {@code parentLogger}, suffixed with "." plus the component name.
 */
private Logger getLogger(String component) {
    return loggers.computeIfAbsent(component, key -> Loggers.getLogger(parentLogger, "." + key));
}
CompositeIndexEventListener(IndexSettings indexSettings, Collection<IndexEventListener> listeners) {
    // Screen out null entries up front so dispatch code never needs null checks.
    for (IndexEventListener candidate : listeners) {
        if (candidate == null) {
            throw new IllegalArgumentException("listeners must be non-null");
        }
    }
    // Defensive copy wrapped unmodifiable: the composite's view is frozen at construction.
    this.listeners = Collections.unmodifiableList(new ArrayList<>(listeners));
    this.logger = Loggers.getLogger(getClass(), indexSettings.getIndex());
}
protected AbstractIndexShardComponent(ShardId shardId, IndexSettings indexSettings) { this.shardId = shardId; this.indexSettings = indexSettings; this.logger = Loggers.getLogger(getClass(), shardId); this.deprecationLogger = new DeprecationLogger(logger); }
/** * Constructs a new index component, with the index name and its settings. */ protected AbstractIndexComponent(IndexSettings indexSettings) { this.logger = Loggers.getLogger(getClass(), indexSettings.getIndex()); this.deprecationLogger = new DeprecationLogger(logger); this.indexSettings = indexSettings; }
/**
 * Returns a logger for {@code clazz} whose messages are prefixed with the
 * index name (plus any additional caller-supplied prefixes).
 */
public static Logger getLogger(Class<?> clazz, Index index, String... prefixes) {
    final String[] combinedPrefixes = asArrayList(Loggers.SPACE, index.getName(), prefixes).toArray(new String[0]);
    return getLogger(clazz, combinedPrefixes);
}
/**
 * Returns a logger for {@code clazz} whose messages are prefixed with the
 * shard's index name and shard id (plus any additional caller-supplied prefixes).
 */
public static Logger getLogger(Class<?> clazz, ShardId shardId, String... prefixes) {
    final String[] combinedPrefixes = asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]);
    return getLogger(clazz, shardId.getIndex(), combinedPrefixes);
}
ElasticsearchConcurrentMergeScheduler(ShardId shardId, IndexSettings indexSettings) {
    this.shardId = shardId;
    this.indexSettings = indexSettings.getSettings();
    this.config = indexSettings.getMergeSchedulerConfig();
    this.logger = Loggers.getLogger(getClass(), shardId);
    // Apply the initial merge-scheduler configuration once all fields are wired.
    refreshConfig();
}
public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardLock shardLock, OnClose onClose) {
    super(shardId, indexSettings);
    final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING);
    logger.debug("store stats are refreshed with refresh_interval [{}]", refreshInterval);
    // Cache directory size stats, recomputing at most once per refresh interval.
    final ByteSizeCachingDirectory cachingDirectory = new ByteSizeCachingDirectory(directory, refreshInterval);
    this.directory = new StoreDirectory(cachingDirectory, Loggers.getLogger("index.store.deletes", shardId));
    this.shardLock = shardLock;
    this.onClose = onClose;
    // Sanity checks: callers must supply a close handler and a lock for this exact shard.
    assert onClose != null;
    assert shardLock != null;
    assert shardLock.getShardId().equals(shardId);
}
protected Engine(EngineConfig engineConfig) { Objects.requireNonNull(engineConfig.getStore(), "Store must be provided to the engine"); this.engineConfig = engineConfig; this.shardId = engineConfig.getShardId(); this.allocationId = engineConfig.getAllocationId(); this.store = engineConfig.getStore(); // we use the engine class directly here to make sure all subclasses have the same logger name this.logger = Loggers.getLogger(Engine.class, engineConfig.getShardId()); this.eventListener = engineConfig.getEventListener(); }
public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recoveryTarget, final StartRecoveryRequest request, final int fileChunkSizeInBytes) { this.shard = shard; this.recoveryTarget = recoveryTarget; this.request = request; this.shardId = this.request.shardId().id(); this.logger = Loggers.getLogger(getClass(), request.shardId(), "recover to " + request.targetNode().getName()); this.chunkSizeInBytes = fileChunkSizeInBytes; this.response = new RecoveryResponse(); }
/** * Creates a new recovery target object that represents a recovery to the provided shard. * * @param indexShard local shard where we want to recover to * @param sourceNode source node of the recovery where we recover from * @param listener called when recovery is completed/failed * @param ensureClusterStateVersionCallback callback to ensure that the current node is at least on a cluster state with the provided * version; necessary for primary relocation so that new primary knows about all other ongoing * replica recoveries when replicating documents (see {@link RecoverySourceHandler}) */ public RecoveryTarget(final IndexShard indexShard, final DiscoveryNode sourceNode, final PeerRecoveryTargetService.RecoveryListener listener, final LongConsumer ensureClusterStateVersionCallback) { super("recovery_status"); this.cancellableThreads = new CancellableThreads(); /* each recovery attempt gets a unique, monotonically increasing id */ this.recoveryId = idGenerator.incrementAndGet(); this.listener = listener; this.logger = Loggers.getLogger(getClass(), indexShard.shardId()); this.indexShard = indexShard; this.sourceNode = sourceNode; this.shardId = indexShard.shardId(); /* temp files are prefixed with a random UUID so concurrent/aborted recoveries never collide */ this.tempFilePrefix = RECOVERY_PREFIX + UUIDs.randomBase64UUID() + "."; this.store = indexShard.store(); this.ensureClusterStateVersionCallback = ensureClusterStateVersionCallback; // make sure the store is not released until we are done. store.incRef(); /* side effect: counts this object as an in-flight recovery target; the matching decrement presumably happens on close/finish — confirm against the class's cleanup path */ indexShard.recoveryStats().incCurrentAsTarget(); }
public ESLoggerWrapper(String name, Settings settings) {
    super(settings);
    this.settings = settings;
    // Resolve the named logger against the supplied settings.
    this.logger = Loggers.getLogger(name, settings);
}
public ActionHandler(final Settings settings, final Map<String, Object> sourceMap, final Client client, final ThreadPool pool) {
    this.settings = settings;
    this.rootSettings = sourceMap;
    this.client = client;
    this.pool = pool;
    this.logger = Loggers.getLogger(getClass(), settings);
}
/**
 * Creates field data backed by doc values for the given index and field.
 * The logger is keyed off the runtime class so subclasses log under their own name.
 */
public DocValuesIndexFieldData(Index index, String fieldName) {
    // The redundant explicit no-arg super() call was removed; it is implicit.
    this.index = index;
    this.fieldName = fieldName;
    this.logger = Loggers.getLogger(getClass());
}
public DefaultRequestHandler(final Settings settings, final Client client, final ThreadPool pool) {
    this.settings = settings;
    this.client = client;
    this.pool = pool;
    this.indexCreationLock = new ReentrantLock();
    // Retry budget is configurable; defaults to 20 attempts.
    this.maxRetryCount = settings.getAsInt("taste.rest.retry", 20);
    this.logger = Loggers.getLogger(getClass(), settings);
}
/* Configure transport tracing filters from settings (fragment of an enclosing constructor; surrounding definition not visible here). */ setTracerLogInclude(TransportSettings.TRACE_LOG_INCLUDE_SETTING.get(settings)); setTracerLogExclude(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.get(settings)); /* dedicated ".tracer" child logger so trace output can be toggled independently */ tracerLog = Loggers.getLogger(logger, ".tracer"); taskManager = createTaskManager(settings, threadPool, taskHeaders); this.interceptor = transportInterceptor;
CompositeIndexEventListener(IndexSettings indexSettings, Collection<IndexEventListener> listeners) {
    // Reject null entries eagerly so dispatch code never needs per-call null checks.
    listeners.forEach(listener -> {
        if (listener == null) {
            throw new IllegalArgumentException("listeners must be non-null");
        }
    });
    // Snapshot the collection; the composite's view is immutable after construction.
    this.listeners = Collections.unmodifiableList(new ArrayList<>(listeners));
    this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex());
}
/**
 * Creates the secondary index wrapper for the given base table.
 * The index name and the logger name both use "keyspace.table"; the string
 * is now built once and reused (the original concatenated it twice, and
 * used a redundant {@code ElasticSecondaryIndex.this.} qualification).
 */
ElasticSecondaryIndex(ColumnFamilyStore baseCfs, IndexMetadata indexDef) {
    this.baseCfs = baseCfs;
    this.typeName = ClusterService.cfNameToType(baseCfs.metadata.cfName);
    this.typeTermQuery = new TermQuery(new Term(TypeFieldMapper.NAME, typeName));
    this.indexMetadata = indexDef;
    this.index_name = baseCfs.keyspace.getName() + "." + baseCfs.name;
    // Logger name mirrors the index name so log lines identify keyspace.table.
    this.logger = Loggers.getLogger(this.getClass().getName() + "." + this.index_name);
}
private static Boolean parseBoolean(String key, String value) { // let the parser handle all cases for non-proper booleans without a deprecation warning by throwing IAE boolean booleanValue = Booleans.parseBooleanExact(value); if (Booleans.isStrictlyBoolean(value) == false) { DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(Setting.class)); deprecationLogger.deprecated("Expected a boolean [true/false] for setting [{}] but got [{}]", key, value); } return booleanValue; }
/* Capture index identity and creation version from the metadata (fragment of an enclosing constructor; surrounding definition not visible here). */ this.index = indexMetaData.getIndex(); version = IndexMetaData.SETTING_INDEX_VERSION_CREATED.get(settings); /* index-scoped logger so messages carry the index name */ logger = Loggers.getLogger(getClass(), index); nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetaData = indexMetaData;