/**
 * Returns a new searcher instance. The consumer of this
 * API is responsible for releasing the returned searcher in a
 * safe manner, preferably in a try/finally block.
 *
 * @param source the source API or routing that triggers this searcher acquire
 *
 * @see Searcher#close()
 */
public final Searcher acquireSearcher(String source) throws EngineException {
    return acquireSearcher(source, SearcherScope.EXTERNAL);
}
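// Usage sketch (illustrative, not from the source): Searcher is a Releasable,
// so try-with-resources gives the "safe manner" the contract above asks for.
// The `engine` variable and the "my_feature" source tag are hypothetical.
try (Engine.Searcher searcher = engine.acquireSearcher("my_feature")) {
    // Build a throwaway IndexSearcher over the acquired reader, as other call
    // sites below do, and run queries; the searcher is released on exit.
    IndexSearcher indexSearcher = new IndexSearcher(searcher.reader());
}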
/**
 * Returns the {@link DocsStats} for this engine
 */
public DocsStats docStats() {
    // We calculate the doc stats based on the internal reader, which is more up-to-date and not subject
    // to external refreshes. For instance, we don't refresh an external reader on flush, so indices with
    // index.refresh_interval=-1 wouldn't see any doc stats updates at all. Using the internal reader gives
    // more accurate statistics when indexing without refreshing. If a refresh happens, the internal reader
    // is refreshed as well, so we are safe here.
    try (Engine.Searcher searcher = acquireSearcher("docStats", Engine.SearcherScope.INTERNAL)) {
        return docsStats(searcher.reader());
    }
}
public boolean refreshNeeded() {
    if (store.tryIncRef()) {
        /*
         * We need to inc-ref the store here since acquiring a searcher might keep a file open on
         * the store. That would violate the assumption that all files are closed when the store is
         * closed, so we make sure to increment the ref count first.
         */
        try {
            try (Searcher searcher = acquireSearcher("refresh_needed", SearcherScope.EXTERNAL)) {
                return searcher.getDirectoryReader().isCurrent() == false;
            }
        } catch (IOException e) {
            logger.error("failed to access searcher manager", e);
            failEngine("failed to access searcher manager", e);
            throw new EngineException(shardId, "failed to access searcher manager", e);
        } finally {
            store.decRef();
        }
    }
    return false;
}
Map<String, Segment> segments = new HashMap<>();
// First, collect the segments visible through the external (search-facing) reader...
try (Searcher searcher = acquireSearcher("segments", SearcherScope.EXTERNAL)) {
    for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
        fillSegmentInfo(Lucene.segmentReader(ctx.reader()), verbose, true, segments);
    }
}
// ... then add any segments that are only visible through the internal reader.
try (Searcher searcher = acquireSearcher("segments", SearcherScope.INTERNAL)) {
    for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
        SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
        if (segments.containsKey(segmentReader.getSegmentName()) == false) {
            fillSegmentInfo(segmentReader, verbose, false, segments);
        }
    }
}
/**
 * Global stats on segments.
 */
public SegmentsStats segmentsStats(boolean includeSegmentFileSizes) {
    ensureOpen();
    Set<String> segmentName = new HashSet<>();
    SegmentsStats stats = new SegmentsStats();
    try (Searcher searcher = acquireSearcher("segments_stats", SearcherScope.INTERNAL)) {
        for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
            SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
            fillSegmentStats(segmentReader, includeSegmentFileSizes, stats);
            segmentName.add(segmentReader.getSegmentName());
        }
    }
    try (Searcher searcher = acquireSearcher("segments_stats", SearcherScope.EXTERNAL)) {
        for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
            SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
            if (segmentName.contains(segmentReader.getSegmentName()) == false) {
                fillSegmentStats(segmentReader, includeSegmentFileSizes, stats);
            }
        }
    }
    writerSegmentStats(stats);
    return stats;
}
private Engine.Searcher acquireSearcher(String source, Engine.SearcherScope scope) {
    readAllowed();
    final Engine engine = getEngine();
    final Engine.Searcher searcher = engine.acquireSearcher(source, scope);
    assert ElasticsearchDirectoryReader.unwrap(searcher.getDirectoryReader()) != null :
        "DirectoryReader must be an instance of ElasticsearchDirectoryReader";
    boolean success = false;
    try {
        final Engine.Searcher wrappedSearcher = searcherWrapper == null ? searcher : searcherWrapper.wrap(searcher);
        assert wrappedSearcher != null;
        success = true;
        return wrappedSearcher;
    } catch (IOException ex) {
        throw new ElasticsearchException("failed to wrap searcher", ex);
    } finally {
        if (success == false) {
            // Close the unwrapped searcher if wrapping failed, so it is not leaked.
            Releasables.close(success, searcher);
        }
    }
}
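// Illustrative sketch only, assuming the IndexSearcherWrapper extension point whose
// wrap(Engine.Searcher) is invoked above: subclasses override the protected hooks to
// customize the reader and/or searcher handed back to the shard. The class name and
// the query-cache tweak are hypothetical, not taken from the source.
public class NoQueryCacheSearcherWrapper extends IndexSearcherWrapper {

    @Override
    protected DirectoryReader wrap(DirectoryReader reader) throws IOException {
        return reader; // no reader-level filtering in this sketch
    }

    @Override
    protected IndexSearcher wrap(IndexSearcher searcher) throws IOException {
        // Hand back a fresh searcher with query caching disabled (hypothetical policy).
        IndexSearcher wrapped = new IndexSearcher(searcher.getIndexReader());
        wrapped.setQueryCache(null);
        return wrapped;
    }
}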
/**
 * Returns a new searcher instance. The consumer of this
 * API is responsible for releasing the returned searcher in a
 * safe manner, preferably in a try/finally block.
 *
 * @see Searcher#close()
 */
public final Searcher acquireSearcher(String source) throws EngineException {
    return acquireSearcher(source, true);
}
/**
 * Returns the {@link CompletionStats} for this engine
 */
public CompletionStats completionStats(String... fieldNamePatterns) throws IOException {
    try (Engine.Searcher currentSearcher = acquireSearcher("completion_stats", SearcherScope.INTERNAL)) {
        long sizeInBytes = 0;
        ObjectLongHashMap<String> completionFields = null;
        if (fieldNamePatterns != null && fieldNamePatterns.length > 0) {
            completionFields = new ObjectLongHashMap<>(fieldNamePatterns.length);
        }
        for (LeafReaderContext atomicReaderContext : currentSearcher.reader().leaves()) {
            LeafReader atomicReader = atomicReaderContext.reader();
            for (FieldInfo info : atomicReader.getFieldInfos()) {
                Terms terms = atomicReader.terms(info.name);
                if (terms instanceof CompletionTerms) {
                    // TODO: currently we load up the suggester for reporting its size
                    long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed();
                    if (Regex.simpleMatch(fieldNamePatterns, info.name)) {
                        completionFields.addTo(info.name, fstSize);
                    }
                    sizeInBytes += fstSize;
                }
            }
        }
        return new CompletionStats(sizeInBytes, completionFields == null ? null : new FieldMemoryStats(completionFields));
    }
}
public Engine.Searcher acquireSearcher(String source) {
    readAllowed();
    return engine().acquireSearcher(source);
}
private int loadQueries(IndexShard shard) {
    shard.refresh("percolator_load_queries");
    // NOTE: we acquire the searcher via the engine directly here since this is executed right
    // before the shard is marked as POST_RECOVERY
    try (Engine.Searcher searcher = shard.engine().acquireSearcher("percolator_load_queries")) {
        Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
        QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger,
            mapperService, indexFieldDataService);
        IndexSearcher indexSearcher = new IndexSearcher(searcher.reader());
        indexSearcher.setQueryCache(null);
        indexSearcher.search(query, queryCollector);
        Map<BytesRef, Query> queries = queryCollector.queries();
        for (Map.Entry<BytesRef, Query> entry : queries.entrySet()) {
            Query previousQuery = percolateQueries.put(entry.getKey(), entry.getValue());
            shardPercolateService.addedQuery(entry.getKey(), previousQuery, entry.getValue());
        }
        return queries.size();
    } catch (Exception e) {
        throw new PercolatorException(shardId.index(), "failed to load queries from percolator index", e);
    }
}
private void loadQueries(IndexShard shard) {
    try {
        shard.refresh("percolator_load_queries");
        // Maybe add a "load" mode? This isn't really a write, but we need the write path
        // because state == POST_RECOVERY.
        try (Engine.Searcher searcher = shard.engine().acquireSearcher("percolator_load_queries")) {
            Query query = new XConstantScoreQuery(
                indexCache.filter().cache(
                    new TermFilter(new Term(TypeFieldMapper.NAME, BatchPercolatorService.TYPE_NAME))
                )
            );
            BatchQueriesLoaderCollector queryCollector = new BatchQueriesLoaderCollector(BatchPercolatorQueriesRegistry.this,
                logger, mapperService, indexFieldDataService);
            searcher.searcher().search(query, queryCollector);
            Map<String, QueryAndSource> queries = queryCollector.queries();
            for (Map.Entry<String, QueryAndSource> entry : queries.entrySet()) {
                percolateQueries.put(entry.getKey(), entry.getValue());
            }
        }
    } catch (Exception e) {
        throw new BatchPercolatorQueryException(shardId.index(), "failed to load queries from percolator index", e);
    }
}
public final boolean refreshNeeded() {
    if (store.tryIncRef()) {
        /*
         * We need to inc-ref the store here since acquiring a searcher might keep a file open on
         * the store. That would violate the assumption that all files are closed when the store is
         * closed, so we make sure to increment the ref count first.
         */
        try {
            try (Searcher searcher = acquireSearcher("refresh_needed", SearcherScope.EXTERNAL)) {
                return searcher.getDirectoryReader().isCurrent() == false;
            }
        } catch (IOException e) {
            logger.error("failed to access searcher manager", e);
            failEngine("failed to access searcher manager", e);
            throw new EngineException(shardId, "failed to access searcher manager", e);
        } finally {
            store.decRef();
        }
    }
    return false;
}
private Engine.Searcher acquireSearcher(String source, Engine.SearcherScope scope) {
    readAllowed();
    final Engine engine = getEngine();
    final Engine.Searcher searcher = engine.acquireSearcher(source, scope);
    boolean success = false;
    try {
        final Engine.Searcher wrappedSearcher = searcherWrapper == null ? searcher : searcherWrapper.wrap(searcher);
        assert wrappedSearcher != null;
        success = true;
        return wrappedSearcher;
    } catch (IOException ex) {
        throw new ElasticsearchException("failed to wrap searcher", ex);
    } finally {
        if (success == false) {
            Releasables.close(success, searcher);
        }
    }
}
public Engine.Searcher acquireSearcher(String source) {
    readAllowed();
    final Engine engine = getEngine();
    final Engine.Searcher searcher = engine.acquireSearcher(source);
    boolean success = false;
    try {
        final Engine.Searcher wrappedSearcher = searcherWrapper == null ? searcher : searcherWrapper.wrap(searcher);
        assert wrappedSearcher != null;
        success = true;
        return wrappedSearcher;
    } catch (IOException ex) {
        throw new ElasticsearchException("failed to wrap searcher", ex);
    } finally {
        if (success == false) {
            Releasables.close(success, searcher);
        }
    }
}
/**
 * Global stats on segments.
 */
public final SegmentsStats segmentsStats(boolean includeSegmentFileSizes) {
    ensureOpen();
    Set<String> segmentName = new HashSet<>();
    SegmentsStats stats = new SegmentsStats();
    try (Searcher searcher = acquireSearcher("segments_stats", SearcherScope.INTERNAL)) {
        for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
            SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
            fillSegmentStats(segmentReader, includeSegmentFileSizes, stats);
            segmentName.add(segmentReader.getSegmentName());
        }
    }
    try (Searcher searcher = acquireSearcher("segments_stats", SearcherScope.EXTERNAL)) {
        for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
            SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
            if (segmentName.contains(segmentReader.getSegmentName()) == false) {
                fillSegmentStats(segmentReader, includeSegmentFileSizes, stats);
            }
        }
    }
    writerSegmentStats(stats);
    return stats;
}
@Override
protected ShardReconstructIndexResponse shardOperation(ShardReconstructIndexRequest request) throws ElasticsearchException {
    IndexService indexService = indicesService.indexService(request.index());
    InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(request.shardId());
    // Acquire the searcher in try-with-resources so it is always released, per the acquireSearcher contract.
    try (Engine.Searcher searcher = indexShard.engine().acquireSearcher("transport_reconstruct")) {
        IndexReader reader = searcher.reader();
        DocumentReconstructor dr = new DocumentReconstructor(reader);
        return new ShardReconstructIndexResponse(true, dr.reconstruct(request.shardId()));
    } catch (IOException e) {
        throw new ElasticsearchException("failed to reconstruct index", e);
    }
}
/**
 * Global stats on segments.
 */
public final SegmentsStats segmentsStats() {
    ensureOpen();
    try (final Searcher searcher = acquireSearcher("segments_stats", false)) {
        SegmentsStats stats = new SegmentsStats();
        for (LeafReaderContext reader : searcher.reader().leaves()) {
            final SegmentReader segmentReader = segmentReader(reader.reader());
            stats.add(1, segmentReader.ramBytesUsed());
            stats.addTermsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPostingsReader()));
            stats.addStoredFieldsMemoryInBytes(guardedRamBytesUsed(segmentReader.getFieldsReader()));
            stats.addTermVectorsMemoryInBytes(guardedRamBytesUsed(segmentReader.getTermVectorsReader()));
            stats.addNormsMemoryInBytes(guardedRamBytesUsed(segmentReader.getNormsReader()));
            stats.addDocValuesMemoryInBytes(guardedRamBytesUsed(segmentReader.getDocValuesReader()));
        }
        writerSegmentStats(stats);
        return stats;
    }
}
/**
 * Global stats on segments.
 */
public final SegmentsStats segmentsStats(boolean includeSegmentFileSizes) {
    ensureOpen();
    try (Searcher searcher = acquireSearcher("segments_stats")) {
        SegmentsStats stats = new SegmentsStats();
        for (LeafReaderContext reader : searcher.reader().leaves()) {
            final SegmentReader segmentReader = segmentReader(reader.reader());
            stats.add(1, segmentReader.ramBytesUsed());
            stats.addTermsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPostingsReader()));
            stats.addStoredFieldsMemoryInBytes(guardedRamBytesUsed(segmentReader.getFieldsReader()));
            stats.addTermVectorsMemoryInBytes(guardedRamBytesUsed(segmentReader.getTermVectorsReader()));
            stats.addNormsMemoryInBytes(guardedRamBytesUsed(segmentReader.getNormsReader()));
            stats.addPointsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPointsReader()));
            stats.addDocValuesMemoryInBytes(guardedRamBytesUsed(segmentReader.getDocValuesReader()));
            if (includeSegmentFileSizes) {
                // TODO: consider moving this to StoreStats
                stats.addFileSizes(getSegmentFileSizes(segmentReader));
            }
        }
        writerSegmentStats(stats);
        return stats;
    }
}