// Releases the wrapped searcher exactly once; a second release trips an
// AssertionError whose cause points at the stack trace of the first release.
@Override
public void close() {
    synchronized (lock) {
        if (closed.compareAndSet(false, true)) {
            // Record where the first (legitimate) release happened so a later
            // double-close can report it as the cause below.
            firstReleaseStack = new RuntimeException();
            final int refCount = wrappedSearcher.reader().getRefCount();
            // this assert seems to be paranoid but given LUCENE-5362 we better add some assertions here to make sure we catch any potential
            // problems.
            assert refCount > 0 : "IndexReader#getRefCount() was [" + refCount + "] expected a value > [0] - reader is already closed. Initial refCount was: [" + initialRefCount + "]";
            try {
                wrappedSearcher.close();
            } catch (RuntimeException ex) {
                // Log before rethrowing so the failure is visible even if a
                // caller swallows the exception.
                logger.debug("Failed to release searcher", ex);
                throw ex;
            }
        } else {
            // Double release: fail loudly, chaining the stack captured at the
            // first release to help track down the duplicate caller.
            AssertionError error = new AssertionError("Released Searcher more than once, source [" + wrappedSearcher.source() + "]");
            error.initCause(firstReleaseStack);
            throw error;
        }
    }
}
@Override public void close() throws ElasticsearchException { try { reader().close(); // we close the reader to make sure wrappers can release resources if needed.... // our NonClosingReaderWrapper makes sure that our reader is not closed } catch (IOException e) { throw new ElasticsearchException("failed to close reader", e); } finally { engineSearcher.close(); } } };
@Override public void close() throws ElasticsearchException { try { reader().close(); // we close the reader to make sure wrappers can release resources if needed.... // our NonClosingReaderWrapper makes sure that our reader is not closed } catch (IOException e) { throw new ElasticsearchException("failed to close reader", e); } finally { engineSearcher.close(); } } };
throw new ElasticsearchException("failed to execute term vector request", ex); } finally { searcher.close(); get.release();
throw new ElasticsearchException("failed to execute term vector request", ex); } finally { searcher.close(); get.release();
throw new ElasticsearchException("failed to execute term vector request", ex); } finally { searcher.close(); get.release();
throw new ElasticsearchException("failed to execute term vector request", ex); } finally { searcher.close(); get.release();
searcher.close();
throw new PercolateException(context.indexShard().shardId(), "failed to execute", e); } finally { percolatorSearcher.close();
// Deletes every document whose _ttl has expired in each of the given shards.
// Failures are logged and the purge continues with the next shard.
private void purgeShards(List<IndexShard> shardsToPurge) {
    for (IndexShard shard : shardsToPurge) {
        // Match all documents whose _ttl field expired strictly before "now".
        Query expiredQuery = shard.indexService().mapperService().smartNameFieldType(TTLFieldMapper.NAME)
                .rangeQuery(null, System.currentTimeMillis(), false, true);
        Engine.Searcher searcher = shard.acquireSearcher("indices_ttl");
        try {
            logger.debug("[{}][{}] purging shard", shard.routingEntry().index(), shard.routingEntry().id());
            ExpiredDocsCollector collector = new ExpiredDocsCollector();
            searcher.searcher().search(expiredQuery, collector);
            BulkRequest bulk = new BulkRequest();
            for (DocToPurge doc : collector.getDocsToPurge()) {
                bulk.add(new DeleteRequest().index(shard.routingEntry().index()).type(doc.type)
                        .id(doc.id).version(doc.version).routing(doc.routing));
                // Flush the bulk once it grows past the configured size.
                bulk = processBulkIfNeeded(bulk, false);
            }
            // Force out whatever is left for this shard.
            processBulkIfNeeded(bulk, true);
        } catch (Exception e) {
            // Best effort: a failing shard must not abort the purge of the rest.
            logger.warn("failed to purge", e);
        } finally {
            searcher.close();
        }
    }
}
// Deletes every document whose _ttl has expired in each of the given shards.
// Failures are logged and the purge continues with the next shard.
private void purgeShards(List<IndexShard> shardsToPurge) {
    for (IndexShard shard : shardsToPurge) {
        // Match all documents whose _ttl field expired strictly before "now".
        Query expiredQuery = shard.mapperService().fullName(TTLFieldMapper.NAME)
                .rangeQuery(null, System.currentTimeMillis(), false, true, null);
        Engine.Searcher searcher = shard.acquireSearcher("indices_ttl");
        try {
            logger.debug("[{}][{}] purging shard", shard.routingEntry().index(), shard.routingEntry().id());
            ExpiredDocsCollector collector = new ExpiredDocsCollector();
            searcher.searcher().search(expiredQuery, collector);
            BulkRequest bulk = new BulkRequest();
            for (DocToPurge doc : collector.getDocsToPurge()) {
                bulk.add(new DeleteRequest().index(shard.routingEntry().getIndexName()).type(doc.type)
                        .id(doc.id).version(doc.version).routing(doc.routing));
                // Flush the bulk once it grows past the configured size.
                bulk = processBulkIfNeeded(bulk, false);
            }
            // Force out whatever is left for this shard.
            processBulkIfNeeded(bulk, true);
        } catch (Exception e) {
            // Best effort: a failing shard must not abort the purge of the rest.
            logger.warn("failed to purge", e);
        } finally {
            searcher.close();
        }
    }
}
// Runs query-based percolation collecting matches, highlights and scores.
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
    final Engine.Searcher searcher = context.indexShard().acquireSearcher("percolate");
    try {
        MatchAndScore collector = matchAndScore(logger, context, highlightPhase, isNested);
        queryBasedPercolating(searcher, context, collector);
        List<BytesRef> matchList = collector.matches();
        List<Map<String, HighlightField>> highlights = collector.hls();
        float[] matchScores = collector.scores().toArray();
        long matchCount = collector.counter();
        BytesRef[] matchedIds = matchList.toArray(new BytesRef[matchList.size()]);
        return new PercolateShardResponse(matchedIds, highlights, matchCount, matchScores, context, request.shardId());
    } catch (Throwable e) {
        logger.debug("failed to execute", e);
        throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
    } finally {
        // Release the shard searcher whether percolation succeeded or not.
        searcher.close();
    }
}
};
// Runs query-based percolation collecting matches and highlights (no scoring).
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
    final Engine.Searcher searcher = context.indexShard().acquireSearcher("percolate");
    try {
        Match collector = match(logger, context, highlightPhase, isNested);
        queryBasedPercolating(searcher, context, collector);
        List<BytesRef> matchList = collector.matches();
        List<Map<String, HighlightField>> highlights = collector.hls();
        long matchCount = collector.counter();
        BytesRef[] matchedIds = matchList.toArray(new BytesRef[matchList.size()]);
        return new PercolateShardResponse(matchedIds, highlights, matchCount, context, request.shardId());
    } catch (Throwable e) {
        logger.debug("failed to execute", e);
        throw new PercolateException(context.indexShard().shardId(), "failed to execute", e);
    } finally {
        // Release the shard searcher whether percolation succeeded or not.
        searcher.close();
    }
}
};
// Count-only percolation: returns how many queries matched the document.
@Override
public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested) {
    long matched = 0;
    Engine.Searcher searcher = context.indexShard().acquireSearcher("percolate");
    try {
        Count collector = count(logger, context, isNested);
        queryBasedPercolating(searcher, context, collector);
        matched = collector.counter();
    } catch (Throwable e) {
        // Best effort: a failure here yields a count of zero instead of
        // failing the whole percolate request.
        logger.warn("failed to execute", e);
    } finally {
        searcher.close();
    }
    return new PercolateShardResponse(matched, context, request.shardId());
}
// Releases the held searcher, if any; safe to call when none was acquired.
public void release() {
    if (searcher == null) {
        return;
    }
    searcher.close();
}
}
// Computes completion-suggester stats for the given fields (all fields when
// none are specified) against a point-in-time view of the index.
public CompletionStats completionStats(String... fields) {
    final CompletionStats stats = new CompletionStats();
    // Hold a searcher for the duration so the stats reflect one consistent snapshot.
    final Engine.Searcher searcher = acquireSearcher("completion_stats");
    try {
        final Completion090PostingsFormat postingsFormat =
                (Completion090PostingsFormat) PostingsFormat.forName(Completion090PostingsFormat.CODEC_NAME);
        stats.add(postingsFormat.completionStats(searcher.reader(), fields));
    } finally {
        searcher.close();
    }
    return stats;
}
throw new ElasticsearchException("failed to execute term vector request", ex); } finally { searcher.close(); get.release();