/**
 * Blocks until the wrapped {@code searcherManager} has refreshed its searcher,
 * making all pending index changes visible.
 *
 * @throws IOException if the underlying refresh fails
 */
@Override
public void maybeRefreshBlocking() throws IOException {
    // Pure delegation to the wrapped SearcherManager.
    searcherManager.maybeRefreshBlocking();
}
/**
 * Flushes pending changes by forcing a blocking refresh of the searcher, so
 * subsequent searches observe everything written so far.
 *
 * @throws java.io.UncheckedIOException if the underlying refresh fails
 */
@Override
public void flush() {
    try {
        searcherManager.maybeRefreshBlocking();
    } catch (IOException e) {
        // UncheckedIOException (a RuntimeException subclass, so callers are
        // unaffected) keeps the I/O nature of the failure visible, unlike a
        // bare RuntimeException wrapper.
        throw new java.io.UncheckedIOException(e);
    }
}
/**
 * Indexes one document per element of {@code data}, using the element's list
 * position as the entity id, then blocks until the new documents are visible
 * to searchers.
 *
 * @param data property values to index; element at index i becomes entity i
 * @throws IOException if writing a document or refreshing the index fails
 */
private void insert( List<Object> data ) throws IOException {
    int entityId = 0;
    for ( Object value : data ) {
        Document doc = LuceneDocumentStructure.documentRepresentingProperties(
                entityId, Values.of( value ) );
        writer.addDocument( doc );
        entityId++;
    }
    // Make the additions searchable before returning.
    searcherManager.maybeRefreshBlocking();
}
@Override
protected IndexSearcher refreshIfNeeded(IndexSearcher referenceToRefresh) throws IOException {
    // We simply run a blocking refresh on the internal reference manager and then steal its
    // reader. It's a safe operation: acquire() increments the reader's reference count, and we
    // then "steal" it by calling incRef before handing it to getSearcher below.
    internalSearcherManager.maybeRefreshBlocking();
    IndexSearcher acquire = internalSearcherManager.acquire();
    try {
        final IndexReader previousReader = referenceToRefresh.getIndexReader();
        assert previousReader instanceof ElasticsearchDirectoryReader:
            "searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + previousReader;
        final IndexReader newReader = acquire.getIndexReader();
        if (newReader == previousReader) {
            // nothing has changed - both ref managers share the same instance so we can use reference equality
            return null;
        } else {
            newReader.incRef(); // steal the reader - getSearcher will decrement if it fails
            return SearcherManager.getSearcher(searcherFactory, newReader, previousReader);
        }
    } finally {
        // Balance the acquire() above; the stolen incRef keeps newReader alive past this point.
        internalSearcherManager.release(acquire);
    }
}
/**
 * Reopens the underlying searcher, blocking until pending changes are
 * visible. It's best to "batch up" many additions/updates and then call
 * refresh once at the end.
 *
 * @throws IOException if the refresh fails
 * @throws IllegalStateException if the suggester has not been built yet
 */
public void refresh() throws IOException {
    if (searcherMgr != null) {
        searcherMgr.maybeRefreshBlocking();
        return;
    }
    throw new IllegalStateException("suggester was not built");
}
/**
 * Refreshes the searcher if the index has changed since the current searcher
 * was opened; a no-op when the searcher is already current.
 *
 * @throws java.io.UncheckedIOException if the currency check or refresh fails
 */
private void checkIfChanged() {
    try {
        // Cheap pre-check avoids the blocking refresh path when nothing changed.
        if (!searcherManager.isSearcherCurrent()) {
            searcherManager.maybeRefreshBlocking();
        }
    } catch (IOException ex) {
        // Guava's Throwables.propagate is deprecated; UncheckedIOException is
        // still a RuntimeException, so existing callers are unaffected.
        throw new java.io.UncheckedIOException(ex);
    }
}
/**
 * Acquires the current searcher after first forcing a blocking refresh, so
 * the caller never observes a stale view of the index.
 *
 * @return an up-to-date searcher; the caller is responsible for releasing it
 */
@Override
public IndexSearcher acquire() {
    try {
        // Refresh-then-acquire: both operations can throw IOException, which we
        // surface unchecked since this override cannot declare it.
        searcherManager.maybeRefreshBlocking();
        return searcherManager.acquire();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Marks the current searcher to be closed once all searches are finished,
 * and creates a new one for later searches.
 * <p></p>
 * Doesn't refresh the searcher if a batch operation is in progress (i.e.
 * {@link #batchMode} is <tt>true</tt>).
 */
private void commitAndRefreshSearcher() {
    // don't refresh searcher during a batch operation
    if (batchMode.get())
        return;
    // Serialize commit+refresh pairs so concurrent callers don't interleave them.
    searcherRefreshLock.lock();
    try (Ticker ignored = commitAndRefreshMetric.start()) {
        // Persist pending writes first, then swap in a searcher that sees them.
        writer.commit();
        searcherManager.maybeRefreshBlocking();
    } catch (IOException e) {
        throw new LuceneException("Error refreshing index searcher", e);
    } finally {
        searcherRefreshLock.unlock();
    }
}
/**
 * Updates the given documents in the Lucene index (shared on HDFS), matching
 * by document id; documents not yet present are created instead.
 *
 * @param documents the documents to update or create
 */
public void insertOrUpdateDocument(List<DQDocument> documents) {
    List<DQDocument> documentsToCreate = new ArrayList<>();
    try {
        LOGGER.debug("update " + documents.size() + " documents");
        for (DQDocument document : documents) {
            final Term term = new Term(DictionarySearcher.F_DOCID, document.getId());
            // Refresh per iteration so the existence check sees updates made
            // by earlier iterations of this loop.
            mgr.maybeRefreshBlocking();
            IndexSearcher searcher = mgr.acquire();
            try {
                if (searcher.search(new TermQuery(term), 1).totalHits == 1) {
                    getWriter().updateDocument(term,
                            DictionaryUtils.dqDocumentToLuceneDocument(document).getFields());
                } else {
                    documentsToCreate.add(document);
                }
            } finally {
                // Release in finally: the original leaked the searcher if the
                // search or update threw before release() was reached.
                mgr.release(searcher);
            }
        }
        createDocument(documentsToCreate);
    } catch (IOException e) {
        LOGGER.error(e.getMessage(), e);
    }
}
@Override
public void refresh(String source) throws EngineException {
    // we obtain a read lock here, since we don't want a flush to happen while we are refreshing
    // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        searcherManager.maybeRefreshBlocking();
    } catch (AlreadyClosedException e) {
        // engine was concurrently closed: propagate as-is rather than failing the engine
        throw e;
    } catch (Exception e) {
        // any other failure is fatal for the engine; suppress secondary failures
        // from failEngine so the original cause is what callers see
        try {
            failEngine("refresh failed", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw new RefreshFailedEngineException(shardId, e);
    }
}
/**
 * Updates the given category in the index if a document with its category id
 * already exists; otherwise creates it.
 *
 * @param category the category to insert or update
 */
public void insertOrUpdateCategory(DQCategory category) {
    LOGGER.debug("insertOrUpdateCategory: " + category);
    final Term searchTerm = new Term(DictionarySearcher.F_CATID, category.getId());
    final TermQuery termQuery = new TermQuery(searchTerm);
    try {
        mgr.maybeRefreshBlocking();
        IndexSearcher searcher = mgr.acquire();
        TopDocs result;
        try {
            result = searcher.search(termQuery, 1);
        } finally {
            // Release in finally: the original leaked the searcher if search() threw.
            mgr.release(searcher);
        }
        if (result.totalHits == 1) {
            final Term term = new Term(DictionarySearcher.F_CATID, category.getId());
            List<IndexableField> fields = DictionaryUtils.categoryToDocument(category).getFields();
            getWriter().updateDocument(term, fields);
        } else {
            createCategory(category);
        }
    } catch (IOException e) {
        LOGGER.error(e.getMessage(), e);
    }
}
@Override
public void refresh(String source) throws EngineException {
    // we obtain a read lock here, since we don't want a flush to happen while we are refreshing
    // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        searcherManager.maybeRefreshBlocking();
    } catch (AlreadyClosedException e) {
        // record a tragic IndexWriter failure (if any) before propagating the close
        failOnTragicEvent(e);
        throw e;
    } catch (Exception e) {
        // any other failure is fatal for the engine; suppress secondary failures
        // from failEngine so the original cause is what callers see
        try {
            failEngine("refresh failed", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw new RefreshFailedEngineException(shardId, e);
    }
    // TODO: maybe we should just put a scheduled job in threadPool?
    // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes
    // for a long time:
    maybePruneDeletedTombstones();
    versionMapRefreshPending.set(false);
    mergeScheduler.refreshConfig();
}
@Override
public void refresh(String source) throws EngineException {
    // we obtain a read lock here, since we don't want a flush to happen while we are refreshing
    // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        searcherManager.maybeRefreshBlocking();
    } catch (AlreadyClosedException e) {
        // a searcher was closed underneath us: if the engine itself is closed,
        // ensureOpen() translates this into an engine-closed error; otherwise
        // the stale-searcher race is tolerated and the refresh is skipped
        ensureOpen();
    } catch (EngineClosedException e) {
        throw e;
    } catch (Throwable t) {
        // any other failure is fatal for the engine
        failEngine("refresh failed", t);
        throw new RefreshFailedEngineException(shardId, t);
    }
}
internalSearcherManager.maybeRefreshBlocking(); IndexSearcher acquire = internalSearcherManager.acquire(); try {
@Override
public void refresh(String source) throws EngineException {
    // we obtain a read lock here, since we don't want a flush to happen while we are refreshing
    // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        searcherManager.maybeRefreshBlocking();
    } catch (AlreadyClosedException e) {
        // record a tragic IndexWriter failure (if any) before propagating the close
        failOnTragicEvent(e);
        throw e;
    } catch (Exception e) {
        // any other failure is fatal for the engine; suppress secondary failures
        // from failEngine so the original cause is what callers see
        try {
            failEngine("refresh failed", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw new RefreshFailedEngineException(shardId, e);
    }
    // TODO: maybe we should just put a scheduled job in threadPool?
    // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes
    // for a long time:
    maybePruneDeletedTombstones();
    // NOTE(review): the line below is deliberately disabled in this variant; confirm
    // whether the versionMapRefreshPending flag is cleared elsewhere.
    //versionMapRefreshPending.set(false);
    mergeScheduler.refreshConfig();
}
@Override
public void refresh(String source) throws EngineException {
    // we obtain a read lock here, since we don't want a flush to happen while we are refreshing
    // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        searcherManager.maybeRefreshBlocking();
    } catch (AlreadyClosedException e) {
        // if the engine itself is closed, ensureOpen() surfaces that; otherwise
        // give maybeFailEngine a chance to fail the engine for this close event
        ensureOpen();
        maybeFailEngine("refresh", e);
    } catch (EngineClosedException e) {
        throw e;
    } catch (Throwable t) {
        // any other failure is fatal for the engine
        failEngine("refresh failed", t);
        throw new RefreshFailedEngineException(shardId, t);
    }
    // TODO: maybe we should just put a scheduled job in threadPool?
    // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes
    // for a long time:
    maybePruneDeletedTombstones();
    versionMapRefreshPending.set(false);
    mergeScheduler.refreshConfig();
}
@Override
protected IndexSearcher refreshIfNeeded(IndexSearcher referenceToRefresh) throws IOException {
    // We simply run a blocking refresh on the internal reference manager and then steal its
    // reader. It's a safe operation: acquire() increments the reader's reference count, and we
    // then "steal" it by calling incRef before handing it to getSearcher below.
    internalSearcherManager.maybeRefreshBlocking();
    IndexSearcher acquire = internalSearcherManager.acquire();
    try {
        final IndexReader previousReader = referenceToRefresh.getIndexReader();
        assert previousReader instanceof ElasticsearchDirectoryReader:
            "searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + previousReader;
        final IndexReader newReader = acquire.getIndexReader();
        if (newReader == previousReader) {
            // nothing has changed - both ref managers share the same instance so we can use reference equality
            return null;
        } else {
            newReader.incRef(); // steal the reader - getSearcher will decrement if it fails
            return SearcherManager.getSearcher(searcherFactory, newReader, previousReader);
        }
    } finally {
        // Balance the acquire() above; the stolen incRef keeps newReader alive past this point.
        internalSearcherManager.release(acquire);
    }
}