/**
 * Reports whether the given writer's underlying directory already contains
 * at least one commit, by delegating to the directory-based overload.
 */
private static boolean hasCommits(IndexWriter indexWriter) throws IOException {
  return hasCommits(indexWriter.getDirectory());
}
private synchronized void initDynamicDefaults(IndexWriter writer) throws IOException { if (maxThreadCount == AUTO_DETECT_MERGES_AND_THREADS) { boolean spins = IOUtils.spins(writer.getDirectory()); // Let tests override this to help reproducing a failure on a machine that has a different // core count than the one where the test originally failed: try { String value = System.getProperty(DEFAULT_SPINS_PROPERTY); if (value != null) { spins = Boolean.parseBoolean(value); } } catch (Exception ignored) { // that's fine we might hit a SecurityException etc. here just continue } setDefaultMaxMergesAndThreads(spins); if (verbose()) { message("initDynamicDefaults spins=" + spins + " maxThreadCount=" + maxThreadCount + " maxMergeCount=" + maxMergeCount); } } }
/** Verifies the repository's writer is backed by an on-disk NIOFSDirectory. */
@Override
protected void checkRepository(IndexRepositoryImpl repo0, int... bucketId) {
  final IndexWriter repoWriter = repo0.getWriter();
  assertTrue(repoWriter.getDirectory() instanceof NIOFSDirectory);
}
/** Wires up the mock collaborator graph shared by every test in this class. */
@Before
public void createMocks() throws BucketNotFoundException {
  GemFireCacheImpl mockCache = Fakes.cache();
  context = mock(RegionFunctionContext.class);
  fileSystem = mock(FileSystem.class);

  ResultSender resultSender = mock(ResultSender.class);
  Region dataRegion = mock(Region.class);
  InternalLuceneService luceneService = mock(InternalLuceneService.class);
  InternalLuceneIndex luceneIndex = mock(InternalLuceneIndex.class);
  RepositoryManager manager = mock(RepositoryManager.class);
  IndexRepository repository = mock(IndexRepository.class);
  IndexWriter indexWriter = mock(IndexWriter.class);
  RegionDirectory regionDirectory = mock(RegionDirectory.class);
  Region bucketRegion = mock(Region.class);

  // Function context: arguments, result sink, and the region it executes on.
  when(context.getArguments()).thenReturn(new String[] {directoryName, indexName});
  when(context.getResultSender()).thenReturn(resultSender);
  when(context.getDataSet()).thenReturn(dataRegion);
  when(dataRegion.getCache()).thenReturn(mockCache);

  // The Lucene service resolves the index, which exposes a single repository.
  when(mockCache.getService(any())).thenReturn(luceneService);
  when(luceneService.getIndex(eq(indexName), any())).thenReturn(luceneIndex);
  when(luceneIndex.getName()).thenReturn(indexName);
  when(luceneIndex.getRepositoryManager()).thenReturn(manager);
  when(manager.getRepositories(eq(context))).thenReturn(Collections.singleton(repository));

  // Repository -> writer -> directory -> file system chain, plus its bucket.
  when(bucketRegion.getFullPath()).thenReturn(bucketName);
  when(regionDirectory.getFileSystem()).thenReturn(fileSystem);
  when(indexWriter.getDirectory()).thenReturn(regionDirectory);
  when(repository.getWriter()).thenReturn(indexWriter);
  when(repository.getRegion()).thenReturn(bucketRegion);
}
// For each index repository, unwrap its writer's directory as a RegionDirectory
// to reach the backing FileSystem. NOTE(review): the cast assumes every
// repository writer is backed by a RegionDirectory — confirm against the
// repository factory. (Lambda body continues beyond this excerpt.)
repositories.stream().forEach(repo -> { final IndexWriter writer = repo.getWriter(); RegionDirectory directory = (RegionDirectory) writer.getDirectory(); FileSystem fs = directory.getFileSystem();
// Capture the writer's backing directory once for the operations below.
final Directory dir = writer.getDirectory();
// Report the merge failure together with the directory it occurred in.
handleMergeException(writer.getDirectory(), exc);
/**
 * Asserts that the repository's directory targets the file-and-chunk region of
 * at least one of the given buckets, and that it uses the expected serializer.
 */
protected void checkRepository(IndexRepositoryImpl repo0, int... bucketIds) {
  RegionDirectory regionDir = (RegionDirectory) repo0.getWriter().getDirectory();
  boolean matchedSomeBucket = false;
  for (int id : bucketIds) {
    BucketTargetingMap expected = new BucketTargetingMap(fileAndChunkBuckets.get(id), id);
    if (expected.equals(regionDir.getFileSystem().getFileAndChunkRegion())) {
      matchedSomeBucket = true;
    }
  }
  assertTrue(matchedSomeBucket);
  assertEquals(serializer, repo0.getSerializer());
}
/** Describes this instance via its writer's underlying directory. */
@Override public String toString() { return writer.getDirectory().toString(); } }
/** Reads the update sequence stored in the given writer's directory. */
private UpdateSequence getUpdateSequence(final IndexWriter writer) throws IOException { return getUpdateSequence(writer.getDirectory()); }
/** Manage a Lucene index that has already been created. */
public IndexBuilderBase(IndexWriter existingWriter) {
  indexWriter = existingWriter;
  dir = existingWriter.getDirectory();
}
/**
 * Creates a merger for the named segment in the writer's directory; when the
 * merge descriptor is non-null, an abort checker is attached to it.
 */
SegmentMerger(IndexWriter writer, String name, MergePolicy.OneMerge merge) {
  directory = writer.getDirectory();
  segment = name;
  termIndexInterval = writer.getTermIndexInterval();
  if (merge != null) {
    checkAbort = new CheckAbort(merge, directory);
  }
}
/**
 * Builds a merger over the writer's directory for the given segment name.
 * An abort checker is only installed when an actual merge is supplied.
 */
SegmentMerger(IndexWriter writer, String name, MergePolicy.OneMerge merge) {
  directory = writer.getDirectory();
  segment = name;
  termIndexInterval = writer.getTermIndexInterval();
  if (merge != null) {
    checkAbort = new CheckAbort(merge, directory);
  }
}
/**
 * Closes this index and best-effort removes its on-disk directory.
 * Failures (close or delete) are deliberately swallowed — this is cleanup.
 */
public synchronized void delete() {
  try {
    final File indexDir = ((FSDirectory) getWriter().getDirectory()).getDirectory();
    close();
    FileUtils.deleteDirectory(indexDir);
  } catch (Exception ignored) {
    // Best-effort deletion: any failure here is intentionally ignored.
  }
}
/**
 * Returns true if this single info is already fully merged: it has no pending
 * deletes, lives in the same directory as the writer, and matches the current
 * compound-file setting.
 */
protected final boolean isMerged(SegmentInfos infos, SegmentCommitInfo info, IndexWriter writer) throws IOException {
  assert writer != null;
  if (writer.numDeletedDocs(info) > 0) {
    return false; // pending deletes -> not merged
  }
  if (info.info.dir != writer.getDirectory()) {
    return false; // segment lives outside the writer's directory
  }
  return useCompoundFile(infos, info, writer) == info.info.getUseCompoundFile();
}
/**
 * Returns true if this single info is optimized: it has no pending norms or
 * deletes, lives in the same directory as the writer, and matches the current
 * compound-file setting.
 */
private boolean isOptimized(IndexWriter writer, SegmentInfo info) throws IOException {
  if (info.hasDeletions() || info.hasSeparateNorms()) {
    return false;
  }
  return info.dir == writer.getDirectory() && info.getUseCompoundFile() == useCompoundFile;
}
/**
 * Returns true if this single info is optimized (no pending norms or deletes,
 * same directory as the writer, and the compound-file setting matches).
 */
private boolean isOptimized(IndexWriter writer, SegmentInfo info) throws IOException {
  if (info.hasDeletions() || info.hasSeparateNorms()) {
    return false;
  }
  return info.dir == writer.getDirectory() && info.getUseCompoundFile() == useCompoundFile;
}
/**
 * Commits (unless terminating) and then closes both the index writer and the
 * taxonomy writer together with their underlying directories.
 *
 * Fix: the original closed everything sequentially, so an exception while
 * closing the index writer (or its directory) leaked the taxonomy writer and
 * its directory. Nested try/finally now guarantees every close is attempted;
 * the first thrown exception still propagates to the caller.
 *
 * @param terminate when false, a commit is forced before closing
 * @throws IOException if any commit or close fails
 */
public void close(boolean terminate) throws IOException {
  if (!terminate) {
    forceCommit();
  }
  try {
    Directory indexDir = indexWriter.getDirectory();
    try {
      indexWriter.close();
    } finally {
      indexDir.close();
    }
  } finally {
    Directory taxoDir = taxoWriter.getDirectory();
    try {
      taxoWriter.close();
    } finally {
      taxoDir.close();
    }
  }
}
/**
 * Updates (or inserts) the faceted document identified by the given id term,
 * then bumps the document counter and resets the builder for reuse.
 *
 * @param termId the id term; must not be null
 * @throws ServerException BAD_REQUEST when the id term is missing
 * @throws IOException on index write failure
 */
void updateDocument(Term termId) throws IOException {
  if (termId == null) {
    throw new ServerException(Response.Status.BAD_REQUEST,
        "The field " + FieldDefinition.ID_FIELD + " is missing - Index: " + indexWriter.getDirectory());
  }
  indexWriter.updateDocument(termId, getFacetedDoc());
  count++;
  documentBuilder.reset();
}
/**
 * Returns true if this single info is fully merged: it has no pending norms
 * or deletes, lives in the same directory as the writer, and matches the
 * current compound-file setting.
 */
@Override
protected boolean isMerged(SegmentInfo info) throws IOException {
  final IndexWriter w = writer.get();
  if (info.hasDeletions() || info.hasSeparateNorms()) {
    return false;
  }
  return info.dir == w.getDirectory() && info.getUseCompoundFile() == getUseCompoundFile();
}