if (mergeState.mergeFieldInfos.hasVectors()) {
  if (mergeState.infoStream.isEnabled("SM")) {
    t0 = System.nanoTime();
infoStream.message("DWPT", "new segment has " + flushState.softDelCountOnFlush + " soft-deleted docs");
infoStream.message("DWPT", "new segment has " +
    (flushState.fieldInfos.hasVectors() ? "vectors" : "no vectors") + "; " +
    (flushState.fieldInfos.hasNorms() ? "norms" : "no norms") + "; " +
    (flushState.fieldInfos.hasDocValues() ? "docValues" : "no docValues") + "; " +
double segmentMB = (merge.info.sizeInBytes()/1024./1024.);
infoStream.message("IW", "merge codec=" + codec + " maxDoc=" + merge.info.info.maxDoc() +
    "; merged segment has " +
    (mergeState.mergeFieldInfos.hasVectors() ? "vectors" : "no vectors") + "; " +
    (mergeState.mergeFieldInfos.hasNorms() ? "norms" : "no norms") + "; " +
    (mergeState.mergeFieldInfos.hasDocValues() ? "docValues" : "no docValues") + "; " +
if (coreFieldInfos.hasVectors()) { // open term vector files only as needed
  termVectorsReaderOrig = si.info.getCodec().termVectorsFormat().vectorsReader(cfsDir, si.info, coreFieldInfos, context);
} else {
if (fieldInfos.hasVectors() && mergeDocStores) {
  for (int i = 0; i < IndexFileNames.VECTOR_EXTENSIONS.length; i++) {
    files.add(segment + "." + IndexFileNames.VECTOR_EXTENSIONS[i]);
if (fieldInfos.hasVectors()) {
  for (int i = 0; i < VECTOR_EXTENSIONS.length; i++) {
    files.add(segment + "." + VECTOR_EXTENSIONS[i]);
/**
 * Merges the readers specified by the {@link #add} method
 * into the directory passed to the constructor.
 * @param mergeDocStores if false, we will not merge the
 *  stored fields nor vectors files
 * @return The number of documents that were merged
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
final int merge(boolean mergeDocStores) throws CorruptIndexException, IOException {

  this.mergeDocStores = mergeDocStores;

  // NOTE: it's important to add calls to
  // checkAbort.work(...) if you make any changes to this
  // method that will spend a lot of time.  The frequency
  // of this check impacts how long
  // IndexWriter.close(false) takes to actually stop the
  // threads.

  mergedDocs = mergeFields();
  mergeTerms();
  mergeNorms();

  if (mergeDocStores && fieldInfos.hasVectors())
    mergeVectors();

  return mergedDocs;
}
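The mergeDocStores flag lets a caller skip copying stored fields and term vectors when the segments being merged already share a doc store. Below is a minimal caller sketch of that idea; the SegmentMerger(Directory, String) constructor and the add(IndexReader) signature are assumptions for illustration, not quoted from a specific Lucene release.

// Hedged sketch: driving the mergeDocStores optimization from a caller.
// The constructor and add() signature used here are assumed.
int mergeSegments(Directory dir, String mergedName,
                  List<IndexReader> readers,
                  boolean segmentsShareDocStore)
    throws CorruptIndexException, IOException {
  SegmentMerger merger = new SegmentMerger(dir, mergedName); // assumed ctor
  for (IndexReader reader : readers) {
    merger.add(reader); // register each source segment reader
  }
  // Skip stored fields / term vectors when the doc store is already shared.
  return merger.merge(!segmentsShareDocStore);
}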
/**
 * Merges the readers specified by the {@link #add} method
 * into the directory passed to the constructor.
 * @return The number of documents that were merged
 * @throws IOException if there is a low-level IO error
 */
final int merge() throws IOException {
  int value;

  value = mergeFields();
  mergeTerms();
  mergeNorms();

  if (fieldInfos.hasVectors())
    mergeVectors();

  if (useCompoundFile)
    createCompoundFile();

  return value;
}
infoStream.message("DWPT", "new segment has " + (flushState.liveDocs == null ? 0 : flushState.delCountOnFlush) + " deleted docs");
infoStream.message("DWPT", "new segment has " +
    (flushState.fieldInfos.hasVectors() ? "vectors" : "no vectors") + "; " +
    (flushState.fieldInfos.hasNorms() ? "norms" : "no norms") + "; " +
    (flushState.fieldInfos.hasDocValues() ? "docValues" : "no docValues") + "; " +
private void initialize(SegmentInfo si) throws IOException {
  segment = si.name;

  // Use compound file directory for some files, if it exists
  Directory cfsDir = directory();
  if (directory().fileExists(segment + ".cfs")) {
    cfsReader = new CompoundFileReader(directory(), segment + ".cfs");
    cfsDir = cfsReader;
  }

  // No compound file exists - use the multi-file format
  fieldInfos = new FieldInfos(cfsDir, segment + ".fnm");
  fieldsReader = new FieldsReader(cfsDir, segment, fieldInfos);

  tis = new TermInfosReader(cfsDir, segment, fieldInfos);

  // NOTE: the bitvector is stored using the regular directory, not cfs
  if (hasDeletions(si))
    deletedDocs = new BitVector(directory(), segment + ".del");

  // make sure that all index files have been read or are kept open
  // so that if an index update removes them we'll still have them
  freqStream = cfsDir.openFile(segment + ".frq");
  proxStream = cfsDir.openFile(segment + ".prx");

  openNorms(cfsDir);

  if (fieldInfos.hasVectors()) { // open term vector files only as needed
    termVectorsReader = new TermVectorsReader(cfsDir, segment, fieldInfos);
  }
}
openNorms(cfsDir, readBufferSize);

if (doOpenStores && fieldInfos.hasVectors()) { // open term vector files only as needed
  final String vectorsSegment;
  if (si.getDocStoreOffset() != -1)