/**
 * If the current segment has too many points then we spill over to temp files / offline sort.
 * Copies every point buffered in {@code heapPointWriter} into a newly created
 * {@code offlinePointWriter} and then drops the heap buffer so subsequent adds go to disk.
 */
private void spillToOffline() throws IOException {

  // For each .add we just append to this input file, then in .finish we sort this input and recursively build the tree:
  offlinePointWriter = new OfflinePointWriter(tempDir, tempFileNamePrefix, packedBytesLength, longOrds, "spill", 0, singleValuePerDoc);
  // Keep a handle on the underlying output so it can be managed (e.g. closed/deleted) later:
  tempInput = offlinePointWriter.out;
  PointReader reader = heapPointWriter.getReader(0, pointCount);
  for(int i=0;i<pointCount;i++) {
    boolean hasNext = reader.next();
    assert hasNext;
    // The loop index doubles as the point's ord; docIDs come straight from the heap writer's parallel array:
    offlinePointWriter.append(reader.packedValue(), i, heapPointWriter.docIDs[i]);
  }

  // All points now live offline; release the heap buffer:
  heapPointWriter = null;
}
/**
 * Visits all docIDs and packed values in a single leaf block.
 * First reads the block's docIDs into {@code state.scratchDocIDs}, then re-reads the
 * block to decode each packed value and hand (docID, value) pairs to {@code state.visitor}.
 */
public void visitLeafBlockValues(IndexTree index, IntersectState state) throws IOException {

  // Leaf node; scan and filter all points in this block:
  int count = readDocIDs(state.in, index.getLeafBlockFP(), state.scratchDocIDs);

  // Again, this time reading values and checking with the visitor
  visitDocValues(state.commonPrefixLengths, state.scratchDataPackedValue, state.scratchMinIndexPackedValue, state.scratchMaxIndexPackedValue, state.in, state.scratchDocIDs, count, state.visitor);
}
@Override
public void pushRight() {
  // Descend to the right child first, then refresh the per-node cached state
  // (presumably leaf FP / split info — confirm against setNodeData) for the new node:
  super.pushRight();
  setNodeData();
}
/**
 * Pull a partition back into heap once the point count is low enough while recursing.
 * Streams {@code source.count} points (starting at {@code source.start}) out of the
 * source writer into a fresh {@link HeapPointWriter} and returns a slice over it.
 * On any failure the error is routed through {@code verifyChecksum} so a corrupt
 * temp file is reported as such rather than as a generic read error.
 */
private PathSlice switchToHeap(PathSlice source, List<Closeable> toCloseHeroically) throws IOException {
  // Safe to narrow: callers only switch to heap when the count is small enough.
  int count = Math.toIntExact(source.count);
  // Not inside the try because we don't want to close it here:
  PointReader reader = source.writer.getSharedReader(source.start, source.count, toCloseHeroically);
  try (PointWriter writer = new HeapPointWriter(count, count, packedBytesLength, longOrds, singleValuePerDoc)) {
    for(int i=0;i<count;i++) {
      boolean hasNext = reader.next();
      assert hasNext;
      // Preserve each point's value, ord and docID exactly as read:
      writer.append(reader.packedValue(), reader.ord(), reader.docID());
    }
    return new PathSlice(writer, 0, count);
  } catch (Throwable t) {
    // Re-throws t, possibly wrapped, after checksum verification of the source:
    throw verifyChecksum(t, source.writer);
  }
}
@Override
public void pushLeft() {
  // Descend to the left child first, then refresh the per-node cached state
  // (presumably leaf FP / split info — confirm against setNodeData) for the new node:
  super.pushLeft();
  setNodeData();
}
@Override
public void intersect(IntersectVisitor visitor) throws IOException {
  // Start the recursive traversal from the root, covering the tree's full value range:
  intersect(getIntersectState(visitor), minPackedValue, maxPackedValue);
}
/**
 * Write a field from a {@link MutablePointValues}. This is faster than feeding the
 * points one by one through {@link BKDWriter#add}, because the values can be
 * reordered in place before being written, and no transient disk is needed for
 * that reordering. Dispatches to the single-dimension or N-dimension writer.
 */
public long writeField(IndexOutput out, String fieldName, MutablePointValues reader) throws IOException {
  if (numDataDims != 1) {
    return writeFieldNDims(out, fieldName, reader);
  }
  return writeField1Dim(out, fieldName, reader);
}
/**
 * Returns the packed split-value scratch buffer for the current tree level.
 * Only legal on inner nodes, never on a leaf.
 */
public byte[] getSplitPackedValue() {
  assert !isLeafNode();
  final byte[] packed = splitPackedValueStack[level];
  assert packed != null: "level=" + level;
  return packed;
}
/**
 * Returns the current inner node's split value as a view into the split-values
 * stack: the reusable scratch ref is re-pointed at this level's byte array,
 * offset to the split dimension. Only legal on inner nodes.
 */
@Override
public BytesRef getSplitDimValue() {
  assert !isLeafNode();
  // These two fields are independent; re-point the scratch ref at this level's data:
  scratch.offset = splitDim * bytesPerDim;
  scratch.bytes = splitValuesStack[level];
  return scratch;
}
/** Returns the file pointer of this leaf's on-disk block; only legal on leaf nodes. */
@Override
public long getLeafBlockFP() {
  assert isLeafNode();
  final long fp = leafBlockFP;
  return fp;
}
@Override
public void pop() {
  super.pop();
  // Invalidate the cached leaf file pointer; -1 marks "no leaf FP at this node":
  leafBlockFP = -1;
}
}
public LegacyIndexTree() {
  // Initialize per-node cached state for the root:
  setNodeData();
  // Wire the reusable scratch ref permanently to the split-dim value buffer;
  // its length is fixed at one dimension's width:
  scratch.bytes = splitDimValue;
  scratch.length = bytesPerDim;
}
@Override
public byte[] packedValue() {
  // Decode the current point's value into the reusable scratch buffer.
  // NOTE: the same array instance is returned on every call, so callers must
  // copy it if they need the value beyond the next next()/packedValue() call.
  readPackedValue(curRead, scratch);
  return scratch;
}
/**
 * Opens a reader over a sub-range [start, start+length) of this writer's points.
 * Only legal after the writer has been closed, and the range must lie within
 * the number of points actually written.
 */
@Override
public PointReader getReader(long start, long length) throws IOException {
  assert closed;
  assert start + length <= count: "start=" + start + " length=" + length + " count=" + count;
  // expectedCount == 0 means "unknown/unchecked"; otherwise the write count must match it:
  assert expectedCount == 0 || count == expectedCount;
  return new OfflinePointReader(tempDir, name, packedBytesLength, start, length, longOrds, singleValuePerDoc);
}
/**
 * Counts the leaf nodes below {@code node} by recursively walking the implicit
 * (heap-numbered) binary tree. As the name warns, this is a slow O(#leaves) walk.
 */
private int getNumLeavesSlow(int node) {
  final int pastLastLeaf = 2 * leafNodeOffset;
  if (node >= pastLastLeaf) {
    // Beyond the addressable leaf range: nothing below.
    return 0;
  }
  if (node >= leafNodeOffset) {
    // Within the leaf range: exactly one leaf.
    return 1;
  }
  // Inner node: sum the leaves under both children (heap numbering: 2n and 2n+1).
  return getNumLeavesSlow(node * 2) + getNumLeavesSlow(node * 2 + 1);
}
}
/** Forwards each visited (docID, packedValue) pair straight to the underlying writer. */
public void visit(int docID, byte[] packedValue) throws IOException {
  writer.add(packedValue, docID);
}
/** Only valid after pushLeft or pushRight, not pop! Returns the current inner node's split dimension. */
public int getSplitDim() {
  assert !isLeafNode();
  final int dim = splitDim;
  return dim;
}
/** Returns the file pointer of the current leaf's block, taken from this level's stack slot. */
@Override
public long getLeafBlockFP() {
  assert isLeafNode(): "nodeID=" + nodeID + " is not a leaf";
  final long fp = leafBlockFPStack[level];
  return fp;
}
/**
 * Returns the reusable scratch ref holding the current split value.
 * Only legal on inner nodes; the ref's contents are maintained elsewhere.
 */
@Override
public BytesRef getSplitDimValue() {
  assert !isLeafNode();
  return scratch;
}
@Override
public void pop() {
  super.pop();
  // After ascending (super.pop() presumably adjusts `level` — confirm),
  // restore the split dimension that was in effect at the parent level:
  splitDim = splitDims[level];
}