@Override
public EncodedReader encodedReader(Object fileKey, DataCache dataCache, DataReader dataReader,
    PoolFactory pf, IoTrace trace, boolean useCodecPool, String tag) throws IOException {
  // Factory method: bundle this reader's file-level metadata (types, schema, compression,
  // writer version, buffer size, row-index stride) with the caller-supplied cache/reader/pools
  // into a new EncodedReaderImpl instance.
  EncodedReaderImpl reader = new EncodedReaderImpl(fileKey, types, getSchema(), compressionKind,
      getWriterVersion(), bufferSize, rowIndexStride, dataCache, dataReader, pf, trace,
      useCodecPool, tag);
  return reader;
}
}
public void init(Object fileKey, int stripeIx, int rgIx, int columnCount) {
  // Reuse the existing key object when possible to avoid churn; create it lazily on first use.
  if (batchKey != null) {
    batchKey.set(fileKey, stripeIx, rgIx);
  } else {
    batchKey = new OrcBatchKey(fileKey, stripeIx, rgIx);
  }
  resetColumnArrays(columnCount);
}
private static Pools createPools(PoolFactory pf) {
  // Build the pair of object pools (encoded-column-batch and column-stream-data)
  // from the supplied factory.
  final Pools result = new Pools();
  result.ecbPool = pf.createEncodedColumnBatchPool();
  result.csdPool = pf.createColumnStreamDataPool();
  return result;
}
@Override
public DiskRangeList createCacheChunk(MemoryBuffer buffer, long offset, long end) {
  // Wrap the cached buffer and its file range in a CacheChunk list node.
  final CacheChunk chunk = new CacheChunk(buffer, offset, end);
  return chunk;
}
};
@Override
public OrcEncodedColumnBatch create() {
  // Pool element factory: hand back a fresh, empty batch.
  return new OrcEncodedColumnBatch();
}

@Override
@Override
public OrcBatchKey clone() throws CloneNotSupportedException {
  // Copy-constructs a new key rather than delegating to super.clone();
  // copies the three identifying fields (fileKey, stripeIx, rgIx).
  final OrcBatchKey copy = new OrcBatchKey(fileKey, stripeIx, rgIx);
  return copy;
}
}
public void logPartialCb(DiskRange current) {
  // Records a "partial compression block" event: 1 header slot plus 3 slots
  // written by logRange. Best-effort: silently drops the event when tracing
  // is disabled (log == null) or the trace buffer is full.
  if (log == null) return;
  final int base = this.offset;
  if (base + 4 > log.length) return;
  log[base] = makeIntPair(PARTIAL_CB, 0);
  logRange(current, base + 1);
  this.offset += 4;
}
public void reset() {
  // If configured to always dump and this trace hasn't been dumped yet,
  // flush it to the logger before discarding the recorded events.
  if (isAlwaysDump && !hasDumped) {
    dumpLog(LOG);
  }
  offset = 0;
  hasDumped = false;
}
@Override
public String toString() {
  // Augments the parent description with whether original data is retained
  // and whether the buffer was swapped out (originalCbIndex == -1 marks replacement).
  final boolean originalIsSet = this.originalData != null;
  final boolean bufferWasReplaced = originalCbIndex == -1;
  return super.toString()
      + ", original is set " + originalIsSet
      + ", buffer was replaced " + bufferWasReplaced;
}
private void releaseBuffers(Collection<ByteBuffer> toRelease, boolean isFromDataReader) {
  // Release every buffer in the collection; a null collection is a no-op.
  if (toRelease == null) {
    return;
  }
  toRelease.forEach(buf -> releaseBuffer(buf, isFromDataReader));
}
@Override public void seek(PositionProvider[] index) throws IOException { // This string reader should simply redirect to its own seek (what other types already do). this.seek(index[columnId]); }
@Override public void seek(PositionProvider[] index) throws IOException { // This string reader should simply redirect to its own seek (what other types already do). this.seek(index[columnId]); }
public void logTreeReaderNextVector(int idx) {
  // Records a single-slot "tree reader nextVector" event tagged with idx.
  // Best-effort: silently dropped when tracing is off or the buffer is full.
  if (log == null) return;
  final int base = this.offset;
  if (base + 1 > log.length) return;
  log[base] = makeIntPair(TREE_READER_NEXT_VECTOR, idx);
  this.offset += 1;
}
@Override
public void resetBeforeOffer(OrcEncodedColumnBatch batch) {
  // Clear the batch's state before it is returned to the pool.
  batch.reset();
}
});
@Override
public DiskRangeList createCacheChunk(MemoryBuffer buffer, long offset, long end) {
  // Wrap the cached buffer and its file range in a CacheChunk list node.
  final CacheChunk chunk = new CacheChunk(buffer, offset, end);
  return chunk;
}
};
@Override
public OrcEncodedColumnBatch create() {
  // Pool element factory: hand back a fresh, empty batch.
  return new OrcEncodedColumnBatch();
}

@Override
public void logCompositeOrcCb(int lastChunkTaken, int lastChunkRemaining, DiskRange cc) {
  // Records a "composite ORC compression block" event: header slot (tag + taken),
  // one slot for the remaining count, then 3 slots written by logRange — 5 total.
  // Best-effort: silently dropped when tracing is off or the buffer is full.
  if (log == null) return;
  final int base = this.offset;
  if (base + 5 > log.length) return;
  log[base] = makeIntPair(COMPOSITE_ORC_CB, lastChunkTaken);
  log[base + 1] = lastChunkRemaining;
  logRange(cc, base + 2);
  this.offset += 5;
}
public void logInvalidOrcCb(long cbStartOffset, long end) {
  // Records an "invalid ORC compression block" event: header slot carrying the
  // block length (end - cbStartOffset, narrowed to int) and one slot for the
  // start offset. Best-effort: dropped when tracing is off or the buffer is full.
  if (log == null) return;
  final int base = this.offset;
  if (base + 2 > log.length) return;
  log[base] = makeIntPair(INVALID_ORC_CB, (int)(end - cbStartOffset));
  log[base + 1] = cbStartOffset;
  this.offset += 2;
}
@Override
public DiskRangeList createCacheChunk(
    MemoryBuffer buffer, long startOffset, long endOffset) {
  // Wrap the cached buffer and its file range in a CacheChunk list node.
  final CacheChunk chunk = new CacheChunk(buffer, startOffset, endOffset);
  return chunk;
}
}, gotAllData);
public void logSargResult(int stripeIx, int rgCount) {
  // Records a SARG (search-argument) evaluation result for one stripe:
  // header slot (tag + stripe index) plus one slot for the row-group count.
  // Best-effort: silently dropped when tracing is off or the buffer is full.
  if (log == null) return;
  final int base = this.offset;
  if (base + 2 > log.length) return;
  log[base] = makeIntPair(SARG_RESULT, stripeIx);
  log[base + 1] = rgCount;
  this.offset += 2;
}