/** Closes this view by closing the wrapped reader; any close failure propagates. */
@Override
public void close() throws IOException {
    reader.close();
}
/**
 * Returns the next buffer read from the spill file, or {@code null} when no data can be
 * served right now (end of file reached, or the spill is still being written).
 *
 * <p>NOTE(review): while {@code isSpillInProgress} is true this returns {@code null}
 * rather than blocking — callers are presumably expected to retry later; confirm with
 * the caller's availability protocol.
 */
@Override
public Buffer getNextBuffer() throws IOException, InterruptedException {
    if (fileReader.hasReachedEndOfFile() || isSpillInProgress) {
        return null;
    }
    // TODO This is fragile as we implicitly expect that multiple calls to
    // this method don't happen before recycling buffers returned earlier.
    // requestBufferBlocking() may block until a previously returned buffer is recycled.
    Buffer buffer = bufferPool.requestBufferBlocking();
    fileReader.readInto(buffer);
    return buffer;
}
/** Deletes the underlying file channel by delegating to the wrapped reader. */
@Override
public void deleteChannel() {
    reader.deleteChannel();
}
/**
 * Recycles a memory segment by immediately reissuing it as the target of the next
 * asynchronous read from the wrapped reader. An I/O failure is rethrown unchecked
 * with its cause preserved, since this callback cannot declare {@link IOException}.
 */
@Override
public void recycle(MemorySegment segment) {
    NetworkBuffer refillTarget = new NetworkBuffer(segment, this);
    try {
        reader.readInto(refillTarget);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
}
/** Returns true when a buffer is already staged, or the file still holds unread data. */
@Override
public synchronized boolean isAvailable() {
    return nextBuffer != null || !fileReader.hasReachedEndOfFile();
}
private void clearMerged(ChannelDeleteRegistry<Tuple2<Integer, T>> channelDeleteRegistry, Set<AsynchronousPartitionedStreamFileReaderDelegate> allReaders) throws IOException { // close the reader and delete the underlying file for already merged channels for (AsynchronousPartitionedStreamFileReaderDelegate reader : allReaders) { // close the file reader reader.close(); channelDeleteRegistry.unregisterOpenChannel(reader.getReader()); // delete the file reader.getReader().deleteChannel(); channelDeleteRegistry.unregisterChannelToBeDelete(reader.getReader().getChannelID()); } allReaders.clear(); }
/** Exposes the channel ID of the wrapped reader. */
@Override
public ID getChannelID() {
    return reader.getChannelID();
}
/** Closes the wrapped reader and deletes its backing resource in one delegated call. */
@Override
public void closeAndDelete() throws IOException {
    reader.closeAndDelete();
}
/**
 * Buffer-recycling callback: the freed segment is wrapped and handed straight back
 * to the reader for the next read. Checked I/O failures are rethrown unchecked,
 * keeping the original exception as the cause.
 */
@Override
public void recycle(MemorySegment segment) {
    try {
        NetworkBuffer next = new NetworkBuffer(segment, this);
        reader.readInto(next);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
}
/** Available if either a pre-fetched buffer exists or the file has not been exhausted. */
@Override
public synchronized boolean isAvailable() {
    boolean hasStagedBuffer = nextBuffer != null;
    return hasStagedBuffer || !fileReader.hasReachedEndOfFile();
}
/** Delegates the combined close-and-delete operation to the wrapped reader. */
@Override
public void closeAndDelete() throws IOException {
    reader.closeAndDelete();
}
@Nullable private Buffer requestAndFillBuffer() throws IOException, InterruptedException { assert Thread.holdsLock(this); if (fileReader.hasReachedEndOfFile()) { return null; } // TODO This is fragile as we implicitly expect that multiple calls to // this method don't happen before recycling buffers returned earlier. Buffer buffer = bufferPool.requestBufferBlocking(); fileReader.readInto(buffer); return buffer; }
/** Closes the reader, then drops all references to the free segments. */
void close() throws IOException {
    reader.close();
    freeSegments.clear();
}
}
/**
 * Issues the next asynchronous read request if any partition still has unread bytes.
 * Skips partitions that are already fully consumed, then reads up to the current
 * partition's end (capped at the segment capacity) starting at {@code nextOffset}.
 */
private void sendRequestIfFeasible(MemorySegment memorySegment) throws IOException {
    long nextReadLength = 0;
    // Find the first partition whose end lies beyond the current read offset.
    while (nextPartitionIdx < partitionIndices.size()) {
        PartitionIndex partitionIndex = partitionIndices.get(nextPartitionIdx);
        long partitionEndOffset = partitionIndex.getStartOffset() + partitionIndex.getLength();
        // Invariant: nextOffset never runs past the end of the partition being read.
        assert partitionEndOffset >= nextOffset;
        if (partitionEndOffset > nextOffset) {
            // Never read across a partition boundary or past the segment capacity.
            nextReadLength = Math.min(partitionEndOffset - nextOffset, memorySegment.size());
            break;
        }
        // This partition is fully consumed; move on to the next one.
        nextPartitionIdx++;
    }
    if (nextReadLength > 0) {
        // This object serves as the recycler for the issued buffer.
        Buffer buffer = new NetworkBuffer(memorySegment, this);
        reader.readInto(buffer, nextReadLength);
        nextOffset += nextReadLength;
    }
}
/**
 * Reports whether data can currently be served: either a buffer is already
 * queued up, or the backing file still has bytes left to read.
 */
@Override
public synchronized boolean isAvailable() {
    if (nextBuffer == null) {
        return !fileReader.hasReachedEndOfFile();
    }
    return true;
}
@Nullable private Buffer requestAndFillBuffer() throws IOException, InterruptedException { assert Thread.holdsLock(this); if (fileReader.hasReachedEndOfFile()) { return null; } // TODO This is fragile as we implicitly expect that multiple calls to // this method don't happen before recycling buffers returned earlier. Buffer buffer = bufferPool.requestBufferBlocking(); fileReader.readInto(buffer); return buffer; }
/** Closes the reader; no memory segments remain to return, so yields an empty list. */
@Override
public List<MemorySegment> close() throws IOException {
    reader.close();
    return Collections.emptyList();
}
/**
 * Creates a reader for compressed blocks backed by a buffer-file reader.
 *
 * <p>If the preferred block size is more than twice the segment size, blocks are
 * decompressed via an intermediate copy buffer ({@code copyCompress} mode) sized to
 * {@code preferBlockSize}; otherwise segments are used directly with
 * {@code segmentSize}-sized blocks. Two read requests are issued up front, each
 * backed by a heap segment large enough to hold a maximally compressed block.
 */
public CompressedBlockChannelReader(IOManager ioManager, ID channel, LinkedBlockingQueue<MemorySegment> blockQueue, BlockCompressionFactory codecFactory, int preferBlockSize, int segmentSize) throws IOException {
    this.reader = ioManager.createBufferFileReader(channel, this);
    this.blockQueue = blockQueue;
    // Use an intermediate copy buffer only when blocks are much larger than segments.
    copyCompress = preferBlockSize > segmentSize * 2;
    int blockSize = copyCompress ? preferBlockSize : segmentSize;
    this.decompressor = codecFactory.getDecompressor();
    // Holds the first asynchronous failure; checked elsewhere before serving data.
    cause = new AtomicReference<>();
    if (copyCompress) {
        this.buf = new byte[blockSize];
        this.bufWrapper = ByteBuffer.wrap(buf);
    }
    // Compressor is only needed here to size the read segments for the worst case.
    AbstractBlockCompressor compressor = codecFactory.getCompressor();
    // Pre-issue two reads so data is in flight before the first block is requested.
    for (int i = 0; i < 2; i++) {
        MemorySegment segment = MemorySegmentFactory.wrap(new byte[compressor.getMaxCompressedSize(blockSize)]);
        reader.readInto(new NetworkBuffer(segment, this));
    }
}
@Nullable private Buffer requestAndFillBuffer() throws IOException, InterruptedException { assert Thread.holdsLock(this); if (fileReader.hasReachedEndOfFile()) { return null; } // TODO This is fragile as we implicitly expect that multiple calls to // this method don't happen before recycling buffers returned earlier. Buffer buffer = bufferPool.requestBufferBlocking(); fileReader.readInto(buffer); return buffer; }
/**
 * Releases all resources exactly once: the CAS on {@code isReleased} guarantees the
 * teardown sequence (delete spill file, close reader, destroy buffer pool) runs only
 * for the first caller; subsequent calls are no-ops.
 */
@Override
public void releaseAllResources() throws IOException {
    if (isReleased.compareAndSet(false, true)) {
        // TODO This can block until all buffers are written out to
        // disk if a spill is in-progress before deleting the file.
        // It is possibly called from the Netty event loop threads,
        // which can bring down the network.
        spillWriter.closeAndDelete();
        fileReader.close();
        bufferPool.destroy();
    }
}