/**
 * Creates a new buffer spiller, spilling to one of the I/O manager's temp directories.
 *
 * @param ioManager The I/O manager for access to the temp directories.
 * @param pageSize The page size used to re-create spilled buffers.
 * @throws IOException Thrown if the temp files for spilling cannot be initialized.
 */
public BufferSpiller(IOManager ioManager, int pageSize) throws IOException {
    this.pageSize = pageSize;

    // Direct buffers for reading spilled data back; little-endian byte order
    // to match the format this spiller writes.
    this.readBuffer = ByteBuffer.allocateDirect(READ_BUFFER_SIZE);
    this.readBuffer.order(ByteOrder.LITTLE_ENDIAN);

    // 16-byte per-record header buffer, same byte order as the read buffer.
    this.headBuffer = ByteBuffer.allocateDirect(16);
    this.headBuffer.order(ByteOrder.LITTLE_ENDIAN);

    // Round-robin over the I/O manager's spilling directories (shared static
    // counter DIRECTORY_INDEX) so concurrent spillers spread across temp dirs.
    File[] tempDirs = ioManager.getSpillingDirectories();
    this.tempDir = tempDirs[DIRECTORY_INDEX.getAndIncrement() % tempDirs.length];

    // Random hex prefix makes spill file names unique per spiller instance.
    byte[] rndBytes = new byte[32];
    ThreadLocalRandom.current().nextBytes(rndBytes);
    this.spillFilePrefix = StringUtils.byteToHexString(rndBytes) + '.';

    // prepare for first contents
    createSpillingChannel();
}
/** Shuts down the shared I/O manager once all tests in this class have run. */
@AfterClass
public static void shutdownIOManager() {
    ioManager.shutdown();
}
// Shuts down the mock environment's I/O manager and asserts that it reports a
// proper shutdown (see isProperlyShutDown: closed and worker threads terminated).
private void shutdownIOManager() throws Exception {
    this.mockEnv.getIOManager().shutdown();
    Assert.assertTrue("IO Manager has not properly shut down.",
        this.mockEnv.getIOManager().isProperlyShutDown());
}
@Override public int releaseMemory() throws IOException { synchronized (buffers) { ResultSubpartitionView view = readView; if (view != null && view.getClass() == SpillableSubpartitionView.class) { // If there is a spillable view, it's the responsibility of the // view to release memory. SpillableSubpartitionView spillableView = (SpillableSubpartitionView) view; return spillableView.releaseMemory(); } else if (spillWriter == null) { // No view and in-memory => spill to disk spillWriter = ioManager.createBufferFileWriter(ioManager.createChannel()); int numberOfBuffers = buffers.size(); long spilledBytes = spillFinishedBufferConsumers(isFinished); int spilledBuffers = numberOfBuffers - buffers.size(); LOG.debug("Spilling {} bytes ({} buffers} for sub partition {} of {}.", spilledBytes, spilledBuffers, index, parent.getPartitionId()); return spilledBuffers; } } // Else: We have already spilled and don't hold any buffers return 0; }
} else { this.writer = this.ioManager.createBlockChannelWriter(this.ioManager.createChannel());
/**
 * Creates an iterator over the records in the given channel, opening a block
 * channel reader for it through the given I/O manager and delegating to the
 * reader-based constructor.
 *
 * @param ioAccess The I/O manager used to open the block channel reader.
 * @param channel The ID of the channel to read from.
 * @param returnQueue The queue the reader pushes full (read) segments to.
 * @param segments The memory segments available to the reader for reading.
 * @param freeMemTarget The list that receives the segments after use
 *                      (presumably once the iterator is exhausted — confirm
 *                      against the delegated constructor).
 * @param accessors The serializer used to deserialize the records.
 * @param numBlocks The number of blocks to read from the channel.
 * @throws IOException Thrown if the block channel reader could not be opened.
 */
public ChannelReaderInputViewIterator(IOManager ioAccess, FileIOChannel.ID channel,
        LinkedBlockingQueue<MemorySegment> returnQueue, List<MemorySegment> segments,
        List<MemorySegment> freeMemTarget, TypeSerializer<E> accessors, int numBlocks)
        throws IOException {
    this(ioAccess.createBlockChannelReader(channel, returnQueue), returnQueue,
        segments, freeMemTarget, accessors, numBlocks);
}
/**
 * Creates a mutable hash table that can be re-opened for multiple probe phases.
 * Build-side partitions are kept alive across probes (keepBuildSidePartitions),
 * and a dedicated channel enumerator is created for spilling in-memory partitions.
 *
 * @param buildSideSerializer Serializer for build-side records.
 * @param probeSideSerializer Serializer for probe-side records.
 * @param buildSideComparator Comparator for build-side records.
 * @param probeSideComparator Comparator for probe-side records.
 * @param comparator Pair comparator matching probe-side against build-side records.
 * @param memorySegments The memory segments available to the hash table.
 * @param ioManager The I/O manager used for spilling.
 * @param useBitmapFilters Whether to use bitmap (Bloom-style) filters — semantics
 *                         defined by the superclass.
 */
public ReOpenableMutableHashTable(TypeSerializer<BT> buildSideSerializer,
        TypeSerializer<PT> probeSideSerializer, TypeComparator<BT> buildSideComparator,
        TypeComparator<PT> probeSideComparator, TypePairComparator<PT, BT> comparator,
        List<MemorySegment> memorySegments, IOManager ioManager, boolean useBitmapFilters) {
    super(buildSideSerializer, probeSideSerializer, buildSideComparator,
        probeSideComparator, comparator, memorySegments, ioManager, useBitmapFilters);
    // Keep build-side partitions so the table can be probed again after a probe pass.
    keepBuildSidePartitions = true;
    spilledInMemoryPartitions = ioManager.createChannelEnumerator();
}
/**
 * Opens a block channel writer for the given channel. Written segments are handed
 * back through the writer's own return queue, enabling asynchronous use.
 *
 * @param channelID The descriptor for the channel to write to.
 * @return A block channel writer that writes to the given channel.
 * @throws IOException Thrown, if the channel for the writer could not be opened.
 */
public BlockChannelWriter<MemorySegment> createBlockChannelWriter(FileIOChannel.ID channelID)
        throws IOException {
    // Create a dedicated return queue and delegate to the two-argument variant.
    final LinkedBlockingQueue<MemorySegment> returnQueue =
        new LinkedBlockingQueue<MemorySegment>();
    return createBlockChannelWriter(channelID, returnQueue);
}
final BulkBlockChannelReader reader = this.ioManager.createBulkBlockChannelReader(p.getBuildSideChannel().getChannelID(), this.availableMemory, p.getBuildSideBlockCount()); segments.add(getNextBuffer()); final BlockChannelReader<MemorySegment> inReader = this.ioManager.createBlockChannelReader(p.getBuildSideChannel().getChannelID()); final ChannelReaderInputView inView = new HeaderlessChannelReaderInputView(inReader, segments, p.getBuildSideBlockCount(), p.getLastSegmentLimit(), false);
/**
 * Checks whether the IO manager has been properly shut down. It counts as properly
 * shut down when it is closed, all of its reader and writer threads have terminated,
 * and the superclass reports a proper shutdown as well.
 *
 * @return True, if the IO manager has properly shut down, false otherwise.
 */
@Override
public boolean isProperlyShutDown() {
    // Guard clauses: any still-running worker thread means we are not shut down.
    for (ReaderThread readerThread : readers) {
        if (readerThread.getState() != Thread.State.TERMINATED) {
            return false;
        }
    }
    for (WriterThread writerThread : writers) {
        if (writerThread.getState() != Thread.State.TERMINATED) {
            return false;
        }
    }
    return isShutdown.get() && super.isProperlyShutDown();
}
/**
 * Creates a channel writer output view, either compressed or plain.
 *
 * @param ioManager The I/O manager used to open the underlying writer.
 * @param channel The ID of the channel to write to.
 * @param compressionEnable Whether to compress the written blocks.
 * @param compressionCodecFactory Factory for the compression codec (used only when
 *                                compression is enabled).
 * @param compressionBlockSize The block size used for compression.
 * @param segmentSize The size of the write segments for the uncompressed path.
 * @return A channel writer output view over the given channel.
 * @throws IOException Thrown if the underlying writer could not be opened.
 */
public static AbstractChannelWriterOutputView createOutputView(
        IOManager ioManager,
        FileIOChannel.ID channel,
        boolean compressionEnable,
        BlockCompressionFactory compressionCodecFactory,
        int compressionBlockSize,
        int segmentSize) throws IOException {
    if (!compressionEnable) {
        // Uncompressed path: block channel writer with two unpooled write segments.
        final BlockChannelWriter<MemorySegment> blockWriter =
            ioManager.createBlockChannelWriter(channel);
        return new HeaderlessChannelWriterOutputView(
            blockWriter,
            Arrays.asList(
                allocateUnpooledSegment(segmentSize),
                allocateUnpooledSegment(segmentSize)),
            segmentSize);
    }

    // Compressed path: wrap a buffer file writer with block compression.
    final BufferFileWriter bufferWriter = ioManager.createBufferFileWriter(channel);
    return new CompressedHeaderlessChannelWriterOutputView(
        bufferWriter, compressionCodecFactory, compressionBlockSize);
}
ioManager.deleteChannel(recordsChannel); recordsChannel = null; } catch (Throwable t) { ioManager.deleteChannel(keysChannel); keysChannel = null; } catch (Throwable t) {
/**
 * Restores this partition's build-side buffers from disk. This method is called
 * every time a multi-match hash map is opened again for a new probe input.
 *
 * @param ioManager The I/O manager used to read the spilled partition back.
 * @param availableMemory The memory segments into which the partition is read.
 * @throws IOException Thrown if reading the spilled partition from disk fails.
 */
void restorePartitionBuffers(IOManager ioManager, List<MemorySegment> availableMemory)
        throws IOException {
    final BulkBlockChannelReader reader = ioManager.createBulkBlockChannelReader(
        this.initialBuildSideChannel, availableMemory, this.initialPartitionBuffersCount);

    // NOTE(review): close() is called before getFullSegments() — presumably the bulk
    // reader completes all pending reads on close; confirm against BulkBlockChannelReader.
    reader.close();
    final List<MemorySegment> partitionBuffersFromDisk = reader.getFullSegments();
    this.partitionBuffers = (MemorySegment[]) partitionBuffersFromDisk.toArray(
        new MemorySegment[partitionBuffersFromDisk.size()]);

    // Reset overflow-bucket bookkeeping and mark the partition as restored.
    this.overflowSegments = new MemorySegment[2];
    this.numOverflowSegments = 0;
    this.nextOverflowBucket = 0;
    this.isRestored = true;
}
fileWriters[i] = ioManager.createStreamFileWriter(ioManager.createChannel(new File(path))); bytesWritten[i] = 0;
/**
 * Creates a new sorted data file under the partition's data root, assigning it the
 * next sequential file id.
 *
 * @param writeMemory The memory segments used for buffering the written data.
 * @return A buffer-backed sorted data file writing to a freshly created channel.
 * @throws IOException Thrown if the file channel could not be created.
 */
@Override
public SortedDataFile<T> createFile(List<MemorySegment> writeMemory) throws IOException {
    final int fileId = nextFileId++;
    final String spillPath =
        ExternalBlockShuffleUtils.generateSpillPath(partitionDataRootPath, fileId);
    return new BufferSortedDataFile<T>(
        ioManager.createChannel(new File(spillPath)), fileId, serialize, ioManager,
        writeMemory, serializerManager, numBytesOut, numBuffersOut);
}
}
/**
 * Creates a writer that compresses blocks before writing them to the given channel.
 *
 * @param ioManager The I/O manager used to open the underlying buffer file writer.
 * @param channel The ID of the channel to write to.
 * @param blockQueue The queue used to exchange memory segments with the caller.
 * @param codecFactory Factory for the block compressor.
 * @param preferBlockSize The preferred compression block size.
 * @param segmentSize The size of the memory segments being written.
 * @throws IOException Thrown if the underlying buffer file writer could not be opened.
 */
public CompressedBlockChannelWriter(IOManager ioManager, ID channel,
        LinkedBlockingQueue<MemorySegment> blockQueue, BlockCompressionFactory codecFactory,
        int preferBlockSize, int segmentSize) throws IOException {
    this.writer = ioManager.createBufferFileWriter(channel);
    this.blockQueue = blockQueue;

    // Copy-then-compress mode is used when the preferred block size exceeds twice
    // the segment size; segments are then staged into 'buf' before compression.
    copyCompress = preferBlockSize > segmentSize * 2;
    int blockSize = copyCompress ? preferBlockSize : segmentSize;
    this.compressor = codecFactory.getCompressor();

    if (copyCompress) {
        this.buf = new byte[blockSize];
        this.bufWrapper = ByteBuffer.wrap(buf);
    }

    // Pre-allocate two output buffers sized for the worst-case compressed block.
    for (int i = 0; i < 2; i++) {
        compressedBuffers.add(MemorySegmentFactory.wrap(
            new byte[compressor.getMaxCompressedSize(blockSize)]));
    }
}
@Override public int releaseMemory() throws IOException { synchronized (buffers) { ResultSubpartitionView view = readView; if (view != null && view.getClass() == SpillableSubpartitionView.class) { // If there is a spillable view, it's the responsibility of the // view to release memory. SpillableSubpartitionView spillableView = (SpillableSubpartitionView) view; return spillableView.releaseMemory(); } else if (spillWriter == null) { // No view and in-memory => spill to disk spillWriter = ioManager.createBufferFileWriter(ioManager.createChannel()); int numberOfBuffers = buffers.size(); long spilledBytes = spillFinishedBufferConsumers(isFinished); int spilledBuffers = numberOfBuffers - buffers.size(); LOG.debug("{}: Spilling {} bytes ({} buffers} for sub partition {} of {}.", parent.getOwningTaskName(), spilledBytes, spilledBuffers, index, parent.getPartitionId()); return spilledBuffers; } } // Else: We have already spilled and don't hold any buffers return 0; }
} else { this.writer = this.ioManager.createBlockChannelWriter(this.ioManager.createChannel());
/**
 * Opens a block channel reader for the given channel. The reader hands full memory
 * segments (with the read data) back through its own "return queue", enabling
 * asynchronous read implementations.
 *
 * @param channelID The descriptor for the channel to read from.
 * @return A block channel reader that reads from the given channel.
 * @throws IOException Thrown, if the channel for the reader could not be opened.
 */
public BlockChannelReader<MemorySegment> createBlockChannelReader(FileIOChannel.ID channelID)
        throws IOException {
    // Create a dedicated return queue and delegate to the two-argument variant.
    final LinkedBlockingQueue<MemorySegment> returnQueue =
        new LinkedBlockingQueue<MemorySegment>();
    return createBlockChannelReader(channelID, returnQueue);
}
/**
 * Creates a mutable hash table that can be re-opened for multiple probe phases.
 * Build-side partitions are kept alive across probes (keepBuildSidePartitions),
 * and a dedicated channel enumerator is created for spilling in-memory partitions.
 *
 * @param buildSideSerializer Serializer for build-side records.
 * @param probeSideSerializer Serializer for probe-side records.
 * @param buildSideComparator Comparator for build-side records.
 * @param probeSideComparator Comparator for probe-side records.
 * @param comparator Pair comparator matching probe-side against build-side records.
 * @param memorySegments The memory segments available to the hash table.
 * @param ioManager The I/O manager used for spilling.
 * @param useBitmapFilters Whether to use bitmap (Bloom-style) filters — semantics
 *                         defined by the superclass.
 */
public ReOpenableMutableHashTable(TypeSerializer<BT> buildSideSerializer,
        TypeSerializer<PT> probeSideSerializer, TypeComparator<BT> buildSideComparator,
        TypeComparator<PT> probeSideComparator, TypePairComparator<PT, BT> comparator,
        List<MemorySegment> memorySegments, IOManager ioManager, boolean useBitmapFilters) {
    super(buildSideSerializer, probeSideSerializer, buildSideComparator,
        probeSideComparator, comparator, memorySegments, ioManager, useBitmapFilters);
    // Keep build-side partitions so the table can be probed again after a probe pass.
    keepBuildSidePartitions = true;
    spilledInMemoryPartitions = ioManager.createChannelEnumerator();
}