@Override public void close() { // nothing in base class channel.close(); }
/**
 * Folds any failure from closing the channel into the supplied accumulator.
 *
 * @param accumulate failures accumulated so far
 * @return the accumulator, extended with any failure raised by the channel close
 */
public Throwable close(Throwable accumulate)
{
    return channel == null ? accumulate : channel.close(accumulate);
}
/**
 * Returns the buffer to the pool and closes the channel.
 *
 * Fix: the channel close is placed in a finally block so the channel is not
 * leaked if returning the buffer to the pool throws.
 */
@Override
public void close()
{
    try
    {
        BufferPool.put(buffer);
    }
    finally
    {
        channel.close();
    }
}
/**
 * Returns the buffer to the pool and closes the channel.
 *
 * Fix: the channel close is placed in a finally block so the channel is not
 * leaked if returning the buffer to the pool throws.
 */
@Override
public void close()
{
    try
    {
        BufferPool.put(buffer);
    }
    finally
    {
        channel.close();
    }
}
/**
 * Accumulates failures from closing the mapped regions (uncompressed files
 * only) and then the channel.
 *
 * @param accumulate failures accumulated so far
 * @return the accumulator, extended with any failures raised here
 */
public Throwable close(Throwable accumulate)
{
    // Regions are only released for the uncompressed case.
    if (regions != null && !compressed)
        accumulate = regions.close(accumulate);

    return channel == null ? accumulate : channel.close(accumulate);
}
/**
 * Accumulates failures from closing the mapped regions (uncompressed files
 * only) and then the channel.
 *
 * @param accumulate failures accumulated so far
 * @return the accumulator, extended with any failures raised here
 */
public Throwable close(Throwable accumulate)
{
    // Regions are only released for the uncompressed case.
    if (regions != null && !compressed)
        accumulate = regions.close(accumulate);

    return channel == null ? accumulate : channel.close(accumulate);
}
/**
 * Returns the buffer to the pool and closes the channel.
 *
 * Fix: the channel close is placed in a finally block so the channel is not
 * leaked if returning the buffer to the pool throws.
 */
@Override
public void close()
{
    try
    {
        BufferPool.put(buffer);
    }
    finally
    {
        channel.close();
    }
}
/**
 * Accumulates failures from closing the mapped regions (uncompressed files
 * only) and then the channel.
 *
 * @param accumulate failures accumulated so far
 * @return the accumulator, extended with any failures raised here
 */
public Throwable close(Throwable accumulate)
{
    // Regions are only released for the uncompressed case.
    if (regions != null && !compressed)
        accumulate = regions.close(accumulate);

    return channel == null ? accumulate : channel.close(accumulate);
}
/**
 * Maps the whole file read-only as a sequence of fixed-size pages of
 * 2^numPageBits bytes each (the last page may be shorter).
 *
 * Fix: the guard used {@code >} which admitted numPageBits == 31, but
 * {@code 1 << 31} overflows int to a negative page size — and a 2G page also
 * contradicts the "bigger than 1G" message. The guard now rejects 31 as well,
 * capping pages at 2^30 (1G).
 *
 * @param file        channel to map; closed before returning (the mappings
 *                    remain valid after the channel is closed)
 * @param numPageBits log2 of the page size, at most 30
 * @throws IllegalArgumentException if numPageBits would overflow an int page size
 */
@VisibleForTesting
protected MappedBuffer(ChannelProxy file, int numPageBits)
{
    // 2^30 (1G) is the largest power-of-two page representable as a positive int.
    if (numPageBits >= Integer.SIZE - 1)
        throw new IllegalArgumentException("page size can't be bigger than 1G");

    sizeBits = numPageBits;
    pageSize = 1 << sizeBits;
    position = 0;
    limit = capacity = file.size();
    pages = new MappedByteBuffer[(int) (file.size() / pageSize) + 1];

    try
    {
        long offset = 0;
        for (int i = 0; i < pages.length; i++)
        {
            // The last page covers only the remainder of the file.
            long mapSize = Math.min(pageSize, capacity - offset);
            pages[i] = file.map(MapMode.READ_ONLY, offset, mapSize);
            offset += mapSize;
        }
    }
    finally
    {
        // Safe to close here: a MappedByteBuffer stays valid after its channel closes.
        file.close();
    }
}
/**
 * Closes the channel and attempts to unmap the buffers, folding any failures
 * into the supplied accumulator.
 */
private Throwable close(Throwable accumulate)
{
    accumulate = channel.close(accumulate);

    /*
     * Try forcing the unmapping of the segments using undocumented, unsafe sun APIs.
     * If that is unavailable (non-Sun JVM), fall back to letting GC finalize the
     * mappings. Beware: once the unmap succeeds, any thread touching one of these
     * segments invokes undefined behaviour.
     */
    if (!FileUtils.isCleanerAvailable)
        return accumulate;

    return perform(accumulate, channel.filePath(), Throwables.FileOpType.READ,
                   of(buffers).map(b -> () ->
                   {
                       if (b != null)
                           FileUtils.clean(b);
                   }));
}
}
/**
 * Closes the channel and attempts to unmap the buffers, folding any failures
 * into the supplied accumulator.
 */
private Throwable close(Throwable accumulate)
{
    accumulate = channel.close(accumulate);

    /*
     * Try forcing the unmapping of the segments using undocumented, unsafe sun APIs.
     * If that is unavailable (non-Sun JVM), fall back to letting GC finalize the
     * mappings. Beware: once the unmap succeeds, any thread touching one of these
     * segments invokes undefined behaviour.
     */
    if (!FileUtils.isCleanerAvailable)
        return accumulate;

    return perform(accumulate, channel.filePath(), Throwables.FileOpType.READ,
                   of(buffers).map(b -> () ->
                   {
                       if (b != null)
                           FileUtils.clean(b);
                   }));
}
}
/**
 * Closes the channel and attempts to unmap the buffers, folding any failures
 * into the supplied accumulator.
 */
private Throwable close(Throwable accumulate)
{
    accumulate = channel.close(accumulate);

    /*
     * Try forcing the unmapping of the segments using undocumented, unsafe sun APIs.
     * If that is unavailable (non-Sun JVM), fall back to letting GC finalize the
     * mappings. Beware: once the unmap succeeds, any thread touching one of these
     * segments invokes undefined behaviour.
     */
    if (!FileUtils.isCleanerAvailable)
        return accumulate;

    return perform(accumulate, channel.filePath(), Throwables.FileOpType.READ,
                   of(buffers).map(b -> () ->
                   {
                       if (b != null)
                           FileUtils.clean(b);
                   }));
}
}
/**
 * Closes the reader, then the rebufferer, then the underlying channel — in
 * that order. The nested try/finally blocks guarantee every close is
 * attempted even if an earlier one throws.
 */
@Override
public void close()
{
    try
    {
        super.close();
    }
    finally
    {
        try
        {
            rebufferer.close();
        }
        finally
        {
            getChannel().close();
        }
    }
}
}
/**
 * Closes the reader, then the rebufferer, then the underlying channel — in
 * that order. The nested try/finally blocks guarantee every close is
 * attempted even if an earlier one throws.
 */
@Override
public void close()
{
    try
    {
        super.close();
    }
    finally
    {
        try
        {
            rebufferer.close();
        }
        finally
        {
            getChannel().close();
        }
    }
}
}
/**
 * Closes the reader, then the rebufferer, then the underlying channel — in
 * that order. The nested try/finally blocks guarantee every close is
 * attempted even if an earlier one throws.
 */
@Override
public void close()
{
    try
    {
        super.close();
    }
    finally
    {
        try
        {
            rebufferer.close();
        }
        finally
        {
            getChannel().close();
        }
    }
}
}
/**
 * Releases everything owned here: evicts this file's chunks from the cache
 * (when a cache is present), then closes the compression metadata, the
 * channel and the rebufferer. The nested try/finally blocks ensure each
 * close is attempted even if an earlier one throws.
 */
public void tidy()
{
    chunkCache.ifPresent(cache -> cache.invalidateFile(name()));
    try
    {
        // Only compressed files carry compression metadata.
        if (compressionMetadata != null)
        {
            compressionMetadata.close();
        }
    }
    finally
    {
        try
        {
            channel.close();
        }
        finally
        {
            rebufferer.close();
        }
    }
}
}
/**
 * Releases everything owned here: evicts this file's chunks from the cache
 * (when a cache is present), then closes the compression metadata, the
 * channel and the rebufferer. The nested try/finally blocks ensure each
 * close is attempted even if an earlier one throws.
 */
public void tidy()
{
    chunkCache.ifPresent(cache -> cache.invalidateFile(name()));
    try
    {
        // Only compressed files carry compression metadata.
        if (compressionMetadata != null)
        {
            compressionMetadata.close();
        }
    }
    finally
    {
        try
        {
            channel.close();
        }
        finally
        {
            rebufferer.close();
        }
    }
}
}
@SuppressWarnings("resource") // The Rebufferer owns both the channel and the validator and handles closing both. public static RandomAccessReader open(File file, File crcFile) throws IOException { ChannelProxy channel = new ChannelProxy(file); try { DataIntegrityMetadata.ChecksumValidator validator = new DataIntegrityMetadata.ChecksumValidator(ChecksumType.CRC32, RandomAccessReader.open(crcFile), file.getPath()); Rebufferer rebufferer = new ChecksummedRebufferer(channel, validator); return new RandomAccessReader.RandomAccessReaderWithOwnChannel(rebufferer); } catch (Throwable t) { channel.close(); throw t; } } }
@SuppressWarnings("resource") // The Rebufferer owns both the channel and the validator and handles closing both. public static RandomAccessReader open(File file, File crcFile) throws IOException { ChannelProxy channel = new ChannelProxy(file); try { DataIntegrityMetadata.ChecksumValidator validator = new DataIntegrityMetadata.ChecksumValidator(ChecksumType.CRC32, RandomAccessReader.open(crcFile), file.getPath()); Rebufferer rebufferer = new ChecksummedRebufferer(channel, validator); return new RandomAccessReader.RandomAccessReaderWithOwnChannel(rebufferer); } catch (Throwable t) { channel.close(); throw t; } } }
@SuppressWarnings("resource") // The Rebufferer owns both the channel and the validator and handles closing both. public static RandomAccessReader open(File file, File crcFile) throws IOException { ChannelProxy channel = new ChannelProxy(file); try { DataIntegrityMetadata.ChecksumValidator validator = new DataIntegrityMetadata.ChecksumValidator(ChecksumType.CRC32, RandomAccessReader.open(crcFile), file.getPath()); Rebufferer rebufferer = new ChecksummedRebufferer(channel, validator); return new RandomAccessReader.RandomAccessReaderWithOwnChannel(rebufferer); } catch (Throwable t) { channel.close(); throw t; } } }