public OrcOutputBuffer(CompressionKind compression, int maxBufferSize)
{
    requireNonNull(compression, "compression is null");
    checkArgument(maxBufferSize > 0, "maximum buffer size should be greater than 0");

    this.maxBufferSize = maxBufferSize;
    this.buffer = new byte[INITIAL_BUFFER_SIZE];
    this.slice = wrappedBuffer(buffer);

    compressedOutputStream = new ChunkedSliceOutput(MINIMUM_OUTPUT_BUFFER_CHUNK_SIZE, MAXIMUM_OUTPUT_BUFFER_CHUNK_SIZE);

    if (compression == CompressionKind.NONE) {
        this.compressor = null;
    }
    else if (compression == CompressionKind.SNAPPY) {
        this.compressor = new SnappyCompressor();
    }
    else if (compression == CompressionKind.ZLIB) {
        this.compressor = new DeflateCompressor();
    }
    else if (compression == CompressionKind.LZ4) {
        this.compressor = new Lz4Compressor();
    }
    else if (compression == CompressionKind.ZSTD) {
        this.compressor = new ZstdJniCompressor();
    }
    else {
        throw new IllegalArgumentException("Unsupported compression " + compression);
    }
}
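/*
 * A minimal usage sketch, not from the original source: OrcOutputBuffer
 * extends SliceOutput in the Presto ORC writer, so a caller constructs it
 * once per stream and writes through the ordinary SliceOutput methods while
 * chunk compression happens transparently. The 256 KB buffer size, the
 * method name, and the exact write calls below are illustrative assumptions.
 */
static void exampleOrcBufferWrite() throws IOException
{
    OrcOutputBuffer output = new OrcOutputBuffer(CompressionKind.SNAPPY, 256 * 1024);
    output.writeBytes(new byte[] {1, 2, 3});  // buffered; compressed chunk by chunk internally
    output.close();                           // flushes the final, possibly partial, chunk
}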
public static byte[] blockCompress(byte[] data)
{
    SnappyCompressor compressor = new SnappyCompressor();
    byte[] compressedOut = new byte[compressor.maxCompressedLength(data.length)];
    int compressedSize = compressor.compress(data, 0, data.length, compressedOut, 0, compressedOut.length);
    return Arrays.copyOf(compressedOut, compressedSize);
}
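/*
 * A hedged round-trip sketch (not in the original): decompressing the output
 * of blockCompress with the matching aircompressor SnappyDecompressor. The
 * compress/decompress signatures follow the io.airlift.compress
 * Compressor/Decompressor interfaces; the caller sizes the output buffer
 * large enough for the uncompressed data (here it is passed in explicitly).
 */
public static byte[] blockDecompress(byte[] compressed, int uncompressedSize)
{
    SnappyDecompressor decompressor = new SnappyDecompressor();
    byte[] output = new byte[uncompressedSize];
    int actualSize = decompressor.decompress(compressed, 0, compressed.length, output, 0, output.length);
    return Arrays.copyOf(output, actualSize);
}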
@Override
public int maxCompressedLength(int uncompressedSize)
{
    return new SnappyCompressor().maxCompressedLength(uncompressedSize);
}
private void writeNextChunk(byte[] input, int inputOffset, int inputLength)
        throws IOException
{
    int compressedSize = compressor.compress(input, inputOffset, inputLength, outputBuffer, 0, outputBuffer.length);

    // Hadoop snappy chunk layout: big-endian uncompressed size,
    // big-endian compressed size, then the compressed bytes
    writeBigEndianInt(inputLength);
    writeBigEndianInt(compressedSize);
    out.write(outputBuffer, 0, compressedSize);

    // reset the buffered-input cursor (the field, not the parameter)
    this.inputOffset = 0;
}
public HadoopSnappyOutputStream(OutputStream out, int bufferSize)
{
    super(out);
    inputBuffer = new byte[bufferSize];
    // leave extra space free at end of buffers to make compression (slightly) faster
    inputMaxSize = inputBuffer.length - compressionOverhead(bufferSize);
    outputBuffer = new byte[compressor.maxCompressedLength(inputMaxSize) + SIZE_OF_LONG];
}
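/*
 * Usage sketch (an assumption, not from the original: it presumes the
 * constructor above is visible to the caller and that the class is an
 * OutputStream, as super(out) suggests; requires java.io imports). Bytes
 * written here are emitted as Hadoop-style snappy chunks, each prefixed with
 * the big-endian uncompressed and compressed lengths from writeNextChunk.
 */
static byte[] exampleHadoopSnappy(byte[] data) throws IOException
{
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    try (HadoopSnappyOutputStream snappyOut = new HadoopSnappyOutputStream(sink, 256 * 1024)) {
        snappyOut.write(data);
    }
    return sink.toByteArray();
}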
@Override
protected Compressor getCompressor()
{
    return new SnappyCompressor();
}
/**
 * {@link Crc32C#maskedCrc32c(byte[], int, int) Calculates} the crc, compresses
 * the data, determines if the compression ratio is acceptable, and calls
 * {@link #writeBlock(OutputStream, byte[], int, int, boolean, int)} to
 * actually write the frame.
 *
 * @param input The byte[] containing the raw data to be compressed.
 * @param offset The offset into <i>input</i> where the data starts.
 * @param length The amount of data in <i>input</i>.
 */
private void writeCompressed(byte[] input, int offset, int length)
        throws IOException
{
    // crc is based on the user-supplied input data
    int crc32c = writeChecksums ? Crc32C.maskedCrc32c(input, offset, length) : 0;

    int compressed = compressor.compress(input, offset, length, outputBuffer, 0, outputBuffer.length);

    // only use the compressed data if the compression ratio is <= minCompressionRatio
    if (((double) compressed / (double) length) <= minCompressionRatio) {
        writeBlock(out, outputBuffer, 0, compressed, true, crc32c);
    }
    else {
        // otherwise use the uncompressed data
        writeBlock(out, input, offset, length, false, crc32c);
    }
}
public SnappyFramedOutputStream(OutputStream out, boolean writeChecksums, int blockSize, double minCompressionRatio)
        throws IOException
{
    this.out = SnappyInternalUtils.checkNotNull(out, "out is null");
    this.writeChecksums = writeChecksums;

    // validate arguments before they are used to size buffers or write the header
    SnappyInternalUtils.checkArgument(blockSize > 0 && blockSize <= MAX_BLOCK_SIZE,
            "blockSize must be in (0, 65536]", blockSize);
    SnappyInternalUtils.checkArgument(minCompressionRatio > 0 && minCompressionRatio <= 1.0,
            "minCompressionRatio %1s must be between (0,1.0].", minCompressionRatio);

    this.minCompressionRatio = minCompressionRatio;
    this.blockSize = blockSize;
    this.buffer = new byte[blockSize];
    this.outputBuffer = new byte[compressor.maxCompressedLength(blockSize)];

    out.write(SnappyFramed.HEADER_BYTES);
}
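/*
 * Usage sketch for the framed format (assumptions: the four-argument
 * constructor above is accessible to the caller; 65536 matches
 * MAX_BLOCK_SIZE and 0.85 is an illustrative minCompressionRatio). The
 * stream writes the snappy framing header up front, then one frame per
 * block via writeCompressed.
 */
static byte[] exampleFramed(byte[] data) throws IOException
{
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    try (SnappyFramedOutputStream framed = new SnappyFramedOutputStream(sink, true, 65536, 0.85)) {
        framed.write(data);
    }
    return sink.toByteArray();
}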
SnappyCodec()
{
    super(new SnappyCompressor(), new SnappyDecompressor());
}
@Override
protected Compressor getVerifyCompressor()
{
    return new HadoopCodecCompressor(verifyCodec, new SnappyCompressor());
}
@Override
protected Compressor getCompressor()
{
    return new HadoopCodecCompressor(new SnappyCodec(), new SnappyCompressor());
}