/**
 * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
 *
 * @param out The output stream to compress
 * @param blockSize Default: 4. The block size used during compression. 4=64kb, 5=256kb, 6=1mb, 7=4mb. All other
 *            values will generate an exception
 * @param blockChecksum Default: false. When true, a XXHash32 checksum is computed and appended to the stream for
 *            every block of data
 * @param useBrokenFlagDescriptorChecksum Default: false. When true, writes an incorrect FrameDescriptor checksum
 *            compatible with older kafka clients.
 * @throws IOException if writing the frame header to {@code out} fails
 */
public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize, boolean blockChecksum, boolean useBrokenFlagDescriptorChecksum) throws IOException {
    // NOTE(review): stores `out` directly rather than calling super(out) — presumably
    // this class manages the delegate itself; confirm against the class declaration.
    this.out = out;
    compressor = LZ4Factory.fastestInstance().fastCompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    this.useBrokenFlagDescriptorChecksum = useBrokenFlagDescriptorChecksum;
    // BD validates the blockSize code (4..7); FLG records the per-block-checksum flag.
    bd = new BD(blockSize);
    flg = new FLG(blockChecksum);
    bufferOffset = 0;
    maxBlockSize = bd.getBlockMaximumSize();
    // Uncompressed staging buffer plus a compressed buffer sized for the worst case.
    buffer = new byte[maxBlockSize];
    compressedBuffer = new byte[compressor.maxCompressedLength(maxBlockSize)];
    finished = false;
    // Must run last: the header bytes depend on flg/bd initialized above.
    writeHeader();
}
/** Initializes the partitioner with the fastest available XXHash32 implementation. */
public SixtPartitioner() {
    xxHasher = XXHashFactory.fastestInstance().hash32();
}
// One-shot XXHash32 (seed 0) over the compressed bytes; NOTE(review): this builds a
// fresh hasher on every call — consider caching the hash32() instance if this is hot.
int hash = XXHashFactory.fastestInstance().hash32().hash(compressed, off, len, 0);
/**
 * Checks the {@code ENABLE_XXHASH} carbon property and initializes the XXHash32
 * hasher when it is enabled; otherwise {@code xxHash32} is left unset.
 */
public ColumnReverseDictionaryInfo() {
    // parseBoolean avoids the Boolean boxing + auto-unboxing that Boolean.valueOf(...)
    // incurs; it accepts the same inputs and yields the identical primitive result.
    boolean useXXHash = Boolean.parseBoolean(CarbonProperties.getInstance()
        .getProperty(CarbonCommonConstants.ENABLE_XXHASH,
            CarbonCommonConstants.ENABLE_XXHASH_DEFAULT));
    if (useXXHash) {
        xxHash32 = XXHashFactory.fastestInstance().hash32();
    }
}
/**
 * Hashes {@code len} bytes of {@code buf} starting at {@code off} with XXHash32.
 * Direct buffers go through JNI; array-backed buffers delegate to the byte[] overload
 * (correctly shifted by arrayOffset); read-only/other buffers use a pure-Java fallback.
 */
@Override
public int hash(ByteBuffer buf, int off, int len, int seed) {
    if (buf.isDirect()) {
        checkRange(buf, off, len);
        return XXHashJNI.XXH32BB(buf, off, len, seed);
    } else if (buf.hasArray()) {
        // arrayOffset() matters: a sliced/offset buffer's content does not start
        // at index 0 of its backing array.
        return hash(buf.array(), off + buf.arrayOffset(), len, seed);
    } else {
        // Lazy init with a benign race: SAFE_INSTANCE may be written by several
        // threads, but every write stores an equivalent stateless hasher.
        XXHash32 safeInstance = SAFE_INSTANCE;
        if (safeInstance == null) {
            safeInstance = SAFE_INSTANCE = XXHashFactory.safeInstance().hash32();
        }
        return safeInstance.hash(buf, off, len, seed);
    }
}
/**
 * Hashes {@code len} bytes of {@code buf} starting at {@code off} with XXHash32.
 * Direct buffers go through JNI; array-backed buffers delegate to the byte[] overload;
 * read-only/other buffers use a pure-Java fallback.
 */
@Override
public int hash(ByteBuffer buf, int off, int len, int seed) {
    if (buf.isDirect()) {
        checkRange(buf, off, len);
        return XXHashJNI.XXH32BB(buf, off, len, seed);
    } else if (buf.hasArray()) {
        // FIX: include buf.arrayOffset(). A buffer created by slice()/duplicate() or
        // ByteBuffer.wrap(array, from, len) may have a nonzero arrayOffset; without it
        // the wrong bytes of the backing array are hashed (or bounds are violated).
        return hash(buf.array(), off + buf.arrayOffset(), len, seed);
    } else {
        // Lazy init with a benign race: every write stores an equivalent stateless hasher.
        XXHash32 safeInstance = SAFE_INSTANCE;
        if (safeInstance == null) {
            safeInstance = SAFE_INSTANCE = XXHashFactory.safeInstance().hash32();
        }
        return safeInstance.hash(buf, off, len, seed);
    }
}
/**
 * Create a new {@link InputStream} that will decompress data using the LZ4 algorithm,
 * using the fastest available decompressor and XXHash32 implementations.
 *
 * @param in The stream to decompress
 * @throws IOException if reading the frame header from {@code in} fails
 */
public LZ4FrameInputStream(InputStream in) throws IOException {
    // Delegates to the full constructor; no statements may precede this() in Java.
    this(in, LZ4Factory.fastestInstance().safeDecompressor(), XXHashFactory.fastestInstance().hash32());
}
/**
 * Create a new {@link InputStream} that will decompress data using the LZ4 algorithm.
 *
 * @param in The stream to decompress
 * @throws IOException if reading or validating the frame header fails
 */
public KafkaLZ4BlockInputStream(InputStream in) throws IOException {
    super(in);
    decompressor = LZ4Factory.fastestInstance().safeDecompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    // readHeader() populates bd (and related frame state) — it must run before
    // bd.getBlockMaximumSize() below.
    readHeader();
    maxBlockSize = bd.getBlockMaximumSize();
    buffer = new byte[maxBlockSize];
    compressedBuffer = new byte[maxBlockSize];
    bufferOffset = 0;
    bufferSize = 0;
    finished = false;
}
/**
 * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
 *
 * @param out The output stream to compress
 * @param blockSize The BLOCKSIZE to use
 * @param knownSize The size of the uncompressed data. A value less than zero means unknown.
 * @param bits A set of features to use
 * @throws IllegalArgumentException if {@link FLG.Bits#CONTENT_SIZE} is requested but
 *             {@code knownSize} is negative
 * @throws IOException if writing the frame header to {@code out} fails
 */
public LZ4FrameOutputStream(OutputStream out, BLOCKSIZE blockSize, long knownSize, FLG.Bits... bits) throws IOException {
    super(out);
    compressor = LZ4Factory.fastestInstance().fastCompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    frameInfo = new FrameInfo(new FLG(FLG.DEFAULT_VERSION, bits), new BD(blockSize));
    // Validate before allocating buffers: the guard accepts zero (only knownSize < 0
    // fails), so the message must say "greater than or equal to zero" — the previous
    // "greater than zero" wording contradicted the actual check.
    if (frameInfo.getFLG().isEnabled(FLG.Bits.CONTENT_SIZE) && knownSize < 0) {
        throw new IllegalArgumentException("Known size must be greater than or equal to zero in order to use the known size feature");
    }
    maxBlockSize = frameInfo.getBD().getBlockMaximumSize();
    // Staging buffer for uncompressed data and a worst-case-sized compressed buffer.
    buffer = ByteBuffer.allocate(maxBlockSize).order(ByteOrder.LITTLE_ENDIAN);
    compressedBuffer = new byte[compressor.maxCompressedLength(maxBlockSize)];
    this.knownSize = knownSize;
    // Must run last: the header depends on frameInfo and knownSize set above.
    writeHeader();
}
/**
 * Create a new {@link OutputStream} that will compress data using the LZ4 algorithm.
 *
 * @param out The output stream to compress
 * @param blockSize Default: 4. The block size used during compression. 4=64kb, 5=256kb, 6=1mb, 7=4mb. All other
 *            values will generate an exception
 * @param blockChecksum Default: false. When true, a XXHash32 checksum is computed and appended to the stream for
 *            every block of data
 * @throws IOException if writing the frame header to {@code out} fails
 */
public KafkaLZ4BlockOutputStream(OutputStream out, int blockSize, boolean blockChecksum) throws IOException {
    super(out);
    compressor = LZ4Factory.fastestInstance().fastCompressor();
    checksum = XXHashFactory.fastestInstance().hash32();
    // BD validates the blockSize code (4..7); FLG records the per-block-checksum flag.
    bd = new BD(blockSize);
    flg = new FLG(blockChecksum);
    bufferOffset = 0;
    maxBlockSize = bd.getBlockMaximumSize();
    // Uncompressed staging buffer plus a compressed buffer sized for the worst case.
    buffer = new byte[maxBlockSize];
    compressedBuffer = new byte[compressor.maxCompressedLength(maxBlockSize)];
    finished = false;
    // Must run last: the header bytes depend on flg/bd initialized above.
    writeHeader();
}