/**
 * Builds the IndexSummary from the accumulated offsets/entries buffers.
 *
 * When a {@code boundary} is supplied, the summary is truncated to the
 * entry count and entries length recorded in that boundary; otherwise the
 * full contents of the builder's buffers are used.
 *
 * @param partitioner partitioner used to interpret summary keys
 * @param boundary    optional truncation point; may be null for the full summary
 * @return a new IndexSummary sharing copies of this builder's buffers
 */
public IndexSummary build(IPartitioner partitioner, ReadableBoundary boundary)
{
    assert entries.length() > 0;

    // Default to everything written so far; each offset entry is 4 bytes.
    int summaryCount = (int) (offsets.length() / 4);
    long summaryEntriesLength = entries.length();
    if (boundary != null)
    {
        summaryCount = boundary.summaryCount;
        summaryEntriesLength = boundary.entriesLength;
    }

    // Summary size we would have at the base (full) sampling interval.
    int sizeAtFullSampling = (int) Math.ceil(keysWritten / (double) minIndexInterval);
    assert summaryCount > 0;

    return new IndexSummary(partitioner,
                            offsets.currentBuffer().sharedCopy(),
                            summaryCount,
                            entries.currentBuffer().sharedCopy(),
                            summaryEntriesLength,
                            sizeAtFullSampling,
                            minIndexInterval,
                            samplingLevel);
}
/**
 * Builds the IndexSummary from the accumulated offsets/entries buffers.
 *
 * If a boundary is given, the summary is truncated to the count and entries
 * length recorded there; otherwise the full buffer contents are used.
 *
 * @param partitioner partitioner used to interpret summary keys
 * @param boundary    optional truncation point; may be null
 * @return a new IndexSummary sharing copies of this builder's buffers
 */
public IndexSummary build(IPartitioner partitioner, ReadableBoundary boundary) {
    assert entries.length() > 0;
    // Each offset entry occupies 4 bytes.
    int count = (int) (offsets.length() / 4);
    long entriesLength = entries.length();
    if (boundary != null) {
        count = boundary.summaryCount;
        entriesLength = boundary.entriesLength;
    }
    // Summary size if every minIndexInterval-th key were sampled.
    int sizeAtFullSampling = (int) Math.ceil(keysWritten / (double) minIndexInterval);
    assert count > 0;
    return new IndexSummary(partitioner, offsets.currentBuffer().sharedCopy(), count, entries.currentBuffer().sharedCopy(), entriesLength, sizeAtFullSampling, minIndexInterval, samplingLevel);
}
/**
 * Constructs the finished IndexSummary, optionally truncated at a boundary.
 *
 * @param partitioner partitioner used to interpret summary keys
 * @param boundary    truncation point, or null to use all written data
 * @return a new IndexSummary over shared copies of the builder's buffers
 */
public IndexSummary build(IPartitioner partitioner, ReadableBoundary boundary)
{
    assert entries.length() > 0;

    final boolean truncated = boundary != null;
    // Offset slots are 4 bytes each; boundary (when present) wins.
    int entryCount = truncated ? boundary.summaryCount : (int) (offsets.length() / 4);
    long totalEntriesLength = truncated ? boundary.entriesLength : entries.length();

    // How large the summary would be at the base sampling interval.
    int sizeAtFullSampling = (int) Math.ceil(keysWritten / (double) minIndexInterval);
    assert entryCount > 0;

    return new IndexSummary(partitioner,
                            offsets.currentBuffer().sharedCopy(),
                            entryCount,
                            entries.currentBuffer().sharedCopy(),
                            totalEntriesLength,
                            sizeAtFullSampling,
                            minIndexInterval,
                            samplingLevel);
}
/**
 * Builds the IndexSummary from the accumulated offsets/entries buffers.
 *
 * If a boundary is given, the summary is truncated to the count and entries
 * length recorded there; otherwise the full buffer contents are used.
 *
 * @param partitioner partitioner used to interpret summary keys
 * @param boundary    optional truncation point; may be null
 * @return a new IndexSummary sharing copies of this builder's buffers
 */
public IndexSummary build(IPartitioner partitioner, ReadableBoundary boundary) {
    assert entries.length() > 0;
    // Each offset entry occupies 4 bytes.
    int count = (int) (offsets.length() / 4);
    long entriesLength = entries.length();
    if (boundary != null) {
        count = boundary.summaryCount;
        entriesLength = boundary.entriesLength;
    }
    // Summary size if every minIndexInterval-th key were sampled.
    int sizeAtFullSampling = (int) Math.ceil(keysWritten / (double) minIndexInterval);
    assert count > 0;
    return new IndexSummary(partitioner, offsets.currentBuffer().sharedCopy(), count, entries.currentBuffer().sharedCopy(), entriesLength, sizeAtFullSampling, minIndexInterval, samplingLevel);
}
/**
 * Produces the IndexSummary for everything written so far, optionally cut
 * off at {@code boundary}.
 *
 * @param partitioner partitioner used to interpret summary keys
 * @param boundary    optional cut-off point; null means no truncation
 * @return a new IndexSummary over shared copies of the builder's buffers
 */
public IndexSummary build(IPartitioner partitioner, ReadableBoundary boundary)
{
    assert entries.length() > 0;

    int count;
    long entriesLength;
    if (boundary == null)
    {
        // 4 bytes per offset slot.
        count = (int) (offsets.length() / 4);
        entriesLength = entries.length();
    }
    else
    {
        count = boundary.summaryCount;
        entriesLength = boundary.entriesLength;
    }

    // Size the summary would have at the base (full) sampling interval.
    int sizeAtFullSampling = (int) Math.ceil(keysWritten / (double) minIndexInterval);
    assert count > 0;

    return new IndexSummary(partitioner, offsets.currentBuffer().sharedCopy(), count,
                            entries.currentBuffer().sharedCopy(), entriesLength,
                            sizeAtFullSampling, minIndexInterval, samplingLevel);
}
/**
 * Opens a CompressionMetadata view covering {@code dataLength} bytes of
 * uncompressed data, recomputing the chunk count (and, if truncated, the
 * compressed length) from the shared offsets memory.
 *
 * @param dataLength       uncompressed data length to cover
 * @param compressedLength compressed length; replaced by the offset at the
 *                         truncation point when dataLength covers fewer
 *                         chunks than were written
 * @return a new CompressionMetadata sharing this writer's offsets memory
 */
@SuppressWarnings("resource")
public CompressionMetadata open(long dataLength, long compressedLength) {
    SafeMemory offsets = this.offsets.sharedCopy();
    // Calculate how many chunk entries we need; round up if dataLength is
    // not an exact multiple of the chunk length (truncated final chunk).
    int count = (int) (dataLength / parameters.chunkLength());
    if (dataLength % parameters.chunkLength() != 0)
        count++;
    assert count > 0;
    // If we are opened to a truncated position, take the actual compressed
    // length from the next offset entry (8 bytes per offset).
    if (count < this.count)
        compressedLength = offsets.getLong(count * 8L);
    return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32);
}
@SuppressWarnings("resource") public CompressionMetadata open(long dataLength, long compressedLength) { SafeMemory offsets = this.offsets.sharedCopy(); // calculate how many entries we need, if our dataLength is truncated int count = (int) (dataLength / parameters.chunkLength()); if (dataLength % parameters.chunkLength() != 0) count++; assert count > 0; // grab our actual compressed length from the next offset from our the position we're opened to if (count < this.count) compressedLength = offsets.getLong(count * 8L); return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32); }
/**
 * Opens a CompressionMetadata view covering {@code dataLength} bytes of
 * uncompressed data, recomputing the chunk count (and, if truncated, the
 * compressed length) from the shared offsets memory.
 *
 * @param dataLength       uncompressed data length to cover
 * @param compressedLength compressed length; replaced by the offset at the
 *                         truncation point when dataLength covers fewer
 *                         chunks than were written
 * @return a new CompressionMetadata sharing this writer's offsets memory
 */
@SuppressWarnings("resource")
public CompressionMetadata open(long dataLength, long compressedLength) {
    SafeMemory offsets = this.offsets.sharedCopy();
    // Calculate how many chunk entries we need; round up if dataLength is
    // not an exact multiple of the chunk length (truncated final chunk).
    int count = (int) (dataLength / parameters.chunkLength());
    if (dataLength % parameters.chunkLength() != 0)
        count++;
    assert count > 0;
    // If we are opened to a truncated position, take the actual compressed
    // length from the next offset entry (8 bytes per offset).
    if (count < this.count)
        compressedLength = offsets.getLong(count * 8L);
    return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32);
}
@SuppressWarnings("resource") public CompressionMetadata open(long dataLength, long compressedLength) { SafeMemory offsets = this.offsets.sharedCopy(); // calculate how many entries we need, if our dataLength is truncated int count = (int) (dataLength / parameters.chunkLength()); if (dataLength % parameters.chunkLength() != 0) count++; assert count > 0; // grab our actual compressed length from the next offset from our the position we're opened to if (count < this.count) compressedLength = offsets.getLong(count * 8L); return new CompressionMetadata(filePath, parameters, offsets, count * 8L, dataLength, compressedLength, ChecksumType.CRC32); }
// NOTE(review): this assignment appears twice in a row — the second sharedCopy()
// overwrites the first without releasing it, which looks like it leaks one shared
// reference. Cannot confirm from this fragment alone (the enclosing definition is
// not visible); verify against the full file and drop one of the two if redundant.
offsets = this.offsets.sharedCopy(); offsets = this.offsets.sharedCopy();