/**
 * Returns the number of entries in the IndexSummary. At full sampling, this is approximately
 * 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public int getIndexSummarySize()
{
    final int entryCount = indexSummary.size();
    return entryCount;
}
/**
 * Returns the number of entries in the IndexSummary. At full sampling, this is approximately
 * 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public int getIndexSummarySize()
{
    final int entryCount = indexSummary.size();
    return entryCount;
}
/**
 * Returns the number of entries in the IndexSummary. At full sampling, this is approximately
 * 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public int getIndexSummarySize()
{
    final int entryCount = indexSummary.size();
    return entryCount;
}
/**
 * Returns the number of entries in the IndexSummary. At full sampling, this is approximately
 * 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public int getIndexSummarySize()
{
    final int entryCount = indexSummary.size();
    return entryCount;
}
/**
 * Returns the number of entries in the IndexSummary. At full sampling, this is approximately
 * 1/INDEX_INTERVALth of the keys in this SSTable.
 */
public int getIndexSummarySize()
{
    final int entryCount = indexSummary.size();
    return entryCount;
}
// Visit every entry currently held in the index summary, by position.
// NOTE(review): the loop body lies outside this view — confirm what is done per entry.
for (int i = 0; i < indexSummary.size(); i++)
// On-disk length of the primary-index component; used only to estimate a read-buffer size.
long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
// Data-file buffer sized from the estimated partition size at the configured disk-optimization percentile.
int dataBufferSize = sstable.optimizationStrategy.bufferSize(statsMetadata.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
// Index-file buffer sized from the mean bytes-per-summary-entry.
// NOTE(review): indexSummary.size() == 0 would divide by zero — presumably guaranteed non-empty upstream; confirm.
int indexBufferSize = sstable.optimizationStrategy.bufferSize(indexFileLength / sstable.indexSummary.size());
sstable.ifile = ibuilder.bufferSize(indexBufferSize).complete();
sstable.dfile = dbuilder.bufferSize(dataBufferSize).complete();
// Downsampling bookkeeping (fragment): the bodies of the loops below lie outside this view,
// so the per-iteration work cannot be documented from here — confirm against the full method.
int newKeyCount = existing.size();
long newEntriesLength = existing.getEntriesLength();
// For each removal start point, walk entries of the old summary at stride currentSamplingLevel.
for (int start : startPoints)
for (int j = start; j < existing.size(); j += currentSamplingLevel)
int newEntriesOffset = 0;
// Labeled loop over the old summary; 'outer' presumably lets the (unseen) body skip removed entries.
outer: for (int oldSummaryIndex = 0; oldSummaryIndex < existing.size(); oldSummaryIndex++)
// On-disk length of the primary-index component; used only to estimate a read-buffer size.
long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
// Data-file buffer sized at the configured percentile of estimated partition size.
int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
// Index-file buffer sized from average bytes per summary entry.
// NOTE(review): divides by indexSummary.size() — assumes a non-empty summary; confirm upstream guarantee.
int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete();
// Compression handling follows; the branch body lies outside this view.
if (compression)
// On-disk length of the primary-index component; used only to estimate a read-buffer size.
long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
// Data-file buffer sized at the configured percentile of estimated partition size.
int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
// Index-file buffer sized from average bytes per summary entry.
// NOTE(review): divides by indexSummary.size() — assumes a non-empty summary; confirm upstream guarantee.
int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete();
// Compression handling follows; the branch body lies outside this view.
if (compression)
// On-disk length of the primary-index component; used only to estimate a read-buffer size.
long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
// Data-file buffer sized at the configured percentile of estimated partition size.
int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
// Index-file buffer sized from average bytes per summary entry.
// NOTE(review): divides by indexSummary.size() — assumes a non-empty summary; confirm upstream guarantee.
int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete();
// Compression handling follows; the branch body lies outside this view.
if (compression)
/**
 * Opens this still-being-written sstable for reading "early", exposing only the prefix of
 * the index and data files that is already safely flushed.
 *
 * @return an EARLY-open SSTableReader covering keys up to the last readable boundary,
 *         or {@code null} if no boundary is readable yet.
 */
@SuppressWarnings("resource")
public SSTableReader openEarly()
{
    // find the max (exclusive) readable key
    IndexSummaryBuilder.ReadableBoundary boundary = iwriter.getMaxReadable();
    if (boundary == null)
        return null;
    StatsMetadata stats = statsMetadata();
    assert boundary.indexLength > 0 && boundary.dataLength > 0;
    // open the reader early
    IndexSummary indexSummary = iwriter.summary.build(metadata.partitioner, boundary);
    // Current on-disk index length; used only to estimate a sensible read-buffer size.
    long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
    int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
    // Only bytes up to the boundary are readable; the handle is truncated there.
    FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete(boundary.indexLength);
    if (compression)
        dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(boundary.dataLength));
    int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
    FileHandle dfile = dbuilder.bufferSize(dataBufferSize).complete(boundary.dataLength);
    invalidateCacheAtBoundary(dfile);
    SSTableReader sstable = SSTableReader.internalOpen(descriptor,
                                                       components, metadata,
                                                       ifile, dfile, indexSummary,
                                                       iwriter.bf.sharedCopy(),
                                                       maxDataAge, stats,
                                                       SSTableReader.OpenReason.EARLY,
                                                       header);
    // now it's open, find the ACTUAL last readable key (i.e. for which the data file has also been flushed)
    sstable.first = getMinimalKey(first);
    sstable.last = getMinimalKey(boundary.lastKey);
    return sstable;
}
/**
 * Opens this still-being-written sstable for reading "early", exposing only the prefix of
 * the index and data files that is already safely flushed.
 *
 * @return an EARLY-open SSTableReader covering keys up to the last readable boundary,
 *         or {@code null} if no boundary is readable yet.
 */
@SuppressWarnings("resource")
public SSTableReader openEarly()
{
    // find the max (exclusive) readable key
    IndexSummaryBuilder.ReadableBoundary boundary = iwriter.getMaxReadable();
    if (boundary == null)
        return null;
    StatsMetadata stats = statsMetadata();
    assert boundary.indexLength > 0 && boundary.dataLength > 0;
    // open the reader early
    IndexSummary indexSummary = iwriter.summary.build(metadata.partitioner, boundary);
    // Current on-disk index length; used only to estimate a sensible read-buffer size.
    long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
    int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
    // Only bytes up to the boundary are readable; the handle is truncated there.
    FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete(boundary.indexLength);
    if (compression)
        dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(boundary.dataLength));
    int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
    FileHandle dfile = dbuilder.bufferSize(dataBufferSize).complete(boundary.dataLength);
    invalidateCacheAtBoundary(dfile);
    SSTableReader sstable = SSTableReader.internalOpen(descriptor,
                                                       components, metadata,
                                                       ifile, dfile, indexSummary,
                                                       iwriter.bf.sharedCopy(),
                                                       maxDataAge, stats,
                                                       SSTableReader.OpenReason.EARLY,
                                                       header);
    // now it's open, find the ACTUAL last readable key (i.e. for which the data file has also been flushed)
    sstable.first = getMinimalKey(first);
    sstable.last = getMinimalKey(boundary.lastKey);
    return sstable;
}
/**
 * Opens this still-being-written sstable for reading "early", exposing only the prefix of
 * the index and data files that is already safely flushed.
 *
 * @return an EARLY-open SSTableReader covering keys up to the last readable boundary,
 *         or {@code null} if no boundary is readable yet.
 */
@SuppressWarnings("resource")
public SSTableReader openEarly()
{
    // find the max (exclusive) readable key
    IndexSummaryBuilder.ReadableBoundary boundary = iwriter.getMaxReadable();
    if (boundary == null)
        return null;
    StatsMetadata stats = statsMetadata();
    assert boundary.indexLength > 0 && boundary.dataLength > 0;
    // open the reader early
    IndexSummary indexSummary = iwriter.summary.build(metadata.partitioner, boundary);
    // Current on-disk index length; used only to estimate a sensible read-buffer size.
    long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
    int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
    // Only bytes up to the boundary are readable; the handle is truncated there.
    FileHandle ifile = iwriter.builder.bufferSize(indexBufferSize).complete(boundary.indexLength);
    if (compression)
        dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(boundary.dataLength));
    int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
    FileHandle dfile = dbuilder.bufferSize(dataBufferSize).complete(boundary.dataLength);
    invalidateCacheAtBoundary(dfile);
    SSTableReader sstable = SSTableReader.internalOpen(descriptor,
                                                       components, metadata,
                                                       ifile, dfile, indexSummary,
                                                       iwriter.bf.sharedCopy(),
                                                       maxDataAge, stats,
                                                       SSTableReader.OpenReason.EARLY,
                                                       header);
    // now it's open, find the ACTUAL last readable key (i.e. for which the data file has also been flushed)
    sstable.first = getMinimalKey(first);
    sstable.last = getMinimalKey(boundary.lastKey);
    return sstable;
}
// Index read-buffer sized from average bytes per summary entry.
// NOTE(review): assumes indexSummary.size() > 0 — confirm upstream guarantee.
int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
ifile = ibuilder.bufferSize(indexBufferSize).complete();