/**
 * Reads a legacy-format COMPACTION component: a count-prefixed list of ancestor
 * sstable generations followed by a length-prefixed serialized HyperLogLogPlus
 * cardinality estimator.
 */
public CompactionMetadata deserialize(Descriptor.Version version, DataInput in) throws IOException
{
    // Ancestor generations come first on disk, prefixed by their count.
    int ancestorCount = in.readInt();
    Set<Integer> ancestorGenerations = new HashSet<>(ancestorCount);
    for (int read = 0; read < ancestorCount; read++)
        ancestorGenerations.add(in.readInt());

    // The estimator is stored as a length-prefixed byte blob.
    int estimatorSize = in.readInt();
    ICardinality cardinality = HyperLogLogPlus.Builder.build(ByteBufferUtil.readBytes(in, estimatorSize));

    return new CompactionMetadata(ancestorGenerations, cardinality);
}
}
/**
 * Builds the full set of metadata components (validation, stats, compaction)
 * to be written alongside a finished sstable.
 *
 * @param partitioner        class name of the partitioner used for this sstable
 * @param bloomFilterFPChance bloom filter false-positive chance recorded for validation
 * @param repairedAt         repair timestamp recorded in the stats component
 * @return map from component type to the freshly constructed component
 */
public Map<MetadataType, MetadataComponent> finalizeMetadata(String partitioner, double bloomFilterFPChance, long repairedAt)
{
    Map<MetadataType, MetadataComponent> result = Maps.newHashMap();

    result.put(MetadataType.VALIDATION, new ValidationMetadata(partitioner, bloomFilterFPChance));

    // Snapshot the collected statistics into an immutable stats component.
    StatsMetadata stats = new StatsMetadata(estimatedRowSize,
                                            estimatedColumnCount,
                                            replayPosition,
                                            minTimestamp,
                                            maxTimestamp,
                                            maxLocalDeletionTime,
                                            compressionRatio,
                                            estimatedTombstoneDropTime,
                                            sstableLevel,
                                            ImmutableList.copyOf(minColumnNames),
                                            ImmutableList.copyOf(maxColumnNames),
                                            hasLegacyCounterShards,
                                            repairedAt);
    result.put(MetadataType.STATS, stats);

    result.put(MetadataType.COMPACTION, new CompactionMetadata(ancestors, cardinality));
    return result;
}
}
// Only materialize the COMPACTION component when the caller asked for it.
// NOTE(review): the cardinality estimator is passed as null here — presumably
// a placeholder when only the ancestors are needed; confirm callers tolerate it.
if (types.contains(MetadataType.COMPACTION)) components.put(MetadataType.COMPACTION, new CompactionMetadata(ancestors, null));
/**
 * Assembles every metadata component (validation, stats, compaction, header)
 * written alongside a finished sstable.
 *
 * @param partitioner         class name of the partitioner used for this sstable
 * @param bloomFilterFPChance bloom filter false-positive chance recorded for validation
 * @param repairedAt          repair timestamp recorded in the stats component
 * @param header              serialization header converted into its on-disk component
 * @return map from component type to the freshly constructed component
 */
public Map<MetadataType, MetadataComponent> finalizeMetadata(String partitioner, double bloomFilterFPChance, long repairedAt, SerializationHeader header)
{
    Map<MetadataType, MetadataComponent> metadata = new EnumMap<>(MetadataType.class);

    metadata.put(MetadataType.VALIDATION, new ValidationMetadata(partitioner, bloomFilterFPChance));

    // Snapshot the collected statistics into an immutable stats component.
    StatsMetadata stats = new StatsMetadata(estimatedPartitionSize,
                                            estimatedCellPerPartitionCount,
                                            commitLogIntervals,
                                            timestampTracker.min(),
                                            timestampTracker.max(),
                                            localDeletionTimeTracker.min(),
                                            localDeletionTimeTracker.max(),
                                            ttlTracker.min(),
                                            ttlTracker.max(),
                                            compressionRatio,
                                            estimatedTombstoneDropTime.build(),
                                            sstableLevel,
                                            makeList(minClusteringValues),
                                            makeList(maxClusteringValues),
                                            hasLegacyCounterShards,
                                            repairedAt,
                                            totalColumnsSet,
                                            totalRows);
    metadata.put(MetadataType.STATS, stats);

    metadata.put(MetadataType.COMPACTION, new CompactionMetadata(cardinality));
    metadata.put(MetadataType.HEADER, header.toComponent());
    return metadata;
}
/**
 * Assembles every metadata component (validation, stats, compaction, header)
 * written alongside a finished sstable.
 *
 * @param partitioner         class name of the partitioner used for this sstable
 * @param bloomFilterFPChance bloom filter false-positive chance recorded for validation
 * @param repairedAt          repair timestamp recorded in the stats component
 * @param header              serialization header converted into its on-disk component
 * @return map from component type to the freshly constructed component
 */
public Map<MetadataType, MetadataComponent> finalizeMetadata(String partitioner, double bloomFilterFPChance, long repairedAt, SerializationHeader header)
{
    Map<MetadataType, MetadataComponent> metadata = new EnumMap<>(MetadataType.class);

    metadata.put(MetadataType.VALIDATION, new ValidationMetadata(partitioner, bloomFilterFPChance));

    // Snapshot the collected statistics into an immutable stats component.
    StatsMetadata stats = new StatsMetadata(estimatedPartitionSize,
                                            estimatedCellPerPartitionCount,
                                            commitLogIntervals,
                                            timestampTracker.min(),
                                            timestampTracker.max(),
                                            localDeletionTimeTracker.min(),
                                            localDeletionTimeTracker.max(),
                                            ttlTracker.min(),
                                            ttlTracker.max(),
                                            compressionRatio,
                                            estimatedTombstoneDropTime.build(),
                                            sstableLevel,
                                            makeList(minClusteringValues),
                                            makeList(maxClusteringValues),
                                            hasLegacyCounterShards,
                                            repairedAt,
                                            totalColumnsSet,
                                            totalRows);
    metadata.put(MetadataType.STATS, stats);

    metadata.put(MetadataType.COMPACTION, new CompactionMetadata(cardinality));
    metadata.put(MetadataType.HEADER, header.toComponent());
    return metadata;
}
/**
 * Assembles every metadata component (validation, stats, compaction, header)
 * written alongside a finished sstable.
 *
 * @param partitioner         class name of the partitioner used for this sstable
 * @param bloomFilterFPChance bloom filter false-positive chance recorded for validation
 * @param repairedAt          repair timestamp recorded in the stats component
 * @param header              serialization header converted into its on-disk component
 * @return map from component type to the freshly constructed component
 */
public Map<MetadataType, MetadataComponent> finalizeMetadata(String partitioner, double bloomFilterFPChance, long repairedAt, SerializationHeader header)
{
    Map<MetadataType, MetadataComponent> metadata = new EnumMap<>(MetadataType.class);

    metadata.put(MetadataType.VALIDATION, new ValidationMetadata(partitioner, bloomFilterFPChance));

    // Snapshot the collected statistics into an immutable stats component.
    StatsMetadata stats = new StatsMetadata(estimatedPartitionSize,
                                            estimatedCellPerPartitionCount,
                                            commitLogIntervals,
                                            timestampTracker.min(),
                                            timestampTracker.max(),
                                            localDeletionTimeTracker.min(),
                                            localDeletionTimeTracker.max(),
                                            ttlTracker.min(),
                                            ttlTracker.max(),
                                            compressionRatio,
                                            estimatedTombstoneDropTime,
                                            sstableLevel,
                                            makeList(minClusteringValues),
                                            makeList(maxClusteringValues),
                                            hasLegacyCounterShards,
                                            repairedAt,
                                            totalColumnsSet,
                                            totalRows);
    metadata.put(MetadataType.STATS, stats);

    metadata.put(MetadataType.COMPACTION, new CompactionMetadata(cardinality));
    metadata.put(MetadataType.HEADER, header.toComponent());
    return metadata;
}
/**
 * Assembles every metadata component (validation, stats, compaction, header)
 * written alongside a finished sstable.
 *
 * @param partitioner         class name of the partitioner used for this sstable
 * @param bloomFilterFPChance bloom filter false-positive chance recorded for validation
 * @param repairedAt          repair timestamp recorded in the stats component
 * @param header              serialization header converted into its on-disk component
 * @return map from component type to the freshly constructed component
 */
public Map<MetadataType, MetadataComponent> finalizeMetadata(String partitioner, double bloomFilterFPChance, long repairedAt, SerializationHeader header)
{
    Map<MetadataType, MetadataComponent> metadata = new EnumMap<>(MetadataType.class);

    metadata.put(MetadataType.VALIDATION, new ValidationMetadata(partitioner, bloomFilterFPChance));

    // Snapshot the collected statistics into an immutable stats component.
    StatsMetadata stats = new StatsMetadata(estimatedPartitionSize,
                                            estimatedCellPerPartitionCount,
                                            commitLogIntervals,
                                            timestampTracker.min(),
                                            timestampTracker.max(),
                                            localDeletionTimeTracker.min(),
                                            localDeletionTimeTracker.max(),
                                            ttlTracker.min(),
                                            ttlTracker.max(),
                                            compressionRatio,
                                            estimatedTombstoneDropTime.build(),
                                            sstableLevel,
                                            makeList(minClusteringValues),
                                            makeList(maxClusteringValues),
                                            hasLegacyCounterShards,
                                            repairedAt,
                                            totalColumnsSet,
                                            totalRows);
    metadata.put(MetadataType.STATS, stats);

    metadata.put(MetadataType.COMPACTION, new CompactionMetadata(cardinality));
    metadata.put(MetadataType.HEADER, header.toComponent());
    return metadata;
}
public CompactionMetadata deserialize(Version version, DataInputPlus in) throws IOException { if (version.hasCompactionAncestors()) { // skip ancestors int nbAncestors = in.readInt(); in.skipBytes(nbAncestors * TypeSizes.sizeof(nbAncestors)); } ICardinality cardinality = HyperLogLogPlus.Builder.build(ByteBufferUtil.readBytes(in, in.readInt())); return new CompactionMetadata(cardinality); } }
public CompactionMetadata deserialize(Version version, DataInputPlus in) throws IOException { if (version.hasCompactionAncestors()) { // skip ancestors int nbAncestors = in.readInt(); in.skipBytes(nbAncestors * TypeSizes.sizeof(nbAncestors)); } ICardinality cardinality = HyperLogLogPlus.Builder.build(ByteBufferUtil.readBytes(in, in.readInt())); return new CompactionMetadata(cardinality); } }
// Only materialize the COMPACTION component when the caller asked for it.
// NOTE(review): the cardinality estimator is passed as null here — presumably
// a placeholder when no estimator is available; confirm callers tolerate it.
if (types.contains(MetadataType.COMPACTION)) components.put(MetadataType.COMPACTION, new CompactionMetadata(null));
public CompactionMetadata deserialize(Version version, DataInputPlus in) throws IOException { if (version.hasCompactionAncestors()) { // skip ancestors int nbAncestors = in.readInt(); in.skipBytes(nbAncestors * TypeSizes.sizeof(nbAncestors)); } ICardinality cardinality = HyperLogLogPlus.Builder.build(ByteBufferUtil.readBytes(in, in.readInt())); return new CompactionMetadata(cardinality); } }
public CompactionMetadata deserialize(Version version, DataInputPlus in) throws IOException { if (version.hasCompactionAncestors()) { // skip ancestors int nbAncestors = in.readInt(); in.skipBytes(nbAncestors * TypeSizes.sizeof(nbAncestors)); } ICardinality cardinality = HyperLogLogPlus.Builder.build(ByteBufferUtil.readBytes(in, in.readInt())); return new CompactionMetadata(cardinality); } }
// Only materialize the COMPACTION component when the caller asked for it.
// NOTE(review): the cardinality estimator is passed as null here — presumably
// a placeholder when no estimator is available; confirm callers tolerate it.
if (types.contains(MetadataType.COMPACTION)) components.put(MetadataType.COMPACTION, new CompactionMetadata(null));
// Only materialize the COMPACTION component when the caller asked for it.
// NOTE(review): the cardinality estimator is passed as null here — presumably
// a placeholder when no estimator is available; confirm callers tolerate it.
if (types.contains(MetadataType.COMPACTION)) components.put(MetadataType.COMPACTION, new CompactionMetadata(null));