// Smoke-test a serialized bloom filter: deserialize la-2-big-Filter.db and probe keys 1..10.
Murmur3Partitioner partitioner = new Murmur3Partitioner();
try (DataInputStream in = new DataInputStream(new FileInputStream(new File("la-2-big-Filter.db")));
     IFilter filter = FilterFactory.deserialize(in, true))
{
    for (int i = 1; i <= 10; i++)
    {
        DecoratedKey key = partitioner.decorateKey(Int32Type.instance.decompose(i));
        // Same output strings as the original; the ternary just collapses the if/else.
        System.out.println(i + (filter.isPresent(key) ? " is present " : " is not present "));
    }
}
@Override
protected Throwable doPostCleanup(Throwable accumulate)
{
    // Release resources in the same order as before — summary, bloom filter, builder —
    // threading any failure through the accumulator rather than throwing.
    return builder.close(bf.close(summary.close(accumulate)));
}
}
/**
 * Reports the off-heap memory footprint of this sstable's bloom filter.
 *
 * @return number of bytes the bloom filter occupies off heap
 */
public long getBloomFilterOffHeapSize()
{
    return bf.offHeapSize();
}
/**
 * Clones this reader with the supplied values and registers the clone as this reader's replacement.
 *
 * @param newFirst the first key for the replacement (may differ from the original because
 *                 compaction results can be opened pre-emptively)
 * @param reason the {@code OpenReason} recorded on the replacement
 * @param newSummary the index summary for the replacement
 * @return the cloned reader, set as the replacement by this method
 */
private SSTableReader cloneAndReplace(DecoratedKey newFirst, OpenReason reason, IndexSummary newSummary)
{
    SSTableReader clone = internalOpen(descriptor,
                                       components,
                                       metadata,
                                       ifile == null ? null : ifile.sharedCopy(),
                                       dfile.sharedCopy(),
                                       newSummary,
                                       bf.sharedCopy(),
                                       maxDataAge,
                                       sstableMetadata,
                                       reason,
                                       header);
    // These assignments are independent of one another; carry over suspicion state and bounds.
    clone.isSuspect.set(isSuspect.get());
    clone.first = newFirst;
    clone.last = last;
    return clone;
}
/**
 * Appends one partition's index entry: adds the key to the bloom filter, writes the
 * key + serialized entry to the index file, and offers the entry to the index summary.
 *
 * @param key partition key being indexed
 * @param indexEntry row index entry to serialize
 * @param dataEnd end position of the partition in the data file
 * @param indexInfo pre-serialized index info passed through to the serializer
 * @throws IOException declared for callers; write failures surface as FSWriteError
 */
public void append(DecoratedKey key, RowIndexEntry indexEntry, long dataEnd, ByteBuffer indexInfo) throws IOException
{
    bf.add(key);
    final long startPosition = indexFile.position();
    try
    {
        ByteBufferUtil.writeWithShortLength(key.getKey(), indexFile);
        rowIndexEntrySerializer.serialize(indexEntry, indexFile, indexInfo);
    }
    catch (IOException e)
    {
        // Wrap with the file path for diagnostics, preserving the cause.
        throw new FSWriteError(e, indexFile.getPath());
    }
    final long endPosition = indexFile.position();

    if (logger.isTraceEnabled())
        logger.trace("wrote index entry: {} at {}", indexEntry, startPosition);

    summary.maybeAddEntry(key, startPosition, endPosition, dataEnd);
}
dfile, iwriter.summary.build(partitioner), iwriter.bf.sharedCopy(), maxDataAge, metadata, iwriter.bf.close(); iwriter.summary.close();
/**
 * Reports how many bytes this sstable's bloom filter occupies when serialized.
 *
 * @return serialized size of the bloom filter, in bytes
 */
public long getBloomFilterSerializedSize()
{
    return bf.serializedSize();
}
/**
 * Registers this reader and every off-heap/ref-counted resource it owns with the
 * given identity collection (used for leak detection / ref accounting).
 */
public void addTo(Ref.IdentityCollection identities)
{
    identities.add(this);
    identities.add(tidy.globalRef);
    // Shared file handles, bloom filter, and index summary all carry their own refs.
    dfile.addTo(identities);
    ifile.addTo(identities);
    bf.addTo(identities);
    indexSummary.addTo(identities);
}
/**
 * Clones this reader with the supplied values and registers the clone as this reader's replacement.
 *
 * @param newFirst the first key for the replacement (may differ from the original because
 *                 compaction results can be opened pre-emptively)
 * @param reason the {@code OpenReason} recorded on the replacement
 * @param newSummary the index summary for the replacement
 * @return the cloned reader, set as the replacement by this method
 */
private SSTableReader cloneAndReplace(DecoratedKey newFirst, OpenReason reason, IndexSummary newSummary)
{
    SSTableReader clone = internalOpen(descriptor,
                                       components,
                                       metadata,
                                       ifile == null ? null : ifile.sharedCopy(),
                                       dfile.sharedCopy(),
                                       newSummary,
                                       bf.sharedCopy(),
                                       maxDataAge,
                                       sstableMetadata,
                                       reason,
                                       header);
    // These assignments are independent of one another; carry over suspicion state and bounds.
    clone.isSuspect.set(isSuspect.get());
    clone.first = newFirst;
    clone.last = last;
    return clone;
}
/**
 * Appends one partition's index entry: adds the key to the bloom filter, writes the
 * key + serialized entry to the index file, and offers the entry to the index summary.
 *
 * @param key partition key being indexed
 * @param indexEntry row index entry to serialize
 * @param dataEnd end position of the partition in the data file
 * @param indexInfo pre-serialized index info passed through to the serializer
 * @throws IOException declared for callers; write failures surface as FSWriteError
 */
public void append(DecoratedKey key, RowIndexEntry indexEntry, long dataEnd, ByteBuffer indexInfo) throws IOException
{
    bf.add(key);
    final long startPosition = indexFile.position();
    try
    {
        ByteBufferUtil.writeWithShortLength(key.getKey(), indexFile);
        rowIndexEntrySerializer.serialize(indexEntry, indexFile, indexInfo);
    }
    catch (IOException e)
    {
        // Wrap with the file path for diagnostics, preserving the cause.
        throw new FSWriteError(e, indexFile.getPath());
    }
    final long endPosition = indexFile.position();

    if (logger.isTraceEnabled())
        logger.trace("wrote index entry: {} at {}", indexEntry, startPosition);

    summary.maybeAddEntry(key, startPosition, endPosition, dataEnd);
}
/**
 * Reports how many bytes this sstable's bloom filter occupies when serialized.
 *
 * @return serialized size of the bloom filter, in bytes
 */
public long getBloomFilterSerializedSize()
{
    return bf.serializedSize();
}
/**
 * Registers this reader and every off-heap/ref-counted resource it owns with the
 * given identity collection (used for leak detection / ref accounting).
 */
public void addTo(Ref.IdentityCollection identities)
{
    identities.add(this);
    identities.add(tidy.globalRef);
    // Shared file handles, bloom filter, and index summary all carry their own refs.
    dfile.addTo(identities);
    ifile.addTo(identities);
    bf.addTo(identities);
    indexSummary.addTo(identities);
}
/** * @return the largest timestamp before which it's okay to drop tombstones for the given partition; * i.e., after the maxPurgeableTimestamp there may exist newer data that still needs to be suppressed * in other sstables. This returns the minimum timestamp for any SSTable that contains this partition and is not * participating in this compaction, or LONG.MAX_VALUE if no such SSTable exists. */ public long maxPurgeableTimestamp(DecoratedKey key) { if (NEVER_PURGE_TOMBSTONES) return Long.MIN_VALUE; List<SSTableReader> filteredSSTables = overlappingTree.search(key); long min = Long.MAX_VALUE; for (SSTableReader sstable : filteredSSTables) { // if we don't have bloom filter(bf_fp_chance=1.0 or filter file is missing), // we check index file instead. if (sstable.getBloomFilter() instanceof AlwaysPresentFilter && sstable.getPosition(key, SSTableReader.Operator.EQ, false) != null) min = Math.min(min, sstable.getMinTimestamp()); else if (sstable.getBloomFilter().isPresent(key.getKey())) min = Math.min(min, sstable.getMinTimestamp()); } return min; }
/**
 * Clones this reader with the supplied values and registers the clone as this reader's replacement.
 *
 * @param newFirst the first key for the replacement (may differ from the original because
 *                 compaction results can be opened pre-emptively)
 * @param reason the {@code OpenReason} recorded on the replacement
 * @param newSummary the index summary for the replacement
 * @return the cloned reader, set as the replacement by this method
 */
private SSTableReader cloneAndReplace(DecoratedKey newFirst, OpenReason reason, IndexSummary newSummary)
{
    SSTableReader clone = internalOpen(descriptor,
                                       components,
                                       metadata,
                                       ifile == null ? null : ifile.sharedCopy(),
                                       dfile.sharedCopy(),
                                       newSummary,
                                       bf.sharedCopy(),
                                       maxDataAge,
                                       sstableMetadata,
                                       reason,
                                       header);
    // These assignments are independent of one another; carry over suspicion state and bounds.
    clone.isSuspect.set(isSuspect.get());
    clone.first = newFirst;
    clone.last = last;
    return clone;
}
@Override
protected Throwable doPostCleanup(Throwable accumulate)
{
    // Release resources in the same order as before — summary, bloom filter, builder —
    // threading any failure through the accumulator rather than throwing.
    return builder.close(bf.close(summary.close(accumulate)));
}
}
/**
 * Appends one partition's index entry: adds the key to the bloom filter, writes the
 * key + serialized entry to the index file, and offers the entry to the index summary.
 *
 * @param key partition key being indexed
 * @param indexEntry row index entry to serialize
 * @param dataEnd end position of the partition in the data file
 * @param indexInfo pre-serialized index info passed through to the serializer
 * @throws IOException declared for callers; write failures surface as FSWriteError
 */
public void append(DecoratedKey key, RowIndexEntry indexEntry, long dataEnd, ByteBuffer indexInfo) throws IOException
{
    bf.add(key);
    final long startPosition = indexFile.position();
    try
    {
        ByteBufferUtil.writeWithShortLength(key.getKey(), indexFile);
        rowIndexEntrySerializer.serialize(indexEntry, indexFile, indexInfo);
    }
    catch (IOException e)
    {
        // Wrap with the file path for diagnostics, preserving the cause.
        throw new FSWriteError(e, indexFile.getPath());
    }
    final long endPosition = indexFile.position();

    if (logger.isTraceEnabled())
        logger.trace("wrote index entry: {} at {}", indexEntry, startPosition);

    summary.maybeAddEntry(key, startPosition, endPosition, dataEnd);
}
/**
 * Reports the off-heap memory footprint of this sstable's bloom filter.
 *
 * @return number of bytes the bloom filter occupies off heap
 */
public long getBloomFilterOffHeapSize()
{
    return bf.offHeapSize();
}
/**
 * Reports how many bytes this sstable's bloom filter occupies when serialized.
 *
 * @return serialized size of the bloom filter, in bytes
 */
public long getBloomFilterSerializedSize()
{
    return bf.serializedSize();
}
/**
 * Registers this reader and every off-heap/ref-counted resource it owns with the
 * given identity collection (used for leak detection / ref accounting).
 */
public void addTo(Ref.IdentityCollection identities)
{
    identities.add(this);
    identities.add(tidy.globalRef);
    // Shared file handles, bloom filter, and index summary all carry their own refs.
    dfile.addTo(identities);
    ifile.addTo(identities);
    bf.addTo(identities);
    indexSummary.addTo(identities);
}
|| sstable.getBloomFilter().isPresent(key))