/** * Used by {@link Peer} to decide whether or not to discard this block and any blocks building upon it, in case * the Bloom filter used to request them may be exhausted, that is, not have sufficient keys in the deterministic * sequence within it to reliably find relevant transactions. */ public boolean checkForFilterExhaustion(FilteredBlock block) { keyChainGroupLock.lock(); try { int epoch = keyChainGroup.getCombinedKeyLookaheadEpochs(); for (Transaction tx : block.getAssociatedTransactions().values()) { markKeysAsUsed(tx); } int newEpoch = keyChainGroup.getCombinedKeyLookaheadEpochs(); checkState(newEpoch >= epoch); // If the key lookahead epoch has advanced, there was a call to addKeys and the PeerGroup already has a // pending request to recalculate the filter queued up on another thread. The calling Peer should abandon // block at this point and await a new filter before restarting the download. return newEpoch > epoch; } finally { keyChainGroupLock.unlock(); } }
/** * Used by {@link Peer} to decide whether or not to discard this block and any blocks building upon it, in case * the Bloom filter used to request them may be exhausted, that is, not have sufficient keys in the deterministic * sequence within it to reliably find relevant transactions. */ public boolean checkForFilterExhaustion(FilteredBlock block) { keyChainGroupLock.lock(); try { int epoch = keyChainGroup.getCombinedKeyLookaheadEpochs(); for (Transaction tx : block.getAssociatedTransactions().values()) { markKeysAsUsed(tx); } int newEpoch = keyChainGroup.getCombinedKeyLookaheadEpochs(); checkState(newEpoch >= epoch); // If the key lookahead epoch has advanced, there was a call to addKeys and the PeerGroup already has a // pending request to recalculate the filter queued up on another thread. The calling Peer should abandon // block at this point and await a new filter before restarting the download. return newEpoch > epoch; } finally { keyChainGroupLock.unlock(); } }
@Override public synchronized void onBlocksDownloaded(Peer peer, Block block, @Nullable FilteredBlock filteredBlock, int blocksLeft) { blocksInLastSecond++; bytesInLastSecond += Block.HEADER_SIZE; List<Transaction> blockTransactions = block.getTransactions(); // This whole area of the type hierarchy is a mess. int txCount = (blockTransactions != null ? countAndMeasureSize(blockTransactions) : 0) + (filteredBlock != null ? countAndMeasureSize(filteredBlock.getAssociatedTransactions().values()) : 0); txnsInLastSecond = txnsInLastSecond + txCount; if (filteredBlock != null) origTxnsInLastSecond += filteredBlock.getTransactionCount(); }
@Override public synchronized void onBlocksDownloaded(Peer peer, Block block, @Nullable FilteredBlock filteredBlock, int blocksLeft) { blocksInLastSecond++; bytesInLastSecond += Block.HEADER_SIZE; List<Transaction> blockTransactions = block.getTransactions(); // This whole area of the type hierarchy is a mess. int txCount = (blockTransactions != null ? countAndMeasureSize(blockTransactions) : 0) + (filteredBlock != null ? countAndMeasureSize(filteredBlock.getAssociatedTransactions().values()) : 0); txnsInLastSecond = txnsInLastSecond + txCount; if (filteredBlock != null) origTxnsInLastSecond += filteredBlock.getTransactionCount(); }
/** * Used by {@link Peer} to decide whether or not to discard this block and any blocks building upon it, in case * the Bloom filter used to request them may be exhausted, that is, not have sufficient keys in the deterministic * sequence within it to reliably find relevant transactions. */ public boolean checkForFilterExhaustion(FilteredBlock block) { keyChainGroupLock.lock(); try { int epoch = keyChainGroup.getCombinedKeyLookaheadEpochs(); for (Transaction tx : block.getAssociatedTransactions().values()) { markKeysAsUsed(tx); } int newEpoch = keyChainGroup.getCombinedKeyLookaheadEpochs(); checkState(newEpoch >= epoch); // If the key lookahead epoch has advanced, there was a call to addKeys and the PeerGroup already has a // pending request to recalculate the filter queued up on another thread. The calling Peer should abandon // block at this point and await a new filter before restarting the download. return newEpoch > epoch; } finally { keyChainGroupLock.unlock(); } }
@Override public synchronized void onBlocksDownloaded(Peer peer, Block block, @Nullable FilteredBlock filteredBlock, int blocksLeft) { blocksInLastSecond++; bytesInLastSecond += Block.HEADER_SIZE; List<Transaction> blockTransactions = block.getTransactions(); // This whole area of the type hierarchy is a mess. int txCount = (blockTransactions != null ? countAndMeasureSize(blockTransactions) : 0) + (filteredBlock != null ? countAndMeasureSize(filteredBlock.getAssociatedTransactions().values()) : 0); txnsInLastSecond = txnsInLastSecond + txCount; if (filteredBlock != null) origTxnsInLastSecond += filteredBlock.getTransactionCount(); }
/** * Used by {@link Peer} to decide whether or not to discard this block and any blocks building upon it, in case * the Bloom filter used to request them may be exhausted, that is, not have sufficient keys in the deterministic * sequence within it to reliably find relevant transactions. */ public boolean checkForFilterExhaustion(FilteredBlock block) { keyChainGroupLock.lock(); try { int epoch = keyChainGroup.getCombinedKeyLookaheadEpochs(); for (Transaction tx : block.getAssociatedTransactions().values()) { markKeysAsUsed(tx); } int newEpoch = keyChainGroup.getCombinedKeyLookaheadEpochs(); checkState(newEpoch >= epoch); // If the key lookahead epoch has advanced, there was a call to addKeys and the PeerGroup already has a // pending request to recalculate the filter queued up on another thread. The calling Peer should abandon // block at this point and await a new filter before restarting the download. return newEpoch > epoch; } finally { keyChainGroupLock.unlock(); } }
@Override public synchronized void onBlocksDownloaded(Peer peer, Block block, @Nullable FilteredBlock filteredBlock, int blocksLeft) { blocksInLastSecond++; bytesInLastSecond += Block.HEADER_SIZE; List<Transaction> blockTransactions = block.getTransactions(); // This whole area of the type hierarchy is a mess. int txCount = (blockTransactions != null ? countAndMeasureSize(blockTransactions) : 0) + (filteredBlock != null ? countAndMeasureSize(filteredBlock.getAssociatedTransactions().values()) : 0); txnsInLastSecond = txnsInLastSecond + txCount; if (filteredBlock != null) origTxnsInLastSecond += filteredBlock.getTransactionCount(); }
// Wraps a block that cannot yet be connected to the chain, along with any filtered-block data
// that arrived with it. Either both filteredTxHashes and filteredTxn are present, or neither.
class OrphanBlock {
    final Block block;
    final List<Sha256Hash> filteredTxHashes;
    final Map<Sha256Hash, Transaction> filteredTxn;

    OrphanBlock(Block block, @Nullable List<Sha256Hash> filteredTxHashes, @Nullable Map<Sha256Hash, Transaction> filteredTxn) {
        final boolean filtered = filteredTxHashes != null && filteredTxn != null;
        // Exactly one representation must be supplied: either the block carries its own
        // transactions, or the filtered-block data does — never both, never neither.
        Preconditions.checkArgument((block.transactions == null) == filtered);
        this.block = block;
        this.filteredTxHashes = filteredTxHashes;
        this.filteredTxn = filteredTxn;
    }
}
// Holds blocks that we have received but can't plug into the chain yet, eg because they were created whilst we
// Holder for a block we received out of order and cannot connect yet, plus the filtered-block
// companion data when running in filtered mode (both nullable fields are set together or not at all).
class OrphanBlock {
    final Block block;
    final List<Sha256Hash> filteredTxHashes;
    final Map<Sha256Hash, Transaction> filteredTxn;

    OrphanBlock(Block block, @Nullable List<Sha256Hash> filteredTxHashes, @Nullable Map<Sha256Hash, Transaction> filteredTxn) {
        final boolean hasFilteredData = filteredTxHashes != null && filteredTxn != null;
        // The block's own transaction list and the filtered data are mutually exclusive:
        // a filtered block has block.transactions == null, a full block has no filtered data.
        Preconditions.checkArgument((block.transactions == null && hasFilteredData)
                || (block.transactions != null && !hasFilteredData));
        this.block = block;
        this.filteredTxHashes = filteredTxHashes;
        this.filteredTxn = filteredTxn;
    }
}
// Holds blocks that we have received but can't plug into the chain yet, eg because they were created whilst we
// Pairs an unconnectable ("orphan") block with its filtered-block payload, when any.
// Invariant: filteredTxHashes and filteredTxn are either both null or both non-null.
class OrphanBlock {
    final Block block;
    final List<Sha256Hash> filteredTxHashes;
    final Map<Sha256Hash, Transaction> filteredTxn;

    OrphanBlock(Block block, @Nullable List<Sha256Hash> filteredTxHashes, @Nullable Map<Sha256Hash, Transaction> filteredTxn) {
        boolean filtered = filteredTxHashes != null && filteredTxn != null;
        // Require exactly one transaction source: the filtered data iff the block itself
        // carries no transaction list.
        Preconditions.checkArgument(filtered == (block.transactions == null));
        this.block = block;
        this.filteredTxHashes = filteredTxHashes;
        this.filteredTxn = filteredTxn;
    }
}
// Holds blocks that we have received but can't plug into the chain yet, eg because they were created whilst we
// A block awaiting its parent before it can be connected, bundled with the filtered-block
// transaction data that accompanied it (present only in filtered-download mode).
class OrphanBlock {
    final Block block;
    final List<Sha256Hash> filteredTxHashes;
    final Map<Sha256Hash, Transaction> filteredTxn;

    OrphanBlock(Block block, @Nullable List<Sha256Hash> filteredTxHashes, @Nullable Map<Sha256Hash, Transaction> filteredTxn) {
        final boolean filtered = filteredTxHashes != null && filteredTxn != null;
        // Either the block supplies transactions itself, or the filtered data does — not both.
        Preconditions.checkArgument((block.transactions == null && filtered)
                || (block.transactions != null && !filtered));
        this.block = block;
        this.filteredTxHashes = filteredTxHashes;
        this.filteredTxn = filteredTxn;
    }
}
// Holds blocks that we have received but can't plug into the chain yet, eg because they were created whilst we
/**
 * Applies the given Bloom filter to each block and feeds the resulting filtered block — followed
 * by every transaction the filter matched — into the peer's inbound queue, simulating the
 * replies a remote peer would send.
 *
 * @param p1     the inbound message queue standing in for a remote peer
 * @param blocks blocks to filter and deliver
 * @param filter Bloom filter to apply (updated as matches are found)
 */
private void filterAndSend(InboundMessageQueuer p1, List<Block> blocks, BloomFilter filter) {
    for (Block rawBlock : blocks) {
        FilteredBlock filtered = filter.applyAndUpdate(rawBlock);
        inbound(p1, filtered);
        for (Transaction match : filtered.getAssociatedTransactions().values()) {
            inbound(p1, match);
        }
    }
}
}