Drop last bits of rescan as it's too complicated to be worth having

Previously, we had a concept of "rescanning" blocks when, while
connecting a block, we detected a need to monitor for a new set of
outputs in future blocks. In such cases we might also need to learn
about spends of those new outputs later in the *same block*,
requiring clients who filter blocks to fetch a newly-filtered copy
of the same block. While redoing the chain access API, it became
increasingly clear this was an overly complicated API feature, and
it seems likely most clients will not use it anyway.

Further, any client that *does* filter blocks can simply update its
filtering algorithm to include any in-block descendants of matched
transactions in the filter results, avoiding the need for rescan
support entirely.
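
A rough sketch of what such a descendant-inclusive filter could look
like on the client side (`filter_block_txn`, `watched_outpoints`, and
`watched_scripts` are hypothetical names, not APIs in this crate; the
(index, transaction) pairs returned are what a client would pass on
as its filtered `txdata`):

    use std::collections::HashSet;
    use bitcoin::{OutPoint, Script, Transaction, Txid};

    // Hypothetical client-side filter: keep any transaction that pays a
    // watched script or spends a watched outpoint, plus any in-block
    // descendant of a transaction already kept, so the same block never
    // needs to be re-filtered.
    fn filter_block_txn<'a>(
        block_txn: &'a [Transaction],
        watched_outpoints: &HashSet<OutPoint>,
        watched_scripts: &HashSet<Script>,
    ) -> Vec<(usize, &'a Transaction)> {
        let mut matched = Vec::new();
        let mut matched_txids: HashSet<Txid> = HashSet::new();
        for (idx, tx) in block_txn.iter().enumerate() {
            let spends_match = tx.input.iter().any(|input| {
                // Match if it spends a watched outpoint or an output of an
                // earlier match in this block (an in-block descendant).
                watched_outpoints.contains(&input.previous_output)
                    || matched_txids.contains(&input.previous_output.txid)
            });
            let pays_match = tx
                .output
                .iter()
                .any(|out| watched_scripts.contains(&out.script_pubkey));
            if spends_match || pays_match {
                matched_txids.insert(tx.txid());
                matched.push((idx, tx));
            }
        }
        matched
    }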

Thus, it was decided that we'd move forward without rescan support
in #649. However, to avoid significant further changes in the
already-large #649, we decided to fully remove support in a
follow-up.

Here, we remove the API features that existed for rescan and fix
the few tests that relied on it.

After this commit, we only ever have one possible version of the
transaction data passed at block connection, making it possible to
be significantly more confident that our test coverage actually
captures all realistic scenarios.
Matt Corallo 2020-09-30 15:18:29 -04:00
parent b627aa6e7c
commit 2bd571d2f9
3 changed files with 25 additions and 35 deletions


@@ -80,32 +80,27 @@ impl<ChanSigner: ChannelKeys, C: Deref, T: Deref, F: Deref, L: Deref> ChainMonit
 	/// [`ChannelMonitor::block_connected`] for details. Any HTLCs that were resolved on chain will
 	/// be returned by [`chain::Watch::release_pending_monitor_events`].
 	///
-	/// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch, returning
-	/// `true` if so. Subsequent calls must not exclude any transactions matching the new outputs
-	/// nor any in-block descendants of such transactions. It is not necessary to re-fetch the block
-	/// to obtain updated `txdata`.
+	/// Calls back to [`chain::Filter`] if any monitor indicated new outputs to watch. Subsequent
+	/// calls must not exclude any transactions matching the new outputs nor any in-block
+	/// descendants of such transactions. It is not necessary to re-fetch the block to obtain
+	/// updated `txdata`.
 	///
 	/// [`ChannelMonitor::block_connected`]: ../channelmonitor/struct.ChannelMonitor.html#method.block_connected
 	/// [`chain::Watch::release_pending_monitor_events`]: ../trait.Watch.html#tymethod.release_pending_monitor_events
 	/// [`chain::Filter`]: ../trait.Filter.html
-	pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) -> bool {
-		let mut has_new_outputs_to_watch = false;
-		{
-			let mut monitors = self.monitors.lock().unwrap();
-			for monitor in monitors.values_mut() {
-				let mut txn_outputs = monitor.block_connected(header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);
-				has_new_outputs_to_watch |= !txn_outputs.is_empty();
+	pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+		let mut monitors = self.monitors.lock().unwrap();
+		for monitor in monitors.values_mut() {
+			let mut txn_outputs = monitor.block_connected(header, txdata, height, &*self.broadcaster, &*self.fee_estimator, &*self.logger);

-				if let Some(ref chain_source) = self.chain_source {
-					for (txid, outputs) in txn_outputs.drain(..) {
-						for (idx, output) in outputs.iter().enumerate() {
-							chain_source.register_output(&OutPoint { txid, index: idx as u16 }, &output.script_pubkey);
-						}
-					}
-				}
-			}
-		}
-		has_new_outputs_to_watch
+			if let Some(ref chain_source) = self.chain_source {
+				for (txid, outputs) in txn_outputs.drain(..) {
+					for (idx, output) in outputs.iter().enumerate() {
+						chain_source.register_output(&OutPoint { txid, index: idx as u16 }, &output.script_pubkey);
+					}
+				}
+			}
+		}
 	}

 	/// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
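
A client implementing the `chain::Filter` contract described in the doc
comment above would typically just feed these callbacks into whatever
watched set its block filter consults. A rough sketch, assuming the trait's
two registration methods (`register_tx`/`register_output`) and a
hypothetical `TxFilter`/`WatchedData` pair:

    use std::collections::HashSet;
    use std::sync::Mutex;
    use bitcoin::{Script, Txid};
    use lightning::chain;
    use lightning::chain::transaction::OutPoint;

    // Hypothetical store of everything the client's block filter must match.
    struct WatchedData {
        txids: HashSet<Txid>,
        outpoints: HashSet<OutPoint>,
        scripts: HashSet<Script>,
    }

    struct TxFilter {
        watched: Mutex<WatchedData>,
    }

    impl chain::Filter for TxFilter {
        fn register_tx(&self, txid: &Txid, script_pubkey: &Script) {
            let mut watched = self.watched.lock().unwrap();
            watched.txids.insert(*txid);
            watched.scripts.insert(script_pubkey.clone());
        }

        fn register_output(&self, outpoint: &OutPoint, script_pubkey: &Script) {
            // Called from block_connected() above; later filtering (including
            // in-block descendants of matches) must cover these outputs, so
            // the current block never needs to be re-filtered.
            let mut watched = self.watched.lock().unwrap();
            watched.outpoints.insert(outpoint.clone());
            watched.scripts.insert(script_pubkey.clone());
        }
    }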


@@ -81,7 +81,7 @@ pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32, he
 pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block, height: u32) {
 	let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
-	while node.chain_monitor.chain_monitor.block_connected(&block.header, &txdata, height) {}
+	node.chain_monitor.chain_monitor.block_connected(&block.header, &txdata, height);
 	node.node.block_connected(&block.header, &txdata, height);
 }


@@ -4852,8 +4852,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
 	let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
-	assert_eq!(spend_txn.len(), 2);
-	assert_eq!(spend_txn[0], spend_txn[1]);
+	assert_eq!(spend_txn.len(), 1);
 	check_spends!(spend_txn[0], node_txn[0]);
 }
@@ -4885,10 +4884,9 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
 	let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
-	assert_eq!(spend_txn.len(), 3);
-	assert_eq!(spend_txn[0], spend_txn[1]); // to_remote output on revoked remote commitment_tx
-	check_spends!(spend_txn[0], revoked_local_txn[0]);
-	check_spends!(spend_txn[2], node_txn[0]);
+	assert_eq!(spend_txn.len(), 2);
+	check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
+	check_spends!(spend_txn[1], node_txn[0]);
 }
 #[test]
@@ -4983,8 +4981,8 @@ fn test_static_spendable_outputs_timeout_tx() {
 	expect_payment_failed!(nodes[1], our_payment_hash, true);
 	let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
-	assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote (*2), timeout_tx.output (*1)
-	check_spends!(spend_txn[2], node_txn[0].clone());
+	assert_eq!(spend_txn.len(), 2); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output
+	check_spends!(spend_txn[1], node_txn[0]);
 }
 #[test]
@@ -5159,12 +5157,11 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
 	// Check A's ChannelMonitor was able to generate the right spendable output descriptor
 	let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000);
-	assert_eq!(spend_txn.len(), 3); // Duplicated SpendableOutput due to block rescan after revoked htlc output tracking
-	assert_eq!(spend_txn[0], spend_txn[1]);
+	assert_eq!(spend_txn.len(), 2);
 	assert_eq!(spend_txn[0].input.len(), 1);
 	check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
 	assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
-	check_spends!(spend_txn[2], node_txn[1]); // spending justice tx output on the htlc success tx
+	check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
 }
 #[test]
@@ -5726,10 +5723,9 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
 	// Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
 	let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000);
-	assert_eq!(spend_txn.len(), 3);
-	assert_eq!(spend_txn[0], spend_txn[1]);
+	assert_eq!(spend_txn.len(), 2);
 	check_spends!(spend_txn[0], local_txn[0]);
-	check_spends!(spend_txn[2], htlc_timeout);
+	check_spends!(spend_txn[1], htlc_timeout);
 }
 #[test]
@@ -5797,10 +5793,9 @@ fn test_key_derivation_params() {
 	// Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
 	let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
 	let spend_txn = check_spendable_outputs!(nodes[0], 1, new_keys_manager, 100000);
-	assert_eq!(spend_txn.len(), 3);
-	assert_eq!(spend_txn[0], spend_txn[1]);
+	assert_eq!(spend_txn.len(), 2);
 	check_spends!(spend_txn[0], local_txn_1[0]);
-	check_spends!(spend_txn[2], htlc_timeout);
+	check_spends!(spend_txn[1], htlc_timeout);
 }
 #[test]