Track spent WatchedOutputs and re-add if unconfirmed

Previously, we would track a spending transaction but wouldn't account
for it being reorged out of the chain, in which case we wouldn't monitor
the `WatchedOutput`s until they'd be reloaded on restart.

Here, we keep any `WatchedOutput`s around until their spends are
sufficiently confirmed, and only prune them after `ANTI_REORG_DELAY`
confirmations.
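
To make the new lifecycle concrete, here is a simplified, self-contained sketch of the bookkeeping (not the code from this commit): `Txid`, `OutPoint` and `WatchedOutput` are stood in for by plain integers and string slices so it builds without the `bitcoin`/`lightning` crates, and the struct and method names are invented for illustration.

use std::collections::HashMap;

// Placeholder types: u32 stands in for Txid/OutPoint, &str for WatchedOutput.
#[derive(Default)]
struct SyncStateSketch {
	// Outputs we still need to watch for spends.
	watched_outputs: HashMap<u32, &'static str>,
	// (spending txid, confirmation height, spent outpoint, watched output): spends we saw
	// confirm on-chain, kept around until they are buried deeply enough to ignore reorgs.
	outputs_spends_pending_threshold_conf: Vec<(u32, u32, u32, &'static str)>,
}

impl SyncStateSketch {
	// A watched output was spent by `txid`, which confirmed at `conf_height`.
	fn spend_confirmed(&mut self, txid: u32, conf_height: u32, outpoint: u32) {
		if let Some(output) = self.watched_outputs.remove(&outpoint) {
			self.outputs_spends_pending_threshold_conf
				.push((txid, conf_height, outpoint, output));
		}
	}

	// The spending transaction was reorged out of the chain: re-add the affected
	// outputs to the tracking map instead of forgetting them until restart.
	fn spend_unconfirmed(&mut self, txid: u32) {
		let mut still_pending = Vec::new();
		for (t, height, op, out) in self.outputs_spends_pending_threshold_conf.drain(..) {
			if t == txid {
				self.watched_outputs.insert(op, out);
			} else {
				still_pending.push((t, height, op, out));
			}
		}
		self.outputs_spends_pending_threshold_conf = still_pending;
	}
}

fn main() {
	let mut state = SyncStateSketch::default();
	state.watched_outputs.insert(7, "some output");

	// Transaction 42 spends outpoint 7 and confirms at height 100: the output leaves
	// `watched_outputs` but is remembered together with its confirmation height.
	state.spend_confirmed(42, 100, 7);
	assert!(state.watched_outputs.is_empty());
	assert_eq!(state.outputs_spends_pending_threshold_conf.len(), 1);

	// A reorg unconfirms transaction 42: the output is watched again, so a competing
	// spend can still be detected without waiting for a restart.
	state.spend_unconfirmed(42);
	assert!(state.watched_outputs.contains_key(&7));
	assert!(state.outputs_spends_pending_threshold_conf.is_empty());
}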
Elias Rohrer 2024-03-19 15:07:27 +01:00
parent 2c9dbb959d
commit 2f581104b2
3 changed files with 33 additions and 3 deletions


@ -1,4 +1,5 @@
 use lightning::chain::{Confirm, WatchedOutput};
+use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
 use bitcoin::{Txid, BlockHash, Transaction, OutPoint};
 use bitcoin::block::Header;
@ -13,6 +14,9 @@ pub(crate) struct SyncState {
 	// Outputs that were previously processed, but must not be forgotten yet as
 	// we still need to monitor any spends on-chain.
 	pub watched_outputs: HashMap<OutPoint, WatchedOutput>,
+	// Outputs for which we previously saw a spend on-chain but kept around until the spends reach
+	// sufficient depth.
+	pub outputs_spends_pending_threshold_conf: Vec<(Txid, u32, OutPoint, WatchedOutput)>,
 	// The tip hash observed during our last sync.
 	pub last_sync_hash: Option<BlockHash>,
 	// Indicates whether we need to resync, e.g., after encountering an error.
@ -24,6 +28,7 @@ impl SyncState {
 		Self {
 			watched_transactions: HashSet::new(),
 			watched_outputs: HashMap::new(),
+			outputs_spends_pending_threshold_conf: Vec::new(),
 			last_sync_hash: None,
 			pending_sync: false,
 		}
@ -38,6 +43,17 @@ impl SyncState {
 			}
 			self.watched_transactions.insert(txid);
+
+			// If a previously-confirmed output spend is unconfirmed, re-add the watched output to
+			// the tracking map.
+			self.outputs_spends_pending_threshold_conf.retain(|(conf_txid, _, prev_outpoint, output)| {
+				if txid == *conf_txid {
+					self.watched_outputs.insert(*prev_outpoint, output.clone());
+					false
+				} else {
+					true
+				}
+			})
 		}
 	}
@ -57,10 +73,18 @@ impl SyncState {
 			self.watched_transactions.remove(&ctx.tx.txid());
 			for input in &ctx.tx.input {
-				self.watched_outputs.remove(&input.previous_output);
+				if let Some(output) = self.watched_outputs.remove(&input.previous_output) {
+					self.outputs_spends_pending_threshold_conf.push((ctx.tx.txid(), ctx.block_height, input.previous_output, output));
+				}
 			}
 		}
 	}
+
+	pub fn prune_output_spends(&mut self, cur_height: u32) {
+		self.outputs_spends_pending_threshold_conf.retain(|(_, conf_height, _, _)| {
+			cur_height < conf_height + ANTI_REORG_DELAY - 1
+		})
+	}
 }
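
For a concrete sense of the cutoff in `prune_output_spends` above: entries are retained while `cur_height < conf_height + ANTI_REORG_DELAY - 1`, i.e., while the spend has fewer than `ANTI_REORG_DELAY` confirmations. A standalone check (not part of the diff), assuming `ANTI_REORG_DELAY` keeps its current value of 6:

// Assumed value of lightning::chain::channelmonitor::ANTI_REORG_DELAY.
const ANTI_REORG_DELAY: u32 = 6;

// Mirrors the `retain` predicate above: true means the spend is still tracked.
fn keep(conf_height: u32, cur_height: u32) -> bool {
	cur_height < conf_height + ANTI_REORG_DELAY - 1
}

fn main() {
	// A spend confirmed at height 100 stays tracked through five confirmations...
	assert!(keep(100, 100)); // 1 confirmation (the block containing the spend is the tip)
	assert!(keep(100, 104)); // 5 confirmations
	// ...and is pruned once it has ANTI_REORG_DELAY (6) confirmations.
	assert!(!keep(100, 105)); // 6 confirmations
}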


@ -157,6 +157,9 @@ where
 					for c in &confirmables {
 						c.best_block_updated(&tip_header, tip_height);
 					}
+
+					// Prune any sufficiently confirmed output spends
+					sync_state.prune_output_spends(tip_height);
 				}
 				match self.get_confirmed_transactions(&sync_state) {


@ -153,7 +153,7 @@ where
 						}
 					}
-					match maybe_await!(self.sync_best_block_updated(&confirmables, &tip_hash)) {
+					match maybe_await!(self.sync_best_block_updated(&confirmables, &mut sync_state, &tip_hash)) {
 						Ok(()) => {}
 						Err(InternalError::Inconsistency) => {
 							// Immediately restart syncing when we encounter any inconsistencies.
@ -238,7 +238,7 @@ where
 	#[maybe_async]
 	fn sync_best_block_updated(
-		&self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>, tip_hash: &BlockHash,
+		&self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>, sync_state: &mut SyncState, tip_hash: &BlockHash,
 	) -> Result<(), InternalError> {
 		// Inform the interface of the new block.
@ -249,6 +249,9 @@ where
 				for c in confirmables {
 					c.best_block_updated(&tip_header, tip_height);
 				}
+
+				// Prune any sufficiently confirmed output spends
+				sync_state.prune_output_spends(tip_height);
 			}
 		} else {
 			return Err(InternalError::Inconsistency);