mirror of
https://github.com/lightningdevkit/rust-lightning.git
synced 2025-02-25 15:20:24 +01:00
Generate a funding_locked on relevant transactions_confirmed calls
Previously, if we were offline when a funding transaction was locked in, and then we came back online, calling `best_block_updated` once followed by `transactions_confirmed`, we'd not generate a funding_locked until the next `best_block_updated`. We address this by re-calling `best_block_updated` in `transactions_confirmed`, similar to how `ChannelMonitor` works.
This commit is contained in:
parent
ca163c3fae
commit
ea769427fe
2 changed files with 32 additions and 0 deletions
|
@ -5505,6 +5505,12 @@ where
|
||||||
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
|
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
|
||||||
self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)
|
self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)
|
||||||
.map(|(a, b)| (a, Vec::new(), b)));
|
.map(|(a, b)| (a, Vec::new(), b)));
|
||||||
|
|
||||||
|
let last_best_block_height = self.best_block.read().unwrap().height();
|
||||||
|
if height < last_best_block_height {
|
||||||
|
let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
|
||||||
|
self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn best_block_updated(&self, header: &BlockHeader, height: u32) {
|
fn best_block_updated(&self, header: &BlockHeader, height: u32) {
|
||||||
|
|
|
@ -3940,6 +3940,32 @@ fn test_funding_peer_disconnect() {
|
||||||
assert!(found_announcement);
|
assert!(found_announcement);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
fn test_funding_locked_without_best_block_updated() {
	// Regression test: a node that was offline while its funding transaction confirmed used to
	// stay silent after reconnecting if it saw `best_block_updated` first and only then
	// `transactions_confirmed` — the `funding_locked` would not go out until yet another
	// `best_block_updated`. Verify the message is now produced straight away.
	let monitor_cfgs = create_chanmon_cfgs(2);
	let cfgs = create_node_cfgs(2, &monitor_cfgs);
	let chan_mgrs = create_node_chanmgrs(2, &cfgs, &[None, None]);
	let mut nodes = create_network(2, &cfgs, &chan_mgrs);

	// Connect via BestBlockFirstSkippingBlocks so block headers arrive ahead of transaction data.
	*nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;

	let funding_transaction = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0, InitFeatures::known(), InitFeatures::known());

	// Advance the chain past the confirmation depth first, *then* deliver the funding
	// transaction's confirmation at its (now historical) height.
	let confirm_height = nodes[0].best_block_info().1 + 1;
	connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
	let txn = [funding_transaction];
	let indexed_txn: Vec<_> = txn.iter().enumerate().collect();
	let header = nodes[0].get_block_header(confirm_height);
	nodes[0].node.transactions_confirmed(&header, &indexed_txn[..], confirm_height);

	// nodes[0] must emit its funding_locked immediately off the transactions_confirmed call.
	let funding_locked_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &funding_locked_msg);
}
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_drop_messages_peer_disconnect_dual_htlc() {
|
fn test_drop_messages_peer_disconnect_dual_htlc() {
|
||||||
// Test that we can handle reconnecting when both sides of a channel have pending
|
// Test that we can handle reconnecting when both sides of a channel have pending
|
||||||
|
|
Loading…
Add table
Reference in a new issue