mirror of https://github.com/lightningdevkit/rust-lightning.git
Queue BackgroundEvent to force close channels upon ChannelManager::read
This results in a new, potentially redundant, `ChannelMonitorUpdate` that must be applied to `ChannelMonitor`s to broadcast the holder's latest commitment transaction.

This is a behavior change for anchor channels since their commitments may require additional fees to be attached through a child anchor transaction. Recall that anchor transactions are only generated by the event consumer after processing a `BumpTransactionEvent::ChannelClose` event, which is yielded after applying a `ChannelMonitorUpdateStep::ChannelForceClosed` monitor update. Assuming the node operator is not watching the mempool to generate these anchor transactions without LDK, an anchor channel which we had to fail when deserializing our `ChannelManager` would have its commitment transaction broadcast by itself, potentially exposing the node operator to loss of funds if the commitment transaction's fee is not enough to be accepted into the network's mempools.
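To make the fee risk concrete, here is a small standalone sketch of the CPFP arithmetic behind an anchor channel close: a commitment transaction paying little or no fee only propagates if a child transaction spending its anchor output raises the fee of the whole package to the target feerate. All numbers and function names below are illustrative assumptions, not LDK API.

    /// Fee (in sats) the child anchor tx must pay so the commitment + child package
    /// meets `target_feerate_sat_per_kw` (sats per 1000 weight units, as LDK uses).
    fn required_child_fee_sat(
    	commitment_weight: u64, commitment_fee_sat: u64,
    	child_weight: u64, target_feerate_sat_per_kw: u64,
    ) -> u64 {
    	let package_weight = commitment_weight + child_weight;
    	let target_fee = target_feerate_sat_per_kw * package_weight / 1_000;
    	// The child must make up whatever the commitment itself does not pay.
    	target_fee.saturating_sub(commitment_fee_sat)
    }

    fn main() {
    	// E.g. a 1124 WU commitment paying 0 sats plus a 700 WU child at 2530 sat/kW:
    	assert_eq!(required_child_fee_sat(1124, 0, 700, 2530), 4614);
    }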
parent 5a90f014f2
commit bd4eb0da76

6 changed files with 81 additions and 59 deletions
@@ -69,8 +69,7 @@ use crate::sync::{Mutex, LockTestExt};
 /// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
 /// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
 /// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
 #[must_use]
 pub struct ChannelMonitorUpdate {
 	pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
@@ -491,8 +490,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
 
 );
 
-#[cfg_attr(any(test, fuzzing, feature = "_test_utils"), derive(PartialEq, Eq))]
-#[derive(Clone)]
+#[derive(Clone, PartialEq, Eq)]
 pub(crate) enum ChannelMonitorUpdateStep {
 	LatestHolderCommitmentTXInfo {
 		commitment_tx: HolderCommitmentTransaction,
@@ -2268,10 +2266,14 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 	{
 		log_info!(logger, "Applying update to monitor {}, bringing update_id from {} to {} with {} changes.",
 			log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
-		// ChannelMonitor updates may be applied after force close if we receive a
-		// preimage for a broadcasted commitment transaction HTLC output that we'd
-		// like to claim on-chain. If this is the case, we no longer have guaranteed
-		// access to the monitor's update ID, so we use a sentinel value instead.
+		// ChannelMonitor updates may be applied after force close if we receive a preimage for a
+		// broadcasted commitment transaction HTLC output that we'd like to claim on-chain. If this
+		// is the case, we no longer have guaranteed access to the monitor's update ID, so we use a
+		// sentinel value instead.
+		//
+		// The `ChannelManager` may also queue redundant `ChannelForceClosed` updates if it still
+		// thinks the channel needs to have its commitment transaction broadcast, so we'll allow
+		// them as well.
 		if updates.update_id == CLOSED_CHANNEL_UPDATE_ID {
 			assert_eq!(updates.updates.len(), 1);
 			match updates.updates[0] {
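The sentinel check above can be illustrated in isolation. A minimal sketch, assuming a reserved maximum update ID (LDK reserves `u64::MAX` as `CLOSED_CHANNEL_UPDATE_ID`); the gating function itself is hypothetical, not LDK's:

    const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

    /// Hypothetical gate mirroring the idea above: normal updates must advance the
    /// counter, while post-close (and now redundant force-close) updates may repeat.
    fn update_acceptable(latest_applied: u64, incoming: u64) -> bool {
    	incoming == CLOSED_CHANNEL_UPDATE_ID || incoming > latest_applied
    }

    fn main() {
    	assert!(update_acceptable(5, 6));
    	assert!(!update_acceptable(5, 5));
    	// A second ChannelForceClosed update at the sentinel ID is still accepted.
    	assert!(update_acceptable(CLOSED_CHANNEL_UPDATE_ID, CLOSED_CHANNEL_UPDATE_ID));
    }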
@@ -7310,6 +7310,7 @@ where
 		let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = Vec::new();
+		let mut pending_background_events = Vec::new();
 		for _ in 0..channel_count {
 			let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
 				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
@@ -7339,9 +7340,11 @@ where
 				log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
 				log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
 					log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
-				let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
+				let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
+				if let Some(monitor_update) = monitor_update {
+					pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
+				}
 				failed_htlcs.append(&mut new_failed_htlcs);
-				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
 				channel_closures.push(events::Event::ChannelClosed {
 					channel_id: channel.channel_id(),
 					user_channel_id: channel.get_user_id(),
@@ -7406,10 +7409,13 @@ where
 			}
 		}
 
-		for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
+		for (funding_txo, _) in args.channel_monitors.iter() {
 			if !funding_txo_set.contains(funding_txo) {
-				log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
-				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+				let monitor_update = ChannelMonitorUpdate {
+					update_id: CLOSED_CHANNEL_UPDATE_ID,
+					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
+				};
+				pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
 			}
 		}
 
@@ -7462,10 +7468,17 @@ where
 			}
 
 		let background_event_count: u64 = Readable::read(reader)?;
-		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
 		for _ in 0..background_event_count {
 			match <u8 as Readable>::read(reader)? {
-				0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+				0 => {
+					let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
+					if pending_background_events.iter().find(|e| {
+						let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
+						*pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
+					}).is_none() {
+						pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
+					}
+				}
 				_ => return Err(DecodeError::InvalidValue),
 			}
 		}
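The deduplication in the hunk above guards against queueing the same force-close update twice: once freshly generated for a stale channel and once read back from the manager's serialized state (this is also why the `PartialEq` derives were added earlier in this diff). A self-contained sketch of that rule, with simplified stand-in types rather than LDK's:

    #[derive(Clone, PartialEq)]
    struct ClosingMonitorUpdate { funding_txo: [u8; 32], update_id: u64 }

    /// Push `candidate` only if an identical event is not already pending.
    fn queue_unique(pending: &mut Vec<ClosingMonitorUpdate>, candidate: ClosingMonitorUpdate) {
    	if !pending.iter().any(|e| *e == candidate) {
    		pending.push(candidate);
    	}
    }

    fn main() {
    	let ev = ClosingMonitorUpdate { funding_txo: [0; 32], update_id: u64::MAX };
    	let mut pending = vec![ev.clone()];
    	queue_unique(&mut pending, ev); // duplicate read back from disk is dropped
    	assert_eq!(pending.len(), 1);
    }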
@@ -7840,7 +7853,7 @@ where
 			per_peer_state: FairRwLock::new(per_peer_state),
 
 			pending_events: Mutex::new(pending_events_read),
-			pending_background_events: Mutex::new(pending_background_events_read),
+			pending_background_events: Mutex::new(pending_background_events),
 			total_consistency_lock: RwLock::new(()),
 			persistence_notifier: Notifier::new(),
 
@@ -1859,15 +1859,18 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 	let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
 	let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000);
 
+	// Serialize Bob with the initial state of both channels, which we'll use later.
+	let bob_serialized = nodes[1].node.encode();
+
 	// Route two payments for each channel from Alice to Bob to lock in the HTLCs.
 	let payment_a = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
 	let payment_b = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
 	let payment_c = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
 	let payment_d = route_payment(&nodes[0], &[&nodes[1]], 50_000_000);
 
-	// Serialize Bob with the HTLCs locked in. We'll restart Bob later on with the state at this
-	// point such that he broadcasts a revoked commitment transaction.
-	let bob_serialized = nodes[1].node.encode();
+	// Serialize Bob's monitors with the HTLCs locked in. We'll restart Bob later on with the state
+	// at this point such that he broadcasts a revoked commitment transaction with the HTLCs
+	// present.
 	let bob_serialized_monitor_a = get_monitor!(nodes[1], chan_a.2).encode();
 	let bob_serialized_monitor_b = get_monitor!(nodes[1], chan_b.2).encode();
 
@@ -1897,30 +1900,26 @@ fn test_anchors_aggregated_revoked_htlc_tx() {
 		}
 	}
 
-	// Bob force closes by broadcasting his revoked state for each channel.
-	nodes[1].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[0].node.get_our_node_id()).unwrap();
-	check_added_monitors(&nodes[1], 1);
-	check_closed_broadcast(&nodes[1], 1, true);
-	check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
-	let revoked_commitment_a = {
-		let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-		assert_eq!(txn.len(), 1);
-		let revoked_commitment = txn.pop().unwrap();
-		assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-		check_spends!(revoked_commitment, chan_a.3);
-		revoked_commitment
-	};
-	nodes[1].node.force_close_broadcasting_latest_txn(&chan_b.2, &nodes[0].node.get_our_node_id()).unwrap();
-	check_added_monitors(&nodes[1], 1);
-	check_closed_broadcast(&nodes[1], 1, true);
-	check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed);
-	let revoked_commitment_b = {
-		let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-		assert_eq!(txn.len(), 1);
-		let revoked_commitment = txn.pop().unwrap();
-		assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
-		check_spends!(revoked_commitment, chan_b.3);
-		revoked_commitment
-	};
+	// Bob force closes by restarting with the outdated state, prompting the ChannelMonitors to
+	// broadcast the latest commitment transaction known to them, which in our case is the one with
+	// the HTLCs still pending.
+	nodes[1].node.timer_tick_occurred();
+	check_added_monitors(&nodes[1], 2);
+	check_closed_event!(&nodes[1], 2, ClosureReason::OutdatedChannelManager);
+	let (revoked_commitment_a, revoked_commitment_b) = {
+		let txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(txn.len(), 2);
+		assert_eq!(txn[0].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+		assert_eq!(txn[1].output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs
+		if txn[0].input[0].previous_output.txid == chan_a.3.txid() {
+			check_spends!(&txn[0], &chan_a.3);
+			check_spends!(&txn[1], &chan_b.3);
+			(txn[0].clone(), txn[1].clone())
+		} else {
+			check_spends!(&txn[1], &chan_a.3);
+			check_spends!(&txn[0], &chan_b.3);
+			(txn[1].clone(), txn[0].clone())
+		}
+	};
 
 	// Bob should now receive two events to bump his revoked commitment transaction fees.
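Note that the rewritten test drives the force-close through a timer tick rather than an explicit force-close call: the monitor updates queued during deserialization only take effect once the manager processes its background events. A standalone sketch of that sequencing, with simplified stand-ins for the LDK types rather than the real API:

    struct Manager { background_events: Vec<&'static str>, broadcasts: Vec<&'static str> }

    impl Manager {
    	/// Draining the background queue is what actually triggers the broadcast.
    	fn timer_tick_occurred(&mut self) {
    		for _ev in self.background_events.drain(..) {
    			self.broadcasts.push("commitment_tx");
    		}
    	}
    }

    fn main() {
    	let mut m = Manager { background_events: vec!["ClosingMonitorUpdate"], broadcasts: vec![] };
    	assert!(m.broadcasts.is_empty()); // nothing is broadcast until the tick
    	m.timer_tick_occurred();
    	assert_eq!(m.broadcasts.len(), 1);
    }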
@@ -334,9 +334,15 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[0].node.has_pending_payments());
-	let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-	assert_eq!(as_broadcasted_txn.len(), 1);
-	assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+	nodes[0].node.timer_tick_occurred();
+	if !confirm_before_reload {
+		let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		assert_eq!(as_broadcasted_txn.len(), 1);
+		assert_eq!(as_broadcasted_txn[0], as_commitment_tx);
+	} else {
+		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+	}
+	check_added_monitors!(nodes[0], 1);
 
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
@@ -499,9 +505,11 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
 	// force-close the channel.
 	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+	nodes[0].node.timer_tick_occurred();
 	assert!(nodes[0].node.list_channels().is_empty());
 	assert!(nodes[0].node.has_pending_payments());
 	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+	check_added_monitors!(nodes[0], 1);
 
 	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -2794,6 +2802,7 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) {
 	if let Event::PaymentSent { payment_preimage, .. } = events[1] { assert_eq!(payment_preimage, our_payment_preimage); } else { panic!(); }
 	// Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
 	// the double-claim that would otherwise appear at the end of this test.
+	nodes[0].node.timer_tick_occurred();
 	let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(as_broadcasted_txn.len(), 1);
@@ -422,20 +422,22 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	nodes_0_deserialized = nodes_0_deserialized_tmp;
 	assert!(nodes_0_read.is_empty());
 
-	{ // Channel close should result in a commitment tx
-		let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(txn.len(), 1);
-		check_spends!(txn[0], funding_tx);
-		assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
-	}
-
 	for monitor in node_0_monitors.drain(..) {
 		assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
 			ChannelMonitorUpdateStatus::Completed);
 		check_added_monitors!(nodes[0], 1);
 	}
 	nodes[0].node = &nodes_0_deserialized;
 
 	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
+	{ // Channel close should result in a commitment tx
+		nodes[0].node.timer_tick_occurred();
+		let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(txn.len(), 1);
+		check_spends!(txn[0], funding_tx);
+		assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+	}
+	check_added_monitors!(nodes[0], 1);
 
 	// nodes[1] and nodes[2] have no lost state with nodes[0]...
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -920,8 +922,10 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 		});
 	}
 
+	nodes[1].node.timer_tick_occurred();
 	let bs_commitment_tx = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(bs_commitment_tx.len(), 1);
+	check_added_monitors!(nodes[1], 1);
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
@@ -320,12 +320,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
 		let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
 
 		reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
-		if !reorg_after_reload {
-			// If the channel is already closed when we reload the node, we'll broadcast a closing
-			// transaction via the ChannelMonitor which is missing a corresponding channel.
-			assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
-			nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
-		}
+		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
 	}
 
 	if reorg_after_reload {