Merge pull request #2169 from TheBlueMatt/2023-03-monitor-e-monitor

Block the mon update removing a preimage until upstream mon writes

Commit 3dcdb14ce1
9 changed files with 596 additions and 90 deletions
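The change enforces an ordering invariant across a forwarded payment's two channels: the ChannelMonitorUpdate that removes a preimage from the downstream (B <-> C) monitor must not complete until the upstream (A <-> B) monitor update that stores that preimage has been durably persisted. The sketch below is illustrative only - the types and method names are hypothetical stand-ins, not LDK's actual API - and shows the gating idea under that assumption.

// Illustrative sketch only: hypothetical types, not LDK's API.
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ChanId(u64);

#[derive(Default)]
struct MonitorTracker {
	// Highest update id durably persisted, per channel.
	persisted: HashMap<ChanId, u64>,
	// Downstream updates blocked on (upstream channel, upstream update id).
	blocked: Vec<(ChanId, u64, ChanId)>,
}

impl MonitorTracker {
	// Gate the preimage-removing update on `downstream` behind the
	// preimage-adding update `upstream_update_id` on `upstream`.
	fn block_until_upstream_persisted(&mut self, upstream: ChanId, upstream_update_id: u64, downstream: ChanId) {
		self.blocked.push((upstream, upstream_update_id, downstream));
	}

	// Record a durable write and return downstream channels now free to update.
	fn update_persisted(&mut self, chan: ChanId, update_id: u64) -> Vec<ChanId> {
		let best = self.persisted.entry(chan).or_insert(0);
		*best = (*best).max(update_id);
		let mut released = Vec::new();
		self.blocked.retain(|(up, up_id, down)| {
			if *up == chan && *up_id <= update_id { released.push(*down); false } else { true }
		});
		released
	}
}

fn main() {
	let (ab, bc) = (ChanId(1), ChanId(2));
	let mut t = MonitorTracker::default();
	// B learned the preimage from C: update 7 on A<->B stores it, so the B<->C
	// update that would drop it stays blocked until update 7 is on disk.
	t.block_until_upstream_persisted(ab, 7, bc);
	assert!(t.update_persisted(ab, 6).is_empty());
	assert_eq!(t.update_persisted(ab, 7), vec![bc]);
}

The tests added in this PR exercise exactly this gating across restarts, force-closes, and partially completed commitment-signed dances.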
@@ -67,7 +67,7 @@ use crate::sync::{Mutex, LockTestExt};
/// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
/// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
/// transaction), a single update may reach upwards of 1 MiB in serialized size.
#[derive(Clone, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq)]
#[must_use]
pub struct ChannelMonitorUpdate {
pub(crate) updates: Vec<ChannelMonitorUpdateStep>,

@@ -487,7 +487,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
);

#[derive(Clone, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum ChannelMonitorUpdateStep {
LatestHolderCommitmentTXInfo {
commitment_tx: HolderCommitmentTransaction,

@@ -450,7 +450,7 @@ pub fn derive_public_revocation_key<T: secp256k1::Verification>(secp_ctx: &Secp2
/// channel basepoints via the new function, or they were obtained via
/// CommitmentTransaction.trust().keys() because we trusted the source of the
/// pre-calculated keys.
#[derive(PartialEq, Eq, Clone)]
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct TxCreationKeys {
/// The broadcaster's per-commitment public key which was used to derive the other keys.
pub per_commitment_point: PublicKey,

@@ -1028,7 +1028,7 @@ impl<'a> DirectedChannelTransactionParameters<'a> {
/// Information needed to build and sign a holder's commitment transaction.
///
/// The transaction is only signed once we are ready to broadcast.
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct HolderCommitmentTransaction {
inner: CommitmentTransaction,
/// Our counterparty's signature for the transaction

@@ -1134,7 +1134,7 @@ impl HolderCommitmentTransaction {
}

/// A pre-built Bitcoin commitment transaction and its txid.
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct BuiltCommitmentTransaction {
/// The commitment transaction
pub transaction: Transaction,

@@ -1305,7 +1305,7 @@ impl<'a> TrustedClosingTransaction<'a> {
///
/// This class can be used inside a signer implementation to generate a signature given the relevant
/// secret key.
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct CommitmentTransaction {
commitment_number: u64,
to_broadcaster_value_sat: u64,

@@ -3038,8 +3038,8 @@ fn test_blocked_chan_preimage_release() {
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

create_announced_chan_between_nodes(&nodes, 0, 1).2;
create_announced_chan_between_nodes(&nodes, 1, 2).2;
create_announced_chan_between_nodes(&nodes, 0, 1);
let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;

send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);

@@ -3068,11 +3068,20 @@ fn test_blocked_chan_preimage_release() {
let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

// Finish the CS dance between nodes[0] and nodes[1].
do_commitment_signed_dance(&nodes[1], &nodes[0], &as_htlc_fulfill_updates.commitment_signed, false, false);
// Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the
// update_fulfill_htlc + CS is held, even though the preimage is already on disk for the
// channel.
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed);
check_added_monitors(&nodes[1], 1);
let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
assert!(a.is_none());

nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
check_added_monitors(&nodes[1], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 3);

@@ -3080,8 +3089,8 @@ fn test_blocked_chan_preimage_release() {
if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }

// The event processing should release the last RAA update.
check_added_monitors(&nodes[1], 1);
// The event processing should release the last RAA updates on both channels.
check_added_monitors(&nodes[1], 2);

// When we fetch the next update the message getter will generate the next update for nodes[2],
// generating a further monitor update.

@@ -3092,3 +3101,432 @@ fn test_blocked_chan_preimage_release() {
do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
}

fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) {
// When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages
// from the downstream channel, we immediately claim the HTLC on the upstream channel, before
// even doing a `commitment_signed` dance on the downstream channel. This implies that our
// `ChannelMonitorUpdate`s are generated in the right order - first we ensure we'll get our
// money, then we write the update that resolves the downstream node claiming their money. This
// is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are
// generated, but of course this may not be the case. For asynchronous update writes, we have
// to ensure monitor updates can block each other, preventing the inversion all together.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);

let persister;
let new_chain_monitor;
let nodes_1_deserialized;

let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;

// Route a payment from A, through B, to C, then claim it on C. Once we pass B the
// `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
// on the B<->C channel but leave the A<->B monitor update pending, then reload B.
let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);

let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
let mut manager_b = Vec::new();
if !with_latest_manager {
manager_b = nodes[1].node.encode();
}

nodes[2].node.claim_funds(payment_preimage);
check_added_monitors(&nodes[2], 1);
expect_payment_claimed!(nodes[2], payment_hash, 100_000);

chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);

// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
// for it since the monitor update is marked in-progress.
check_added_monitors(&nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

// Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we
// won't get the preimage when the nodes reconnect and we have to get it from the
// ChannelMonitor.
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
check_added_monitors(&nodes[1], 1);
if complete_bc_commitment_dance {
let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
check_added_monitors(&nodes[2], 1);
nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
check_added_monitors(&nodes[2], 1);
let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());

// At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the
// preimage in the A <-> B channel, which will prevent it from persisting the
// `ChannelMonitorUpdate` for the B<->C channel here to avoid "losing" the preimage.
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_raa);
check_added_monitors(&nodes[1], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
}

// Now reload node B
if with_latest_manager {
manager_b = nodes[1].node.encode();
}

let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());

if with_latest_manager {
// If we used the latest ChannelManager to reload from, we should have both channels still
// live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
// before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
// When we call `timer_tick_occurred` we will get that monitor update back, which we'll
// complete after reconnecting to our peers.
persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.timer_tick_occurred();
check_added_monitors(&nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

// Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
// the end go ahead and do that, though the
// `pending_responding_commitment_signed_dup_monitor` in `reconnect_args` indicates that we
// expect to *not* receive the final RAA ChannelMonitorUpdate.
if complete_bc_commitment_dance {
reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
} else {
let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
reconnect_args.pending_responding_commitment_signed.1 = true;
reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
reconnect_args.pending_raa = (false, true);
reconnect_nodes(reconnect_args);
}

reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

// (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
// disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
// process.
let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();

// When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has
// completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
// channel.
} else {
// If the ChannelManager used in the reload was stale, check that the B <-> C channel was
// closed.
//
// Note that this will also process the ChannelMonitorUpdates which were queued up when we
// reloaded the ChannelManager. This will re-emit the A<->B preimage as well as the B<->C
// force-closure ChannelMonitorUpdate. Once the A<->B preimage update completes, the claim
// commitment update will be allowed to go out.
check_added_monitors(&nodes[1], 0);
persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[2].node.get_our_node_id()], 100_000);
check_added_monitors(&nodes[1], 2);

nodes[1].node.timer_tick_occurred();
check_added_monitors(&nodes[1], 0);

// Don't bother to reconnect B to C - that channel has been closed. We don't need to
// exchange any messages here even though there's a pending commitment update because the
// ChannelMonitorUpdate hasn't yet completed.
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));

let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();

// The ChannelMonitorUpdate which was completed prior to the reconnect only contained the
// preimage (as it was a replay of the original ChannelMonitorUpdate from before we
// restarted). When we go to fetch the commitment transaction updates we'll poll the
// ChannelMonitorUpdate completion, then generate (and complete) a new ChannelMonitorUpdate
// with the actual commitment transaction, which will allow us to fulfill the HTLC with
// node A.
}

let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
check_added_monitors(&nodes[1], 1);

nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);

expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager);

// Finally, check that the payment was, ultimately, seen as sent by node A.
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
}

#[test]
fn test_inverted_mon_completion_order() {
do_test_inverted_mon_completion_order(true, true);
do_test_inverted_mon_completion_order(true, false);
do_test_inverted_mon_completion_order(false, true);
do_test_inverted_mon_completion_order(false, false);
}

fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool) {
// Test that we can apply a `ChannelMonitorUpdate` with a payment preimage even if the channel
// is force-closed between when we generate the update on reload and when we go to handle the
// update or prior to generating the update at all.

if !close_chans_before_reload && close_only_a {
// If we're not closing, it makes no sense to "only close A"
panic!();
}

let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);

let persister;
let new_chain_monitor;
let nodes_1_deserialized;

let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;

// Route a payment from A, through B, to C, then claim it on C. Once we pass B the
// `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
// on the B<->C channel but leave the A<->B monitor update pending, then reload B.
let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);

let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();

nodes[2].node.claim_funds(payment_preimage);
check_added_monitors(&nodes[2], 1);
expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);

chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);

// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
// for it since the monitor update is marked in-progress.
check_added_monitors(&nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

// Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get
// the preimage when the nodes reconnect, at which point we have to ensure we get it from the
// ChannelMonitor.
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
check_added_monitors(&nodes[1], 1);
let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());

let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();

if close_chans_before_reload {
if !close_only_a {
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id()).unwrap();
check_closed_broadcast(&nodes[1], 1, true);
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[2].node.get_our_node_id()], 100000);
}

chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_broadcast(&nodes[1], 1, true);
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
}

// Now reload node B
let manager_b = nodes[1].node.encode();
reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);

nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());

if close_chans_before_reload {
// If the channels were already closed, B will rebroadcast its closing transactions here.
let bs_close_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
if close_only_a {
assert_eq!(bs_close_txn.len(), 2);
} else {
assert_eq!(bs_close_txn.len(), 3);
}
}

nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(as_closing_tx.len(), 1);

// In order to give A's closing transaction to B without processing background events first,
// use the _without_consistency_checks utility method. This is similar to connecting blocks
// during startup prior to the node being full initialized.
mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);

// After a timer tick a payment preimage ChannelMonitorUpdate is applied to the A<->B
// ChannelMonitor (possible twice), even though the channel has since been closed.
check_added_monitors(&nodes[1], 0);
let mons_added = if close_chans_before_reload { if !close_only_a { 4 } else { 3 } } else { 2 };
if hold_post_reload_mon_update {
for _ in 0..mons_added {
persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
}
}
nodes[1].node.timer_tick_occurred();
check_added_monitors(&nodes[1], mons_added);

// Finally, check that B created a payment preimage transaction and close out the payment.
let bs_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(bs_txn.len(), if close_chans_before_reload && !close_only_a { 2 } else { 1 });
let bs_preimage_tx = &bs_txn[0];
check_spends!(bs_preimage_tx, as_closing_tx[0]);

if !close_chans_before_reload {
check_closed_broadcast(&nodes[1], 1, true);
check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
} else {
// While we forwarded the payment a while ago, we don't want to process events too early or
// we'll run background tasks we wanted to test individually.
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
}

mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
check_closed_broadcast(&nodes[0], 1, true);
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);

if !close_chans_before_reload || close_only_a {
// Make sure the B<->C channel is still alive and well by sending a payment over it.
let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
reconnect_args.pending_responding_commitment_signed.1 = true;
if !close_chans_before_reload {
// TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
// will consider the forwarded payment complete and allow the B<->C
// `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
// be allowed, and needs fixing.
reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
}
reconnect_args.pending_raa.1 = true;

reconnect_nodes(reconnect_args);
let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
if !close_chans_before_reload {
// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
// channel will fly, removing the payment preimage from it.
check_added_monitors(&nodes[1], 1);
}
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
send_payment(&nodes[1], &[&nodes[2]], 100_000);
}
}

#[test]
fn test_durable_preimages_on_closed_channel() {
do_test_durable_preimages_on_closed_channel(true, true, true);
do_test_durable_preimages_on_closed_channel(true, true, false);
do_test_durable_preimages_on_closed_channel(true, false, true);
do_test_durable_preimages_on_closed_channel(true, false, false);
do_test_durable_preimages_on_closed_channel(false, false, true);
do_test_durable_preimages_on_closed_channel(false, false, false);
}

fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
// Test that if a `ChannelMonitorUpdate` completes but a `ChannelManager` isn't serialized
// before restart we run the monitor update completion action on startup.
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);

let persister;
let new_chain_monitor;
let nodes_1_deserialized;

let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;

// Route a payment from A, through B, to C, then claim it on C. Once we pass B the
// `update_fulfill_htlc`+`commitment_signed` we have a monitor update for both of B's channels.
// We complete the commitment signed dance on the B<->C channel but leave the A<->B monitor
// update pending, then reload B. At that point, the final monitor update on the B<->C channel
// is still pending because it can't fly until the preimage is persisted on the A<->B monitor.
let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);

nodes[2].node.claim_funds(payment_preimage);
check_added_monitors(&nodes[2], 1);
expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);

chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);

// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
// for it since the monitor update is marked in-progress.
check_added_monitors(&nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

// Now step the Commitment Signed Dance between B and C and check that after the final RAA B
// doesn't let the preimage-removing monitor update fly.
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
check_added_monitors(&nodes[1], 1);
let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());

nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
check_added_monitors(&nodes[2], 1);
nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
check_added_monitors(&nodes[2], 1);

let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_final_raa);
check_added_monitors(&nodes[1], 0);

// Finally, reload node B and check that after we call `process_pending_events` once we realize
// we've completed the A<->B preimage-including monitor update and so can release the B<->C
// preimage-removing monitor update.
let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
let manager_b = nodes[1].node.encode();
reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);

if close_during_reload {
// Test that we still free the B<->C channel if the A<->B channel closed while we reloaded
// (as learned about during the on-reload block connection).
nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], true);
check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
}

let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
let mut events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000), close_during_reload, false);
if close_during_reload {
match events[0] {
Event::ChannelClosed { .. } => {},
_ => panic!(),
}
check_closed_broadcast!(nodes[1], true);
}

// Once we run event processing the monitor should free, check that it was indeed the B<->C
// channel which was updated.
check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 });
let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
assert!(bc_update_id != post_ev_bc_update_id);

// Finally, check that there's nothing left to do on B<->C reconnect and the channel operates
// fine.
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
send_payment(&nodes[1], &[&nodes[2]], 100_000);
}

#[test]
fn test_reload_mon_update_completion_actions() {
do_test_reload_mon_update_completion_actions(true);
do_test_reload_mon_update_completion_actions(false);
}

@@ -177,7 +177,7 @@ pub(super) enum HTLCForwardInfo {
}

/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, Hash, PartialEq, Eq)]
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
// Note that this may be an outbound SCID alias for the associated channel.
short_channel_id: u64,

@@ -283,7 +283,7 @@ impl Readable for InterceptId {
}
}

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
pub(crate) enum SentHTLCId {
PreviousHopData { short_channel_id: u64, htlc_id: u64 },

@@ -314,7 +314,7 @@ impl_writeable_tlv_based_enum!(SentHTLCId,

/// Tracks the inbound corresponding to an outbound HTLC
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
#[derive(Clone, PartialEq, Eq)]
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum HTLCSource {
PreviousHopData(HTLCPreviousHopData),
OutboundRoute {

@@ -656,7 +656,6 @@ pub(crate) enum RAAMonitorUpdateBlockingAction {
}

impl RAAMonitorUpdateBlockingAction {
#[allow(unused)]
fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
Self::ForwardedPaymentInboundClaim {
channel_id: prev_hop.outpoint.to_channel_id(),

@@ -5175,11 +5174,17 @@ where
self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
}

fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) {
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
forwarded_htlc_value_msat: Option<u64>, from_onchain: bool,
next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
"We don't support claim_htlc claims during startup - monitors may not be available yet");
if let Some(pubkey) = next_channel_counterparty_node_id {
debug_assert_eq!(pubkey, path.hops[0].pubkey);
}
let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: next_channel_outpoint,
counterparty_node_id: path.hops[0].pubkey,

@@ -5190,6 +5195,7 @@ where
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat| {
if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {

@@ -5205,7 +5211,17 @@ where
next_channel_id: Some(next_channel_outpoint.to_channel_id()),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
downstream_counterparty_and_funding_outpoint: None,
downstream_counterparty_and_funding_outpoint:
if let Some(node_id) = next_channel_counterparty_node_id {
Some((node_id, next_channel_outpoint, completed_blocker))
} else {
// We can only get `None` here if we are processing a
// `ChannelMonitor`-originated event, in which case we
// don't care about ensuring we wake the downstream
// channel's monitor updating - the channel is already
// closed.
None
},
})
} else { None }
});

@@ -6044,6 +6060,17 @@ where
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
.or_insert_with(Vec::new)
.push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
}
// Note that we do not need to push an `actions_blocking_raa_monitor_updates`
// entry here, even though we *do* need to block the next RAA monitor update.
// We do this instead in the `claim_funds_internal` by attaching a
// `ReleaseRAAChannelMonitorUpdate` action to the event generated when the
// outbound HTLC is claimed. This is guaranteed to all complete before we
// process the RAA as messages are processed from single peers serially.
funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
res
} else {

@@ -6054,7 +6081,7 @@ where
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo);
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, Some(*counterparty_node_id), funding_txo);
Ok(())
}

@@ -6256,6 +6283,23 @@ where
})
}

#[cfg(any(test, feature = "_test_utils"))]
pub(crate) fn test_raa_monitor_updates_held(&self,
counterparty_node_id: PublicKey, channel_id: ChannelId
) -> bool {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lck = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lck;

if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
chan.context().get_funding_txo().unwrap(), counterparty_node_id);
}
}
false
}

fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
let (htlcs_to_fail, res) = {
let per_peer_state = self.per_peer_state.read().unwrap();

@@ -6477,8 +6521,8 @@ where
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", &preimage);
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint);
log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", preimage);
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, counterparty_node_id, funding_outpoint);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };

@@ -9298,6 +9342,7 @@ where
// downstream chan is closed (because we don't have a
// channel_id -> peer map entry).
counterparty_opt.is_none(),
counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
monitor.get_funding_txo().0))
} else { None }
} else {

@@ -9576,12 +9621,12 @@ where
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}

for (source, preimage, downstream_value, downstream_closed, downstream_funding) in pending_claims_to_replay {
for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
downstream_closed, downstream_funding);
downstream_closed, downstream_node_id, downstream_funding);
}

//TODO: Broadcast channel update for closed channels, but only after we've made a

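The channelmanager.rs hunks above key the gating on a per-channel list of blocking actions (actions_blocking_raa_monitor_updates, queried via raa_monitor_updates_held / test_raa_monitor_updates_held). The following is a minimal, self-contained sketch of that bookkeeping using simplified stand-in types, not LDK's real ones:

// Sketch with simplified stand-in types; not LDK's actual structures.
use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct ChannelId([u8; 32]);

#[derive(Clone, PartialEq, Debug)]
enum BlockingAction { ForwardedPaymentInboundClaim { channel_id: ChannelId, htlc_id: u64 } }

#[derive(Default)]
struct PeerState { actions_blocking_raa_monitor_updates: HashMap<ChannelId, Vec<BlockingAction>> }

impl PeerState {
	// An RAA monitor update stays held while any blocking action is outstanding.
	fn raa_monitor_updates_held(&self, channel_id: &ChannelId) -> bool {
		self.actions_blocking_raa_monitor_updates
			.get(channel_id).map_or(false, |acts| !acts.is_empty())
	}
	// Completing the upstream claim removes its blocking action, freeing the update.
	fn complete_action(&mut self, channel_id: &ChannelId, action: &BlockingAction) {
		if let Some(acts) = self.actions_blocking_raa_monitor_updates.get_mut(channel_id) {
			acts.retain(|a| a != action);
		}
	}
}

fn main() {
	let chan = ChannelId([0u8; 32]);
	let mut peer = PeerState::default();
	let action = BlockingAction::ForwardedPaymentInboundClaim { channel_id: chan.clone(), htlc_id: 0 };
	peer.actions_blocking_raa_monitor_updates.entry(chan.clone()).or_insert_with(Vec::new).push(action.clone());
	assert!(peer.raa_monitor_updates_held(&chan));
	peer.complete_action(&chan, &action);
	assert!(!peer.raa_monitor_updates_held(&chan));
}
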
@@ -17,7 +17,7 @@ use crate::chain::transaction::OutPoint;
use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason};
use crate::events::bump_transaction::{BumpTransactionEventHandler, Wallet, WalletSource};
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::channelmanager::{self, AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
use crate::routing::router::{self, PaymentParameters, Route, RouteParameters};
use crate::ln::features::InitFeatures;

@@ -73,6 +73,20 @@ pub fn mine_transactions<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, txn: &[&Tra
let height = node.best_block_info().1 + 1;
confirm_transactions_at(node, txn, height);
}
/// Mine a single block containing the given transaction without extra consistency checks which may
/// impact ChannelManager state.
pub fn mine_transaction_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
let height = node.best_block_info().1 + 1;
let mut block = Block {
header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 },
txdata: Vec::new(),
};
for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
block.txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
}
block.txdata.push((*tx).clone());
do_connect_block_without_consistency_checks(node, block, false);
}
/// Mine the given transaction at the given height, mining blocks as required to build to that
/// height
///

@@ -211,16 +225,16 @@ pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) ->
assert!(depth >= 1);
for i in 1..depth {
let prev_blockhash = block.header.block_hash();
do_connect_block(node, block, skip_intermediaries);
do_connect_block_with_consistency_checks(node, block, skip_intermediaries);
block = create_dummy_block(prev_blockhash, height + i, Vec::new());
}
let hash = block.header.block_hash();
do_connect_block(node, block, false);
do_connect_block_with_consistency_checks(node, block, false);
hash
}

pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
do_connect_block(node, block.clone(), false);
do_connect_block_with_consistency_checks(node, block.clone(), false);
}

fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {

@@ -230,8 +244,14 @@ fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
}
}

fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
fn do_connect_block_with_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
call_claimable_balances(node);
do_connect_block_without_consistency_checks(node, block, skip_intermediaries);
call_claimable_balances(node);
node.node.test_process_background_events();
}

fn do_connect_block_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
let height = node.best_block_info().1 + 1;
#[cfg(feature = "std")] {
eprintln!("Connecting block using Block Connection Style: {:?}", *node.connect_style.borrow());

@@ -286,8 +306,6 @@ fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, sk
}
}
}
call_claimable_balances(node);
node.node.test_process_background_events();

for tx in &block.txdata {
for input in &tx.input {

@@ -1772,20 +1790,7 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '
check_added_monitors!(node_a, 1);

// If this commitment signed dance was due to a claim, don't check for an RAA monitor update.
let got_claim = node_a.node.pending_events.lock().unwrap().iter().any(|(ev, action)| {
let matching_action = if let Some(channelmanager::EventCompletionAction::ReleaseRAAChannelMonitorUpdate
{ channel_funding_outpoint, counterparty_node_id }) = action
{
if channel_funding_outpoint.to_channel_id() == commitment_signed.channel_id {
assert_eq!(*counterparty_node_id, node_b.node.get_our_node_id());
true
} else { false }
} else { false };
if matching_action {
if let Event::PaymentSent { .. } = ev {} else { panic!(); }
}
matching_action
});
let got_claim = node_a.node.test_raa_monitor_updates_held(node_b.node.get_our_node_id(), commitment_signed.channel_id);
if fail_backwards { assert!(!got_claim); }
commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false, got_claim);

@@ -2000,29 +2005,38 @@ macro_rules! expect_payment_path_successful {
}
}

pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM=CM>>(
event: Event, node: &H, prev_node: &H, next_node: &H, expected_fee: Option<u64>,
upstream_force_closed: bool, downstream_force_closed: bool
) {
match event {
Event::PaymentForwarded {
fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
outbound_amount_forwarded_msat: _
} => {
assert_eq!(fee_earned_msat, expected_fee);
if !upstream_force_closed {
// Is the event prev_channel_id in one of the channels between the two nodes?
assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == prev_node.node().get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
}
// We check for force closures since a force closed channel is removed from the
// node's channel list
if !downstream_force_closed {
assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == next_node.node().get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
}
assert_eq!(claim_from_onchain_tx, downstream_force_closed);
},
_ => panic!("Unexpected event"),
}
}

macro_rules! expect_payment_forwarded {
($node: expr, $prev_node: expr, $next_node: expr, $expected_fee: expr, $upstream_force_closed: expr, $downstream_force_closed: expr) => {
let events = $node.node.get_and_clear_pending_events();
let mut events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
Event::PaymentForwarded {
fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
outbound_amount_forwarded_msat: _
} => {
assert_eq!(fee_earned_msat, $expected_fee);
if fee_earned_msat.is_some() {
// Is the event prev_channel_id in one of the channels between the two nodes?
assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $prev_node.node.get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
}
// We check for force closures since a force closed channel is removed from the
// node's channel list
if !$downstream_force_closed {
assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $next_node.node.get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
}
assert_eq!(claim_from_onchain_tx, $downstream_force_closed);
},
_ => panic!("Unexpected event"),
}
$crate::ln::functional_test_utils::expect_payment_forwarded(
events.pop().unwrap(), &$node, &$prev_node, &$next_node, $expected_fee,
$upstream_force_closed, $downstream_force_closed);
}
}

@@ -2399,7 +2413,7 @@ pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, '
}
};
if $idx == 1 { fee += expected_extra_fees[i]; }
expect_payment_forwarded!($node, $next_node, $prev_node, Some(fee as u64), false, false);
expect_payment_forwarded!(*$node, $next_node, $prev_node, Some(fee as u64), false, false);
expected_total_fee_msat += fee as u64;
check_added_monitors!($node, 1);
let new_next_msgs = if $new_msgs {

@@ -3030,7 +3044,11 @@ pub struct ReconnectArgs<'a, 'b, 'c, 'd> {
pub node_a: &'a Node<'b, 'c, 'd>,
pub node_b: &'a Node<'b, 'c, 'd>,
pub send_channel_ready: (bool, bool),
pub pending_htlc_adds: (i64, i64),
pub pending_responding_commitment_signed: (bool, bool),
/// Indicates that the pending responding commitment signed will be a dup for the recipient,
/// and no monitor update is expected
pub pending_responding_commitment_signed_dup_monitor: (bool, bool),
pub pending_htlc_adds: (usize, usize),
pub pending_htlc_claims: (usize, usize),
pub pending_htlc_fails: (usize, usize),
pub pending_cell_htlc_claims: (usize, usize),

@@ -3044,6 +3062,8 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> {
node_a,
node_b,
send_channel_ready: (false, false),
pending_responding_commitment_signed: (false, false),
pending_responding_commitment_signed_dup_monitor: (false, false),
pending_htlc_adds: (0, 0),
pending_htlc_claims: (0, 0),
pending_htlc_fails: (0, 0),

@@ -3059,7 +3079,8 @@ impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> {
pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
let ReconnectArgs {
node_a, node_b, send_channel_ready, pending_htlc_adds, pending_htlc_claims, pending_htlc_fails,
pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa
pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa,
pending_responding_commitment_signed, pending_responding_commitment_signed_dup_monitor,
} = args;
node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init {
features: node_b.node.init_features(), networks: None, remote_network_address: None

@@ -3144,13 +3165,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
} else {
assert!(chan_msgs.1.is_none());
}
if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 ||
pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 ||
pending_responding_commitment_signed.0
{
let commitment_update = chan_msgs.2.unwrap();
if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
} else {
assert!(commitment_update.update_add_htlcs.is_empty());
}
assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0);
assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0 + pending_cell_htlc_fails.0);
assert!(commitment_update.update_fail_malformed_htlcs.is_empty());

@@ -3164,7 +3184,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail);
}

if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
if !pending_responding_commitment_signed.0 {
commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
} else {
node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed);

@@ -3173,7 +3193,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
// No commitment_signed so get_event_msg's assert(len == 1) passes
node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack);
assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(node_b, 1);
check_added_monitors!(node_b, if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 });
}
} else {
assert!(chan_msgs.2.is_none());

@@ -3203,11 +3223,12 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
} else {
assert!(chan_msgs.1.is_none());
}
if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 ||
pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 ||
pending_responding_commitment_signed.1
{
let commitment_update = chan_msgs.2.unwrap();
if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
}
assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1);
assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.1 + pending_cell_htlc_fails.1);
assert!(commitment_update.update_fail_malformed_htlcs.is_empty());

@@ -3221,7 +3242,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail);
}

if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
if !pending_responding_commitment_signed.1 {
commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
} else {
node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed);

@@ -3230,7 +3251,7 @@ pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
// No commitment_signed so get_event_msg's assert(len == 1) passes
node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack);
assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(node_a, 1);
check_added_monitors!(node_a, if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 });
}
} else {
assert!(chan_msgs.2.is_none());

@@ -17,7 +17,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator;
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
use crate::sign::{EcdsaChannelSigner, EntropySource, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};

@@ -3880,13 +3880,13 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
} else if messages_delivered == 3 {
// nodes[0] still wants its RAA + commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
reconnect_args.pending_htlc_adds.0 = -1;
reconnect_args.pending_responding_commitment_signed.0 = true;
reconnect_args.pending_raa.0 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 4 {
// nodes[0] still wants its commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
reconnect_args.pending_htlc_adds.0 = -1;
reconnect_args.pending_responding_commitment_signed.0 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 5 {
// nodes[1] still wants its final RAA

@@ -4014,13 +4014,13 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
} else if messages_delivered == 2 {
// nodes[0] still wants its RAA + commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
reconnect_args.pending_htlc_adds.1 = -1;
reconnect_args.pending_responding_commitment_signed.1 = true;
reconnect_args.pending_raa.1 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 3 {
// nodes[0] still wants its commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
reconnect_args.pending_htlc_adds.1 = -1;
reconnect_args.pending_responding_commitment_signed.1 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 4 {
// nodes[1] still wants its final RAA

@@ -8777,7 +8777,8 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);

nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false, false);
let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
// If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
if !go_onchain_before_fulfill && broadcast_alice {
let events = nodes[1].node.get_and_clear_pending_msg_events();

@@ -611,7 +611,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
check_added_monitors!(nodes[1], 1);
commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false);
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false);
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false);

if confirm_before_reload {
let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone();

@@ -209,7 +209,7 @@ fn test_lnd_bug_6039() {
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
let (payment_preimage, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);

nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());

@ -62,7 +62,6 @@ use regex;
|
|||
use crate::io;
|
||||
use crate::prelude::*;
|
||||
use core::cell::RefCell;
|
||||
use core::ops::Deref;
|
||||
use core::time::Duration;
|
||||
use crate::sync::{Mutex, Arc};
|
||||
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
|
@ -297,6 +296,7 @@ pub(crate) struct WatchtowerPersister {
|
|||
}
|
||||
|
||||
impl WatchtowerPersister {
|
||||
#[cfg(test)]
|
||||
pub(crate) fn new(destination_script: Script) -> Self {
|
||||
WatchtowerPersister {
|
||||
persister: TestPersister::new(),
|
||||
|
@ -306,6 +306,7 @@ impl WatchtowerPersister {
|
|||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn justice_tx(&self, funding_txo: OutPoint, commitment_txid: &Txid)
|
||||
-> Option<Transaction> {
|
||||
self.watchtower_state.lock().unwrap().get(&funding_txo).unwrap().get(commitment_txid).cloned()
|
||||
|
|