Update tests to test re-claiming of forwarded HTLCs on startup
Because some of these tests require connecting blocks without calling `get_and_clear_pending_msg_events`, we need to split up the block connection utilities so that the sanity checks are only run optionally.
Parent: 46453bf078 · Commit: 0d8b0961a5 · 5 changed files with 276 additions and 55 deletions
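The heart of the change is splitting the test utilities' block-connection helper into `do_connect_block_with_consistency_checks` and `do_connect_block_without_consistency_checks` (see the final hunks below). A minimal, dependency-free sketch of that split, where the `Node` type and the check body are illustrative stand-ins rather than LDK's API:

struct Node { height: u32 }

fn sanity_checks(node: &Node) {
	// Stands in for the balance polling / background-event processing the
	// real test utilities run around block connection.
	assert!(node.height < u32::MAX);
}

fn connect_block_with_consistency_checks(node: &mut Node) {
	sanity_checks(node);
	connect_block_without_consistency_checks(node);
	sanity_checks(node);
}

fn connect_block_without_consistency_checks(node: &mut Node) {
	node.height += 1;
}

fn main() {
	let mut node = Node { height: 0 };
	connect_block_with_consistency_checks(&mut node); // normal test flow
	connect_block_without_consistency_checks(&mut node); // startup-style connection, checks skipped
	assert_eq!(node.height, 2);
}

Startup-time block connection can then use the unchecked variant, which is what the new `mine_transaction_without_consistency_checks` helper below does.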
@@ -67,7 +67,7 @@ use crate::sync::{Mutex, LockTestExt};
 /// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
 /// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
 /// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 #[must_use]
 pub struct ChannelMonitorUpdate {
 	pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
@@ -487,7 +487,7 @@ impl_writeable_tlv_based_enum_upgradable!(OnchainEvent,
 
 );
 
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum ChannelMonitorUpdateStep {
 	LatestHolderCommitmentTXInfo {
 		commitment_tx: HolderCommitmentTransaction,
@@ -450,7 +450,7 @@ pub fn derive_public_revocation_key<T: secp256k1::Verification>(secp_ctx: &Secp2
 /// channel basepoints via the new function, or they were obtained via
 /// CommitmentTransaction.trust().keys() because we trusted the source of the
 /// pre-calculated keys.
-#[derive(PartialEq, Eq, Clone)]
+#[derive(PartialEq, Eq, Clone, Debug)]
 pub struct TxCreationKeys {
 	/// The broadcaster's per-commitment public key which was used to derive the other keys.
 	pub per_commitment_point: PublicKey,
@@ -1028,7 +1028,7 @@ impl<'a> DirectedChannelTransactionParameters<'a> {
 /// Information needed to build and sign a holder's commitment transaction.
 ///
 /// The transaction is only signed once we are ready to broadcast.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct HolderCommitmentTransaction {
 	inner: CommitmentTransaction,
 	/// Our counterparty's signature for the transaction
@@ -1134,7 +1134,7 @@ impl HolderCommitmentTransaction {
 }
 
 /// A pre-built Bitcoin commitment transaction and its txid.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct BuiltCommitmentTransaction {
 	/// The commitment transaction
 	pub transaction: Transaction,
@@ -1305,7 +1305,7 @@ impl<'a> TrustedClosingTransaction<'a> {
 ///
 /// This class can be used inside a signer implementation to generate a signature given the relevant
 /// secret key.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct CommitmentTransaction {
 	commitment_number: u64,
 	to_broadcaster_value_sat: u64,
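The derive changes above (and the similar ones in the later hunks) all add `Debug` so test code can format and assert on these types with `{:?}`. A reduced illustration with a stand-in struct, not the real one:

#[derive(Clone, Debug, PartialEq, Eq)]
struct MonitorUpdateStub {
	update_id: u64,
}

fn main() {
	let a = MonitorUpdateStub { update_id: 1 };
	let b = a.clone();
	// `assert_eq!` requires `Debug` on both operands so it can pretty-print
	// them on failure; without the added derive this would not compile.
	assert_eq!(a, b);
	println!("{:?}", a);
}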
@@ -3102,10 +3102,10 @@ fn test_blocked_chan_preimage_release() {
 	expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
 }
 
-fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
-	// When we forward a payment and receive an `update_fulfill_htlc` message from the downstream
-	// channel, we immediately claim the HTLC on the upstream channel, before even doing a
-	// `commitment_signed` dance on the downstream channel. This implies that our
+fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) {
+	// When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages
+	// from the downstream channel, we immediately claim the HTLC on the upstream channel, before
+	// even doing a `commitment_signed` dance on the downstream channel. This implies that our
 	// `ChannelMonitorUpdate`s are generated in the right order - first we ensure we'll get our
 	// money, then we write the update that resolves the downstream node claiming their money. This
 	// is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are
@@ -3130,6 +3130,10 @@ fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
 	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
 
 	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+	let mut manager_b = Vec::new();
+	if !with_latest_manager {
+		manager_b = nodes[1].node.encode();
+	}
 
 	nodes[2].node.claim_funds(payment_preimage);
 	check_added_monitors(&nodes[2], 1);
@@ -3166,7 +3170,9 @@ fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
 	}
 
 	// Now reload node B
-	let manager_b = nodes[1].node.encode();
+	if with_latest_manager {
+		manager_b = nodes[1].node.encode();
+	}
 
 	let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
 	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
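The two hunks above turn `manager_b` into a mutable buffer that is serialized either before the claim (a deliberately stale `ChannelManager`) or after it (the latest state). The same snapshot-timing idea in miniature, with a `Vec<u8>` standing in for the serialized manager:

fn main() {
	let mut state = vec![1u8];
	let with_latest = false; // flip to snapshot after the mutation instead

	// Snapshot before the state change -> a "stale" serialization.
	let mut snapshot = Vec::new();
	if !with_latest {
		snapshot = state.clone();
	}

	state.push(2); // the state change (the HTLC claim, in the real test)

	// Snapshot after the change -> the "latest" serialization.
	if with_latest {
		snapshot = state.clone();
	}

	assert_eq!(snapshot, vec![1u8]); // stale: missing the later update
}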
@@ -3174,48 +3180,82 @@ fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
-	// If we used the latest ChannelManager to reload from, we should have both channels still
-	// live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
-	// before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
-	// When we call `timer_tick_occurred` we will get that monitor update back, which we'll
-	// complete after reconnecting to our peers.
-	persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-	nodes[1].node.timer_tick_occurred();
-	check_added_monitors(&nodes[1], 1);
-	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	if with_latest_manager {
+		// If we used the latest ChannelManager to reload from, we should have both channels still
+		// live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
+		// before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
+		// When we call `timer_tick_occurred` we will get that monitor update back, which we'll
+		// complete after reconnecting to our peers.
+		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		nodes[1].node.timer_tick_occurred();
+		check_added_monitors(&nodes[1], 1);
+		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
-	// Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
-	// the end go ahead and do that, though the
-	// `pending_responding_commitment_signed_dup_monitor` in `reconnect_args` indicates that we
-	// expect to *not* receive the final RAA ChannelMonitorUpdate.
-	if complete_bc_commitment_dance {
-		reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
+		// Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
+		// the end go ahead and do that, though the
+		// `pending_responding_commitment_signed_dup_monitor` in `reconnect_args` indicates that we
+		// expect to *not* receive the final RAA ChannelMonitorUpdate.
+		if complete_bc_commitment_dance {
+			reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
+		} else {
+			let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+			reconnect_args.pending_responding_commitment_signed.1 = true;
+			reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
+			reconnect_args.pending_raa = (false, true);
+			reconnect_nodes(reconnect_args);
+		}
+
+		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+
+		// (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
+		// disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
+		// process.
+		let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+		// When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has
+		// completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
+		// channel.
 	} else {
-		let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
-		reconnect_args.pending_responding_commitment_signed.1 = true;
-		reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
-		reconnect_args.pending_raa = (false, true);
-		reconnect_nodes(reconnect_args);
+		// If the ChannelManager used in the reload was stale, check that the B <-> C channel was
+		// closed.
+		//
+		// Note that this will also process the ChannelMonitorUpdates which were queued up when we
+		// reloaded the ChannelManager. This will re-emit the A<->B preimage as well as the B<->C
+		// force-closure ChannelMonitorUpdate. Once the A<->B preimage update completes, the claim
+		// commitment update will be allowed to go out.
+		check_added_monitors(&nodes[1], 0);
+		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[2].node.get_our_node_id()], 100_000);
+		check_added_monitors(&nodes[1], 2);
+
+		nodes[1].node.timer_tick_occurred();
+		check_added_monitors(&nodes[1], 0);
+
+		// Don't bother to reconnect B to C - that channel has been closed. We don't need to
+		// exchange any messages here even though there's a pending commitment update because the
+		// ChannelMonitorUpdate hasn't yet completed.
+		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+
+		let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+		// The ChannelMonitorUpdate which was completed prior to the reconnect only contained the
+		// preimage (as it was a replay of the original ChannelMonitorUpdate from before we
+		// restarted). When we go to fetch the commitment transaction updates we'll poll the
+		// ChannelMonitorUpdate completion, then generate (and complete) a new ChannelMonitorUpdate
+		// with the actual commitment transaction, which will allow us to fulfill the HTLC with
+		// node A.
 	}
 
-	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
-
-	// (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
-	// disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
-	// process.
-	let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
-	nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
-
-	// When we fetch B's HTLC update messages here (now that the ChannelMonitorUpdate has
-	// completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
-	// channel.
 	let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
 	check_added_monitors(&nodes[1], 1);
 
 	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
 	do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);
 
-	expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, false);
+	expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager);
 
 	// Finally, check that the payment was, ultimately, seen as sent by node A.
 	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
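This hunk leans on LDK's asynchronous monitor-persistence contract: while a persister reports `ChannelMonitorUpdateStatus::InProgress`, the node holds its outbound messages, and they are only released once `channel_monitor_updated` signals completion. A toy model of that gating, not LDK's API:

struct Channel {
	in_flight_updates: u32,
	queued_msgs: Vec<String>,
}

impl Channel {
	// Apply an update whose persistence was reported as in-progress.
	fn apply_in_progress_update(&mut self, msg: &str) {
		self.queued_msgs.push(msg.to_string());
		self.in_flight_updates += 1;
	}

	// Analogous in spirit to ChainMonitor::channel_monitor_updated.
	fn update_completed(&mut self) -> Vec<String> {
		self.in_flight_updates -= 1;
		if self.in_flight_updates == 0 {
			std::mem::take(&mut self.queued_msgs)
		} else {
			Vec::new()
		}
	}
}

fn main() {
	let mut chan = Channel { in_flight_updates: 0, queued_msgs: Vec::new() };
	chan.apply_in_progress_update("update_fulfill_htlc");
	// Nothing is released while persistence is pending...
	assert_eq!(chan.in_flight_updates, 1);
	// ...the message flows only once the update completes.
	assert_eq!(chan.update_completed(), vec!["update_fulfill_htlc".to_string()]);
}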
@@ -3223,6 +3263,169 @@ fn do_test_inverted_mon_completion_order(complete_bc_commitment_dance: bool) {
 
 #[test]
 fn test_inverted_mon_completion_order() {
-	do_test_inverted_mon_completion_order(true);
-	do_test_inverted_mon_completion_order(false);
+	do_test_inverted_mon_completion_order(true, true);
+	do_test_inverted_mon_completion_order(true, false);
+	do_test_inverted_mon_completion_order(false, true);
+	do_test_inverted_mon_completion_order(false, false);
 }
+
+fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool) {
+	// Test that we can apply a `ChannelMonitorUpdate` with a payment preimage even if the channel
+	// is force-closed between when we generate the update on reload and when we go to handle the
+	// update or prior to generating the update at all.
+
+	if !close_chans_before_reload && close_only_a {
+		// If we're not closing, it makes no sense to "only close A"
+		panic!();
+	}
+
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+	let persister;
+	let new_chain_monitor;
+	let nodes_1_deserialized;
+
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+	let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+	// Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+	// `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
+	// on the B<->C channel but leave the A<->B monitor update pending, then reload B.
+	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+
+	nodes[2].node.claim_funds(payment_preimage);
+	check_added_monitors(&nodes[2], 1);
+	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+	// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+	// for it since the monitor update is marked in-progress.
+	check_added_monitors(&nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get
+	// the preimage when the nodes reconnect, at which point we have to ensure we get it from the
+	// ChannelMonitor.
+	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+	check_added_monitors(&nodes[1], 1);
+	let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+	let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+
+	if close_chans_before_reload {
+		if !close_only_a {
+			chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+			nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id()).unwrap();
+			check_closed_broadcast(&nodes[1], 1, true);
+			check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[2].node.get_our_node_id()], 100000);
+		}
+
+		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id()).unwrap();
+		check_closed_broadcast(&nodes[1], 1, true);
+		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+	}
+
+	// Now reload node B
+	let manager_b = nodes[1].node.encode();
+	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+	if close_chans_before_reload {
+		// If the channels were already closed, B will rebroadcast its closing transactions here.
+		let bs_close_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+		if close_only_a {
+			assert_eq!(bs_close_txn.len(), 2);
+		} else {
+			assert_eq!(bs_close_txn.len(), 3);
+		}
+	}
+
+	nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+	check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+	let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(as_closing_tx.len(), 1);
+
+	// In order to give A's closing transaction to B without processing background events first,
+	// use the _without_consistency_checks utility method. This is similar to connecting blocks
+	// during startup prior to the node being fully initialized.
+	mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
+
+	// After a timer tick a payment preimage ChannelMonitorUpdate is applied to the A<->B
+	// ChannelMonitor (possibly twice), even though the channel has since been closed.
+	check_added_monitors(&nodes[1], 0);
+	let mons_added = if close_chans_before_reload { if !close_only_a { 4 } else { 3 } } else { 2 };
+	if hold_post_reload_mon_update {
+		for _ in 0..mons_added {
+			persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		}
+	}
+	nodes[1].node.timer_tick_occurred();
+	check_added_monitors(&nodes[1], mons_added);
+
+	// Finally, check that B created a payment preimage transaction and close out the payment.
+	let bs_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(bs_txn.len(), if close_chans_before_reload && !close_only_a { 2 } else { 1 });
+	let bs_preimage_tx = &bs_txn[0];
+	check_spends!(bs_preimage_tx, as_closing_tx[0]);
+
+	if !close_chans_before_reload {
+		check_closed_broadcast(&nodes[1], 1, true);
+		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
+	} else {
+		// While we forwarded the payment a while ago, we don't want to process events too early or
+		// we'll run background tasks we wanted to test individually.
+		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
+	}
+
+	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
+	check_closed_broadcast(&nodes[0], 1, true);
+	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+
+	if !close_chans_before_reload || close_only_a {
+		// Make sure the B<->C channel is still alive and well by sending a payment over it.
+		let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+		reconnect_args.pending_responding_commitment_signed.1 = true;
+		if !close_chans_before_reload {
+			// TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
+			// will consider the forwarded payment complete and allow the B<->C
+			// `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
+			// be allowed, and needs fixing.
+			reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
+		}
+		reconnect_args.pending_raa.1 = true;
+
+		reconnect_nodes(reconnect_args);
+		let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+		nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
+		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
+		if !close_chans_before_reload {
+			// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
+			// channel will fly, removing the payment preimage from it.
+			check_added_monitors(&nodes[1], 1);
+		}
+		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+		send_payment(&nodes[1], &[&nodes[2]], 100_000);
+	}
+}
+
+#[test]
+fn test_durable_preimages_on_closed_channel() {
+	do_test_durable_preimages_on_closed_channel(true, true, true);
+	do_test_durable_preimages_on_closed_channel(true, true, false);
+	do_test_durable_preimages_on_closed_channel(true, false, true);
+	do_test_durable_preimages_on_closed_channel(true, false, false);
+	do_test_durable_preimages_on_closed_channel(false, false, true);
+	do_test_durable_preimages_on_closed_channel(false, false, false);
+}
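The hand-written boolean matrices in the two `#[test]` wrappers above could equivalently be enumerated with loops; a style sketch only, where the skipped combination mirrors the `panic!()` guard in `do_test_durable_preimages_on_closed_channel`:

fn do_test(close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool) {
	println!("({close_chans_before_reload}, {close_only_a}, {hold_post_reload_mon_update})");
}

fn main() {
	for close_before in [true, false] {
		for only_a in [true, false] {
			for hold_update in [true, false] {
				// "Only close A" is meaningless if nothing closes before
				// reload, so skip the combination the test itself rejects.
				if !close_before && only_a { continue; }
				do_test(close_before, only_a, hold_update);
			}
		}
	}
}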
@@ -177,7 +177,7 @@ pub(super) enum HTLCForwardInfo {
 }
 
 /// Tracks the inbound corresponding to an outbound HTLC
-#[derive(Clone, Hash, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
 pub(crate) struct HTLCPreviousHopData {
 	// Note that this may be an outbound SCID alias for the associated channel.
 	short_channel_id: u64,
@@ -283,7 +283,7 @@ impl Readable for InterceptId {
 	}
 }
 
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 /// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
 pub(crate) enum SentHTLCId {
 	PreviousHopData { short_channel_id: u64, htlc_id: u64 },
@@ -314,7 +314,7 @@ impl_writeable_tlv_based_enum!(SentHTLCId,
 
 /// Tracks the inbound corresponding to an outbound HTLC
 #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum HTLCSource {
 	PreviousHopData(HTLCPreviousHopData),
 	OutboundRoute {
@@ -17,7 +17,7 @@ use crate::chain::transaction::OutPoint;
 use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason};
 use crate::events::bump_transaction::{BumpTransactionEventHandler, Wallet, WalletSource};
 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
-use crate::ln::channelmanager::{self, AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
+use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
 use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
 use crate::routing::router::{self, PaymentParameters, Route, RouteParameters};
 use crate::ln::features::InitFeatures;
@@ -73,6 +73,20 @@ pub fn mine_transactions<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, txn: &[&Tra
 	let height = node.best_block_info().1 + 1;
 	confirm_transactions_at(node, txn, height);
 }
+/// Mine a single block containing the given transaction without extra consistency checks which may
+/// impact ChannelManager state.
+pub fn mine_transaction_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
+	let height = node.best_block_info().1 + 1;
+	let mut block = Block {
+		header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 },
+		txdata: Vec::new(),
+	};
+	for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
+		block.txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
+	}
+	block.txdata.push((*tx).clone());
+	do_connect_block_without_consistency_checks(node, block, false);
+}
 /// Mine the given transaction at the given height, mining blocks as required to build to that
 /// height
 ///
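The padding loop in the new helper offsets the real transaction's index within the block by the number of channels created so far, so no two channels can end up with the same (block height, transaction index) derived short channel id. A dependency-free sketch of that indexing trick, with stand-in types instead of the bitcoin crate's:

#[derive(Debug, PartialEq)]
struct Tx(&'static str);

fn block_txdata(chan_count: usize, tx: Tx) -> Vec<Tx> {
	let mut txdata = Vec::new();
	for _ in 0..chan_count {
		txdata.push(Tx("dummy")); // padding, mirrors the network_chan_count loop
	}
	txdata.push(tx);
	txdata
}

fn main() {
	let txdata = block_txdata(2, Tx("commitment"));
	assert_eq!(txdata.len(), 3);
	assert_eq!(txdata[2], Tx("commitment"));
}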
@@ -211,16 +225,16 @@ pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) ->
 	assert!(depth >= 1);
 	for i in 1..depth {
 		let prev_blockhash = block.header.block_hash();
-		do_connect_block(node, block, skip_intermediaries);
+		do_connect_block_with_consistency_checks(node, block, skip_intermediaries);
 		block = create_dummy_block(prev_blockhash, height + i, Vec::new());
 	}
 	let hash = block.header.block_hash();
-	do_connect_block(node, block, false);
+	do_connect_block_with_consistency_checks(node, block, false);
 	hash
 }
 
 pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
-	do_connect_block(node, block.clone(), false);
+	do_connect_block_with_consistency_checks(node, block.clone(), false);
 }
 
 fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
@@ -230,8 +244,14 @@ fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
 	}
 }
 
-fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
+fn do_connect_block_with_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
 	call_claimable_balances(node);
+	do_connect_block_without_consistency_checks(node, block, skip_intermediaries);
+	call_claimable_balances(node);
+	node.node.test_process_background_events();
+}
+
+fn do_connect_block_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
 	let height = node.best_block_info().1 + 1;
 	#[cfg(feature = "std")] {
 		eprintln!("Connecting block using Block Connection Style: {:?}", *node.connect_style.borrow());
@@ -286,8 +306,6 @@ fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, sk
 			}
 		}
 	}
-	call_claimable_balances(node);
-	node.node.test_process_background_events();
 
 	for tx in &block.txdata {
 		for input in &tx.input {