// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Further functional tests which test blockchain reorganizations.
use crate::chain::channelmonitor::ANTI_REORG_DELAY;
use crate::chain::transaction::OutPoint;
use crate::chain::Confirm;
use crate::ln::channelmanager::ChannelManager;
use crate::ln::msgs::ChannelMessageHandler;
use crate::util::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::util::test_utils;
use crate::util::ser::Writeable;

use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::opcodes;
use bitcoin::secp256k1::Secp256k1;

use crate::prelude::*;

use bitcoin::hashes::Hash;
use bitcoin::TxMerkleNode;

use crate::ln::functional_test_utils::*;

fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
	// Our on-chain HTLC-claim learning has a few properties worth testing:
	//  * If an upstream HTLC is claimed with a preimage (both against our own commitment
	//    transaction and our counterparty's), we claim it backwards immediately.
	//  * If an upstream HTLC is claimed with a timeout, we delay ANTI_REORG_DELAY before failing
	//    it backwards to ensure our counterparty can't claim with a preimage in a reorg.
	//
	// Here we test both properties in any combination based on the two bools passed in as
	// arguments.
	//
	// If local_commitment is set, we first broadcast a local commitment containing an offered
	// HTLC and an HTLC-Timeout tx, otherwise we broadcast a remote commitment containing a
	// received HTLC and a local HTLC-Timeout tx spending it.
	//
	// We then either allow these transactions to confirm (if !claim) or we wait until one block
	// before they otherwise would and reorg them out, confirming an HTLC-Success tx instead.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Make sure all nodes are at the same starting height
	connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
	connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
	connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);

	let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);

	// Provide preimage to node 2 by claiming payment
	nodes[2].node.claim_funds(our_payment_preimage);
	expect_payment_claimed!(nodes[2], our_payment_hash, 1_000_000);
	check_added_monitors!(nodes[2], 1);
	get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());

	let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: nodes[2].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
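	// Only prev_blockhash above needs to be valid; the test chain infrastructure doesn't
	// validate proof-of-work or merkle roots, so the remaining header fields are dummy values.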

	let claim_txn = if local_commitment {
		// Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
		let node_1_commitment_txn = get_local_commitment_txn!(nodes[1], chan_2.2);
		assert_eq!(node_1_commitment_txn.len(), 2); // 1 local commitment tx, 1 Outbound HTLC-Timeout
		assert_eq!(node_1_commitment_txn[0].output.len(), 2); // to-self and Offered HTLC (to-remote/to-node-3 is dust)
		check_spends!(node_1_commitment_txn[0], chan_2.3);
		check_spends!(node_1_commitment_txn[1], node_1_commitment_txn[0]);

		// Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
		connect_block(&nodes[2], &Block { header, txdata: node_1_commitment_txn.clone() });
		check_added_monitors!(nodes[2], 1);
		check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
		check_closed_event!(nodes[2], 1, ClosureReason::CommitmentTxConfirmed);
		let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		assert_eq!(node_2_commitment_txn.len(), 1); // ChannelMonitor: 1 offered HTLC-Claim
		check_spends!(node_2_commitment_txn[0], node_1_commitment_txn[0]);

		// Make sure node 1's height is the same as the !local_commitment case
		connect_blocks(&nodes[1], 1);
		// Confirm node 1's commitment txn (and HTLC-Timeout) on node 1
		header.prev_blockhash = nodes[1].best_block_hash();
		connect_block(&nodes[1], &Block { header, txdata: node_1_commitment_txn.clone() });

		// ...but return node 1's commitment tx in case claim is set and we're preparing to reorg
		vec![node_1_commitment_txn[0].clone(), node_2_commitment_txn[0].clone()]
	} else {
		// Broadcast node 2 commitment txn
		let mut node_2_commitment_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
		assert_eq!(node_2_commitment_txn.len(), 2); // 1 local commitment tx, 1 Received HTLC-Claim
		assert_eq!(node_2_commitment_txn[0].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
		check_spends!(node_2_commitment_txn[0], chan_2.3);
		check_spends!(node_2_commitment_txn[1], node_2_commitment_txn[0]);

		// Give node 1 node 2's commitment transaction and get its response (timing the HTLC out)
		mine_transaction(&nodes[1], &node_2_commitment_txn[0]);
		connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
		let node_1_commitment_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
		assert_eq!(node_1_commitment_txn.len(), 1); // ChannelMonitor: 1 offered HTLC-Timeout
		check_spends!(node_1_commitment_txn[0], node_2_commitment_txn[0]);

		// Confirm node 1's HTLC-Timeout on node 1
		mine_transaction(&nodes[1], &node_1_commitment_txn[0]);
		// ...but return node 2's commitment tx (and claim) in case claim is set and we're preparing to reorg
		vec![node_2_commitment_txn.pop().unwrap()]
	};
	check_added_monitors!(nodes[1], 1);
	check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);

	// Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1.
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
	check_added_monitors!(nodes[1], 0);
	assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
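
	// At this point the HTLC-resolving transaction has ANTI_REORG_DELAY - 1 confirmations: one
	// more block will finalize it, while disconnecting ANTI_REORG_DELAY - 1 blocks lets a
	// conflicting claim replace it.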
	if claim {
		// Disconnect Node 1's HTLC-Timeout which was connected above
		disconnect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

		let block = Block {
			header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
			txdata: claim_txn,
		};
		connect_block(&nodes[1], &block);

		// ChannelManager only polls chain::Watch::release_pending_monitor_events when we
		// probe it for events, so we probe non-message events here (which should just be the
		// PaymentForwarded event).
		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, true);
	} else {
		// Confirm the timeout tx and check that we fail the HTLC backwards
		let block = Block {
			header: BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
			txdata: vec![],
		};
		connect_block(&nodes[1], &block);
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
	}
	check_added_monitors!(nodes[1], 1);
	// Which should result in an immediate claim/fail of the HTLC:
	let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	if claim {
		assert_eq!(htlc_updates.update_fulfill_htlcs.len(), 1);
		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fulfill_htlcs[0]);
	} else {
		assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
	}
	commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false, true);
	if claim {
		expect_payment_sent!(nodes[0], our_payment_preimage);
	} else {
		expect_payment_failed_with_update!(nodes[0], our_payment_hash, false, chan_2.0.contents.short_channel_id, true);
	}
}

#[test]
fn test_onchain_htlc_claim_reorg_local_commitment() {
	do_test_onchain_htlc_reorg(true, true);
}
#[test]
fn test_onchain_htlc_timeout_delay_local_commitment() {
	do_test_onchain_htlc_reorg(true, false);
}
#[test]
fn test_onchain_htlc_claim_reorg_remote_commitment() {
	do_test_onchain_htlc_reorg(false, true);
}
#[test]
fn test_onchain_htlc_timeout_delay_remote_commitment() {
	do_test_onchain_htlc_reorg(false, false);
}

#[test]
fn test_counterparty_revoked_reorg() {
	// Test what happens when a revoked counterparty transaction is broadcast but then reorg'd out
	// of the main chain. Specifically, HTLCs in the latest commitment transaction which are not
	// included in the revoked commitment transaction should not be considered failed, and should
	// still be claim-from-able after the reorg.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);

	// Get the initial commitment transaction for broadcast, before any HTLCs are added at all.
	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
	assert_eq!(revoked_local_txn.len(), 1);
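	// The payments routed below will advance the channel state, revoking this initial commitment.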

	// Now add two HTLCs in each direction, one dust and one not.
	route_payment(&nodes[0], &[&nodes[1]], 5_000_000);
	route_payment(&nodes[0], &[&nodes[1]], 5_000);
	let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[1], &[&nodes[0]], 4_000_000);
	let payment_hash_4 = route_payment(&nodes[1], &[&nodes[0]], 4_000).1;

	nodes[0].node.claim_funds(payment_preimage_3);
	let _ = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	check_added_monitors!(nodes[0], 1);
	expect_payment_claimed!(nodes[0], payment_hash_3, 4_000_000);

	let mut unrevoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
	assert_eq!(unrevoked_local_txn.len(), 3); // commitment + 2 HTLC txn
	// Sort the unrevoked transactions in reverse order, i.e. commitment tx, then HTLC 1, then HTLC 3
	unrevoked_local_txn.sort_unstable_by_key(|tx| 1_000_000 - tx.output.iter().map(|outp| outp.value).sum::<u64>());

	// Now mine A's old commitment transaction, which should close the channel, but take no action
	// on any of the HTLCs, at least until we get six confirmations (which we won't get).
	mine_transaction(&nodes[1], &revoked_local_txn[0]);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
	check_closed_broadcast!(nodes[1], true);

	// Connect up to one block before the revoked transaction would be considered final, then do a
	// reorg that disconnects the full chain and goes up to the height at which the revoked
	// transaction would be final.
	let theoretical_conf_height = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	disconnect_all_blocks(&nodes[1]);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	connect_blocks(&nodes[1], theoretical_conf_height);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	// Now connect A's latest commitment transaction instead and resolve the HTLCs
	mine_transaction(&nodes[1], &unrevoked_local_txn[0]);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());

	// Connect the HTLC claim transaction for HTLC 3
	mine_transaction(&nodes[1], &unrevoked_local_txn[2]);
	expect_payment_sent!(nodes[1], payment_preimage_3);
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	// Connect blocks to confirm the unrevoked commitment transaction
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
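	// HTLC 4 was dust, so it has no on-chain output to claim; once the unrevoked commitment is
	// irrevocably confirmed the HTLC can only be failed back to the sender.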
	expect_payment_failed!(nodes[1], payment_hash_4, false);
}

fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_unconfirmed: bool, connect_style: ConnectStyle) {
	// After creating a chan between nodes, we disconnect all blocks previously seen to force a
	// channel close on nodes[0] side. We also use this to provide very basic testing of logic
	// around freeing background events which store monitor updates during block_[dis]connected.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let persister: test_utils::TestPersister;
	let new_chain_monitor: test_utils::TestChainMonitor;
	let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestLogger>;
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	*nodes[0].connect_style.borrow_mut() = connect_style;

	let chan_conf_height = core::cmp::max(nodes[0].best_block_info().1 + 1, nodes[1].best_block_info().1 + 1);
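	// The funding transaction created below will confirm at this height; the hash of that block
	// is what Confirm::get_relevant_txids() is expected to report alongside the funding txid.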
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	{
		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
		assert_eq!(peer_state.channel_by_id.len(), 1);
		assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 2);
	}
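	// (A single channel yields two short_to_chan_info entries: the real SCID and the outbound
	// SCID alias.)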

	assert_eq!(nodes[0].node.list_channels()[0].confirmations, Some(10));
	assert_eq!(nodes[1].node.list_channels()[0].confirmations, Some(10));

	if !reorg_after_reload {
		if use_funding_unconfirmed {
			let relevant_txids = nodes[0].node.get_relevant_txids();
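			// get_relevant_txids() reports each transaction to monitor for reorgs together with
			// the hash of the block it confirmed in, sparing callers the extra bookkeeping.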
			assert_eq!(relevant_txids.len(), 1);
			let block_hash_opt = relevant_txids[0].1;
			let expected_hash = nodes[0].get_block_header(chan_conf_height).block_hash();
			assert_eq!(block_hash_opt, Some(expected_hash));
			let txid = relevant_txids[0].0;
			assert_eq!(txid, chan.3.txid());
			nodes[0].node.transaction_unconfirmed(&txid);
			assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
		} else if connect_style == ConnectStyle::FullBlockViaListen {
			disconnect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
			assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
			assert_eq!(nodes[0].node.list_channels()[0].confirmations, Some(1));
			disconnect_blocks(&nodes[0], 1);
			assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
		} else {
			disconnect_all_blocks(&nodes[0]);
			assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
		}
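		// Whichever way the funding was unconfirmed above, nothing should remain to monitor.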
		let relevant_txids = nodes[0].node.get_relevant_txids();
		assert_eq!(relevant_txids.len(), 0);

		handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
		check_added_monitors!(nodes[1], 1);
		{
			let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
			let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
			assert_eq!(peer_state.channel_by_id.len(), 0);
			assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
		}
	}

	if reload_node {
		// Since we currently have a background event pending, it's good to test that we survive a
		// serialization roundtrip. Further, this tests the somewhat awkward edge-case of dropping
		// the Channel object from the ChannelManager, but still having a monitor event pending for
		// it when we go to deserialize, and then use the ChannelManager.
		let nodes_0_serialized = nodes[0].node.encode();
		let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
		reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);

		if !reorg_after_reload {
			// If the channel is already closed when we reload the node, we'll broadcast a closing
			// transaction via the ChannelMonitor which is missing a corresponding channel.
			assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
			nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
		}
	}

	if reorg_after_reload {
		if use_funding_unconfirmed {
			let relevant_txids = nodes[0].node.get_relevant_txids();
			assert_eq!(relevant_txids.len(), 1);
			let block_hash_opt = relevant_txids[0].1;
			let expected_hash = nodes[0].get_block_header(chan_conf_height).block_hash();
			assert_eq!(block_hash_opt, Some(expected_hash));
			let txid = relevant_txids[0].0;
			assert_eq!(txid, chan.3.txid());
			nodes[0].node.transaction_unconfirmed(&txid);
			assert_eq!(nodes[0].node.list_channels().len(), 0);
		} else if connect_style == ConnectStyle::FullBlockViaListen {
			disconnect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH - 1);
			assert_eq!(nodes[0].node.list_channels().len(), 1);
			assert_eq!(nodes[0].node.list_channels()[0].confirmations, Some(1));
			disconnect_blocks(&nodes[0], 1);
			assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
		} else {
			disconnect_all_blocks(&nodes[0]);
			assert_eq!(nodes[0].node.list_usable_channels().len(), 0);
		}
		let relevant_txids = nodes[0].node.get_relevant_txids();
		assert_eq!(relevant_txids.len(), 0);

		handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
		check_added_monitors!(nodes[1], 1);
		{
			let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
			let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
			assert_eq!(peer_state.channel_by_id.len(), 0);
			assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
		}
	}

	// With expect_channel_force_closed set the TestChainMonitor will enforce that the next update
	// is a ChannelForceClosed on the right channel with should_broadcast set.
	*nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
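	// Monitor updates triggered by block_[dis]connected are queued as background events rather
	// than applied inline, so they must be explicitly released here.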
	nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
	check_added_monitors!(nodes[0], 1);
	let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
	check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Channel closed because of an exception: ".to_owned() + expected_err });
	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() });
	assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
	nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();

	// Now check that we can create a new channel
	create_announced_chan_between_nodes(&nodes, 0, 1);
	send_payment(&nodes[0], &[&nodes[1]], 8000000);
}

#[test]
fn test_unconf_chan() {
	do_test_unconf_chan(true, true, false, ConnectStyle::BestBlockFirstSkippingBlocks);
	do_test_unconf_chan(false, true, false, ConnectStyle::BestBlockFirstSkippingBlocks);
	do_test_unconf_chan(true, false, false, ConnectStyle::BestBlockFirstSkippingBlocks);
	do_test_unconf_chan(false, false, false, ConnectStyle::BestBlockFirstSkippingBlocks);

	do_test_unconf_chan(true, true, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
	do_test_unconf_chan(false, true, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
	do_test_unconf_chan(true, false, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
	do_test_unconf_chan(false, false, false, ConnectStyle::BestBlockFirstReorgsOnlyTip);
}

#[test]
fn test_unconf_chan_via_listen() {
	do_test_unconf_chan(true, true, false, ConnectStyle::FullBlockViaListen);
	do_test_unconf_chan(false, true, false, ConnectStyle::FullBlockViaListen);
	do_test_unconf_chan(true, false, false, ConnectStyle::FullBlockViaListen);
	do_test_unconf_chan(false, false, false, ConnectStyle::FullBlockViaListen);
}

#[test]
fn test_unconf_chan_via_funding_unconfirmed() {
	do_test_unconf_chan(true, true, true, ConnectStyle::BestBlockFirstSkippingBlocks);
	do_test_unconf_chan(false, true, true, ConnectStyle::BestBlockFirstSkippingBlocks);
	do_test_unconf_chan(true, false, true, ConnectStyle::BestBlockFirstSkippingBlocks);
	do_test_unconf_chan(false, false, true, ConnectStyle::BestBlockFirstSkippingBlocks);

	do_test_unconf_chan(true, true, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
	do_test_unconf_chan(false, true, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
	do_test_unconf_chan(true, false, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);
	do_test_unconf_chan(false, false, true, ConnectStyle::BestBlockFirstReorgsOnlyTip);

	do_test_unconf_chan(true, true, true, ConnectStyle::FullBlockViaListen);
	do_test_unconf_chan(false, true, true, ConnectStyle::FullBlockViaListen);
	do_test_unconf_chan(true, false, true, ConnectStyle::FullBlockViaListen);
	do_test_unconf_chan(false, false, true, ConnectStyle::FullBlockViaListen);
}

#[test]
fn test_set_outpoints_partial_claiming() {
	// - remote party claim tx, new bump tx
	// - disconnect remote claiming tx, new bump
	// - disconnect tx, see no tx anymore
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
	let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
	let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);

	// Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLCs
	let remote_txn = get_local_commitment_txn!(nodes[1], chan.2);
	assert_eq!(remote_txn.len(), 3);
	assert_eq!(remote_txn[0].output.len(), 4);
	assert_eq!(remote_txn[0].input.len(), 1);
	assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
	check_spends!(remote_txn[1], remote_txn[0]);
	check_spends!(remote_txn[2], remote_txn[0]);
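	// remote_txn is node B's commitment transaction plus its two HTLC-timeout transactions;
	// from node A's perspective these are the counterparty's ("remote") transactions.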

	// Connect blocks on node A to advance height towards TEST_FINAL_CLTV
	// Provide node A with both preimages
	nodes[0].node.claim_funds(payment_preimage_1);
	expect_payment_claimed!(nodes[0], payment_hash_1, 3_000_000);
	nodes[0].node.claim_funds(payment_preimage_2);
	expect_payment_claimed!(nodes[0], payment_hash_2, 3_000_000);
	check_added_monitors!(nodes[0], 2);
	nodes[0].node.get_and_clear_pending_msg_events();

	// Connect node B's commitment transaction on node A
	mine_transaction(&nodes[0], &remote_txn[0]);
	check_closed_broadcast!(nodes[0], true);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
	check_added_monitors!(nodes[0], 1);

	// Verify node A broadcast tx claiming both HTLCs
	{
		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		// ChannelMonitor: claim tx
		assert_eq!(node_txn.len(), 1);
		check_spends!(node_txn[0], remote_txn[0]);
		assert_eq!(node_txn[0].input.len(), 2);
		node_txn.clear();
	}

	// Connect blocks on node B
	connect_blocks(&nodes[1], 135);
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
	check_added_monitors!(nodes[1], 1);

	// Verify node B broadcast 2 HTLC-timeout txn
	let partial_claim_tx = {
		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 3);
		check_spends!(node_txn[1], node_txn[0]);
		check_spends!(node_txn[2], node_txn[0]);
		assert_eq!(node_txn[1].input.len(), 1);
		assert_eq!(node_txn[2].input.len(), 1);
		node_txn[1].clone()
	};
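	// partial_claim_tx spends only one of the two HTLC outputs, so it conflicts with exactly one
	// input of node A's aggregated claim.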

	// Broadcast partial claim on node A, should regenerate a claiming tx with HTLC dropped
	mine_transaction(&nodes[0], &partial_claim_tx);
	{
		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);
		check_spends!(node_txn[0], remote_txn[0]);
		assert_eq!(node_txn[0].input.len(), 1); //dropped HTLC
		node_txn.clear();
	}
	nodes[0].node.get_and_clear_pending_msg_events();

	// Disconnect last block on node A, should regenerate a claiming tx with the HTLC resurrected
	disconnect_blocks(&nodes[0], 1);
	{
		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);
		check_spends!(node_txn[0], remote_txn[0]);
		assert_eq!(node_txn[0].input.len(), 2); //resurrected HTLC
		node_txn.clear();
	}

	// Disconnect one more block and then reconnect multiple blocks; no transaction should be generated
	disconnect_blocks(&nodes[0], 1);
	connect_blocks(&nodes[0], 15);
	{
		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 0);
		node_txn.clear();
	}
}

fn do_test_to_remote_after_local_detection(style: ConnectStyle) {
	// In previous code, detection of to_remote outputs in a counterparty commitment transaction
	// was dependent on whether a local commitment transaction had been seen on-chain previously.
	// This resulted in some edge cases around not being able to generate a SpendableOutput event
	// after a reorg.
	//
	// Here, we test this by first confirming one set of commitment transactions, then
	// disconnecting them and reconnecting another. We then confirm them and check that the correct
	// SpendableOutput event is generated.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	*nodes[0].connect_style.borrow_mut() = style;
	*nodes[1].connect_style.borrow_mut() = style;

	let (_, _, chan_id, funding_tx) =
		create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
	let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
	assert_eq!(funding_outpoint.to_channel_id(), chan_id);

	let remote_txn_a = get_local_commitment_txn!(nodes[0], chan_id);
	let remote_txn_b = get_local_commitment_txn!(nodes[1], chan_id);

	mine_transaction(&nodes[0], &remote_txn_a[0]);
	mine_transaction(&nodes[1], &remote_txn_a[0]);

	assert!(nodes[0].node.list_channels().is_empty());
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
	assert!(nodes[1].node.list_channels().is_empty());
	check_closed_broadcast!(nodes[1], true);
	check_added_monitors!(nodes[1], 1);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);

	assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());

	disconnect_blocks(&nodes[0], 1);
	disconnect_blocks(&nodes[1], 1);

	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());

	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());

	mine_transaction(&nodes[0], &remote_txn_b[0]);
	mine_transaction(&nodes[1], &remote_txn_b[0]);

	assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());

	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

	let mut node_a_spendable = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
	assert_eq!(node_a_spendable.len(), 1);
	if let Event::SpendableOutputs { outputs } = node_a_spendable.pop().unwrap() {
		assert_eq!(outputs.len(), 1);
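		// Sweep the to_remote output to a throwaway OP_RETURN script; the destination doesn't
		// matter here, only that the output is spendable at all.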
		let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
			Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
		check_spends!(spend_tx, remote_txn_b[0]);
	}

	// nodes[1] is waiting for the to_self_delay to expire, which is many more than
	// ANTI_REORG_DELAY. Instead, walk it back and confirm the original remote_txn_a commitment
	// again and check that nodes[1] generates a similar spendable output.
	// Technically a reorg of ANTI_REORG_DELAY violates our assumptions, so this is undefined by
	// our API spec, but we currently handle this correctly and there's little reason we shouldn't
	// in the future.
	assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
	disconnect_blocks(&nodes[1], ANTI_REORG_DELAY);
	mine_transaction(&nodes[1], &remote_txn_a[0]);
	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);

	let mut node_b_spendable = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events();
	assert_eq!(node_b_spendable.len(), 1);
	if let Event::SpendableOutputs { outputs } = node_b_spendable.pop().unwrap() {
		assert_eq!(outputs.len(), 1);
		let spend_tx = nodes[1].keys_manager.backing.spend_spendable_outputs(&[&outputs[0]], Vec::new(),
			Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &Secp256k1::new()).unwrap();
		check_spends!(spend_tx, remote_txn_a[0]);
	}
}

#[test]
fn test_to_remote_after_local_detection() {
	do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirst);
	do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirstSkippingBlocks);
	do_test_to_remote_after_local_detection(ConnectStyle::BestBlockFirstReorgsOnlyTip);
	do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirst);
	do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstSkippingBlocks);
	do_test_to_remote_after_local_detection(ConnectStyle::TransactionsFirstReorgsOnlyTip);
	do_test_to_remote_after_local_detection(ConnectStyle::FullBlockViaListen);
}