// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! Tests of the payment retry logic in ChannelManager, including various edge-cases around
//! serialization ordering between ChannelManager/ChannelMonitors and ensuring we can still retry
//! payments thereafter.
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch};
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS};
use crate::sign::EntropySource;
use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose};
use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
use crate::ln::features::Bolt11InvoiceFeatures;
use crate::ln::{msgs, ChannelId, PaymentSecret, PaymentPreimage};
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::outbound_payment::Retry;
use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters, find_route};
use crate::routing::scoring::ChannelUsage;
use crate::util::test_utils;
use crate::util::errors::APIError;
use crate::util::ser::Writeable;
use crate::util::string::UntrustedString;

use bitcoin::network::constants::Network;

use crate::prelude::*;

use crate::ln::functional_test_utils::*;
use crate::routing::gossip::NodeId;

#[cfg(feature = "std")]
use {
	crate::util::time::tests::SinceEpoch,
	std::time::{SystemTime, Instant, Duration}
};

#[test]
fn mpp_failure() {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;

	let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0].hops[0].short_channel_id = chan_1_id;
	route.paths[0].hops[1].short_channel_id = chan_3_id;
	route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1].hops[0].short_channel_id = chan_2_id;
	route.paths[1].hops[1].short_channel_id = chan_4_id;

	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
	fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);
}

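// Test that when one part of an MPP payment fails we can retry just the failed path (after
// rebalancing the outgoing channel) and still claim the full payment.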
#[test]
fn mpp_retry() {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
	let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
	let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3);
	let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2);
	// Rebalance
	send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);

	let amt_msat = 1_000_000;
	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], amt_msat);
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0].hops[0].short_channel_id = chan_1_update.contents.short_channel_id;
	route.paths[0].hops[1].short_channel_id = chan_3_update.contents.short_channel_id;
	route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1].hops[0].short_channel_id = chan_2_update.contents.short_channel_id;
	route.paths[1].hops[1].short_channel_id = chan_4_update.contents.short_channel_id;

	// Initiate the MPP payment.
	let payment_id = PaymentId(payment_hash.0);
	let mut route_params = route.route_params.clone().unwrap();

	nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
		payment_id, route_params.clone(), Retry::Attempts(1)).unwrap();
	check_added_monitors!(nodes[0], 2); // one monitor per path
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);

	// Pass half of the payment along the success path.
	let success_path_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), success_path_msgs, false, None);

	// Add the HTLC along the first hop.
	let fail_path_msgs_1 = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
	let (update_add, commitment_signed) = match fail_path_msgs_1 {
		MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
			assert_eq!(update_add_htlcs.len(), 1);
			assert!(update_fail_htlcs.is_empty());
			assert!(update_fulfill_htlcs.is_empty());
			assert!(update_fail_malformed_htlcs.is_empty());
			assert!(update_fee.is_none());
			(update_add_htlcs[0].clone(), commitment_signed.clone())
		},
		_ => panic!("Unexpected event"),
	};
	nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
	commitment_signed_dance!(nodes[2], nodes[0], commitment_signed, false);

	// Attempt to forward the payment and complete the 2nd path's failure.
	expect_pending_htlcs_forwardable!(&nodes[2]);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id }]);
	let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
	assert!(htlc_updates.update_add_htlcs.is_empty());
	assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
	assert!(htlc_updates.update_fulfill_htlcs.is_empty());
	assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
	check_added_monitors!(nodes[2], 1);
	nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[2], htlc_updates.commitment_signed, false);
	let mut events = nodes[0].node.get_and_clear_pending_events();
	match events[1] {
		Event::PendingHTLCsForwardable { .. } => {},
		_ => panic!("Unexpected event")
	}
	events.remove(1);
	expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain());

	// Rebalance the channel so the second half of the payment can succeed.
	send_payment(&nodes[3], &vec!(&nodes[2])[..], 1_500_000);

	// Retry the second half of the payment and make sure it succeeds.
	route.paths.remove(0);
	route_params.final_value_msat = 1_000_000;
	route_params.payment_params.previously_failed_channels.push(chan_4_update.contents.short_channel_id);
	nodes[0].router.expect_find_route(route_params, Ok(route));
	nodes[0].node.process_pending_htlc_forwards();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 2_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
}

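// If only part of an MPP payment arrives at the recipient, the recipient should fail the
// received HTLC(s) back after MPP_TIMEOUT_TICKS; if all parts arrive, no timeout should occur
// and the payment can be claimed.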
fn do_mpp_receive_timeout(send_partial_mpp: bool) {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
	let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2);
	let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3);
	let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3);

	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000);
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0].hops[0].short_channel_id = chan_1_update.contents.short_channel_id;
	route.paths[0].hops[1].short_channel_id = chan_3_update.contents.short_channel_id;
	route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1].hops[0].short_channel_id = chan_2_update.contents.short_channel_id;
	route.paths[1].hops[1].short_channel_id = chan_4_update.contents.short_channel_id;

	// Initiate the MPP payment.
	nodes[0].node.send_payment_with_route(&route, payment_hash,
		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 2); // one monitor per path
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);

	// Pass half of the payment along the first path.
	let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_1_msgs, false, None);

	if send_partial_mpp {
		// Time out the partial MPP
		for _ in 0..MPP_TIMEOUT_TICKS {
			nodes[3].node.timer_tick_occurred();
		}

		// Failed HTLC from node 3 -> 1
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]);
		let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id());
		assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1);
		nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]);
		check_added_monitors!(nodes[3], 1);
		commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false);

		// Failed HTLC from node 1 -> 0
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id }]);
		let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1);
		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]);
		check_added_monitors!(nodes[1], 1);
		commitment_signed_dance!(nodes[0], nodes[1], htlc_fail_updates_1_0.commitment_signed, false);

		expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new().mpp_parts_remain().expected_htlc_error_data(23, &[][..]));
	} else {
		// Pass half of the payment along the second path.
		let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
		pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash, Some(payment_secret), node_2_msgs, true, None);

		// Even after MPP_TIMEOUT_TICKS we should not timeout the MPP if we have all the parts
		for _ in 0..MPP_TIMEOUT_TICKS {
			nodes[3].node.timer_tick_occurred();
		}

		claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
	}
}

#[test]
fn mpp_receive_timeout() {
	do_mpp_receive_timeout(true);
	do_mpp_receive_timeout(false);
}

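// Send a simple single-hop keysend payment, covering both announced and unannounced channels as
// well as the manual-route and automatic-retry send paths, and check the recipient can claim it
// using the preimage extracted from the onion.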
#[test]
fn test_keysend_payments() {
	do_test_keysend_payments(false, false);
	do_test_keysend_payments(false, true);
	do_test_keysend_payments(true, false);
	do_test_keysend_payments(true, true);
}

fn do_test_keysend_payments(public_node: bool, with_retry: bool) {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	if public_node {
		create_announced_chan_between_nodes(&nodes, 0, 1);
	} else {
		create_chan_between_nodes(&nodes[0], &nodes[1]);
	}
	let payer_pubkey = nodes[0].node.get_our_node_id();
	let payee_pubkey = nodes[1].node.get_our_node_id();
	let route_params = RouteParameters::from_payment_params_and_value(
		PaymentParameters::for_keysend(payee_pubkey, 40, false), 10000);

	let network_graph = nodes[0].network_graph.clone();
	let channels = nodes[0].node.list_usable_channels();
	let first_hops = channels.iter().collect::<Vec<_>>();
	let first_hops = if public_node { None } else { Some(first_hops.as_slice()) };

	let scorer = test_utils::TestScorer::new();
	let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
	let route = find_route(
		&payer_pubkey, &route_params, &network_graph, first_hops,
		nodes[0].logger, &scorer, &(), &random_seed_bytes
	).unwrap();

	{
		let test_preimage = PaymentPreimage([42; 32]);
		if with_retry {
			nodes[0].node.send_spontaneous_payment_with_retry(Some(test_preimage),
				RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0),
				route_params, Retry::Attempts(1)).unwrap()
		} else {
			nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage),
				RecipientOnionFields::spontaneous_empty(), PaymentId(test_preimage.0)).unwrap()
		};
	}

	check_added_monitors!(nodes[0], 1);

	let send_event = SendEvent::from_node(&nodes[0]);
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
	do_commitment_signed_dance(&nodes[1], &nodes[0], &send_event.commitment_msg, false, false);

	expect_pending_htlcs_forwardable!(nodes[1]);

	// Previously, a refactor caused us to stop including the payment preimage in the onion which
	// is sent as a part of keysend payments. Thus, to be extra careful here, we scope the preimage
	// above to demonstrate that we have no way to get the preimage at this point except by
	// extracting it from the onion nodes[1] received.
	let event = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(event.len(), 1);
	if let Event::PaymentClaimable { purpose: PaymentPurpose::SpontaneousPayment(preimage), .. } = event[0] {
		claim_payment(&nodes[0], &[&nodes[1]], preimage);
	} else { panic!(); }
}

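// Test sending a keysend payment split across two paths to a recipient configured with
// accept_mpp_keysend, and claiming it with the preimage.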
#[test]
fn test_mpp_keysend() {
	let mut mpp_keysend_config = test_default_channel_config();
	mpp_keysend_config.accept_mpp_keysend = true;
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, Some(mpp_keysend_config)]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1);
	create_announced_chan_between_nodes(&nodes, 0, 2);
	create_announced_chan_between_nodes(&nodes, 1, 3);
	create_announced_chan_between_nodes(&nodes, 2, 3);
	let network_graph = nodes[0].network_graph.clone();

	let payer_pubkey = nodes[0].node.get_our_node_id();
	let payee_pubkey = nodes[3].node.get_our_node_id();
	let recv_value = 15_000_000;
	let route_params = RouteParameters::from_payment_params_and_value(
		PaymentParameters::for_keysend(payee_pubkey, 40, true), recv_value);
	let scorer = test_utils::TestScorer::new();
	let random_seed_bytes = chanmon_cfgs[0].keys_manager.get_secure_random_bytes();
	let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger,
		&scorer, &(), &random_seed_bytes).unwrap();

	let payment_preimage = PaymentPreimage([42; 32]);
	let payment_secret = PaymentSecret(payment_preimage.0);
	let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_preimage.0)).unwrap();
	check_added_monitors!(nodes[0], 2);

	let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 2);

	let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
	pass_along_path(&nodes[0], expected_route[0], recv_value, payment_hash.clone(),
		Some(payment_secret), ev.clone(), false, Some(payment_preimage));

	let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
	pass_along_path(&nodes[0], expected_route[1], recv_value, payment_hash.clone(),
		Some(payment_secret), ev.clone(), true, Some(payment_preimage));
	claim_payment_along_route(&nodes[0], expected_route, false, payment_preimage);
}

#[test]
fn test_reject_mpp_keysend_htlc() {
	// This test enforces that we reject MPP keysend HTLCs if our config states we don't support
	// MPP keysend. When receiving a payment, if we don't support MPP keysend we'll reject the
	// payment if it's keysend and has a payment secret, never reaching our payment validation
	// logic. To check that we enforce rejecting MPP keysends in our payment logic, here we send
	// keysend payments without payment secrets, then modify them by adding payment secrets in the
	// final node in between receiving the HTLCs and actually processing them.
	let mut reject_mpp_keysend_cfg = test_default_channel_config();
	reject_mpp_keysend_cfg.accept_mpp_keysend = false;

	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, Some(reject_mpp_keysend_cfg)]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
	let (update_a, _, chan_4_channel_id, _) = create_announced_chan_between_nodes(&nodes, 2, 3);
	let chan_4_id = update_a.contents.short_channel_id;
	let amount = 40_000;
	let (mut route, payment_hash, payment_preimage, _) = get_route_and_payment_hash!(nodes[0], nodes[3], amount);

	// Pay along nodes[1]
	route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0].hops[0].short_channel_id = chan_1_id;
	route.paths[0].hops[1].short_channel_id = chan_3_id;

	let payment_id_0 = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
	nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_0).unwrap();
	check_added_monitors!(nodes[0], 1);

	let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	let update_add_0 = update_0.update_add_htlcs[0].clone();
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_0);
	commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true);
	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors!(&nodes[1], 1);

	let update_1 = get_htlc_update_msgs!(nodes[1], nodes[3].node.get_our_node_id());
	let update_add_1 = update_1.update_add_htlcs[0].clone();
	nodes[3].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_1);
	commitment_signed_dance!(nodes[3], nodes[1], update_1.commitment_signed, false, true);

	assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
	for (_, pending_forwards) in nodes[3].node.forward_htlcs.lock().unwrap().iter_mut() {
		for f in pending_forwards.iter_mut() {
			match f {
				&mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) => {
					match forward_info.routing {
						PendingHTLCRouting::ReceiveKeysend { ref mut payment_data, .. } => {
							*payment_data = Some(msgs::FinalOnionHopData {
								payment_secret: PaymentSecret([42; 32]),
								total_msat: amount * 2,
							});
						},
						_ => panic!("Expected PendingHTLCRouting::ReceiveKeysend"),
					}
				},
				_ => {},
			}
		}
	}
	expect_pending_htlcs_forwardable!(nodes[3]);

	// Pay along nodes[2]
	route.paths[0].hops[0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[0].hops[0].short_channel_id = chan_2_id;
	route.paths[0].hops[1].short_channel_id = chan_4_id;

	let payment_id_1 = PaymentId(nodes[0].keys_manager.backing.get_secure_random_bytes());
	nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1).unwrap();
	check_added_monitors!(nodes[0], 1);

	let update_2 = get_htlc_update_msgs!(nodes[0], nodes[2].node.get_our_node_id());
	let update_add_2 = update_2.update_add_htlcs[0].clone();
	nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_2);
	commitment_signed_dance!(nodes[2], nodes[0], &update_2.commitment_signed, false, true);
	expect_pending_htlcs_forwardable!(nodes[2]);
	check_added_monitors!(&nodes[2], 1);

	let update_3 = get_htlc_update_msgs!(nodes[2], nodes[3].node.get_our_node_id());
	let update_add_3 = update_3.update_add_htlcs[0].clone();
	nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &update_add_3);
	commitment_signed_dance!(nodes[3], nodes[2], update_3.commitment_signed, false, true);

	assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
	for (_, pending_forwards) in nodes[3].node.forward_htlcs.lock().unwrap().iter_mut() {
		for f in pending_forwards.iter_mut() {
			match f {
				&mut HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { ref mut forward_info, .. }) => {
					match forward_info.routing {
						PendingHTLCRouting::ReceiveKeysend { ref mut payment_data, .. } => {
							*payment_data = Some(msgs::FinalOnionHopData {
								payment_secret: PaymentSecret([42; 32]),
								total_msat: amount * 2,
							});
						},
						_ => panic!("Expected PendingHTLCRouting::ReceiveKeysend"),
					}
				},
				_ => {},
			}
		}
	}
	expect_pending_htlcs_forwardable!(nodes[3]);
	check_added_monitors!(nodes[3], 1);

	// Fail back along nodes[2]
	let update_fail_0 = get_htlc_update_msgs!(&nodes[3], &nodes[2].node.get_our_node_id());
	nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &update_fail_0.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[2], nodes[3], update_fail_0.commitment_signed, false);
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_channel_id }]);
	check_added_monitors!(nodes[2], 1);

	let update_fail_1 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &update_fail_1.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[2], update_fail_1.commitment_signed, false);

	expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new());
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash }]);
}

#[test]
fn no_pending_leak_on_initial_send_failure() {
	// In an earlier version of our payment tracking, we'd have a retry entry even when the initial
	// HTLC for the payment failed to send due to local channel errors (e.g. peer disconnected). In
	// this case, the user wouldn't have a PaymentId to retry the payment with, but we'd think we
	// have a pending payment forever and never time it out.
	// Here we test exactly that - retrying a payment when a peer was disconnected on the first
	// try, and then check that no pending payment is being tracked.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_announced_chan_between_nodes(&nodes, 0, 1);

	let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);

	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());

	unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
		), true, APIError::ChannelUnavailable { ref err },
		assert_eq!(err, "Peer for first hop currently disconnected"));

	assert!(!nodes[0].node.has_pending_payments());
}

fn do_retry_with_no_persist(confirm_before_reload: bool) {
	// If we send a pending payment and `send_payment` returns success, we should always either
	// return a payment failure event or a payment success event, and on failure the payment should
	// be retryable.
	//
	// In order to do so when the ChannelManager isn't immediately persisted (which is normal - it's
	// always persisted asynchronously), the ChannelManager has to reload some payment data from
	// ChannelMonitor(s) in some cases. This tests that reloading.
	//
	// `confirm_before_reload` confirms the channel-closing commitment transaction on-chain prior
	// to reloading the ChannelManager, increasing test coverage in ChannelMonitor HTLC tracking
	// which has separate codepaths for "commitment transaction already confirmed" and not.
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let persister;
	let new_chain_monitor;

	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes_0_deserialized;

	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
	let (_, _, chan_id_2, _) = create_announced_chan_between_nodes(&nodes, 1, 2);

	// Serialize the ChannelManager prior to sending payments
	let nodes_0_serialized = nodes[0].node.encode();

	// Send two payments - one which will get to nodes[2] and will be claimed, one which we'll time
	// out and retry.
	let amt_msat = 1_000_000;
	let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat);
	let (payment_preimage_1, payment_hash_1, _, payment_id_1) = send_along_route(&nodes[0], route.clone(), &[&nodes[1], &nodes[2]], 1_000_000);
	let route_params = route.route_params.unwrap().clone();
	nodes[0].node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
		PaymentId(payment_hash.0), route_params, Retry::Attempts(1)).unwrap();
	check_added_monitors!(nodes[0], 1);

	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let payment_event = SendEvent::from_event(events.pop().unwrap());
	assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());

	// We relay the payment to nodes[1] while it's disconnected from nodes[2], causing the payment
	// to be returned immediately to nodes[0], without having nodes[2] fail the inbound payment
	// which would prevent retry.
	nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());

	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true);
	// nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected
	let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());

	reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));

	let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan_id)[0].clone();
	if confirm_before_reload {
		mine_transaction(&nodes[0], &as_commitment_tx);
		nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
	}

	// The ChannelMonitor should always be the latest version, as we're required to persist it
	// during the `commitment_signed_dance!()`.
	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
	reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);

	// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
	// force-close the channel.
	check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager, [nodes[1].node.get_our_node_id()], 100000);
	assert!(nodes[0].node.list_channels().is_empty());
	assert!(nodes[0].node.has_pending_payments());
	nodes[0].node.timer_tick_occurred();
	if !confirm_before_reload {
		let as_broadcasted_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
		assert_eq!(as_broadcasted_txn.len(), 1);
		assert_eq!(as_broadcasted_txn[0].txid(), as_commitment_tx.txid());
	} else {
		assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
	}
	check_added_monitors!(nodes[0], 1);

	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

	// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
	// error, as the channel has hit the chain.
	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();
	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
	let as_err = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(as_err.len(), 1);
	match as_err[0] {
		MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
			assert_eq!(node_id, nodes[1].node.get_our_node_id());
			nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), msg);
			check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
				&nodes[1].node.get_our_node_id())) }, [nodes[0].node.get_our_node_id()], 100000);
			check_added_monitors!(nodes[1], 1);
			assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
		},
		_ => panic!("Unexpected event"),
	}
	check_closed_broadcast!(nodes[1], false);

	// Now claim the first payment, which should allow nodes[1] to claim the payment on-chain when
	// we close in a moment.
	nodes[2].node.claim_funds(payment_preimage_1);
	check_added_monitors!(nodes[2], 1);
	expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);

	let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
	check_added_monitors!(nodes[1], 1);
	commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false);
	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false);

	if confirm_before_reload {
		let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone();
		nodes[0].node.best_block_updated(&best_block.0.header, best_block.1);
	}

	// Create a new channel on which to retry the payment before we fail the payment via the
	// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
	// connecting several blocks while creating the channel (implying time has passed).
	create_announced_chan_between_nodes(&nodes, 0, 1);
	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);

	mine_transaction(&nodes[1], &as_commitment_tx);
	let bs_htlc_claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
	assert_eq!(bs_htlc_claim_txn.len(), 1);
	check_spends!(bs_htlc_claim_txn[0], as_commitment_tx);

	if !confirm_before_reload {
		mine_transaction(&nodes[0], &as_commitment_tx);
	}
	mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]);
	expect_payment_sent(&nodes[0], payment_preimage_1, None, true, false);
	connect_blocks(&nodes[0], TEST_FINAL_CLTV * 4 + 20);
	let (first_htlc_timeout_tx, second_htlc_timeout_tx) = {
		let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
		assert_eq!(txn.len(), 2);
		(txn.remove(0), txn.remove(0))
	};
	check_spends!(first_htlc_timeout_tx, as_commitment_tx);
	check_spends!(second_htlc_timeout_tx, as_commitment_tx);
	if first_htlc_timeout_tx.input[0].previous_output == bs_htlc_claim_txn[0].input[0].previous_output {
		confirm_transaction(&nodes[0], &second_htlc_timeout_tx);
	} else {
		confirm_transaction(&nodes[0], &first_htlc_timeout_tx);
	}
	nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
	expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new());

	// Finally, retry the payment (which was reloaded from the ChannelMonitor when nodes[0] was
	// reloaded) via a route over the new channel, which should work without issue and eventually
	// be received and claimed at the recipient just like any other payment.
	let (mut new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], 1_000_000);

	// Update the fee on the middle hop to ensure PaymentSent events have the correct (retried) fee
	// and not the original fee. We also update nodes[1]'s relevant config as
	// do_claim_payment_along_route expects us to never overpay.
	{
		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
		let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
			.unwrap().lock().unwrap();
		let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
		let mut new_config = channel.context.config();
		new_config.forwarding_fee_base_msat += 100_000;
		channel.context.update_config(&new_config);
		new_route.paths[0].hops[0].fee_msat += 100_000;
	}

	// Force expiration of the channel's previous config.
	for _ in 0..EXPIRE_PREV_CONFIG_TICKS {
		nodes[1].node.timer_tick_occurred();
	}

	assert!(nodes[0].node.send_payment_with_route(&new_route, payment_hash, // Shouldn't be allowed to retry a fulfilled payment
		RecipientOnionFields::secret_only(payment_secret), payment_id_1).is_err());
	nodes[0].node.send_payment_with_route(&new_route, payment_hash,
		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	pass_along_path(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000, payment_hash, Some(payment_secret), events.pop().unwrap(), true, None);
	do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], false, payment_preimage);
	expect_payment_sent!(nodes[0], payment_preimage, Some(new_route.paths[0].hops[0].fee_msat));
}

#[test]
fn retry_with_no_persist() {
	do_retry_with_no_persist(true);
	do_retry_with_no_persist(false);
}

fn do_test_completed_payment_not_retryable_on_reload ( use_dust : bool ) {
// Test that an off-chain completed payment is not retryable on restart. This was previously
// broken for dust payments, but we test for both dust and non-dust payments.
//
// `use_dust` switches to using a dust HTLC, which results in the HTLC not having an on-chain
// output at all.
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let mut manually_accept_config = test_default_channel_config ( ) ;
manually_accept_config . manually_accept_inbound_channels = true ;
2023-08-15 19:19:03 +00:00
let first_persister ;
let first_new_chain_monitor ;
let second_persister ;
let second_new_chain_monitor ;
let third_persister ;
let third_new_chain_monitor ;
2022-09-02 21:10:43 +00:00
2023-08-15 19:19:03 +00:00
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , Some ( manually_accept_config ) , None ] ) ;
let first_nodes_0_deserialized ;
let second_nodes_0_deserialized ;
let third_nodes_0_deserialized ;
2022-09-02 21:10:43 +00:00
let mut nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
// Because we set nodes[1] to manually accept channels, just open a 0-conf channel.
let ( funding_tx , chan_id ) = open_zero_conf_channel ( & nodes [ 0 ] , & nodes [ 1 ] , None ) ;
confirm_transaction ( & nodes [ 0 ] , & funding_tx ) ;
confirm_transaction ( & nodes [ 1 ] , & funding_tx ) ;
// Ignore the announcement_signatures messages
nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
nodes [ 1 ] . node . get_and_clear_pending_msg_events ( ) ;
2023-01-11 10:21:29 -08:00
let chan_id_2 = create_announced_chan_between_nodes ( & nodes , 1 , 2 ) . 2 ;
2022-09-02 21:10:43 +00:00
// Serialize the ChannelManager prior to sending payments
let mut nodes_0_serialized = nodes [ 0 ] . node . encode ( ) ;
let route = get_route_and_payment_hash! ( nodes [ 0 ] , nodes [ 2 ] , if use_dust { 1_000 } else { 1_000_000 } ) . 0 ;
let ( payment_preimage , payment_hash , payment_secret , payment_id ) = send_along_route ( & nodes [ 0 ] , route , & [ & nodes [ 1 ] , & nodes [ 2 ] ] , if use_dust { 1_000 } else { 1_000_000 } ) ;
// The ChannelMonitor should always be the latest version, as we're required to persist it
// during the `commitment_signed_dance!()`.
2022-11-15 02:43:51 +00:00
let chan_0_monitor_serialized = get_monitor! ( nodes [ 0 ] , chan_id ) . encode ( ) ;
2022-09-02 21:10:43 +00:00
2022-11-15 02:43:51 +00:00
reload_node! ( nodes [ 0 ] , test_default_channel_config ( ) , nodes_0_serialized , & [ & chan_0_monitor_serialized ] , first_persister , first_new_chain_monitor , first_nodes_0_deserialized ) ;
2023-02-21 19:10:43 +00:00
nodes [ 1 ] . node . peer_disconnected ( & nodes [ 0 ] . node . get_our_node_id ( ) ) ;
2022-09-02 21:10:43 +00:00
// On reload, the ChannelManager should realize it is stale compared to the ChannelMonitor and
// force-close the channel.
2023-07-12 14:58:22 +03:00
check_closed_event! ( nodes [ 0 ] , 1 , ClosureReason ::OutdatedChannelManager , [ nodes [ 1 ] . node . get_our_node_id ( ) ] , 100000 ) ;
2023-02-28 10:45:48 -08:00
nodes [ 0 ] . node . timer_tick_occurred ( ) ;
2022-09-02 21:10:43 +00:00
assert! ( nodes [ 0 ] . node . list_channels ( ) . is_empty ( ) ) ;
assert! ( nodes [ 0 ] . node . has_pending_payments ( ) ) ;
assert_eq! ( nodes [ 0 ] . tx_broadcaster . txn_broadcasted . lock ( ) . unwrap ( ) . split_off ( 0 ) . len ( ) , 1 ) ;
2023-02-28 10:45:48 -08:00
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
2022-09-02 21:10:43 +00:00
2023-06-01 10:23:55 +02:00
nodes [ 0 ] . node . peer_connected ( & nodes [ 1 ] . node . get_our_node_id ( ) , & msgs ::Init {
features : nodes [ 1 ] . node . init_features ( ) , networks : None , remote_network_address : None
} , true ) . unwrap ( ) ;
2022-09-02 21:10:43 +00:00
assert! ( nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) . is_empty ( ) ) ;
// Now nodes[1] should send a channel reestablish, which nodes[0] will respond to with an
// error, as the channel has hit the chain.
2023-06-01 10:23:55 +02:00
nodes [ 1 ] . node . peer_connected ( & nodes [ 0 ] . node . get_our_node_id ( ) , & msgs ::Init {
features : nodes [ 0 ] . node . init_features ( ) , networks : None , remote_network_address : None
} , false ) . unwrap ( ) ;
2022-09-06 22:34:29 +00:00
let bs_reestablish = get_chan_reestablish_msgs! ( nodes [ 1 ] , nodes [ 0 ] ) . pop ( ) . unwrap ( ) ;
2022-09-02 21:10:43 +00:00
nodes [ 0 ] . node . handle_channel_reestablish ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_reestablish ) ;
let as_err = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( as_err . len ( ) , 1 ) ;
let bs_commitment_tx ;
match as_err [ 0 ] {
MessageSendEvent ::HandleError { node_id , action : msgs ::ErrorAction ::SendErrorMessage { ref msg } } = > {
assert_eq! ( node_id , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_error ( & nodes [ 0 ] . node . get_our_node_id ( ) , msg ) ;
2023-07-12 14:58:22 +03:00
check_closed_event! ( nodes [ 1 ] , 1 , ClosureReason ::CounterpartyForceClosed { peer_msg : UntrustedString ( format! ( " Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {} " , & nodes [ 1 ] . node . get_our_node_id ( ) ) ) }
, [ nodes [ 0 ] . node . get_our_node_id ( ) ] , 100000 ) ;
2022-09-02 21:10:43 +00:00
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
bs_commitment_tx = nodes [ 1 ] . tx_broadcaster . txn_broadcasted . lock ( ) . unwrap ( ) . split_off ( 0 ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
check_closed_broadcast! ( nodes [ 1 ] , false ) ;
// Now fail back the payment from nodes[2] to nodes[1]. This doesn't really matter as the
// previous hop channel is already on-chain, but it makes nodes[2] willing to see additional
// incoming HTLCs with the same payment hash later.
nodes [ 2 ] . node . fail_htlc_backwards ( & payment_hash ) ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( nodes [ 2 ] , [ HTLCDestination ::FailedPayment { payment_hash } ] ) ;
check_added_monitors! ( nodes [ 2 ] , 1 ) ;
let htlc_fulfill_updates = get_htlc_update_msgs! ( nodes [ 2 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_update_fail_htlc ( & nodes [ 2 ] . node . get_our_node_id ( ) , & htlc_fulfill_updates . update_fail_htlcs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 2 ] , htlc_fulfill_updates . commitment_signed , false ) ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( nodes [ 1 ] ,
[ HTLCDestination ::NextHopChannel { node_id : Some ( nodes [ 2 ] . node . get_our_node_id ( ) ) , channel_id : chan_id_2 } ] ) ;
// Connect the HTLC-Timeout transaction, timing out the HTLC on both nodes (but not confirming
// the HTLC-Timeout transaction beyond 1 conf). For dust HTLCs, the HTLC is considered resolved
// after the commitment transaction, so always connect the commitment transaction.
mine_transaction ( & nodes [ 0 ] , & bs_commitment_tx [ 0 ] ) ;
mine_transaction ( & nodes [ 1 ] , & bs_commitment_tx [ 0 ] ) ;
if ! use_dust {
2023-04-14 17:03:51 -07:00
connect_blocks ( & nodes [ 0 ] , TEST_FINAL_CLTV + ( MIN_CLTV_EXPIRY_DELTA as u32 ) ) ;
connect_blocks ( & nodes [ 1 ] , TEST_FINAL_CLTV + ( MIN_CLTV_EXPIRY_DELTA as u32 ) ) ;
2022-09-02 21:10:43 +00:00
let as_htlc_timeout = nodes [ 0 ] . tx_broadcaster . txn_broadcasted . lock ( ) . unwrap ( ) . split_off ( 0 ) ;
check_spends! ( as_htlc_timeout [ 0 ] , bs_commitment_tx [ 0 ] ) ;
assert_eq! ( as_htlc_timeout . len ( ) , 1 ) ;
mine_transaction ( & nodes [ 0 ] , & as_htlc_timeout [ 0 ] ) ;
// nodes[0] may rebroadcast (or RBF-bump) its HTLC-Timeout, so wipe the announced set.
nodes [ 0 ] . tx_broadcaster . txn_broadcasted . lock ( ) . unwrap ( ) . clear ( ) ;
mine_transaction ( & nodes [ 1 ] , & as_htlc_timeout [ 0 ] ) ;
}
// Create a new channel on which to retry the payment before we fail the payment via the
// HTLC-Timeout transaction. This avoids ChannelManager timing out the payment due to us
// connecting several blocks while creating the channel (implying time has passed).
// We do this with a zero-conf channel to avoid connecting blocks as a side-effect.
let ( _ , chan_id_3 ) = open_zero_conf_channel ( & nodes [ 0 ] , & nodes [ 1 ] , None ) ;
assert_eq! ( nodes [ 0 ] . node . list_usable_channels ( ) . len ( ) , 1 ) ;
// If we attempt to retry prior to the HTLC-Timeout (or commitment transaction, for dust HTLCs)
// confirming, we will fail as it's considered still-pending...
let ( new_route , _ , _ , _ ) = get_route_and_payment_hash! ( nodes [ 0 ] , nodes [ 2 ] , if use_dust { 1_000 } else { 1_000_000 } ) ;
2023-03-22 21:48:22 +00:00
match nodes [ 0 ] . node . send_payment_with_route ( & new_route , payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) , payment_id ) {
2023-02-05 17:05:12 -05:00
Err ( PaymentSendFailure ::DuplicatePayment ) = > { } ,
_ = > panic! ( " Unexpected error " )
}
2022-09-02 21:10:43 +00:00
assert! ( nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) . is_empty ( ) ) ;
// After ANTI_REORG_DELAY confirmations, the HTLC should be failed and we can try the payment
// again. We serialize the node first as we'll then test retrying the HTLC after a restart
// (which should also still work).
connect_blocks ( & nodes [ 0 ] , ANTI_REORG_DELAY - 1 ) ;
connect_blocks ( & nodes [ 1 ] , ANTI_REORG_DELAY - 1 ) ;
expect_payment_failed_conditions ( & nodes [ 0 ] , payment_hash , false , PaymentFailedConditions ::new ( ) ) ;
let chan_0_monitor_serialized = get_monitor! ( nodes [ 0 ] , chan_id ) . encode ( ) ;
let chan_1_monitor_serialized = get_monitor! ( nodes [ 0 ] , chan_id_3 ) . encode ( ) ;
nodes_0_serialized = nodes [ 0 ] . node . encode ( ) ;
// After the payment failed, we're free to send it again.
assert! ( nodes [ 0 ] . node . send_payment_with_route ( & new_route , payment_hash ,
RecipientOnionFields ::secret_only ( payment_secret ) , payment_id ) . is_ok ( ) ) ;
assert! ( ! nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) . is_empty ( ) ) ;
reload_node! ( nodes [ 0 ] , test_default_channel_config ( ) , nodes_0_serialized , & [ & chan_0_monitor_serialized , & chan_1_monitor_serialized ] , second_persister , second_new_chain_monitor , second_nodes_0_deserialized ) ;
nodes [ 1 ] . node . peer_disconnected ( & nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . test_process_background_events ( ) ;
check_added_monitors ( & nodes [ 0 ] , 1 ) ;
let mut reconnect_args = ReconnectArgs ::new ( & nodes [ 0 ] , & nodes [ 1 ] ) ;
reconnect_args . send_channel_ready = ( true , true ) ;
reconnect_nodes ( reconnect_args ) ;
// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
// the payment is not (spuriously) listed as still pending.
assert! ( nodes [ 0 ] . node . send_payment_with_route ( & new_route , payment_hash ,
RecipientOnionFields ::secret_only ( payment_secret ) , payment_id ) . is_ok ( ) ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
pass_along_route ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] , & nodes [ 2 ] ] ] , if use_dust { 1_000 } else { 1_000_000 } , payment_hash , payment_secret ) ;
claim_payment ( & nodes [ 0 ] , & [ & nodes [ 1 ] , & nodes [ 2 ] ] , payment_preimage ) ;
match nodes [ 0 ] . node . send_payment_with_route ( & new_route , payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) , payment_id ) {
Err ( PaymentSendFailure ::DuplicatePayment ) = > { } ,
_ = > panic! ( " Unexpected error " )
}
assert! ( nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) . is_empty ( ) ) ;
let chan_0_monitor_serialized = get_monitor! ( nodes [ 0 ] , chan_id ) . encode ( ) ;
let chan_1_monitor_serialized = get_monitor! ( nodes [ 0 ] , chan_id_3 ) . encode ( ) ;
nodes_0_serialized = nodes [ 0 ] . node . encode ( ) ;
// Check that after reload we can send the payment again (though we shouldn't, since it was
// claimed previously).
reload_node! ( nodes [ 0 ] , test_default_channel_config ( ) , nodes_0_serialized , & [ & chan_0_monitor_serialized , & chan_1_monitor_serialized ] , third_persister , third_new_chain_monitor , third_nodes_0_deserialized ) ;
nodes [ 1 ] . node . peer_disconnected ( & nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . test_process_background_events ( ) ;
check_added_monitors ( & nodes [ 0 ] , 1 ) ;
reconnect_nodes ( ReconnectArgs ::new ( & nodes [ 0 ] , & nodes [ 1 ] ) ) ;
match nodes [ 0 ] . node . send_payment_with_route ( & new_route , payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) , payment_id ) {
Err ( PaymentSendFailure ::DuplicatePayment ) = > { } ,
_ = > panic! ( " Unexpected error " )
}
assert! ( nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) . is_empty ( ) ) ;
}
#[ test ]
fn test_completed_payment_not_retryable_on_reload ( ) {
do_test_completed_payment_not_retryable_on_reload ( true ) ;
do_test_completed_payment_not_retryable_on_reload ( false ) ;
}
fn do_test_dup_htlc_onchain_fails_on_reload ( persist_manager_post_event : bool , confirm_commitment_tx : bool , payment_timeout : bool ) {
// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
// having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
// the ChannelMonitor tells it to.
//
// If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
// ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
// duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
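// The test parameters are:
//  * persist_manager_post_event - whether the ChannelManager snapshot we later reload from is
//    taken before or after the on-chain HTLC resolution event has been handed to the user,
//  * confirm_commitment_tx - whether the commitment transaction is buried BREAKDOWN_TIMEOUT - 1
//    blocks deep before the HTLC is resolved,
//  * payment_timeout - whether the HTLC resolves via our own HTLC-Timeout transaction (a payment
//    failure) rather than the counterparty's on-chain claim (a payment success).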
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let persister ;
let new_chain_monitor ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None ] ) ;
let nodes_0_deserialized ;
let mut nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
let ( _ , _ , chan_id , funding_tx ) = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) ;
// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
// nodes[0].
let ( payment_preimage , payment_hash , _ ) = route_payment ( & nodes [ 0 ] , & [ & nodes [ 1 ] ] , 10_000_000 ) ;
nodes [ 0 ] . node . force_close_broadcasting_latest_txn ( & nodes [ 0 ] . node . list_channels ( ) [ 0 ] . channel_id , & nodes [ 1 ] . node . get_our_node_id ( ) ) . unwrap ( ) ;
check_closed_broadcast! ( nodes [ 0 ] , true ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
check_closed_event! ( nodes [ 0 ] , 1 , ClosureReason ::HolderForceClosed , [ nodes [ 1 ] . node . get_our_node_id ( ) ] , 100000 ) ;
nodes [ 0 ] . node . peer_disconnected ( & nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . peer_disconnected ( & nodes [ 0 ] . node . get_our_node_id ( ) ) ;
// Connect blocks until the CLTV timeout is up so that we get an HTLC-Timeout transaction
connect_blocks ( & nodes [ 0 ] , TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1 ) ;
let node_txn = nodes [ 0 ] . tx_broadcaster . txn_broadcasted . lock ( ) . unwrap ( ) . split_off ( 0 ) ;
assert_eq! ( node_txn . len ( ) , 3 ) ;
assert_eq! ( node_txn [ 0 ] . txid ( ) , node_txn [ 1 ] . txid ( ) ) ;
check_spends! ( node_txn [ 1 ] , funding_tx ) ;
check_spends! ( node_txn [ 2 ] , node_txn [ 1 ] ) ;
let timeout_txn = vec! [ node_txn [ 2 ] . clone ( ) ] ;
nodes [ 1 ] . node . claim_funds ( payment_preimage ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
expect_payment_claimed! ( nodes [ 1 ] , payment_hash , 10_000_000 ) ;
connect_block ( & nodes [ 1 ] , & create_dummy_block ( nodes [ 1 ] . best_block_hash ( ) , 42 , vec! [ node_txn [ 1 ] . clone ( ) ] ) ) ;
check_closed_broadcast! ( nodes [ 1 ] , true ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
check_closed_event! ( nodes [ 1 ] , 1 , ClosureReason ::CommitmentTxConfirmed , [ nodes [ 0 ] . node . get_our_node_id ( ) ] , 100000 ) ;
let claim_txn = nodes [ 1 ] . tx_broadcaster . txn_broadcasted . lock ( ) . unwrap ( ) . split_off ( 0 ) ;
assert_eq! ( claim_txn . len ( ) , 1 ) ;
check_spends! ( claim_txn [ 0 ] , node_txn [ 1 ] ) ;
connect_block ( & nodes [ 0 ] , & create_dummy_block ( nodes [ 0 ] . best_block_hash ( ) , 42 , vec! [ node_txn [ 1 ] . clone ( ) ] ) ) ;
if confirm_commitment_tx {
connect_blocks ( & nodes [ 0 ] , BREAKDOWN_TIMEOUT as u32 - 1 ) ;
}
let claim_block = create_dummy_block ( nodes [ 0 ] . best_block_hash ( ) , 42 , if payment_timeout { timeout_txn } else { vec! [ claim_txn [ 0 ] . clone ( ) ] } ) ;
if payment_timeout {
assert! ( confirm_commitment_tx ) ; // Otherwise we're spending below our CSV!
connect_block ( & nodes [ 0 ] , & claim_block ) ;
connect_blocks ( & nodes [ 0 ] , ANTI_REORG_DELAY - 2 ) ;
}
// Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
// returning InProgress. This should cause the claim event to never make its way to the
// ChannelManager.
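// (While the update status is `InProgress`, the ChainMonitor considers the monitor persistence
// incomplete and holds the resulting monitor event back; it is only released once we call
// `channel_monitor_updated` for each queued update below.)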
chanmon_cfgs [ 0 ] . persister . chain_sync_monitor_persistences . lock ( ) . unwrap ( ) . clear ( ) ;
chanmon_cfgs [ 0 ] . persister . set_update_ret ( ChannelMonitorUpdateStatus ::InProgress ) ;
if payment_timeout {
connect_blocks ( & nodes [ 0 ] , 1 ) ;
} else {
connect_block ( & nodes [ 0 ] , & claim_block ) ;
}
let funding_txo = OutPoint { txid : funding_tx . txid ( ) , index : 0 } ;
let mon_updates : Vec < _ > = chanmon_cfgs [ 0 ] . persister . chain_sync_monitor_persistences . lock ( ) . unwrap ( )
. get_mut ( & funding_txo ) . unwrap ( ) . drain ( ) . collect ( ) ;
// If we are using chain::Confirm instead of chain::Listen, we will get the same update twice.
// If we're testing connection idempotency we may get substantially more.
assert! ( mon_updates . len ( ) > = 1 ) ;
assert! ( nodes [ 0 ] . chain_monitor . release_pending_monitor_events ( ) . is_empty ( ) ) ;
assert! ( nodes [ 0 ] . node . get_and_clear_pending_events ( ) . is_empty ( ) ) ;
// If we persist the ChannelManager here, we should get the PaymentSent event after
// deserialization.
let mut chan_manager_serialized = Vec ::new ( ) ;
if ! persist_manager_post_event {
chan_manager_serialized = nodes [ 0 ] . node . encode ( ) ;
}
// Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
// payment sent event.
chanmon_cfgs [ 0 ] . persister . set_update_ret ( ChannelMonitorUpdateStatus ::Completed ) ;
let chan_0_monitor_serialized = get_monitor! ( nodes [ 0 ] , chan_id ) . encode ( ) ;
for update in mon_updates {
nodes [ 0 ] . chain_monitor . chain_monitor . channel_monitor_updated ( funding_txo , update ) . unwrap ( ) ;
}
if payment_timeout {
expect_payment_failed! ( nodes [ 0 ] , payment_hash , false ) ;
} else {
expect_payment_sent ( & nodes [ 0 ] , payment_preimage , None , true , false ) ;
}
// If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
// twice.
if persist_manager_post_event {
chan_manager_serialized = nodes [ 0 ] . node . encode ( ) ;
}
// Now reload nodes[0]...
reload_node! ( nodes [ 0 ] , & chan_manager_serialized , & [ & chan_0_monitor_serialized ] , persister , new_chain_monitor , nodes_0_deserialized ) ;
if persist_manager_post_event {
assert! ( nodes [ 0 ] . node . get_and_clear_pending_events ( ) . is_empty ( ) ) ;
} else if payment_timeout {
expect_payment_failed! ( nodes [ 0 ] , payment_hash , false ) ;
} else {
expect_payment_sent ( & nodes [ 0 ] , payment_preimage , None , true , false ) ;
}
// Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
// which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
// payment events should kick in, leaving us with no pending events here.
let height = nodes [ 0 ] . blocks . lock ( ) . unwrap ( ) . len ( ) as u32 - 1 ;
nodes [ 0 ] . chain_monitor . chain_monitor . block_connected ( & claim_block , height ) ;
assert! ( nodes [ 0 ] . node . get_and_clear_pending_events ( ) . is_empty ( ) ) ;
check_added_monitors ( & nodes [ 0 ] , 1 ) ;
}
#[ test ]
fn test_dup_htlc_onchain_fails_on_reload ( ) {
do_test_dup_htlc_onchain_fails_on_reload ( true , true , true ) ;
do_test_dup_htlc_onchain_fails_on_reload ( true , true , false ) ;
do_test_dup_htlc_onchain_fails_on_reload ( true , false , false ) ;
do_test_dup_htlc_onchain_fails_on_reload ( false , true , true ) ;
do_test_dup_htlc_onchain_fails_on_reload ( false , true , false ) ;
do_test_dup_htlc_onchain_fails_on_reload ( false , false , false ) ;
}
#[ test ]
fn test_fulfill_restart_failure ( ) {
// When we receive an update_fulfill_htlc message, we immediately consider the HTLC fully
// fulfilled. At this point, the peer can reconnect and decide to either fulfill the HTLC
// again, or fail it, giving us free money.
//
// Of course, they probably won't fail it and give us free money, but because we have code to
// handle it, we should test the logic for it anyway. We do that here.
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let persister ;
let new_chain_monitor ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None ] ) ;
let nodes_1_deserialized ;
let mut nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
let chan_id = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 2 ;
let ( payment_preimage , payment_hash , _ ) = route_payment ( & nodes [ 0 ] , & [ & nodes [ 1 ] ] , 100_000 ) ;
// The simplest way to get a failure after a fulfill is to reload nodes[1] from a state
// pre-fulfill, which we do by serializing it here.
let chan_manager_serialized = nodes [ 1 ] . node . encode ( ) ;
let chan_0_monitor_serialized = get_monitor! ( nodes [ 1 ] , chan_id ) . encode ( ) ;
nodes [ 1 ] . node . claim_funds ( payment_preimage ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
expect_payment_claimed! ( nodes [ 1 ] , payment_hash , 100_000 ) ;
let htlc_fulfill_updates = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_update_fulfill_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & htlc_fulfill_updates . update_fulfill_htlcs [ 0 ] ) ;
expect_payment_sent ( & nodes [ 0 ] , payment_preimage , None , false , false ) ;
// Now reload nodes[1]...
reload_node! ( nodes [ 1 ] , & chan_manager_serialized , & [ & chan_0_monitor_serialized ] , persister , new_chain_monitor , nodes_1_deserialized ) ;
nodes [ 0 ] . node . peer_disconnected ( & nodes [ 1 ] . node . get_our_node_id ( ) ) ;
reconnect_nodes ( ReconnectArgs ::new ( & nodes [ 0 ] , & nodes [ 1 ] ) ) ;
nodes [ 1 ] . node . fail_htlc_backwards ( & payment_hash ) ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( nodes [ 1 ] , vec! [ HTLCDestination ::FailedPayment { payment_hash } ] ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let htlc_fail_updates = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & htlc_fail_updates . update_fail_htlcs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , htlc_fail_updates . commitment_signed , false ) ;
// nodes[0] shouldn't generate any events here, while it just got a payment failure completion
// it had already considered the payment fulfilled, and now they just got free money.
assert! ( nodes [ 0 ] . node . get_and_clear_pending_events ( ) . is_empty ( ) ) ;
}
#[ test ]
fn get_ldk_payment_preimage ( ) {
// Ensure that `ChannelManager::get_payment_preimage` can successfully be used to claim a payment.
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None ] ) ;
let mut nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) ;
let amt_msat = 60_000 ;
let expiry_secs = 60 * 60 ;
let ( payment_hash , payment_secret ) = nodes [ 1 ] . node . create_inbound_payment ( Some ( amt_msat ) , expiry_secs , None ) . unwrap ( ) ;
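// `create_inbound_payment` has LDK generate and store the payment preimage internally; we never
// see it here and instead fetch it via `get_payment_preimage` once the HTLC is in flight below.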
let payment_params = PaymentParameters ::from_node_id ( nodes [ 1 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
. with_bolt11_features ( nodes [ 1 ] . node . invoice_features ( ) ) . unwrap ( ) ;
let scorer = test_utils ::TestScorer ::new ( ) ;
let keys_manager = test_utils ::TestKeysInterface ::new ( & [ 0 u8 ; 32 ] , Network ::Testnet ) ;
let random_seed_bytes = keys_manager . get_secure_random_bytes ( ) ;
let route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
let route = get_route ( & nodes [ 0 ] . node . get_our_node_id ( ) , & route_params ,
& nodes [ 0 ] . network_graph . read_only ( ) ,
Some ( & nodes [ 0 ] . node . list_usable_channels ( ) . iter ( ) . collect ::< Vec < _ > > ( ) ) , nodes [ 0 ] . logger ,
& scorer , & ( ) , & random_seed_bytes ) . unwrap ( ) ;
nodes [ 0 ] . node . send_payment_with_route ( & route , payment_hash ,
RecipientOnionFields ::secret_only ( payment_secret ) , PaymentId ( payment_hash . 0 ) ) . unwrap ( ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
// Make sure to use `get_payment_preimage`
let payment_preimage = nodes [ 1 ] . node . get_payment_preimage ( payment_hash , payment_secret ) . unwrap ( ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
pass_along_path ( & nodes [ 0 ] , & [ & nodes [ 1 ] ] , amt_msat , payment_hash , Some ( payment_secret ) , events . pop ( ) . unwrap ( ) , true , Some ( payment_preimage ) ) ;
claim_payment_along_route ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] ] ] , false , payment_preimage ) ;
}
#[ test ]
fn sent_probe_is_probe_of_sending_node ( ) {
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , None , None ] ) ;
let nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) ;
create_announced_chan_between_nodes ( & nodes , 1 , 2 ) ;
// First check we refuse to build a single-hop probe
let ( route , _ , _ , _ ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 1 ] , 100_000 ) ;
assert! ( nodes [ 0 ] . node . send_probe ( route . paths [ 0 ] . clone ( ) ) . is_err ( ) ) ;
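// (Presumably a single-hop probe is rejected because probing only our own direct channel tells
// us nothing we don't already know locally.)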
// Then build an actual two-hop probing path
let ( route , _ , _ , _ ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 2 ] , 100_000 ) ;
match nodes [ 0 ] . node . send_probe ( route . paths [ 0 ] . clone ( ) ) {
Ok ( ( payment_hash , payment_id ) ) = > {
assert! ( nodes [ 0 ] . node . payment_is_probe ( & payment_hash , & payment_id ) ) ;
assert! ( ! nodes [ 1 ] . node . payment_is_probe ( & payment_hash , & payment_id ) ) ;
assert! ( ! nodes [ 2 ] . node . payment_is_probe ( & payment_hash , & payment_id ) ) ;
} ,
_ = > panic! ( ) ,
}
get_htlc_update_msgs! ( nodes [ 0 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
}
#[ test ]
fn successful_probe_yields_event ( ) {
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , None , None ] ) ;
let nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) ;
create_announced_chan_between_nodes ( & nodes , 1 , 2 ) ;
let ( route , _ , _ , _ ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 2 ] , 100_000 ) ;
let ( payment_hash , payment_id ) = nodes [ 0 ] . node . send_probe ( route . paths [ 0 ] . clone ( ) ) . unwrap ( ) ;
// node[0] -- update_add_htlcs -> node[1]
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let updates = get_htlc_update_msgs! ( nodes [ 0 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
let probe_event = SendEvent ::from_commitment_update ( nodes [ 1 ] . node . get_our_node_id ( ) , updates ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & probe_event . msgs [ 0 ] ) ;
check_added_monitors! ( nodes [ 1 ] , 0 ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , probe_event . commitment_msg , false ) ;
expect_pending_htlcs_forwardable! ( nodes [ 1 ] ) ;
// node[1] -- update_add_htlcs -> node[2]
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let updates = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 2 ] . node . get_our_node_id ( ) ) ;
let probe_event = SendEvent ::from_commitment_update ( nodes [ 1 ] . node . get_our_node_id ( ) , updates ) ;
nodes [ 2 ] . node . handle_update_add_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & probe_event . msgs [ 0 ] ) ;
check_added_monitors! ( nodes [ 2 ] , 0 ) ;
commitment_signed_dance! ( nodes [ 2 ] , nodes [ 1 ] , probe_event . commitment_msg , true , true ) ;
// node[1] <- update_fail_htlcs -- node[2]
let updates = get_htlc_update_msgs! ( nodes [ 2 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_update_fail_htlc ( & nodes [ 2 ] . node . get_our_node_id ( ) , & updates . update_fail_htlcs [ 0 ] ) ;
check_added_monitors! ( nodes [ 1 ] , 0 ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 2 ] , updates . commitment_signed , true ) ;
// node[0] <- update_fail_htlcs -- node[1]
let updates = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & updates . update_fail_htlcs [ 0 ] ) ;
check_added_monitors! ( nodes [ 0 ] , 0 ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , updates . commitment_signed , false ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match events . drain ( .. ) . next ( ) . unwrap ( ) {
crate ::events ::Event ::ProbeSuccessful { payment_id : ev_pid , payment_hash : ev_ph , .. } = > {
assert_eq! ( payment_id , ev_pid ) ;
assert_eq! ( payment_hash , ev_ph ) ;
} ,
_ = > panic! ( ) ,
} ;
assert! ( ! nodes [ 0 ] . node . has_pending_payments ( ) ) ;
}
#[ test ]
fn failed_probe_yields_event ( ) {
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , None , None ] ) ;
let nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) ;
create_announced_chan_between_nodes_with_value ( & nodes , 1 , 2 , 100000 , 90000000 ) ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 2 ] . node . get_our_node_id ( ) , 42 ) ;
let ( route , _ , _ , _ ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 2 ] , payment_params , 9_998_000 ) ;
let ( payment_hash , payment_id ) = nodes [ 0 ] . node . send_probe ( route . paths [ 0 ] . clone ( ) ) . unwrap ( ) ;
// node[0] -- update_add_htlcs -> node[1]
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let updates = get_htlc_update_msgs! ( nodes [ 0 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
let probe_event = SendEvent ::from_commitment_update ( nodes [ 1 ] . node . get_our_node_id ( ) , updates ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & probe_event . msgs [ 0 ] ) ;
check_added_monitors! ( nodes [ 1 ] , 0 ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , probe_event . commitment_msg , false ) ;
expect_pending_htlcs_forwardable! ( nodes [ 1 ] ) ;
// node[0] <- update_fail_htlcs -- node[1]
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let updates = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
// Skip the PendingHTLCsForwardable event
let _events = nodes [ 1 ] . node . get_and_clear_pending_events ( ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & updates . update_fail_htlcs [ 0 ] ) ;
check_added_monitors! ( nodes [ 0 ] , 0 ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , updates . commitment_signed , false ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match events . drain ( .. ) . next ( ) . unwrap ( ) {
crate ::events ::Event ::ProbeFailed { payment_id : ev_pid , payment_hash : ev_ph , .. } = > {
assert_eq! ( payment_id , ev_pid ) ;
assert_eq! ( payment_hash , ev_ph ) ;
} ,
_ = > panic! ( ) ,
} ;
assert! ( ! nodes [ 0 ] . node . has_pending_payments ( ) ) ;
}
#[ test ]
fn onchain_failed_probe_yields_event ( ) {
// Tests that an attempt to probe over a channel that is eventually closed results in a failure
// event.
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , None , None ] ) ;
let nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
let chan_id = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 2 ;
create_announced_chan_between_nodes ( & nodes , 1 , 2 ) ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 2 ] . node . get_our_node_id ( ) , 42 ) ;
// Send a dust HTLC, which will be treated as if it timed out once the channel hits the chain.
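// (Dust HTLCs are too small to be claimed on-chain and so get no output in the commitment
// transaction; once that commitment confirms, the HTLC is simply considered failed without any
// HTLC-Timeout transaction.)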
let ( route , _ , _ , _ ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 2 ] , payment_params , 1_000 ) ;
let ( payment_hash , payment_id ) = nodes [ 0 ] . node . send_probe ( route . paths [ 0 ] . clone ( ) ) . unwrap ( ) ;
// node[0] -- update_add_htlcs -> node[1]
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let updates = get_htlc_update_msgs! ( nodes [ 0 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
let probe_event = SendEvent ::from_commitment_update ( nodes [ 1 ] . node . get_our_node_id ( ) , updates ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & probe_event . msgs [ 0 ] ) ;
check_added_monitors! ( nodes [ 1 ] , 0 ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , probe_event . commitment_msg , false ) ;
expect_pending_htlcs_forwardable! ( nodes [ 1 ] ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let _ = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 2 ] . node . get_our_node_id ( ) ) ;
// Don't bother forwarding the HTLC onwards and just confirm the force-close transaction on
// Node A, which after 6 confirmations should result in a probe failure event.
let bs_txn = get_local_commitment_txn! ( nodes [ 1 ] , chan_id ) ;
confirm_transaction ( & nodes [ 0 ] , & bs_txn [ 0 ] ) ;
check_closed_broadcast! ( & nodes [ 0 ] , true ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 2 ) ;
let mut found_probe_failed = false ;
for event in events . drain ( .. ) {
match event {
Event ::ProbeFailed { payment_id : ev_pid , payment_hash : ev_ph , .. } = > {
assert_eq! ( payment_id , ev_pid ) ;
assert_eq! ( payment_hash , ev_ph ) ;
found_probe_failed = true ;
} ,
Event ::ChannelClosed { .. } = > { } ,
_ = > panic! ( ) ,
}
}
assert! ( found_probe_failed ) ;
assert! ( ! nodes [ 0 ] . node . has_pending_payments ( ) ) ;
}
#[ test ]
fn claimed_send_payment_idempotent ( ) {
// Tests that `send_payment` (and friends) are (reasonably) idempotent.
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None ] ) ;
let nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 2 ;
let ( route , second_payment_hash , second_payment_preimage , second_payment_secret ) = get_route_and_payment_hash! ( nodes [ 0 ] , nodes [ 1 ] , 100_000 ) ;
let ( first_payment_preimage , _ , _ , payment_id ) = send_along_route ( & nodes [ 0 ] , route . clone ( ) , & [ & nodes [ 1 ] ] , 100_000 ) ;
macro_rules ! check_send_rejected {
( ) = > {
// If we try to resend a new payment with a different payment_hash but with the same
// payment_id, it should be rejected.
let send_result = nodes [ 0 ] . node . send_payment_with_route ( & route , second_payment_hash ,
RecipientOnionFields ::secret_only ( second_payment_secret ) , payment_id ) ;
match send_result {
Err ( PaymentSendFailure ::DuplicatePayment ) = > { } ,
_ = > panic! ( " Unexpected send result: {:?} " , send_result ) ,
}
// Further, if we try to send a spontaneous payment with the same payment_id it should
// also be rejected.
let send_result = nodes [ 0 ] . node . send_spontaneous_payment (
& route , None , RecipientOnionFields ::spontaneous_empty ( ) , payment_id ) ;
match send_result {
Err ( PaymentSendFailure ::DuplicatePayment ) = > { } ,
_ = > panic! ( " Unexpected send result: {:?} " , send_result ) ,
}
}
}
check_send_rejected! ( ) ;
// Claim the payment backwards, but note that the PaymentSent event is still pending and has
// not been seen by the user. At this point, from the user perspective nothing has changed, so
// we must remain just as idempotent as we were before.
do_claim_payment_along_route ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] ] ] , false , first_payment_preimage ) ;
for _ in 0 ..= IDEMPOTENCY_TIMEOUT_TICKS {
nodes [ 0 ] . node . timer_tick_occurred ( ) ;
}
check_send_rejected! ( ) ;
// Once the user sees and handles the `PaymentSent` event, we expect them to no longer call
// `send_payment`, and our idempotency guarantees are off - they should have atomically marked
// the payment complete. However, they could have called `send_payment` while the event was
// being processed, leading to a race in our idempotency guarantees. Thus, even immediately
// after the event is handled, a duplicate payment should still be rejected.
expect_payment_sent! ( & nodes [ 0 ] , first_payment_preimage , Some ( 0 ) ) ;
check_send_rejected! ( ) ;
// If relatively little time has passed, a duplicate payment should still fail.
nodes [ 0 ] . node . timer_tick_occurred ( ) ;
check_send_rejected! ( ) ;
// However, after some time has passed (at least more than the one timer tick above), a
// duplicate payment should go through, as ChannelManager should no longer have any remaining
// references to the old payment data.
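// (IDEMPOTENCY_TIMEOUT_TICKS is the number of `timer_tick_occurred` calls for which a
// fully-resolved PaymentId is remembered for idempotency; only after that many ticks can the
// same id be reused for a fresh payment.)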
for _ in 0 .. IDEMPOTENCY_TIMEOUT_TICKS {
nodes [ 0 ] . node . timer_tick_occurred ( ) ;
}
nodes [ 0 ] . node . send_payment_with_route ( & route , second_payment_hash ,
RecipientOnionFields ::secret_only ( second_payment_secret ) , payment_id ) . unwrap ( ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
pass_along_route ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] ] ] , 100_000 , second_payment_hash , second_payment_secret ) ;
claim_payment ( & nodes [ 0 ] , & [ & nodes [ 1 ] ] , second_payment_preimage ) ;
}
#[ test ]
fn abandoned_send_payment_idempotent ( ) {
// Tests that `send_payment` (and friends) allow duplicate PaymentIds immediately after
// abandon_payment.
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None ] ) ;
let nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 2 ;
let ( route , second_payment_hash , second_payment_preimage , second_payment_secret ) = get_route_and_payment_hash! ( nodes [ 0 ] , nodes [ 1 ] , 100_000 ) ;
let ( _ , first_payment_hash , _ , payment_id ) = send_along_route ( & nodes [ 0 ] , route . clone ( ) , & [ & nodes [ 1 ] ] , 100_000 ) ;
macro_rules ! check_send_rejected {
( ) = > {
// If we try to resend a new payment with a different payment_hash but with the same
// payment_id, it should be rejected.
let send_result = nodes [ 0 ] . node . send_payment_with_route ( & route , second_payment_hash ,
RecipientOnionFields ::secret_only ( second_payment_secret ) , payment_id ) ;
match send_result {
Err ( PaymentSendFailure ::DuplicatePayment ) = > { } ,
_ = > panic! ( " Unexpected send result: {:?} " , send_result ) ,
}
// Further, if we try to send a spontaneous payment with the same payment_id it should
// also be rejected.
let send_result = nodes [ 0 ] . node . send_spontaneous_payment (
& route , None , RecipientOnionFields ::spontaneous_empty ( ) , payment_id ) ;
match send_result {
Err ( PaymentSendFailure ::DuplicatePayment ) = > { } ,
_ = > panic! ( " Unexpected send result: {:?} " , send_result ) ,
}
}
}
check_send_rejected! ( ) ;
nodes [ 1 ] . node . fail_htlc_backwards ( & first_payment_hash ) ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( nodes [ 1 ] , [ HTLCDestination ::FailedPayment { payment_hash : first_payment_hash } ] ) ;
// Until we abandon the payment upon path failure, no matter how many timer ticks pass, we still cannot reuse the
// PaymentId.
for _ in 0 ..= IDEMPOTENCY_TIMEOUT_TICKS {
nodes [ 0 ] . node . timer_tick_occurred ( ) ;
}
check_send_rejected! ( ) ;
pass_failed_payment_back ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] ] ] , false , first_payment_hash , PaymentFailureReason ::RecipientRejected ) ;
// However, we can reuse the PaymentId immediately after we `abandon_payment` upon passing the
// failed payment back.
nodes [ 0 ] . node . send_payment_with_route ( & route , second_payment_hash ,
RecipientOnionFields ::secret_only ( second_payment_secret ) , payment_id ) . unwrap ( ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
pass_along_route ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] ] ] , 100_000 , second_payment_hash , second_payment_secret ) ;
claim_payment ( & nodes [ 0 ] , & [ & nodes [ 1 ] ] , second_payment_preimage ) ;
}
#[ derive(PartialEq) ]
enum InterceptTest {
Forward ,
Fail ,
Timeout ,
}
#[ test ]
fn test_trivial_inflight_htlc_tracking ( ) {
// In this test, we test three scenarios:
// (1) Sending + claiming a payment successfully should return `None` when querying InFlightHtlcs
// (2) Sending a payment without claiming it should return the payment's value (500000) when querying InFlightHtlcs
// (3) After we claim the payment sent in (2), InFlightHtlcs should return `None` for the query.
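// Used liquidity is queried per hop, keyed by the (source, target) `NodeId` pair plus the hop's
// real short channel id; note the first hop's figure also includes the forwarding fee charged by
// the next hop.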
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , None , None ] ) ;
let nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
let ( _ , _ , chan_1_id , _ ) = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) ;
let ( _ , _ , chan_2_id , _ ) = create_announced_chan_between_nodes ( & nodes , 1 , 2 ) ;
// Send and claim the payment. Inflight HTLCs should be empty.
let payment_hash = send_payment ( & nodes [ 0 ] , & [ & nodes [ 1 ] , & nodes [ 2 ] ] , 500000 ) . 1 ;
let inflight_htlcs = node_chanmgrs [ 0 ] . compute_inflight_htlcs ( ) ;
{
let mut node_0_per_peer_lock ;
let mut node_0_peer_state_lock ;
let channel_1 = get_channel_ref! ( & nodes [ 0 ] , nodes [ 1 ] , node_0_per_peer_lock , node_0_peer_state_lock , chan_1_id ) ;
let chan_1_used_liquidity = inflight_htlcs . used_liquidity_msat (
& NodeId ::from_pubkey ( & nodes [ 0 ] . node . get_our_node_id ( ) ) ,
& NodeId ::from_pubkey ( & nodes [ 1 ] . node . get_our_node_id ( ) ) ,
channel_1 . context . get_short_channel_id ( ) . unwrap ( )
) ;
assert_eq! ( chan_1_used_liquidity , None ) ;
}
{
let mut node_1_per_peer_lock ;
let mut node_1_peer_state_lock ;
let channel_2 = get_channel_ref! ( & nodes [ 1 ] , nodes [ 2 ] , node_1_per_peer_lock , node_1_peer_state_lock , chan_2_id ) ;
let chan_2_used_liquidity = inflight_htlcs . used_liquidity_msat (
& NodeId ::from_pubkey ( & nodes [ 1 ] . node . get_our_node_id ( ) ) ,
& NodeId ::from_pubkey ( & nodes [ 2 ] . node . get_our_node_id ( ) ) ,
channel_2 . context . get_short_channel_id ( ) . unwrap ( )
) ;
assert_eq! ( chan_2_used_liquidity , None ) ;
}
let pending_payments = nodes [ 0 ] . node . list_recent_payments ( ) ;
assert_eq! ( pending_payments . len ( ) , 1 ) ;
assert_eq! ( pending_payments [ 0 ] , RecentPaymentDetails ::Fulfilled { payment_hash : Some ( payment_hash ) } ) ;
// Remove fulfilled payment
for _ in 0 ..= IDEMPOTENCY_TIMEOUT_TICKS {
nodes [ 0 ] . node . timer_tick_occurred ( ) ;
}
// Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
let ( payment_preimage , payment_hash , _ ) = route_payment ( & nodes [ 0 ] , & [ & nodes [ 1 ] , & nodes [ 2 ] ] , 500000 ) ;
let inflight_htlcs = node_chanmgrs [ 0 ] . compute_inflight_htlcs ( ) ;
{
let mut node_0_per_peer_lock ;
let mut node_0_peer_state_lock ;
let channel_1 = get_channel_ref! ( & nodes [ 0 ] , nodes [ 1 ] , node_0_per_peer_lock , node_0_peer_state_lock , chan_1_id ) ;
let chan_1_used_liquidity = inflight_htlcs . used_liquidity_msat (
& NodeId ::from_pubkey ( & nodes [ 0 ] . node . get_our_node_id ( ) ) ,
& NodeId ::from_pubkey ( & nodes [ 1 ] . node . get_our_node_id ( ) ) ,
channel_1 . context . get_short_channel_id ( ) . unwrap ( )
) ;
// First hop accounts for expected 1000 msat fee
assert_eq! ( chan_1_used_liquidity , Some ( 501000 ) ) ;
}
{
let mut node_1_per_peer_lock ;
let mut node_1_peer_state_lock ;
let channel_2 = get_channel_ref! ( & nodes [ 1 ] , nodes [ 2 ] , node_1_per_peer_lock , node_1_peer_state_lock , chan_2_id ) ;
let chan_2_used_liquidity = inflight_htlcs . used_liquidity_msat (
& NodeId ::from_pubkey ( & nodes [ 1 ] . node . get_our_node_id ( ) ) ,
& NodeId ::from_pubkey ( & nodes [ 2 ] . node . get_our_node_id ( ) ) ,
channel_2 . context . get_short_channel_id ( ) . unwrap ( )
) ;
assert_eq! ( chan_2_used_liquidity , Some ( 500000 ) ) ;
}
let pending_payments = nodes [ 0 ] . node . list_recent_payments ( ) ;
assert_eq! ( pending_payments . len ( ) , 1 ) ;
assert_eq! ( pending_payments [ 0 ] , RecentPaymentDetails ::Pending { payment_hash , total_msat : 500000 } ) ;
// Now, let's claim the payment. This should result in the used liquidity returning `None`.
claim_payment ( & nodes [ 0 ] , & [ & nodes [ 1 ] , & nodes [ 2 ] ] , payment_preimage ) ;
// Remove fulfilled payment
for _ in 0 ..= IDEMPOTENCY_TIMEOUT_TICKS {
nodes [ 0 ] . node . timer_tick_occurred ( ) ;
}
let inflight_htlcs = node_chanmgrs [ 0 ] . compute_inflight_htlcs ( ) ;
{
let mut node_0_per_peer_lock ;
let mut node_0_peer_state_lock ;
let channel_1 = get_channel_ref! ( & nodes [ 0 ] , nodes [ 1 ] , node_0_per_peer_lock , node_0_peer_state_lock , chan_1_id ) ;
let chan_1_used_liquidity = inflight_htlcs . used_liquidity_msat (
& NodeId ::from_pubkey ( & nodes [ 0 ] . node . get_our_node_id ( ) ) ,
& NodeId ::from_pubkey ( & nodes [ 1 ] . node . get_our_node_id ( ) ) ,
channel_1 . context . get_short_channel_id ( ) . unwrap ( )
) ;
assert_eq! ( chan_1_used_liquidity , None ) ;
}
{
let mut node_1_per_peer_lock ;
let mut node_1_peer_state_lock ;
let channel_2 = get_channel_ref! ( & nodes [ 1 ] , nodes [ 2 ] , node_1_per_peer_lock , node_1_peer_state_lock , chan_2_id ) ;
let chan_2_used_liquidity = inflight_htlcs . used_liquidity_msat (
& NodeId ::from_pubkey ( & nodes [ 1 ] . node . get_our_node_id ( ) ) ,
& NodeId ::from_pubkey ( & nodes [ 2 ] . node . get_our_node_id ( ) ) ,
channel_2 . context . get_short_channel_id ( ) . unwrap ( )
) ;
assert_eq! ( chan_2_used_liquidity , None ) ;
}
let pending_payments = nodes [ 0 ] . node . list_recent_payments ( ) ;
assert_eq! ( pending_payments . len ( ) , 0 ) ;
}
#[ test ]
fn test_holding_cell_inflight_htlcs ( ) {
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None ] ) ;
let mut nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
let channel_id = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 2 ;
let ( route , payment_hash_1 , _ , payment_secret_1 ) = get_route_and_payment_hash! ( nodes [ 0 ] , nodes [ 1 ] , 1000000 ) ;
let ( _ , payment_hash_2 , payment_secret_2 ) = get_payment_preimage_hash! ( nodes [ 1 ] ) ;
// Queue up two payments - one will be delivered right away, one immediately goes into the
// holding cell as nodes[0] is AwaitingRAA.
{
nodes [ 0 ] . node . send_payment_with_route ( & route , payment_hash_1 ,
RecipientOnionFields ::secret_only ( payment_secret_1 ) , PaymentId ( payment_hash_1 . 0 ) ) . unwrap ( ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
nodes [ 0 ] . node . send_payment_with_route ( & route , payment_hash_2 ,
RecipientOnionFields ::secret_only ( payment_secret_2 ) , PaymentId ( payment_hash_2 . 0 ) ) . unwrap ( ) ;
check_added_monitors! ( nodes [ 0 ] , 0 ) ;
}
let inflight_htlcs = node_chanmgrs [ 0 ] . compute_inflight_htlcs ( ) ;
{
let mut node_0_per_peer_lock ;
let mut node_0_peer_state_lock ;
let channel = get_channel_ref! ( & nodes [ 0 ] , nodes [ 1 ] , node_0_per_peer_lock , node_0_peer_state_lock , channel_id ) ;
let used_liquidity = inflight_htlcs . used_liquidity_msat (
& NodeId ::from_pubkey ( & nodes [ 0 ] . node . get_our_node_id ( ) ) ,
& NodeId ::from_pubkey ( & nodes [ 1 ] . node . get_our_node_id ( ) ) ,
channel . context . get_short_channel_id ( ) . unwrap ( )
) ;
assert_eq! ( used_liquidity , Some ( 2000000 ) ) ;
}
// Clear pending events so test doesn't throw a "Had excess message on node..." error
nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
}
#[ test ]
fn intercepted_payment ( ) {
// Test that detecting an intercept scid on payment forward will signal LDK to generate an
// intercept event, which the LSP can then use to either (a) open a JIT channel to forward the
// payment or (b) fail the payment.
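// Three variants are exercised below: the LSP (nodes[1]) forwarding the intercepted HTLC over a
// freshly-opened zero-conf JIT channel, the LSP explicitly failing it back, and the LSP doing
// nothing until the HTLC is automatically failed back after too many blocks.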
do_test_intercepted_payment ( InterceptTest ::Forward ) ;
do_test_intercepted_payment ( InterceptTest ::Fail ) ;
// Make sure that intercepted payments will be automatically failed back if too many blocks pass.
do_test_intercepted_payment ( InterceptTest ::Timeout ) ;
}
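// Illustrative sketch (not a test): the LSP-side handling exercised below. When an HTLC is
// routed to an intercept SCID, LDK surfaces `Event::HTLCIntercepted`; the node then either
// forwards it over a channel of its choosing (typically a just-opened JIT channel) or fails
// it back. `jit_channel_id`, `payee_node_id` and `forward` stand in for the LSP's own policy
// decisions; only APIs used in this file appear here.
#[allow(dead_code)]
fn sketch_handle_intercepted_htlcs(lsp: &Node<'_, '_, '_>, jit_channel_id: ChannelId,
	payee_node_id: bitcoin::secp256k1::PublicKey, forward: bool
) {
	for event in lsp.node.get_and_clear_pending_events() {
		if let crate::events::Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } = event {
			if forward {
				// Forward over the chosen channel, paying out the full expected outbound amount
				// (an LSP may instead skim a fee, as `accept_underpaying_htlcs_config` tests below).
				lsp.node.forward_intercepted_htlc(intercept_id, &jit_channel_id,
					payee_node_id, expected_outbound_amount_msat).unwrap();
			} else {
				// Give up on the forward and fail the HTLC back towards the sender.
				lsp.node.fail_intercepted_htlc(intercept_id).unwrap();
			}
		}
	}
}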
fn do_test_intercepted_payment ( test : InterceptTest ) {
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let mut zero_conf_chan_config = test_default_channel_config ( ) ;
zero_conf_chan_config . manually_accept_inbound_channels = true ;
let mut intercept_forwards_config = test_default_channel_config ( ) ;
intercept_forwards_config . accept_intercept_htlcs = true ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , Some ( intercept_forwards_config ) , Some ( zero_conf_chan_config ) ] ) ;
let nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
let scorer = test_utils ::TestScorer ::new ( ) ;
let random_seed_bytes = chanmon_cfgs [ 0 ] . keys_manager . get_secure_random_bytes ( ) ;
let _ = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 2 ;
let amt_msat = 100_000 ;
let intercept_scid = nodes [ 1 ] . node . get_intercept_scid ( ) ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 2 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
. with_route_hints ( vec! [
RouteHint ( vec! [ RouteHintHop {
src_node_id : nodes [ 1 ] . node . get_our_node_id ( ) ,
short_channel_id : intercept_scid ,
fees : RoutingFees {
base_msat : 1000 ,
proportional_millionths : 0 ,
} ,
cltv_expiry_delta : MIN_CLTV_EXPIRY_DELTA ,
htlc_minimum_msat : None ,
htlc_maximum_msat : None ,
} ] )
] ) . unwrap ( )
. with_bolt11_features ( nodes [ 2 ] . node . invoice_features ( ) ) . unwrap ( ) ;
let route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat , ) ;
let route = get_route ( & nodes [ 0 ] . node . get_our_node_id ( ) , & route_params ,
& nodes [ 0 ] . network_graph . read_only ( ) , None , nodes [ 0 ] . logger , & scorer , & ( ) ,
& random_seed_bytes , ) . unwrap ( ) ;
let ( payment_hash , payment_secret ) = nodes [ 2 ] . node . create_inbound_payment ( Some ( amt_msat ) , 60 * 60 , None ) . unwrap ( ) ;
nodes [ 0 ] . node . send_payment_with_route ( & route , payment_hash ,
RecipientOnionFields ::secret_only ( payment_secret ) , PaymentId ( payment_hash . 0 ) ) . unwrap ( ) ;
let payment_event = {
{
let mut added_monitors = nodes [ 0 ] . chain_monitor . added_monitors . lock ( ) . unwrap ( ) ;
assert_eq! ( added_monitors . len ( ) , 1 ) ;
added_monitors . clear ( ) ;
}
let mut events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
SendEvent ::from_event ( events . remove ( 0 ) )
} ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & payment_event . msgs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , & payment_event . commitment_msg , false , true ) ;
// Check that we generate the PaymentIntercepted event when an intercept forward is detected.
let events = nodes [ 1 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
let ( intercept_id , expected_outbound_amount_msat ) = match events [ 0 ] {
crate ::events ::Event ::HTLCIntercepted {
intercept_id , expected_outbound_amount_msat , payment_hash : pmt_hash , inbound_amount_msat , requested_next_hop_scid : short_channel_id
} = > {
assert_eq! ( pmt_hash , payment_hash ) ;
assert_eq! ( inbound_amount_msat , route . get_total_amount ( ) + route . get_total_fees ( ) ) ;
assert_eq! ( short_channel_id , intercept_scid ) ;
( intercept_id , expected_outbound_amount_msat )
} ,
_ = > panic! ( )
} ;
// Check for unknown channel id error.
let unknown_chan_id_err = nodes [ 1 ] . node . forward_intercepted_htlc ( intercept_id , & ChannelId ::from_bytes ( [ 42 ; 32 ] ) , nodes [ 2 ] . node . get_our_node_id ( ) , expected_outbound_amount_msat ) . unwrap_err ( ) ;
assert_eq! ( unknown_chan_id_err , APIError ::ChannelUnavailable {
err : format ! ( " Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening. " ,
log_bytes! ( [ 42 ; 32 ] ) , nodes [ 2 ] . node . get_our_node_id ( ) ) } ) ;
if test = = InterceptTest ::Fail {
// Ensure we can fail the intercepted payment back.
nodes [ 1 ] . node . fail_intercepted_htlc ( intercept_id ) . unwrap ( ) ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore! ( nodes [ 1 ] , vec! [ HTLCDestination ::UnknownNextHop { requested_forward_scid : intercept_scid } ] ) ;
nodes [ 1 ] . node . process_pending_htlc_forwards ( ) ;
let update_fail = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
check_added_monitors! ( & nodes [ 1 ] , 1 ) ;
assert! ( update_fail . update_fail_htlcs . len ( ) = = 1 ) ;
let fail_msg = update_fail . update_fail_htlcs [ 0 ] . clone ( ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & fail_msg ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , update_fail . commitment_signed , false ) ;
// Ensure the payment fails with the expected error.
let fail_conditions = PaymentFailedConditions ::new ( )
. blamed_scid ( intercept_scid )
. blamed_chan_closed ( true )
. expected_htlc_error_data ( 0x4000 | 10 , & [ ] ) ;
expect_payment_failed_conditions ( & nodes [ 0 ] , payment_hash , false , fail_conditions ) ;
} else if test = = InterceptTest ::Forward {
// Check that we'll fail as expected when sending to a channel that isn't in `ChannelReady` yet.
let temp_chan_id = nodes [ 1 ] . node . create_channel ( nodes [ 2 ] . node . get_our_node_id ( ) , 100_000 , 0 , 42 , None ) . unwrap ( ) ;
let unusable_chan_err = nodes [ 1 ] . node . forward_intercepted_htlc ( intercept_id , & temp_chan_id , nodes [ 2 ] . node . get_our_node_id ( ) , expected_outbound_amount_msat ) . unwrap_err ( ) ;
assert_eq! ( unusable_chan_err , APIError ::ChannelUnavailable {
err : format ! ( " Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening. " ,
& temp_chan_id , nodes [ 2 ] . node . get_our_node_id ( ) ) } ) ;
assert_eq! ( nodes [ 1 ] . node . get_and_clear_pending_msg_events ( ) . len ( ) , 1 ) ;
// Open the just-in-time channel so the payment can then be forwarded.
let ( _ , channel_id ) = open_zero_conf_channel ( & nodes [ 1 ] , & nodes [ 2 ] , None ) ;
// Finally, forward the intercepted payment through and claim it.
nodes [ 1 ] . node . forward_intercepted_htlc ( intercept_id , & channel_id , nodes [ 2 ] . node . get_our_node_id ( ) , expected_outbound_amount_msat ) . unwrap ( ) ;
expect_pending_htlcs_forwardable! ( nodes [ 1 ] ) ;
let payment_event = {
{
let mut added_monitors = nodes [ 1 ] . chain_monitor . added_monitors . lock ( ) . unwrap ( ) ;
assert_eq! ( added_monitors . len ( ) , 1 ) ;
added_monitors . clear ( ) ;
}
let mut events = nodes [ 1 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
SendEvent ::from_event ( events . remove ( 0 ) )
} ;
nodes [ 2 ] . node . handle_update_add_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & payment_event . msgs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 2 ] , nodes [ 1 ] , & payment_event . commitment_msg , false , true ) ;
expect_pending_htlcs_forwardable! ( nodes [ 2 ] ) ;
let payment_preimage = nodes [ 2 ] . node . get_payment_preimage ( payment_hash , payment_secret ) . unwrap ( ) ;
expect_payment_claimable! ( & nodes [ 2 ] , payment_hash , payment_secret , amt_msat , Some ( payment_preimage ) , nodes [ 2 ] . node . get_our_node_id ( ) ) ;
do_claim_payment_along_route ( & nodes [ 0 ] , & vec! ( & vec! ( & nodes [ 1 ] , & nodes [ 2 ] ) [ .. ] ) , false , payment_preimage ) ;
let events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 2 ) ;
match events [ 0 ] {
Event ::PaymentSent { payment_preimage : ref ev_preimage , payment_hash : ref ev_hash , ref fee_paid_msat , .. } = > {
assert_eq! ( payment_preimage , * ev_preimage ) ;
assert_eq! ( payment_hash , * ev_hash ) ;
assert_eq! ( fee_paid_msat , & Some ( 1000 ) ) ;
} ,
_ = > panic! ( " Unexpected event " )
}
match events [ 1 ] {
Event ::PaymentPathSuccessful { payment_hash : hash , .. } = > {
assert_eq! ( hash , Some ( payment_hash ) ) ;
} ,
_ = > panic! ( " Unexpected event " )
}
check_added_monitors ( & nodes [ 0 ] , 1 ) ;
} else if test = = InterceptTest ::Timeout {
let mut block = create_dummy_block ( nodes [ 0 ] . best_block_hash ( ) , 42 , Vec ::new ( ) ) ;
connect_block ( & nodes [ 0 ] , & block ) ;
connect_block ( & nodes [ 1 ] , & block ) ;
for _ in 0 .. TEST_FINAL_CLTV {
block . header . prev_blockhash = block . block_hash ( ) ;
connect_block ( & nodes [ 0 ] , & block ) ;
connect_block ( & nodes [ 1 ] , & block ) ;
}
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( nodes [ 1 ] , vec! [ HTLCDestination ::InvalidForward { requested_forward_scid : intercept_scid } ] ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let htlc_timeout_updates = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
assert! ( htlc_timeout_updates . update_add_htlcs . is_empty ( ) ) ;
assert_eq! ( htlc_timeout_updates . update_fail_htlcs . len ( ) , 1 ) ;
assert! ( htlc_timeout_updates . update_fail_malformed_htlcs . is_empty ( ) ) ;
assert! ( htlc_timeout_updates . update_fee . is_none ( ) ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & htlc_timeout_updates . update_fail_htlcs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , htlc_timeout_updates . commitment_signed , false ) ;
expect_payment_failed! ( nodes [ 0 ] , payment_hash , false , 0x2000 | 2 , [ ] ) ;
// Check for unknown intercept id error.
let ( _ , channel_id ) = open_zero_conf_channel ( & nodes [ 1 ] , & nodes [ 2 ] , None ) ;
let unknown_intercept_id_err = nodes [ 1 ] . node . forward_intercepted_htlc ( intercept_id , & channel_id , nodes [ 2 ] . node . get_our_node_id ( ) , expected_outbound_amount_msat ) . unwrap_err ( ) ;
assert_eq! ( unknown_intercept_id_err , APIError ::APIMisuseError { err : format ! ( " Payment with intercept id {} not found " , log_bytes! ( intercept_id . 0 ) ) } ) ;
let unknown_intercept_id_err = nodes [ 1 ] . node . fail_intercepted_htlc ( intercept_id ) . unwrap_err ( ) ;
assert_eq! ( unknown_intercept_id_err , APIError ::APIMisuseError { err : format ! ( " Payment with intercept id {} not found " , log_bytes! ( intercept_id . 0 ) ) } ) ;
}
}
#[ test ]
fn accept_underpaying_htlcs_config ( ) {
do_accept_underpaying_htlcs_config ( 1 ) ;
do_accept_underpaying_htlcs_config ( 2 ) ;
do_accept_underpaying_htlcs_config ( 3 ) ;
}
fn do_accept_underpaying_htlcs_config ( num_mpp_parts : usize ) {
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let mut intercept_forwards_config = test_default_channel_config ( ) ;
intercept_forwards_config . accept_intercept_htlcs = true ;
let mut underpay_config = test_default_channel_config ( ) ;
underpay_config . channel_config . accept_underpaying_htlcs = true ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , Some ( intercept_forwards_config ) , Some ( underpay_config ) ] ) ;
let nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
let mut chan_ids = Vec ::new ( ) ;
for _ in 0 .. num_mpp_parts {
let _ = create_announced_chan_between_nodes_with_value ( & nodes , 0 , 1 , 10_000 , 0 ) ;
let channel_id = create_unannounced_chan_between_nodes_with_value ( & nodes , 1 , 2 , 2_000_000 , 0 ) . 0. channel_id ;
chan_ids . push ( channel_id ) ;
}
// Send the initial payment.
let amt_msat = 900_000 ;
let skimmed_fee_msat = 20 ;
let mut route_hints = Vec ::new ( ) ;
for _ in 0 .. num_mpp_parts {
route_hints . push ( RouteHint ( vec! [ RouteHintHop {
src_node_id : nodes [ 1 ] . node . get_our_node_id ( ) ,
short_channel_id : nodes [ 1 ] . node . get_intercept_scid ( ) ,
fees : RoutingFees {
base_msat : 1000 ,
proportional_millionths : 0 ,
} ,
cltv_expiry_delta : MIN_CLTV_EXPIRY_DELTA ,
htlc_minimum_msat : None ,
htlc_maximum_msat : Some ( amt_msat / num_mpp_parts as u64 + 5 ) ,
} ] ) ) ;
}
let payment_params = PaymentParameters ::from_node_id ( nodes [ 2 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
. with_route_hints ( route_hints ) . unwrap ( )
. with_bolt11_features ( nodes [ 2 ] . node . invoice_features ( ) ) . unwrap ( ) ;
let route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
let ( payment_hash , payment_secret ) = nodes [ 2 ] . node . create_inbound_payment ( Some ( amt_msat ) , 60 * 60 , None ) . unwrap ( ) ;
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 0 ) ) . unwrap ( ) ;
check_added_monitors! ( nodes [ 0 ] , num_mpp_parts ) ; // one monitor per path
let mut events : Vec < SendEvent > = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) . into_iter ( ) . map ( | e | SendEvent ::from_event ( e ) ) . collect ( ) ;
assert_eq! ( events . len ( ) , num_mpp_parts ) ;
// Forward the intercepted payments.
for ( idx , ev ) in events . into_iter ( ) . enumerate ( ) {
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & ev . msgs [ 0 ] ) ;
do_commitment_signed_dance ( & nodes [ 1 ] , & nodes [ 0 ] , & ev . commitment_msg , false , true ) ;
let events = nodes [ 1 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
let ( intercept_id , expected_outbound_amt_msat ) = match events [ 0 ] {
crate ::events ::Event ::HTLCIntercepted {
intercept_id , expected_outbound_amount_msat , payment_hash : pmt_hash , ..
} = > {
assert_eq! ( pmt_hash , payment_hash ) ;
( intercept_id , expected_outbound_amount_msat )
} ,
_ = > panic! ( )
} ;
nodes [ 1 ] . node . forward_intercepted_htlc ( intercept_id , & chan_ids [ idx ] ,
nodes [ 2 ] . node . get_our_node_id ( ) , expected_outbound_amt_msat - skimmed_fee_msat ) . unwrap ( ) ;
expect_pending_htlcs_forwardable! ( nodes [ 1 ] ) ;
let payment_event = {
{
let mut added_monitors = nodes [ 1 ] . chain_monitor . added_monitors . lock ( ) . unwrap ( ) ;
assert_eq! ( added_monitors . len ( ) , 1 ) ;
added_monitors . clear ( ) ;
}
let mut events = nodes [ 1 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
SendEvent ::from_event ( events . remove ( 0 ) )
} ;
nodes [ 2 ] . node . handle_update_add_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & payment_event . msgs [ 0 ] ) ;
do_commitment_signed_dance ( & nodes [ 2 ] , & nodes [ 1 ] , & payment_event . commitment_msg , false , true ) ;
if idx = = num_mpp_parts - 1 {
expect_pending_htlcs_forwardable! ( nodes [ 2 ] ) ;
}
}
// Claim the payment and check that the skimmed fee is as expected.
let payment_preimage = nodes [ 2 ] . node . get_payment_preimage ( payment_hash , payment_secret ) . unwrap ( ) ;
let events = nodes [ 2 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match events [ 0 ] {
crate ::events ::Event ::PaymentClaimable {
ref payment_hash , ref purpose , amount_msat , counterparty_skimmed_fee_msat , receiver_node_id , ..
} = > {
assert_eq! ( payment_hash , payment_hash ) ;
assert_eq! ( amt_msat - skimmed_fee_msat * num_mpp_parts as u64 , amount_msat ) ;
assert_eq! ( skimmed_fee_msat * num_mpp_parts as u64 , counterparty_skimmed_fee_msat ) ;
assert_eq! ( nodes [ 2 ] . node . get_our_node_id ( ) , receiver_node_id . unwrap ( ) ) ;
match purpose {
crate ::events ::PaymentPurpose ::InvoicePayment { payment_preimage : ev_payment_preimage ,
payment_secret : ev_payment_secret , .. } = >
{
assert_eq! ( payment_preimage , ev_payment_preimage . unwrap ( ) ) ;
assert_eq! ( payment_secret , * ev_payment_secret ) ;
} ,
_ = > panic! ( ) ,
}
} ,
_ = > panic! ( " Unexpected event " ) ,
}
let mut expected_paths_vecs = Vec ::new ( ) ;
let mut expected_paths = Vec ::new ( ) ;
for _ in 0 .. num_mpp_parts { expected_paths_vecs . push ( vec! ( & nodes [ 1 ] , & nodes [ 2 ] ) ) ; }
for i in 0 .. num_mpp_parts { expected_paths . push ( & expected_paths_vecs [ i ] [ .. ] ) ; }
let total_fee_msat = do_claim_payment_along_route_with_extra_penultimate_hop_fees (
& nodes [ 0 ] , & expected_paths [ .. ] , & vec! [ skimmed_fee_msat as u32 ; num_mpp_parts ] [ .. ] , false ,
payment_preimage ) ;
// The sender doesn't know that the penultimate hop took an extra fee.
expect_payment_sent ( & nodes [ 0 ] , payment_preimage ,
Some ( Some ( total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64 ) ) , true , true ) ;
}
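// Illustrative sketch (not a test): the two config knobs the interception tests above rely
// on, shown on top of `test_default_channel_config()`. Field names mirror the usage above;
// in a real node the same fields live on the `UserConfig` passed when constructing the
// `ChannelManager`.
#[allow(dead_code)]
fn sketch_intercept_configs() -> (crate::util::config::UserConfig, crate::util::config::UserConfig) {
	// Forwarding (LSP) node: emit `Event::HTLCIntercepted` for forwards addressed to an
	// intercept SCID handed out via `get_intercept_scid()`.
	let mut forwarder_cfg = test_default_channel_config();
	forwarder_cfg.accept_intercept_htlcs = true;
	// Recipient node: tolerate the forwarder skimming a fee, i.e. accept HTLCs that pay
	// slightly less than the onion says they should.
	let mut recipient_cfg = test_default_channel_config();
	recipient_cfg.channel_config.accept_underpaying_htlcs = true;
	(forwarder_cfg, recipient_cfg)
}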
#[ derive(PartialEq) ]
enum AutoRetry {
Success ,
Spontaneous ,
FailAttempts ,
FailTimeout ,
FailOnRestart ,
FailOnRetry ,
}
#[ test ]
fn automatic_retries ( ) {
do_automatic_retries ( AutoRetry ::Success ) ;
do_automatic_retries ( AutoRetry ::Spontaneous ) ;
do_automatic_retries ( AutoRetry ::FailAttempts ) ;
do_automatic_retries ( AutoRetry ::FailTimeout ) ;
do_automatic_retries ( AutoRetry ::FailOnRestart ) ;
do_automatic_retries ( AutoRetry ::FailOnRetry ) ;
}
fn do_automatic_retries ( test : AutoRetry ) {
// Test basic automatic payment retries in ChannelManager. See individual `test` variant comments
// below.
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let persister ;
let new_chain_monitor ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , None , None ] ) ;
let node_0_deserialized ;
let mut nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
let channel_id_1 = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 2 ;
let channel_id_2 = create_announced_chan_between_nodes ( & nodes , 2 , 1 ) . 2 ;
// Marshall data to send the payment
#[ cfg(feature = " std " ) ]
let payment_expiry_secs = SystemTime ::UNIX_EPOCH . elapsed ( ) . unwrap ( ) . as_secs ( ) + 60 * 60 ;
#[ cfg(not(feature = " std " )) ]
let payment_expiry_secs = 60 * 60 ;
let amt_msat = 1000 ;
let mut invoice_features = Bolt11InvoiceFeatures ::empty ( ) ;
invoice_features . set_variable_length_onion_required ( ) ;
invoice_features . set_payment_secret_required ( ) ;
invoice_features . set_basic_mpp_optional ( ) ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 2 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
. with_expiry_time ( payment_expiry_secs as u64 )
. with_bolt11_features ( invoice_features ) . unwrap ( ) ;
let route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
let ( _ , payment_hash , payment_preimage , payment_secret ) = get_route_and_payment_hash! ( nodes [ 0 ] , nodes [ 2 ] , amt_msat ) ;
macro_rules ! pass_failed_attempt_with_retry_along_path {
( $failing_channel_id : expr , $expect_pending_htlcs_forwardable : expr ) = > {
// Send a payment attempt that fails due to lack of liquidity on the second hop
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let update_0 = get_htlc_update_msgs! ( nodes [ 0 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
let mut update_add = update_0 . update_add_htlcs [ 0 ] . clone ( ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & update_add ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , & update_0 . commitment_signed , false , true ) ;
expect_pending_htlcs_forwardable_ignore! ( nodes [ 1 ] ) ;
nodes [ 1 ] . node . process_pending_htlc_forwards ( ) ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore! ( nodes [ 1 ] ,
vec! [ HTLCDestination ::NextHopChannel {
node_id : Some ( nodes [ 2 ] . node . get_our_node_id ( ) ) ,
channel_id : $failing_channel_id ,
} ] ) ;
nodes [ 1 ] . node . process_pending_htlc_forwards ( ) ;
let update_1 = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
check_added_monitors! ( & nodes [ 1 ] , 1 ) ;
assert! ( update_1 . update_fail_htlcs . len ( ) = = 1 ) ;
let fail_msg = update_1 . update_fail_htlcs [ 0 ] . clone ( ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & fail_msg ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , update_1 . commitment_signed , false ) ;
// Ensure the attempt fails and a new PendingHTLCsForwardable event is generated for the retry
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 2 ) ;
match events [ 0 ] {
Event ::PaymentPathFailed { payment_hash : ev_payment_hash , payment_failed_permanently , .. } = > {
assert_eq! ( payment_hash , ev_payment_hash ) ;
assert_eq! ( payment_failed_permanently , false ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
if $expect_pending_htlcs_forwardable {
match events [ 1 ] {
Event ::PendingHTLCsForwardable { .. } = > { } ,
_ = > panic! ( " Unexpected event " ) ,
}
} else {
match events [ 1 ] {
Event ::PaymentFailed { payment_hash : ev_payment_hash , .. } = > {
assert_eq! ( payment_hash , ev_payment_hash ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
}
}
}
if test = = AutoRetry ::Success {
// Test that we can succeed on the first retry.
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
pass_failed_attempt_with_retry_along_path! ( channel_id_2 , true ) ;
// Open a new channel with liquidity on the second hop so we can find a route for the retry
// attempt, since the initial second hop channel will be excluded from pathfinding
create_announced_chan_between_nodes ( & nodes , 1 , 2 ) ;
// We retry payments in `process_pending_htlc_forwards`
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let mut msg_events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( msg_events . len ( ) , 1 ) ;
pass_along_path ( & nodes [ 0 ] , & [ & nodes [ 1 ] , & nodes [ 2 ] ] , amt_msat , payment_hash , Some ( payment_secret ) , msg_events . pop ( ) . unwrap ( ) , true , None ) ;
claim_payment_along_route ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] , & nodes [ 2 ] ] ] , false , payment_preimage ) ;
} else if test = = AutoRetry ::Spontaneous {
nodes [ 0 ] . node . send_spontaneous_payment_with_retry ( Some ( payment_preimage ) ,
RecipientOnionFields ::spontaneous_empty ( ) , PaymentId ( payment_hash . 0 ) , route_params ,
Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
pass_failed_attempt_with_retry_along_path! ( channel_id_2 , true ) ;
// Open a new channel with liquidity on the second hop so we can find a route for the retry
// attempt, since the initial second hop channel will be excluded from pathfinding
create_announced_chan_between_nodes ( & nodes , 1 , 2 ) ;
// We retry payments in `process_pending_htlc_forwards`
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let mut msg_events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( msg_events . len ( ) , 1 ) ;
pass_along_path ( & nodes [ 0 ] , & [ & nodes [ 1 ] , & nodes [ 2 ] ] , amt_msat , payment_hash , None , msg_events . pop ( ) . unwrap ( ) , true , Some ( payment_preimage ) ) ;
claim_payment_along_route ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] , & nodes [ 2 ] ] ] , false , payment_preimage ) ;
} else if test = = AutoRetry ::FailAttempts {
// Ensure ChannelManager will not retry a payment if it has run out of payment attempts.
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
pass_failed_attempt_with_retry_along_path! ( channel_id_2 , true ) ;
// Open a new channel with no liquidity on the second hop so we can find a (bad) route for
// the retry attempt, since the initial second hop channel will be excluded from pathfinding
let channel_id_3 = create_announced_chan_between_nodes ( & nodes , 2 , 1 ) . 2 ;
// We retry payments in `process_pending_htlc_forwards`
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
pass_failed_attempt_with_retry_along_path! ( channel_id_3 , false ) ;
// Ensure we won't retry a second time.
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
let mut msg_events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( msg_events . len ( ) , 0 ) ;
} else if test = = AutoRetry ::FailTimeout {
#[ cfg(not(feature = " no-std " )) ] {
// Ensure ChannelManager will not retry a payment if it times out due to Retry::Timeout.
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Timeout ( Duration ::from_secs ( 60 ) ) ) . unwrap ( ) ;
pass_failed_attempt_with_retry_along_path! ( channel_id_2 , true ) ;
// Advance the time so the second attempt fails due to timeout.
SinceEpoch ::advance ( Duration ::from_secs ( 61 ) ) ;
// Make sure we don't retry again.
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
let mut msg_events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( msg_events . len ( ) , 0 ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match events [ 0 ] {
Event ::PaymentFailed { payment_hash : ref ev_payment_hash , payment_id : ref ev_payment_id , reason : ref ev_reason } = > {
assert_eq! ( payment_hash , * ev_payment_hash ) ;
assert_eq! ( PaymentId ( payment_hash . 0 ) , * ev_payment_id ) ;
assert_eq! ( PaymentFailureReason ::RetriesExhausted , ev_reason . unwrap ( ) ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
}
} else if test = = AutoRetry ::FailOnRestart {
// Ensure ChannelManager will not retry a payment after restart, even if there were retry
// attempts remaining prior to restart.
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 2 ) ) . unwrap ( ) ;
pass_failed_attempt_with_retry_along_path! ( channel_id_2 , true ) ;
// Open a new channel with no liquidity on the second hop so we can find a (bad) route for
// the retry attempt, since the initial second hop channel will be excluded from pathfinding
let channel_id_3 = create_announced_chan_between_nodes ( & nodes , 2 , 1 ) . 2 ;
// Ensure the first retry attempt fails, with 1 retry attempt remaining
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
pass_failed_attempt_with_retry_along_path! ( channel_id_3 , true ) ;
// Restart the node and ensure that ChannelManager does not use its remaining retry attempt
let node_encoded = nodes [ 0 ] . node . encode ( ) ;
let chan_1_monitor_serialized = get_monitor! ( nodes [ 0 ] , channel_id_1 ) . encode ( ) ;
reload_node! ( nodes [ 0 ] , node_encoded , & [ & chan_1_monitor_serialized ] , persister , new_chain_monitor , node_0_deserialized ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
expect_pending_htlcs_forwardable_from_events! ( nodes [ 0 ] , events , true ) ;
// Make sure we don't retry again.
let mut msg_events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( msg_events . len ( ) , 0 ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match events [ 0 ] {
Event ::PaymentFailed { payment_hash : ref ev_payment_hash , payment_id : ref ev_payment_id , reason : ref ev_reason } = > {
assert_eq! ( payment_hash , * ev_payment_hash ) ;
assert_eq! ( PaymentId ( payment_hash . 0 ) , * ev_payment_id ) ;
assert_eq! ( PaymentFailureReason ::RetriesExhausted , ev_reason . unwrap ( ) ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
} else if test = = AutoRetry ::FailOnRetry {
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
pass_failed_attempt_with_retry_along_path! ( channel_id_2 , true ) ;
// We retry payments in `process_pending_htlc_forwards`. Since our channel closed, we should
// fail to find a route.
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
let mut msg_events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( msg_events . len ( ) , 0 ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match events [ 0 ] {
Event ::PaymentFailed { payment_hash : ref ev_payment_hash , payment_id : ref ev_payment_id , reason : ref ev_reason } = > {
assert_eq! ( payment_hash , * ev_payment_hash ) ;
assert_eq! ( PaymentId ( payment_hash . 0 ) , * ev_payment_id ) ;
assert_eq! ( PaymentFailureReason ::RouteNotFound , ev_reason . unwrap ( ) ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
}
}
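// Illustrative sketch (not a test): the caller-side retry API the variants above exercise.
// A payment is handed off together with a `Retry` strategy and LDK re-attempts failed paths
// when pending HTLC forwards are processed (normally in response to `PendingHTLCsForwardable`).
// `payment_hash`, `payment_secret` and `route_params` are assumed to be built as above.
#[allow(dead_code)]
fn sketch_send_with_retries(sender: &Node<'_, '_, '_>, payment_hash: crate::ln::PaymentHash,
	payment_secret: PaymentSecret, route_params: RouteParameters
) {
	// Allow up to two further attempts after the initial send fails retryably.
	sender.node.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
		PaymentId(payment_hash.0), route_params, Retry::Attempts(2)).unwrap();
	// Retries (like forwards) are driven from here; the tests above call it directly.
	sender.node.process_pending_htlc_forwards();
}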
#[ test ]
fn auto_retry_partial_failure ( ) {
// Test that we'll retry appropriately on send partial failure and retry partial failure.
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None ] ) ;
let mut nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
let chan_1_id = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 0. contents . short_channel_id ;
let chan_2_id = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 0. contents . short_channel_id ;
let chan_3_id = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 0. contents . short_channel_id ;
// Marshall data to send the payment
let amt_msat = 20_000 ;
let ( _ , payment_hash , payment_preimage , payment_secret ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 1 ] , amt_msat ) ;
#[ cfg(feature = " std " ) ]
let payment_expiry_secs = SystemTime ::UNIX_EPOCH . elapsed ( ) . unwrap ( ) . as_secs ( ) + 60 * 60 ;
#[ cfg(not(feature = " std " )) ]
let payment_expiry_secs = 60 * 60 ;
let mut invoice_features = Bolt11InvoiceFeatures ::empty ( ) ;
invoice_features . set_variable_length_onion_required ( ) ;
invoice_features . set_payment_secret_required ( ) ;
invoice_features . set_basic_mpp_optional ( ) ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 1 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
. with_expiry_time ( payment_expiry_secs as u64 )
. with_bolt11_features ( invoice_features ) . unwrap ( ) ;
let route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
// Ensure the first monitor update (for the initial send path1 over chan_1) succeeds, but the
// second (for the initial send path2 over chan_2) fails.
chanmon_cfgs [ 0 ] . persister . set_update_ret ( ChannelMonitorUpdateStatus ::Completed ) ;
chanmon_cfgs [ 0 ] . persister . set_update_ret ( ChannelMonitorUpdateStatus ::PermanentFailure ) ;
// Ensure third monitor update (for the retry1's path1 over chan_1) succeeds, but the fourth (for
// the retry1's path2 over chan_3) fails, and monitor updates succeed after that.
chanmon_cfgs [ 0 ] . persister . set_update_ret ( ChannelMonitorUpdateStatus ::Completed ) ;
chanmon_cfgs [ 0 ] . persister . set_update_ret ( ChannelMonitorUpdateStatus ::PermanentFailure ) ;
chanmon_cfgs [ 0 ] . persister . set_update_ret ( ChannelMonitorUpdateStatus ::Completed ) ;
// Configure the initial send, retry1 and retry2's paths.
let send_route = Route {
paths : vec ! [
Path { hops : vec ! [ RouteHop {
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chan_1_id ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : amt_msat / 2 ,
cltv_expiry_delta : 100 ,
} ] , blinded_tail : None } ,
Path { hops : vec ! [ RouteHop {
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chan_2_id ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : amt_msat / 2 ,
cltv_expiry_delta : 100 ,
} ] , blinded_tail : None } ,
] ,
route_params : Some ( route_params . clone ( ) ) ,
} ;
let retry_1_route = Route {
paths : vec ! [
Path { hops : vec ! [ RouteHop {
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chan_1_id ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : amt_msat / 4 ,
cltv_expiry_delta : 100 ,
} ] , blinded_tail : None } ,
Path { hops : vec ! [ RouteHop {
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chan_3_id ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : amt_msat / 4 ,
cltv_expiry_delta : 100 ,
} ] , blinded_tail : None } ,
] ,
route_params : Some ( route_params . clone ( ) ) ,
} ;
let retry_2_route = Route {
paths : vec ! [
Path { hops : vec ! [ RouteHop {
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chan_1_id ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : amt_msat / 4 ,
cltv_expiry_delta : 100 ,
} ] , blinded_tail : None } ,
] ,
route_params : Some ( route_params . clone ( ) ) ,
} ;
nodes [ 0 ] . router . expect_find_route ( route_params . clone ( ) , Ok ( send_route ) ) ;
let mut payment_params = route_params . payment_params . clone ( ) ;
payment_params . previously_failed_channels . push ( chan_2_id ) ;
nodes [ 0 ] . router . expect_find_route (
RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat / 2 ) ,
Ok ( retry_1_route ) ) ;
let mut payment_params = route_params . payment_params . clone ( ) ;
payment_params . previously_failed_channels . push ( chan_3_id ) ;
nodes [ 0 ] . router . expect_find_route (
RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat / 4 ) ,
Ok ( retry_2_route ) ) ;
// Send a payment that will partially fail on send, then partially fail on retry, then succeed.
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 3 ) ) . unwrap ( ) ;
let closed_chan_events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( closed_chan_events . len ( ) , 4 ) ;
match closed_chan_events [ 0 ] {
Event ::ChannelClosed { .. } = > { } ,
_ = > panic! ( " Unexpected event " ) ,
}
match closed_chan_events [ 1 ] {
Event ::PaymentPathFailed { .. } = > { } ,
_ = > panic! ( " Unexpected event " ) ,
}
match closed_chan_events [ 2 ] {
Event ::ChannelClosed { .. } = > { } ,
_ = > panic! ( " Unexpected event " ) ,
}
match closed_chan_events [ 3 ] {
Event ::PaymentPathFailed { .. } = > { } ,
_ = > panic! ( " Unexpected event " ) ,
}
// Pass the first part of the payment along the path.
check_added_monitors! ( nodes [ 0 ] , 5 ) ; // three outbound channel updates succeeded, two permanently failed
let mut msg_events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
// First message is the first update_add, remaining messages are broadcasting channel updates and
// errors for the permfailed channels
assert_eq! ( msg_events . len ( ) , 5 ) ;
let mut payment_event = SendEvent ::from_event ( msg_events . remove ( 0 ) ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & payment_event . msgs [ 0 ] ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & payment_event . commitment_msg ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let ( bs_first_raa , bs_first_cs ) = get_revoke_commit_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_first_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let as_second_htlc_updates = SendEvent ::from_node ( & nodes [ 0 ] ) ;
nodes [ 0 ] . node . handle_commitment_signed ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_first_cs ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let as_first_raa = get_event_msg! ( nodes [ 0 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_revoke_and_ack ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_first_raa ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_second_htlc_updates . msgs [ 0 ] ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_second_htlc_updates . msgs [ 1 ] ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_second_htlc_updates . commitment_msg ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let ( bs_second_raa , bs_second_cs ) = get_revoke_commit_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_second_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
nodes [ 0 ] . node . handle_commitment_signed ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_second_cs ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let as_second_raa = get_event_msg! ( nodes [ 0 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_revoke_and_ack ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_second_raa ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
expect_pending_htlcs_forwardable_ignore! ( nodes [ 1 ] ) ;
nodes [ 1 ] . node . process_pending_htlc_forwards ( ) ;
expect_payment_claimable! ( nodes [ 1 ] , payment_hash , payment_secret , amt_msat ) ;
nodes [ 1 ] . node . claim_funds ( payment_preimage ) ;
expect_payment_claimed! ( nodes [ 1 ] , payment_hash , amt_msat ) ;
let bs_claim_update = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
assert_eq! ( bs_claim_update . update_fulfill_htlcs . len ( ) , 1 ) ;
nodes [ 0 ] . node . handle_update_fulfill_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_claim_update . update_fulfill_htlcs [ 0 ] ) ;
expect_payment_sent ( & nodes [ 0 ] , payment_preimage , None , false , false ) ;
nodes [ 0 ] . node . handle_commitment_signed ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_claim_update . commitment_signed ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let ( as_third_raa , as_third_cs ) = get_revoke_commit_msgs! ( nodes [ 0 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_revoke_and_ack ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_third_raa ) ;
check_added_monitors! ( nodes [ 1 ] , 4 ) ;
let bs_second_claim_update = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_third_cs ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_third_raa = get_event_msg! ( nodes [ 1 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_third_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
expect_payment_path_successful! ( nodes [ 0 ] ) ;
nodes [ 0 ] . node . handle_update_fulfill_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_second_claim_update . update_fulfill_htlcs [ 0 ] ) ;
nodes [ 0 ] . node . handle_update_fulfill_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_second_claim_update . update_fulfill_htlcs [ 1 ] ) ;
nodes [ 0 ] . node . handle_commitment_signed ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_second_claim_update . commitment_signed ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let ( as_fourth_raa , as_fourth_cs ) = get_revoke_commit_msgs! ( nodes [ 0 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_revoke_and_ack ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_fourth_raa ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_fourth_cs ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_second_raa = get_event_msg! ( nodes [ 1 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_second_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 2 ) ;
if let Event ::PaymentPathSuccessful { .. } = events [ 0 ] { } else { panic! ( ) ; }
if let Event ::PaymentPathSuccessful { .. } = events [ 1 ] { } else { panic! ( ) ; }
}
#[ test ]
fn auto_retry_zero_attempts_send_error ( ) {
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None ] ) ;
let mut nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 0. contents . short_channel_id ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 0. contents . short_channel_id ;
// Marshall data to send the payment
let amt_msat = 20_000 ;
let ( _ , payment_hash , _ , payment_secret ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 1 ] , amt_msat ) ;
#[ cfg(feature = " std " ) ]
let payment_expiry_secs = SystemTime ::UNIX_EPOCH . elapsed ( ) . unwrap ( ) . as_secs ( ) + 60 * 60 ;
#[ cfg(not(feature = " std " )) ]
let payment_expiry_secs = 60 * 60 ;
2023-07-14 14:41:58 -05:00
let mut invoice_features = Bolt11InvoiceFeatures ::empty ( ) ;
2022-12-19 00:38:54 -05:00
invoice_features . set_variable_length_onion_required ( ) ;
invoice_features . set_payment_secret_required ( ) ;
invoice_features . set_basic_mpp_optional ( ) ;
2023-01-27 19:24:52 +00:00
let payment_params = PaymentParameters ::from_node_id ( nodes [ 1 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
2022-12-19 00:38:54 -05:00
. with_expiry_time ( payment_expiry_secs as u64 )
2023-04-29 15:37:51 -04:00
. with_bolt11_features ( invoice_features ) . unwrap ( ) ;
2023-08-31 12:25:38 +02:00
let route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
2022-12-19 00:38:54 -05:00
chanmon_cfgs [ 0 ] . persister . set_update_ret ( ChannelMonitorUpdateStatus ::PermanentFailure ) ;
2023-03-22 21:48:22 +00:00
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 0 ) ) . unwrap ( ) ;
2022-12-19 00:38:54 -05:00
assert_eq! ( nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) . len ( ) , 2 ) ; // channel close messages
2023-02-10 15:09:01 -06:00
let events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 3 ) ;
if let Event ::ChannelClosed { .. } = events [ 0 ] { } else { panic! ( ) ; }
if let Event ::PaymentPathFailed { .. } = events [ 1 ] { } else { panic! ( ) ; }
if let Event ::PaymentFailed { .. } = events [ 2 ] { } else { panic! ( ) ; }
2022-12-19 00:38:54 -05:00
check_added_monitors! ( nodes [ 0 ] , 2 ) ;
}
#[ test ]
fn fails_paying_after_rejected_by_payee ( ) {
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None ] ) ;
let mut nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 0. contents . short_channel_id ;
// Marshall data to send the payment
let amt_msat = 20_000 ;
let ( _ , payment_hash , _ , payment_secret ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 1 ] , amt_msat ) ;
#[ cfg(feature = " std " ) ]
let payment_expiry_secs = SystemTime ::UNIX_EPOCH . elapsed ( ) . unwrap ( ) . as_secs ( ) + 60 * 60 ;
#[ cfg(not(feature = " std " )) ]
let payment_expiry_secs = 60 * 60 ;
2023-07-14 14:41:58 -05:00
let mut invoice_features = Bolt11InvoiceFeatures ::empty ( ) ;
2022-12-19 00:38:54 -05:00
invoice_features . set_variable_length_onion_required ( ) ;
invoice_features . set_payment_secret_required ( ) ;
invoice_features . set_basic_mpp_optional ( ) ;
2023-01-27 19:24:52 +00:00
let payment_params = PaymentParameters ::from_node_id ( nodes [ 1 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
2022-12-19 00:38:54 -05:00
. with_expiry_time ( payment_expiry_secs as u64 )
2023-04-29 15:37:51 -04:00
. with_bolt11_features ( invoice_features ) . unwrap ( ) ;
2023-08-31 12:25:38 +02:00
let route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
2022-12-19 00:38:54 -05:00
2023-03-22 21:48:22 +00:00
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
2022-12-19 00:38:54 -05:00
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
let mut payment_event = SendEvent ::from_event ( events . pop ( ) . unwrap ( ) ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & payment_event . msgs [ 0 ] ) ;
check_added_monitors! ( nodes [ 1 ] , 0 ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , payment_event . commitment_msg , false ) ;
expect_pending_htlcs_forwardable! ( nodes [ 1 ] ) ;
expect_payment_claimable! ( & nodes [ 1 ] , payment_hash , payment_secret , amt_msat ) ;
nodes [ 1 ] . node . fail_htlc_backwards ( & payment_hash ) ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( nodes [ 1 ] , [ HTLCDestination ::FailedPayment { payment_hash } ] ) ;
2023-03-31 19:07:57 -05:00
pass_failed_payment_back ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] ] ] , false , payment_hash , PaymentFailureReason ::RecipientRejected ) ;
2022-12-19 00:38:54 -05:00
}
2023-01-04 18:32:12 -05:00
#[ test ]
fn retry_multi_path_single_failed_payment ( ) {
// Tests that we can/will retry after a single path of an MPP payment failed immediately
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None , None ] ) ;
let nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes_with_value ( & nodes , 0 , 1 , 1_000_000 , 0 ) ;
create_announced_chan_between_nodes_with_value ( & nodes , 0 , 1 , 1_000_000 , 0 ) ;
2023-01-27 20:31:10 +00:00
let amt_msat = 100_010_000 ;
let ( _ , payment_hash , _ , payment_secret ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 1 ] , amt_msat ) ;
#[ cfg(feature = " std " ) ]
let payment_expiry_secs = SystemTime ::UNIX_EPOCH . elapsed ( ) . unwrap ( ) . as_secs ( ) + 60 * 60 ;
#[ cfg(not(feature = " std " )) ]
let payment_expiry_secs = 60 * 60 ;
2023-07-14 14:41:58 -05:00
let mut invoice_features = Bolt11InvoiceFeatures ::empty ( ) ;
2023-01-27 20:31:10 +00:00
invoice_features . set_variable_length_onion_required ( ) ;
invoice_features . set_payment_secret_required ( ) ;
invoice_features . set_basic_mpp_optional ( ) ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 1 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
. with_expiry_time ( payment_expiry_secs as u64 )
2023-04-29 15:37:51 -04:00
. with_bolt11_features ( invoice_features ) . unwrap ( ) ;
2023-08-31 12:25:38 +02:00
let route_params = RouteParameters ::from_payment_params_and_value (
payment_params . clone ( ) , amt_msat ) ;
2023-01-27 20:31:10 +00:00
2023-01-04 18:32:12 -05:00
let chans = nodes [ 0 ] . node . list_usable_channels ( ) ;
let mut route = Route {
paths : vec ! [
2023-04-09 13:50:44 -04:00
Path { hops : vec ! [ RouteHop {
2023-01-04 18:32:12 -05:00
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chans [ 0 ] . short_channel_id . unwrap ( ) ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : 10_000 ,
cltv_expiry_delta : 100 ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None } ,
2023-04-09 13:50:44 -04:00
Path { hops : vec ! [ RouteHop {
2023-01-04 18:32:12 -05:00
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chans [ 1 ] . short_channel_id . unwrap ( ) ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : 100_000_001 , // Our default max-HTLC-value is 10% of the channel value; this is one msat more than that
cltv_expiry_delta : 100 ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None } ,
2023-01-04 18:32:12 -05:00
] ,
2023-08-31 15:10:09 +02:00
route_params : Some ( route_params . clone ( ) ) ,
2023-01-04 18:32:12 -05:00
} ;
2023-01-27 20:31:10 +00:00
nodes [ 0 ] . router . expect_find_route ( route_params . clone ( ) , Ok ( route . clone ( ) ) ) ;
2023-01-04 18:32:12 -05:00
// On retry, split the payment across both channels.
2023-04-09 13:50:44 -04:00
route . paths [ 0 ] . hops [ 0 ] . fee_msat = 50_000_001 ;
route . paths [ 1 ] . hops [ 0 ] . fee_msat = 50_000_000 ;
2023-08-31 15:10:09 +02:00
let mut pay_params = route . route_params . clone ( ) . unwrap ( ) . payment_params ;
2023-02-16 16:40:51 -05:00
pay_params . previously_failed_channels . push ( chans [ 1 ] . short_channel_id . unwrap ( ) ) ;
2023-08-31 12:25:38 +02:00
nodes [ 0 ] . router . expect_find_route (
// Note that the second request here requests the amount we originally failed to send,
// not the amount remaining on the full payment, which should be changed.
RouteParameters ::from_payment_params_and_value ( pay_params , 100_000_001 ) ,
Ok ( route . clone ( ) ) ) ;
2023-01-04 18:32:12 -05:00
2023-02-07 14:10:41 -05:00
{
2023-08-22 18:57:06 +03:00
let scorer = chanmon_cfgs [ 0 ] . scorer . read ( ) . unwrap ( ) ;
2023-02-07 14:10:41 -05:00
// The initial send attempt, 2 paths
scorer . expect_usage ( chans [ 0 ] . short_channel_id . unwrap ( ) , ChannelUsage { amount_msat : 10_000 , inflight_htlc_msat : 0 , effective_capacity : EffectiveCapacity ::Unknown } ) ;
scorer . expect_usage ( chans [ 1 ] . short_channel_id . unwrap ( ) , ChannelUsage { amount_msat : 100_000_001 , inflight_htlc_msat : 0 , effective_capacity : EffectiveCapacity ::Unknown } ) ;
// The retry, 2 paths. Ensure that the in-flight HTLC amount is factored in.
scorer . expect_usage ( chans [ 0 ] . short_channel_id . unwrap ( ) , ChannelUsage { amount_msat : 50_000_001 , inflight_htlc_msat : 10_000 , effective_capacity : EffectiveCapacity ::Unknown } ) ;
scorer . expect_usage ( chans [ 1 ] . short_channel_id . unwrap ( ) , ChannelUsage { amount_msat : 50_000_000 , inflight_htlc_msat : 0 , effective_capacity : EffectiveCapacity ::Unknown } ) ;
}
2023-03-22 21:48:22 +00:00
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
2023-02-10 15:09:01 -06:00
let events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match events [ 0 ] {
Event ::PaymentPathFailed { payment_hash : ev_payment_hash , payment_failed_permanently : false ,
2023-05-17 01:33:42 +00:00
failure : PathFailure ::InitialSend { err : APIError ::ChannelUnavailable { .. } } ,
2023-02-13 17:55:42 -05:00
short_channel_id : Some ( expected_scid ) , .. } = >
{
2023-02-10 15:09:01 -06:00
assert_eq! ( payment_hash , ev_payment_hash ) ;
2023-04-09 13:50:44 -04:00
assert_eq! ( expected_scid , route . paths [ 1 ] . hops [ 0 ] . short_channel_id ) ;
2023-02-10 15:09:01 -06:00
} ,
_ = > panic! ( " Unexpected event " ) ,
}
2023-01-27 20:31:10 +00:00
let htlc_msgs = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( htlc_msgs . len ( ) , 2 ) ;
check_added_monitors! ( nodes [ 0 ] , 2 ) ;
}
#[ test ]
fn immediate_retry_on_failure ( ) {
// Tests that we can/will retry immediately after a failure
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None , None ] ) ;
let nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes_with_value ( & nodes , 0 , 1 , 1_000_000 , 0 ) ;
create_announced_chan_between_nodes_with_value ( & nodes , 0 , 1 , 1_000_000 , 0 ) ;
let amt_msat = 100_000_001 ;
2023-01-04 18:32:12 -05:00
let ( _ , payment_hash , _ , payment_secret ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 1 ] , amt_msat ) ;
#[ cfg(feature = " std " ) ]
let payment_expiry_secs = SystemTime ::UNIX_EPOCH . elapsed ( ) . unwrap ( ) . as_secs ( ) + 60 * 60 ;
#[ cfg(not(feature = " std " )) ]
let payment_expiry_secs = 60 * 60 ;
2023-07-14 14:41:58 -05:00
let mut invoice_features = Bolt11InvoiceFeatures ::empty ( ) ;
2023-01-04 18:32:12 -05:00
invoice_features . set_variable_length_onion_required ( ) ;
invoice_features . set_payment_secret_required ( ) ;
invoice_features . set_basic_mpp_optional ( ) ;
2023-01-27 19:24:52 +00:00
let payment_params = PaymentParameters ::from_node_id ( nodes [ 1 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
2023-01-04 18:32:12 -05:00
. with_expiry_time ( payment_expiry_secs as u64 )
2023-04-29 15:37:51 -04:00
. with_bolt11_features ( invoice_features ) . unwrap ( ) ;
2023-08-31 12:25:38 +02:00
let route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
2023-01-04 18:32:12 -05:00
let chans = nodes [ 0 ] . node . list_usable_channels ( ) ;
let mut route = Route {
paths : vec ! [
2023-04-09 13:50:44 -04:00
Path { hops : vec ! [ RouteHop {
2023-01-04 18:32:12 -05:00
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chans [ 0 ] . short_channel_id . unwrap ( ) ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : 100_000_001 , // Our default max-HTLC-value is 10% of the channel value; this is one msat more than that
cltv_expiry_delta : 100 ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None } ,
2023-01-04 18:32:12 -05:00
] ,
2023-08-31 15:10:09 +02:00
route_params : Some ( RouteParameters ::from_payment_params_and_value (
PaymentParameters ::from_node_id ( nodes [ 1 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV ) ,
100_000_001 ) ) ,
2023-01-04 18:32:12 -05:00
} ;
2023-01-27 20:31:10 +00:00
nodes [ 0 ] . router . expect_find_route ( route_params . clone ( ) , Ok ( route . clone ( ) ) ) ;
2023-01-04 18:32:12 -05:00
// On retry, split the payment across both channels.
route . paths . push ( route . paths [ 0 ] . clone ( ) ) ;
2023-04-09 13:50:44 -04:00
route . paths [ 0 ] . hops [ 0 ] . short_channel_id = chans [ 1 ] . short_channel_id . unwrap ( ) ;
route . paths [ 0 ] . hops [ 0 ] . fee_msat = 50_000_000 ;
route . paths [ 1 ] . hops [ 0 ] . fee_msat = 50_000_001 ;
2023-02-16 16:40:51 -05:00
let mut pay_params = route_params . payment_params . clone ( ) ;
pay_params . previously_failed_channels . push ( chans [ 0 ] . short_channel_id . unwrap ( ) ) ;
2023-08-31 12:25:38 +02:00
nodes [ 0 ] . router . expect_find_route (
RouteParameters ::from_payment_params_and_value ( pay_params , amt_msat ) ,
Ok ( route . clone ( ) ) ) ;
2023-01-04 18:32:12 -05:00
2023-03-22 21:48:22 +00:00
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
2023-02-10 15:09:01 -06:00
let events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match events [ 0 ] {
Event ::PaymentPathFailed { payment_hash : ev_payment_hash , payment_failed_permanently : false ,
2023-05-17 01:33:42 +00:00
failure : PathFailure ::InitialSend { err : APIError ::ChannelUnavailable { .. } } ,
2023-02-13 17:55:42 -05:00
short_channel_id : Some ( expected_scid ) , .. } = >
{
2023-02-10 15:09:01 -06:00
assert_eq! ( payment_hash , ev_payment_hash ) ;
2023-04-09 13:50:44 -04:00
assert_eq! ( expected_scid , route . paths [ 1 ] . hops [ 0 ] . short_channel_id ) ;
2023-02-10 15:09:01 -06:00
} ,
_ = > panic! ( " Unexpected event " ) ,
}
2023-01-04 18:32:12 -05:00
let htlc_msgs = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( htlc_msgs . len ( ) , 2 ) ;
check_added_monitors! ( nodes [ 0 ] , 2 ) ;
}
#[ test ]
fn no_extra_retries_on_back_to_back_fail ( ) {
// In a previous release, we had a race where we could exceed the payment retry count if we
2023-02-11 19:38:48 -05:00
// get two failures in a row with the second indicating that all paths had failed (this field,
// `all_paths_failed`, has since been removed).
2023-01-04 18:32:12 -05:00
// Generally, when we give up trying to retry a payment, we don't know for sure what the
// current state of the ChannelManager event queue is. Specifically, we cannot be sure that
// there are not multiple additional `PaymentPathFailed` or even `PaymentSent` events
// pending which we will see later. Thus, when we previously removed the retry tracking map
// entry after an `all_paths_failed` `PaymentPathFailed` event, we may have dropped the
// retry entry even though more events for the same payment were still pending. This led to
// us retrying a payment again even though we'd already given up on it.
//
// We now have a separate event - `PaymentFailed` which indicates no HTLCs remain and which
// is used to remove the payment retry counter entries instead. This tests for the specific
// excess-retry case while also testing `PaymentFailed` generation.
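// As a hedged illustration (commented out; not executed by this test), a consumer relying on
// this behavior would treat `PaymentPathFailed` as informational and only drop its own payment
// state once `PaymentFailed` arrives. `tracked_payments` below is a hypothetical map owned by
// the consumer, not an LDK API:
//
//     match event {
//         Event::PaymentPathFailed { .. } => { /* one path failed; a retry may still be pending */ },
//         Event::PaymentFailed { payment_id, .. } => { tracked_payments.remove(&payment_id); },
//         _ => {},
//     }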
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , None , None ] ) ;
let nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
let chan_1_scid = create_announced_chan_between_nodes_with_value ( & nodes , 0 , 1 , 10_000_000 , 0 ) . 0. contents . short_channel_id ;
let chan_2_scid = create_announced_chan_between_nodes_with_value ( & nodes , 1 , 2 , 10_000_000 , 0 ) . 0. contents . short_channel_id ;
2023-01-27 20:31:10 +00:00
let amt_msat = 200_000_000 ;
let ( _ , payment_hash , _ , payment_secret ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 1 ] , amt_msat ) ;
#[ cfg(feature = " std " ) ]
let payment_expiry_secs = SystemTime ::UNIX_EPOCH . elapsed ( ) . unwrap ( ) . as_secs ( ) + 60 * 60 ;
#[ cfg(not(feature = " std " )) ]
let payment_expiry_secs = 60 * 60 ;
2023-07-14 14:41:58 -05:00
let mut invoice_features = Bolt11InvoiceFeatures ::empty ( ) ;
2023-01-27 20:31:10 +00:00
invoice_features . set_variable_length_onion_required ( ) ;
invoice_features . set_payment_secret_required ( ) ;
invoice_features . set_basic_mpp_optional ( ) ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 1 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
. with_expiry_time ( payment_expiry_secs as u64 )
2023-04-29 15:37:51 -04:00
. with_bolt11_features ( invoice_features ) . unwrap ( ) ;
2023-08-31 12:25:38 +02:00
let route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
2023-01-27 20:31:10 +00:00
2023-01-04 18:32:12 -05:00
let mut route = Route {
paths : vec ! [
2023-04-09 13:50:44 -04:00
Path { hops : vec ! [ RouteHop {
2023-01-04 18:32:12 -05:00
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chan_1_scid ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
2023-01-27 23:01:39 +00:00
fee_msat : 0 , // nodes[1] will fail the payment as we don't pay its fee
2023-01-04 18:32:12 -05:00
cltv_expiry_delta : 100 ,
} , RouteHop {
pubkey : nodes [ 2 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 2 ] . node . node_features ( ) ,
short_channel_id : chan_2_scid ,
channel_features : nodes [ 2 ] . node . channel_features ( ) ,
fee_msat : 100_000_000 ,
cltv_expiry_delta : 100 ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None } ,
2023-04-09 13:50:44 -04:00
Path { hops : vec ! [ RouteHop {
2023-01-04 18:32:12 -05:00
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chan_1_scid ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
2023-01-27 23:01:39 +00:00
fee_msat : 0 , // nodes[1] will fail the payment as we don't pay its fee
2023-01-04 18:32:12 -05:00
cltv_expiry_delta : 100 ,
} , RouteHop {
pubkey : nodes [ 2 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 2 ] . node . node_features ( ) ,
short_channel_id : chan_2_scid ,
channel_features : nodes [ 2 ] . node . channel_features ( ) ,
fee_msat : 100_000_000 ,
cltv_expiry_delta : 100 ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None }
2023-01-04 18:32:12 -05:00
] ,
2023-08-31 15:10:09 +02:00
route_params : Some ( RouteParameters ::from_payment_params_and_value (
PaymentParameters ::from_node_id ( nodes [ 2 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV ) ,
100_000_000 ) ) ,
2023-01-04 18:32:12 -05:00
} ;
2023-01-27 20:31:10 +00:00
nodes [ 0 ] . router . expect_find_route ( route_params . clone ( ) , Ok ( route . clone ( ) ) ) ;
let mut second_payment_params = route_params . payment_params . clone ( ) ;
second_payment_params . previously_failed_channels = vec! [ chan_2_scid , chan_2_scid ] ;
2023-01-27 23:01:39 +00:00
// On retry, we'll only return one path
route . paths . remove ( 1 ) ;
2023-04-09 13:50:44 -04:00
route . paths [ 0 ] . hops [ 1 ] . fee_msat = amt_msat ;
2023-08-31 12:25:38 +02:00
nodes [ 0 ] . router . expect_find_route (
RouteParameters ::from_payment_params_and_value ( second_payment_params , amt_msat ) ,
Ok ( route . clone ( ) ) ) ;
2023-01-04 18:32:12 -05:00
2023-03-22 21:48:22 +00:00
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
2023-01-04 18:32:12 -05:00
let htlc_updates = SendEvent ::from_node ( & nodes [ 0 ] ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
assert_eq! ( htlc_updates . msgs . len ( ) , 1 ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & htlc_updates . msgs [ 0 ] ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & htlc_updates . commitment_msg ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let ( bs_first_raa , bs_first_cs ) = get_revoke_commit_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_first_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let second_htlc_updates = SendEvent ::from_node ( & nodes [ 0 ] ) ;
nodes [ 0 ] . node . handle_commitment_signed ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_first_cs ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let as_first_raa = get_event_msg! ( nodes [ 0 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & second_htlc_updates . msgs [ 0 ] ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & second_htlc_updates . commitment_msg ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_second_raa = get_event_msg! ( nodes [ 1 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_revoke_and_ack ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_first_raa ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_fail_update = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_second_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_fail_update . update_fail_htlcs [ 0 ] ) ;
nodes [ 0 ] . node . handle_commitment_signed ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_fail_update . commitment_signed ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let ( as_second_raa , as_third_cs ) = get_revoke_commit_msgs! ( nodes [ 0 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_revoke_and_ack ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_second_raa ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_second_fail_update = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_third_cs ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_third_raa = get_event_msg! ( nodes [ 1 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_second_fail_update . update_fail_htlcs [ 0 ] ) ;
nodes [ 0 ] . node . handle_commitment_signed ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_second_fail_update . commitment_signed ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_third_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let ( as_third_raa , as_fourth_cs ) = get_revoke_commit_msgs! ( nodes [ 0 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_revoke_and_ack ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_third_raa ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_fourth_cs ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_fourth_raa = get_event_msg! ( nodes [ 1 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_fourth_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
// At this point A has sent two HTLCs which both failed due to lack of fee. It now has two
// pending `PaymentPathFailed` events, one with `all_paths_failed` unset, and the second
2023-01-27 23:01:39 +00:00
// with it set.
//
// Previously, we retried payments in an event consumer, which would retry each
// `PaymentPathFailed` individually. In that setup, we had retried the payment in response to
// the first `PaymentPathFailed`, then seen the second `PaymentPathFailed` with
// `all_paths_failed` set and assumed the payment was completely failed. We ultimately fixed it
// by adding the `PaymentFailed` event.
//
// Because we now retry payments as a batch, we simply return a single-path route in the
2023-02-03 12:53:01 -05:00
// second, batched, request, have that fail, and ensure the payment is abandoned.
2023-01-04 18:32:12 -05:00
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
2023-02-17 17:35:09 -05:00
assert_eq! ( events . len ( ) , 3 ) ;
2023-01-04 18:32:12 -05:00
match events [ 0 ] {
Event ::PaymentPathFailed { payment_hash : ev_payment_hash , payment_failed_permanently , .. } = > {
assert_eq! ( payment_hash , ev_payment_hash ) ;
assert_eq! ( payment_failed_permanently , false ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
match events [ 1 ] {
Event ::PendingHTLCsForwardable { .. } = > { } ,
_ = > panic! ( " Unexpected event " ) ,
}
match events [ 2 ] {
Event ::PaymentPathFailed { payment_hash : ev_payment_hash , payment_failed_permanently , .. } = > {
assert_eq! ( payment_hash , ev_payment_hash ) ;
assert_eq! ( payment_failed_permanently , false ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
let retry_htlc_updates = SendEvent ::from_node ( & nodes [ 0 ] ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & retry_htlc_updates . msgs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , & retry_htlc_updates . commitment_msg , false , true ) ;
let bs_fail_update = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_fail_update . update_fail_htlcs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , & bs_fail_update . commitment_signed , false , true ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
2023-02-03 12:53:01 -05:00
assert_eq! ( events . len ( ) , 2 ) ;
2023-01-04 18:32:12 -05:00
match events [ 0 ] {
Event ::PaymentPathFailed { payment_hash : ev_payment_hash , payment_failed_permanently , .. } = > {
assert_eq! ( payment_hash , ev_payment_hash ) ;
assert_eq! ( payment_failed_permanently , false ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
2023-02-03 12:53:01 -05:00
match events [ 1 ] {
2023-03-31 19:07:57 -05:00
Event ::PaymentFailed { payment_hash : ref ev_payment_hash , payment_id : ref ev_payment_id , reason : ref ev_reason } = > {
2023-01-04 18:32:12 -05:00
assert_eq! ( payment_hash , * ev_payment_hash ) ;
assert_eq! ( PaymentId ( payment_hash . 0 ) , * ev_payment_id ) ;
2023-03-31 19:07:57 -05:00
assert_eq! ( PaymentFailureReason ::RetriesExhausted , ev_reason . unwrap ( ) ) ;
2023-01-04 18:32:12 -05:00
} ,
_ = > panic! ( " Unexpected event " ) ,
}
}
2023-01-28 02:14:26 +00:00
#[ test ]
fn test_simple_partial_retry ( ) {
// In the first version of the in-`ChannelManager` payment retries, retries were sent for the
// full amount of the payment, rather than only the missing amount. Here we simply test for
// this by sending a payment with two parts, failing one, and retrying the second. Note that
// `TestRouter` will check that the `RouteParameters` (which contain the amount) matches the
// request.
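// Concretely, with the amounts used below: the 200_000_000 msat payment is sent as two
// ~100_000_000 msat parts, and once one part fails the retry should ask the router for only
// the missing half. A sketch of the check `TestRouter` effectively performs on the retry's
// `RouteParameters` (`retry_route_params` is a hypothetical name for the parameters passed to
// the second `find_route` call):
//
//     assert_eq!(retry_route_params.final_value_msat, amt_msat / 2); // 100_000_000, not 200_000_000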
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , None , None ] ) ;
let nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
let chan_1_scid = create_announced_chan_between_nodes_with_value ( & nodes , 0 , 1 , 10_000_000 , 0 ) . 0. contents . short_channel_id ;
let chan_2_scid = create_announced_chan_between_nodes_with_value ( & nodes , 1 , 2 , 10_000_000 , 0 ) . 0. contents . short_channel_id ;
let amt_msat = 200_000_000 ;
let ( _ , payment_hash , _ , payment_secret ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 2 ] , amt_msat ) ;
#[ cfg(feature = " std " ) ]
let payment_expiry_secs = SystemTime ::UNIX_EPOCH . elapsed ( ) . unwrap ( ) . as_secs ( ) + 60 * 60 ;
#[ cfg(not(feature = " std " )) ]
let payment_expiry_secs = 60 * 60 ;
2023-07-14 14:41:58 -05:00
let mut invoice_features = Bolt11InvoiceFeatures ::empty ( ) ;
2023-01-28 02:14:26 +00:00
invoice_features . set_variable_length_onion_required ( ) ;
invoice_features . set_payment_secret_required ( ) ;
invoice_features . set_basic_mpp_optional ( ) ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 1 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
. with_expiry_time ( payment_expiry_secs as u64 )
2023-04-29 15:37:51 -04:00
. with_bolt11_features ( invoice_features ) . unwrap ( ) ;
2023-08-31 12:25:38 +02:00
let route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
2023-01-28 02:14:26 +00:00
let mut route = Route {
paths : vec ! [
2023-04-09 13:50:44 -04:00
Path { hops : vec ! [ RouteHop {
2023-01-28 02:14:26 +00:00
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chan_1_scid ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : 0 , // nodes[1] will fail the payment as we don't pay its fee
cltv_expiry_delta : 100 ,
} , RouteHop {
pubkey : nodes [ 2 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 2 ] . node . node_features ( ) ,
short_channel_id : chan_2_scid ,
channel_features : nodes [ 2 ] . node . channel_features ( ) ,
fee_msat : 100_000_000 ,
cltv_expiry_delta : 100 ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None } ,
2023-04-09 13:50:44 -04:00
Path { hops : vec ! [ RouteHop {
2023-01-28 02:14:26 +00:00
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chan_1_scid ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : 100_000 ,
cltv_expiry_delta : 100 ,
} , RouteHop {
pubkey : nodes [ 2 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 2 ] . node . node_features ( ) ,
short_channel_id : chan_2_scid ,
channel_features : nodes [ 2 ] . node . channel_features ( ) ,
fee_msat : 100_000_000 ,
cltv_expiry_delta : 100 ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None }
2023-01-28 02:14:26 +00:00
] ,
2023-08-31 15:10:09 +02:00
route_params : Some ( RouteParameters ::from_payment_params_and_value (
PaymentParameters ::from_node_id ( nodes [ 2 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV ) ,
100_000_000 ) ) ,
2023-01-28 02:14:26 +00:00
} ;
nodes [ 0 ] . router . expect_find_route ( route_params . clone ( ) , Ok ( route . clone ( ) ) ) ;
let mut second_payment_params = route_params . payment_params . clone ( ) ;
second_payment_params . previously_failed_channels = vec! [ chan_2_scid ] ;
// On retry, we'll only be asked for one path (or 100k sats)
route . paths . remove ( 0 ) ;
2023-08-31 12:25:38 +02:00
nodes [ 0 ] . router . expect_find_route (
RouteParameters ::from_payment_params_and_value ( second_payment_params , amt_msat / 2 ) ,
Ok ( route . clone ( ) ) ) ;
2023-01-28 02:14:26 +00:00
2023-03-22 21:48:22 +00:00
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
2023-01-28 02:14:26 +00:00
let htlc_updates = SendEvent ::from_node ( & nodes [ 0 ] ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
assert_eq! ( htlc_updates . msgs . len ( ) , 1 ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & htlc_updates . msgs [ 0 ] ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & htlc_updates . commitment_msg ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let ( bs_first_raa , bs_first_cs ) = get_revoke_commit_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_first_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let second_htlc_updates = SendEvent ::from_node ( & nodes [ 0 ] ) ;
nodes [ 0 ] . node . handle_commitment_signed ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_first_cs ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let as_first_raa = get_event_msg! ( nodes [ 0 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & second_htlc_updates . msgs [ 0 ] ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & second_htlc_updates . commitment_msg ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_second_raa = get_event_msg! ( nodes [ 1 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_revoke_and_ack ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_first_raa ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_fail_update = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_second_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_fail_update . update_fail_htlcs [ 0 ] ) ;
nodes [ 0 ] . node . handle_commitment_signed ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_fail_update . commitment_signed ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let ( as_second_raa , as_third_cs ) = get_revoke_commit_msgs! ( nodes [ 0 ] , nodes [ 1 ] . node . get_our_node_id ( ) ) ;
nodes [ 1 ] . node . handle_revoke_and_ack ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_second_raa ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
nodes [ 1 ] . node . handle_commitment_signed ( & nodes [ 0 ] . node . get_our_node_id ( ) , & as_third_cs ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_third_raa = get_event_msg! ( nodes [ 1 ] , MessageSendEvent ::SendRevokeAndACK , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_third_raa ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 2 ) ;
match events [ 0 ] {
Event ::PaymentPathFailed { payment_hash : ev_payment_hash , payment_failed_permanently , .. } = > {
assert_eq! ( payment_hash , ev_payment_hash ) ;
assert_eq! ( payment_failed_permanently , false ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
match events [ 1 ] {
Event ::PendingHTLCsForwardable { .. } = > { } ,
_ = > panic! ( " Unexpected event " ) ,
}
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
let retry_htlc_updates = SendEvent ::from_node ( & nodes [ 0 ] ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & retry_htlc_updates . msgs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , & retry_htlc_updates . commitment_msg , false , true ) ;
expect_pending_htlcs_forwardable! ( nodes [ 1 ] ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
let bs_forward_update = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 2 ] . node . get_our_node_id ( ) ) ;
nodes [ 2 ] . node . handle_update_add_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_forward_update . update_add_htlcs [ 0 ] ) ;
nodes [ 2 ] . node . handle_update_add_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_forward_update . update_add_htlcs [ 1 ] ) ;
commitment_signed_dance! ( nodes [ 2 ] , nodes [ 1 ] , & bs_forward_update . commitment_signed , false ) ;
expect_pending_htlcs_forwardable! ( nodes [ 2 ] ) ;
expect_payment_claimable! ( nodes [ 2 ] , payment_hash , payment_secret , amt_msat ) ;
}
2023-02-03 23:05:58 +00:00
#[ test ]
#[ cfg(feature = " std " ) ]
fn test_threaded_payment_retries ( ) {
// In the first version of the in-`ChannelManager` payment retries, retries weren't limited to
// a single thread and would happily let multiple threads run retries at the same time. Because
// retries are done by first calculating the amount we need to retry, then dropping the
// relevant lock, then actually sending, we would happily let multiple threads retry the same
// amount at the same time, overpaying our original HTLC!
let chanmon_cfgs = create_chanmon_cfgs ( 4 ) ;
let node_cfgs = create_node_cfgs ( 4 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 4 , & node_cfgs , & [ None , None , None , None ] ) ;
let nodes = create_network ( 4 , & node_cfgs , & node_chanmgrs ) ;
// There is one mitigating guardrail when retrying payments - we can never over-pay by more
// than 10% of the original value. Thus, we want all our retries to be below that. In order to
// keep things simple, we route one HTLC for 0.1% of the payment over channel 1 and the rest
// out over channels 3+4. This will let us ignore 99.9% of the payment value and deal with only
// our channel.
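// In numbers (matching the route below): amt_msat = 100_000_000, so the small part is
// amt_msat / 1000 = 100_000 msat over channel 1, while the remaining 99_900_000 msat rides
// channels 3+4. The 10% guardrail allows up to 10_000_000 msat of over-payment, so a duplicate
// 100_000 msat retry would not be blocked by it - the assertions below must catch any extra
// `update_add_htlc` directly.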
let chan_1_scid = create_announced_chan_between_nodes_with_value ( & nodes , 0 , 1 , 10_000_000 , 0 ) . 0. contents . short_channel_id ;
create_announced_chan_between_nodes_with_value ( & nodes , 1 , 3 , 10_000_000 , 0 ) ;
let chan_3_scid = create_announced_chan_between_nodes_with_value ( & nodes , 0 , 2 , 10_000_000 , 0 ) . 0. contents . short_channel_id ;
let chan_4_scid = create_announced_chan_between_nodes_with_value ( & nodes , 2 , 3 , 10_000_000 , 0 ) . 0. contents . short_channel_id ;
let amt_msat = 100_000_000 ;
let ( _ , payment_hash , _ , payment_secret ) = get_route_and_payment_hash! ( & nodes [ 0 ] , nodes [ 2 ] , amt_msat ) ;
#[ cfg(feature = " std " ) ]
let payment_expiry_secs = SystemTime ::UNIX_EPOCH . elapsed ( ) . unwrap ( ) . as_secs ( ) + 60 * 60 ;
#[ cfg(not(feature = " std " )) ]
let payment_expiry_secs = 60 * 60 ;
2023-07-14 14:41:58 -05:00
let mut invoice_features = Bolt11InvoiceFeatures ::empty ( ) ;
2023-02-03 23:05:58 +00:00
invoice_features . set_variable_length_onion_required ( ) ;
invoice_features . set_payment_secret_required ( ) ;
invoice_features . set_basic_mpp_optional ( ) ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 1 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
. with_expiry_time ( payment_expiry_secs as u64 )
2023-04-29 15:37:51 -04:00
. with_bolt11_features ( invoice_features ) . unwrap ( ) ;
2023-08-31 12:25:38 +02:00
let mut route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
2023-02-03 23:05:58 +00:00
let mut route = Route {
paths : vec ! [
2023-04-09 13:50:44 -04:00
Path { hops : vec ! [ RouteHop {
2023-02-03 23:05:58 +00:00
pubkey : nodes [ 1 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 1 ] . node . node_features ( ) ,
short_channel_id : chan_1_scid ,
channel_features : nodes [ 1 ] . node . channel_features ( ) ,
fee_msat : 0 ,
cltv_expiry_delta : 100 ,
} , RouteHop {
pubkey : nodes [ 3 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 2 ] . node . node_features ( ) ,
short_channel_id : 42 , // Set a random SCID which nodes[1] will fail as unknown
channel_features : nodes [ 2 ] . node . channel_features ( ) ,
fee_msat : amt_msat / 1000 ,
cltv_expiry_delta : 100 ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None } ,
2023-04-09 13:50:44 -04:00
Path { hops : vec ! [ RouteHop {
2023-02-03 23:05:58 +00:00
pubkey : nodes [ 2 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 2 ] . node . node_features ( ) ,
short_channel_id : chan_3_scid ,
channel_features : nodes [ 2 ] . node . channel_features ( ) ,
fee_msat : 100_000 ,
cltv_expiry_delta : 100 ,
} , RouteHop {
pubkey : nodes [ 3 ] . node . get_our_node_id ( ) ,
node_features : nodes [ 3 ] . node . node_features ( ) ,
short_channel_id : chan_4_scid ,
channel_features : nodes [ 3 ] . node . channel_features ( ) ,
fee_msat : amt_msat - amt_msat / 1000 ,
cltv_expiry_delta : 100 ,
2023-04-18 12:06:35 -04:00
} ] , blinded_tail : None }
2023-02-03 23:05:58 +00:00
] ,
2023-08-31 15:10:09 +02:00
route_params : Some ( RouteParameters ::from_payment_params_and_value (
PaymentParameters ::from_node_id ( nodes [ 2 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV ) ,
amt_msat - amt_msat / 1000 ) ) ,
2023-02-03 23:05:58 +00:00
} ;
nodes [ 0 ] . router . expect_find_route ( route_params . clone ( ) , Ok ( route . clone ( ) ) ) ;
2023-03-22 21:48:22 +00:00
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
PaymentId ( payment_hash . 0 ) , route_params . clone ( ) , Retry ::Attempts ( 0xdeadbeef ) ) . unwrap ( ) ;
2023-02-03 23:05:58 +00:00
check_added_monitors! ( nodes [ 0 ] , 2 ) ;
let mut send_msg_events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( send_msg_events . len ( ) , 2 ) ;
send_msg_events . retain ( | msg |
if let MessageSendEvent ::UpdateHTLCs { node_id , .. } = msg {
// Drop the commitment update for nodes[2], we can just let that one sit pending
// forever.
* node_id = = nodes [ 1 ] . node . get_our_node_id ( )
} else { panic! ( ) ; }
) ;
// from here on out, the retry `RouteParameters` amount will be amt/1000
route_params . final_value_msat / = 1000 ;
route . paths . pop ( ) ;
let end_time = Instant ::now ( ) + Duration ::from_secs ( 1 ) ;
macro_rules ! thread_body { ( ) = > { {
// We really want std::thread::scope, but it's not stable until 1.63. Until then, we get unsafe.
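// As a rough, hedged sketch (requires Rust >= 1.63 and assumes the test harness types satisfy
// the `Send`/`Sync` bounds scoped threads require), the scoped version would look roughly like:
//
//     std::thread::scope(|s| {
//         for _ in 0..16 {
//             s.spawn(|| while Instant::now() < end_time {
//                 nodes[0].node.get_and_clear_pending_events();
//                 nodes[0].node.process_pending_htlc_forwards();
//             });
//         }
//     });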
let node_ref = NodePtr ::from_node ( & nodes [ 0 ] ) ;
move | | {
let node_a = unsafe { & * node_ref . 0 } ;
while Instant ::now ( ) < end_time {
node_a . node . get_and_clear_pending_events ( ) ; // wipe the PendingHTLCsForwardable
// Ignore whether we have any pending events; just always pretend we just got a
// PendingHTLCsForwardable
node_a . node . process_pending_htlc_forwards ( ) ;
}
}
} } }
let mut threads = Vec ::new ( ) ;
for _ in 0 .. 16 { threads . push ( std ::thread ::spawn ( thread_body! ( ) ) ) ; }
// Back in the main thread, poll pending messages and make sure that we never have more than
// one HTLC pending at a time. Note that the commitment_signed_dance will fail horribly if
// there are HTLC messages shoved in while it's running. This allows us to test that we never
// generate an additional update_add_htlc until we've fully failed the first.
let mut previously_failed_channels = Vec ::new ( ) ;
loop {
assert_eq! ( send_msg_events . len ( ) , 1 ) ;
let send_event = SendEvent ::from_event ( send_msg_events . pop ( ) . unwrap ( ) ) ;
assert_eq! ( send_event . msgs . len ( ) , 1 ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & send_event . msgs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , send_event . commitment_msg , false , true ) ;
// Note that we only push one route into `expect_find_route` at a time, because that's all
// the retries (should) need. If the bug is reintroduced, "real" routes may be selected, but
// we should still ultimately fail for the same reason - because we're trying to send too
// many HTLCs at once.
let mut new_route_params = route_params . clone ( ) ;
2023-04-09 13:50:44 -04:00
previously_failed_channels . push ( route . paths [ 0 ] . hops [ 1 ] . short_channel_id ) ;
2023-02-03 23:05:58 +00:00
new_route_params . payment_params . previously_failed_channels = previously_failed_channels . clone ( ) ;
2023-04-09 13:50:44 -04:00
route . paths [ 0 ] . hops [ 1 ] . short_channel_id + = 1 ;
2023-02-03 23:05:58 +00:00
nodes [ 0 ] . router . expect_find_route ( new_route_params , Ok ( route . clone ( ) ) ) ;
let bs_fail_updates = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & bs_fail_updates . update_fail_htlcs [ 0 ] ) ;
// The "normal" commitment_signed_dance delivers the final RAA and then calls
// `check_added_monitors` to ensure only the one RAA-generated monitor update was created.
// This races with our other threads which may generate an add-HTLCs commitment update via
// `process_pending_htlc_forwards`. Instead, we defer the monitor update check until after
// *we've* called `process_pending_htlc_forwards` when it's guaranteed to have two updates.
let last_raa = commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , bs_fail_updates . commitment_signed , false , true , false , true ) ;
nodes [ 0 ] . node . handle_revoke_and_ack ( & nodes [ 1 ] . node . get_our_node_id ( ) , & last_raa ) ;
let cur_time = Instant ::now ( ) ;
if cur_time > end_time {
for thread in threads . drain ( .. ) { thread . join ( ) . unwrap ( ) ; }
}
// Make sure we have some events to handle when we go around...
nodes [ 0 ] . node . get_and_clear_pending_events ( ) ; // wipe the PendingHTLCsForwardable
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
send_msg_events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
check_added_monitors! ( nodes [ 0 ] , 2 ) ;
if cur_time > end_time {
break ;
}
}
}
2023-02-22 02:40:59 +00:00
2023-07-28 05:30:24 +00:00
fn do_no_missing_sent_on_reload ( persist_manager_with_payment : bool , at_midpoint : bool ) {
2023-02-22 02:40:59 +00:00
// Test that if we reload in the middle of an HTLC claim commitment signed dance we'll still
// receive the PaymentSent event even if the ChannelManager had no idea about the payment when
// it was last persisted.
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let ( persister_a , persister_b , persister_c ) ;
let ( chain_monitor_a , chain_monitor_b , chain_monitor_c ) ;
2023-08-15 19:19:03 +00:00
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None , None ] ) ;
2023-02-22 02:40:59 +00:00
let ( nodes_0_deserialized , nodes_0_deserialized_b , nodes_0_deserialized_c ) ;
let mut nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
let chan_id = create_announced_chan_between_nodes ( & nodes , 0 , 1 ) . 2 ;
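// Serialize the ChannelManager either before or after routing the payment, depending on
// `persist_manager_with_payment`, so the reloaded manager may or may not know about the payment.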
let mut nodes_0_serialized = Vec ::new ( ) ;
if ! persist_manager_with_payment {
nodes_0_serialized = nodes [ 0 ] . node . encode ( ) ;
}
let ( our_payment_preimage , our_payment_hash , _ ) = route_payment ( & nodes [ 0 ] , & [ & nodes [ 1 ] ] , 1_000_000 ) ;
if persist_manager_with_payment {
nodes_0_serialized = nodes [ 0 ] . node . encode ( ) ;
}
nodes [ 1 ] . node . claim_funds ( our_payment_preimage ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
expect_payment_claimed! ( nodes [ 1 ] , our_payment_hash , 1_000_000 ) ;
2023-07-28 05:30:24 +00:00
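// Deliver nodes[1]'s update_fulfill_htlc to nodes[0], either stopping halfway through the
// commitment dance (`at_midpoint`) or completing it, before reloading nodes[0] below.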
if at_midpoint {
let updates = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_update_fulfill_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & updates . update_fulfill_htlcs [ 0 ] ) ;
nodes [ 0 ] . node . handle_commitment_signed ( & nodes [ 1 ] . node . get_our_node_id ( ) , & updates . commitment_signed ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
} else {
let htlc_fulfill_updates = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_update_fulfill_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & htlc_fulfill_updates . update_fulfill_htlcs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , htlc_fulfill_updates . commitment_signed , false ) ;
// Ignore the PaymentSent event which is now pending on nodes[0] - if we were to handle it we'd
// be expected to ignore the eventual conflicting PaymentFailed, but by not looking at it we
// expect to get the PaymentSent again later.
check_added_monitors ( & nodes [ 0 ] , 0 ) ;
}
2023-02-22 02:40:59 +00:00
// The ChannelMonitor should always be the latest version, as we're required to persist it
// during the commitment signed handling.
let chan_0_monitor_serialized = get_monitor! ( nodes [ 0 ] , chan_id ) . encode ( ) ;
reload_node! ( nodes [ 0 ] , test_default_channel_config ( ) , & nodes_0_serialized , & [ & chan_0_monitor_serialized ] , persister_a , chain_monitor_a , nodes_0_deserialized ) ;
let events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 2 ) ;
if let Event ::ChannelClosed { reason : ClosureReason ::OutdatedChannelManager , .. } = events [ 0 ] { } else { panic! ( ) ; }
if let Event ::PaymentSent { payment_preimage , .. } = events [ 1 ] { assert_eq! ( payment_preimage , our_payment_preimage ) ; } else { panic! ( ) ; }
// Note that we don't get a PaymentPathSuccessful here as we leave the HTLC pending to avoid
// the double-claim that would otherwise appear at the end of this test.
2023-02-28 10:45:48 -08:00
nodes [ 0 ] . node . timer_tick_occurred ( ) ;
2023-02-22 02:40:59 +00:00
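// Having force-closed the now-stale channel on reload (see the OutdatedChannelManager closure
// above), nodes[0] should have broadcast its latest commitment transaction.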
let as_broadcasted_txn = nodes [ 0 ] . tx_broadcaster . txn_broadcasted . lock ( ) . unwrap ( ) . split_off ( 0 ) ;
assert_eq! ( as_broadcasted_txn . len ( ) , 1 ) ;
// Ensure that, even after some time, the current `ChannelManager` still includes *something*
// which prevents a spurious `PaymentFailed` when we restart, even if pending resolved payments
// have since been timed out thanks to `IDEMPOTENCY_TIMEOUT_TICKS`.
// A naive implementation of the fix here would wipe the pending payments set, causing a
// failure event when we restart.
for _ in 0 .. ( IDEMPOTENCY_TIMEOUT_TICKS * 2 ) { nodes [ 0 ] . node . timer_tick_occurred ( ) ; }
let chan_0_monitor_serialized = get_monitor! ( nodes [ 0 ] , chan_id ) . encode ( ) ;
reload_node! ( nodes [ 0 ] , test_default_channel_config ( ) , & nodes [ 0 ] . node . encode ( ) , & [ & chan_0_monitor_serialized ] , persister_b , chain_monitor_b , nodes_0_deserialized_b ) ;
let events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert! ( events . is_empty ( ) ) ;
// Ensure that we don't generate any further events even after the channel-closing commitment
// transaction is confirmed on-chain.
confirm_transaction ( & nodes [ 0 ] , & as_broadcasted_txn [ 0 ] ) ;
for _ in 0 .. ( IDEMPOTENCY_TIMEOUT_TICKS * 2 ) { nodes [ 0 ] . node . timer_tick_occurred ( ) ; }
let events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert! ( events . is_empty ( ) ) ;
let chan_0_monitor_serialized = get_monitor! ( nodes [ 0 ] , chan_id ) . encode ( ) ;
reload_node! ( nodes [ 0 ] , test_default_channel_config ( ) , & nodes [ 0 ] . node . encode ( ) , & [ & chan_0_monitor_serialized ] , persister_c , chain_monitor_c , nodes_0_deserialized_c ) ;
let events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
assert! ( events . is_empty ( ) ) ;
2023-04-06 19:56:01 +00:00
check_added_monitors ( & nodes [ 0 ] , 1 ) ;
2023-02-22 02:40:59 +00:00
}
#[ test ]
fn no_missing_sent_on_midpoint_reload ( ) {
2023-07-28 05:30:24 +00:00
do_no_missing_sent_on_reload ( false , true ) ;
do_no_missing_sent_on_reload ( true , true ) ;
}
#[ test ]
fn no_missing_sent_on_reload ( ) {
do_no_missing_sent_on_reload ( false , false ) ;
do_no_missing_sent_on_reload ( true , false ) ;
2023-02-22 02:40:59 +00:00
}
2023-04-04 04:12:55 +00:00
fn do_claim_from_closed_chan ( fail_payment : bool ) {
// Previously, LDK would refuse to claim a payment if a channel on which the payment was
// received had been closed between when the HTLC was received and when we went to claim it.
// This makes sense in the payment case - why pay an on-chain fee to claim the HTLC when
// presumably the sender may retry later. Long ago it also reduced total code in the claim
// pipeline.
//
// However, this doesn't make sense if you're trying to do an atomic swap or some other
// protocol that requires atomicity with some other action - if your money got claimed
// elsewhere you need to be able to claim the HTLC in lightning no matter what. Further, this
// is an over-optimization - there should be a very, very low likelihood that a channel closes
// between when we receive the last HTLC for a payment and the user goes to claim the payment.
// Since we now have code to handle this anyway we should allow it.
// Build 4 nodes and send an MPP payment across two paths. By building the route manually we set
// the CLTVs on the paths to different values, resulting in a different claim deadline.
let chanmon_cfgs = create_chanmon_cfgs ( 4 ) ;
let node_cfgs = create_node_cfgs ( 4 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 4 , & node_cfgs , & [ None , None , None , None ] ) ;
let mut nodes = create_network ( 4 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) ;
create_announced_chan_between_nodes_with_value ( & nodes , 0 , 2 , 1_000_000 , 0 ) ;
let chan_bd = create_announced_chan_between_nodes_with_value ( & nodes , 1 , 3 , 1_000_000 , 0 ) . 2 ;
create_announced_chan_between_nodes ( & nodes , 2 , 3 ) ;
let ( payment_preimage , payment_hash , payment_secret ) = get_payment_preimage_hash! ( nodes [ 3 ] ) ;
2023-08-31 12:25:38 +02:00
let mut route_params = RouteParameters ::from_payment_params_and_value (
PaymentParameters ::from_node_id ( nodes [ 3 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
2023-04-29 15:37:51 -04:00
. with_bolt11_features ( nodes [ 1 ] . node . invoice_features ( ) ) . unwrap ( ) ,
2023-08-31 12:25:38 +02:00
10_000_000 ) ;
2023-04-04 04:12:55 +00:00
let mut route = nodes [ 0 ] . router . find_route ( & nodes [ 0 ] . node . get_our_node_id ( ) , & route_params ,
2023-07-18 19:41:07 +00:00
None , nodes [ 0 ] . node . compute_inflight_htlcs ( ) ) . unwrap ( ) ;
2023-04-04 04:12:55 +00:00
// Make sure the route is ordered with the B->D path before the C->D path
2023-04-09 13:50:44 -04:00
route . paths . sort_by ( | a , _ | if a . hops [ 0 ] . pubkey = = nodes [ 1 ] . node . get_our_node_id ( ) {
2023-04-04 04:12:55 +00:00
std ::cmp ::Ordering ::Less } else { std ::cmp ::Ordering ::Greater } ) ;
// Note that we add an extra 1 in the send pipeline to compensate for any blocks found while
// the HTLC is being relayed.
2023-04-09 13:50:44 -04:00
route . paths [ 0 ] . hops [ 1 ] . cltv_expiry_delta = TEST_FINAL_CLTV + 8 ;
route . paths [ 1 ] . hops [ 1 ] . cltv_expiry_delta = TEST_FINAL_CLTV + 12 ;
2023-04-04 04:12:55 +00:00
let final_cltv = nodes [ 0 ] . best_block_info ( ) . 1 + TEST_FINAL_CLTV + 8 + 1 ;
nodes [ 0 ] . router . expect_find_route ( route_params . clone ( ) , Ok ( route . clone ( ) ) ) ;
2023-04-07 16:29:19 +00:00
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields ::secret_only ( payment_secret ) ,
2023-04-04 04:12:55 +00:00
PaymentId ( payment_hash . 0 ) , route_params . clone ( ) , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
check_added_monitors ( & nodes [ 0 ] , 2 ) ;
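// Sort the per-path HTLC messages so that the B->D path is delivered first, matching the path
// ordering set up above.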
let mut send_msgs = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
send_msgs . sort_by ( | a , _ | {
let a_node_id =
if let MessageSendEvent ::UpdateHTLCs { node_id , .. } = a { node_id } else { panic! ( ) } ;
let node_b_id = nodes [ 1 ] . node . get_our_node_id ( ) ;
if * a_node_id = = node_b_id { std ::cmp ::Ordering ::Less } else { std ::cmp ::Ordering ::Greater }
} ) ;
assert_eq! ( send_msgs . len ( ) , 2 ) ;
pass_along_path ( & nodes [ 0 ] , & [ & nodes [ 1 ] , & nodes [ 3 ] ] , 10_000_000 ,
payment_hash , Some ( payment_secret ) , send_msgs . remove ( 0 ) , false , None ) ;
2023-04-04 04:21:45 +00:00
let receive_event = pass_along_path ( & nodes [ 0 ] , & [ & nodes [ 2 ] , & nodes [ 3 ] ] , 10_000_000 ,
2023-04-04 04:12:55 +00:00
payment_hash , Some ( payment_secret ) , send_msgs . remove ( 0 ) , true , None ) ;
2023-04-04 04:21:45 +00:00
match receive_event . unwrap ( ) {
Event ::PaymentClaimable { claim_deadline , .. } = > {
assert_eq! ( claim_deadline . unwrap ( ) , final_cltv - HTLC_FAIL_BACK_BUFFER ) ;
} ,
_ = > panic! ( ) ,
}
2023-04-04 04:12:55 +00:00
// Ensure that the claim_deadline is correct, with the payment failing at exactly the given
// height.
connect_blocks ( & nodes [ 3 ] , final_cltv - HTLC_FAIL_BACK_BUFFER - nodes [ 3 ] . best_block_info ( ) . 1
- if fail_payment { 0 } else { 2 } ) ;
if fail_payment {
// We fail the HTLC on the A->B->D path first as it expires 4 blocks earlier. We go ahead
// and expire both immediately, though, by connecting another 4 blocks.
let reason = HTLCDestination ::FailedPayment { payment_hash } ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( & nodes [ 3 ] , [ reason . clone ( ) ] ) ;
connect_blocks ( & nodes [ 3 ] , 4 ) ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( & nodes [ 3 ] , [ reason ] ) ;
2023-03-31 19:07:57 -05:00
pass_failed_payment_back ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] , & nodes [ 3 ] ] , & [ & nodes [ 2 ] , & nodes [ 3 ] ] ] , false , payment_hash , PaymentFailureReason ::RecipientRejected ) ;
2023-04-04 04:12:55 +00:00
} else {
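// Force-close the B<->D channel before the recipient claims, so that the claim on that path has
// to be resolved on-chain.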
nodes [ 1 ] . node . force_close_broadcasting_latest_txn ( & chan_bd , & nodes [ 3 ] . node . get_our_node_id ( ) ) . unwrap ( ) ;
2023-07-12 14:58:22 +03:00
check_closed_event! ( & nodes [ 1 ] , 1 , ClosureReason ::HolderForceClosed , false ,
[ nodes [ 3 ] . node . get_our_node_id ( ) ] , 1000000 ) ;
2023-04-04 04:12:55 +00:00
check_closed_broadcast ( & nodes [ 1 ] , 1 , true ) ;
let bs_tx = nodes [ 1 ] . tx_broadcaster . txn_broadcasted . lock ( ) . unwrap ( ) . split_off ( 0 ) ;
assert_eq! ( bs_tx . len ( ) , 1 ) ;
mine_transaction ( & nodes [ 3 ] , & bs_tx [ 0 ] ) ;
check_added_monitors ( & nodes [ 3 ] , 1 ) ;
check_closed_broadcast ( & nodes [ 3 ] , 1 , true ) ;
2023-07-12 14:58:22 +03:00
check_closed_event! ( & nodes [ 3 ] , 1 , ClosureReason ::CommitmentTxConfirmed , false ,
[ nodes [ 1 ] . node . get_our_node_id ( ) ] , 1000000 ) ;
2023-04-04 04:12:55 +00:00
nodes [ 3 ] . node . claim_funds ( payment_preimage ) ;
check_added_monitors ( & nodes [ 3 ] , 2 ) ;
expect_payment_claimed! ( nodes [ 3 ] , payment_hash , 10_000_000 ) ;
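// nodes[3] claims the B->D HTLC on-chain by spending nodes[1]'s commitment transaction, while
// the C->D HTLC is claimed via normal update_fulfill_htlc messages below.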
let ds_tx = nodes [ 3 ] . tx_broadcaster . txn_broadcasted . lock ( ) . unwrap ( ) . split_off ( 0 ) ;
assert_eq! ( ds_tx . len ( ) , 1 ) ;
check_spends! ( & ds_tx [ 0 ] , & bs_tx [ 0 ] ) ;
mine_transactions ( & nodes [ 1 ] , & [ & bs_tx [ 0 ] , & ds_tx [ 0 ] ] ) ;
check_added_monitors ( & nodes [ 1 ] , 1 ) ;
expect_payment_forwarded! ( nodes [ 1 ] , nodes [ 0 ] , nodes [ 3 ] , Some ( 1000 ) , false , true ) ;
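// Once nodes[1] sees the on-chain claim it can fulfill the HTLC back to nodes[0], finally
// generating the PaymentSent event.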
let bs_claims = nodes [ 1 ] . node . get_and_clear_pending_msg_events ( ) ;
check_added_monitors ( & nodes [ 1 ] , 1 ) ;
assert_eq! ( bs_claims . len ( ) , 1 ) ;
if let MessageSendEvent ::UpdateHTLCs { updates , .. } = & bs_claims [ 0 ] {
nodes [ 0 ] . node . handle_update_fulfill_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & updates . update_fulfill_htlcs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , updates . commitment_signed , false , true ) ;
} else { panic! ( ) ; }
expect_payment_sent! ( nodes [ 0 ] , payment_preimage ) ;
let ds_claim_msgs = nodes [ 3 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( ds_claim_msgs . len ( ) , 1 ) ;
let cs_claim_msgs = if let MessageSendEvent ::UpdateHTLCs { updates , .. } = & ds_claim_msgs [ 0 ] {
nodes [ 2 ] . node . handle_update_fulfill_htlc ( & nodes [ 3 ] . node . get_our_node_id ( ) , & updates . update_fulfill_htlcs [ 0 ] ) ;
let cs_claim_msgs = nodes [ 2 ] . node . get_and_clear_pending_msg_events ( ) ;
check_added_monitors ( & nodes [ 2 ] , 1 ) ;
commitment_signed_dance! ( nodes [ 2 ] , nodes [ 3 ] , updates . commitment_signed , false , true ) ;
expect_payment_forwarded! ( nodes [ 2 ] , nodes [ 0 ] , nodes [ 3 ] , Some ( 1000 ) , false , false ) ;
cs_claim_msgs
} else { panic! ( ) ; } ;
assert_eq! ( cs_claim_msgs . len ( ) , 1 ) ;
if let MessageSendEvent ::UpdateHTLCs { updates , .. } = & cs_claim_msgs [ 0 ] {
nodes [ 0 ] . node . handle_update_fulfill_htlc ( & nodes [ 2 ] . node . get_our_node_id ( ) , & updates . update_fulfill_htlcs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 2 ] , updates . commitment_signed , false , true ) ;
} else { panic! ( ) ; }
expect_payment_path_successful! ( nodes [ 0 ] ) ;
}
}
#[ test ]
fn claim_from_closed_chan ( ) {
do_claim_from_closed_chan ( true ) ;
do_claim_from_closed_chan ( false ) ;
}
2023-04-08 01:17:51 +00:00
2023-05-17 18:40:18 -05:00
#[ test ]
2023-06-08 12:08:25 -05:00
fn test_custom_tlvs_basic ( ) {
do_test_custom_tlvs ( false , false , false ) ;
do_test_custom_tlvs ( true , false , false ) ;
2023-05-17 18:40:18 -05:00
}
2023-06-08 12:08:25 -05:00
#[ test ]
fn test_custom_tlvs_explicit_claim ( ) {
// Test that, when receiving unknown even custom TLVs, the user must explicitly accept them
// before the payment can be claimed.
do_test_custom_tlvs ( false , true , false ) ;
do_test_custom_tlvs ( false , true , true ) ;
}
fn do_test_custom_tlvs ( spontaneous : bool , even_tlvs : bool , known_tlvs : bool ) {
2023-05-17 18:40:18 -05:00
let chanmon_cfgs = create_chanmon_cfgs ( 2 ) ;
let node_cfgs = create_node_cfgs ( 2 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 2 , & node_cfgs , & [ None ; 2 ] ) ;
let mut nodes = create_network ( 2 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) ;
let amt_msat = 100_000 ;
let ( mut route , our_payment_hash , our_payment_preimage , our_payment_secret ) = get_route_and_payment_hash! ( & nodes [ 0 ] , & nodes [ 1 ] , amt_msat ) ;
let payment_id = PaymentId ( our_payment_hash . 0 ) ;
let custom_tlvs = vec! [
2023-06-08 12:08:25 -05:00
( if even_tlvs { 5482373482 } else { 5482373483 } , vec! [ 1 , 2 , 3 , 4 ] ) ,
2023-05-17 18:40:18 -05:00
( 5482373487 , vec! [ 0x42 u8 ; 16 ] ) ,
] ;
let onion_fields = RecipientOnionFields {
payment_secret : if spontaneous { None } else { Some ( our_payment_secret ) } ,
payment_metadata : None ,
custom_tlvs : custom_tlvs . clone ( )
} ;
if spontaneous {
nodes [ 0 ] . node . send_spontaneous_payment ( & route , Some ( our_payment_preimage ) , onion_fields , payment_id ) . unwrap ( ) ;
} else {
nodes [ 0 ] . node . send_payment_with_route ( & route , our_payment_hash , onion_fields , payment_id ) . unwrap ( ) ;
}
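// Deliver the HTLC to nodes[1] and check that the custom TLVs survive into the PaymentClaimable
// event's onion_fields.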
check_added_monitors ( & nodes [ 0 ] , 1 ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
let ev = remove_first_msg_event_to_node ( & nodes [ 1 ] . node . get_our_node_id ( ) , & mut events ) ;
let mut payment_event = SendEvent ::from_event ( ev ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & payment_event . msgs [ 0 ] ) ;
check_added_monitors! ( & nodes [ 1 ] , 0 ) ;
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , payment_event . commitment_msg , false ) ;
expect_pending_htlcs_forwardable! ( nodes [ 1 ] ) ;
let events = nodes [ 1 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match events [ 0 ] {
2023-08-16 13:04:33 -05:00
Event ::PaymentClaimable { ref onion_fields , .. } = > {
2023-05-17 18:40:18 -05:00
assert_eq! ( onion_fields . clone ( ) . unwrap ( ) . custom_tlvs ( ) . clone ( ) , custom_tlvs ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
2023-06-08 12:08:25 -05:00
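// Unknown even TLVs must be explicitly accepted via claim_funds_with_known_custom_tlvs; a plain
// claim_funds in that case fails the payment back to the sender.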
match ( known_tlvs , even_tlvs ) {
( true , _ ) = > {
nodes [ 1 ] . node . claim_funds_with_known_custom_tlvs ( our_payment_preimage ) ;
let expected_total_fee_msat = pass_claimed_payment_along_route ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] ] ] , & [ 0 ; 1 ] , false , our_payment_preimage ) ;
expect_payment_sent! ( & nodes [ 0 ] , our_payment_preimage , Some ( expected_total_fee_msat ) ) ;
} ,
( false , false ) = > {
claim_payment ( & nodes [ 0 ] , & [ & nodes [ 1 ] ] , our_payment_preimage ) ;
} ,
( false , true ) = > {
nodes [ 1 ] . node . claim_funds ( our_payment_preimage ) ;
let expected_destinations = vec! [ HTLCDestination ::FailedPayment { payment_hash : our_payment_hash } ] ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( nodes [ 1 ] , expected_destinations ) ;
pass_failed_payment_back ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] ] ] , false , our_payment_hash , PaymentFailureReason ::RecipientRejected ) ;
}
}
2023-05-17 18:40:18 -05:00
}
#[ test ]
fn test_retry_custom_tlvs ( ) {
// Test that custom TLVs are successfully sent on retries
let chanmon_cfgs = create_chanmon_cfgs ( 3 ) ;
let node_cfgs = create_node_cfgs ( 3 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 3 , & node_cfgs , & [ None , None , None ] ) ;
let nodes = create_network ( 3 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes ( & nodes , 0 , 1 ) ;
let ( chan_2_update , _ , chan_2_id , _ ) = create_announced_chan_between_nodes ( & nodes , 2 , 1 ) ;
// Rebalance
send_payment ( & nodes [ 2 ] , & vec! ( & nodes [ 1 ] ) [ .. ] , 1_500_000 ) ;
let amt_msat = 1_000_000 ;
let ( route , payment_hash , payment_preimage , payment_secret ) =
get_route_and_payment_hash! ( nodes [ 0 ] , nodes [ 2 ] , amt_msat ) ;
// Initiate the payment
let payment_id = PaymentId ( payment_hash . 0 ) ;
2023-08-31 15:10:09 +02:00
let mut route_params = route . route_params . clone ( ) . unwrap ( ) ;
2023-05-17 18:40:18 -05:00
let custom_tlvs = vec! [ ( ( 1 < < 16 ) + 1 , vec! [ 0x42 u8 ; 16 ] ) ] ;
let onion_fields = RecipientOnionFields ::secret_only ( payment_secret ) ;
let onion_fields = onion_fields . with_custom_tlvs ( custom_tlvs . clone ( ) ) . unwrap ( ) ;
nodes [ 0 ] . router . expect_find_route ( route_params . clone ( ) , Ok ( route . clone ( ) ) ) ;
nodes [ 0 ] . node . send_payment ( payment_hash , onion_fields ,
payment_id , route_params . clone ( ) , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ; // one monitor per path
// Add the HTLC along the first hop.
2023-08-16 13:04:33 -05:00
let htlc_updates = get_htlc_update_msgs ( & nodes [ 0 ] , & nodes [ 1 ] . node . get_our_node_id ( ) ) ;
let msgs ::CommitmentUpdate { update_add_htlcs , commitment_signed , .. } = htlc_updates ;
assert_eq! ( update_add_htlcs . len ( ) , 1 ) ;
nodes [ 1 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & update_add_htlcs [ 0 ] ) ;
2023-05-17 18:40:18 -05:00
commitment_signed_dance! ( nodes [ 1 ] , nodes [ 0 ] , commitment_signed , false ) ;
// Attempt to forward the payment and complete the path's failure.
expect_pending_htlcs_forwardable! ( & nodes [ 1 ] ) ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( & nodes [ 1 ] ,
vec! [ HTLCDestination ::NextHopChannel {
node_id : Some ( nodes [ 2 ] . node . get_our_node_id ( ) ) ,
channel_id : chan_2_id
} ] ) ;
check_added_monitors! ( nodes [ 1 ] , 1 ) ;
2023-08-16 13:04:33 -05:00
let htlc_updates = get_htlc_update_msgs! ( nodes [ 1 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
let msgs ::CommitmentUpdate { update_fail_htlcs , commitment_signed , .. } = htlc_updates ;
assert_eq! ( update_fail_htlcs . len ( ) , 1 ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 1 ] . node . get_our_node_id ( ) , & update_fail_htlcs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 1 ] , commitment_signed , false ) ;
2023-05-17 18:40:18 -05:00
let mut events = nodes [ 0 ] . node . get_and_clear_pending_events ( ) ;
match events [ 1 ] {
Event ::PendingHTLCsForwardable { .. } = > { } ,
_ = > panic! ( " Unexpected event " )
}
events . remove ( 1 ) ;
expect_payment_failed_conditions_event ( events , payment_hash , false ,
PaymentFailedConditions ::new ( ) . mpp_parts_remain ( ) ) ;
// Rebalance the channel so the retry of the payment can succeed.
send_payment ( & nodes [ 2 ] , & vec! ( & nodes [ 1 ] ) [ .. ] , 1_500_000 ) ;
// Retry the payment and make sure it succeeds
route_params . payment_params . previously_failed_channels . push ( chan_2_update . contents . short_channel_id ) ;
nodes [ 0 ] . router . expect_find_route ( route_params , Ok ( route ) ) ;
nodes [ 0 ] . node . process_pending_htlc_forwards ( ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
let mut events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
let payment_claimable = pass_along_path ( & nodes [ 0 ] , & [ & nodes [ 1 ] , & nodes [ 2 ] ] , 1_000_000 ,
payment_hash , Some ( payment_secret ) , events . pop ( ) . unwrap ( ) , true , None ) . unwrap ( ) ;
2023-08-16 13:04:33 -05:00
match payment_claimable {
Event ::PaymentClaimable { onion_fields , .. } = > {
assert_eq! ( onion_fields . unwrap ( ) . custom_tlvs ( ) , & custom_tlvs ) ;
} ,
2023-05-17 18:40:18 -05:00
_ = > panic! ( " Unexpected event " ) ,
} ;
claim_payment_along_route ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] , & nodes [ 2 ] ] ] , false , payment_preimage ) ;
}
2023-05-19 15:37:47 -05:00
#[ test ]
fn test_custom_tlvs_consistency ( ) {
let even_type_1 = 1 < < 16 ;
let odd_type_1 = ( 1 < < 16 ) + 1 ;
let even_type_2 = ( 1 < < 16 ) + 2 ;
let odd_type_2 = ( 1 < < 16 ) + 3 ;
let value_1 = | | vec! [ 1 , 2 , 3 , 4 ] ;
let differing_value_1 = | | vec! [ 1 , 2 , 3 , 5 ] ;
let value_2 = | | vec! [ 42 u8 ; 16 ] ;
// Drop missing odd tlvs
do_test_custom_tlvs_consistency (
vec! [ ( odd_type_1 , value_1 ( ) ) , ( odd_type_2 , value_2 ( ) ) ] ,
vec! [ ( odd_type_1 , value_1 ( ) ) ] ,
Some ( vec! [ ( odd_type_1 , value_1 ( ) ) ] ) ,
) ;
// Drop non-matching odd tlvs
do_test_custom_tlvs_consistency (
vec! [ ( odd_type_1 , value_1 ( ) ) , ( odd_type_2 , value_2 ( ) ) ] ,
vec! [ ( odd_type_1 , differing_value_1 ( ) ) , ( odd_type_2 , value_2 ( ) ) ] ,
Some ( vec! [ ( odd_type_2 , value_2 ( ) ) ] ) ,
) ;
// Fail missing even tlvs
do_test_custom_tlvs_consistency (
vec! [ ( odd_type_1 , value_1 ( ) ) , ( even_type_2 , value_2 ( ) ) ] ,
vec! [ ( odd_type_1 , value_1 ( ) ) ] ,
None ,
) ;
// Fail non-matching even tlvs
do_test_custom_tlvs_consistency (
vec! [ ( even_type_1 , value_1 ( ) ) , ( odd_type_2 , value_2 ( ) ) ] ,
vec! [ ( even_type_1 , differing_value_1 ( ) ) , ( odd_type_2 , value_2 ( ) ) ] ,
None ,
) ;
}
fn do_test_custom_tlvs_consistency ( first_tlvs : Vec < ( u64 , Vec < u8 > ) > , second_tlvs : Vec < ( u64 , Vec < u8 > ) > ,
expected_receive_tlvs : Option < Vec < ( u64 , Vec < u8 > ) > > ) {
let chanmon_cfgs = create_chanmon_cfgs ( 4 ) ;
let node_cfgs = create_node_cfgs ( 4 , & chanmon_cfgs ) ;
let node_chanmgrs = create_node_chanmgrs ( 4 , & node_cfgs , & [ None , None , None , None ] ) ;
let nodes = create_network ( 4 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes_with_value ( & nodes , 0 , 1 , 100_000 , 0 ) ;
create_announced_chan_between_nodes_with_value ( & nodes , 0 , 2 , 100_000 , 0 ) ;
create_announced_chan_between_nodes_with_value ( & nodes , 1 , 3 , 100_000 , 0 ) ;
let chan_2_3 = create_announced_chan_between_nodes_with_value ( & nodes , 2 , 3 , 100_000 , 0 ) ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 3 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
. with_bolt11_features ( nodes [ 3 ] . node . invoice_features ( ) ) . unwrap ( ) ;
let mut route = get_route! ( nodes [ 0 ] , payment_params , 15_000_000 ) . unwrap ( ) ;
assert_eq! ( route . paths . len ( ) , 2 ) ;
route . paths . sort_by ( | path_a , _ | {
// Sort the paths so that the path through nodes[1] comes first
if path_a . hops [ 0 ] . pubkey = = nodes [ 1 ] . node . get_our_node_id ( ) {
core ::cmp ::Ordering ::Less } else { core ::cmp ::Ordering ::Greater }
} ) ;
let ( our_payment_preimage , our_payment_hash , our_payment_secret ) = get_payment_preimage_hash! ( & nodes [ 3 ] ) ;
let payment_id = PaymentId ( [ 42 ; 32 ] ) ;
let amt_msat = 15_000_000 ;
// Send first part
let onion_fields = RecipientOnionFields {
payment_secret : Some ( our_payment_secret ) ,
payment_metadata : None ,
custom_tlvs : first_tlvs
} ;
let session_privs = nodes [ 0 ] . node . test_add_new_pending_payment ( our_payment_hash ,
onion_fields . clone ( ) , payment_id , & route ) . unwrap ( ) ;
let cur_height = nodes [ 0 ] . best_block_info ( ) . 1 ;
nodes [ 0 ] . node . test_send_payment_along_path ( & route . paths [ 0 ] , & our_payment_hash ,
onion_fields . clone ( ) , amt_msat , cur_height , payment_id ,
& None , session_privs [ 0 ] ) . unwrap ( ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
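// Deliver the first path's HTLC; nodes[3] won't surface any event until the second part arrives.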
{
let mut events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
2023-08-16 13:04:33 -05:00
pass_along_path ( & nodes [ 0 ] , & [ & nodes [ 1 ] , & nodes [ 3 ] ] , amt_msat , our_payment_hash ,
Some ( our_payment_secret ) , events . pop ( ) . unwrap ( ) , false , None ) ;
2023-05-19 15:37:47 -05:00
}
assert! ( nodes [ 3 ] . node . get_and_clear_pending_events ( ) . is_empty ( ) ) ;
// Send second part
let onion_fields = RecipientOnionFields {
payment_secret : Some ( our_payment_secret ) ,
payment_metadata : None ,
custom_tlvs : second_tlvs
} ;
nodes [ 0 ] . node . test_send_payment_along_path ( & route . paths [ 1 ] , & our_payment_hash ,
onion_fields . clone ( ) , amt_msat , cur_height , payment_id , & None , session_privs [ 1 ] ) . unwrap ( ) ;
check_added_monitors! ( nodes [ 0 ] , 1 ) ;
{
let mut events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
let payment_event = SendEvent ::from_event ( events . pop ( ) . unwrap ( ) ) ;
nodes [ 2 ] . node . handle_update_add_htlc ( & nodes [ 0 ] . node . get_our_node_id ( ) , & payment_event . msgs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 2 ] , nodes [ 0 ] , payment_event . commitment_msg , false ) ;
expect_pending_htlcs_forwardable! ( nodes [ 2 ] ) ;
check_added_monitors! ( nodes [ 2 ] , 1 ) ;
let mut events = nodes [ 2 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
let payment_event = SendEvent ::from_event ( events . pop ( ) . unwrap ( ) ) ;
nodes [ 3 ] . node . handle_update_add_htlc ( & nodes [ 2 ] . node . get_our_node_id ( ) , & payment_event . msgs [ 0 ] ) ;
check_added_monitors! ( nodes [ 3 ] , 0 ) ;
commitment_signed_dance! ( nodes [ 3 ] , nodes [ 2 ] , payment_event . commitment_msg , true , true ) ;
}
expect_pending_htlcs_forwardable_ignore! ( nodes [ 3 ] ) ;
nodes [ 3 ] . node . process_pending_htlc_forwards ( ) ;
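// Depending on whether the two TLV sets could be reconciled, we either see a single
// PaymentClaimable carrying the expected TLVs or the HTLCs are failed back to nodes[0].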
if let Some ( expected_tlvs ) = expected_receive_tlvs {
// Claim and match expected
let events = nodes [ 3 ] . node . get_and_clear_pending_events ( ) ;
assert_eq! ( events . len ( ) , 1 ) ;
match events [ 0 ] {
2023-08-16 13:04:33 -05:00
Event ::PaymentClaimable { ref onion_fields , .. } = > {
2023-05-19 15:37:47 -05:00
assert_eq! ( onion_fields . clone ( ) . unwrap ( ) . custom_tlvs , expected_tlvs ) ;
} ,
_ = > panic! ( " Unexpected event " ) ,
}
2023-08-16 13:04:33 -05:00
do_claim_payment_along_route ( & nodes [ 0 ] , & [ & [ & nodes [ 1 ] , & nodes [ 3 ] ] , & [ & nodes [ 2 ] , & nodes [ 3 ] ] ] ,
false , our_payment_preimage ) ;
2023-07-28 05:30:24 +00:00
expect_payment_sent ( & nodes [ 0 ] , our_payment_preimage , Some ( Some ( 2000 ) ) , true , true ) ;
2023-05-19 15:37:47 -05:00
} else {
// Expect fail back
let expected_destinations = vec! [ HTLCDestination ::FailedPayment { payment_hash : our_payment_hash } ] ;
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( nodes [ 3 ] , expected_destinations ) ;
check_added_monitors! ( nodes [ 3 ] , 1 ) ;
let fail_updates_1 = get_htlc_update_msgs! ( nodes [ 3 ] , nodes [ 2 ] . node . get_our_node_id ( ) ) ;
nodes [ 2 ] . node . handle_update_fail_htlc ( & nodes [ 3 ] . node . get_our_node_id ( ) , & fail_updates_1 . update_fail_htlcs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 2 ] , nodes [ 3 ] , fail_updates_1 . commitment_signed , false ) ;
2023-08-16 13:04:33 -05:00
expect_pending_htlcs_forwardable_and_htlc_handling_failed! ( nodes [ 2 ] , vec! [
HTLCDestination ::NextHopChannel {
node_id : Some ( nodes [ 3 ] . node . get_our_node_id ( ) ) ,
channel_id : chan_2_3 . 2
} ] ) ;
2023-05-19 15:37:47 -05:00
check_added_monitors! ( nodes [ 2 ] , 1 ) ;
let fail_updates_2 = get_htlc_update_msgs! ( nodes [ 2 ] , nodes [ 0 ] . node . get_our_node_id ( ) ) ;
nodes [ 0 ] . node . handle_update_fail_htlc ( & nodes [ 2 ] . node . get_our_node_id ( ) , & fail_updates_2 . update_fail_htlcs [ 0 ] ) ;
commitment_signed_dance! ( nodes [ 0 ] , nodes [ 2 ] , fail_updates_2 . commitment_signed , false ) ;
2023-08-16 13:04:33 -05:00
expect_payment_failed_conditions ( & nodes [ 0 ] , our_payment_hash , true ,
PaymentFailedConditions ::new ( ) . mpp_parts_remain ( ) ) ;
2023-05-19 15:37:47 -05:00
}
}
2023-04-08 01:17:51 +00:00
fn do_test_payment_metadata_consistency ( do_reload : bool , do_modify : bool ) {
// Check that if the payment metadata received on one HTLC doesn't match the metadata received
// on another, the HTLC is rejected.
//
// We first set up a diamond shaped network, allowing us to split a payment into two HTLCs, the
// first of which we'll deliver and the second of which we'll fail and then re-send with
// modified payment metadata, which will in turn result in it being failed by the recipient.
let chanmon_cfgs = create_chanmon_cfgs ( 4 ) ;
let node_cfgs = create_node_cfgs ( 4 , & chanmon_cfgs ) ;
2023-08-15 19:19:03 +00:00
let persister ;
let new_chain_monitor ;
2023-04-08 01:17:51 +00:00
let mut config = test_default_channel_config ( ) ;
config . channel_handshake_config . max_inbound_htlc_value_in_flight_percent_of_channel = 50 ;
let node_chanmgrs = create_node_chanmgrs ( 4 , & node_cfgs , & [ None , Some ( config ) , Some ( config ) , Some ( config ) ] ) ;
let nodes_0_deserialized ;
let mut nodes = create_network ( 4 , & node_cfgs , & node_chanmgrs ) ;
create_announced_chan_between_nodes_with_value ( & nodes , 0 , 1 , 1_000_000 , 0 ) ;
let chan_id_bd = create_announced_chan_between_nodes_with_value ( & nodes , 1 , 3 , 1_000_000 , 0 ) . 2 ;
create_announced_chan_between_nodes_with_value ( & nodes , 0 , 2 , 1_000_000 , 0 ) ;
let chan_id_cd = create_announced_chan_between_nodes_with_value ( & nodes , 2 , 3 , 1_000_000 , 0 ) . 2 ;
// Pay more than half of each channel's max, requiring MPP
let amt_msat = 750_000_000 ;
let ( payment_preimage , payment_hash , payment_secret ) = get_payment_preimage_hash! ( nodes [ 3 ] , Some ( amt_msat ) ) ;
let payment_id = PaymentId ( payment_hash . 0 ) ;
let payment_metadata = vec! [ 44 , 49 , 52 , 142 ] ;
let payment_params = PaymentParameters ::from_node_id ( nodes [ 3 ] . node . get_our_node_id ( ) , TEST_FINAL_CLTV )
2023-04-29 15:37:51 -04:00
. with_bolt11_features ( nodes [ 1 ] . node . invoice_features ( ) ) . unwrap ( ) ;
2023-08-31 12:25:38 +02:00
let mut route_params = RouteParameters ::from_payment_params_and_value ( payment_params , amt_msat ) ;
2023-04-08 01:17:51 +00:00
// Send the MPP payment, delivering the updated commitment state to nodes[1].
nodes [ 0 ] . node . send_payment ( payment_hash , RecipientOnionFields {
2023-05-16 17:56:28 -05:00
payment_secret : Some ( payment_secret ) , payment_metadata : Some ( payment_metadata ) , custom_tlvs : vec ! [ ] ,
2023-04-08 01:17:51 +00:00
} , payment_id , route_params . clone ( ) , Retry ::Attempts ( 1 ) ) . unwrap ( ) ;
check_added_monitors! ( nodes [ 0 ] , 2 ) ;
let mut send_events = nodes [ 0 ] . node . get_and_clear_pending_msg_events ( ) ;
assert_eq! ( send_events . len ( ) , 2 ) ;
let first_send = SendEvent ::from_event ( send_events . pop ( ) . unwrap ( ) ) ;
let second_send = SendEvent ::from_event ( send_events . pop ( ) . unwrap ( ) ) ;
	let (b_recv_ev, c_recv_ev) = if first_send.node_id == nodes[1].node.get_our_node_id() {
		(&first_send, &second_send)
	} else {
		(&second_send, &first_send)
	};
	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &b_recv_ev.msgs[0]);
	commitment_signed_dance!(nodes[1], nodes[0], b_recv_ev.commitment_msg, false, true);

	expect_pending_htlcs_forwardable!(nodes[1]);
	check_added_monitors(&nodes[1], 1);
	let b_forward_ev = SendEvent::from_node(&nodes[1]);
	nodes[3].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &b_forward_ev.msgs[0]);
	commitment_signed_dance!(nodes[3], nodes[1], b_forward_ev.commitment_msg, false, true);

	expect_pending_htlcs_forwardable!(nodes[3]);
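
	// nodes[3] now holds the first MPP part and waits for the rest of the payment to arrive.
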
	// Before delivering the second MPP HTLC to nodes[2], disconnect nodes[2] and nodes[3], which
	// will result in nodes[2] failing the HTLC back.
	nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id());
	nodes[3].node.peer_disconnected(&nodes[2].node.get_our_node_id());

	nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &c_recv_ev.msgs[0]);
	commitment_signed_dance!(nodes[2], nodes[0], c_recv_ev.commitment_msg, false, true);

	let cs_fail = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id());
	nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[2], cs_fail.commitment_signed, false, true);
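
	// nodes[0] should now see a retryable PaymentPathFailed for the failed part as well as a
	// PendingHTLCsForwardable event which will drive the retry below.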
	let payment_fail_retryable_evs = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(payment_fail_retryable_evs.len(), 2);
	if let Event::PaymentPathFailed { .. } = payment_fail_retryable_evs[0] {} else { panic!(); }
	if let Event::PendingHTLCsForwardable { .. } = payment_fail_retryable_evs[1] {} else { panic!(); }

	// Before we allow the HTLC to be retried, optionally change the payment_metadata we have
	// stored for our payment.
	if do_modify {
		nodes[0].node.test_set_payment_metadata(payment_id, Some(Vec::new()));
	}

	// Optionally reload nodes[3] to check that the payment_metadata is properly serialized with
	// the payment state.
	if do_reload {
		let mon_bd = get_monitor!(nodes[3], chan_id_bd).encode();
		let mon_cd = get_monitor!(nodes[3], chan_id_cd).encode();
		reload_node!(nodes[3], config, &nodes[3].node.encode(), &[&mon_bd, &mon_cd],
			persister, new_chain_monitor, nodes_0_deserialized);
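
		// Reloading nodes[3] drops its in-memory peer connections, so tell nodes[1] the peer
		// disconnected and reconnect the pair before continuing.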
		nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id());
		reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[3]));
	}
	let mut reconnect_args = ReconnectArgs::new(&nodes[2], &nodes[3]);
	reconnect_args.send_channel_ready = (true, true);
	reconnect_nodes(reconnect_args);

	// Create a new channel between C and D as A will refuse to retry on the existing one because
	// it just failed.
	let chan_id_cd_2 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).2;

	// Now retry the failed HTLC.
	nodes[0].node.process_pending_htlc_forwards();
	check_added_monitors(&nodes[0], 1);
	let as_resend = SendEvent::from_node(&nodes[0]);
	nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resend.msgs[0]);
	commitment_signed_dance!(nodes[2], nodes[0], as_resend.commitment_msg, false, true);
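
	// nodes[2] forwards the retried HTLC on to nodes[3] over the newly-created C-D channel.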
	expect_pending_htlcs_forwardable!(nodes[2]);
	check_added_monitors(&nodes[2], 1);
	let cs_forward = SendEvent::from_node(&nodes[2]);
	nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &cs_forward.msgs[0]);
	commitment_signed_dance!(nodes[3], nodes[2], cs_forward.commitment_msg, false, true);

	// Finally, check that nodes[3] does the correct thing - either accepting the payment or, if
	// the payment metadata was modified, failing only the one modified HTLC and retaining the
	// other.
	if do_modify {
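		// Only the retried HTLC carries the modified (empty) metadata, so nodes[3] fails it back
		// to nodes[2] while keeping the first part pending.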
		expect_pending_htlcs_forwardable_ignore!(nodes[3]);
		nodes[3].node.process_pending_htlc_forwards();
		expect_pending_htlcs_forwardable_conditions(nodes[3].node.get_and_clear_pending_events(),
			&[HTLCDestination::FailedPayment { payment_hash }]);
		nodes[3].node.process_pending_htlc_forwards();

		check_added_monitors(&nodes[3], 1);
		let ds_fail = get_htlc_update_msgs(&nodes[3], &nodes[2].node.get_our_node_id());

		nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]);
		commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true);
		expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(),
			&[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd_2 }]);
	} else {
		expect_pending_htlcs_forwardable!(nodes[3]);
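		// With consistent metadata both parts arrive and the full payment becomes claimable.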
		expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat);
		claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
	}
}
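
// Exercise every combination of reloading nodes[3] and modifying the stored payment_metadata.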
#[test]
fn test_payment_metadata_consistency() {
	do_test_payment_metadata_consistency(true, true);
	do_test_payment_metadata_consistency(true, false);
	do_test_payment_metadata_consistency(false, true);
	do_test_payment_metadata_consistency(false, false);
}