Merge pull request #2459 from valentinewallace/2023-07-reconnect-peers-test-util

Struct-ify `reconnect_nodes` test util args
Elias Rohrer 2023-07-31 15:25:05 +02:00 committed by GitHub
commit 7768b73251
6 changed files with 112 additions and 38 deletions
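In short, the change replaces the nine positional arguments of `reconnect_nodes` with a `ReconnectArgs` struct whose defaults mean "nothing pending", so each test overrides only the fields it cares about. A minimal before/after sketch, assembled from the hunks below (the particular field values are just illustrative):

  // Before: every call site spelled out all nine positional arguments.
  reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));

  // After: start from defaults and override only the relevant fields.
  let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
  reconnect_args.send_channel_ready = (true, true);
  reconnect_nodes(reconnect_args);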

View file

@@ -178,7 +178,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	if disconnect {
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		reconnect_args.send_channel_ready = (true, true);
+		reconnect_nodes(reconnect_args);
 	}
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -233,7 +235,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	if disconnect {
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	}
 	// ...and make sure we can force-close a frozen channel
@@ -1925,7 +1927,9 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 	// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+	reconnect_args.send_channel_ready.1 = confirm_a_first;
+	reconnect_nodes(reconnect_args);
 	// But we want to re-emit ChannelPending
 	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
@@ -2575,10 +2579,14 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	if second_fails {
-		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+		reconnect_args.pending_htlc_fails.0 = 1;
+		reconnect_nodes(reconnect_args);
 		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
 	} else {
-		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+		reconnect_args.pending_htlc_claims.0 = 1;
+		reconnect_nodes(reconnect_args);
 	}
 	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {

View file

@@ -2945,9 +2945,41 @@ macro_rules! handle_chan_reestablish_msgs {
 	}
 }
 
+pub struct ReconnectArgs<'a, 'b, 'c, 'd> {
+	pub node_a: &'a Node<'b, 'c, 'd>,
+	pub node_b: &'a Node<'b, 'c, 'd>,
+	pub send_channel_ready: (bool, bool),
+	pub pending_htlc_adds: (i64, i64),
+	pub pending_htlc_claims: (usize, usize),
+	pub pending_htlc_fails: (usize, usize),
+	pub pending_cell_htlc_claims: (usize, usize),
+	pub pending_cell_htlc_fails: (usize, usize),
+	pub pending_raa: (bool, bool),
+}
+
+impl<'a, 'b, 'c, 'd> ReconnectArgs<'a, 'b, 'c, 'd> {
+	pub fn new(node_a: &'a Node<'b, 'c, 'd>, node_b: &'a Node<'b, 'c, 'd>) -> Self {
+		Self {
+			node_a,
+			node_b,
+			send_channel_ready: (false, false),
+			pending_htlc_adds: (0, 0),
+			pending_htlc_claims: (0, 0),
+			pending_htlc_fails: (0, 0),
+			pending_cell_htlc_claims: (0, 0),
+			pending_cell_htlc_fails: (0, 0),
+			pending_raa: (false, false),
+		}
+	}
+}
+
 /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
 /// for claims/fails they are separated out.
-pub fn reconnect_nodes<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, send_channel_ready: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
+pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
+	let ReconnectArgs {
+		node_a, node_b, send_channel_ready, pending_htlc_adds, pending_htlc_claims, pending_htlc_fails,
+		pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa
+	} = args;
 	node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init {
 		features: node_b.node.init_features(), networks: None, remote_network_address: None
 	}, true).unwrap();

View file

@@ -3585,7 +3585,9 @@ fn test_dup_events_on_peer_disconnect() {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+	reconnect_args.pending_htlc_claims.0 = 1;
+	reconnect_nodes(reconnect_args);
 	expect_payment_path_successful!(nodes[0]);
 }
@@ -3642,7 +3644,9 @@ fn test_simple_peer_disconnect() {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+	reconnect_args.send_channel_ready = (true, true);
+	reconnect_nodes(reconnect_args);
 	let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
 	let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
@@ -3651,7 +3655,7 @@ fn test_simple_peer_disconnect() {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
 	let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -3664,7 +3668,10 @@ fn test_simple_peer_disconnect() {
 	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
 	fail_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_hash_5);
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
+	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+	reconnect_args.pending_cell_htlc_fails.0 = 1;
+	reconnect_args.pending_cell_htlc_claims.0 = 1;
+	reconnect_nodes(reconnect_args);
 	{
 		let events = nodes[0].node.get_and_clear_pending_events();
 		assert_eq!(events.len(), 4);
@@ -3776,19 +3783,29 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		}
 		// Even if the channel_ready messages get exchanged, as long as nothing further was
 		// received on either side, both sides will need to resend them.
-		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		reconnect_args.send_channel_ready = (true, true);
+		reconnect_args.pending_htlc_adds.1 = 1;
+		reconnect_nodes(reconnect_args);
 	} else if messages_delivered == 3 {
 		// nodes[0] still wants its RAA + commitment_signed
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		reconnect_args.pending_htlc_adds.0 = -1;
+		reconnect_args.pending_raa.0 = true;
+		reconnect_nodes(reconnect_args);
 	} else if messages_delivered == 4 {
 		// nodes[0] still wants its commitment_signed
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		reconnect_args.pending_htlc_adds.0 = -1;
+		reconnect_nodes(reconnect_args);
 	} else if messages_delivered == 5 {
 		// nodes[1] still wants its final RAA
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		reconnect_args.pending_raa.1 = true;
+		reconnect_nodes(reconnect_args);
 	} else if messages_delivered == 6 {
 		// Everything was delivered...
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	}
 	let events_1 = nodes[1].node.get_and_clear_pending_events();
@@ -3812,7 +3829,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	nodes[1].node.process_pending_htlc_forwards();
@@ -3896,7 +3913,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	if messages_delivered < 2 {
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		reconnect_args.pending_htlc_claims.0 = 1;
+		reconnect_nodes(reconnect_args);
 		if messages_delivered < 1 {
 			expect_payment_sent!(nodes[0], payment_preimage_1);
 		} else {
@@ -3904,16 +3923,23 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		}
 	} else if messages_delivered == 2 {
 		// nodes[0] still wants its RAA + commitment_signed
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		reconnect_args.pending_htlc_adds.1 = -1;
+		reconnect_args.pending_raa.1 = true;
+		reconnect_nodes(reconnect_args);
 	} else if messages_delivered == 3 {
 		// nodes[0] still wants its commitment_signed
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		reconnect_args.pending_htlc_adds.1 = -1;
+		reconnect_nodes(reconnect_args);
 	} else if messages_delivered == 4 {
 		// nodes[1] still wants its final RAA
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		reconnect_args.pending_raa.0 = true;
+		reconnect_nodes(reconnect_args);
 	} else if messages_delivered == 5 {
 		// Everything was delivered...
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	}
 	if messages_delivered == 1 || messages_delivered == 2 {
@@ -3923,7 +3949,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	}
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	if messages_delivered > 2 {
 		expect_payment_path_successful!(nodes[0]);

View file

@@ -597,7 +597,7 @@ fn test_onion_failure() {
 		nodes[1].node.get_and_clear_pending_msg_events();
 		nodes[2].node.get_and_clear_pending_msg_events();
 	}, true, Some(UPDATE|20), Some(NetworkUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy(short_channel_id)}), Some(short_channel_id));
-	reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
 	run_onion_failure_test("expiry_too_far", 0, &nodes, &route, &payment_hash, &payment_secret, |msg| {
 		let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();

View file

@@ -490,7 +490,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 	// nodes[1] now immediately fails the HTLC as the next-hop channel is disconnected
 	let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-	reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
 	let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan_id)[0].clone();
 	if confirm_before_reload {
@@ -789,7 +789,9 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	nodes[0].node.test_process_background_events();
 	check_added_monitors(&nodes[0], 1);
-	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+	reconnect_args.send_channel_ready = (true, true);
+	reconnect_nodes(reconnect_args);
 	// Now resend the payment, delivering the HTLC and actually claiming it this time. This ensures
 	// the payment is not (spuriously) listed as still pending.
@@ -817,7 +819,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) {
 	nodes[0].node.test_process_background_events();
 	check_added_monitors(&nodes[0], 1);
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	match nodes[0].node.send_payment_with_route(&new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) {
 		Err(PaymentSendFailure::DuplicatePayment) => {},
@@ -1011,7 +1013,7 @@ fn test_fulfill_restart_failure() {
 	reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	nodes[1].node.fail_htlc_backwards(&payment_hash);
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
@@ -3422,9 +3424,11 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) {
 		reload_node!(nodes[3], config, &nodes[3].node.encode(), &[&mon_bd, &mon_cd],
 			persister, new_chain_monitor, nodes_0_deserialized);
 		nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id());
-		reconnect_nodes(&nodes[1], &nodes[3], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[3]));
 	}
-	reconnect_nodes(&nodes[2], &nodes[3], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	let mut reconnect_args = ReconnectArgs::new(&nodes[2], &nodes[3]);
+	reconnect_args.send_channel_ready = (true, true);
+	reconnect_nodes(reconnect_args);
 	// Create a new channel between C and D as A will refuse to retry on the existing one because
 	// it just failed.

View file

@@ -52,7 +52,9 @@ fn test_funding_peer_disconnect() {
 	let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
 	assert!(events_1.is_empty());
-	reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+	reconnect_args.send_channel_ready.1 = true;
+	reconnect_nodes(reconnect_args);
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
@@ -180,7 +182,7 @@ fn test_funding_peer_disconnect() {
 	reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 }
 #[test]
@@ -334,7 +336,7 @@ fn test_simple_manager_serialize_deserialize() {
 	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 	reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
 	claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
@@ -456,8 +458,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	check_added_monitors!(nodes[0], 1);
 	// nodes[1] and nodes[2] have no lost state with nodes[0]...
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-	reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[2]));
 	//... and we can even still claim the payment!
 	claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
@@ -666,10 +668,12 @@ fn test_forwardable_regen() {
 	let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
 	reload_node!(nodes[1], nodes[1].node.encode(), &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	// Note that nodes[1] and nodes[2] resend their channel_ready here since they haven't updated
 	// the commitment state.
-	reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+	reconnect_args.send_channel_ready = (true, true);
+	reconnect_nodes(reconnect_args);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -967,7 +971,7 @@ fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_ht
 	check_added_monitors!(nodes[1], 1);
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	if use_cs_commitment {
 		// If we confirm a commitment transaction that has the HTLC on-chain, nodes[1] should wait
@@ -1085,7 +1089,7 @@ fn removed_payment_no_manager_persistence() {
 	// now forgotten everywhere. The ChannelManager should have, as a side-effect of reload,
 	// learned that the HTLC is gone from the ChannelMonitor and added it to the to-fail-back set.
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], [HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
 	check_added_monitors!(nodes[1], 1);