mirror of https://github.com/lightningdevkit/rust-lightning.git (synced 2025-02-25 15:20:24 +01:00)

commit 182affc03b (parent 224fb05cc5)

Test the various shutdown handling updates

3 changed files with 437 additions and 37 deletions
@@ -3642,6 +3642,30 @@ mod tests {
        }
    }

    macro_rules! get_closing_signed_broadcast {
        ($node: expr, $dest_pubkey: expr) => {
            {
                let events = $node.get_and_clear_pending_msg_events();
                assert!(events.len() == 1 || events.len() == 2);
                (match events[events.len() - 1] {
                    MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                        assert_eq!(msg.contents.flags & 2, 2);
                        msg.clone()
                    },
                    _ => panic!("Unexpected event"),
                }, if events.len() == 2 {
                    match events[0] {
                        MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
                            assert_eq!(*node_id, $dest_pubkey);
                            Some(msg.clone())
                        },
                        _ => panic!("Unexpected event"),
                    }
                } else { None })
            }
        }
    }
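
    // (Reading the helper above: it drains the node's pending message events and
    // returns a (ChannelUpdate, Option<ClosingSigned>) pair. The last event must
    // be a BroadcastChannelUpdate with the channel-disabled bit set in
    // contents.flags (the `& 2` check), and when two events are queued the first
    // must be a SendClosingSigned addressed to $dest_pubkey, returned as Some.)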

    fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) {
        let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
        let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
@@ -3673,29 +3697,6 @@ mod tests {
            })
        };

        macro_rules! get_closing_signed_broadcast {
            ($node: expr, $dest_pubkey: expr) => {
                {
                    let events = $node.get_and_clear_pending_msg_events();
                    assert!(events.len() == 1 || events.len() == 2);
                    (match events[events.len() - 1] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                            msg.clone()
                        },
                        _ => panic!("Unexpected event"),
                    }, if events.len() == 2 {
                        match events[0] {
                            MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
                                assert_eq!(*node_id, $dest_pubkey);
                                Some(msg.clone())
                            },
                            _ => panic!("Unexpected event"),
                        }
                    } else { None })
                }
            }
        }

        node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b).unwrap();
        let (as_update, bs_update) = if close_inbound_first {
            assert!(node_a.get_and_clear_pending_msg_events().is_empty());
@@ -3764,35 +3765,41 @@ mod tests {
    }

    macro_rules! commitment_signed_dance {
        ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
        ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => {
            {
                check_added_monitors!($node_a, 0);
                assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
                $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
                let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
                check_added_monitors!($node_a, 1);
                commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false);
            }
        };
        ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
            {
                let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
                check_added_monitors!($node_b, 0);
                assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
                $node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap();
                assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
                check_added_monitors!($node_b, 1);
                $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed).unwrap();
                let bs_revoke_and_ack = get_event_msg!($node_b, MessageSendEvent::SendRevokeAndACK, $node_a.node.get_our_node_id());
                let (bs_revoke_and_ack, extra_msg_option) = {
                    let events = $node_b.node.get_and_clear_pending_msg_events();
                    assert!(events.len() <= 2);
                    (match events[0] {
                        MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
                            assert_eq!(*node_id, $node_a.node.get_our_node_id());
                            (*msg).clone()
                        },
                        _ => panic!("Unexpected event"),
                    }, events.get(1).map(|e| e.clone()))
                };
                check_added_monitors!($node_b, 1);
                if $fail_backwards {
                    assert!($node_a.node.get_and_clear_pending_events().is_empty());
                    assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
                }
                $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
                if $fail_backwards {
                    let channel_state = $node_a.node.channel_state.lock().unwrap();
                    assert_eq!(channel_state.pending_msg_events.len(), 1);
                    if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
                        assert_ne!(*node_id, $node_b.node.get_our_node_id());
                    } else { panic!("Unexpected event"); }
                } else {
                    assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
                }
                {
                    let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap();
                    if $fail_backwards {

@@ -3803,6 +3810,26 @@ mod tests {
                    }
                    added_monitors.clear();
                }
                extra_msg_option
            }
        };
        ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => {
            {
                assert!(commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true).is_none());
            }
        };
        ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
            {
                commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
                if $fail_backwards {
                    let channel_state = $node_a.node.channel_state.lock().unwrap();
                    assert_eq!(channel_state.pending_msg_events.len(), 1);
                    if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
                        assert_ne!(*node_id, $node_b.node.get_our_node_id());
                    } else { panic!("Unexpected event"); }
                } else {
                    assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
                }
            }
        }
    }
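
    // (A rough map of the macro above; my summary, not text from the commit:
    // $node_a is handed a commitment_signed, replies with revoke_and_ack plus
    // its own commitment_signed, and $node_b answers with a final
    // revoke_and_ack, possibly followed by one extra queued message. The new
    // `true /* return extra message */` arm surfaces that extra message as an
    // Option<MessageSendEvent>, which lets the shutdown tests catch a
    // closing_signed sent as soon as the commitment dance finishes.)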

@@ -4648,6 +4675,375 @@ mod tests {
        close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
    }

    #[test]
    fn pre_funding_lock_shutdown_test() {
        // Test sending a shutdown prior to funding_locked after funding generation
        let nodes = create_network(2);
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0);
        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
        nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);

        nodes[0].node.close_channel(&OutPoint::new(tx.txid(), 0).to_channel_id()).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();

        let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
        let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
        let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        assert!(node_0_none.is_none());

        assert!(nodes[0].node.list_channels().is_empty());
        assert!(nodes[1].node.list_channels().is_empty());
    }
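
    // (For reference, the cooperative close sequence the test above drives; a
    // sketch of my reading, not text from the commit:
    //   nodes[0] -> shutdown        (triggered by close_channel())
    //   nodes[1] -> shutdown
    //   nodes[0] -> closing_signed  (fee proposal)
    //   nodes[1] -> closing_signed + broadcast ChannelUpdate with disable bit
    //   nodes[0] ->                   broadcast ChannelUpdate with disable bit
    // after which both sides drop the channel from list_channels().)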

    #[test]
    fn updates_shutdown_wait() {
        // Test sending a shutdown with outstanding updates pending
        let mut nodes = create_network(3);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
        let route_1 = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
        let route_2 = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();

        let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);

        nodes[0].node.close_channel(&chan_1.2).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();

        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

        let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
        if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route_1, payment_hash) {}
        else { panic!("New sends should fail!") };
        if let Err(APIError::ChannelUnavailable {..}) = nodes[1].node.send_payment(route_2, payment_hash) {}
        else { panic!("New sends should fail!") };

        assert!(nodes[2].node.claim_funds(our_payment_preimage));
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
        assert!(updates.update_fail_malformed_htlcs.is_empty());
        assert!(updates.update_fee.is_none());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
        check_added_monitors!(nodes[1], 1);
        let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);

        assert!(updates_2.update_add_htlcs.is_empty());
        assert!(updates_2.update_fail_htlcs.is_empty());
        assert!(updates_2.update_fail_malformed_htlcs.is_empty());
        assert!(updates_2.update_fee.is_none());
        assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
        commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);

        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
            Event::PaymentSent { ref payment_preimage } => {
                assert_eq!(our_payment_preimage, *payment_preimage);
            },
            _ => panic!("Unexpected event"),
        }

        let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
        let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
        let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        assert!(node_0_none.is_none());

        assert!(nodes[0].node.list_channels().is_empty());

        assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
        nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
    }
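
    // (On the final broadcast check above: once the closing_signed exchange
    // completes, nodes[1] should have broadcast exactly one transaction for
    // chan_1 -- the cooperative closing transaction -- which the test clears
    // before closing chan_2. My reading of the assertion, not commit text.)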

    #[test]
    fn htlc_fail_async_shutdown() {
        // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order
        let mut nodes = create_network(3);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

        let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
        nodes[0].node.send_payment(route, our_payment_hash).unwrap();
        check_added_monitors!(nodes[0], 1);
        let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        assert_eq!(updates.update_add_htlcs.len(), 1);
        assert!(updates.update_fulfill_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
        assert!(updates.update_fail_malformed_htlcs.is_empty());
        assert!(updates.update_fee.is_none());

        nodes[1].node.close_channel(&chan_1.2).unwrap();
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());

        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
        check_added_monitors!(nodes[1], 1);
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
        commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);

        let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(updates_2.update_add_htlcs.is_empty());
        assert!(updates_2.update_fulfill_htlcs.is_empty());
        assert_eq!(updates_2.update_fail_htlcs.len(), 1);
        assert!(updates_2.update_fail_malformed_htlcs.is_empty());
        assert!(updates_2.update_fee.is_none());

        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]).unwrap();
        commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);

        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
            Event::PaymentFailed { ref payment_hash, ref rejected_by_dest } => {
                assert_eq!(our_payment_hash, *payment_hash);
                assert!(!rejected_by_dest);
            },
            _ => panic!("Unexpected event"),
        }

        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
        let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
        let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        assert!(node_0_none.is_none());

        assert!(nodes[0].node.list_channels().is_empty());

        assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
        nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
    }
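
    // (Why the HTLC fails instead of forwarding: nodes[1] has already sent its
    // shutdown on chan_1 when the out-of-order update_add_htlc and
    // commitment_signed arrive, so once the commitment dance settles it fails
    // the HTLC back rather than forwarding it to nodes[2]; rejected_by_dest is
    // false because an intermediate hop, not the destination, rejected it.
    // My summary, not text from the commit.)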

    #[test]
    fn update_fee_async_shutdown() {
        // Test update_fee works after shutdown start if messages are delivered out-of-order
        let nodes = create_network(2);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);

        let starting_feerate = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().get_feerate();
        nodes[0].node.update_fee(chan_1.2.clone(), starting_feerate + 20).unwrap();
        check_added_monitors!(nodes[0], 1);
        let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        assert!(updates.update_fulfill_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
        assert!(updates.update_fail_malformed_htlcs.is_empty());
        assert!(updates.update_fee.is_some());

        nodes[1].node.close_channel(&chan_1.2).unwrap();
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
        // Note that we don't actually test normative behavior here. The spec indicates we could
        // actually send a closing_signed here, but is kinda unclear and could possibly be amended
        // to require waiting on the full commitment dance before doing so (see
        // https://github.com/lightningnetwork/lightning-rfc/issues/499). In any case, to avoid
        // ambiguity, we should wait until after the full commitment dance to send closing_signed.
        let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());

        nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap()).unwrap();
        nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
        check_added_monitors!(nodes[1], 1);
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
        let node_0_closing_signed = commitment_signed_dance!(nodes[1], nodes[0], (), false, true, true);

        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), match node_0_closing_signed.unwrap() {
            MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
                assert_eq!(*node_id, nodes[1].node.get_our_node_id());
                msg
            },
            _ => panic!("Unexpected event"),
        }).unwrap();
        let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
        nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
        let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
        assert!(node_0_none.is_none());
    }
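
    // (Note on the six-argument dance invocation above: it returns the
    // Option<MessageSendEvent> from the final step, which here is nodes[0]'s
    // SendClosingSigned; the test unpacks it and feeds it to nodes[1] so that
    // fee negotiation starts only after the update_fee commitment dance
    // completes.)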

    fn do_test_shutdown_rebroadcast(recv_count: u8) {
        // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
        // messages delivered prior to disconnect
        let nodes = create_network(3);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);

        let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);

        nodes[1].node.close_channel(&chan_1.2).unwrap();
        let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        if recv_count > 0 {
            nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
            let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
            if recv_count > 1 {
                nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
            }
        }

        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
        let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
        nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
        let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

        nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish).unwrap();
        let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
        assert!(node_1_shutdown == node_1_2nd_shutdown);

        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish).unwrap();
        let node_0_2nd_shutdown = if recv_count > 0 {
            let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
            nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
            node_0_2nd_shutdown
        } else {
            assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
            nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
            get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
        };
        nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown).unwrap();

        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

        assert!(nodes[2].node.claim_funds(our_payment_preimage));
        check_added_monitors!(nodes[2], 1);
        let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
        assert!(updates.update_fail_htlcs.is_empty());
        assert!(updates.update_fail_malformed_htlcs.is_empty());
        assert!(updates.update_fee.is_none());
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
        nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
        check_added_monitors!(nodes[1], 1);
        let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);

        assert!(updates_2.update_add_htlcs.is_empty());
        assert!(updates_2.update_fail_htlcs.is_empty());
        assert!(updates_2.update_fail_malformed_htlcs.is_empty());
        assert!(updates_2.update_fee.is_none());
        assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
        nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
        commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);

        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
            Event::PaymentSent { ref payment_preimage } => {
                assert_eq!(our_payment_preimage, *payment_preimage);
            },
            _ => panic!("Unexpected event"),
        }

        let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
        if recv_count > 0 {
            nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
            let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
            assert!(node_1_closing_signed.is_some());
        }

        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);

        nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
        let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
        nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
        if recv_count == 0 {
            // If all closing_signeds weren't delivered we can just resume where we left off...
            let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

            nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish).unwrap();
            let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
            assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);

            nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish).unwrap();
            let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
            assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);

            nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown).unwrap();
            assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

            nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown).unwrap();
            let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
            assert!(node_0_closing_signed == node_0_2nd_closing_signed);

            nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed).unwrap();
            let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
            nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
            let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
            assert!(node_0_none.is_none());
        } else {
            // If one node, however, received + responded with an identical closing_signed we end
            // up erroring and node[0] will try to broadcast its own latest commitment transaction.
            // There isn't really anything better we can do simply, but in the future we might
            // explore storing a set of recently-closed channels that got disconnected during
            // closing_signed and avoiding broadcasting local commitment txn for some timeout to
            // give our counterparty enough time to (potentially) broadcast a cooperative closing
            // transaction.
            assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

            if let Err(msgs::HandleError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) =
                    nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) {
                nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
                let msgs::ErrorMessage {ref channel_id, ..} = msg;
                assert_eq!(*channel_id, chan_1.2);
            } else { panic!("Needed SendErrorMessage close"); }

            // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
            // checks it, but in this case nodes[0] didn't ever get a chance to receive a
            // closing_signed so we do it ourselves
            let events = nodes[0].node.get_and_clear_pending_msg_events();
            assert_eq!(events.len(), 1);
            match events[0] {
                MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                    assert_eq!(msg.contents.flags & 2, 2);
                },
                _ => panic!("Unexpected event"),
            }
        }

        assert!(nodes[0].node.list_channels().is_empty());

        assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
        nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
        close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
        assert!(nodes[1].node.list_channels().is_empty());
        assert!(nodes[2].node.list_channels().is_empty());
    }

    #[test]
    fn test_shutdown_rebroadcast() {
        do_test_shutdown_rebroadcast(0);
        do_test_shutdown_rebroadcast(1);
        do_test_shutdown_rebroadcast(2);
    }
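
    // (recv_count controls how much of the initial shutdown exchange was
    // delivered before the first disconnect: 0 = neither shutdown processed,
    // 1 = only nodes[0] processed nodes[1]'s shutdown, 2 = both sides did.
    // With recv_count > 0 a closing_signed is also delivered before the second
    // disconnect, which is what forces the error/force-close path on
    // reconnect. My summary, not text from the commit.)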

    #[test]
    fn fake_network_test() {
        // Simple test which builds a network of ChannelManagers, connects them to each other, and

@@ -151,6 +151,7 @@ pub struct Init {
}

/// An error message to be sent or received from a peer
#[derive(Clone)]
pub struct ErrorMessage {
    pub(crate) channel_id: [u8; 32],
    pub(crate) data: String,

@@ -235,14 +236,14 @@ pub struct FundingLocked {
}

/// A shutdown message to be sent or received from a peer
#[derive(Clone)]
#[derive(Clone, PartialEq)]
pub struct Shutdown {
    pub(crate) channel_id: [u8; 32],
    pub(crate) scriptpubkey: Script,
}

/// A closing_signed message to be sent or received from a peer
#[derive(Clone)]
#[derive(Clone, PartialEq)]
pub struct ClosingSigned {
    pub(crate) channel_id: [u8; 32],
    pub(crate) fee_satoshis: u64,

@@ -448,6 +449,7 @@ pub struct ChannelUpdate {
}

/// Used to put an error message in a HandleError
#[derive(Clone)]
pub enum ErrorAction {
    /// The peer took some action which made us think they were useless. Disconnect them.
    DisconnectPeer {

@@ -486,6 +488,7 @@ pub struct CommitmentUpdate {
/// The information we received from a peer along the route of a payment we originated. This is
/// returned by ChannelMessageHandler::handle_update_fail_htlc to be passed into
/// RoutingMessageHandler::handle_htlc_fail_channel_update to update our network map.
#[derive(Clone)]
pub enum HTLCFailChannelUpdate {
    /// We received an error which included a full ChannelUpdate message.
    ChannelUpdateMessage {
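
// (Why these derives: PartialEq on Shutdown and ClosingSigned lets
// do_test_shutdown_rebroadcast check that re-sent messages match the
// originals, e.g. `assert!(node_1_shutdown == node_1_2nd_shutdown)`, while
// the new Clone derives let messages and error actions be cloned out of
// pending message-event queues. My reading, not text from the commit.)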

@@ -103,6 +103,7 @@ pub enum Event {
/// An event generated by ChannelManager which indicates a message should be sent to a peer (or
/// broadcast to most peers).
/// These events are handled by PeerManager::process_events if you are using a PeerManager.
#[derive(Clone)]
pub enum MessageSendEvent {
    /// Used to indicate that we've accepted a channel open and should send the accept_channel
    /// message provided to the given peer.
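
// (MessageSendEvent needs Clone because the reworked commitment_signed_dance!
// clones a queued event out of the pending list via
// `events.get(1).map(|e| e.clone())`.)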