Merge pull request #1430 from vincenzopalazzo/macros/channel_reestablish_v2

Send a warning when we receive an old commitment transaction

Commit: 9fbafd4b6c
2 changed files with 49 additions and 14 deletions
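The key design point of this change is that a stale channel_reestablish now triggers a BOLT 1 warning message rather than an immediate force-close, giving the fallen-behind peer a chance to recover. The first hunk below adds the check in Channel::channel_reestablish; the second updates test_data_loss_protect accordingly. As a rough, self-contained sketch of the distinction (ChannelError mirrors the enum used in the diff, while WireAction and dispatch are hypothetical stand-ins for rust-lightning's ErrorAction plumbing):

	// Simplified sketch, not library code.
	enum ChannelError {
		Warn(String),  // recoverable: warn the peer, keep the channel open
		Close(String), // unrecoverable: send an error and force-close
	}

	enum WireAction {
		SendWarning { data: String },
		SendErrorAndClose { data: String },
	}

	fn dispatch(res: Result<(), ChannelError>) -> Option<WireAction> {
		match res {
			Ok(()) => None,
			// This PR routes a stale channel_reestablish here (Warn) instead
			// of Close, so the fallen-behind peer gets a chance to recover.
			Err(ChannelError::Warn(data)) => Some(WireAction::SendWarning { data }),
			Err(ChannelError::Close(data)) => Some(WireAction::SendErrorAndClose { data }),
		}
	}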
lightning/src/ln/channel.rs:

@@ -3756,6 +3756,15 @@ impl<Signer: Sign> Channel<Signer> {
 			}
 		}
 
+		// Before we change the state of the channel, we check if the peer is sending a very old
+		// commitment transaction number, if yes we send a warning message.
+		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number - 1;
+		if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
+			return Err(
+				ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
+			);
+		}
+
 		// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
 		// remaining cases either succeed or ErrorMessage-fail).
 		self.channel_state &= !(ChannelState::PeerDisconnected as u32);
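For reference, rust-lightning's internal commitment counters count down from INITIAL_COMMITMENT_NUMBER (2^48 - 1), while the peer's next_remote_commitment_number in channel_reestablish counts up, which is why the subtraction above is needed. A minimal standalone sketch of the same check, with the Channel fields turned into plain parameters (a hypothetical helper, not library code):

	// Hypothetical, self-contained illustration of the check in the hunk above.
	// In the real code these values live on Channel<Signer> and on the
	// channel_reestablish message; here they are plain parameters.

	const INITIAL_COMMITMENT_NUMBER: u64 = (1u64 << 48) - 1;

	// Returns the warning text if the peer's claimed commitment number is far
	// enough behind ours that it must be running from an old state backup.
	fn check_reestablish(
		cur_holder_commitment_transaction_number: u64, // ours: counts *down* from INITIAL_COMMITMENT_NUMBER
		next_remote_commitment_number: u64,            // peer's: counts *up* from 0
	) -> Result<(), String> {
		// Convert our countdown counter into a count-up value comparable to the peer's.
		let our_commitment_transaction =
			INITIAL_COMMITMENT_NUMBER - cur_holder_commitment_transaction_number - 1;
		if next_remote_commitment_number + 1 < our_commitment_transaction {
			return Err(format!(
				"Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
				next_remote_commitment_number, our_commitment_transaction));
		}
		Ok(())
	}

	fn main() {
		// We have advanced to commitment 5 but the peer only expects commitment 1: warn.
		assert!(check_reestablish(INITIAL_COMMITMENT_NUMBER - 6, 1).is_err());
		// Peer is exactly one behind (normal retransmission case): no warning.
		assert!(check_reestablish(INITIAL_COMMITMENT_NUMBER - 6, 4).is_ok());
	}

The `+ 1` tolerance mirrors the diff: a peer that is exactly one commitment behind is handled by the normal retransmission path, so only older state draws the warning.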
lightning/src/ln/functional_tests.rs:

@@ -7402,22 +7402,48 @@ fn test_data_loss_protect() {
 	}
 
-	// Check we close channel detecting A is fallen-behind
+	// Check that we sent the warning message when we detected that A has fallen behind,
+	// and give the possibility for A to recover from the warning.
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
-	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Peer attempted to reestablish channel with a very old local commitment transaction".to_string() });
-	assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Peer attempted to reestablish channel with a very old local commitment transaction");
-	check_added_monitors!(nodes[1], 1);
+	let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
+	assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
 
 	// Check A is able to claim to_remote output
-	let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
-	assert_eq!(node_txn.len(), 1);
-	check_spends!(node_txn[0], chan.3);
-	assert_eq!(node_txn[0].output.len(), 2);
-	mine_transaction(&nodes[0], &node_txn[0]);
-	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting".to_string() });
-	let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager);
-	assert_eq!(spend_txn.len(), 1);
-	check_spends!(spend_txn[0], node_txn[0]);
+	let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+	// The node B should not broadcast the transaction to force close the channel!
+	assert!(node_txn.is_empty());
+	// B should now detect that there is something wrong and should force close the channel.
+	let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting";
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() });
+
+	// after the warning message sent by B, we should not able to
+	// use the channel, or reconnect with success to the channel.
+	assert!(nodes[0].node.list_usable_channels().is_empty());
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+	let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+	nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
+	let mut err_msgs_0 = Vec::with_capacity(1);
+	for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+		if let MessageSendEvent::HandleError { ref action, .. } = msg {
+			match action {
+				&ErrorAction::SendErrorMessage { ref msg } => {
+					assert_eq!(msg.data, "Failed to find corresponding channel");
+					err_msgs_0.push(msg.clone());
+				},
+				_ => panic!("Unexpected event!"),
+			}
+		} else {
+			panic!("Unexpected event!");
+		}
+	}
+	assert_eq!(err_msgs_0.len(), 1);
+	nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
+	assert!(nodes[1].node.list_usable_channels().is_empty());
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
+	check_closed_broadcast!(nodes[1], false);
 }
 
 #[test]
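The test leans on the check_warn_msg! helper to pull the warning text out of node B's pending message events; its definition lives in the functional test utilities rather than in this diff. A sketch of what it plausibly looks like, assuming the MessageSendEvent::HandleError event and the ErrorAction::SendWarningMessage variant are in scope (treat the exact fields as assumptions):

	// Sketch of the helper used above; the real macro lives in
	// lightning/src/ln/functional_test_utils.rs.
	macro_rules! check_warn_msg {
		($node: expr, $recipient_node_id: expr, $chan_id: expr) => {{
			let msg_events = $node.node.get_and_clear_pending_msg_events();
			assert_eq!(msg_events.len(), 1);
			match msg_events[0] {
				MessageSendEvent::HandleError {
					action: ErrorAction::SendWarningMessage { ref msg, log_level: _ }, node_id
				} => {
					assert_eq!(node_id, $recipient_node_id); // warning goes to the right peer
					assert_eq!(msg.channel_id, $chan_id);    // ...and references the right channel
					msg.data.clone()                         // return the text for the caller to inspect
				},
				_ => panic!("Unexpected event"),
			}
		}};
	}

Returning msg.data lets call sites run .contains() against the expected warning text, as the test above does.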