mirror of https://github.com/lightningdevkit/rust-lightning.git
Merge pull request #445 from TheBlueMatt/2020-01-fuzz-enforcer-fix
Fix EnforcingChannelKeys panic when our counterparty burns their $.
Commit 88b7dcd7e4
2 changed files with 41 additions and 1 deletion
@@ -240,7 +240,10 @@ pub(super) struct Channel<ChanSigner: ChannelKeys> {
     secp_ctx: Secp256k1<secp256k1::All>,
     channel_value_satoshis: u64,

+    #[cfg(not(test))]
     local_keys: ChanSigner,
+    #[cfg(test)]
+    pub(super) local_keys: ChanSigner,
     shutdown_pubkey: PublicKey,

     // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
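The cfg-gated pair of local_keys declarations above keeps the field private in normal builds while exposing it as pub(super) in test builds, which is what lets the in-crate functional test added below read the channel's keys directly. A minimal sketch of that pattern, with an illustrative module and field name rather than the real Channel struct:

    mod channel_sketch {
        // Same field declared twice: private normally, pub(super) when built
        // for tests so test code in the parent module can inspect it.
        #[allow(dead_code)]
        pub struct ChannelSketch {
            #[cfg(not(test))]
            local_keys_placeholder: u64,
            #[cfg(test)]
            pub(super) local_keys_placeholder: u64,
        }
    }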
@@ -1995,6 +1998,17 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
         self.channel_monitor.provide_secret(self.cur_remote_commitment_transaction_number + 1, msg.per_commitment_secret)
             .map_err(|e| ChannelError::Close(e.0))?;
+
+        if self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
+            // Our counterparty seems to have burned their coins to us (by revoking a state when we
+            // haven't given them a new commitment transaction to broadcast). We should probably
+            // take advantage of this by updating our channel monitor, sending them an error, and
+            // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
+            // lot of work, and there's some chance this is all a misunderstanding anyway.
+            // We have to do *something*, though, since our signer may get mad at us for otherwise
+            // jumping a remote commitment number, so best to just force-close and move on.
+            return Err(ChannelError::Close("Received an unexpected revoke_and_ack"));
+        }

         // Update state now that we've passed all the can-fail calls...
         // (note that we may still fail to generate the new commitment_signed message, but that's
         // OK, we step the channel here and *then* if the new generation fails we can fail the
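For context on the guard above: per-commitment secrets are revealed strictly in order, with commitment numbers counting down from 2^48 - 1, and the signer (EnforcingChannelKeys in tests and fuzzing) checks that ordering, so accepting a revoke_and_ack while not AwaitingRemoteRevoke would mean jumping a remote commitment number. A rough sketch of that invariant, not rust-lightning's actual provide_secret logic, just an illustration of the in-order rule:

    // Illustrative tracker: each revealed secret must be for exactly the next
    // expected (descending) commitment index, otherwise the peer is misbehaving.
    struct RevocationTracker {
        next_expected_idx: u64, // starts at 2^48 - 1 and counts down
    }

    impl RevocationTracker {
        fn provide_secret(&mut self, idx: u64) -> Result<(), &'static str> {
            if idx != self.next_expected_idx {
                // Accepting this would mean jumping a remote commitment number.
                return Err("Received an unexpected revoke_and_ack");
            }
            self.next_expected_idx = self.next_expected_idx.saturating_sub(1);
            Ok(())
        }
    }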
@@ -9,7 +9,7 @@ use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
 use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT};
 use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY};
 use ln::channel::{Channel, ChannelError};
-use ln::onion_utils;
+use ln::{chan_utils, onion_utils};
 use ln::router::{Route, RouteHop};
 use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use ln::msgs;
@@ -6972,6 +6972,32 @@ fn test_set_outpoints_partial_claiming() {
     }
 }

+#[test]
+fn test_counterparty_raa_skip_no_crash() {
+    // Previously, if our counterparty sent two RAAs in a row without us having provided a
+    // commitment transaction, we would have happily carried on and provided them the next
+    // commitment transaction based on one RAA forward. This would probably eventually have led to
+    // channel closure, but it would not have resulted in funds loss. Still, our
+    // EnforcingChannelKeys would have paniced as it doesn't like jumps into the future. Here, we
+    // check simply that the channel is closed in response to such an RAA, but don't check whether
+    // we decide to punish our counterparty for revoking their funds (as we don't currently
+    // implement that).
+    let node_cfgs = create_node_cfgs(2);
+    let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+    let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+    let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
+
+    let commitment_seed = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&channel_id).unwrap().local_keys.commitment_seed().clone();
+    const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+    let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
+        &SecretKey::from_slice(&chan_utils::build_commitment_secret(&commitment_seed, INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+    let per_commitment_secret = chan_utils::build_commitment_secret(&commitment_seed, INITIAL_COMMITMENT_NUMBER);
+
+    nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
+        &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
+    assert_eq!(check_closed_broadcast!(nodes[1], true).unwrap().data, "Received an unexpected revoke_and_ack");
+}
+
 #[test]
 fn test_bump_txn_sanitize_tracking_maps() {
     // Sanitizing pendning_claim_request and claimable_outpoints used to be buggy,
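The test drives this path by deriving node 0's per-commitment secrets straight from its commitment_seed. chan_utils::build_commitment_secret follows the BOLT 3 derivation: starting from the 32-byte seed, walk the 48 index bits from high to low and, for each set bit, flip that bit in the running value and SHA256 the result. A self-contained sketch of that derivation using the sha2 crate (written from the spec, so it may differ in detail from chan_utils):

    use sha2::{Digest, Sha256};

    // BOLT 3 per-commitment-secret derivation: for each set bit of the index,
    // from bit 47 down to bit 0, flip that bit in the running 32-byte value and
    // hash it.
    fn build_commitment_secret(commitment_seed: &[u8; 32], idx: u64) -> [u8; 32] {
        let mut res = *commitment_seed;
        for bitpos in (0..48usize).rev() {
            if idx & (1u64 << bitpos) != 0 {
                res[bitpos / 8] ^= 1 << (bitpos & 7);
                let digest = Sha256::digest(&res);
                res.copy_from_slice(&digest);
            }
        }
        res
    }

In the test, per_commitment_secret is the secret for the very first remote commitment (index 2^48 - 1) and is delivered while nodes[1] is not expecting a revocation, so handling it now force-closes with "Received an unexpected revoke_and_ack" instead of panicking inside EnforcingChannelKeys.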