mirror of
https://github.com/lightningdevkit/rust-lightning.git
synced 2025-02-25 07:17:40 +01:00
Fail channel if we can't sign a new commitment tx during HTLC claim
Previously, a failure to generate a new commitment transaction simply indicated that we had gone to double-claim an HTLC. Now that double-claims are instead returned as Ok(None), we should handle the error case and fail the channel, as the only way to hit the error case is if key derivation failed or the user refused to sign the new commitment transaction. This also resolves an issue where we would not inform our ChannelMonitor of the new payment preimage if we failed to fetch a signature for the new commitment transaction.
This commit is contained in:
parent c09104f46e
commit f06f9d1136
3 changed files with 26 additions and 20 deletions
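Before the diff, a minimal sketch of the error-handling shape this change introduces. The types below (MonitorUpdate, SignerError, claim_htlc) are simplified stand-ins, not the actual rust-lightning API; the point, per the commit message, is that even when signing fails, the caller still receives the ChannelMonitorUpdate carrying the payment preimage and applies it before failing the channel.

```rust
// Simplified stand-ins for illustration; not the rust-lightning types.
struct MonitorUpdate {
    update_id: u64,
}

#[derive(Debug)]
struct SignerError(&'static str);

// Mirrors the new shape of get_update_fulfill_htlc_and_commit: the error
// variant carries the monitor update alongside the channel error, so the
// preimage is never lost even if signing the new commitment tx fails.
fn claim_htlc(signer_cooperates: bool) -> Result<MonitorUpdate, (SignerError, MonitorUpdate)> {
    // The preimage goes into the monitor update before we try to sign...
    let monitor_update = MonitorUpdate { update_id: 1 };
    if signer_cooperates {
        Ok(monitor_update)
    } else {
        // ...so a signing failure still hands the update back to the caller.
        Err((SignerError("signer refused to sign commitment tx"), monitor_update))
    }
}

fn main() {
    match claim_htlc(false) {
        Ok(update) => println!("claimed; apply monitor update {}", update.update_id),
        Err((e, update)) => {
            // Apply the monitor update first (persisting the preimage),
            // then fail the channel, as the new Err arm in the diff does.
            println!("apply update {} then force-close: {:?}", update.update_id, e);
        }
    }
}
```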
lightning/src/chain/channelmonitor.rs

@@ -114,7 +114,7 @@ impl Readable for ChannelMonitorUpdate {
 }
 
 /// An error enum representing a failure to persist a channel monitor update.
-#[derive(Clone, Debug)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub enum ChannelMonitorUpdateErr {
     /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
     /// our state failed, but is expected to succeed at some point in the future).
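A note on why the derive list grows: the channelmanager hunks below compare the error directly (e == ChannelMonitorUpdateErr::PermanentFailure) when choosing a log level, which requires PartialEq, and Copy lets the error be passed around by value. A tiny sketch with a stand-in enum:

```rust
// Stand-in enum; the real one is ChannelMonitorUpdateErr.
#[derive(Clone, Copy, Debug, PartialEq)]
enum MonitorErr {
    TemporaryFailure,
    PermanentFailure,
}

// Direct equality (rather than a match) needs PartialEq, as in the
// log_given_level! call sites further down in this commit.
fn level_for(e: MonitorErr) -> &'static str {
    if e == MonitorErr::PermanentFailure { "ERROR" } else { "DEBUG" }
}

fn main() {
    assert_eq!(level_for(MonitorErr::PermanentFailure), "ERROR");
    assert_eq!(level_for(MonitorErr::TemporaryFailure), "DEBUG");
}
```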
lightning/src/ln/channel.rs

@@ -1374,10 +1374,13 @@ impl<Signer: Sign> Channel<Signer> {
         }
     }
 
-    pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, ChannelError> where L::Target: Logger {
+    pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
         match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
             UpdateFulfillFetch::NewClaim { mut monitor_update, msg: Some(update_fulfill_htlc) } => {
-                let (commitment, mut additional_update) = self.send_commitment_no_status_check(logger)?;
+                let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
+                    Err(e) => return Err((e, monitor_update)),
+                    Ok(res) => res
+                };
                 // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
                 // strictly increasing by one, so decrement it here.
                 self.latest_monitor_update_id = monitor_update.update_id;
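The comment above about keeping monitor update ids "strictly increasing by one" is worth unpacking: the claim produces one update carrying the preimage, and send_commitment_no_status_check produces a second, but the two are merged and delivered as a single update under the first id. A sketch with stand-in types (Chan and Update are illustrative, not the real API):

```rust
// Illustrative stand-ins; rust-lightning's Channel tracks
// latest_monitor_update_id in a similar way.
struct Update {
    update_id: u64,
    steps: Vec<&'static str>,
}

struct Chan {
    latest_monitor_update_id: u64,
}

impl Chan {
    fn next_update(&mut self, step: &'static str) -> Update {
        self.latest_monitor_update_id += 1;
        Update { update_id: self.latest_monitor_update_id, steps: vec![step] }
    }
}

fn main() {
    let mut chan = Chan { latest_monitor_update_id: 0 };
    let mut monitor_update = chan.next_update("preimage");      // id 1
    let mut additional_update = chan.next_update("commitment"); // id 2
    // send_commitment_no_status_check bumped the id, so rewind it and fold
    // the second update into the first: one combined update, delivered as id 1.
    chan.latest_monitor_update_id = monitor_update.update_id;
    monitor_update.steps.append(&mut additional_update.steps);
    assert_eq!(chan.latest_monitor_update_id, 1);
    assert_eq!(monitor_update.steps, vec!["preimage", "commitment"]);
}
```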
lightning/src/ln/channelmanager.rs

@@ -57,7 +57,7 @@ use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEv
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
 use util::errors::APIError;
 
 use prelude::*;
@@ -2679,16 +2679,17 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
         };
 
         if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
-            let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
             match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
                 Ok(msgs_monitor_option) => {
                     if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
                         if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
-                            if was_frozen_for_monitor {
-                                assert!(msgs.is_none());
-                            } else {
-                                return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
-                            }
+                            log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
+                                "Failed to update channel monitor with preimage {:?}: {:?}",
+                                payment_preimage, e);
+                            return Err(Some((
+                                chan.get().get_counterparty_node_id(),
+                                handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+                            )));
                         }
                         if let Some((msg, commitment_signed)) = msgs {
                             log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
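The behavioral change in this hunk: a monitor-update failure during a claim is no longer special-cased when the channel was already frozen for a pending monitor update (the old assert!(msgs.is_none()) branch); it is always logged at a severity that depends on the error and surfaced to the caller. A simplified sketch of that flow, with stand-in types:

```rust
#[derive(Debug, PartialEq)]
enum UpdateErr {
    Temporary,
    Permanent,
}

// Stand-in for the update_channel + log_given_level! + handle_monitor_err!
// sequence above: the error is always propagated, regardless of whether the
// channel was previously frozen for a monitor update.
fn apply_monitor_update(outcome: Result<(), UpdateErr>) -> Result<(), UpdateErr> {
    if let Err(e) = outcome {
        let level = if e == UpdateErr::Permanent { "ERROR" } else { "DEBUG" };
        println!("[{}] failed to update channel monitor: {:?}", level, e);
        return Err(e);
    }
    Ok(())
}

fn main() {
    assert!(apply_monitor_update(Err(UpdateErr::Temporary)).is_err());
    assert!(apply_monitor_update(Ok(())).is_ok());
}
```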
@@ -2708,16 +2709,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                     }
                     return Ok(())
                 },
-                Err(e) => {
-                    // TODO: Do something with e?
-                    // This should only occur if we are claiming an HTLC at the same time as the
-                    // HTLC is being failed (eg because a block is being connected and this caused
-                    // an HTLC to time out). This should, of course, only occur if the user is the
-                    // one doing the claiming (as it being a part of a peer claim would imply we're
-                    // about to lose funds) and only if the lock in claim_funds was dropped as a
-                    // previous HTLC was failed (thus not for an MPP payment).
-                    debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e);
-                    return Err(None)
+                Err((e, monitor_update)) => {
+                    if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+                        log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
+                            "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+                            payment_preimage, e);
+                    }
+                    let counterparty_node_id = chan.get().get_counterparty_node_id();
+                    let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+                    if drop {
+                        chan.remove_entry();
+                    }
+                    return Err(Some((counterparty_node_id, res)));
                 },
             }
         } else { unreachable!(); }
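And the new Err arm itself, sketched end to end: apply the monitor update first so the preimage is persisted even though the channel is about to be force-closed (a persistence failure is only logged, since we are closing anyway), then convert the error and remove the channel. The types and helper here (handle_claim_err, ChannelError) are illustrative stand-ins:

```rust
use std::collections::HashMap;

struct MonitorUpdate {
    preimage: [u8; 32],
}

#[derive(Debug)]
struct ChannelError(&'static str);

// Mirrors the new Err((e, monitor_update)) arm: persist the preimage, then
// drop the channel and report the force-close to the caller.
fn handle_claim_err(
    channels: &mut HashMap<u64, ()>,
    chan_id: u64,
    e: ChannelError,
    monitor_update: MonitorUpdate,
) -> String {
    // Step 1: hand the preimage-bearing update to the chain monitor. A
    // failure here would only be logged; we are force-closing either way.
    println!("persisting preimage {:02x?}... prior to force-close", &monitor_update.preimage[..4]);
    // Step 2: convert the channel error and remove the channel, mirroring
    // convert_chan_err! and chan.remove_entry() in the hunk above.
    channels.remove(&chan_id);
    format!("channel {}: force-closed ({:?})", chan_id, e)
}

fn main() {
    let mut channels: HashMap<u64, ()> = HashMap::from([(42, ())]);
    let msg = handle_claim_err(&mut channels, 42, ChannelError("signing failed"), MonitorUpdate { preimage: [0x11; 32] });
    println!("{}", msg);
    assert!(channels.is_empty());
}
```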