Mirror of https://github.com/lightningdevkit/rust-lightning.git (synced 2025-02-25 07:17:40 +01:00)

Merge pull request #808 from TheBlueMatt/2021-02-791-order-fix

Process monitor update events in block_[dis]connected asynchronously

Commit 1a8b9be5e4: 6 changed files with 395 additions and 193 deletions.
File: lightning/src/ln/channel.rs

@@ -4179,7 +4179,11 @@ impl<Signer: Sign> Channel<Signer> {
 	/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
 	/// Also returns the list of payment_hashes for channels which we can safely fail backwards
 	/// immediately (others we will have to allow to time out).
-	pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>) {
+	pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>) {
+		// Note that we MUST only generate a monitor update that indicates force-closure - we're
+		// called during initialization prior to the chain_monitor in the encompassing ChannelManager
+		// being fully configured in some cases. Thus, it's likely any monitor events we generate will
+		// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
 		assert!(self.channel_state != ChannelState::ShutdownComplete as u32);

 		// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
@@ -4193,7 +4197,7 @@ impl<Signer: Sign> Channel<Signer> {
 				_ => {}
 			}
 		}
-		let funding_txo = if let Some(funding_txo) = self.get_funding_txo() {
+		let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
 			// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
 			// returning a channel monitor update here would imply a channel monitor update before
 			// we even registered the channel monitor to begin with, which is invalid.
@@ -4202,17 +4206,17 @@ impl<Signer: Sign> Channel<Signer> {
 			// monitor update to the user, even if we return one).
 			// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
 			if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelFunded as u32 | ChannelState::ShutdownComplete as u32) != 0 {
-				Some(funding_txo.clone())
+				self.latest_monitor_update_id += 1;
+				Some((funding_txo, ChannelMonitorUpdate {
+					update_id: self.latest_monitor_update_id,
+					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+				}))
 			} else { None }
 		} else { None };

 		self.channel_state = ChannelState::ShutdownComplete as u32;
 		self.update_time_counter += 1;
-		self.latest_monitor_update_id += 1;
-		(funding_txo, ChannelMonitorUpdate {
-			update_id: self.latest_monitor_update_id,
-			updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
-		}, dropped_outbound_htlcs)
+		(monitor_update, dropped_outbound_htlcs)
 	}
 }
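The point of the new return type is that a monitor update can no longer exist without the funding outpoint it applies to: before funding is signed, force_shutdown now returns None rather than an update for a monitor that was never registered. A minimal standalone sketch of the new shape (OutPoint, MonitorUpdate, and force_shutdown here are simplified stand-ins, not the real rust-lightning items):

    // Simplified stand-ins; illustrative only.
    struct OutPoint { index: u16 }
    struct MonitorUpdate { update_id: u64 }

    // Either both the outpoint and its update exist, or neither does.
    fn force_shutdown(funded: bool) -> (Option<(OutPoint, MonitorUpdate)>, Vec<u64>) {
        if funded {
            (Some((OutPoint { index: 0 }, MonitorUpdate { update_id: 1 })), Vec::new())
        } else {
            // Pre-funding: no monitor was ever registered, so no update is produced.
            (None, Vec::new())
        }
    }

    fn main() {
        let (monitor_update_option, failed_htlcs) = force_shutdown(false);
        assert!(monitor_update_option.is_none());
        assert!(failed_htlcs.is_empty());
    }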
File: lightning/src/ln/channelmanager.rs

@@ -206,7 +206,7 @@ pub struct PaymentPreimage(pub [u8;32]);
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 pub struct PaymentSecret(pub [u8;32]);

-type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);

 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@ -333,6 +333,15 @@ pub(super) struct ChannelHolder<Signer: Sign> {
 	pub(super) pending_msg_events: Vec<MessageSendEvent>,
 }

+/// Events which we process internally but cannot be processed immediately at the generation site
+/// for some reason. They are handled in timer_chan_freshness_every_min, so may be processed with
+/// quite some time lag.
+enum BackgroundEvent {
+	/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
+	/// commitment transaction.
+	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+}
+
 /// State we hold per-peer. In the future we should put channels in here, but for now we only hold
 /// the latest Init features we heard from the peer.
 struct PeerState {
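The same queue-and-defer idea in miniature, with hypothetical simplified types standing in for OutPoint and ChannelMonitorUpdate: events generated during block processing are queued rather than applied at the generation site.

    use std::sync::Mutex;

    // Simplified stand-ins; illustrative only.
    type OutPoint = u32;
    type MonitorUpdate = u64;

    // Mirror of the idea behind BackgroundEvent: work generated during
    // block_[dis]connected that can only be applied later, once the chain
    // monitor is known to be fully configured.
    enum BackgroundEvent {
        ClosingMonitorUpdate((OutPoint, MonitorUpdate)),
    }

    struct Manager {
        pending_background_events: Mutex<Vec<BackgroundEvent>>,
    }

    impl Manager {
        // Queue the update instead of applying it at the generation site.
        fn defer_close(&self, funding_txo: OutPoint, update: MonitorUpdate) {
            self.pending_background_events
                .lock()
                .unwrap()
                .push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
        }
    }

    fn main() {
        let m = Manager { pending_background_events: Mutex::new(Vec::new()) };
        m.defer_close(7, 1);
        assert_eq!(m.pending_background_events.lock().unwrap().len(), 1);
    }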
@@ -436,6 +445,7 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
 	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,

 	pending_events: Mutex<Vec<events::Event>>,
+	pending_background_events: Mutex<Vec<BackgroundEvent>>,
 	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
 	/// Essentially just when we're serializing ourselves out.
 	/// Taken first everywhere where we are making changes before any other locks.
@@ -794,6 +804,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 			per_peer_state: RwLock::new(HashMap::new()),

 			pending_events: Mutex::new(Vec::new()),
+			pending_background_events: Mutex::new(Vec::new()),
 			total_consistency_lock: RwLock::new(()),
 			persistence_notifier: PersistenceNotifier::new(),

@@ -942,12 +953,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana

 	#[inline]
 	fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
-		let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res;
+		let (monitor_update_option, mut failed_htlcs) = shutdown_res;
 		log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
 		for htlc_source in failed_htlcs.drain(..) {
 			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 		}
-		if let Some(funding_txo) = funding_txo_option {
+		if let Some((funding_txo, monitor_update)) = monitor_update_option {
 			// There isn't anything we can do if we get an update failure - we're already
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -1854,13 +1865,42 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 			events.append(&mut new_events);
 	}

+	/// Free the background events, generally called from timer_chan_freshness_every_min.
+	///
+	/// Exposed for testing to allow us to process events quickly without generating accidental
+	/// BroadcastChannelUpdate events in timer_chan_freshness_every_min.
+	///
+	/// Expects the caller to have a total_consistency_lock read lock.
+	fn process_background_events(&self) {
+		let mut background_events = Vec::new();
+		mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
+		for event in background_events.drain(..) {
+			match event {
+				BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+					// The channel has already been closed, so no use bothering to care about the
+					// monitor updating completing.
+					let _ = self.chain_monitor.update_channel(funding_txo, update);
+				},
+			}
+		}
+	}
+
+	#[cfg(any(test, feature = "_test_utils"))]
+	pub(crate) fn test_process_background_events(&self) {
+		self.process_background_events();
+	}
+
 	/// If a peer is disconnected we mark any channels with that peer as 'disabled'.
 	/// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
 	/// to inform the network about the uselessness of these channels.
 	///
 	/// This method handles all the details, and must be called roughly once per minute.
+	///
+	/// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
 	pub fn timer_chan_freshness_every_min(&self) {
 		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+		self.process_background_events();
+
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = &mut *channel_state_lock;
 		for (_, chan) in channel_state.by_id.iter_mut() {
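process_background_events drains the queue by swapping it out under the lock, so the lock is held only for the swap and not while each update is applied. A small self-contained sketch of that pattern, assuming a plain Vec<u64> in place of the real event type:

    use std::mem;
    use std::sync::Mutex;

    // Swap the queue out under the lock, then process without holding it.
    fn process(queue: &Mutex<Vec<u64>>) -> Vec<u64> {
        let mut events = Vec::new();
        // Holding the lock only for the swap keeps the critical section short.
        mem::swap(&mut *queue.lock().unwrap(), &mut events);
        let mut processed = Vec::new();
        for ev in events.drain(..) {
            processed.push(ev); // the deferred monitor update would be applied here
        }
        processed
    }

    fn main() {
        let queue = Mutex::new(vec![1, 2, 3]);
        assert_eq!(process(&queue), vec![1, 2, 3]);
        assert!(queue.lock().unwrap().is_empty());
    }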
@@ -1953,6 +1993,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		//identify whether we sent it or not based on the (I presume) very different runtime
 		//between the branches here. We should make this async and move it into the forward HTLCs
 		//timer handling.
+
+		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+		// from block_connected which may run during initialization prior to the chain_monitor
+		// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
 		match source {
 			HTLCSource::OutboundRoute { ref path, .. } => {
 				log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@ -2418,7 +2462,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 					// We do not do a force-close here as that would generate a monitor update for
 					// a monitor that we didn't manage to store (and that we don't care about - we
 					// don't respond with the funding_signed so the channel can never go on chain).
-					let (_funding_txo_option, _monitor_update, failed_htlcs) = chan.force_shutdown(true);
+					let (_monitor_update, failed_htlcs) = chan.force_shutdown(true);
 					assert!(failed_htlcs.is_empty());
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
 				},
@@ -3100,6 +3144,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 			self.finish_force_close_channel(failure);
 		}
 	}
+
+	/// Handle a list of channel failures during a block_connected or block_disconnected call,
+	/// pushing the channel monitor update (if any) to the background events queue and removing the
+	/// Channel object.
+	fn handle_init_event_channel_failures(&self, mut failed_channels: Vec<ShutdownResult>) {
+		for mut failure in failed_channels.drain(..) {
+			// Either a commitment transaction has been confirmed on-chain or
+			// Channel::block_disconnected detected that the funding transaction has been
+			// reorganized out of the main chain.
+			// We cannot broadcast our latest local state via monitor update (as
+			// Channel::force_shutdown tries to make us do) as we may still be in initialization,
+			// so we track the update internally and handle it when the user next calls
+			// timer_chan_freshness_every_min, guaranteeing we're running normally.
+			if let Some((funding_txo, update)) = failure.0.take() {
+				assert_eq!(update.updates.len(), 1);
+				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
+					assert!(should_broadcast);
+				} else { unreachable!(); }
+				self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+			}
+			self.finish_force_close_channel(failure);
+		}
+	}
 }

 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
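The asserts above encode an invariant worth seeing in isolation: a close generated during init must consist of exactly one force-close step that asks for the latest local commitment to be broadcast. A sketch of that check, with UpdateStep and Update as simplified stand-ins for ChannelMonitorUpdateStep and ChannelMonitorUpdate:

    // Simplified stand-ins; illustrative only.
    #[allow(dead_code)]
    enum UpdateStep {
        ChannelForceClosed { should_broadcast: bool },
        Other,
    }

    struct Update {
        updates: Vec<UpdateStep>,
    }

    fn check_deferred_close(update: &Update) {
        // Exactly one step, and it must request a broadcast.
        assert_eq!(update.updates.len(), 1);
        if let UpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
            assert!(should_broadcast);
        } else {
            unreachable!();
        }
    }

    fn main() {
        check_deferred_close(&Update {
            updates: vec![UpdateStep::ChannelForceClosed { should_broadcast: true }],
        });
    }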
@@ -3167,6 +3234,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 {
 	/// Updates channel state based on transactions seen in a connected block.
 	pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+		// during initialization prior to the chain_monitor being fully configured in some cases.
+		// See the docs for `ChannelManagerReadArgs` for more.
 		let header_hash = header.block_hash();
 		log_trace!(self.logger, "Block {} at height {} connected", header_hash, height);
 		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
@@ -3218,9 +3288,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 						if let Some(short_id) = channel.get_short_channel_id() {
 							short_to_id.remove(&short_id);
 						}
-						// It looks like our counterparty went on-chain. We go ahead and
-						// broadcast our latest local state as well here, just in case its
-						// some kind of SPV attack, though we expect these to be dropped.
+						// It looks like our counterparty went on-chain. Close the channel.
 						failed_channels.push(channel.force_shutdown(true));
 						if let Ok(update) = self.get_channel_update(&channel) {
 							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -3254,9 +3322,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				!htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
 			});
 		}
-		for failure in failed_channels.drain(..) {
-			self.finish_force_close_channel(failure);
-		}
+		self.handle_init_event_channel_failures(failed_channels);

 		for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
 			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
@@ -3282,6 +3349,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 	/// If necessary, the channel may be force-closed without letting the counterparty participate
 	/// in the shutdown.
 	pub fn block_disconnected(&self, header: &BlockHeader) {
+		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+		// during initialization prior to the chain_monitor being fully configured in some cases.
+		// See the docs for `ChannelManagerReadArgs` for more.
 		let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 		let mut failed_channels = Vec::new();
 		{
@@ -3306,9 +3376,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				}
 			});
 		}
-		for failure in failed_channels.drain(..) {
-			self.finish_force_close_channel(failure);
-		}
+		self.handle_init_event_channel_failures(failed_channels);
 		self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
 		*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.block_hash();
 	}
@@ -3914,6 +3982,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
 			event.write(writer)?;
 		}

+		let background_events = self.pending_background_events.lock().unwrap();
+		(background_events.len() as u64).write(writer)?;
+		for event in background_events.iter() {
+			match event {
+				BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
+					0u8.write(writer)?;
+					funding_txo.write(writer)?;
+					monitor_update.write(writer)?;
+				},
+			}
+		}
+
 		(self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;

 		Ok(())
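The on-disk format for the new field is a u64 count followed by one type-tagged record per event (tag 0u8 for ClosingMonitorUpdate), and the reader below bounds its allocation and rejects unknown tags. A self-contained sketch of that length-prefixed scheme; the byte layout here is illustrative, not the exact lightning serialization:

    use std::io::{self, Cursor, Read, Write};

    // Write a u64 count, then a one-byte tag plus payload per event.
    fn write_events<W: Write>(w: &mut W, events: &[(u8, u64)]) -> io::Result<()> {
        w.write_all(&(events.len() as u64).to_be_bytes())?;
        for (tag, payload) in events {
            w.write_all(&[*tag])?; // 0u8 tags the only known event kind here
            w.write_all(&payload.to_be_bytes())?;
        }
        Ok(())
    }

    fn read_events<R: Read>(r: &mut R) -> io::Result<Vec<(u8, u64)>> {
        let mut len = [0u8; 8];
        r.read_exact(&mut len)?;
        let count = u64::from_be_bytes(len);
        let mut out = Vec::new();
        for _ in 0..count {
            let mut tag = [0u8; 1];
            r.read_exact(&mut tag)?;
            if tag[0] != 0 {
                // Mirrors rejecting an unknown tag as invalid data.
                return Err(io::Error::new(io::ErrorKind::InvalidData, "unknown event tag"));
            }
            let mut payload = [0u8; 8];
            r.read_exact(&mut payload)?;
            out.push((tag[0], u64::from_be_bytes(payload)));
        }
        Ok(out)
    }

    fn main() -> io::Result<()> {
        let mut buf = Vec::new();
        write_events(&mut buf, &[(0, 42)])?;
        let round = read_events(&mut Cursor::new(buf))?;
        assert_eq!(round, vec![(0, 42)]);
        Ok(())
    }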
@@ -3929,11 +4009,22 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
 /// ChannelManager)>::read(reader, args).
 /// This may result in closing some Channels if the ChannelMonitor is newer than the stored
 /// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
-/// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
-///    ChannelMonitor::get_outputs_to_watch() and ChannelMonitor::get_funding_txo().
+/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same
+///    way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and
+///    ChannelMonitor::get_funding_txo().
 /// 4) Reconnect blocks on your ChannelMonitors.
-/// 5) Move the ChannelMonitors into your local chain::Watch.
-/// 6) Disconnect/connect blocks on the ChannelManager.
+/// 5) Disconnect/connect blocks on the ChannelManager.
+/// 6) Move the ChannelMonitors into your local chain::Watch.
+///
+/// Note that the ordering of #4-6 is not of importance, however all three must occur before you
+/// call any other methods on the newly-deserialized ChannelManager.
+///
+/// Note that because some channels may be closed during deserialization, it is critical that you
+/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
+/// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
+/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
+/// not force-close the same channels but consider them live), you may end up revoking a state for
+/// which you've already broadcasted the transaction.
 pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	where M::Target: chain::Watch<Signer>,
 	      T::Target: BroadcasterInterface,
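The reordered documentation reads as a restart checklist. A hypothetical outline of the sequence it now requires; every function here is a placeholder, not a real rust-lightning API:

    // Hypothetical placeholders only; none of these are real rust-lightning calls.
    fn read_monitors_and_manager() {}  // steps 1-2: deserialize ChannelMonitors, then the ChannelManager
    fn register_outputs_to_watch() {}  // step 3: only needed when not fetching full blocks
    fn reconnect_monitor_blocks() {}   // step 4
    fn sync_manager_block_view() {}    // step 5: block (dis)connects on the ChannelManager
    fn move_monitors_into_watch() {}   // step 6: monitors go into chain::Watch

    fn main() {
        read_monitors_and_manager();
        register_outputs_to_watch();
        // Per the updated docs the ordering of steps 4-6 is not important, but
        // all three must happen before any other ChannelManager method is called.
        reconnect_monitor_blocks();
        sync_manager_block_view();
        move_monitors_into_watch();
    }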
@@ -4064,7 +4155,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 					channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
 					channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
 				// But if the channel is behind of the monitor, close the channel:
-				let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+				let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
 				failed_htlcs.append(&mut new_failed_htlcs);
 				monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
 			} else {
@@ -4128,6 +4219,15 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 			}
 		}

+		let background_event_count: u64 = Readable::read(reader)?;
+		let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
+		for _ in 0..background_event_count {
+			match <u8 as Readable>::read(reader)? {
+				0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+				_ => return Err(DecodeError::InvalidValue),
+			}
+		}
+
 		let last_node_announcement_serial: u32 = Readable::read(reader)?;

 		let mut secp_ctx = Secp256k1::new();
@@ -4157,6 +4257,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 			per_peer_state: RwLock::new(per_peer_state),

 			pending_events: Mutex::new(pending_events_read),
+			pending_background_events: Mutex::new(pending_background_events_read),
 			total_consistency_lock: RwLock::new(()),
 			persistence_notifier: PersistenceNotifier::new(),

File: lightning/src/ln/functional_test_utils.rs

@@ -83,11 +83,13 @@ pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block,
 	let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
 	node.chain_monitor.chain_monitor.block_connected(&block.header, &txdata, height);
 	node.node.block_connected(&block.header, &txdata, height);
+	node.node.test_process_background_events();
 }

 pub fn disconnect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, header: &BlockHeader, height: u32) {
 	node.chain_monitor.chain_monitor.block_disconnected(header, height);
 	node.node.block_disconnected(header);
+	node.node.test_process_background_events();
 }

 pub struct TestChanMonCfg {
File: lightning/src/ln/functional_tests.rs

@@ -51,7 +51,6 @@ use std::collections::{BTreeSet, HashMap, HashSet};
 use std::default::Default;
 use std::sync::Mutex;
 use std::sync::atomic::Ordering;
-use std::mem;

 use ln::functional_test_utils::*;
 use ln::chan_utils::CommitmentTransaction;
@@ -3533,37 +3532,6 @@ fn test_force_close_fail_back() {
 		check_spends!(node_txn[0], tx);
 	}
 }

-#[test]
-fn test_unconf_chan() {
-	// After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0] side
-	let chanmon_cfgs = create_chanmon_cfgs(2);
-	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-
-	let channel_state = nodes[0].node.channel_state.lock().unwrap();
-	assert_eq!(channel_state.by_id.len(), 1);
-	assert_eq!(channel_state.short_to_id.len(), 1);
-	mem::drop(channel_state);
-
-	let mut headers = Vec::new();
-	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	headers.push(header.clone());
-	for _i in 2..100 {
-		header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-		headers.push(header.clone());
-	}
-	while !headers.is_empty() {
-		nodes[0].node.block_disconnected(&headers.pop().unwrap());
-	}
-	check_closed_broadcast!(nodes[0], false);
-	check_added_monitors!(nodes[0], 1);
-	let channel_state = nodes[0].node.channel_state.lock().unwrap();
-	assert_eq!(channel_state.by_id.len(), 0);
-	assert_eq!(channel_state.short_to_id.len(), 0);
-}
-
 #[test]
 fn test_simple_peer_disconnect() {
 	// Test that we can reconnect when there are no lost messages
@@ -8059,103 +8027,6 @@ fn test_bump_penalty_txn_on_remote_commitment() {
 	nodes[1].node.get_and_clear_pending_msg_events();
 }

-#[test]
-fn test_set_outpoints_partial_claiming() {
-	// - remote party claim tx, new bump tx
-	// - disconnect remote claiming tx, new bump
-	// - disconnect tx, see no tx anymore
-	let chanmon_cfgs = create_chanmon_cfgs(2);
-	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
-	let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
-	let payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
-
-	// Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLC
-	let remote_txn = get_local_commitment_txn!(nodes[1], chan.2);
-	assert_eq!(remote_txn.len(), 3);
-	assert_eq!(remote_txn[0].output.len(), 4);
-	assert_eq!(remote_txn[0].input.len(), 1);
-	assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
-	check_spends!(remote_txn[1], remote_txn[0]);
-	check_spends!(remote_txn[2], remote_txn[0]);
-
-	// Connect blocks on node A to advance height towards TEST_FINAL_CLTV
-	let prev_header_100 = connect_blocks(&nodes[1], 100, 0, false, Default::default());
-	// Provide node A with both preimage
-	nodes[0].node.claim_funds(payment_preimage_1, &None, 3_000_000);
-	nodes[0].node.claim_funds(payment_preimage_2, &None, 3_000_000);
-	check_added_monitors!(nodes[0], 2);
-	nodes[0].node.get_and_clear_pending_events();
-	nodes[0].node.get_and_clear_pending_msg_events();
-
-	// Connect blocks on node A commitment transaction
-	let header = BlockHeader { version: 0x20000000, prev_blockhash: prev_header_100, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[0], &Block { header, txdata: vec![remote_txn[0].clone()] }, 101);
-	check_closed_broadcast!(nodes[0], false);
-	check_added_monitors!(nodes[0], 1);
-	// Verify node A broadcast tx claiming both HTLCs
-	{
-		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		// ChannelMonitor: claim tx, ChannelManager: local commitment tx + HTLC-Success*2
-		assert_eq!(node_txn.len(), 4);
-		check_spends!(node_txn[0], remote_txn[0]);
-		check_spends!(node_txn[1], chan.3);
-		check_spends!(node_txn[2], node_txn[1]);
-		check_spends!(node_txn[3], node_txn[1]);
-		assert_eq!(node_txn[0].input.len(), 2);
-		node_txn.clear();
-	}
-
-	// Connect blocks on node B
-	connect_blocks(&nodes[1], 135, 0, false, Default::default());
-	check_closed_broadcast!(nodes[1], false);
-	check_added_monitors!(nodes[1], 1);
-	// Verify node B broadcast 2 HTLC-timeout txn
-	let partial_claim_tx = {
-		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(node_txn.len(), 3);
-		check_spends!(node_txn[1], node_txn[0]);
-		check_spends!(node_txn[2], node_txn[0]);
-		assert_eq!(node_txn[1].input.len(), 1);
-		assert_eq!(node_txn[2].input.len(), 1);
-		node_txn[1].clone()
-	};
-
-	// Broadcast partial claim on node A, should regenerate a claiming tx with HTLC dropped
-	let header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[0], &Block { header, txdata: vec![partial_claim_tx.clone()] }, 102);
-	{
-		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(node_txn.len(), 1);
-		check_spends!(node_txn[0], remote_txn[0]);
-		assert_eq!(node_txn[0].input.len(), 1); //dropped HTLC
-		node_txn.clear();
-	}
-	nodes[0].node.get_and_clear_pending_msg_events();
-
-	// Disconnect last block on node A, should regenerate a claiming tx with HTLC dropped
-	disconnect_block(&nodes[0], &header, 102);
-	{
-		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(node_txn.len(), 1);
-		check_spends!(node_txn[0], remote_txn[0]);
-		assert_eq!(node_txn[0].input.len(), 2); //resurrected HTLC
-		node_txn.clear();
-	}
-
-	//// Disconnect one more block and then reconnect multiple no transaction should be generated
-	disconnect_block(&nodes[0], &header, 101);
-	connect_blocks(&nodes[1], 15, 101, false, prev_header_100);
-	{
-		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(node_txn.len(), 0);
-		node_txn.clear();
-	}
-}
-
 #[test]
 fn test_counterparty_raa_skip_no_crash() {
 	// Previously, if our counterparty sent two RAAs in a row without us having provided a
@@ -8551,48 +8422,48 @@ fn test_pre_lockin_no_chan_closed_update() {
 #[test]
 fn test_htlc_no_detection() {
 	// This test is a mutation to underscore the detection logic bug we had
 	// before #653. HTLC value routed is above the remaining balance, thus
 	// inverting HTLC and `to_remote` output. HTLC will come second and
 	// it wouldn't be seen by pre-#653 detection as we were enumerate()'ing
 	// on a watched outputs vector (Vec<TxOut>) thus implicitly relying on
 	// outputs order detection for correct spending children filtring.

 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

 	// Create some initial channels
 	let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());

 	send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000, 1_000_000);
 	let (_, our_payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
 	let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
 	assert_eq!(local_txn[0].input.len(), 1);
 	assert_eq!(local_txn[0].output.len(), 3);
 	check_spends!(local_txn[0], chan_1.3);

 	// Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 	connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] }, 200);
 	// We deliberately connect the local tx twice as this should provoke a failure calling
 	// this test before #653 fix.
 	connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] }, 200);
 	check_closed_broadcast!(nodes[0], false);
 	check_added_monitors!(nodes[0], 1);

 	let htlc_timeout = {
 		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(node_txn[0].input.len(), 1);
 		assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
 		check_spends!(node_txn[0], local_txn[0]);
 		node_txn[0].clone()
 	};

 	let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 	connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] }, 201);
 	connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 201, true, header_201.block_hash());
 	expect_payment_failed!(nodes[0], our_payment_hash, true);
 }

 fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
File: lightning/src/ln/reorg_tests.rs

@@ -9,14 +9,22 @@

 //! Further functional tests which test blockchain reorganizations.

-use chain::channelmonitor::ANTI_REORG_DELAY;
+use chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
+use chain::Watch;
+use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs};
 use ln::features::InitFeatures;
 use ln::msgs::{ChannelMessageHandler, ErrorAction, HTLCFailChannelUpdate};
+use util::config::UserConfig;
+use util::enforcing_trait_impls::EnforcingSigner;
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::test_utils;
+use util::ser::{ReadableArgs, Writeable};

 use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::hash_types::BlockHash;

-use std::default::Default;
+use std::collections::HashMap;
+use std::mem;

 use ln::functional_test_utils::*;
@@ -180,3 +188,206 @@ fn test_onchain_htlc_claim_reorg_remote_commitment() {
 fn test_onchain_htlc_timeout_delay_remote_commitment() {
 	do_test_onchain_htlc_reorg(false, false);
 }
+
+fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
+	// After creating a chan between nodes, we disconnect all blocks previously seen to force a
+	// channel close on nodes[0] side. We also use this to provide very basic testing of logic
+	// around freeing background events which store monitor updates during block_[dis]connected.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+
+	let channel_state = nodes[0].node.channel_state.lock().unwrap();
+	assert_eq!(channel_state.by_id.len(), 1);
+	assert_eq!(channel_state.short_to_id.len(), 1);
+	mem::drop(channel_state);
+
+	let mut headers = Vec::new();
+	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	headers.push(header.clone());
+	for _i in 2..100 {
+		header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		headers.push(header.clone());
+	}
+	if !reorg_after_reload {
+		while !headers.is_empty() {
+			nodes[0].node.block_disconnected(&headers.pop().unwrap());
+		}
+		check_closed_broadcast!(nodes[0], false);
+		{
+			let channel_state = nodes[0].node.channel_state.lock().unwrap();
+			assert_eq!(channel_state.by_id.len(), 0);
+			assert_eq!(channel_state.short_to_id.len(), 0);
+		}
+	}
+
+	if reload_node {
+		// Since we currently have a background event pending, it's good to test that we survive a
+		// serialization roundtrip. Further, this tests the somewhat awkward edge-case of dropping
+		// the Channel object from the ChannelManager, but still having a monitor event pending for
+		// it when we go to deserialize, and then use the ChannelManager.
+		let nodes_0_serialized = nodes[0].node.encode();
+		let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+		nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+
+		persister = test_utils::TestPersister::new();
+		let keys_manager = &chanmon_cfgs[0].keys_manager;
+		new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+		nodes[0].chain_monitor = &new_chain_monitor;
+		let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+		let (_, mut chan_0_monitor) = <(Option<BlockHash>, ChannelMonitor<EnforcingSigner>)>::read(
+			&mut chan_0_monitor_read, keys_manager).unwrap();
+		assert!(chan_0_monitor_read.is_empty());
+
+		let mut nodes_0_read = &nodes_0_serialized[..];
+		let config = UserConfig::default();
+		nodes_0_deserialized = {
+			let mut channel_monitors = HashMap::new();
+			channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+			<(Option<BlockHash>, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster,
+				&test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(
+				&mut nodes_0_read, ChannelManagerReadArgs {
+					default_config: config,
+					keys_manager,
+					fee_estimator: node_cfgs[0].fee_estimator,
+					chain_monitor: nodes[0].chain_monitor,
+					tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+					logger: nodes[0].logger,
+					channel_monitors,
+				}).unwrap().1
+		};
+		nodes[0].node = &nodes_0_deserialized;
+		assert!(nodes_0_read.is_empty());
+
+		nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+		check_added_monitors!(nodes[0], 1);
+	}
+
+	if reorg_after_reload {
+		while !headers.is_empty() {
+			nodes[0].node.block_disconnected(&headers.pop().unwrap());
+		}
+		check_closed_broadcast!(nodes[0], false);
+		{
+			let channel_state = nodes[0].node.channel_state.lock().unwrap();
+			assert_eq!(channel_state.by_id.len(), 0);
+			assert_eq!(channel_state.short_to_id.len(), 0);
+		}
+	}
+
+	// With expect_channel_force_closed set the TestChainMonitor will enforce that the next update
+	// is a ChannelForceClosed on the right channel with should_broadcast set.
+	*nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan_id, true));
+	nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
+	check_added_monitors!(nodes[0], 1);
+}
+
+#[test]
+fn test_unconf_chan() {
+	do_test_unconf_chan(true, true);
+	do_test_unconf_chan(false, true);
+	do_test_unconf_chan(true, false);
+	do_test_unconf_chan(false, false);
+}
+
+#[test]
+fn test_set_outpoints_partial_claiming() {
+	// - remote party claim tx, new bump tx
+	// - disconnect remote claiming tx, new bump
+	// - disconnect tx, see no tx anymore
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
+	let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
+	let payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000).0;
+
+	// Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLC
+	let remote_txn = get_local_commitment_txn!(nodes[1], chan.2);
+	assert_eq!(remote_txn.len(), 3);
+	assert_eq!(remote_txn[0].output.len(), 4);
+	assert_eq!(remote_txn[0].input.len(), 1);
+	assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.txid());
+	check_spends!(remote_txn[1], remote_txn[0]);
+	check_spends!(remote_txn[2], remote_txn[0]);
+
+	// Connect blocks on node A to advance height towards TEST_FINAL_CLTV
+	let prev_header_100 = connect_blocks(&nodes[1], 100, 0, false, Default::default());
+	// Provide node A with both preimage
+	nodes[0].node.claim_funds(payment_preimage_1, &None, 3_000_000);
+	nodes[0].node.claim_funds(payment_preimage_2, &None, 3_000_000);
+	check_added_monitors!(nodes[0], 2);
+	nodes[0].node.get_and_clear_pending_events();
+	nodes[0].node.get_and_clear_pending_msg_events();
+
+	// Connect blocks on node A commitment transaction
+	let header = BlockHeader { version: 0x20000000, prev_blockhash: prev_header_100, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	connect_block(&nodes[0], &Block { header, txdata: vec![remote_txn[0].clone()] }, 101);
+	check_closed_broadcast!(nodes[0], false);
+	check_added_monitors!(nodes[0], 1);
+	// Verify node A broadcast tx claiming both HTLCs
+	{
+		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		// ChannelMonitor: claim tx, ChannelManager: local commitment tx + HTLC-Success*2
+		assert_eq!(node_txn.len(), 4);
+		check_spends!(node_txn[0], remote_txn[0]);
+		check_spends!(node_txn[1], chan.3);
+		check_spends!(node_txn[2], node_txn[1]);
+		check_spends!(node_txn[3], node_txn[1]);
+		assert_eq!(node_txn[0].input.len(), 2);
+		node_txn.clear();
+	}
+
+	// Connect blocks on node B
+	connect_blocks(&nodes[1], 135, 0, false, Default::default());
+	check_closed_broadcast!(nodes[1], false);
+	check_added_monitors!(nodes[1], 1);
+	// Verify node B broadcast 2 HTLC-timeout txn
+	let partial_claim_tx = {
+		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn.len(), 3);
+		check_spends!(node_txn[1], node_txn[0]);
+		check_spends!(node_txn[2], node_txn[0]);
+		assert_eq!(node_txn[1].input.len(), 1);
+		assert_eq!(node_txn[2].input.len(), 1);
+		node_txn[1].clone()
+	};
+
+	// Broadcast partial claim on node A, should regenerate a claiming tx with HTLC dropped
+	let header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	connect_block(&nodes[0], &Block { header, txdata: vec![partial_claim_tx.clone()] }, 102);
+	{
+		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn.len(), 1);
+		check_spends!(node_txn[0], remote_txn[0]);
+		assert_eq!(node_txn[0].input.len(), 1); //dropped HTLC
+		node_txn.clear();
+	}
+	nodes[0].node.get_and_clear_pending_msg_events();
+
+	// Disconnect last block on node A, should regenerate a claiming tx with HTLC dropped
+	disconnect_block(&nodes[0], &header, 102);
+	{
+		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn.len(), 1);
+		check_spends!(node_txn[0], remote_txn[0]);
+		assert_eq!(node_txn[0].input.len(), 2); //resurrected HTLC
+		node_txn.clear();
+	}
+
+	//// Disconnect one more block and then reconnect multiple no transaction should be generated
+	disconnect_block(&nodes[0], &header, 101);
+	connect_blocks(&nodes[1], 15, 101, false, prev_header_100);
+	{
+		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_txn.len(), 0);
+		node_txn.clear();
+	}
+}
File: lightning/src/util/test_utils.rs

@@ -82,9 +82,13 @@ pub struct TestChainMonitor<'a> {
 	pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a channelmonitor::Persist<EnforcingSigner>>,
 	pub keys_manager: &'a TestKeysInterface,
 	pub update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
-	// If this is set to Some(), after the next return, we'll always return this until update_ret
-	// is changed:
+	/// If this is set to Some(), after the next return, we'll always return this until update_ret
+	/// is changed:
 	pub next_update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
+	/// If this is set to Some(), the next update_channel call (not watch_channel) must be a
+	/// ChannelForceClosed event for the given channel_id with should_broadcast set to the given
+	/// boolean.
+	pub expect_channel_force_closed: Mutex<Option<([u8; 32], bool)>>,
 }
 impl<'a> TestChainMonitor<'a> {
 	pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a channelmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
@@ -95,6 +99,7 @@ impl<'a> TestChainMonitor<'a> {
 			keys_manager,
 			update_ret: Mutex::new(None),
 			next_update_ret: Mutex::new(None),
+			expect_channel_force_closed: Mutex::new(None),
 		}
 	}
 }
@@ -129,6 +134,14 @@ impl<'a> chain::Watch<EnforcingSigner> for TestChainMonitor<'a> {
 		assert!(channelmonitor::ChannelMonitorUpdate::read(
 			&mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
+
+		if let Some(exp) = self.expect_channel_force_closed.lock().unwrap().take() {
+			assert_eq!(funding_txo.to_channel_id(), exp.0);
+			assert_eq!(update.updates.len(), 1);
+			if let channelmonitor::ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
+				assert_eq!(should_broadcast, exp.1);
+			} else { panic!(); }
+		}

 		self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
 		let update_res = self.chain_monitor.update_channel(funding_txo, update);
 		// At every point where we get a monitor update, we should be able to send a useful monitor
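The expect_channel_force_closed field follows a one-shot expectation pattern: a test arms the expectation, and the monitor asserts and consumes it on the next update. A minimal sketch of that pattern, with TestMonitor as a simplified stand-in for the real test harness:

    use std::sync::Mutex;

    struct TestMonitor {
        expect_force_closed: Mutex<Option<([u8; 32], bool)>>,
    }

    impl TestMonitor {
        fn update_channel(&self, channel_id: [u8; 32], should_broadcast: bool) {
            // take() consumes the expectation so it applies to exactly one update.
            if let Some((exp_id, exp_broadcast)) = self.expect_force_closed.lock().unwrap().take() {
                assert_eq!(channel_id, exp_id);
                assert_eq!(should_broadcast, exp_broadcast);
            }
        }
    }

    fn main() {
        let mon = TestMonitor { expect_force_closed: Mutex::new(None) };
        *mon.expect_force_closed.lock().unwrap() = Some(([1; 32], true));
        mon.update_channel([1; 32], true);
        assert!(mon.expect_force_closed.lock().unwrap().is_none());
    }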