Merge pull request #838 from TheBlueMatt/2021-03-skip-blocks

Make `Channel`'s block connection API more electrum-friendly
Authored by Matt Corallo on 2021-04-05 22:12:45 +00:00; committed via GitHub.
commit e23c270720
9 changed files with 479 additions and 300 deletions


@ -435,11 +435,11 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
let chain_hash = genesis_block(Network::Bitcoin).block_hash();
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
$node.block_connected(&header, &txdata, 1);
for i in 2..100 {
$node.transactions_confirmed(&header, 1, &txdata);
for _ in 2..100 {
header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
$node.block_connected(&header, &[], i);
}
$node.update_best_block(&header, 99);
} }
}


@ -27,6 +27,7 @@ use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
use lightning::chain;
use lightning::chain::Listen;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::chainmonitor;
use lightning::chain::transaction::OutPoint;
@ -202,7 +203,8 @@ impl<'a> MoneyLossDetector<'a> {
self.blocks_connected += 1;
let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height].0, merkle_root: Default::default(), time: self.blocks_connected, bits: 42, nonce: 42 };
self.height += 1;
self.manager.block_connected(&header, &txdata, self.height as u32);
self.manager.transactions_confirmed(&header, self.height as u32, &txdata);
self.manager.update_best_block(&header, self.height as u32);
(*self.monitor).block_connected(&header, &txdata, self.height as u32);
if self.header_hashes.len() > self.height {
self.header_hashes[self.height] = (header.block_hash(), self.blocks_connected);
@ -216,7 +218,7 @@ impl<'a> MoneyLossDetector<'a> {
fn disconnect_block(&mut self) {
if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) {
let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height - 1].0, merkle_root: Default::default(), time: self.header_hashes[self.height].1, bits: 42, nonce: 42 };
self.manager.block_disconnected(&header);
self.manager.block_disconnected(&header, self.height as u32);
self.monitor.block_disconnected(&header, self.height as u32);
self.height -= 1;
let removal_height = self.height;


@ -244,7 +244,7 @@ mod tests {
// Force close because cooperative close doesn't result in any persisted
// updates.
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -252,7 +252,7 @@ mod tests {
let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
// Make sure everything is persisted as expected after close.


@ -241,7 +241,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool, persister_fail
// ...and make sure we can force-close a frozen channel
nodes[0].node.force_close_channel(&channel_id).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
// TODO: Once we hit the chain with the failure transaction we should check that we get a
// PaymentFailed event


@ -7,7 +7,6 @@
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::script::{Script,Builder};
use bitcoin::blockdata::transaction::{TxIn, TxOut, Transaction, SigHashType};
use bitcoin::blockdata::opcodes;
@ -376,13 +375,10 @@ pub(super) struct Channel<Signer: Sign> {
last_sent_closing_fee: Option<(u32, u64, Signature)>, // (feerate, fee, holder_sig)
/// The hash of the block in which the funding transaction reached our CONF_TARGET. We use this
/// to detect unconfirmation after a serialize-unserialize roundtrip where we may not see a full
/// series of block_connected/block_disconnected calls. Obviously this is not a guarantee as we
/// could miss the funding_tx_confirmed_in block as well, but it serves as a useful fallback.
/// The hash of the block in which the funding transaction was included.
funding_tx_confirmed_in: Option<BlockHash>,
funding_tx_confirmation_height: u64,
short_channel_id: Option<u64>,
funding_tx_confirmations: u64,
counterparty_dust_limit_satoshis: u64,
#[cfg(test)]
@ -441,10 +437,6 @@ struct CommitmentTxInfoCached {
}
pub const OUR_MAX_HTLCS: u16 = 50; //TODO
/// Confirmation count threshold at which we close a channel. Ideally we'd keep the channel around
/// on ice until the funding transaction gets more confirmations, but the LN protocol doesn't
/// really allow for this, so instead we're stuck closing it out at that point.
const UNCONF_THRESHOLD: u32 = 6;
const SPENDING_INPUT_FOR_A_OUTPUT_WEIGHT: u64 = 79; // prevout: 36, nSequence: 4, script len: 1, witness lengths: (3+1)/4, sig: 73/4, if-selector: 1, redeemScript: (6 ops + 2*33 pubkeys + 1*2 delay)/4
const B_OUTPUT_PLUS_SPENDING_INPUT_WEIGHT: u64 = 104; // prevout: 40, nSequence: 4, script len: 1, witness lengths: 3/4, sig: 73/4, pubkey: 33/4, output: 31 (TODO: Wrong? Useless?)
@ -581,8 +573,8 @@ impl<Signer: Sign> Channel<Signer> {
last_sent_closing_fee: None,
funding_tx_confirmed_in: None,
funding_tx_confirmation_height: 0,
short_channel_id: None,
funding_tx_confirmations: 0,
feerate_per_kw: feerate,
counterparty_dust_limit_satoshis: 0,
@ -818,8 +810,8 @@ impl<Signer: Sign> Channel<Signer> {
last_sent_closing_fee: None,
funding_tx_confirmed_in: None,
funding_tx_confirmation_height: 0,
short_channel_id: None,
funding_tx_confirmations: 0,
feerate_per_kw: msg.feerate_per_kw,
channel_value_satoshis: msg.funding_satoshis,
@ -3509,26 +3501,140 @@ impl<Signer: Sign> Channel<Signer> {
self.network_sync == UpdateStatus::DisabledMarked
}
/// When we receive a new block, we (a) check whether the block contains the funding
/// transaction (which would start us counting blocks until we send the funding_locked), and
/// (b) check the height of the block against outbound holding cell HTLCs in case we need to
/// give up on them prematurely and time them out. Everything else (e.g. commitment
/// transaction broadcasts, channel closure detection, HTLC transaction broadcasting, etc) is
/// handled by the ChannelMonitor.
fn check_get_funding_locked(&mut self, height: u32) -> Option<msgs::FundingLocked> {
if self.funding_tx_confirmation_height == 0 {
return None;
}
let funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
if funding_tx_confirmations <= 0 {
self.funding_tx_confirmation_height = 0;
}
if funding_tx_confirmations < self.minimum_depth as i64 {
return None;
}
let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
self.channel_state |= ChannelState::OurFundingLocked as u32;
true
} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
self.update_time_counter += 1;
true
} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
// We got a reorg but not enough to trigger a force close, just ignore.
false
} else if self.channel_state < ChannelState::ChannelFunded as u32 {
panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
} else {
// We got a reorg but not enough to trigger a force close, just ignore.
false
};
if need_commitment_update {
if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
return Some(msgs::FundingLocked {
channel_id: self.channel_id,
next_per_commitment_point,
});
} else {
self.monitor_pending_funding_locked = true;
}
}
None
}
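The confirmation count above is inclusive of the block the funding transaction landed in, which is why a 1-conf channel can send funding_locked immediately. A minimal sketch of the same arithmetic (a hypothetical standalone helper, not part of this PR):

```rust
/// Hypothetical helper mirroring the arithmetic in check_get_funding_locked:
/// a transaction confirmed at `conf_height` has `tip - conf_height + 1`
/// confirmations, i.e. one confirmation as soon as it lands in the tip block,
/// and zero or fewer once the tip falls below its confirmation height.
fn confirmations(tip_height: u32, conf_height: u32) -> i64 {
    tip_height as i64 - conf_height as i64 + 1
}

#[test]
fn confirmation_count_is_inclusive() {
    assert_eq!(confirmations(100, 100), 1); // confirmed in the tip block itself
    assert_eq!(confirmations(105, 100), 6); // five further blocks built on top
    assert!(confirmations(99, 100) <= 0);   // tip below conf height: reorged out
}
```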
/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
/// In the first case, we store the confirmation height and calculate the short channel id.
/// In the second, we simply return an Err indicating we need to be force-closed now.
pub fn transactions_confirmed<L: Deref>(&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, logger: &L)
-> Result<Option<msgs::FundingLocked>, msgs::ErrorMessage> where L::Target: Logger {
let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
for &(index_in_block, tx) in txdata.iter() {
if let Some(funding_txo) = self.get_funding_txo() {
// If we haven't yet sent a funding_locked, but are in FundingSent (ignoring
// whether they've sent a funding_locked or not), check if we should send one.
if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
if tx.txid() == funding_txo.txid {
let txo_idx = funding_txo.index as usize;
if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
tx.output[txo_idx].value != self.channel_value_satoshis {
if self.is_outbound() {
// If we generated the funding transaction and it doesn't match what it
// should, the client is really broken and we should just panic and
// tell them off. That said, because hash collisions happen with high
// probability in fuzztarget mode, if we're fuzzing we just close the
// channel and move on.
#[cfg(not(feature = "fuzztarget"))]
panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
}
self.channel_state = ChannelState::ShutdownComplete as u32;
self.update_time_counter += 1;
return Err(msgs::ErrorMessage {
channel_id: self.channel_id(),
data: "funding tx had wrong script/value or output index".to_owned()
});
} else {
if self.is_outbound() {
for input in tx.input.iter() {
if input.witness.is_empty() {
// We generated a malleable funding transaction, implying we've
// just exposed ourselves to funds loss to our counterparty.
#[cfg(not(feature = "fuzztarget"))]
panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
}
}
}
self.funding_tx_confirmation_height = height as u64;
self.funding_tx_confirmed_in = Some(*block_hash);
self.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
Ok(scid) => Some(scid),
Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
}
}
}
// If we allow 1-conf funding, we may need to check for funding_locked here and
// send it immediately instead of waiting for an update_best_block call (which
// may have already happened for this block).
if let Some(funding_locked) = self.check_get_funding_locked(height) {
return Ok(Some(funding_locked));
}
}
for inp in tx.input.iter() {
if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
log_trace!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id()));
return Err(msgs::ErrorMessage {
channel_id: self.channel_id(),
data: "Commitment or closing transaction was confirmed on chain.".to_owned()
});
}
}
}
}
Ok(None)
}
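The short_channel_id stored above follows the BOLT 7 packing, which also explains the limits in the panic message (2^24 blocks, 2^24 transactions per block, 2^16 outputs). A hedged sketch of what scid_from_parts computes (hypothetical reimplementation; the real helper lives elsewhere in the crate):

```rust
/// Hypothetical sketch of BOLT 7 short_channel_id packing:
/// 3 bytes block height | 3 bytes tx index within the block | 2 bytes output index.
fn scid_from_parts_sketch(height: u64, tx_index: u64, vout_index: u64) -> Result<u64, ()> {
    // Bounds match the "> 16 million / > 16 million / > 65k" panic above.
    if height >= (1 << 24) || tx_index >= (1 << 24) || vout_index >= (1 << 16) {
        return Err(());
    }
    Ok((height << 40) | (tx_index << 16) | vout_index)
}
```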
/// When a new block is connected, we check the height of the block against outbound holding
/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
/// handled by the ChannelMonitor.
///
/// If we return Err, the channel may have been closed, at which point the standard
/// requirements apply - no calls may be made except those explicitly stated to be allowed
/// post-shutdown.
/// Only returns an ErrorAction of DisconnectPeer if Err is returned.
///
/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
/// back.
pub fn block_connected(&mut self, header: &BlockHeader, txdata: &TransactionData, height: u32) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> {
pub fn update_best_block(&mut self, height: u32, highest_header_time: u32) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> {
let mut timed_out_htlcs = Vec::new();
let unforwarded_htlc_cltv_limit = height + HTLC_FAIL_BACK_BUFFER;
self.holding_cell_htlc_updates.retain(|htlc_update| {
match htlc_update {
&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
if *cltv_expiry <= height + HTLC_FAIL_BACK_BUFFER {
if *cltv_expiry <= unforwarded_htlc_cltv_limit {
timed_out_htlcs.push((source.clone(), payment_hash.clone()));
false
} else { true }
@ -3537,114 +3643,38 @@ impl<Signer: Sign> Channel<Signer> {
}
});
if self.funding_tx_confirmations > 0 {
self.funding_tx_confirmations += 1;
self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
if let Some(funding_locked) = self.check_get_funding_locked(height) {
return Ok((Some(funding_locked), timed_out_htlcs));
}
let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
for &(index_in_block, tx) in txdata.iter() {
let funding_txo = self.get_funding_txo().unwrap();
if tx.txid() == funding_txo.txid {
let txo_idx = funding_txo.index as usize;
if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
tx.output[txo_idx].value != self.channel_value_satoshis {
if self.is_outbound() {
// If we generated the funding transaction and it doesn't match what it
// should, the client is really broken and we should just panic and
// tell them off. That said, because hash collisions happen with high
// probability in fuzztarget mode, if we're fuzzing we just close the
// channel and move on.
#[cfg(not(feature = "fuzztarget"))]
panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
}
self.channel_state = ChannelState::ShutdownComplete as u32;
self.update_time_counter += 1;
return Err(msgs::ErrorMessage {
channel_id: self.channel_id(),
data: "funding tx had wrong script/value".to_owned()
});
} else {
if self.is_outbound() {
for input in tx.input.iter() {
if input.witness.is_empty() {
// We generated a malleable funding transaction, implying we've
// just exposed ourselves to funds loss to our counterparty.
#[cfg(not(feature = "fuzztarget"))]
panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
}
}
}
self.funding_tx_confirmations = 1;
self.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
Ok(scid) => Some(scid),
Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
}
}
}
if non_shutdown_state >= ChannelState::ChannelFunded as u32 ||
(non_shutdown_state & ChannelState::OurFundingLocked as u32) == ChannelState::OurFundingLocked as u32 {
let mut funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
if self.funding_tx_confirmation_height == 0 {
// Note that check_get_funding_locked may reset funding_tx_confirmation_height to
// zero if it has been reorged out, however in either case, our state flags
// indicate we've already sent a funding_locked
funding_tx_confirmations = 0;
}
// If we've sent funding_locked (or have both sent and received funding_locked), and
// the funding transaction's confirmation count has dipped below minimum_depth / 2,
// close the channel and hope we can get the latest state on chain (because presumably
// the funding transaction is at least still in the mempool of most nodes).
if funding_tx_confirmations < self.minimum_depth as i64 / 2 {
return Err(msgs::ErrorMessage {
channel_id: self.channel_id(),
data: format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.", self.minimum_depth, funding_tx_confirmations),
});
}
}
self.update_time_counter = cmp::max(self.update_time_counter, header.time);
if self.funding_tx_confirmations > 0 {
if self.funding_tx_confirmations == self.minimum_depth as u64 {
let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
self.channel_state |= ChannelState::OurFundingLocked as u32;
true
} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
self.update_time_counter += 1;
true
} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
// We got a reorg but not enough to trigger a force close, just update
// funding_tx_confirmed_in and return.
false
} else if self.channel_state < ChannelState::ChannelFunded as u32 {
panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
} else {
// We got a reorg but not enough to trigger a force close, just update
// funding_tx_confirmed_in and return.
false
};
self.funding_tx_confirmed_in = Some(header.block_hash());
//TODO: Note that this must be a duplicate of the previous commitment point they sent us,
//as otherwise we will have a commitment transaction that they can't revoke (well, kinda,
//they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
//a protocol oversight, but I assume I'm just missing something.
if need_commitment_update {
if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
return Ok((Some(msgs::FundingLocked {
channel_id: self.channel_id,
next_per_commitment_point,
}), timed_out_htlcs));
} else {
self.monitor_pending_funding_locked = true;
return Ok((None, timed_out_htlcs));
}
}
}
}
Ok((None, timed_out_htlcs))
}
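The retain pass at the top of update_best_block gives up on holding-cell HTLCs whose CLTV expiry falls within HTLC_FAIL_BACK_BUFFER of the new height, failing them back while an off-chain failure is still possible. A minimal sketch of that predicate (the buffer value here is a placeholder; the real constant is defined elsewhere in the crate):

```rust
/// Placeholder for illustration only; the real HTLC_FAIL_BACK_BUFFER is
/// defined elsewhere in the crate.
const HTLC_FAIL_BACK_BUFFER_SKETCH: u32 = 12;

/// An un-forwarded holding-cell HTLC is failed back once its expiry is no
/// more than the buffer above the current height, rather than waiting until
/// we would be forced to resolve it on-chain.
fn should_time_out(cltv_expiry: u32, best_height: u32) -> bool {
    cltv_expiry <= best_height + HTLC_FAIL_BACK_BUFFER_SKETCH
}
```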
/// Called by channelmanager based on chain blocks being disconnected.
/// Returns true if we need to close the channel now due to funding transaction
/// unconfirmation/reorg.
pub fn block_disconnected(&mut self, header: &BlockHeader) -> bool {
if self.funding_tx_confirmations > 0 {
self.funding_tx_confirmations -= 1;
if self.funding_tx_confirmations == UNCONF_THRESHOLD as u64 {
return true;
}
}
if Some(header.block_hash()) == self.funding_tx_confirmed_in {
self.funding_tx_confirmations = self.minimum_depth as u64 - 1;
}
false
}
// Methods to get unprompted messages to send to the remote end (or where we already returned
// something in the handler for the message that prompted this message):
@ -4466,8 +4496,8 @@ impl<Signer: Sign> Writeable for Channel<Signer> {
}
self.funding_tx_confirmed_in.write(writer)?;
self.funding_tx_confirmation_height.write(writer)?;
self.short_channel_id.write(writer)?;
self.funding_tx_confirmations.write(writer)?;
self.counterparty_dust_limit_satoshis.write(writer)?;
self.holder_dust_limit_satoshis.write(writer)?;
@ -4636,8 +4666,8 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
};
let funding_tx_confirmed_in = Readable::read(reader)?;
let funding_tx_confirmation_height = Readable::read(reader)?;
let short_channel_id = Readable::read(reader)?;
let funding_tx_confirmations = Readable::read(reader)?;
let counterparty_dust_limit_satoshis = Readable::read(reader)?;
let holder_dust_limit_satoshis = Readable::read(reader)?;
@ -4716,8 +4746,8 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
last_sent_closing_fee,
funding_tx_confirmed_in,
funding_tx_confirmation_height,
short_channel_id,
funding_tx_confirmations,
counterparty_dust_limit_satoshis,
holder_dust_limit_satoshis,


@ -1004,16 +1004,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
}
}
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<(), APIError> {
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
if let Some(node_id) = peer_node_id {
if chan.get().get_counterparty_node_id() != *node_id {
// Error or Ok here doesn't matter - the result is only exposed publicly
// when peer_node_id is None anyway.
return Ok(());
return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
}
}
if let Some(short_id) = chan.get().get_short_channel_id() {
@ -1033,14 +1031,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
});
}
Ok(())
Ok(chan.get_counterparty_node_id())
}
/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
self.force_close_channel_with_peer(channel_id, None)
match self.force_close_channel_with_peer(channel_id, None) {
Ok(counterparty_node_id) => {
self.channel_state.lock().unwrap().pending_msg_events.push(
events::MessageSendEvent::HandleError {
node_id: counterparty_node_id,
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
},
}
);
Ok(())
},
Err(e) => Err(e)
}
}
/// Force close all channels, immediately broadcasting the latest local commitment transaction
@ -3196,6 +3207,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
msg: update
});
}
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage {
msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
},
});
}
},
}
@ -3278,12 +3295,26 @@ where
L::Target: Logger,
{
fn block_connected(&self, block: &Block, height: u32) {
assert_eq!(*self.last_block_hash.read().unwrap(), block.header.prev_blockhash,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
assert_eq!(self.latest_block_height.load(Ordering::Acquire) as u64, height as u64 - 1,
"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
ChannelManager::block_connected(self, &block.header, &txdata, height);
self.transactions_confirmed(&block.header, height, &txdata);
self.update_best_block(&block.header, height);
}
fn block_disconnected(&self, header: &BlockHeader, _height: u32) {
ChannelManager::block_disconnected(self, header);
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
assert_eq!(*self.last_block_hash.read().unwrap(), header.block_hash(),
"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
let new_height = self.latest_block_height.fetch_sub(1, Ordering::AcqRel) as u32 - 1;
assert_eq!(new_height, height - 1,
"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
*self.last_block_hash.write().unwrap() = header.prev_blockhash;
self.do_chain_event(new_height, |channel| channel.update_best_block(new_height, header.time));
}
}
@ -3294,22 +3325,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
F::Target: FeeEstimator,
L::Target: Logger,
{
/// Updates channel state based on transactions seen in a connected block.
pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>>
(&self, height: u32, f: FN) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
let block_hash = header.block_hash();
log_trace!(self.logger, "Block {} at height {} connected", block_hash, height);
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
assert_eq!(*self.last_block_hash.read().unwrap(), header.prev_blockhash,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
assert_eq!(self.latest_block_height.load(Ordering::Acquire) as u64, height as u64 - 1,
"Blocks must be connected in chain-order - the connected header must build on the last connected header");
self.latest_block_height.store(height as usize, Ordering::Release);
*self.last_block_hash.write().unwrap() = block_hash;
let mut failed_channels = Vec::new();
let mut timed_out_htlcs = Vec::new();
@ -3319,7 +3339,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|_, channel| {
let res = channel.block_connected(header, txdata, height);
let res = f(channel);
if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
@ -3345,32 +3365,23 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
}
} else if let Err(e) = res {
if let Some(short_id) = channel.get_short_channel_id() {
short_to_id.remove(&short_id);
}
// It looks like our counterparty went on-chain or the funding transaction was
// reorged out of the main chain. Close the channel.
failed_channels.push(channel.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: e },
});
return false;
}
if let Some(funding_txo) = channel.get_funding_txo() {
for &(_, tx) in txdata.iter() {
for inp in tx.input.iter() {
if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
log_trace!(self.logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
if let Some(short_id) = channel.get_short_channel_id() {
short_to_id.remove(&short_id);
}
// It looks like our counterparty went on-chain. Close the channel.
failed_channels.push(channel.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
return false;
}
}
}
}
true
});
@ -3399,6 +3410,64 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
}
}
/// Updates channel state to take note of transactions which were confirmed in the given block
/// at the given height.
///
/// Note that you must still call (or have called) [`update_best_block`] with the block
/// information which is included here.
///
/// This method may be called before or after [`update_best_block`] for a given block's
/// transaction data and may be called multiple times with additional transaction data for a
/// given block.
///
/// This method may be called for a previous block after an [`update_best_block`] call has
/// been made for a later block, however it must *not* be called with transaction data from a
/// block which is no longer in the best chain (ie where [`update_best_block`] has already
/// been informed about a blockchain reorganization which no longer includes the block which
/// corresponds to `header`).
///
/// [`update_best_block`]: `Self::update_best_block`
pub fn transactions_confirmed(&self, header: &BlockHeader, height: u32, txdata: &TransactionData) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
let block_hash = header.block_hash();
log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
self.do_chain_event(height, |channel| channel.transactions_confirmed(&block_hash, height, txdata, &self.logger).map(|a| (a, Vec::new())));
}
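Together with update_best_block below, this is the Electrum-friendly surface the PR title refers to: a transaction-oriented chain source can report confirmations and the best tip separately. A hedged sketch of the glue a client might write (the trait below is hypothetical shorthand for a fully-parameterized ChannelManager; only the two method calls are from this PR):

```rust
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;

/// Hypothetical trait capturing just the two calls added here; no such trait
/// exists in the crate, it merely stands in for ChannelManager's generics.
trait ConfirmSketch {
    fn transactions_confirmed(&self, header: &BlockHeader, height: u32, txdata: &[(usize, &Transaction)]);
    fn update_best_block(&self, header: &BlockHeader, height: u32);
}

fn on_confirmed_block<M: ConfirmSketch>(manager: &M, header: &BlockHeader, height: u32,
                                        relevant_txn: &[(usize, &Transaction)]) {
    // Per the docs above, either order works for a single block, and
    // transactions_confirmed may be called again with more data for this block.
    manager.transactions_confirmed(header, height, relevant_txn);
    manager.update_best_block(header, height);
}
```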
/// Updates channel state with the current best blockchain tip. You should attempt to call this
/// quickly after a new block becomes available; however, if multiple new blocks become
/// available at the same time, only a single `update_best_block()` call needs to be made.
///
/// This method should also be called immediately after any block disconnections, once at the
/// reorganization fork point, and once with the new chain tip. Calling this method at the
/// blockchain reorganization fork point ensures we learn when a funding transaction which was
/// previously confirmed is reorganized out of the blockchain, ensuring we do not continue to
/// accept payments which cannot be enforced on-chain.
///
/// In both the block-connection and block-disconnection case, this method may be called either
/// once per block connected or disconnected, or simply at the fork point and new tip(s),
/// skipping any intermediary blocks.
pub fn update_best_block(&self, header: &BlockHeader, height: u32) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
let block_hash = header.block_hash();
log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
self.latest_block_height.store(height as usize, Ordering::Release);
*self.last_block_hash.write().unwrap() = block_hash;
self.do_chain_event(height, |channel| channel.update_best_block(height, header.time));
loop {
// Update last_node_announcement_serial to be the max of its current value and the
@ -3414,48 +3483,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
}
}
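A hedged sketch of the reorganization guidance above, reusing the hypothetical ConfirmSketch trait from the previous example: rewind to the fork point with a single update_best_block call, confirm transactions along the new chain, then update once at the new tip (intermediary blocks may be skipped):

```rust
/// `fork_point` and `new_chain` come from a hypothetical chain client;
/// nothing here beyond the two method calls is from this PR.
fn on_reorg<M: ConfirmSketch>(
    manager: &M,
    fork_point: (&BlockHeader, u32),
    new_chain: &[(&BlockHeader, u32, Vec<(usize, &Transaction)>)],
) {
    // Calling at the fork point lets channels notice a funding transaction
    // that was reorged out, so we stop accepting unenforceable payments.
    manager.update_best_block(fork_point.0, fork_point.1);
    for (header, height, txdata) in new_chain {
        manager.transactions_confirmed(header, *height, txdata);
    }
    // One call at the new tip suffices, skipping intermediary blocks.
    if let Some((tip, tip_height, _)) = new_chain.last() {
        manager.update_best_block(tip, *tip_height);
    }
}
```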
/// Updates channel state based on a disconnected block.
///
/// If necessary, the channel may be force-closed without letting the counterparty participate
/// in the shutdown.
pub fn block_disconnected(&self, header: &BlockHeader) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
assert_eq!(*self.last_block_hash.read().unwrap(), header.block_hash(),
"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
*self.last_block_hash.write().unwrap() = header.prev_blockhash;
let mut failed_channels = Vec::new();
{
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|_, v| {
if v.block_disconnected(header) {
if let Some(short_id) = v.get_short_channel_id() {
short_to_id.remove(&short_id);
}
failed_channels.push(v.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&v) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
false
} else {
true
}
});
}
self.handle_init_event_channel_failures(failed_channels);
}
/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
/// indicating whether persistence is necessary. Only one listener on
/// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken


@ -10,7 +10,7 @@
//! A bunch of useful utilities for building networks of nodes and exchanging messages between
//! nodes for functional tests.
use chain::Watch;
use chain::{Listen, Watch};
use chain::channelmonitor::ChannelMonitor;
use chain::transaction::OutPoint;
use ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure};
@ -60,21 +60,15 @@ pub fn mine_transaction<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transac
/// Mine the given transaction at the given height, mining blocks as required to build to that
/// height
pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) {
let starting_block = node.best_block_info();
let first_connect_height = node.best_block_info().1 + 1;
assert!(first_connect_height <= conf_height);
if conf_height - first_connect_height >= 1 {
connect_blocks(node, conf_height - first_connect_height);
}
let mut block = Block {
header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
txdata: Vec::new(),
};
let height = starting_block.1 + 1;
assert!(height <= conf_height);
for _ in height..conf_height {
connect_block(node, &block);
block = Block {
header: BlockHeader { version: 0x20000000, prev_blockhash: block.header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
txdata: vec![],
};
}
for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
block.txdata.push(Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() });
}
@ -82,37 +76,94 @@ pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &T
connect_block(node, &block);
}
/// The possible ways we may notify a ChannelManager of a new block
pub enum ConnectStyle {
/// Calls update_best_block first, detecting transactions in the block only after receiving the
/// header and height information.
BestBlockFirst,
/// The same as BestBlockFirst, however when we have multiple blocks to connect, we only
/// make a single update_best_block call.
BestBlockFirstSkippingBlocks,
/// Calls transactions_confirmed first, detecting transactions in the block before updating the
/// header and height information.
TransactionsFirst,
/// The same as TransactionsFirst, however when we have multiple blocks to connect, we only
/// make a single update_best_block call.
TransactionsFirstSkippingBlocks,
/// Provides the full block via the chain::Listen interface. In the current code this is
/// equivalent to TransactionsFirst with some additional assertions.
FullBlockViaListen,
}
pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) -> BlockHash {
let skip_intermediaries = match *node.connect_style.borrow() {
ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => true,
_ => false,
};
let mut block = Block {
header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
txdata: vec![],
};
connect_block(node, &block);
for _ in 2..depth + 1 {
assert!(depth >= 1);
for _ in 0..depth - 1 {
do_connect_block(node, &block, skip_intermediaries);
block = Block {
header: BlockHeader { version: 0x20000000, prev_blockhash: block.header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
txdata: vec![],
};
connect_block(node, &block);
}
connect_block(node, &block);
block.header.block_hash()
}
pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
do_connect_block(node, block, false);
}
fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block, skip_manager: bool) {
let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
let height = node.best_block_info().1 + 1;
node.chain_monitor.chain_monitor.block_connected(&block.header, &txdata, height);
node.node.block_connected(&block.header, &txdata, height);
if !skip_manager {
match *node.connect_style.borrow() {
ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstSkippingBlocks => {
node.node.update_best_block(&block.header, height);
node.node.transactions_confirmed(&block.header, height, &block.txdata.iter().enumerate().collect::<Vec<_>>());
},
ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks => {
node.node.transactions_confirmed(&block.header, height, &block.txdata.iter().enumerate().collect::<Vec<_>>());
node.node.update_best_block(&block.header, height);
},
ConnectStyle::FullBlockViaListen => {
Listen::block_connected(node.node, &block, height);
}
}
}
node.node.test_process_background_events();
node.blocks.borrow_mut().push((block.header, height));
}
pub fn disconnect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, count: u32) {
for _ in 0..count {
for i in 0..count {
let orig_header = node.blocks.borrow_mut().pop().unwrap();
assert!(orig_header.1 > 0); // Cannot disconnect genesis
let prev_header = node.blocks.borrow().last().unwrap().clone();
node.chain_monitor.chain_monitor.block_disconnected(&orig_header.0, orig_header.1);
node.node.block_disconnected(&orig_header.0);
match *node.connect_style.borrow() {
ConnectStyle::FullBlockViaListen => {
Listen::block_disconnected(node.node, &orig_header.0, orig_header.1);
},
ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => {
if i == count - 1 {
node.node.update_best_block(&prev_header.0, prev_header.1);
}
},
_ => {
node.node.update_best_block(&prev_header.0, prev_header.1);
},
}
}
}
@ -152,6 +203,7 @@ pub struct Node<'a, 'b: 'a, 'c: 'b> {
pub network_chan_count: Rc<RefCell<u32>>,
pub logger: &'c test_utils::TestLogger,
pub blocks: RefCell<Vec<(BlockHeader, u32)>>,
pub connect_style: Rc<RefCell<ConnectStyle>>,
}
impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
pub fn best_block_hash(&self) -> BlockHash {
@ -1243,6 +1295,7 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeC
let mut nodes = Vec::new();
let chan_count = Rc::new(RefCell::new(0));
let payment_count = Rc::new(RefCell::new(0));
let connect_style = Rc::new(RefCell::new(ConnectStyle::FullBlockViaListen));
for i in 0..node_count {
let net_graph_msg_handler = NetGraphMsgHandler::new(cfgs[i].chain_source.genesis_hash, None, cfgs[i].logger);
@ -1251,7 +1304,8 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec<NodeC
keys_manager: &cfgs[i].keys_manager, node: &chan_mgrs[i], net_graph_msg_handler,
node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
network_payment_count: payment_count.clone(), logger: cfgs[i].logger,
blocks: RefCell::new(vec![(genesis_block(Network::Testnet).header, 0)])
blocks: RefCell::new(vec![(genesis_block(Network::Testnet).header, 0)]),
connect_style: Rc::clone(&connect_style),
})
}
@ -1364,22 +1418,36 @@ pub fn check_preimage_claim<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, prev_txn: &Vec<
pub fn get_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec<Node<'a, 'b, 'c>>, a: usize, b: usize) {
let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
assert_eq!(events_1.len(), 1);
assert_eq!(events_1.len(), 2);
let as_update = match events_1[0] {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
msg.clone()
},
_ => panic!("Unexpected event"),
};
match events_1[1] {
MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
assert_eq!(node_id, nodes[b].node.get_our_node_id());
assert_eq!(msg.data, "Commitment or closing transaction was confirmed on chain.");
},
_ => panic!("Unexpected event"),
}
let events_2 = nodes[b].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
assert_eq!(events_2.len(), 2);
let bs_update = match events_2[0] {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
msg.clone()
},
_ => panic!("Unexpected event"),
};
match events_2[1] {
MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
assert_eq!(node_id, nodes[a].node.get_our_node_id());
assert_eq!(msg.data, "Commitment or closing transaction was confirmed on chain.");
},
_ => panic!("Unexpected event"),
}
for node in nodes {
node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();


@ -394,8 +394,7 @@ fn test_multi_flight_update_fee() {
check_added_monitors!(nodes[1], 1);
}
#[test]
fn test_1_conf_open() {
fn do_test_1_conf_open(connect_style: ConnectStyle) {
// Previously, if the minimum_depth config was set to 1, we'd never send a funding_locked. This
// tests that we properly send one in that case.
let mut alice_config = UserConfig::default();
@ -409,7 +408,8 @@ fn test_1_conf_open() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(alice_config), Some(bob_config)]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
*nodes[0].connect_style.borrow_mut() = connect_style;
let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
mine_transaction(&nodes[1], &tx);
@ -425,6 +425,12 @@ fn test_1_conf_open() {
node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
}
}
#[test]
fn test_1_conf_open() {
do_test_1_conf_open(ConnectStyle::BestBlockFirst);
do_test_1_conf_open(ConnectStyle::TransactionsFirst);
do_test_1_conf_open(ConnectStyle::FullBlockViaListen);
}
fn do_test_sanity_on_in_flight_opens(steps: u8) {
// Previously, we had issues deserializing channels when we hadn't connected the first block
@ -1508,10 +1514,14 @@ fn test_duplicate_htlc_different_direction_onchain() {
check_spends!(htlc_pair.1, remote_txn[0]);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
assert_eq!(events.len(), 3);
for e in events {
match e {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
assert_eq!(node_id, nodes[1].node.get_our_node_id());
assert_eq!(msg.data, "Commitment or closing transaction was confirmed on chain.");
},
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
assert!(update_add_htlcs.is_empty());
assert!(update_fail_htlcs.is_empty());
@ -2334,6 +2344,7 @@ fn channel_monitor_network_test() {
// Simple case with no pending HTLCs:
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], false);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
assert_eq!(node_txn.len(), 1);
@ -2341,7 +2352,7 @@ fn channel_monitor_network_test() {
check_added_monitors!(nodes[0], 1);
test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
}
get_announce_close_broadcast_events(&nodes, 0, 1);
check_closed_broadcast!(nodes[0], true);
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
@ -2350,6 +2361,7 @@ fn channel_monitor_network_test() {
// Simple case of one pending HTLC to HTLC-Timeout
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
check_closed_broadcast!(nodes[1], false);
check_added_monitors!(nodes[1], 1);
{
let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
@ -2357,7 +2369,7 @@ fn channel_monitor_network_test() {
check_added_monitors!(nodes[2], 1);
test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
}
get_announce_close_broadcast_events(&nodes, 1, 2);
check_closed_broadcast!(nodes[2], true);
assert_eq!(nodes[1].node.list_channels().len(), 0);
assert_eq!(nodes[2].node.list_channels().len(), 1);
@ -2385,6 +2397,7 @@ fn channel_monitor_network_test() {
// HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
check_added_monitors!(nodes[2], 1);
check_closed_broadcast!(nodes[2], false);
let node2_commitment_txid;
{
let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
@ -2396,7 +2409,7 @@ fn channel_monitor_network_test() {
check_added_monitors!(nodes[3], 1);
check_preimage_claim(&nodes[3], &node_txn);
}
get_announce_close_broadcast_events(&nodes, 2, 3);
check_closed_broadcast!(nodes[3], true);
assert_eq!(nodes[2].node.list_channels().len(), 0);
assert_eq!(nodes[3].node.list_channels().len(), 1);
@ -2412,13 +2425,19 @@ fn channel_monitor_network_test() {
let (close_chan_update_1, close_chan_update_2) = {
connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
let events = nodes[3].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
assert_eq!(events.len(), 2);
let close_chan_update_1 = match events[0] {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
msg.clone()
},
_ => panic!("Unexpected event"),
};
match events[1] {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
assert_eq!(node_id, nodes[4].node.get_our_node_id());
},
_ => panic!("Unexpected event"),
}
check_added_monitors!(nodes[3], 1);
// Clear bumped claiming txn spending node 2 commitment tx. Bumped txn are generated after reaching some height timer.
@ -2438,13 +2457,19 @@ fn channel_monitor_network_test() {
connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
let events = nodes[4].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
assert_eq!(events.len(), 2);
let close_chan_update_2 = match events[0] {
MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
msg.clone()
},
_ => panic!("Unexpected event"),
};
match events[1] {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
assert_eq!(node_id, nodes[3].node.get_our_node_id());
},
_ => panic!("Unexpected event"),
}
check_added_monitors!(nodes[4], 1);
test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
@ -2785,7 +2810,7 @@ fn test_htlc_on_chain_success() {
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], false);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
assert_eq!(node_txn.len(), 5);
@ -2818,12 +2843,17 @@ fn test_htlc_on_chain_success() {
assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
added_monitors.clear();
}
assert_eq!(events.len(), 2);
assert_eq!(events.len(), 3);
match events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexpected event"),
}
match events[1] {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
_ => panic!("Unexpected event"),
}
match events[2] {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
assert!(update_add_htlcs.is_empty());
assert!(update_fail_htlcs.is_empty());
@ -2877,7 +2907,7 @@ fn test_htlc_on_chain_success() {
let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
check_spends!(commitment_tx[0], chan_1.3);
mine_transaction(&nodes[1], &commitment_tx[0]);
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 1 (HTLC-Success)
assert_eq!(node_txn.len(), 4);
@ -2897,7 +2927,7 @@ fn test_htlc_on_chain_success() {
// Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
connect_block(&nodes[0], &Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] });
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
@ -2964,7 +2994,7 @@ fn test_htlc_on_chain_timeout() {
_ => panic!("Unexpected event"),
};
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], false);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
assert_eq!(node_txn.len(), 1);
@ -2996,7 +3026,7 @@ fn test_htlc_on_chain_timeout() {
mine_transaction(&nodes[1], &timeout_tx);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
{
// B will rebroadcast a fee-bumped timeout transaction here.
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
@ -3033,7 +3063,7 @@ fn test_htlc_on_chain_timeout() {
mine_transaction(&nodes[0], &commitment_tx[0]);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 timeout tx
assert_eq!(node_txn.len(), 3);
@ -3070,7 +3100,7 @@ fn test_simple_commitment_revoked_fail_backward() {
mine_transaction(&nodes[1], &revoked_local_txn[0]);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
@ -3241,11 +3271,18 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), if deliver_bs_raa { 3 } else { 2 });
assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
match events[if deliver_bs_raa { 1 } else { 0 }] {
MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
_ => panic!("Unexpected event"),
}
match events[if deliver_bs_raa { 2 } else { 1 }] {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
assert_eq!(channel_id, chan_2.2);
assert_eq!(data.as_str(), "Commitment or closing transaction was confirmed on chain.");
},
_ => panic!("Unexpected event"),
}
if deliver_bs_raa {
match events[0] {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
@ -3258,7 +3295,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
_ => panic!("Unexpected event"),
}
}
match events[if deliver_bs_raa { 2 } else { 1 }] {
match events[if deliver_bs_raa { 3 } else { 2 }] {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
assert!(update_add_htlcs.is_empty());
assert_eq!(update_fail_htlcs.len(), 3);
@ -3407,7 +3444,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
route_payment(&nodes[0], &[&nodes[1]], 10000000);
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -3415,7 +3452,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
// Duplicate the connect_block call since this may happen due to other listeners
@ -3469,7 +3506,7 @@ fn test_force_close_fail_back() {
// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
check_closed_broadcast!(nodes[2], false);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
let tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -3483,7 +3520,7 @@ fn test_force_close_fail_back() {
mine_transaction(&nodes[1], &tx);
// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
@ -4658,7 +4695,7 @@ fn test_claim_sizeable_push_msat() {
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
nodes[1].node.force_close_channel(&chan.2).unwrap();
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
@ -4684,7 +4721,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
nodes[0].node.force_close_channel(&chan.2).unwrap();
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -4693,7 +4730,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
mine_transaction(&nodes[1], &node_txn[0]);
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
@ -4720,7 +4757,7 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() {
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -4846,7 +4883,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -4882,7 +4919,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
// A will generate HTLC-Timeout from revoked commitment tx
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -4895,7 +4932,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
// B will generate justice tx from A's revoked commitment/HTLC tx
let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -4951,7 +4988,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
// B will generate HTLC-Success from revoked commitment tx
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -4967,7 +5004,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
// A will generate justice tx from B's revoked commitment/HTLC tx
let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -5041,7 +5078,7 @@ fn test_onchain_to_onchain_claim() {
assert!(updates.update_fail_malformed_htlcs.is_empty());
mine_transaction(&nodes[2], &commitment_tx[0]);
check_closed_broadcast!(nodes[2], false);
check_closed_broadcast!(nodes[2], true);
check_added_monitors!(nodes[2], 1);
let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
@ -5075,12 +5112,17 @@ fn test_onchain_to_onchain_claim() {
}
check_added_monitors!(nodes[1], 1);
let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 3);
check_added_monitors!(nodes[1], 1);
match msg_events[0] {
MessageSendEvent::BroadcastChannelUpdate { .. } => {},
_ => panic!("Unexpected event"),
}
match msg_events[1] {
MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
_ => panic!("Unexpected event"),
}
match msg_events[2] {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
assert!(update_add_htlcs.is_empty());
assert!(update_fail_htlcs.is_empty());
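The explicit three-event sequence asserted in this hunk (channel-update broadcast, error message, then the backward HTLC update) is effectively the expanded form of what check_closed_broadcast!(node, true) folds into a single call elsewhere in these tests.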
@ -5103,7 +5145,7 @@ fn test_onchain_to_onchain_claim() {
assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
assert_eq!(b_txn[0].lock_time, 0); // Success tx
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
}
@ -5128,7 +5170,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
check_spends!(commitment_txn[0], chan_2.3);
mine_transaction(&nodes[1], &commitment_txn[0]);
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
let htlc_timeout_tx;
@ -5408,7 +5450,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
}
connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
check_closed_broadcast!(nodes[2], false);
check_closed_broadcast!(nodes[2], true);
expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 3);
@ -5545,7 +5587,7 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
// Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
mine_transaction(&nodes[0], &local_txn[0]);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let htlc_timeout = {
@ -5613,7 +5655,7 @@ fn test_key_derivation_params() {
// Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
mine_transaction(&nodes[0], &local_txn_1[0]);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let htlc_timeout = {
@ -5705,7 +5747,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) {
block.header.prev_blockhash = block.block_hash();
}
test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
}
@ -5737,7 +5779,7 @@ fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
header.prev_blockhash = header.block_hash();
}
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
}
@ -5785,7 +5827,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no
}
if !check_revoke_no_close {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
} else {
expect_payment_failed!(nodes[0], our_payment_hash, true);
@ -6967,7 +7009,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
}
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
@ -7029,7 +7071,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
if local {
// We fail dust-HTLC 1 by broadcast of local commitment tx
mine_transaction(&nodes[0], &as_commitment_tx[0]);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
@ -7044,7 +7086,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
} else {
// We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
mine_transaction(&nodes[0], &bs_commitment_tx[0]);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
@ -7719,7 +7761,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
// B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -7851,7 +7893,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
assert_eq!(node_txn.len(), 0);
node_txn.clear();
}
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
}
@ -8025,7 +8067,7 @@ fn test_bump_txn_sanitize_tracking_maps() {
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let penalty_txn = {
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@ -8388,7 +8430,7 @@ fn test_htlc_no_detection() {
// We deliberately connect the local tx twice, as this provoked a failure in
// this test before the #653 fix.
chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let htlc_timeout = {
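Re-connecting the same block is deliberate: block sources may deliver a block more than once (for instance when several listeners feed the same chain data), so confirmation handling must be idempotent; prior to the #653 fix, the duplicate connection made this test fail.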
@ -8446,7 +8488,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
let mut force_closing_node = 0; // Alice force-closes
if !broadcast_alice { force_closing_node = 1; } // Bob force-closes
nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
check_closed_broadcast!(nodes[force_closing_node], false);
check_closed_broadcast!(nodes[force_closing_node], true);
check_added_monitors!(nodes[force_closing_node], 1);
if go_onchain_before_fulfill {
let txn_to_broadcast = match broadcast_alice {
@ -8457,7 +8499,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
if broadcast_alice {
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
}
assert_eq!(bob_txn.len(), 1);
@ -8536,7 +8578,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
// If Bob was the one to force-close, he will have already passed these checks earlier.
if broadcast_alice {
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
}
let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();

View file

@ -76,7 +76,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
// Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
connect_block(&nodes[2], &Block { header, txdata: node_1_commitment_txn.clone() });
check_added_monitors!(nodes[2], 1);
check_closed_broadcast!(nodes[2], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
check_closed_broadcast!(nodes[2], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
@ -116,7 +116,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
node_2_commitment_txn
};
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
check_closed_broadcast!(nodes[1], true); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
// Connect ANTI_REORG_DELAY - 2 blocks, giving us a confirmation count of ANTI_REORG_DELAY - 1.
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
check_added_monitors!(nodes[1], 0);
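The off-by-one here is intentional: the block that mines a transaction already counts as its first confirmation, so connecting ANTI_REORG_DELAY - 2 additional blocks leaves the transactions at ANTI_REORG_DELAY - 1 confirmations, one short of the threshold at which the HTLC resolution is treated as final.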
@ -184,7 +184,7 @@ fn test_onchain_htlc_timeout_delay_remote_commitment() {
do_test_onchain_htlc_reorg(false, false);
}
fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, connect_style: ConnectStyle) {
// After creating a chan between nodes, we disconnect all blocks previously seen to force a
// channel close on nodes[0] side. We also use this to provide very basic testing of logic
// around freeing background events which store monitor updates during block_[dis]connected.
@ -195,6 +195,8 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
let new_chain_monitor: test_utils::TestChainMonitor;
let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
*nodes[0].connect_style.borrow_mut() = connect_style;
let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
let channel_state = nodes[0].node.channel_state.lock().unwrap();
@ -204,7 +206,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
if !reorg_after_reload {
disconnect_all_blocks(&nodes[0]);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
{
let channel_state = nodes[0].node.channel_state.lock().unwrap();
assert_eq!(channel_state.by_id.len(), 0);
@ -256,7 +258,7 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
if reorg_after_reload {
disconnect_all_blocks(&nodes[0]);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
{
let channel_state = nodes[0].node.channel_state.lock().unwrap();
assert_eq!(channel_state.by_id.len(), 0);
@ -272,10 +274,18 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool) {
#[test]
fn test_unconf_chan() {
do_test_unconf_chan(true, true);
do_test_unconf_chan(false, true);
do_test_unconf_chan(true, false);
do_test_unconf_chan(false, false);
do_test_unconf_chan(true, true, ConnectStyle::BestBlockFirstSkippingBlocks);
do_test_unconf_chan(false, true, ConnectStyle::BestBlockFirstSkippingBlocks);
do_test_unconf_chan(true, false, ConnectStyle::BestBlockFirstSkippingBlocks);
do_test_unconf_chan(false, false, ConnectStyle::BestBlockFirstSkippingBlocks);
}
#[test]
fn test_unconf_chan_via_listen() {
do_test_unconf_chan(true, true, ConnectStyle::FullBlockViaListen);
do_test_unconf_chan(false, true, ConnectStyle::FullBlockViaListen);
do_test_unconf_chan(true, false, ConnectStyle::FullBlockViaListen);
do_test_unconf_chan(false, false, ConnectStyle::FullBlockViaListen);
}
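The new connect_style parameter lets the same reorg test run under different block-delivery models. Below is a hedged sketch of the two styles exercised here and how a harness might dispatch on them; the real ConnectStyle in the test utilities may define further variants, and the helper function is hypothetical, with field and method names following the patterns used in these tests.

	pub enum ConnectStyle {
		// Update the best block first, then confirm the relevant transactions,
		// without connecting the intermediate blocks (Electrum-like operation).
		BestBlockFirstSkippingBlocks,
		// Feed every full block through the chain::Listen interface.
		FullBlockViaListen,
	}

	// Hypothetical dispatch helper for illustration only.
	fn connect_block_styled(node: &Node, block: &Block, height: u32, style: ConnectStyle) {
		match style {
			ConnectStyle::BestBlockFirstSkippingBlocks => {
				node.node.update_best_block(&block.header, height);
				// Index transactions by their position within the block.
				let txdata: Vec<_> = block.txdata.iter().enumerate()
					.map(|(i, tx)| (i + 1, tx)).collect();
				node.node.transactions_confirmed(&block.header, height, &txdata);
			},
			ConnectStyle::FullBlockViaListen => {
				chain::Listen::block_connected(node.node, block, height);
			},
		}
	}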
#[test]
@ -311,7 +321,7 @@ fn test_set_outpoints_partial_claiming() {
// Connect blocks on node A commitment transaction
mine_transaction(&nodes[0], &remote_txn[0]);
check_closed_broadcast!(nodes[0], false);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
// Verify node A broadcast tx claiming both HTLCs
{
@ -328,7 +338,7 @@ fn test_set_outpoints_partial_claiming() {
// Connect blocks on node B
connect_blocks(&nodes[1], 135);
check_closed_broadcast!(nodes[1], false);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
// Verify node B broadcast 2 HTLC-timeout txn
let partial_claim_tx = {