Merge pull request #2495 from dunxen/2023-07-channelenummap

Use a single peer state map for all channel phases in peer state
Commit 6436232293 — authored by Matt Corallo, 2023-09-09 18:54:06 +00:00, committed by GitHub.
8 changed files with 1154 additions and 979 deletions
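
The PR collapses the per-peer channel maps into a single map keyed by channel id, with each entry wrapped in a new `ChannelPhase` enum recording whether the channel is still unfunded or already funded. The sketch below uses simplified stand-in types (not the real signer-generic LDK types) to show the shape of that consolidation; the real per-peer struct lives in channelmanager.rs, whose diff is too large to display below.

// --- Illustrative sketch, not part of the diff. Stand-in types only. ---
use std::collections::HashMap;

type ChannelId = [u8; 32];
struct OutboundV1Channel; // stand-in for ln::channel::OutboundV1Channel<SP>
struct InboundV1Channel;  // stand-in for ln::channel::InboundV1Channel<SP>
struct Channel;           // stand-in for the funded ln::channel::Channel<SP>

// Before: three parallel maps per peer, one per kind of channel.
struct PeerStateBefore {
    channel_by_id: HashMap<ChannelId, Channel>,
    outbound_v1_channel_by_id: HashMap<ChannelId, OutboundV1Channel>,
    inbound_v1_channel_by_id: HashMap<ChannelId, InboundV1Channel>,
}

// After: a single map whose values carry the channel's current phase.
enum ChannelPhase {
    UnfundedOutboundV1(OutboundV1Channel),
    UnfundedInboundV1(InboundV1Channel),
    Funded(Channel),
}

struct PeerStateAfter {
    channel_by_id: HashMap<ChannelId, ChannelPhase>,
}
// --- End sketch. ---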


@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint;
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::ln::channel::AnnouncementSigsState;
use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::test_channel_signer::TestChannelSigner;
@ -136,15 +136,18 @@ fn test_monitor_and_persister_update_fail() {
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
// Check that even though the persister is returning a InProgress,
// because the update is bogus, ultimately the error that's returned
// should be a PermanentFailure.
if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
// Check that even though the persister is returning a InProgress,
// because the update is bogus, ultimately the error that's returned
// should be a PermanentFailure.
if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
} else {
assert!(false);
}
}
check_added_monitors!(nodes[0], 1);
@ -1460,12 +1463,12 @@ fn monitor_failed_no_reestablish_response() {
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
{
let mut node_1_per_peer_lock;
let mut node_1_peer_state_lock;
get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
// Route the payment and deliver the initial commitment_signed (with a monitor update failure


@ -605,6 +605,35 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
UnfundedOutboundV1(OutboundV1Channel<SP>),
UnfundedInboundV1(InboundV1Channel<SP>),
Funded(Channel<SP>),
}
impl<'a, SP: Deref> ChannelPhase<SP> where
SP::Target: SignerProvider,
<SP::Target as SignerProvider>::Signer: ChannelSigner,
{
pub fn context(&'a self) -> &'a ChannelContext<SP> {
match self {
ChannelPhase::Funded(chan) => &chan.context,
ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
}
}
pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
match self {
ChannelPhase::Funded(ref mut chan) => &mut chan.context,
ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
}
}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
/// A counter tracking how many ticks have elapsed since this unfunded channel was
@ -2036,11 +2065,6 @@ fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_featur
(commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
}
// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
// calling channel_id() before we're set up or things like get_funding_signed on an
// inbound channel.
//
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the another channel participant entity.
pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
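
Code that only needs state shared by every phase now goes through the `context()`/`context_mut()` accessors, while code that needs funded-only operations (such as `commitment_signed` in the tests above) first narrows the value to the `Funded` variant. A minimal, self-contained sketch of that calling pattern, again with stand-in types rather than the real signer-generic ones:

// --- Illustrative sketch, not part of the diff. Stand-in types only. ---
struct ChannelContext { holder_dust_limit_satoshis: u64 }
struct OutboundV1Channel { context: ChannelContext }
struct InboundV1Channel { context: ChannelContext }
struct Channel { context: ChannelContext }

enum ChannelPhase {
    UnfundedOutboundV1(OutboundV1Channel),
    UnfundedInboundV1(InboundV1Channel),
    Funded(Channel),
}

impl ChannelPhase {
    // Mirrors ChannelPhase::context() above: read access common to all phases.
    fn context(&self) -> &ChannelContext {
        match self {
            ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
            ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
            ChannelPhase::Funded(chan) => &chan.context,
        }
    }
    // Mirrors ChannelPhase::context_mut(): mutable access common to all phases.
    fn context_mut(&mut self) -> &mut ChannelContext {
        match self {
            ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context,
            ChannelPhase::UnfundedInboundV1(chan) => &mut chan.context,
            ChannelPhase::Funded(chan) => &mut chan.context,
        }
    }
}

fn use_phase(phase: &mut ChannelPhase) {
    // Phase-agnostic access to shared channel state.
    let _dust = phase.context().holder_dust_limit_satoshis;
    phase.context_mut().holder_dust_limit_satoshis = 546;
    // Funded-only operations require narrowing to the Funded variant first.
    if let ChannelPhase::Funded(_chan) = phase {
        // e.g. _chan.commitment_signed(...) in the real code
    }
}
// --- End sketch. ---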

(File diff suppressed because it is too large.)


@ -811,36 +811,14 @@ macro_rules! get_channel_ref {
}
}
#[cfg(test)]
macro_rules! get_outbound_v1_channel_ref {
($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
{
$per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
$peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
$peer_state_lock.outbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
}
}
}
#[cfg(test)]
macro_rules! get_inbound_v1_channel_ref {
($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
{
$per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
$peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
$peer_state_lock.inbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
}
}
}
#[cfg(test)]
macro_rules! get_feerate {
($node: expr, $counterparty_node: expr, $channel_id: expr) => {
{
let mut per_peer_state_lock;
let mut peer_state_lock;
let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
chan.context.get_feerate_sat_per_1000_weight()
let phase = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
phase.context().get_feerate_sat_per_1000_weight()
}
}
}
@ -852,7 +830,7 @@ macro_rules! get_channel_type_features {
let mut per_peer_state_lock;
let mut peer_state_lock;
let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
chan.context.get_channel_type().clone()
chan.context().get_channel_type().clone()
}
}
}
@ -2414,10 +2392,10 @@ pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, '
let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
.unwrap().lock().unwrap();
let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
if let Some(prev_config) = channel.context.prev_config() {
if let Some(prev_config) = channel.context().prev_config() {
prev_config.forwarding_fee_base_msat
} else {
channel.context.config().forwarding_fee_base_msat
channel.context().config().forwarding_fee_base_msat
}
};
if $idx == 1 { fee += expected_extra_fees[i]; }
@ -2941,7 +2919,9 @@ macro_rules! get_channel_value_stat {
($node: expr, $counterparty_node: expr, $channel_id: expr) => {{
let peer_state_lock = $node.node.per_peer_state.read().unwrap();
let chan_lock = peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
let chan = chan_lock.channel_by_id.get(&$channel_id).unwrap();
let chan = chan_lock.channel_by_id.get(&$channel_id).map(
|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
).flatten().unwrap();
chan.get_value_stat()
}}
}
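
Several helpers above narrow a stored `ChannelPhase` back down to a funded channel with `.map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }).flatten()`. The self-contained sketch below (stand-in types and a hypothetical `funded_channel` helper) shows that pattern, plus the equivalent, slightly terser `and_then` spelling:

// --- Illustrative sketch, not part of the diff. Stand-in types only. ---
use std::collections::HashMap;

type ChannelId = [u8; 32];
struct Channel { value_sat: u64 }
enum ChannelPhase { Unfunded, Funded(Channel) }

fn funded_channel<'a>(map: &'a HashMap<ChannelId, ChannelPhase>, id: &ChannelId) -> Option<&'a Channel> {
    // Pattern used in the macros above: map + flatten.
    map.get(id).map(
        |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
    ).flatten()
    // Equivalent, terser spelling:
    // map.get(id).and_then(|phase| match phase {
    //     ChannelPhase::Funded(chan) => Some(chan),
    //     _ => None,
    // })
}

fn main() {
    let mut map = HashMap::new();
    map.insert([0u8; 32], ChannelPhase::Funded(Channel { value_sat: 100_000 }));
    assert_eq!(funded_channel(&map, &[0u8; 32]).unwrap().value_sat, 100_000);
}
// --- End sketch. ---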


@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint;
use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
@ -181,14 +181,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
let mut sender_node_per_peer_lock;
let mut sender_node_peer_state_lock;
if send_from_initiator {
let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
chan.context.holder_selected_channel_reserve_satoshis = 0;
chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
} else {
let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
chan.context.holder_selected_channel_reserve_satoshis = 0;
chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
match channel_phase {
ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
let chan_context = channel_phase.context_mut();
chan_context.holder_selected_channel_reserve_satoshis = 0;
chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
},
ChannelPhase::Funded(_) => assert!(false),
}
}
@ -701,7 +702,9 @@ fn test_update_fee_that_funder_cannot_afford() {
let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
).flatten().unwrap();
let chan_signer = local_chan.get_signer();
let pubkeys = chan_signer.as_ref().pubkeys();
(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
@ -710,7 +713,9 @@ fn test_update_fee_that_funder_cannot_afford() {
let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = {
let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
).flatten().unwrap();
let chan_signer = remote_chan.get_signer();
let pubkeys = chan_signer.as_ref().pubkeys();
(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@ -725,7 +730,9 @@ fn test_update_fee_that_funder_cannot_afford() {
let res = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
).flatten().unwrap();
let local_chan_signer = local_chan.get_signer();
let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
@ -1418,7 +1425,9 @@ fn test_fee_spike_violation_fails_htlc() {
let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
).flatten().unwrap();
let chan_signer = local_chan.get_signer();
// Make the signer believe we validated another commitment, so we can release the secret
chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
@ -1432,7 +1441,9 @@ fn test_fee_spike_violation_fails_htlc() {
let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
).flatten().unwrap();
let chan_signer = remote_chan.get_signer();
let pubkeys = chan_signer.as_ref().pubkeys();
(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@ -1461,7 +1472,9 @@ fn test_fee_spike_violation_fails_htlc() {
let res = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
).flatten().unwrap();
let local_chan_signer = local_chan.get_signer();
let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
commitment_number,
@ -3207,7 +3220,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
// The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
// well, so HTLCs at exactly the dust limit will not be included in commitment txn.
nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
.unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
.unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
} else { 3000000 };
let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
@ -5105,7 +5118,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
.unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
.unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
// 0th HTLC:
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
// 1st HTLC:
@ -6215,7 +6228,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
// Fetch a route in advance as we will be unable to once we're unable to send.
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
@ -6288,7 +6301,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
}
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
@ -6891,7 +6904,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
let chan =create_announced_chan_between_nodes(&nodes, 0, 1);
let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
// We route 2 dust-HTLCs between A and B
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
@ -6984,7 +6997,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
@ -7663,7 +7676,9 @@ fn test_counterparty_raa_skip_no_crash() {
{
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer();
let keys = guard.channel_by_id.get_mut(&channel_id).map(
|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
).flatten().unwrap().get_signer();
const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
@ -8385,11 +8400,14 @@ fn test_update_err_monitor_lockdown() {
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
} else {
assert!(false);
}
}
// Our local monitor is in-sync and hasn't processed yet timeout
check_added_monitors!(nodes[0], 1);
@ -8483,13 +8501,16 @@ fn test_concurrent_monitor_claim() {
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
// Watchtower Alice should already have seen the block and reject the update
assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
// Watchtower Alice should already have seen the block and reject the update
assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
} else { assert!(false); }
} else {
assert!(false);
}
}
// Our local monitor is in-sync and hasn't processed yet timeout
check_added_monitors!(nodes[0], 1);
@ -8940,9 +8961,13 @@ fn test_duplicate_chan_id() {
// another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
// try to create another channel. Instead, we drop the channel entirely here (leaving the
// channelmanager in a possibly nonsense state instead).
let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
let logger = test_utils::TestLogger::new();
as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
ChannelPhase::UnfundedOutboundV1(chan) => {
let logger = test_utils::TestLogger::new();
chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
},
_ => panic!("Unexpected ChannelPhase variant"),
}
};
check_added_monitors!(nodes[0], 0);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
@ -9609,8 +9634,12 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
if on_holder_tx {
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
chan.context.holder_dust_limit_satoshis = 546;
match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
ChannelPhase::UnfundedOutboundV1(chan) => {
chan.context.holder_dust_limit_satoshis = 546;
},
_ => panic!("Unexpected ChannelPhase variant"),
}
}
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
@ -9634,8 +9663,8 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
(chan.context.get_dust_buffer_feerate(None) as u64,
chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
(chan.context().get_dust_buffer_feerate(None) as u64,
chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
};
let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
@ -10094,7 +10123,7 @@ fn test_remove_expired_outbound_unfunded_channels() {
let check_outbound_channel_existence = |should_exist: bool| {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
};
// Channel should exist without any timer ticks.
@ -10145,7 +10174,7 @@ fn test_remove_expired_inbound_unfunded_channels() {
let check_inbound_channel_existence = |should_exist: bool| {
let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
};
// Channel should exist without any timer ticks.


@ -516,7 +516,7 @@ fn test_onion_failure() {
let short_channel_id = channels[1].0.contents.short_channel_id;
let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
.unwrap().lock().unwrap().channel_by_id.get(&channels[1].2).unwrap()
.context.get_counterparty_htlc_minimum_msat() - 1;
.context().get_counterparty_htlc_minimum_msat() - 1;
let mut bogus_route = route.clone();
let route_len = bogus_route.paths[0].hops.len();
bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward;


@ -663,9 +663,9 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
.unwrap().lock().unwrap();
let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
let mut new_config = channel.context.config();
let mut new_config = channel.context().config();
new_config.forwarding_fee_base_msat += 100_000;
channel.context.update_config(&new_config);
channel.context_mut().update_config(&new_config);
new_route.paths[0].hops[0].fee_msat += 100_000;
}
@ -1469,7 +1469,7 @@ fn test_trivial_inflight_htlc_tracking(){
let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
channel_1.context.get_short_channel_id().unwrap()
channel_1.context().get_short_channel_id().unwrap()
);
assert_eq!(chan_1_used_liquidity, None);
}
@ -1481,7 +1481,7 @@ fn test_trivial_inflight_htlc_tracking(){
let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
channel_2.context.get_short_channel_id().unwrap()
channel_2.context().get_short_channel_id().unwrap()
);
assert_eq!(chan_2_used_liquidity, None);
@ -1506,7 +1506,7 @@ fn test_trivial_inflight_htlc_tracking(){
let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
channel_1.context.get_short_channel_id().unwrap()
channel_1.context().get_short_channel_id().unwrap()
);
// First hop accounts for expected 1000 msat fee
assert_eq!(chan_1_used_liquidity, Some(501000));
@ -1519,7 +1519,7 @@ fn test_trivial_inflight_htlc_tracking(){
let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
channel_2.context.get_short_channel_id().unwrap()
channel_2.context().get_short_channel_id().unwrap()
);
assert_eq!(chan_2_used_liquidity, Some(500000));
@ -1545,7 +1545,7 @@ fn test_trivial_inflight_htlc_tracking(){
let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
channel_1.context.get_short_channel_id().unwrap()
channel_1.context().get_short_channel_id().unwrap()
);
assert_eq!(chan_1_used_liquidity, None);
}
@ -1557,7 +1557,7 @@ fn test_trivial_inflight_htlc_tracking(){
let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
channel_2.context.get_short_channel_id().unwrap()
channel_2.context().get_short_channel_id().unwrap()
);
assert_eq!(chan_2_used_liquidity, None);
}
@ -1598,7 +1598,7 @@ fn test_holding_cell_inflight_htlcs() {
let used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
channel.context.get_short_channel_id().unwrap()
channel.context().get_short_channel_id().unwrap()
);
assert_eq!(used_liquidity, Some(2000000));
@ -1691,7 +1691,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
// Check for unknown channel id error.
let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable {
err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
if test == InterceptTest::Fail {
@ -1717,8 +1717,8 @@ fn do_test_intercepted_payment(test: InterceptTest) {
let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
assert_eq!(unusable_chan_err , APIError::ChannelUnavailable {
err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
&temp_chan_id, nodes[2].node.get_our_node_id()) });
err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
temp_chan_id, nodes[2].node.get_our_node_id()) });
assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
// Open the just-in-time channel so the payment can then be forwarded.
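
With unfunded channels now stored in the same `channel_by_id` map as funded ones, the lookup behind `forward_intercepted_htlc` can tell an unknown channel id apart from one that is known but still opening, which is why the two error strings above diverge. A rough sketch of that distinction, using a stand-in enum and a hypothetical `check_forwardable` helper (the real logic lives in the channel manager, whose diff is suppressed above):

// --- Illustrative sketch, not part of the diff. Stand-in types only. ---
enum ChannelPhase { UnfundedOutboundV1, UnfundedInboundV1, Funded }

fn check_forwardable(phase: Option<&ChannelPhase>, chan_id: &str, peer: &str) -> Result<(), String> {
    match phase {
        // Id not present in channel_by_id at all.
        None => Err(format!(
            "Channel with id {} not found for the passed counterparty node_id {}.",
            chan_id, peer)),
        // Present, but still in an unfunded phase.
        Some(ChannelPhase::UnfundedOutboundV1) | Some(ChannelPhase::UnfundedInboundV1) => Err(format!(
            "Channel with id {} for the passed counterparty node_id {} is still opening.",
            chan_id, peer)),
        // Funded: forwarding can proceed.
        Some(ChannelPhase::Funded) => Ok(()),
    }
}
// --- End sketch. ---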


@ -1062,7 +1062,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context.closing_fee_limits.as_mut().unwrap().1 *= 10;
get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context_mut().closing_fee_limits.as_mut().unwrap().1 *= 10;
}
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());