Refactor ChannelManager with ChannelPhase

Duncan Dean 2023-08-14 11:28:40 +02:00
parent 585188cda3
commit 15199a65a7
7 changed files with 1070 additions and 800 deletions
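The largest diff on this page (channelmanager.rs, suppressed below) carries the core of the change: the per-peer channel_by_id map now stores every channel behind a ChannelPhase enum, rather than holding only funded Channels. As orientation, here is a minimal sketch of the shape the test changes imply. Only ChannelPhase::Funded, context(), and context_mut() are confirmed by the diffs below; the unfunded variant names are assumptions following the OutboundV1Channel/InboundV1Channel types imported in the tests, and the real types in lightning/src/ln/channel.rs carry many more fields and generic parameters.

// Sketch only: a simplified, self-contained model of the refactor's shape.
struct ChannelContext { feerate_sat_per_1000_weight: u32 /* ... */ }
struct OutboundV1Channel { context: ChannelContext /* ... */ }
struct InboundV1Channel { context: ChannelContext /* ... */ }
struct Channel { context: ChannelContext /* ... */ }

enum ChannelPhase {
    UnfundedOutboundV1(OutboundV1Channel),
    UnfundedInboundV1(InboundV1Channel),
    Funded(Channel),
}

impl ChannelPhase {
    // State shared by all phases is reachable without knowing the phase...
    fn context(&self) -> &ChannelContext {
        match self {
            ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
            ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
            ChannelPhase::Funded(chan) => &chan.context,
        }
    }
    // ...with a separate accessor for mutation.
    fn context_mut(&mut self) -> &mut ChannelContext {
        match self {
            ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context,
            ChannelPhase::UnfundedInboundV1(chan) => &mut chan.context,
            ChannelPhase::Funded(chan) => &mut chan.context,
        }
    }
}

This is why the tests below replace direct field access (chan.context.holder_dust_limit_satoshis) with accessor calls (chan.context().holder_dust_limit_satoshis, or context_mut() for writes), and why any operation that only exists on a funded channel must first match out the ChannelPhase::Funded variant.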

lightning/src/ln/chanmon_update_fail_tests.rs

@@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint;
 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
-use crate::ln::channel::AnnouncementSigsState;
+use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 use crate::util::test_channel_signer::TestChannelSigner;
@@ -136,15 +136,18 @@ fn test_monitor_and_persister_update_fail() {
 	{
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
-		if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-			// Check that even though the persister is returning a InProgress,
-			// because the update is bogus, ultimately the error that's returned
-			// should be a PermanentFailure.
-			if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
-			logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
-			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-		} else { assert!(false); }
+		if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
+			if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+				// Check that even though the persister is returning a InProgress,
+				// because the update is bogus, ultimately the error that's returned
+				// should be a PermanentFailure.
+				if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
+				logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+				assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+			} else { assert!(false); }
+		} else {
+			assert!(false);
+		}
 	}
 
 	check_added_monitors!(nodes[0], 1);
@@ -1460,12 +1463,12 @@ fn monitor_failed_no_reestablish_response() {
 	{
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
 	}
 	{
 		let mut node_1_per_peer_lock;
 		let mut node_1_peer_state_lock;
-		get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+		get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
 	}
 
 	// Route the payment and deliver the initial commitment_signed (with a monitor update failure

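The watchtower and persister tests above need a mutable, funded Channel to call commitment_signed on, and get_channel_ref! now hands back the whole ChannelPhase, so the if let ChannelPhase::Funded(ref mut channel) guard does the narrowing. A rough stand-in for what the macro resolves to after this commit, reusing the ChannelPhase sketch above; the map key type and helper names here are illustrative, and LDK actually keys this map by ChannelId inside per-peer mutexes that the macro locks for the caller:

use std::collections::HashMap;

struct PeerState { channel_by_id: HashMap<[u8; 32], ChannelPhase> }

// Hypothetical simplification of the get_channel_ref! macro's result.
fn get_channel_ref<'a>(peer_state: &'a mut PeerState, channel_id: [u8; 32]) -> &'a mut ChannelPhase {
    peer_state.channel_by_id.get_mut(&channel_id).unwrap()
}

fn with_funded_channel(peer_state: &mut PeerState, channel_id: [u8; 32]) {
    if let ChannelPhase::Funded(ref mut channel) = *get_channel_ref(peer_state, channel_id) {
        // `channel` is a `&mut Channel` here; funded-only operations
        // (like handling a commitment_signed message) are in scope.
        let _ = channel;
    } else {
        // The tests assert rather than silently skipping the check.
        assert!(false, "channel should be funded at this point in the test");
    }
}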
lightning/src/ln/channelmanager.rs (file diff suppressed because it is too large)

lightning/src/ln/functional_test_utils.rs

@@ -839,8 +839,8 @@ macro_rules! get_feerate {
 		{
 			let mut per_peer_state_lock;
 			let mut peer_state_lock;
-			let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
-			chan.context.get_feerate_sat_per_1000_weight()
+			let phase = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
+			phase.context().get_feerate_sat_per_1000_weight()
 		}
 	}
 }
@@ -852,7 +852,7 @@ macro_rules! get_channel_type_features {
 			let mut per_peer_state_lock;
 			let mut peer_state_lock;
 			let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
-			chan.context.get_channel_type().clone()
+			chan.context().get_channel_type().clone()
 		}
 	}
 }
@@ -2414,10 +2414,10 @@ pub fn pass_claimed_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, '
 				let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
 					.unwrap().lock().unwrap();
 				let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
-				if let Some(prev_config) = channel.context.prev_config() {
+				if let Some(prev_config) = channel.context().prev_config() {
 					prev_config.forwarding_fee_base_msat
 				} else {
-					channel.context.config().forwarding_fee_base_msat
+					channel.context().config().forwarding_fee_base_msat
 				}
 			};
 			if $idx == 1 { fee += expected_extra_fees[i]; }
@@ -2941,7 +2941,9 @@ macro_rules! get_channel_value_stat {
 	($node: expr, $counterparty_node: expr, $channel_id: expr) => {{
 		let peer_state_lock = $node.node.per_peer_state.read().unwrap();
 		let chan_lock = peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
-		let chan = chan_lock.channel_by_id.get(&$channel_id).unwrap();
+		let chan = chan_lock.channel_by_id.get(&$channel_id).map(
+			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+		).flatten().unwrap();
 		chan.get_value_stat()
 	}}
 }

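The get_channel_value_stat! hunk above, and several test hunks below, turn an Option<&ChannelPhase> into an Option<&Channel> by mapping an if let over the option and then flattening. On Option, map followed by flatten is exactly and_then; both spellings are sketched here against the types from the first sketch (the function names are illustrative):

// The diff's map(..).flatten() chain:
fn funded_via_map_flatten(phase: Option<&ChannelPhase>) -> Option<&Channel> {
    phase.map(
        |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
    ).flatten()
}

// ...is equivalent to the usual one-step and_then:
fn funded_via_and_then(phase: Option<&ChannelPhase>) -> Option<&Channel> {
    phase.and_then(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None })
}

The map(..).flatten() spelling keeps the inner if let visually close to the match arms used elsewhere in the commit; and_then would be the more idiomatic single call.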
lightning/src/ln/functional_tests.rs

@@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint;
 use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
 use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
@@ -701,7 +701,9 @@ fn test_update_fee_that_funder_cannot_afford() {
 	let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
+			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+		).flatten().unwrap();
 		let chan_signer = local_chan.get_signer();
 		let pubkeys = chan_signer.as_ref().pubkeys();
 		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
@@ -710,7 +712,9 @@ fn test_update_fee_that_funder_cannot_afford() {
 	let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = {
 		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-		let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
+			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+		).flatten().unwrap();
 		let chan_signer = remote_chan.get_signer();
 		let pubkeys = chan_signer.as_ref().pubkeys();
 		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@@ -725,7 +729,9 @@ fn test_update_fee_that_funder_cannot_afford() {
 	let res = {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
+			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+		).flatten().unwrap();
 		let local_chan_signer = local_chan.get_signer();
 		let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
 		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
@@ -1418,7 +1424,9 @@ fn test_fee_spike_violation_fails_htlc() {
 	let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
+			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+		).flatten().unwrap();
 		let chan_signer = local_chan.get_signer();
 		// Make the signer believe we validated another commitment, so we can release the secret
 		chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
@@ -1432,7 +1440,9 @@ fn test_fee_spike_violation_fails_htlc() {
 	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
 		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-		let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
+			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+		).flatten().unwrap();
 		let chan_signer = remote_chan.get_signer();
 		let pubkeys = chan_signer.as_ref().pubkeys();
 		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@@ -1461,7 +1471,9 @@ fn test_fee_spike_violation_fails_htlc() {
 	let res = {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
+			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+		).flatten().unwrap();
 		let local_chan_signer = local_chan.get_signer();
 		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
 			commitment_number,
@@ -3207,7 +3219,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 		// The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
 		// well, so HTLCs at exactly the dust limit will not be included in commitment txn.
 		nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
-			.unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
+			.unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
 	} else { 3000000 };
 
 	let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
@@ -5105,7 +5117,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
 	assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
 
 	let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
-		.unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
+		.unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
 	// 0th HTLC:
 	let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
 	// 1st HTLC:
@@ -6215,7 +6227,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
 	let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
+		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
 
 	// Fetch a route in advance as we will be unable to once we're unable to send.
 	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
@@ -6288,7 +6300,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
 		let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
-		htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
+		htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
 	}
 
 	let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
@@ -6891,7 +6903,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
 	// We route 2 dust-HTLCs between A and B
 	let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
@@ -6984,7 +6996,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
 	let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+		.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
 	let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
 	let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
@@ -7663,7 +7675,9 @@ fn test_counterparty_raa_skip_no_crash() {
 	{
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer();
+		let keys = guard.channel_by_id.get_mut(&channel_id).map(
+			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+		).flatten().unwrap().get_signer();
 
 		const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
@@ -8385,11 +8399,14 @@ fn test_update_err_monitor_lockdown() {
 	{
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-		if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-			assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
-			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-		} else { assert!(false); }
+		if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+			if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+				assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
+				assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+			} else { assert!(false); }
+		} else {
+			assert!(false);
+		}
 	}
 	// Our local monitor is in-sync and hasn't processed yet timeout
 	check_added_monitors!(nodes[0], 1);
@@ -8483,13 +8500,16 @@ fn test_concurrent_monitor_claim() {
 	{
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
-		if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-			// Watchtower Alice should already have seen the block and reject the update
-			assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
-			assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-		} else { assert!(false); }
+		if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+			if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+				// Watchtower Alice should already have seen the block and reject the update
+				assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
+				assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+				assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+			} else { assert!(false); }
+		} else {
+			assert!(false);
+		}
 	}
 	// Our local monitor is in-sync and hasn't processed yet timeout
 	check_added_monitors!(nodes[0], 1);
@@ -9634,8 +9654,8 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
 		let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
-		(chan.context.get_dust_buffer_feerate(None) as u64,
-		chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
+		(chan.context().get_dust_buffer_feerate(None) as u64,
+		chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
 	};
 	let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
 	let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;

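The five signer-fetching hunks in functional_tests.rs repeat the same map(..).flatten() extraction. If the pattern keeps growing, a test-local helper could name the intent once; here is a sketch of such a helper (hypothetical, not something this commit adds), again using the ChannelPhase types sketched near the top of this page:

use std::collections::HashMap;

// Hypothetical helper: look up a channel and insist it is funded,
// panicking with a clear message otherwise, as test code prefers.
fn funded_channel<'a>(
    channel_by_id: &'a HashMap<[u8; 32], ChannelPhase>,
    channel_id: &[u8; 32],
) -> &'a Channel {
    match channel_by_id.get(channel_id) {
        Some(ChannelPhase::Funded(chan)) => chan,
        _ => panic!("expected a funded channel"),
    }
}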
lightning/src/ln/onion_route_tests.rs

@@ -516,7 +516,7 @@ fn test_onion_failure() {
 	let short_channel_id = channels[1].0.contents.short_channel_id;
 	let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
 		.unwrap().lock().unwrap().channel_by_id.get(&channels[1].2).unwrap()
-		.context.get_counterparty_htlc_minimum_msat() - 1;
+		.context().get_counterparty_htlc_minimum_msat() - 1;
 	let mut bogus_route = route.clone();
 	let route_len = bogus_route.paths[0].hops.len();
 	bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward;

lightning/src/ln/payment_tests.rs

@@ -663,9 +663,9 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) {
 		let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
 			.unwrap().lock().unwrap();
 		let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
-		let mut new_config = channel.context.config();
+		let mut new_config = channel.context().config();
 		new_config.forwarding_fee_base_msat += 100_000;
-		channel.context.update_config(&new_config);
+		channel.context_mut().update_config(&new_config);
 		new_route.paths[0].hops[0].fee_msat += 100_000;
 	}
@@ -1469,7 +1469,7 @@ fn test_trivial_inflight_htlc_tracking(){
 		let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-			channel_1.context.get_short_channel_id().unwrap()
+			channel_1.context().get_short_channel_id().unwrap()
 		);
 		assert_eq!(chan_1_used_liquidity, None);
 	}
@@ -1481,7 +1481,7 @@ fn test_trivial_inflight_htlc_tracking(){
 		let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-			channel_2.context.get_short_channel_id().unwrap()
+			channel_2.context().get_short_channel_id().unwrap()
 		);
 		assert_eq!(chan_2_used_liquidity, None);
@@ -1506,7 +1506,7 @@ fn test_trivial_inflight_htlc_tracking(){
 		let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-			channel_1.context.get_short_channel_id().unwrap()
+			channel_1.context().get_short_channel_id().unwrap()
 		);
 		// First hop accounts for expected 1000 msat fee
 		assert_eq!(chan_1_used_liquidity, Some(501000));
@@ -1519,7 +1519,7 @@ fn test_trivial_inflight_htlc_tracking(){
 		let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-			channel_2.context.get_short_channel_id().unwrap()
+			channel_2.context().get_short_channel_id().unwrap()
 		);
 		assert_eq!(chan_2_used_liquidity, Some(500000));
@@ -1545,7 +1545,7 @@ fn test_trivial_inflight_htlc_tracking(){
 		let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-			channel_1.context.get_short_channel_id().unwrap()
+			channel_1.context().get_short_channel_id().unwrap()
 		);
 		assert_eq!(chan_1_used_liquidity, None);
 	}
@@ -1557,7 +1557,7 @@ fn test_trivial_inflight_htlc_tracking(){
 		let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
-			channel_2.context.get_short_channel_id().unwrap()
+			channel_2.context().get_short_channel_id().unwrap()
 		);
 		assert_eq!(chan_2_used_liquidity, None);
 	}
@@ -1598,7 +1598,7 @@ fn test_holding_cell_inflight_htlcs() {
 		let used_liquidity = inflight_htlcs.used_liquidity_msat(
 			&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
 			&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
-			channel.context.get_short_channel_id().unwrap()
+			channel.context().get_short_channel_id().unwrap()
 		);
 		assert_eq!(used_liquidity, Some(2000000));
@@ -1691,7 +1691,7 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 	// Check for unknown channel id error.
 	let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
 	assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable {
-		err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
+		err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
 			log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
 
 	if test == InterceptTest::Fail {
@@ -1717,8 +1717,8 @@ fn do_test_intercepted_payment(test: InterceptTest) {
 		let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
 		let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
 		assert_eq!(unusable_chan_err , APIError::ChannelUnavailable {
-			err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
-				&temp_chan_id, nodes[2].node.get_our_node_id()) });
+			err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
+				temp_chan_id, nodes[2].node.get_our_node_id()) });
 		assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
 
 		// Open the just-in-time channel so the payment can then be forwarded.

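In do_retry_with_no_persist above, the config is read out of context() as a value, modified locally, and written back through context_mut().update_config(..), so the shared-state read ends before the write begins. A standalone sketch of that read-modify-write pattern; all types here are simplified stand-ins rather than LDK's real ChannelConfig and ChannelContext:

#[derive(Clone, Copy)]
struct ChannelConfig { forwarding_fee_base_msat: u32 }

struct Context { config: ChannelConfig }
impl Context {
    // Reads hand out a copy of the config...
    fn config(&self) -> ChannelConfig { self.config }
    // ...and writes go through an explicit update call.
    fn update_config(&mut self, new: &ChannelConfig) { self.config = *new; }
}

fn bump_forwarding_fee(ctx: &mut Context) {
    let mut new_config = ctx.config();              // copy out
    new_config.forwarding_fee_base_msat += 100_000; // modify locally
    ctx.update_config(&new_config);                 // write back
}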
lightning/src/ln/shutdown_tests.rs

@@ -1062,7 +1062,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
 	{
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context.closing_fee_limits.as_mut().unwrap().1 *= 10;
+		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context_mut().closing_fee_limits.as_mut().unwrap().1 *= 10;
 	}
 	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
 	let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());