Hide ChannelPhase::Funded behind as_funded method

Exposing ChannelPhase in ChannelManager has led to verbose match
statements, which must be updated each time a new ChannelPhase variant
is added. Making ChannelPhase an implementation detail of Channel would
help avoid this.

As a step in this direction, introduce ChannelPhase::as_funded and
ChannelPhase::as_funded_mut for use in ChannelManager wherever a
Channel (later to be renamed FundedChannel) is needed.
Jeffrey Czyz 2025-01-05 22:24:18 -06:00
parent d6637d7d04
commit 1780ce4e5a
5 changed files with 150 additions and 126 deletions
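
The pattern this commit introduces is a variant accessor that projects the ChannelPhase enum onto its Funded variant as an Option, so call sites stop enumerating variants. As a minimal standalone sketch of that shape (the Phase enum and its payload types below are illustrative stand-ins, not LDK's actual ChannelPhase or Channel):

    // Stand-in enum; LDK's real ChannelPhase carries channel structs instead.
    enum Phase {
        Unfunded(u32),
        Funded(String),
    }

    impl Phase {
        // Borrow the funded payload if (and only if) this phase is Funded.
        fn as_funded(&self) -> Option<&String> {
            if let Phase::Funded(chan) = self { Some(chan) } else { None }
        }

        fn as_funded_mut(&mut self) -> Option<&mut String> {
            if let Phase::Funded(chan) = self { Some(chan) } else { None }
        }
    }

    fn main() {
        let mut phase = Phase::Funded("chan".to_owned());
        // Call sites now match on Option rather than on every variant, so
        // adding a new unfunded phase never touches this code.
        if let Some(chan) = phase.as_funded_mut() {
            chan.push_str("-ready");
        }
        assert!(matches!(phase.as_funded(), Some(c) if c == "chan-ready"));
        assert!(Phase::Unfunded(0).as_funded().is_none());
    }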

lightning/src/ln/chanmon_update_fail_tests.rs

@@ -20,7 +20,7 @@ use crate::chain::transaction::OutPoint;
 use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
-use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
+use crate::ln::channel::AnnouncementSigsState;
 use crate::ln::msgs;
 use crate::ln::types::ChannelId;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
@@ -98,7 +98,7 @@ fn test_monitor_and_persister_update_fail() {
 	{
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
+		if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2).as_funded_mut() {
 			if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
 				// Check that the persister returns InProgress (and will never actually complete)
 				// as the monitor update errors.

lightning/src/ln/channel.rs

@@ -1152,6 +1152,22 @@ impl<'a, SP: Deref> ChannelPhase<SP> where
 			ChannelPhase::UnfundedV2(ref mut chan) => &mut chan.context,
 		}
 	}
+
+	pub fn as_funded(&self) -> Option<&Channel<SP>> {
+		if let ChannelPhase::Funded(channel) = self {
+			Some(channel)
+		} else {
+			None
+		}
+	}
+
+	pub fn as_funded_mut(&mut self) -> Option<&mut Channel<SP>> {
+		if let ChannelPhase::Funded(channel) = self {
+			Some(channel)
+		} else {
+			None
+		}
+	}
 }

 /// Contains all state common to unfunded inbound/outbound channels.

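Because both accessors return Option, the call-site rewrites in channelmanager.rs below mostly reduce to Option::and_then for by-id lookups and Iterator::filter_map for scans over all channels. A toy sketch of those two shapes, using stand-in types rather than LDK's:

    use std::collections::HashMap;

    enum Phase { Unfunded, Funded(i64) }

    impl Phase {
        fn as_funded(&self) -> Option<&i64> {
            if let Phase::Funded(chan) = self { Some(chan) } else { None }
        }
        fn as_funded_mut(&mut self) -> Option<&mut i64> {
            if let Phase::Funded(chan) = self { Some(chan) } else { None }
        }
    }

    fn main() {
        let mut channel_by_id: HashMap<u8, Phase> = HashMap::new();
        channel_by_id.insert(1, Phase::Funded(100));
        channel_by_id.insert(2, Phase::Unfunded);

        // By-id lookup: and_then collapses "entry exists" and "is funded"
        // into one Option, mirroring get_mut(..).and_then(ChannelPhase::as_funded_mut).
        if let Some(chan) = channel_by_id.get_mut(&1).and_then(Phase::as_funded_mut) {
            *chan += 1;
        }

        // Scan: filter_map keeps only funded channels, with no match arms.
        let funded: Vec<&i64> = channel_by_id.values().filter_map(Phase::as_funded).collect();
        assert_eq!(funded, vec![&101]);
    }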
lightning/src/ln/channelmanager.rs

@@ -3229,7 +3229,10 @@ macro_rules! handle_monitor_update_completion {
 				for (channel_id, counterparty_node_id, _) in removed_batch_state {
 					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 						let mut peer_state = peer_state_mutex.lock().unwrap();
-						if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+						if let Some(chan) = peer_state.channel_by_id
+							.get_mut(&channel_id)
+							.and_then(ChannelPhase::as_funded_mut)
+						{
 							batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding());
 							chan.set_batch_ready();
 							let mut pending_events = $self.pending_events.lock().unwrap();
@@ -3704,11 +3707,8 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		res.extend(peer_state.channel_by_id.iter()
-			.filter_map(|(chan_id, phase)| match phase {
-				// Only `Channels` in the `ChannelPhase::Funded` phase can be considered funded.
-				ChannelPhase::Funded(chan) => Some((chan_id, chan)),
-				_ => None,
-			})
+			// Only `Channels` in the `ChannelPhase::Funded` phase can be considered funded.
+			.filter_map(|(chan_id, phase)| phase.as_funded().map(|chan| (chan_id, chan)))
 			.filter(f)
 			.map(|(_channel_id, channel)| {
 				ChannelDetails::from_channel_context(&channel.context, best_block_height,
@@ -3836,7 +3836,7 @@ where
 		match peer_state.channel_by_id.entry(channel_id.clone()) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					let funding_txo_opt = chan.context.get_funding_txo();
 					let their_features = &peer_state.latest_features;
 					let (shutdown_msg, mut monitor_update_opt, htlcs) =
@@ -3964,7 +3964,7 @@ where
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(channel_id) {
 				hash_map::Entry::Occupied(mut chan_phase) => {
-					if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
+					if let Some(chan) = chan_phase.get_mut().as_funded_mut() {
 						handle_new_monitor_update!(self, funding_txo,
 							monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
 						return;
@@ -4102,15 +4102,14 @@ where
 		let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
 		if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
 			log_error!(logger, "Force-closing channel {}", channel_id);
-			let (mut shutdown_res, update_opt) = match chan_phase_entry.get_mut() {
-				ChannelPhase::Funded(ref mut chan) => {
+			let (mut shutdown_res, update_opt) = match chan_phase_entry.get_mut().as_funded_mut() {
+				Some(chan) => {
 					(
 						chan.context.force_shutdown(broadcast, closure_reason),
 						self.get_channel_update_for_broadcast(&chan).ok(),
 					)
 				},
-				ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
-				ChannelPhase::UnfundedV2(_) => {
+				None => {
 					// Unfunded channel has no update
 					(chan_phase_entry.get_mut().context_mut().force_shutdown(false, closure_reason), None)
 				},
@@ -4272,9 +4271,7 @@ where
 		}
 		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
-		match peer_state.channel_by_id.get_mut(&channel_id).and_then(
-			|chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
-		) {
+		match peer_state.channel_by_id.get_mut(&channel_id).and_then(ChannelPhase::as_funded_mut) {
 			None => None,
 			Some(chan) => Some(callback(chan)),
 		}
@@ -4567,8 +4564,8 @@ where
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(id) {
-			match chan_phase_entry.get_mut() {
-				ChannelPhase::Funded(chan) => {
+			match chan_phase_entry.get_mut().as_funded_mut() {
+				Some(chan) => {
 					if !chan.context.is_live() {
 						return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
 					}
@@ -4599,7 +4596,7 @@ where
 						None => {},
 					}
 				},
-				_ => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}),
+				None => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}),
 			};
 		} else {
 			// The channel was likely removed after we fetched the id from the
@@ -5413,7 +5410,7 @@ where
 			if !channel_phase.context_mut().update_config(&config) {
 				continue;
 			}
-			if let ChannelPhase::Funded(channel) = channel_phase {
+			if let Some(channel) = channel_phase.as_funded() {
 				if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
 					let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 					pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
@@ -5501,18 +5498,19 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.get(next_hop_channel_id) {
-				Some(ChannelPhase::Funded(chan)) => {
+				Some(chan) => if let Some(chan) = chan.as_funded() {
 					if !chan.context.is_usable() {
 						return Err(APIError::ChannelUnavailable {
 							err: format!("Channel with id {} not fully established", next_hop_channel_id)
 						})
 					}
 					chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
-				},
-				Some(_) => return Err(APIError::ChannelUnavailable {
+				} else {
+					return Err(APIError::ChannelUnavailable {
 						err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
 							next_hop_channel_id, next_node_id)
-				}),
+					})
+				},
 				None => {
 					let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
 						next_hop_channel_id, next_node_id);
@@ -5521,7 +5519,7 @@ where
 					return Err(APIError::ChannelUnavailable {
 						err: error
 					})
-				}
+				},
 			}
 		};
@@ -5912,8 +5910,9 @@ where
 				// applying non-strict forwarding.
 				// The channel with the least amount of outbound liquidity will be used to maximize the
 				// probability of being able to successfully forward a subsequent HTLC.
-				let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
-					ChannelPhase::Funded(chan) => {
+				let maybe_optimal_channel = peer_state.channel_by_id.values_mut()
+					.filter_map(ChannelPhase::as_funded_mut)
+					.filter_map(|chan| {
 						let balances = chan.context.get_available_balances(&self.fee_estimator);
 						if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
 							outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
@@ -5922,14 +5921,16 @@ where
 						} else {
 							None
 						}
-					},
-					_ => None,
-				}).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
+					})
+					.min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
 				let optimal_channel = match maybe_optimal_channel {
 					Some(chan) => chan,
 					None => {
 						// Fall back to the specified channel to return an appropriate error.
-						if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+						if let Some(chan) = peer_state.channel_by_id
+							.get_mut(&forward_chan_id)
+							.and_then(ChannelPhase::as_funded_mut)
+						{
 							chan
 						} else {
 							forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
@@ -5957,7 +5958,10 @@ where
 							panic!("Stated return value requirements in send_htlc() were not met");
 						}
-						if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+						if let Some(chan) = peer_state.channel_by_id
+							.get_mut(&forward_chan_id)
+							.and_then(ChannelPhase::as_funded_mut)
+						{
 							let failure_code = 0x1000|7;
 							let data = self.get_htlc_inbound_temp_fail_data(failure_code);
 							failed_forwards.push((htlc_source, payment_hash,
@@ -5975,7 +5979,10 @@ where
 							panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
 						},
 						HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
-							if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+							if let Some(chan) = peer_state.channel_by_id
+								.get_mut(&forward_chan_id)
+								.and_then(ChannelPhase::as_funded_mut)
+							{
 								let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 								log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
 								Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
@@ -5985,7 +5992,10 @@ where
 							}
 						},
 						HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
-							if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+							if let Some(chan) = peer_state.channel_by_id
+								.get_mut(&forward_chan_id)
+								.and_then(ChannelPhase::as_funded_mut)
+							{
 								let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 								log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
 								let res = chan.queue_fail_malformed_htlc(
@@ -6001,7 +6011,10 @@ where
 					if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
 						if let Err(e) = queue_fail_htlc_res {
 							if let ChannelError::Ignore(msg) = e {
-								if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+								if let Some(chan) = peer_state.channel_by_id
+									.get_mut(&forward_chan_id)
+									.and_then(ChannelPhase::as_funded_mut)
+								{
 									let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 									log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
 								}
@@ -6309,7 +6322,10 @@ where
 		if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
+			if let Some(chan) = peer_state.channel_by_id
+				.get_mut(&channel_id)
+				.and_then(ChannelPhase::as_funded_mut)
+			{
 				handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
 			} else {
 				let update_actions = peer_state.monitor_update_blocked_actions
@@ -6369,9 +6385,9 @@ where
 		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			for (chan_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
-				|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
-			) {
+			for (chan_id, chan) in peer_state.channel_by_id.iter_mut()
+				.filter_map(|(chan_id, phase)| phase.as_funded_mut().map(|chan| (chan_id, chan)))
+			{
 				let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 					anchor_feerate
 				} else {
@@ -6748,7 +6764,7 @@ where
 				let peer_state = &mut *peer_state_lock;
 				match peer_state.channel_by_id.entry(channel_id) {
 					hash_map::Entry::Occupied(chan_phase_entry) => {
-						if let ChannelPhase::Funded(_chan) = chan_phase_entry.get() {
+						if let Some(_chan) = chan_phase_entry.get().as_funded() {
 							let failure_code = 0x1000|7;
 							let data = self.get_htlc_inbound_temp_fail_data(failure_code);
 							(failure_code, data)
@@ -7081,7 +7097,7 @@ where
 		if let Some(peer_state_lock) = peer_state_opt.as_mut() {
 			let peer_state = &mut **peer_state_lock;
 			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 					let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
@@ -7583,7 +7599,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				return;
 			}
-			if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
+			if let Some(chan) = peer_state.channel_by_id
+				.get_mut(channel_id)
+				.and_then(ChannelPhase::as_funded_mut)
+			{
 				if chan.is_awaiting_monitor_update() {
 					log_trace!(logger, "Channel is open and awaiting update, resuming it");
 					handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
@@ -8125,7 +8144,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					});
 				}
-				if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
+				if let Some(chan) = e.insert(ChannelPhase::Funded(chan)).as_funded_mut() {
 					handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
 						per_peer_state, chan, INITIAL_MONITOR);
 				} else {
@@ -8172,8 +8191,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				// We really should be able to insert here without doing a second
 				// lookup, but sadly rust stdlib doesn't currently allow keeping
 				// the original Entry around with the value removed.
-				let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
-				if let ChannelPhase::Funded(ref mut chan) = &mut chan {
+				let chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
+				if let Some(chan) = chan.as_funded_mut() {
 					handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
 				} else { unreachable!(); }
 				Ok(())
@@ -8366,9 +8385,9 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				let channel_phase = chan_phase_entry.get_mut();
+				let channel_phase = chan_phase_entry.get_mut().as_funded_mut();
 				match channel_phase {
-					ChannelPhase::Funded(chan) => {
+					Some(chan) => {
 						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 						let (tx_signatures_opt, funding_tx_opt) = try_chan_phase_entry!(self, peer_state, chan.tx_signatures(msg, &&logger), chan_phase_entry);
 						if let Some(tx_signatures) = tx_signatures_opt {
@@ -8385,7 +8404,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 							}
 						}
 					},
-					_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
+					None => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
 						(
 							"Got an unexpected tx_signatures message".into(),
 							ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
@@ -8476,7 +8495,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 					let announcement_sigs_opt = try_chan_phase_entry!(self, peer_state, chan.channel_ready(&msg, &self.node_signer,
 						self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
@@ -8531,9 +8550,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 		let peer_state = &mut *peer_state_lock;
 		if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
-			let phase = chan_phase_entry.get_mut();
-			match phase {
-				ChannelPhase::Funded(chan) => {
+			match chan_phase_entry.get_mut().as_funded_mut() {
+				Some(chan) => {
 					if !chan.received_shutdown() {
 						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 						log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
@@ -8561,12 +8579,11 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 							peer_state_lock, peer_state, per_peer_state, chan);
 					}
 				},
-				ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) |
-				ChannelPhase::UnfundedV2(_) => {
-					let context = phase.context_mut();
+				None => {
+					let context = chan_phase_entry.get_mut().context_mut();
 					let logger = WithChannelContext::from(&self.logger, context, None);
 					log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
-					let mut close_res = phase.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+					let mut close_res = context.force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
 					remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
 					finish_shutdown = Some(close_res);
 				},
@@ -8599,7 +8616,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 					let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, peer_state, chan.closing_signed(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
 					debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
@@ -8635,8 +8652,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx));
 			self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
 		}
-		if let Some(ChannelPhase::Funded(chan)) = chan_option {
-			if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+		if let Some(chan) = chan_option.as_ref().and_then(ChannelPhase::as_funded) {
+			if let Ok(update) = self.get_channel_update_for_broadcast(chan) {
 				let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 				pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
 					msg: update
@@ -8674,7 +8691,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					let mut pending_forward_info = match decoded_hop_res {
 						Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
 							self.construct_pending_htlc_status(
@@ -8746,7 +8763,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					let res = try_chan_phase_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_phase_entry);
 					if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
 						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
@@ -8795,7 +8812,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					try_chan_phase_entry!(self, peer_state, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
 				} else {
 					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
@@ -8824,7 +8841,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
 					try_chan_phase_entry!(self, peer_state, Err(chan_err), chan_phase_entry);
 				}
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					try_chan_phase_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
 				} else {
 					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
@@ -8848,7 +8865,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 					let funding_txo = chan.context.get_funding_txo();
@@ -9065,7 +9082,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 					let funding_txo_opt = chan.context.get_funding_txo();
 					let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
@@ -9105,7 +9122,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 					try_chan_phase_entry!(self, peer_state, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
 				} else {
@@ -9129,7 +9146,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					if !chan.context.is_usable() {
 						return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
 					}
@@ -9171,7 +9188,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(chan_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					if chan.context.get_counterparty_node_id() != *counterparty_node_id {
 						if chan.context.should_announce() {
 							// If the announcement is about a channel of ours which is public, some
@@ -9222,7 +9239,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
-				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 					// Currently, we expect all holding cell update_adds to be dropped on peer
 					// disconnect, so Channel's reestablish will never hand us any holding cell
 					// freed HTLCs to fail backwards. If in the future we no longer drop pending
@@ -9353,8 +9370,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					let mut shutdown_res = chan_phase_entry.get_mut().context_mut().force_shutdown(false, reason.clone());
 					let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
 					failed_channels.push(shutdown_res);
-					if let ChannelPhase::Funded(chan) = chan_phase {
-						if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+					if let Some(chan) = chan_phase.as_funded() {
+						if let Ok(update) = self.get_channel_update_for_broadcast(chan) {
 							let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
 							pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
 								msg: update
@@ -9414,9 +9431,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			'chan_loop: loop {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
-				for (channel_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
-					|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
-				) {
+				for (channel_id, chan) in peer_state.channel_by_id
+					.iter_mut()
+					.filter_map(|(chan_id, phase)| phase.as_funded_mut().map(|chan| (chan_id, chan)))
+				{
 					let counterparty_node_id = chan.context.get_counterparty_node_id();
 					let funding_txo = chan.context.get_funding_txo();
 					let (monitor_opt, holding_cell_failed_htlcs) =
@@ -9591,8 +9609,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
 			peer_state.channel_by_id.retain(|channel_id, phase| {
-				match phase {
-					ChannelPhase::Funded(chan) => {
+				match phase.as_funded_mut() {
+					Some(chan) => {
 						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
 						match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
 							Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
@@ -9630,7 +9648,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 							}
 						}
 					},
-					_ => true, // Retain unfunded channels if present.
+					None => true, // Retain unfunded channels if present.
 				}
 			});
 		}
@@ -10539,9 +10557,7 @@ where
 		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			for chan in peer_state.channel_by_id.values().filter_map(
-				|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-			) {
+			for chan in peer_state.channel_by_id.values().filter_map(ChannelPhase::as_funded) {
 				for (htlc_source, _) in chan.inflight_htlc_sources() {
 					if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
 						inflight_htlcs.process_path(path, self.get_our_node_id());
@@ -10620,7 +10636,7 @@ where
 				if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
 					channel_id) {
-					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+					if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
 						debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
 						if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
 							log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
@@ -10916,7 +10932,7 @@ where
 		for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) {
+			for chan in peer_state.channel_by_id.values().filter_map(ChannelPhase::as_funded) {
 				let txid_opt = chan.context.get_funding_txo();
 				let height_opt = chan.context.get_funding_tx_confirmation_height();
 				let hash_opt = chan.context.get_funding_tx_confirmed_in();
@@ -10973,11 +10989,10 @@ where
 				let pending_msg_events = &mut peer_state.pending_msg_events;
 				peer_state.channel_by_id.retain(|_, phase| {
-					match phase {
+					match phase.as_funded_mut() {
 						// Retain unfunded channels.
-						ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
-						ChannelPhase::UnfundedV2(_) => true,
-						ChannelPhase::Funded(channel) => {
+						None => true,
+						Some(channel) => {
 							let res = f(channel);
 							if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
 								for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
@@ -11716,7 +11731,10 @@ where
 		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
 		if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
 		let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
-		if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
+		if let Some(chan) = peer_state.channel_by_id
+			.get(&msg.channel_id)
+			.and_then(ChannelPhase::as_funded)
+		{
 			if let Some(msg) = chan.get_outbound_shutdown() {
 				peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 					node_id: counterparty_node_id,
@@ -12745,9 +12763,11 @@ where
 				serializable_peer_count += 1;
 			}
-			number_of_funded_channels += peer_state.channel_by_id.iter().filter(
-				|(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false }
-			).count();
+			number_of_funded_channels += peer_state.channel_by_id
+				.values()
+				.filter_map(ChannelPhase::as_funded)
+				.filter(|chan| chan.context.is_funding_broadcast())
+				.count();
 		}

 		(number_of_funded_channels as u64).write(writer)?;
@@ -12755,11 +12775,11 @@ where
 		for (_, peer_state_mutex) in per_peer_state.iter() {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
-			for channel in peer_state.channel_by_id.iter().filter_map(
-				|(_, phase)| if let ChannelPhase::Funded(channel) = phase {
-					if channel.context.is_funding_broadcast() { Some(channel) } else { None }
-				} else { None }
-			) {
+			for channel in peer_state.channel_by_id
+				.values()
+				.filter_map(ChannelPhase::as_funded)
+				.filter(|channel| channel.context.is_funding_broadcast())
+			{
 				channel.write(writer)?;
 			}
 		}
@@ -13565,7 +13585,7 @@ where
 			let mut peer_state_lock = peer_state_mtx.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			for phase in peer_state.channel_by_id.values() {
-				if let ChannelPhase::Funded(chan) = phase {
+				if let Some(chan) = phase.as_funded() {
 					let logger = WithChannelContext::from(&args.logger, &chan.context, None);

 					// Channels that were persisted have to be funded, otherwise they should have been
@@ -14024,7 +14044,7 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
-				if let ChannelPhase::Funded(chan) = phase {
+				if let Some(chan) = phase.as_funded_mut() {
 					let logger = WithChannelContext::from(&args.logger, &chan.context, None);
 					if chan.context.outbound_scid_alias() == 0 {
 						let mut outbound_scid_alias;
@@ -14274,7 +14294,10 @@ where
 						let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
 						let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 						let peer_state = &mut *peer_state_lock;
-						if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
+						if let Some(channel) = peer_state.channel_by_id
+							.get_mut(&previous_channel_id)
+							.and_then(ChannelPhase::as_funded_mut)
+						{
 							let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash));
 							channel.claim_htlc_while_disconnected_dropping_mon_update_legacy(
 								claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger

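One recurring shape in the channelmanager.rs hunks above is a match that listed every unfunded variant collapsing into a two-arm match on Option. A toy before/after under stand-in types (not LDK's), with the old shape kept as a comment:

    enum Phase { UnfundedInbound, UnfundedOutbound, Funded(u64) }

    impl Phase {
        fn as_funded(&self) -> Option<&u64> {
            if let Phase::Funded(chan) = self { Some(chan) } else { None }
        }
    }

    fn describe(phase: &Phase) -> &'static str {
        // Before, every unfunded variant had to be spelled out, and each
        // newly added variant forced an edit here:
        //     match phase {
        //         Phase::Funded(_) => "funded",
        //         Phase::UnfundedInbound | Phase::UnfundedOutbound => "unfunded",
        //     }
        // After, only the funded/unfunded distinction is visible:
        match phase.as_funded() {
            Some(_chan) => "funded",
            None => "unfunded",
        }
    }

    fn main() {
        assert_eq!(describe(&Phase::Funded(1)), "funded");
        assert_eq!(describe(&Phase::UnfundedInbound), "unfunded");
        assert_eq!(describe(&Phase::UnfundedOutbound), "unfunded");
    }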
lightning/src/ln/functional_test_utils.rs

@@ -3605,9 +3605,7 @@ macro_rules! get_channel_value_stat {
 	($node: expr, $counterparty_node: expr, $channel_id: expr) => {{
 		let peer_state_lock = $node.node.per_peer_state.read().unwrap();
 		let chan_lock = peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
-		let chan = chan_lock.channel_by_id.get(&$channel_id).map(
-			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-		).flatten().unwrap();
+		let chan = chan_lock.channel_by_id.get(&$channel_id).and_then(ChannelPhase::as_funded).unwrap();
 		chan.get_value_stat()
 	}}
 }

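The test rewrites here and below also lean on the identity that opt.map(f).flatten() is the same as opt.and_then(f), which is why `.map(|phase| ...).flatten().unwrap()` becomes `.and_then(ChannelPhase::as_funded).unwrap()`. A quick demonstration with plain Options:

    fn main() {
        let halve = |n: i32| if n % 2 == 0 { Some(n / 2) } else { None };

        for opt in [Some(4), Some(3), None::<i32>] {
            // map produces Option<Option<i32>>; flatten removes one layer,
            // which is exactly what and_then does in a single step.
            assert_eq!(opt.map(halve).flatten(), opt.and_then(halve));
        }
        assert_eq!(Some(4).and_then(halve), Some(2));
    }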
lightning/src/ln/functional_tests.rs

@@ -734,9 +734,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 	let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
-			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-		).flatten().unwrap();
+		let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap();
 		let chan_signer = local_chan.get_signer();
 		let pubkeys = chan_signer.as_ref().pubkeys();
 		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
@@ -745,9 +743,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
 		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
-			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-		).flatten().unwrap();
+		let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap();
 		let chan_signer = remote_chan.get_signer();
 		let pubkeys = chan_signer.as_ref().pubkeys();
 		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@@ -762,9 +758,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 	let res = {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
-			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-		).flatten().unwrap();
+		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap();
 		let local_chan_signer = local_chan.get_signer();
 		let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
 		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
@@ -1470,9 +1464,7 @@ fn test_fee_spike_violation_fails_htlc() {
 	let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
-			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-		).flatten().unwrap();
+		let local_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap();
 		let chan_signer = local_chan.get_signer();
 		// Make the signer believe we validated another commitment, so we can release the secret
 		chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
@@ -1486,9 +1478,7 @@ fn test_fee_spike_violation_fails_htlc() {
 	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
 		let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
 		let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
-		let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
-			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-		).flatten().unwrap();
+		let remote_chan = chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap();
 		let chan_signer = remote_chan.get_signer();
 		let pubkeys = chan_signer.as_ref().pubkeys();
 		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
@@ -1517,9 +1507,7 @@ fn test_fee_spike_violation_fails_htlc() {
 	let res = {
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
-			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-		).flatten().unwrap();
+		let local_chan = local_chan_lock.channel_by_id.get(&chan.2).and_then(ChannelPhase::as_funded).unwrap();
 		let local_chan_signer = local_chan.get_signer();
 		let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
 			commitment_number,
@@ -7786,9 +7774,8 @@ fn test_counterparty_raa_skip_no_crash() {
 	{
 		let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
 		let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
-		let keys = guard.channel_by_id.get_mut(&channel_id).map(
-			|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
-		).flatten().unwrap().get_signer();
+		let keys = guard.channel_by_id.get(&channel_id).and_then(ChannelPhase::as_funded).unwrap()
+			.get_signer();

 		const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
@@ -8523,7 +8510,7 @@ fn test_update_err_monitor_lockdown() {
 	{
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+		if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() {
 			if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
 				assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
 				assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
@@ -8625,7 +8612,7 @@ fn test_concurrent_monitor_claim() {
 	{
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+		if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2).as_funded_mut() {
 			if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
 				// Watchtower Alice should already have seen the block and reject the update
 				assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);