Move claimable_htlcs to separate lock

Viktor Tigerström 2022-10-12 01:07:23 +02:00
parent 505102dc21
commit f0c6dfbd80

@@ -395,13 +395,6 @@ pub(super) enum RAACommitmentOrder {
 // Note this is only exposed in cfg(test):
 pub(super) struct ChannelHolder<Signer: Sign> {
 	pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
-	/// Map from payment hash to the payment data and any HTLCs which are to us and can be
-	/// failed/claimed by the user.
-	///
-	/// Note that while this is held in the same mutex as the channels themselves, no consistency
-	/// guarantees are made about the channels given here actually existing anymore by the time you
-	/// go to read them!
-	claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
 	/// Messages to send to peers - pushed to in the same lock that they are generated in (except
 	/// for broadcast messages, where ordering isn't as strict).
 	pub(super) pending_msg_events: Vec<MessageSendEvent>,
@@ -682,6 +675,8 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 // | |
 // | |__`pending_inbound_payments`
 // | |
+// | |__`claimable_htlcs`
+// | |
 // | |__`pending_outbound_payments`
 // | |
 // | |__`best_block`
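The tree above is the lock-order documentation that `claimable_htlcs` is being slotted into: code already holding `channel_state` may take the new lock, never the reverse. A minimal sketch of that discipline, under that stated assumption and with stand-in types rather than the real LDK fields:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Stand-ins for the real LDK types; names only mirror the tree above.
struct ChannelHolder { by_id: HashMap<[u8; 32], ()> }

struct Manager {
    channel_state: Mutex<ChannelHolder>,
    claimable_htlcs: Mutex<HashMap<[u8; 32], Vec<u64>>>,
}

impl Manager {
    fn respects_order(&self) {
        // Documented order: `channel_state` first, `claimable_htlcs` second.
        let _channels = self.channel_state.lock().unwrap();
        let _claims = self.claimable_htlcs.lock().unwrap();
        // Acquiring them in the opposite order on another thread risks deadlock.
    }
}

fn main() {
    let mgr = Manager {
        channel_state: Mutex::new(ChannelHolder { by_id: HashMap::new() }),
        claimable_htlcs: Mutex::new(HashMap::new()),
    };
    mgr.respects_order();
}
```

Keeping every acquisition in tree order is what makes the extra `Mutex` deadlock-free.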
@@ -753,6 +748,15 @@ pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	#[cfg(not(test))]
 	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+	/// Map from payment hash to the payment data and any HTLCs which are to us and can be
+	/// failed/claimed by the user.
+	///
+	/// Note that no consistency guarantees are made about the channels given here actually
+	/// existing anymore by the time you go to read them!
+	///
+	/// See `ChannelManager` struct-level documentation for lock order requirements.
+	claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
 	/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
 	/// and some closed channels which reached a usable state prior to being closed. This is used
 	/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
@@ -1657,13 +1661,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 			channel_state: Mutex::new(ChannelHolder{
 				by_id: HashMap::new(),
-				claimable_htlcs: HashMap::new(),
 				pending_msg_events: Vec::new(),
 			}),
 			outbound_scid_aliases: Mutex::new(HashSet::new()),
 			pending_inbound_payments: Mutex::new(HashMap::new()),
 			pending_outbound_payments: Mutex::new(HashMap::new()),
 			forward_htlcs: Mutex::new(HashMap::new()),
+			claimable_htlcs: Mutex::new(HashMap::new()),
 			id_to_peer: Mutex::new(HashMap::new()),
 			short_to_chan_info: FairRwLock::new(HashMap::new()),
@@ -3142,8 +3146,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 		mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
 		for (short_chan_id, mut pending_forwards) in forward_htlcs {
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_state_lock;
 			if short_chan_id != 0 {
 				macro_rules! forwarding_channel_not_found {
 					() => {
@@ -3242,6 +3244,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 							continue;
 						}
 					};
+					let mut channel_state_lock = self.channel_state.lock().unwrap();
+					let channel_state = &mut *channel_state_lock;
 					match channel_state.by_id.entry(forward_chan_id) {
 						hash_map::Entry::Vacant(_) => {
 							forwarding_channel_not_found!();
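These two hunks move the `channel_state` lock from the top of the per-forward loop down to the channel-map lookup, so per-HTLC work that does not touch `by_id` runs without the lock. A schematic of the narrowing, with invented stand-in functions:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Invented stand-ins: resolving a short channel id, and forwarding over a channel.
fn resolve_forward_channel(short_chan_id: u64) -> Option<u64> { Some(short_chan_id + 100) }
fn forward_over_channel(_chan: &mut Vec<u64>) {}

fn process_forwards(forwards: Vec<u64>, channels: &Mutex<HashMap<u64, Vec<u64>>>) {
    for short_chan_id in forwards {
        // Resolution and failure handling happen before the lock is taken...
        let forward_chan_id = match resolve_forward_channel(short_chan_id) {
            Some(id) => id,
            None => continue, // the real code fails the HTLCs back here
        };
        // ...so the channel map is locked only for the actual forward.
        let mut by_id = channels.lock().unwrap();
        if let Some(chan) = by_id.get_mut(&forward_chan_id) {
            forward_over_channel(chan);
        }
    }
}

fn main() {
    let channels = Mutex::new(HashMap::from([(100, Vec::new()), (101, Vec::new())]));
    process_forwards(vec![0, 1, 2], &channels);
}
```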
@@ -3434,7 +3438,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 								payment_secret: $payment_data.payment_secret,
 							}
 						};
-						let (_, htlcs) = channel_state.claimable_htlcs.entry(payment_hash)
+						let mut claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+						let (_, htlcs) = claimable_htlcs.entry(payment_hash)
 							.or_insert_with(|| (purpose(), Vec::new()));
 						if htlcs.len() == 1 {
 							if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
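`entry(..).or_insert_with(..)` creates the `(purpose, Vec)` pair on the first HTLC for a payment hash and appends later MPP parts to the same entry, now under the dedicated lock. A toy version with simplified types:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

type PaymentHash = [u8; 32];

fn main() {
    // Simplified stand-in for `claimable_htlcs`:
    // payment hash -> (payment purpose, HTLC part amounts in msat).
    let claimable: Mutex<HashMap<PaymentHash, (&'static str, Vec<u64>)>> =
        Mutex::new(HashMap::new());

    let payment_hash = [0u8; 32];
    for part_msat in [40_000u64, 60_000] {
        let mut map = claimable.lock().unwrap();
        // The first part creates the entry; later MPP parts append to it.
        let (_purpose, htlcs) = map.entry(payment_hash)
            .or_insert_with(|| ("invoice", Vec::new()));
        htlcs.push(part_msat);
    }
    assert_eq!(claimable.lock().unwrap()[&payment_hash].1, vec![40_000, 60_000]);
}
```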
@@ -3502,7 +3507,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 					check_total_value!(payment_data, payment_preimage);
 				},
 				OnionPayload::Spontaneous(preimage) => {
-					match channel_state.claimable_htlcs.entry(payment_hash) {
+					match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
 						hash_map::Entry::Vacant(e) => {
 							let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
 							e.insert((purpose.clone(), vec![claimable_htlc]));
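For spontaneous (keysend) payments the match insists on a `Vacant` entry: the first HTLC fills it, and a second HTLC for the same hash hits the `Occupied` arm and is rejected rather than merged. A reduced illustration:

```rust
use std::collections::{hash_map, HashMap};

type PaymentHash = [u8; 32];

/// Accept a spontaneous payment only if nothing is already claimable under
/// this hash; a duplicate is refused instead of being merged.
fn accept_spontaneous(
    claimable: &mut HashMap<PaymentHash, Vec<u64>>,
    payment_hash: PaymentHash,
    amt_msat: u64,
) -> bool {
    match claimable.entry(payment_hash) {
        hash_map::Entry::Vacant(e) => { e.insert(vec![amt_msat]); true }
        hash_map::Entry::Occupied(_) => false, // LDK fails the HTLC back here
    }
}

fn main() {
    let mut claimable = HashMap::new();
    assert!(accept_spontaneous(&mut claimable, [1u8; 32], 5_000));
    assert!(!accept_spontaneous(&mut claimable, [1u8; 32], 5_000));
}
```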
@@ -3791,29 +3796,29 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 					true
 				});
-				channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
-					if htlcs.is_empty() {
-						// This should be unreachable
-						debug_assert!(false);
-						return false;
-					}
-					if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
-						// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
-						// In this case we're not going to handle any timeouts of the parts here.
-						if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
-							return true;
-						} else if htlcs.into_iter().any(|htlc| {
-							htlc.timer_ticks += 1;
-							return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
-						}) {
-							timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
-							return false;
-						}
-					}
-					true
-				});
-			}
+			}
+			self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+				if htlcs.is_empty() {
+					// This should be unreachable
+					debug_assert!(false);
+					return false;
+				}
+				if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
+					// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
+					// In this case we're not going to handle any timeouts of the parts here.
+					if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+						return true;
+					} else if htlcs.into_iter().any(|htlc| {
+						htlc.timer_ticks += 1;
+						return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+					}) {
+						timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
+						return false;
+					}
+				}
+				true
+			});
 		for htlc_source in timed_out_mpp_htlcs.drain(..) {
 			let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
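The `retain` above is the MPP receive timeout: a complete part set (values summing to `total_msat`) is left alone, while an incomplete set has each part's `timer_ticks` bumped and is dropped wholesale once any part reaches `MPP_TIMEOUT_TICKS`. A self-contained model of that logic; the constant's value and the types are placeholders:

```rust
use std::collections::HashMap;

const MPP_TIMEOUT_TICKS: u8 = 3; // placeholder; LDK defines its own value

struct Part { value: u64, total_msat: u64, timer_ticks: u8 }

fn timer_tick(claimable: &mut HashMap<u32, Vec<Part>>, timed_out: &mut Vec<u32>) {
    claimable.retain(|payment_hash, parts| {
        if parts.is_empty() {
            return false; // unreachable in practice, mirroring the debug_assert
        }
        // A complete MPP set (part values sum to total_msat) is never timed out here.
        if parts[0].total_msat == parts.iter().map(|p| p.value).sum::<u64>() {
            return true;
        }
        // Incomplete set: age every part, and drop the whole set once any
        // part has waited MPP_TIMEOUT_TICKS ticks.
        let expired = parts.iter_mut().any(|p| {
            p.timer_ticks += 1;
            p.timer_ticks >= MPP_TIMEOUT_TICKS
        });
        if expired {
            timed_out.push(*payment_hash);
        }
        !expired
    });
}

fn main() {
    let mut claimable = HashMap::new();
    claimable.insert(7u32, vec![Part { value: 1_000, total_msat: 2_000, timer_ticks: 0 }]);
    let mut timed_out = Vec::new();
    for _ in 0..MPP_TIMEOUT_TICKS {
        timer_tick(&mut claimable, &mut timed_out);
    }
    assert_eq!(timed_out, vec![7]);
}
```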
@@ -3846,10 +3851,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 	pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		let removed_source = {
-			let mut channel_state = self.channel_state.lock().unwrap();
-			channel_state.claimable_htlcs.remove(payment_hash)
-		};
+		let removed_source = self.claimable_htlcs.lock().unwrap().remove(payment_hash);
 		if let Some((_, mut sources)) = removed_source {
 			for htlc in sources.drain(..) {
 				let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
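With `claimable_htlcs` behind its own `Mutex`, the old three-line block scope collapses into one expression: the temporary guard drops at the end of the statement, and `remove` hands back owned data, so the fail-back loop runs with no lock held. The pattern in isolation:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

fn fail_htlc_backwards(_htlc: u64) { /* stand-in for the real fail path */ }

fn main() {
    let claimable_htlcs: Mutex<HashMap<u32, Vec<u64>>> =
        Mutex::new(HashMap::from([(7, vec![1_000, 2_000])]));

    // The temporary guard lives only for this one statement, so the lock is
    // already released before any HTLC is failed back.
    let removed_source = claimable_htlcs.lock().unwrap().remove(&7);
    if let Some(mut sources) = removed_source {
        for htlc in sources.drain(..) {
            fail_htlc_backwards(htlc);
        }
    }
}
```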
@@ -4151,7 +4153,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash);
+		let removed_source = self.claimable_htlcs.lock().unwrap().remove(&payment_hash);
 		if let Some((payment_purpose, mut sources)) = removed_source {
 			assert!(!sources.is_empty());
@@ -6019,28 +6021,28 @@ where
 				}
 				true
 			});
-			if let Some(height) = height_opt {
-				channel_state.claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
-					htlcs.retain(|htlc| {
-						// If height is approaching the number of blocks we think it takes us to get
-						// our commitment transaction confirmed before the HTLC expires, plus the
-						// number of blocks we generally consider it to take to do a commitment update,
-						// just give up on it and fail the HTLC.
-						if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
-							let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
-							htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
-							timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
-								failure_code: 0x4000 | 15,
-								data: htlc_msat_height_data
-							}, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
-							false
-						} else { true }
-					});
-					!htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
-				});
-			}
+		}
+		if let Some(height) = height_opt {
+			self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+				htlcs.retain(|htlc| {
+					// If height is approaching the number of blocks we think it takes us to get
+					// our commitment transaction confirmed before the HTLC expires, plus the
+					// number of blocks we generally consider it to take to do a commitment update,
+					// just give up on it and fail the HTLC.
+					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+						let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+						htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+							failure_code: 0x4000 | 15,
+							data: htlc_msat_height_data
+						}, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
+						false
+					} else { true }
+				});
+				!htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+			});
+		}
 		self.handle_init_event_channel_failures(failed_channels);
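The nested `retain` fails every HTLC whose `cltv_expiry` is within `HTLC_FAIL_BACK_BUFFER` blocks of the new height, then drops payment entries whose part list became empty. A compact model; the buffer value here is illustrative, not LDK's real constant:

```rust
use std::collections::HashMap;

const HTLC_FAIL_BACK_BUFFER: u32 = 6; // illustrative only, not LDK's real value

struct Htlc { cltv_expiry: u32 }

fn expire_htlcs(claimable: &mut HashMap<u32, Vec<Htlc>>, height: u32) {
    claimable.retain(|_payment_hash, htlcs| {
        // Fail HTLCs that expire too soon for us to safely go on-chain.
        htlcs.retain(|htlc| height < htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER);
        // Only retain the payment entry if it still has at least one HTLC.
        !htlcs.is_empty()
    });
}

fn main() {
    let mut claimable = HashMap::new();
    claimable.insert(1u32, vec![Htlc { cltv_expiry: 110 }, Htlc { cltv_expiry: 200 }]);
    expire_htlcs(&mut claimable, 105);
    assert_eq!(claimable[&1].len(), 1); // the near-expiry HTLC was dropped
}
```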
@@ -6775,16 +6777,18 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelMana
 			}
 		}
-		let channel_state = self.channel_state.lock().unwrap();
-		let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
-		(channel_state.claimable_htlcs.len() as u64).write(writer)?;
-		for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
-			payment_hash.write(writer)?;
-			(previous_hops.len() as u64).write(writer)?;
-			for htlc in previous_hops.iter() {
-				htlc.write(writer)?;
-			}
-			htlc_purposes.push(purpose);
-		}
+		let mut htlc_purposes: Vec<events::PaymentPurpose> = Vec::new();
+		{
+			let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+			(claimable_htlcs.len() as u64).write(writer)?;
+			for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
+				payment_hash.write(writer)?;
+				(previous_hops.len() as u64).write(writer)?;
+				for htlc in previous_hops.iter() {
+					htlc.write(writer)?;
+				}
+				htlc_purposes.push(purpose.clone());
+			}
+		}
 		let per_peer_state = self.per_peer_state.write().unwrap();
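`htlc_purposes` changes from `Vec<&events::PaymentPurpose>` to an owned `Vec<events::PaymentPurpose>`: a borrow taken through the `claimable_htlcs` guard cannot outlive the inner block where the guard drops, so each purpose is cloned out instead. The borrow constraint in miniature:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Clone, Debug)]
enum PaymentPurpose { InvoicePayment, SpontaneousPayment }

fn main() {
    let claimable_htlcs: Mutex<HashMap<u32, (PaymentPurpose, Vec<u64>)>> =
        Mutex::new(HashMap::from([(7, (PaymentPurpose::InvoicePayment, vec![1_000]))]));

    let mut htlc_purposes: Vec<PaymentPurpose> = Vec::new();
    {
        let claimable = claimable_htlcs.lock().unwrap();
        for (_payment_hash, (purpose, _htlcs)) in claimable.iter() {
            // A `&PaymentPurpose` borrowed from the guard could not escape
            // this block, so the purpose is cloned into the owned Vec.
            htlc_purposes.push(purpose.clone());
        }
    } // guard dropped here; htlc_purposes remains usable
    println!("{:?}", htlc_purposes);
}
```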
@@ -7389,7 +7393,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 			channel_state: Mutex::new(ChannelHolder {
 				by_id,
-				claimable_htlcs,
 				pending_msg_events: Vec::new(),
 			}),
 			inbound_payment_key: expanded_inbound_key,
@@ -7397,6 +7400,7 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 			pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
 			forward_htlcs: Mutex::new(forward_htlcs),
+			claimable_htlcs: Mutex::new(claimable_htlcs),
 			outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
 			id_to_peer: Mutex::new(id_to_peer),
 			short_to_chan_info: FairRwLock::new(short_to_chan_info),