Mirror of https://github.com/lightningdevkit/rust-lightning.git, synced 2025-02-25 15:20:24 +01:00
Keep track of the Init Features for every connected/channel'd peer
Since we want to keep track of the Init-context features for every peer we have channels with, we have to keep them for as long as the peer is connected (since we may open a channel with them at any point). We take this opportunity to create a new per-peer-state struct with two levels of locks, which is an appropriate place to eventually move channel storage. Since we can't process messages from a given peer in parallel, the inner lock is a regular Mutex, but the outer lock is a RwLock so that we can process messages for different peers at the same time while holding only an outer read lock.
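Before the diff itself, a minimal sketch of this locking scheme, using only std types. PublicKey, InitFeatures, PeerMap, and its methods here are simplified, hypothetical stand-ins for illustration (the real code uses secp256k1::PublicKey, LDK's InitFeatures, and ChannelManager itself), not the actual API:

use std::collections::{hash_map, HashMap};
use std::sync::{Mutex, RwLock};

// Simplified stand-ins for secp256k1::PublicKey and LDK's InitFeatures.
type PublicKey = [u8; 33];
#[derive(Clone)]
struct InitFeatures(Vec<u8>);

struct PeerState {
	latest_features: InitFeatures,
}

// The outer RwLock guards the shape of the map (entries added on connect,
// removed on disconnect); the inner Mutex guards one peer's state, so work
// for different peers can proceed in parallel under a shared read lock.
struct PeerMap {
	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
}

impl PeerMap {
	// Connect path: insert-or-update under the (rare) outer write lock.
	fn peer_connected(&self, peer: PublicKey, features: InitFeatures) {
		match self.per_peer_state.write().unwrap().entry(peer) {
			hash_map::Entry::Vacant(e) => {
				e.insert(Mutex::new(PeerState { latest_features: features }));
			},
			hash_map::Entry::Occupied(e) => {
				e.get().lock().unwrap().latest_features = features;
			},
		}
	}

	// Common path: outer *read* lock, then only this one peer's Mutex, so
	// message processing for other peers is never blocked.
	fn update_features(&self, peer: &PublicKey, features: InitFeatures) {
		if let Some(state) = self.per_peer_state.read().unwrap().get(peer) {
			state.lock().unwrap().latest_features = features;
		}
	}

	// Disconnect path: drop the entry only once no channels with the peer remain.
	fn peer_disconnected(&self, peer: &PublicKey, no_channels_remain: bool) {
		if no_channels_remain {
			self.per_peer_state.write().unwrap().remove(peer);
		}
	}
}

The payoff is on the common path: handling a message for one peer takes a shared read lock plus that peer's own Mutex, so only connects and disconnects (which change the map's shape) need the exclusive write lock.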
commit a19d71d0b2
parent d2ba7caf47
1 changed file with 59 additions and 1 deletion
@@ -275,6 +275,12 @@ pub(super) struct ChannelHolder<ChanSigner: ChannelKeys> {
 	pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
 }
 
+/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
+/// the latest Init features we heard from the peer.
+struct PeerState {
+	latest_features: InitFeatures,
+}
+
 #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
 const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
 
@@ -328,6 +334,14 @@ pub struct ChannelManager<ChanSigner: ChannelKeys> {
 	channel_state: Mutex<ChannelHolder<ChanSigner>>,
 	our_network_key: SecretKey,
 
+	/// The bulk of our storage will eventually be here (channels and message queues and the like).
+	/// If we are connected to a peer we always at least have an entry here, even if no channels
+	/// are currently open with that peer.
+	/// Because adding or removing an entry is rare, we usually take an outer read lock and then
+	/// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
+	/// new channel.
+	per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
+
 	pending_events: Mutex<Vec<events::Event>>,
 	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
 	/// Essentially just when we're serializing ourselves out.
@@ -610,6 +624,8 @@ impl<ChanSigner: ChannelKeys> ChannelManager<ChanSigner> {
 			}),
 			our_network_key: keys_manager.get_node_secret(),
 
+			per_peer_state: RwLock::new(HashMap::new()),
+
 			pending_events: Mutex::new(Vec::new()),
 			total_consistency_lock: RwLock::new(()),
 
@@ -2780,6 +2796,7 @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigner> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		let mut failed_channels = Vec::new();
 		let mut failed_payments = Vec::new();
+		let mut no_channels_remain = true;
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
@@ -2818,6 +2835,8 @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigner> {
 								short_to_id.remove(&short_id);
 							}
 							return false;
+						} else {
+							no_channels_remain = false;
 						}
 					}
 					true
@@ -2843,6 +2862,10 @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigner> {
 				}
 			});
 		}
+		if no_channels_remain {
+			self.per_peer_state.write().unwrap().remove(their_node_id);
+		}
+
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
 		}
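Taken together, the three peer_disconnected hunks above preserve a simple invariant: a peer's per_peer_state entry survives disconnection exactly when at least one channel with that peer remains, so Init features stay available for every connected or channel'd peer, as the commit title promises.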
@@ -2853,10 +2876,25 @@ impl<ChanSigner: ChannelKeys> ChannelMessageHandler for ChannelManager<ChanSigner> {
 		}
 	}
 
-	fn peer_connected(&self, their_node_id: &PublicKey, _init_msg: &msgs::Init) {
+	fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) {
 		log_debug!(self, "Generating channel_reestablish events for {}", log_pubkey!(their_node_id));
 
 		let _ = self.total_consistency_lock.read().unwrap();
+
+		{
+			let mut peer_state_lock = self.per_peer_state.write().unwrap();
+			match peer_state_lock.entry(their_node_id.clone()) {
+				hash_map::Entry::Vacant(e) => {
+					e.insert(Mutex::new(PeerState {
+						latest_features: init_msg.features.clone(),
+					}));
+				},
+				hash_map::Entry::Occupied(e) => {
+					e.get().lock().unwrap().latest_features = init_msg.features.clone();
+				},
+			}
+		}
+
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = &mut *channel_state_lock;
 		let pending_msg_events = &mut channel_state.pending_msg_events;
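Worth noting as a design choice: going through the entry API makes reconnection an in-place update. An Occupied entry only has its latest_features overwritten, so any state PeerState later grows to hold would survive a peer reconnecting rather than being recreated.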
@@ -3123,6 +3161,14 @@ impl<ChanSigner: ChannelKeys + Writeable> Writeable for ChannelManager<ChanSigner> {
 			}
 		}
 
+		let per_peer_state = self.per_peer_state.write().unwrap();
+		(per_peer_state.len() as u64).write(writer)?;
+		for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
+			peer_pubkey.write(writer)?;
+			let peer_state = peer_state_mutex.lock().unwrap();
+			peer_state.latest_features.write(writer)?;
+		}
+
 		Ok(())
 	}
 }
@@ -3256,6 +3302,16 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs
 			claimable_htlcs.insert(payment_hash, previous_hops);
 		}
 
+		let peer_count: u64 = Readable::read(reader)?;
+		let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, 128));
+		for _ in 0..peer_count {
+			let peer_pubkey = Readable::read(reader)?;
+			let peer_state = PeerState {
+				latest_features: Readable::read(reader)?,
+			};
+			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
+		}
+
 		let channel_manager = ChannelManager {
 			genesis_hash,
 			fee_estimator: args.fee_estimator,
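The two serialization hunks above imply a simple length-prefixed format: a u64 peer count, then one (pubkey, features) record per peer. A standalone sketch of the same pattern, assuming raw big-endian byte encodings rather than LDK's Writeable/Readable traits (write_peers and read_peers are hypothetical helpers, not LDK API):

use std::io::{self, Read, Write};

// Write a u64 count, then each (pubkey, features) record.
fn write_peers<W: Write>(w: &mut W, peers: &[([u8; 33], Vec<u8>)]) -> io::Result<()> {
	w.write_all(&(peers.len() as u64).to_be_bytes())?; // length prefix
	for (pubkey, features) in peers {
		w.write_all(pubkey)?;
		w.write_all(&(features.len() as u64).to_be_bytes())?;
		w.write_all(features)?;
	}
	Ok(())
}

fn read_peers<R: Read>(r: &mut R) -> io::Result<Vec<([u8; 33], Vec<u8>)>> {
	let mut count_buf = [0u8; 8];
	r.read_exact(&mut count_buf)?;
	let count = u64::from_be_bytes(count_buf);
	// The count comes from untrusted input, so don't trust it for
	// preallocation; this mirrors the cap of 128 in the hunk above.
	let mut peers = Vec::with_capacity(std::cmp::min(count as usize, 128));
	for _ in 0..count {
		let mut pubkey = [0u8; 33];
		r.read_exact(&mut pubkey)?;
		let mut len_buf = [0u8; 8];
		r.read_exact(&mut len_buf)?;
		let len = u64::from_be_bytes(len_buf) as usize;
		let mut features = vec![0u8; len]; // a hardened parser would bound this too
		r.read_exact(&mut features)?;
		peers.push((pubkey, features));
	}
	Ok(peers)
}

On the read side the count is treated only as a hint: preallocation is capped, so a corrupt or malicious serialization can't force a huge up-front allocation.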
@@ -3275,6 +3331,8 @@ impl<'a, R : ::std::io::Read, ChanSigner: ChannelKeys + Readable<R>> ReadableArgs
 			}),
 			our_network_key: args.keys_manager.get_node_secret(),
 
+			per_peer_state: RwLock::new(per_peer_state),
+
 			pending_events: Mutex::new(Vec::new()),
 			total_consistency_lock: RwLock::new(()),
 			keys_manager: args.keys_manager,