Move monitor-generated HTLC event handling to manager event-getters

This is somewhat awkward, but it prevents a slew of duplicate events.
Really this should probably be more explicit, but that can easily move
along with the slew of block_connected event-processing refactors, see
also GH #80.

This affects full_stack_target only by accident, because the demo test
didn't continue on with another block connection.
Matt Corallo 2018-12-10 22:47:21 -05:00
parent b9c609eb6a
commit 221bfa6bd4
2 changed files with 30 additions and 10 deletions

File diff suppressed because one or more lines are too long

@@ -2622,6 +2622,20 @@ impl ChannelManager {
 impl events::MessageSendEventsProvider for ChannelManager {
 	fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
+		// TODO: Event release to users and serialization is currently race-y: its very easy for a
+		// user to serialize a ChannelManager with pending events in it and lose those events on
+		// restart. This is doubly true for the fail/fulfill-backs from monitor events!
+		{
+			//TODO: This behavior should be documented.
+			for htlc_update in self.monitor.fetch_pending_htlc_updated() {
+				if let Some(preimage) = htlc_update.payment_preimage {
+					self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+				} else {
+					self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+				}
+			}
+		}
 		let mut ret = Vec::new();
 		let mut channel_state = self.channel_state.lock().unwrap();
 		mem::swap(&mut ret, &mut channel_state.pending_msg_events);
@@ -2631,6 +2645,20 @@ impl events::MessageSendEventsProvider for ChannelManager
 impl events::EventsProvider for ChannelManager {
 	fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+		// TODO: Event release to users and serialization is currently race-y: its very easy for a
+		// user to serialize a ChannelManager with pending events in it and lose those events on
+		// restart. This is doubly true for the fail/fulfill-backs from monitor events!
+		{
+			//TODO: This behavior should be documented.
+			for htlc_update in self.monitor.fetch_pending_htlc_updated() {
+				if let Some(preimage) = htlc_update.payment_preimage {
+					self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+				} else {
+					self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
+				}
+			}
+		}
 		let mut ret = Vec::new();
 		let mut pending_events = self.pending_events.lock().unwrap();
 		mem::swap(&mut ret, &mut *pending_events);
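
The same drain is added to both getters above, so monitor-generated fail/fulfill-backs are only processed when the user actually polls for events. Below is a minimal consumer-side sketch of such polling, written against the two provider traits this diff touches; the helper name `drain_events` and the `lightning::util::events` import path are assumptions for illustration, not part of this commit.

```rust
use lightning::util::events::{EventsProvider, MessageSendEventsProvider};

// Hypothetical helper: anything implementing both provider traits (as
// ChannelManager does per the hunks above) can be drained in one pass.
// Calling the getters is now also what triggers the monitor-generated
// HTLC claim/fail-back handling added in this commit.
fn drain_events<P: EventsProvider + MessageSendEventsProvider>(provider: &P) {
	for event in provider.get_and_clear_pending_events() {
		// dispatch application-level events (payments, funding, etc.) here
		let _ = event;
	}
	for msg_event in provider.get_and_clear_pending_msg_events() {
		// hand protocol messages to the peer handler for sending
		let _ = msg_event;
	}
}
```

The diff intentionally puts an identical drain in both getters, so whichever one the user polls first picks up any pending monitor updates.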
@@ -2711,15 +2739,6 @@ impl ChainListener for ChannelManager {
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
 		}
-		{
-			for htlc_update in self.monitor.fetch_pending_htlc_updated() {
-				if let Some(preimage) = htlc_update.payment_preimage {
-					self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
-				} else {
-					self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
-				}
-			}
-		}
 		self.latest_block_height.store(height as usize, Ordering::Release);
 		*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
 	}
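
The commit message notes this should probably be more explicit. One possible shape for that, sketched here purely for illustration (the helper name `process_pending_htlc_updates` is hypothetical and not part of this commit), is to pull the duplicated drain out of the two getters into a shared private method:

```rust
impl ChannelManager {
	/// Hypothetical shared helper (not in this commit): both event getters
	/// could call this instead of carrying duplicate copies of the drain.
	fn process_pending_htlc_updates(&self) {
		for htlc_update in self.monitor.fetch_pending_htlc_updated() {
			if let Some(preimage) = htlc_update.payment_preimage {
				self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
			} else {
				self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source,
					&htlc_update.payment_hash,
					HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
			}
		}
	}
}
```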