Merge pull request #2766 from TheBlueMatt/2023-12-2314-cleanups-2

Marginally optimize test logging
Elias Rohrer 2023-12-04 12:15:56 +01:00 committed by GitHub
commit ac06a6a2f6
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 27 additions and 30 deletions
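
The change itself is small: LDK's `Record::module_path` comes from the `module_path!()` macro and is therefore a `&'static str`, but `TestLogger` was copying it into a fresh `String` on every `log` call to key its `lines` and `context` maps. Keying both maps on the borrowed `&'static str` drops those per-line allocations and lets test call sites pass bare string literals. A minimal, dependency-free sketch of the idea (this `Record` is a stand-in, not LDK's real type):

use std::collections::HashMap;

// Stand-in for LDK's log record; the real `Record::module_path` is also a
// `&'static str` because it is produced by the `module_path!()` macro.
struct Record {
    module_path: &'static str,
    line: String,
}

// Before: a `(String, String)` key forces `module_path.to_string()`, i.e. a
// heap allocation, on every single log call.
fn count_old(map: &mut HashMap<(String, String), usize>, r: &Record) {
    *map.entry((r.module_path.to_string(), r.line.clone())).or_insert(0) += 1;
}

// After: `&'static str` is `Copy`, hashes the same bytes, and can be stored
// directly with no allocation for the module half of the key.
fn count_new(map: &mut HashMap<(&'static str, String), usize>, r: &Record) {
    *map.entry((r.module_path, r.line.clone())).or_insert(0) += 1;
}

fn main() {
    let r = Record { module_path: module_path!(), line: "hello".to_string() };

    let mut old_map = HashMap::new();
    count_old(&mut old_map, &r);

    let mut new_map = HashMap::new();
    count_new(&mut new_map, &r);
    assert_eq!(new_map.get(&(module_path!(), "hello".to_string())), Some(&1));
}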

lightning-background-processor/src/lib.rs

@@ -1375,9 +1375,9 @@ mod tests {
 			let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string();
 			let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string();
 			let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string();
-			if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() &&
-				log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() &&
-				log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() {
+			if log_entries.get(&("lightning_background_processor", desired_log_1)).is_some() &&
+				log_entries.get(&("lightning_background_processor", desired_log_2)).is_some() &&
+				log_entries.get(&("lightning_background_processor", desired_log_3)).is_some() {
 				break
 			}
 		}
@@ -1556,7 +1556,7 @@ mod tests {
 		loop {
 			let log_entries = nodes[0].logger.lines.lock().unwrap();
 			let expected_log = "Persisting scorer".to_string();
-			if log_entries.get(&("lightning_background_processor".to_string(), expected_log)).is_some() {
+			if log_entries.get(&("lightning_background_processor", expected_log)).is_some() {
 				break
 			}
 		}
@@ -1580,7 +1580,7 @@ mod tests {
 				$sleep;
 				let log_entries = $nodes[0].logger.lines.lock().unwrap();
 				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
-				if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
+				if *log_entries.get(&("lightning_background_processor", loop_counter))
 					.unwrap_or(&0) > 1
 				{
 					// Wait until the loop has gone around at least twice.
@@ -1792,7 +1792,7 @@ mod tests {
 		let log_entries = nodes[0].logger.lines.lock().unwrap();
 		let expected_log = "Persisting scorer after update".to_string();
-		assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
+		assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
 	}

 	#[tokio::test]
@@ -1838,7 +1838,7 @@ mod tests {
 			let log_entries = nodes[0].logger.lines.lock().unwrap();
 			let expected_log = "Persisting scorer after update".to_string();
-			assert_eq!(*log_entries.get(&("lightning_background_processor".to_string(), expected_log)).unwrap(), 5);
+			assert_eq!(*log_entries.get(&("lightning_background_processor", expected_log)).unwrap(), 5);
 		});
 		let (r1, r2) = tokio::join!(t1, t2);
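
Why the `.to_string()` can vanish at these lookup sites: a string literal is itself a `&'static str`, so the tuple `("lightning_background_processor", desired_log_1)` now has exactly the map's key type `(&'static str, String)` and can be handed to `HashMap::get` by reference. A small, self-contained illustration (map contents made up):

use std::collections::HashMap;

fn main() {
    let mut lines: HashMap<(&'static str, String), usize> = HashMap::new();
    lines.insert(
        ("lightning_background_processor", "Calling ChannelManager's timer_tick_occurred".to_string()),
        2,
    );

    // The module half of the key is a `&'static str` literal; only the
    // expected line still needs to be an owned `String`.
    let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string();
    assert!(lines.get(&("lightning_background_processor", desired_log)).is_some());
}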

lightning/src/ln/functional_tests.rs

@@ -693,7 +693,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 		*feerate_lock += 4;
 	}
 	nodes[0].node.timer_tick_occurred();
-	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
+	nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot afford to send new feerate at {}", feerate + 4), 1);
 	check_added_monitors!(nodes[0], 0);

 	const INITIAL_COMMITMENT_NUMBER: u64 = 281474976710654;
@@ -768,7 +768,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 	//check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve)
 	//Should produce and error.
 	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg);
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Funding remote cannot afford proposed new fee".to_string(), 1);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") },
@@ -1529,7 +1529,7 @@ fn test_fee_spike_violation_fails_htlc() {
 		},
 		_ => panic!("Unexpected event"),
 	};
-	nodes[1].logger.assert_log("lightning::ln::channel".to_string(),
+	nodes[1].logger.assert_log("lightning::ln::channel",
 		format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1);

 	check_added_monitors!(nodes[1], 2);
@@ -1617,7 +1617,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
 	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
 	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
+	nodes[0].logger.assert_log("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1);
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
 	let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
 	assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value");
@@ -1796,7 +1796,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
 	// Check that the payment failed and the channel is closed in response to the malicious UpdateAdd.
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote HTLC add would put them under remote reserve value".to_string(), 1);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value".to_string(), 1);
 	assert_eq!(nodes[1].node.list_channels().len(), 1);
 	let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
 	assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value");
@@ -5930,7 +5930,7 @@ fn test_fail_holding_cell_htlc_upon_free() {
 	// us to surface its failure to the user.
 	chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
 	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);
+	nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 1 HTLC updates in channel {}", chan.2), 1);

 	// Check that the payment failed to be sent out.
 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -6018,7 +6018,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
 	// to surface its failure to the user. The first payment should succeed.
 	chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
 	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
-	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);
+	nodes[0].logger.assert_log("lightning::ln::channel", format!("Freeing holding cell with 2 HTLC updates in channel {}", chan.2), 1);

 	// Check that the second payment failed to be sent out.
 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -6292,7 +6292,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() {
 	updates.update_add_htlcs[0].amount_msat = 0;
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Remote side tried to send a 0-msat HTLC".to_string(), 1);
+	nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC".to_string(), 1);
 	check_closed_broadcast!(nodes[1], true).unwrap();
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() },
@@ -9843,10 +9843,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
 		// Outbound dust balance: 6399 sats
 		let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * (dust_inbound_htlc_on_holder_tx + 1);
 		let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * dust_outbound_htlc_on_holder_tx + dust_inbound_htlc_on_holder_tx_msat;
-		nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
+		nodes[0].logger.assert_log("lightning::ln::channel", format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, max_dust_htlc_exposure_msat), 1);
 	} else {
 		// Outbound dust balance: 5200 sats
-		nodes[0].logger.assert_log("lightning::ln::channel".to_string(),
+		nodes[0].logger.assert_log("lightning::ln::channel",
 			format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
 			dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 4,
 			max_dust_htlc_exposure_msat), 1);
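
All the call sites above change the same way: the module path becomes a bare `&'static str` literal, while the expected line is still an owned `String` (usually built with `format!`), matching the new `(&'static str, String)` key type. A reduced model of the helper these assertions go through (simplified from the real `TestLogger`, whose diff appears below):

use std::collections::HashMap;

// Simplified stand-in for TestLogger's `lines` map and `assert_log`.
struct MiniLogger {
    lines: HashMap<(&'static str, String), usize>,
}

impl MiniLogger {
    // The module is a borrowed static string; the line is owned because it
    // is usually produced by `format!` at the call site.
    fn assert_log(&self, module: &'static str, line: String, count: usize) {
        assert_eq!(self.lines.get(&(module, line)), Some(&count));
    }
}

fn main() {
    let mut lines = HashMap::new();
    lines.insert(
        ("lightning::ln::channel", "Freeing holding cell with 1 HTLC updates in channel 42".to_string()),
        1,
    );
    let logger = MiniLogger { lines };
    logger.assert_log(
        "lightning::ln::channel",
        format!("Freeing holding cell with {} HTLC updates in channel {}", 1, 42),
        1,
    );
}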

lightning/src/ln/reload_tests.rs

@@ -526,7 +526,7 @@ fn do_test_data_loss_protect(reconnect_panicing: bool, substantially_old: bool,
 	// `not_stale` to test the boundary condition.
 	let pay_params = PaymentParameters::for_keysend(nodes[1].node.get_our_node_id(), 100, false);
 	let route_params = RouteParameters::from_payment_params_and_value(pay_params, 40000);
-	nodes[0].node.send_spontaneous_payment_with_retry(None, RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), route_params, Retry::Attempts(0));
+	nodes[0].node.send_spontaneous_payment_with_retry(None, RecipientOnionFields::spontaneous_empty(), PaymentId([0; 32]), route_params, Retry::Attempts(0)).unwrap();
 	check_added_monitors(&nodes[0], 1);
 	let update_add_commit = SendEvent::from_node(&nodes[0]);
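
This hunk is a drive-by tidy rather than a logging change: `send_spontaneous_payment_with_retry` returns a `Result`, and the test now unwraps it so an unexpected failure aborts at the send rather than surfacing later as a confusing missing event. In miniature:

// A hypothetical fallible send, standing in for the real API.
fn send_payment() -> Result<(), &'static str> {
    Ok(())
}

fn main() {
    // Before: the returned `Result` was dropped, hiding any failure.
    // After: `unwrap` turns an unexpected `Err` into an immediate panic.
    send_payment().unwrap();
}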

lightning/src/util/test_utils.rs

@@ -930,8 +930,8 @@ impl events::MessageSendEventsProvider for TestRoutingMessageHandler {
 pub struct TestLogger {
 	level: Level,
 	pub(crate) id: String,
-	pub lines: Mutex<HashMap<(String, String), usize>>,
-	pub context: Mutex<HashMap<(String, Option<PublicKey>, Option<ChannelId>), usize>>,
+	pub lines: Mutex<HashMap<(&'static str, String), usize>>,
+	pub context: Mutex<HashMap<(&'static str, Option<PublicKey>, Option<ChannelId>), usize>>,
 }

 impl TestLogger {
@@ -949,7 +949,7 @@ impl TestLogger {
 	pub fn enable(&mut self, level: Level) {
 		self.level = level;
 	}
-	pub fn assert_log(&self, module: String, line: String, count: usize) {
+	pub fn assert_log(&self, module: &str, line: String, count: usize) {
 		let log_entries = self.lines.lock().unwrap();
 		assert_eq!(log_entries.get(&(module, line)), Some(&count));
 	}
@@ -961,7 +961,7 @@ impl TestLogger {
 	pub fn assert_log_contains(&self, module: &str, line: &str, count: usize) {
 		let log_entries = self.lines.lock().unwrap();
 		let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
-			m == module && l.contains(line)
+			*m == module && l.contains(line)
 		}).map(|(_, c) | { c }).sum();
 		assert_eq!(l, count)
 	}
@@ -974,7 +974,7 @@ impl TestLogger {
 	pub fn assert_log_regex(&self, module: &str, pattern: regex::Regex, count: usize) {
 		let log_entries = self.lines.lock().unwrap();
 		let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
-			m == module && pattern.is_match(&l)
+			*m == module && pattern.is_match(&l)
 		}).map(|(_, c) | { c }).sum();
 		assert_eq!(l, count)
 	}
@@ -983,18 +983,15 @@ impl TestLogger {
 		&self, module: &str, peer_id: Option<PublicKey>, channel_id: Option<ChannelId>, count: usize
 	) {
 		let context_entries = self.context.lock().unwrap();
-		let l: usize = context_entries.iter()
-			.filter(|&(&(ref m, ref p, ref c), _)| m == module && *p == peer_id && *c == channel_id)
-			.map(|(_, c) | c)
-			.sum();
-		assert_eq!(l, count)
+		let l = context_entries.get(&(module, peer_id, channel_id)).unwrap();
+		assert_eq!(*l, count)
 	}
 }

 impl Logger for TestLogger {
 	fn log(&self, record: Record) {
-		*self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
-		*self.context.lock().unwrap().entry((record.module_path.to_string(), record.peer_id, record.channel_id)).or_insert(0) += 1;
+		*self.lines.lock().unwrap().entry((record.module_path, format!("{}", record.args))).or_insert(0) += 1;
+		*self.context.lock().unwrap().entry((record.module_path, record.peer_id, record.channel_id)).or_insert(0) += 1;
 		if record.level >= self.level {
 			#[cfg(all(not(ldk_bench), feature = "std"))] {
 				let pfx = format!("{} {} [{}:{}]", self.id, record.level.to_string(), record.module_path, record.line);
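
The one hunk that changes behavior slightly is `assert_log_context_contains`: the old code scanned the whole map and summed matching counts, but its filter tested exact equality on every component of the key, so at most one entry could ever match. A single hashed `get` therefore returns the same count for present keys (a missing key now panics on `unwrap` instead of comparing `0` against `count`). In sketch form, with `PublicKey`/`ChannelId` stood in by `u8` so it runs with only std:

use std::collections::HashMap;

fn main() {
    let mut context: HashMap<(&'static str, Option<u8>, Option<u8>), usize> = HashMap::new();
    context.insert(("lightning::ln::channel", Some(1), Some(2)), 3);

    // Old shape: a linear scan, even though exact equality on the full key
    // can match at most one entry.
    let scanned: usize = context.iter()
        .filter(|&(&(m, p, c), _)| m == "lightning::ln::channel" && p == Some(1) && c == Some(2))
        .map(|(_, c)| *c)
        .sum();

    // New shape: a single hashed lookup.
    let looked_up = *context.get(&("lightning::ln::channel", Some(1), Some(2))).unwrap();

    assert_eq!(scanned, looked_up);
}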