Mirror of https://github.com/lightningdevkit/rust-lightning.git, synced 2025-01-18 21:34:48 +01:00
Make BP's not_pruning_network_graph_until... test dual async-sync
parent f0b928b06d
commit 2bc55b22d3
@@ -1361,62 +1361,112 @@ mod tests {
 		}
 	}
 
+	macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
+		($nodes: expr, $receive: expr, $sleep: expr) => {
+			let features = ChannelFeatures::empty();
+			$nodes[0].network_graph.add_channel_from_partial_announcement(
+				42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
+			).expect("Failed to update channel from partial announcement");
+			let original_graph_description = $nodes[0].network_graph.to_string();
+			assert!(original_graph_description.contains("42: features: 0000, node_one:"));
+			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
+
+			loop {
+				$sleep;
+				let log_entries = $nodes[0].logger.lines.lock().unwrap();
+				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
+				if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
+					.unwrap_or(&0) > 1
+				{
+					// Wait until the loop has gone around at least twice.
+					break
+				}
+			}
+
+			let initialization_input = vec![
+				76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+				79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+				0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+				187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+				157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+				88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+				204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+				181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+				110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+				76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+				226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+				0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
+				0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
+			];
+			$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
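A note on the second argument: 1642291930 is a UNIX timestamp (mid-January 2022, around when this rapid-gossip-sync snapshot was captured). The `_no_std` variant of `update_network_graph` takes the current time as a parameter rather than reading the system clock, so the test pins it explicitly. A caller with `std` available would typically derive the value along these lines (a sketch; `unix_time_now` is a hypothetical helper, not an LDK API):

use std::time::{SystemTime, UNIX_EPOCH};

// Seconds since the UNIX epoch, suitable as the `current_time_unix`
// argument of `update_network_graph_no_std`.
fn unix_time_now() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is set before the UNIX epoch")
        .as_secs()
}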
+
+			// this should have added two channels
+			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 3);
+
+			$receive.expect("Network graph not pruned within deadline");
+
+			// all channels should now be pruned
+			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
+		}
+	}
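The macro is what makes the test "dual async-sync": `$receive` is whatever expression waits for the graph-persistence notification (a blocking `recv_timeout` in the threaded test below, a polling loop in the tokio test), and `$sleep` is how the body yields between log checks. A stripped-down sketch of the same pattern, with hypothetical names (`do_wait_test` and the channel plumbing exist only for illustration):

macro_rules! do_wait_test {
    ($receive: expr, $sleep: expr) => {
        $sleep;
        // The caller picks the waiting strategy; the assertions are shared.
        let value = $receive.expect("value not produced in time");
        assert_eq!(value, 42);
    }
}

fn main() {
    let (sender, receiver) = std::sync::mpsc::sync_channel(1);
    std::thread::spawn(move || sender.send(42).unwrap());
    do_wait_test!(
        receiver.recv_timeout(std::time::Duration::from_secs(5)),
        std::thread::sleep(std::time::Duration::from_millis(1)));
}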
+
 	#[test]
 	fn test_not_pruning_network_graph_until_graph_sync_completion() {
+		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
+
 		let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
 		let data_dir = nodes[0].persister.get_data_dir();
-		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
-		let network_graph = nodes[0].network_graph.clone();
-		let features = ChannelFeatures::empty();
-		network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
-			.expect("Failed to update channel from partial announcement");
-		let original_graph_description = network_graph.to_string();
-		assert!(original_graph_description.contains("42: features: 0000, node_one:"));
-		assert_eq!(network_graph.read_only().channels().len(), 1);
 
 		let event_handler = |_: _| {};
 		let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
-		loop {
-			let log_entries = nodes[0].logger.lines.lock().unwrap();
-			let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
-			if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
-				.unwrap_or(&0) > 1
-			{
-				// Wait until the loop has gone around at least twice.
-				break
-			}
-		}
-
-		let initialization_input = vec![
-			76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
-			79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
-			0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
-			187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
-			157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
-			88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
-			204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
-			181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
-			110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
-			76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
-			226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
-			0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
-			0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
-		];
-		nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
-
-		// this should have added two channels
-		assert_eq!(network_graph.read_only().channels().len(), 3);
-
-		receiver
-			.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5))
-			.expect("Network graph not pruned within deadline");
+		do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
+			receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
+			std::thread::sleep(Duration::from_millis(1)));
 
 		background_processor.stop().unwrap();
-
-		// all channels should now be pruned
-		assert_eq!(network_graph.read_only().channels().len(), 0);
 	}
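The channel wired through `with_graph_persistence_notifier` is how `$receive` learns anything at all: the test `Persister` (background-processor test-harness code, not a public API) sends on it whenever the network graph is persisted, so a successful receive means "the pruned graph was written out". A self-contained sketch of that notify-on-persist idea, with hypothetical types:

use std::sync::mpsc::{sync_channel, RecvTimeoutError, SyncSender};
use std::time::Duration;

// Hypothetical stand-in for the test harness's notifying persister.
struct NotifyingPersister {
    graph_persistence_notifier: Option<SyncSender<()>>,
}

impl NotifyingPersister {
    fn persist_graph(&self, _serialized_graph: &[u8]) {
        // ... write the bytes somewhere durable here ...
        if let Some(sender) = &self.graph_persistence_notifier {
            // Ignore send errors: the receiving side may be gone already.
            let _ = sender.send(());
        }
    }
}

fn main() -> Result<(), RecvTimeoutError> {
    let (sender, receiver) = sync_channel(1);
    let persister = NotifyingPersister { graph_persistence_notifier: Some(sender) };
    persister.persist_graph(b"serialized graph bytes");
    // Fail if no persistence notification arrives within the deadline.
    receiver.recv_timeout(Duration::from_secs(5))
}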
 
+	#[tokio::test]
+	#[cfg(feature = "futures")]
+	async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
+		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
+
+		let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async".to_string());
+		let data_dir = nodes[0].persister.get_data_dir();
+		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
+
+		let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
+		let bp_future = super::process_events_async(
+			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
+			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
+			Some(nodes[0].scorer.clone()), move |dur: Duration| {
+				let mut exit_receiver = exit_receiver.clone();
+				Box::pin(async move {
+					tokio::select! {
+						_ = tokio::time::sleep(dur) => false,
+						_ = exit_receiver.changed() => true,
+					}
+				})
+			}, false,
+		);
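The closure in the second-to-last argument is the async processor's "sleeper": `process_events_async` calls it whenever it needs to wait, and a returned future resolving to `true` tells the processor to shut down. Racing the timer against a `watch` channel is what lets `exit_sender.send(())` further down stop the loop promptly instead of after a full timer tick. The same construction in isolation (`make_sleeper` is a hypothetical wrapper; tokio assumed):

use std::future::Future;
use std::pin::Pin;
use std::time::Duration;

// Sleeps for `dur` and yields `false`, unless the watch channel fires
// first, in which case it yields `true` ("exit now").
fn make_sleeper(exit_receiver: tokio::sync::watch::Receiver<()>)
    -> impl Fn(Duration) -> Pin<Box<dyn Future<Output = bool> + Send>>
{
    move |dur| {
        let mut exit_receiver = exit_receiver.clone();
        Box::pin(async move {
            tokio::select! {
                _ = tokio::time::sleep(dur) => false,
                _ = exit_receiver.changed() => true,
            }
        })
    }
}

#[tokio::main]
async fn main() {
    let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
    let sleeper = make_sleeper(exit_receiver);
    exit_sender.send(()).unwrap();
    // The exit branch wins the select! long before the minute elapses.
    assert!(sleeper(Duration::from_secs(60)).await);
}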
+		// TODO: Drop _local and simply spawn after #2003
+		let local_set = tokio::task::LocalSet::new();
+		local_set.spawn_local(bp_future);
+		local_set.spawn_local(async move {
+			do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
+				let mut i = 0;
+				loop {
+					tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
+					if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
+					assert!(i < 5);
+					i += 1;
+				}
+			}, tokio::time::sleep(Duration::from_millis(1)).await);
+			exit_sender.send(()).unwrap();
+		});
+		local_set.await;
+	}
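`tokio::spawn` requires a `Send` future, and (per the TODO above, until #2003 lands) `bp_future` is not `Send`, so the test drives both tasks on a single-threaded `LocalSet` instead: `spawn_local` queues them, and awaiting the `LocalSet` runs them to completion on the current thread. The pattern in miniature, with `Rc` standing in for the non-`Send` state:

use std::rc::Rc;

#[tokio::main]
async fn main() {
    let local_set = tokio::task::LocalSet::new();
    local_set.spawn_local(async {
        let not_send = Rc::new(5); // Rc is !Send; tokio::spawn would reject this
        tokio::task::yield_now().await; // held across an await point
        assert_eq!(*not_send, 5);
    });
    // Awaiting the LocalSet drives every spawn_local task on this thread.
    local_set.await;
}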
+
 	macro_rules! do_test_payment_path_scoring {