Mirror of https://github.com/lightningdevkit/rust-lightning.git (synced 2025-02-25 07:17:40 +01:00)
Merge pull request #3222 from tnull/2024-08-rustfmt-lightning-persister
`rustfmt`: Run on `lightning-persister`
Commit: bad5e32694
5 changed files with 241 additions and 91 deletions
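Note: the change is mechanical; the hunks below reflow existing code to the project's rustfmt style (and drop the now-formatted files from the exclusion list at the end). The dominant pattern, condensed here from the `KVStore::read` hunk below purely for illustration:

	// Before: the whole signature sits on one over-long line.
	fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
		// ...
	}

	// After rustfmt: the parameters move onto their own indented line with a
	// trailing comma, and the closing parenthesis plus return type start a new line.
	fn read(
		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
	) -> lightning::io::Result<Vec<u8>> {
		// ...
	}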
lightning-persister/src/fs_store.rs

@@ -67,7 +67,9 @@ impl FilesystemStore {
 		}
 	}
 
-	fn get_dest_dir_path(&self, primary_namespace: &str, secondary_namespace: &str) -> std::io::Result<PathBuf> {
+	fn get_dest_dir_path(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> std::io::Result<PathBuf> {
 		let mut dest_dir_path = {
 			#[cfg(target_os = "windows")]
 			{
@@ -91,7 +93,9 @@ impl FilesystemStore {
 }
 
 impl KVStore for FilesystemStore {
-	fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
+	fn read(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+	) -> lightning::io::Result<Vec<u8>> {
 		check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?;
 
 		let mut dest_file_path = self.get_dest_dir_path(primary_namespace, secondary_namespace)?;
@@ -114,15 +118,15 @@ impl KVStore for FilesystemStore {
 		Ok(buf)
 	}
 
-	fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
+	fn write(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8],
+	) -> lightning::io::Result<()> {
 		check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?;
 
 		let mut dest_file_path = self.get_dest_dir_path(primary_namespace, secondary_namespace)?;
 		dest_file_path.push(key);
 
-		let parent_directory = dest_file_path
-			.parent()
-			.ok_or_else(|| {
+		let parent_directory = dest_file_path.parent().ok_or_else(|| {
 			let msg =
 				format!("Could not retrieve parent directory of {}.", dest_file_path.display());
 			std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)
@@ -186,11 +190,11 @@ impl KVStore for FilesystemStore {
 				match res {
 					Ok(()) => {
 						// We fsync the dest file in hopes this will also flush the metadata to disk.
-						let dest_file = fs::OpenOptions::new().read(true).write(true)
-							.open(&dest_file_path)?;
+						let dest_file =
+							fs::OpenOptions::new().read(true).write(true).open(&dest_file_path)?;
 						dest_file.sync_all()?;
 						Ok(())
-					}
+					},
 					Err(e) => Err(e.into()),
 				}
 			}
@@ -201,7 +205,9 @@ impl KVStore for FilesystemStore {
 		res
 	}
 
-	fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
+	fn remove(
+		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+	) -> lightning::io::Result<()> {
 		check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?;
 
 		let mut dest_file_path = self.get_dest_dir_path(primary_namespace, secondary_namespace)?;
@@ -229,8 +235,10 @@ impl KVStore for FilesystemStore {
 				fs::remove_file(&dest_file_path)?;
 
 				let parent_directory = dest_file_path.parent().ok_or_else(|| {
-					let msg =
-						format!("Could not retrieve parent directory of {}.", dest_file_path.display());
+					let msg = format!(
+						"Could not retrieve parent directory of {}.",
+						dest_file_path.display()
+					);
 					std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)
 				})?;
 				let dir_file = fs::OpenOptions::new().read(true).open(parent_directory)?;
@@ -257,8 +265,8 @@ impl KVStore for FilesystemStore {
 				// However, all this is partially based on assumptions and local experiments, as
 				// Windows API is horribly underdocumented.
 				let mut trash_file_path = dest_file_path.clone();
-				let trash_file_ext = format!("{}.trash",
-					self.tmp_file_counter.fetch_add(1, Ordering::AcqRel));
+				let trash_file_ext =
+					format!("{}.trash", self.tmp_file_counter.fetch_add(1, Ordering::AcqRel));
 				trash_file_path.set_extension(trash_file_ext);
 
 				call!(unsafe {
@@ -273,7 +281,9 @@ impl KVStore for FilesystemStore {
 				{
 					// We fsync the trash file in hopes this will also flush the original's file
 					// metadata to disk.
-					let trash_file = fs::OpenOptions::new().read(true).write(true)
+					let trash_file = fs::OpenOptions::new()
+						.read(true)
+						.write(true)
 						.open(&trash_file_path.clone())?;
 					trash_file.sync_all()?;
 				}
@@ -290,7 +300,9 @@ impl KVStore for FilesystemStore {
 		Ok(())
 	}
 
-	fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> lightning::io::Result<Vec<String>> {
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> lightning::io::Result<Vec<String>> {
 		check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?;
 
 		let prefixed_dest = self.get_dest_dir_path(primary_namespace, secondary_namespace)?;
@@ -327,10 +339,17 @@ impl KVStore for FilesystemStore {
 
 			// If we otherwise don't find a file at the given path something went wrong.
 			if !metadata.is_file() {
-				debug_assert!(false, "Failed to list keys of {}/{}: file couldn't be accessed.",
-					PrintableString(primary_namespace), PrintableString(secondary_namespace));
-				let msg = format!("Failed to list keys of {}/{}: file couldn't be accessed.",
-					PrintableString(primary_namespace), PrintableString(secondary_namespace));
+				debug_assert!(
+					false,
+					"Failed to list keys of {}/{}: file couldn't be accessed.",
+					PrintableString(primary_namespace),
+					PrintableString(secondary_namespace)
+				);
+				let msg = format!(
+					"Failed to list keys of {}/{}: file couldn't be accessed.",
+					PrintableString(primary_namespace),
+					PrintableString(secondary_namespace)
+				);
 				return Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, msg));
 			}
 
@@ -341,20 +360,39 @@ impl KVStore for FilesystemStore {
 							keys.push(relative_path.to_string())
 						}
 					} else {
-						debug_assert!(false, "Failed to list keys of {}/{}: file path is not valid UTF-8",
-							PrintableString(primary_namespace), PrintableString(secondary_namespace));
-						let msg = format!("Failed to list keys of {}/{}: file path is not valid UTF-8",
-							PrintableString(primary_namespace), PrintableString(secondary_namespace));
-						return Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, msg));
+						debug_assert!(
+							false,
+							"Failed to list keys of {}/{}: file path is not valid UTF-8",
+							PrintableString(primary_namespace),
+							PrintableString(secondary_namespace)
+						);
+						let msg = format!(
+							"Failed to list keys of {}/{}: file path is not valid UTF-8",
+							PrintableString(primary_namespace),
+							PrintableString(secondary_namespace)
+						);
+						return Err(lightning::io::Error::new(
+							lightning::io::ErrorKind::Other,
+							msg,
+						));
 					}
-				}
+				},
 				Err(e) => {
-					debug_assert!(false, "Failed to list keys of {}/{}: {}",
-						PrintableString(primary_namespace), PrintableString(secondary_namespace), e);
-					let msg = format!("Failed to list keys of {}/{}: {}",
-						PrintableString(primary_namespace), PrintableString(secondary_namespace), e);
+					debug_assert!(
+						false,
+						"Failed to list keys of {}/{}: {}",
+						PrintableString(primary_namespace),
+						PrintableString(secondary_namespace),
+						e
+					);
+					let msg = format!(
+						"Failed to list keys of {}/{}: {}",
+						PrintableString(primary_namespace),
+						PrintableString(secondary_namespace),
+						e
+					);
 					return Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, msg));
-				}
+				},
 			}
 		}
 
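Note: the write() and remove() hunks above keep the existing comments about fsync'ing the destination/trash file so that file metadata also reaches disk. For readers unfamiliar with the idiom, here is a minimal, std-only sketch of the general temp-file-plus-rename durability pattern those comments allude to; it is illustrative only, and the helper name and exact steps are not taken from the crate:

	use std::fs;
	use std::io::Write;
	use std::path::Path;

	// Hypothetical helper: stage data in a temp file, fsync it, rename it over the
	// destination, then fsync the parent directory so the rename itself persists.
	fn write_atomically(dest: &Path, buf: &[u8]) -> std::io::Result<()> {
		let parent = dest.parent().ok_or_else(|| {
			std::io::Error::new(std::io::ErrorKind::InvalidInput, "destination has no parent")
		})?;
		fs::create_dir_all(parent)?;

		// Stage the data in a temporary sibling file and flush it to disk.
		let tmp = dest.with_extension("tmp");
		{
			let mut f = fs::File::create(&tmp)?;
			f.write_all(buf)?;
			f.sync_all()?;
		}

		// Atomically move the temp file into place.
		fs::rename(&tmp, dest)?;
		#[cfg(not(target_os = "windows"))]
		{
			// On Unix, fsync the directory so the rename is durable too.
			let dir = fs::OpenOptions::new().read(true).open(parent)?;
			dir.sync_all()?;
		}
		Ok(())
	}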
@@ -371,14 +409,14 @@ mod tests {
 
 	use bitcoin::Txid;
 
-	use lightning::chain::ChannelMonitorUpdateStatus;
 	use lightning::chain::chainmonitor::Persist;
 	use lightning::chain::transaction::OutPoint;
+	use lightning::chain::ChannelMonitorUpdateStatus;
 	use lightning::check_closed_event;
 	use lightning::events::{ClosureReason, MessageSendEventsProvider};
 	use lightning::ln::functional_test_utils::*;
-	use lightning::util::test_utils;
 	use lightning::util::persist::read_channel_monitors;
+	use lightning::util::test_utils;
 	use std::str::FromStr;
 
 	impl Drop for FilesystemStore {
@@ -387,7 +425,7 @@ mod tests {
 			// fails.
 			match fs::remove_dir_all(&self.data_dir) {
 				Err(e) => println!("Failed to remove test persister directory: {}", e),
-				_ => {}
+				_ => {},
 			}
 		}
 	}
@@ -411,14 +449,23 @@ mod tests {
 
 		let chanmon_cfgs = create_chanmon_cfgs(1);
 		let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
-		let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &store, node_cfgs[0].keys_manager);
+		let chain_mon_0 = test_utils::TestChainMonitor::new(
+			Some(&chanmon_cfgs[0].chain_source),
+			&chanmon_cfgs[0].tx_broadcaster,
+			&chanmon_cfgs[0].logger,
+			&chanmon_cfgs[0].fee_estimator,
+			&store,
+			node_cfgs[0].keys_manager,
+		);
 		node_cfgs[0].chain_monitor = chain_mon_0;
 		let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
 		let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
 
 		// Check that read_channel_monitors() returns error if monitors/ is not a
 		// directory.
-		assert!(read_channel_monitors(&store, nodes[0].keys_manager, nodes[0].keys_manager).is_err());
+		assert!(
+			read_channel_monitors(&store, nodes[0].keys_manager, nodes[0].keys_manager).is_err()
+		);
 	}
 
 	#[test]
@@ -446,8 +493,21 @@ mod tests {
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 		let error_message = "Channel force-closed";
-		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
-		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
+		nodes[1]
+			.node
+			.force_close_broadcasting_latest_txn(
+				&chan.2,
+				&nodes[0].node.get_our_node_id(),
+				error_message.to_string(),
+			)
+			.unwrap();
+		check_closed_event!(
+			nodes[1],
+			1,
+			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
+			[nodes[0].node.get_our_node_id()],
+			100000
+		);
 		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 
 		// Set the store's directory to read-only, which should result in
@@ -459,12 +519,15 @@ mod tests {
 		fs::set_permissions(path, perms).unwrap();
 
 		let test_txo = OutPoint {
-			txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
-			index: 0
+			txid: Txid::from_str(
+				"8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be",
+			)
+			.unwrap(),
+			index: 0,
 		};
 		match store.persist_new_channel(test_txo, &added_monitors[0].1) {
 			ChannelMonitorUpdateStatus::UnrecoverableError => {},
-			_ => panic!("unexpected result from persisting new channel")
+			_ => panic!("unexpected result from persisting new channel"),
 		}
 
 		nodes[1].node.get_and_clear_pending_msg_events();
@@ -484,8 +547,21 @@ mod tests {
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 		let error_message = "Channel force-closed";
-		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
-		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
+		nodes[1]
+			.node
+			.force_close_broadcasting_latest_txn(
+				&chan.2,
+				&nodes[0].node.get_our_node_id(),
+				error_message.to_string(),
+			)
+			.unwrap();
+		check_closed_event!(
+			nodes[1],
+			1,
+			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
+			[nodes[0].node.get_our_node_id()],
+			100000
+		);
 		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
 		let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
@@ -497,12 +573,15 @@ mod tests {
 		let store = FilesystemStore::new(":<>/".into());
 
 		let test_txo = OutPoint {
-			txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
-			index: 0
+			txid: Txid::from_str(
+				"8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be",
+			)
+			.unwrap(),
+			index: 0,
 		};
 		match store.persist_new_channel(test_txo, &added_monitors[0].1) {
 			ChannelMonitorUpdateStatus::UnrecoverableError => {},
-			_ => panic!("unexpected result from persisting new channel")
+			_ => panic!("unexpected result from persisting new channel"),
 		}
 
 		nodes[1].node.get_and_clear_pending_msg_events();
@@ -520,6 +599,10 @@ pub mod bench {
 		let store_a = super::FilesystemStore::new("bench_filesystem_store_a".into());
 		let store_b = super::FilesystemStore::new("bench_filesystem_store_b".into());
 		lightning::ln::channelmanager::bench::bench_two_sends(
-			bench, "bench_filesystem_persisted_sends", store_a, store_b);
+			bench,
+			"bench_filesystem_persisted_sends",
+			store_a,
+			store_b,
+		);
 	}
 }
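Note: the trait methods reformatted above are the whole persistence surface of FilesystemStore. A minimal usage sketch from an external caller's perspective, assuming only the signatures visible in this diff (the directory name and key are made up):

	use lightning::util::persist::KVStore;
	use lightning_persister::fs_store::FilesystemStore;

	fn demo() -> lightning::io::Result<()> {
		// Back the store with a directory of our choosing (illustrative path).
		let store = FilesystemStore::new("ldk_data".into());

		// Keys live under a (primary, secondary) namespace pair; empty strings
		// select the top-level namespace, as the crate's test helpers do.
		store.write("", "", "manifest", b"hello")?;
		assert_eq!(store.read("", "", "manifest")?, b"hello".to_vec());
		assert_eq!(store.list("", "")?, vec!["manifest".to_string()]);

		// The `lazy` flag is part of the trait signature shown above; `false`
		// requests a non-lazy removal.
		store.remove("", "", "manifest", false)?;
		Ok(())
	}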
lightning-persister/src/lib.rs

@@ -2,12 +2,11 @@
 
 #![deny(rustdoc::broken_intra_doc_links)]
 #![deny(rustdoc::private_intra_doc_links)]
-
 #![deny(missing_docs)]
-
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 
-#[cfg(ldk_bench)] extern crate criterion;
+#[cfg(ldk_bench)]
+extern crate criterion;
 
 pub mod fs_store;
 
lightning-persister/src/test_utils.rs

@@ -1,11 +1,12 @@
-use lightning::util::persist::{KVStore, KVSTORE_NAMESPACE_KEY_MAX_LEN, read_channel_monitors};
-use lightning::ln::functional_test_utils::{connect_block, create_announced_chan_between_nodes,
-	create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs,
-	send_payment};
 use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
-use lightning::util::test_utils;
-use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
 use lightning::events::ClosureReason;
+use lightning::ln::functional_test_utils::{
+	connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block,
+	create_network, create_node_cfgs, create_node_chanmgrs, send_payment,
+};
+use lightning::util::persist::{read_channel_monitors, KVStore, KVSTORE_NAMESPACE_KEY_MAX_LEN};
+use lightning::util::test_utils;
+use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event};
 
 use std::panic::RefUnwindSafe;
 
@@ -24,7 +25,9 @@ pub(crate) fn do_read_write_remove_list_persist<K: KVStore + RefUnwindSafe>(kv_s
 	kv_store.write("", "", key, &data).unwrap();
 	let res = std::panic::catch_unwind(|| kv_store.write("", secondary_namespace, key, &data));
 	assert!(res.is_err());
-	let res = std::panic::catch_unwind(|| kv_store.write(primary_namespace, secondary_namespace, "", &data));
+	let res = std::panic::catch_unwind(|| {
+		kv_store.write(primary_namespace, secondary_namespace, "", &data)
+	});
 	assert!(res.is_err());
 
 	let listed_keys = kv_store.list(primary_namespace, secondary_namespace).unwrap();
@@ -62,8 +65,22 @@ pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
 pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-	let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, store_0, node_cfgs[0].keys_manager);
-	let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, store_1, node_cfgs[1].keys_manager);
+	let chain_mon_0 = test_utils::TestChainMonitor::new(
+		Some(&chanmon_cfgs[0].chain_source),
+		&chanmon_cfgs[0].tx_broadcaster,
+		&chanmon_cfgs[0].logger,
+		&chanmon_cfgs[0].fee_estimator,
+		store_0,
+		node_cfgs[0].keys_manager,
+	);
+	let chain_mon_1 = test_utils::TestChainMonitor::new(
+		Some(&chanmon_cfgs[1].chain_source),
+		&chanmon_cfgs[1].tx_broadcaster,
+		&chanmon_cfgs[1].logger,
+		&chanmon_cfgs[1].fee_estimator,
+		store_1,
+		node_cfgs[1].keys_manager,
+	);
 	node_cfgs[0].chain_monitor = chain_mon_0;
 	node_cfgs[1].chain_monitor = chain_mon_1;
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -71,25 +88,31 @@ pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
 
 	// Check that the persisted channel data is empty before any channels are
 	// open.
-	let mut persisted_chan_data_0 = read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
+	let mut persisted_chan_data_0 =
+		read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
 	assert_eq!(persisted_chan_data_0.len(), 0);
-	let mut persisted_chan_data_1 = read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
+	let mut persisted_chan_data_1 =
+		read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
 	assert_eq!(persisted_chan_data_1.len(), 0);
 
 	// Helper to make sure the channel is on the expected update ID.
 	macro_rules! check_persisted_data {
 		($expected_update_id: expr) => {
-			persisted_chan_data_0 = read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
+			persisted_chan_data_0 =
+				read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager)
+					.unwrap();
 			assert_eq!(persisted_chan_data_0.len(), 1);
 			for (_, mon) in persisted_chan_data_0.iter() {
 				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
 			}
-			persisted_chan_data_1 = read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
+			persisted_chan_data_1 =
+				read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager)
+					.unwrap();
 			assert_eq!(persisted_chan_data_1.len(), 1);
 			for (_, mon) in persisted_chan_data_1.iter() {
 				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
 			}
-		}
+		};
 	}
 
 	// Create some initial channel and check that a channel was persisted.
@@ -97,25 +120,51 @@ pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
 	check_persisted_data!(0);
 
 	// Send a few payments and make sure the monitors are updated to the latest.
-	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+	send_payment(&nodes[0], &vec![&nodes[1]][..], 8000000);
 	check_persisted_data!(5);
-	send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
+	send_payment(&nodes[1], &vec![&nodes[0]][..], 4000000);
 	check_persisted_data!(10);
 
 	// Force close because cooperative close doesn't result in any persisted
 	// updates.
 	let error_message = "Channel force-closed";
-	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
-	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
+	nodes[0]
+		.node
+		.force_close_broadcasting_latest_txn(
+			&nodes[0].node.list_channels()[0].channel_id,
+			&nodes[1].node.get_our_node_id(),
+			error_message.to_string(),
+		)
+		.unwrap();
+	check_closed_event!(
+		nodes[0],
+		1,
+		ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
+		[nodes[1].node.get_our_node_id()],
+		100000
+	);
 	check_closed_broadcast!(nodes[0], true);
 	check_added_monitors!(nodes[0], 1);
 
 	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 	assert_eq!(node_txn.len(), 1);
 
-	connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
+	connect_block(
+		&nodes[1],
+		&create_dummy_block(
+			nodes[0].best_block_hash(),
+			42,
+			vec![node_txn[0].clone(), node_txn[0].clone()],
+		),
+	);
 	check_closed_broadcast!(nodes[1], true);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(
+		nodes[1],
+		1,
+		ClosureReason::CommitmentTxConfirmed,
+		[nodes[0].node.get_our_node_id()],
+		100000
+	);
 	check_added_monitors!(nodes[1], 1);
 
 	// Make sure everything is persisted as expected after close.
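Note: the helpers above exercise any KVStore implementation end to end. A compact sketch of the same round-trip idea, in the spirit of do_read_write_remove_list_persist; the helper name and namespace strings here are illustrative, not from the crate:

	use lightning::util::persist::KVStore;

	// Write, read back, list, and remove a single key under one namespace pair.
	fn assert_round_trip<K: KVStore>(store: &K) {
		let data = [42u8; 32];
		store.write("prim", "sec", "key", &data).unwrap();
		assert_eq!(store.read("prim", "sec", "key").unwrap(), data);
		assert_eq!(store.list("prim", "sec").unwrap(), vec!["key".to_string()]);
		store.remove("prim", "sec", "key", false).unwrap();
		assert!(store.list("prim", "sec").unwrap().is_empty());
	}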
lightning-persister/src/utils.rs

@@ -1,20 +1,31 @@
 use lightning::util::persist::{KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN};
 use lightning::util::string::PrintableString;
 
-
 pub(crate) fn is_valid_kvstore_str(key: &str) -> bool {
-	key.len() <= KVSTORE_NAMESPACE_KEY_MAX_LEN && key.chars().all(|c| KVSTORE_NAMESPACE_KEY_ALPHABET.contains(c))
+	key.len() <= KVSTORE_NAMESPACE_KEY_MAX_LEN
+		&& key.chars().all(|c| KVSTORE_NAMESPACE_KEY_ALPHABET.contains(c))
 }
 
 pub(crate) fn check_namespace_key_validity(
-	primary_namespace: &str, secondary_namespace: &str, key: Option<&str>, operation: &str)
-	-> Result<(), std::io::Error> {
+	primary_namespace: &str, secondary_namespace: &str, key: Option<&str>, operation: &str,
+) -> Result<(), std::io::Error> {
 	if let Some(key) = key {
 		if key.is_empty() {
-			debug_assert!(false, "Failed to {} {}/{}/{}: key may not be empty.", operation,
-				PrintableString(primary_namespace), PrintableString(secondary_namespace), PrintableString(key));
-			let msg = format!("Failed to {} {}/{}/{}: key may not be empty.", operation,
-				PrintableString(primary_namespace), PrintableString(secondary_namespace), PrintableString(key));
+			debug_assert!(
+				false,
+				"Failed to {} {}/{}/{}: key may not be empty.",
+				operation,
+				PrintableString(primary_namespace),
+				PrintableString(secondary_namespace),
+				PrintableString(key)
+			);
+			let msg = format!(
+				"Failed to {} {}/{}/{}: key may not be empty.",
+				operation,
+				PrintableString(primary_namespace),
+				PrintableString(secondary_namespace),
+				PrintableString(key)
+			);
 			return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
 		}
 
@@ -29,7 +40,10 @@ pub(crate) fn check_namespace_key_validity(
 			return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
 		}
 
-		if !is_valid_kvstore_str(primary_namespace) || !is_valid_kvstore_str(secondary_namespace) || !is_valid_kvstore_str(key) {
+		if !is_valid_kvstore_str(primary_namespace)
+			|| !is_valid_kvstore_str(secondary_namespace)
+			|| !is_valid_kvstore_str(key)
+		{
 			debug_assert!(false, "Failed to {} {}/{}/{}: primary namespace, secondary namespace, and key must be valid.",
 				operation,
 				PrintableString(primary_namespace), PrintableString(secondary_namespace), PrintableString(key));
@@ -49,10 +63,19 @@ pub(crate) fn check_namespace_key_validity(
 			return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
 		}
 		if !is_valid_kvstore_str(primary_namespace) || !is_valid_kvstore_str(secondary_namespace) {
-			debug_assert!(false, "Failed to {} {}/{}: primary namespace and secondary namespace must be valid.",
-				operation, PrintableString(primary_namespace), PrintableString(secondary_namespace));
-			let msg = format!("Failed to {} {}/{}: primary namespace and secondary namespace must be valid.",
-				operation, PrintableString(primary_namespace), PrintableString(secondary_namespace));
+			debug_assert!(
+				false,
+				"Failed to {} {}/{}: primary namespace and secondary namespace must be valid.",
+				operation,
+				PrintableString(primary_namespace),
+				PrintableString(secondary_namespace)
+			);
+			let msg = format!(
+				"Failed to {} {}/{}: primary namespace and secondary namespace must be valid.",
+				operation,
+				PrintableString(primary_namespace),
+				PrintableString(secondary_namespace)
+			);
 			return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
 		}
 	}
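Note: the reformatted helpers above encode the store's naming rules: namespaces and keys must stay within KVSTORE_NAMESPACE_KEY_MAX_LEN and use only characters from KVSTORE_NAMESPACE_KEY_ALPHABET. A small illustration of how a caller might pre-validate its own keys against the same public constants; since the helper itself is crate-private, this sketch re-derives the check rather than calling it:

	use lightning::util::persist::{KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN};

	// Mirrors the crate-private `is_valid_kvstore_str` shown in the diff above:
	// bounded length and a restricted alphabet.
	fn caller_side_key_check(key: &str) -> bool {
		key.len() <= KVSTORE_NAMESPACE_KEY_MAX_LEN
			&& key.chars().all(|c| KVSTORE_NAMESPACE_KEY_ALPHABET.contains(c))
	}

	fn main() {
		assert!(caller_side_key_check("channel_monitor_0"));
		// Path separators are not part of the allowed alphabet, so this should fail.
		assert!(!caller_side_key_check("../escape"));
	}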
rustfmt exclusion list (the newly formatted lightning-persister files are removed from it)

@@ -6,10 +6,6 @@
 ./lightning-invoice/src/tb.rs
 ./lightning-invoice/src/utils.rs
 ./lightning-invoice/tests/ser_de.rs
-./lightning-persister/src/fs_store.rs
-./lightning-persister/src/lib.rs
-./lightning-persister/src/test_utils.rs
-./lightning-persister/src/utils.rs
 ./lightning/src/blinded_path/message.rs
 ./lightning/src/blinded_path/mod.rs
 ./lightning/src/blinded_path/payment.rs