[workspace]

members = [
"lightning",
"lightning-block-sync",
"lightning-invoice",
"lightning-net-tokio",
"lightning-persister",
"lightning-background-processor",
"lightning-rapid-gossip-sync"
]

exclude = [
"lightning-custom-message",
"lightning-transaction-sync",
"no-std-check",
]

# Our tests do actual crypto and lots of work, so the tradeoff for -O2 is well
# worth it. Note that we only apply optimizations to dependencies, not workspace
# crates themselves.
# https://doc.rust-lang.org/cargo/reference/profiles.html#profile-selection
[profile.dev.package."*"]
opt-level = 2
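
# Illustrative example (commented out so it does not affect real builds):
# Cargo also accepts per-package overrides, which take precedence over the
# "*" wildcard above, so a single heavy dependency (secp256k1 is used here
# purely as an example name) could be optimized harder in dev builds:
# [profile.dev.package.secp256k1]
# opt-level = 3
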
# It appears some minimal optimizations are required to inline many std methods
# and reduce the otherwise-substantial time spent in std self-checks. We do so
# here but ensure we keep LTO disabled as otherwise we're re-optimizing all our
# dependencies every time we make any local changes.
[profile.dev]
opt-level = 1
lto = "off"
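# Per the Cargo profile docs, "off" (unlike false) also skips the default
# thin local LTO, so no LTO work at all happens in dev builds.
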
[profile.release]
opt-level = 3
lto = true
panic = "abort"

# Forcing codegen-units = 1 here makes a small difference for NetworkGraph
# deserialization, as it enables more inlining across different files and
# hopefully better matches user performance as well. As of the commit adding
# this, on an Intel 2687W v3, the serialization benchmarks took:
#   routing::network_graph::benches::read_network_graph:  2,037,875,071 ns/iter (+/- 760,370)
#   routing::network_graph::benches::write_network_graph:   320,561,557 ns/iter (+/- 176,343)
[profile.bench]
opt-level = 3
codegen-units = 1
lto = true
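
# These bench settings apply when running `cargo bench` (see the
# profile-selection link above); opt-level and lto mirror the release profile.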