Mirror of https://github.com/lightningnetwork/lnd.git, synced 2025-02-21 22:11:41 +01:00
lnd+channeldb: pre-allocate cache size
To avoid the channel map having to be re-grown repeatedly while we initially fill the cache, we might as well pre-allocate it with a reasonable size and so decrease the number of grow events.
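As a minimal sketch of the underlying technique (illustrative Go only, not lnd code; all names here are made up), the point is that make accepts a capacity hint for maps, so sizing the map to the expected entry count up front lets the runtime allocate its buckets once instead of repeatedly growing and rehashing during the initial fill:

package main

import "fmt"

// buildCache fills a map created with a capacity hint. The hint does not
// cap the map; it only lets the runtime size the bucket array so that the
// initial fill does not trigger repeated grow events.
func buildCache(numNodes int) map[uint64]string {
	cache := make(map[uint64]string, numNodes)
	for i := 0; i < numNodes; i++ {
		cache[uint64(i)] = fmt.Sprintf("node-%d", i)
	}
	return cache
}

func main() {
	// 15000 mirrors the mainnet node count this commit assumes below.
	fmt.Println("entries:", len(buildCache(15000)))
}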
parent bf27d05aa8
commit a95a3728b5
8 changed files with 58 additions and 16 deletions
@@ -290,7 +290,7 @@ func CreateWithBackend(backend kvdb.Backend, modifiers ...OptionModifier) (*DB,
 	var err error
 	chanDB.graph, err = NewChannelGraph(
 		backend, opts.RejectCacheSize, opts.ChannelCacheSize,
-		opts.BatchCommitInterval,
+		opts.BatchCommitInterval, opts.PreAllocCacheNumNodes,
 	)
 	if err != nil {
 		return nil, err
@@ -188,7 +188,8 @@ type ChannelGraph struct {
 // NewChannelGraph allocates a new ChannelGraph backed by a DB instance. The
 // returned instance has its own unique reject cache and channel cache.
 func NewChannelGraph(db kvdb.Backend, rejectCacheSize, chanCacheSize int,
-	batchCommitInterval time.Duration) (*ChannelGraph, error) {
+	batchCommitInterval time.Duration,
+	preAllocCacheNumNodes int) (*ChannelGraph, error) {
 
 	if err := initChannelGraph(db); err != nil {
 		return nil, err
@@ -198,7 +199,7 @@ func NewChannelGraph(db kvdb.Backend, rejectCacheSize, chanCacheSize int,
 		db:          db,
 		rejectCache: newRejectCache(rejectCacheSize),
 		chanCache:   newChannelCache(chanCacheSize),
-		graphCache:  NewGraphCache(),
+		graphCache:  NewGraphCache(preAllocCacheNumNodes),
 	}
 	g.chanScheduler = batch.NewTimeScheduler(
 		db, &g.cacheMu, batchCommitInterval,
@@ -175,10 +175,19 @@ type GraphCache struct {
 }
 
 // NewGraphCache creates a new graphCache.
-func NewGraphCache() *GraphCache {
+func NewGraphCache(preAllocNumNodes int) *GraphCache {
 	return &GraphCache{
-		nodeChannels: make(map[route.Vertex]map[uint64]*DirectedChannel),
-		nodeFeatures: make(map[route.Vertex]*lnwire.FeatureVector),
+		nodeChannels: make(
+			map[route.Vertex]map[uint64]*DirectedChannel,
+			// A channel connects two nodes, so we can look it up
+			// from both sides, meaning we get double the number of
+			// entries.
+			preAllocNumNodes*2,
+		),
+		nodeFeatures: make(
+			map[route.Vertex]*lnwire.FeatureVector,
+			preAllocNumNodes,
+		),
 	}
 }
 
@@ -97,7 +97,7 @@ func TestGraphCacheAddNode(t *testing.T) {
 		outPolicies: []*ChannelEdgePolicy{outPolicy1},
 		inPolicies:  []*ChannelEdgePolicy{inPolicy1},
 	}
-	cache := NewGraphCache()
+	cache := NewGraphCache(10)
 	require.NoError(t, cache.AddNode(nil, node))
 
 	var fromChannels, toChannels []*DirectedChannel
@@ -75,7 +75,7 @@ func MakeTestGraph(modifiers ...OptionModifier) (*ChannelGraph, func(), error) {
 
 	graph, err := NewChannelGraph(
 		backend, opts.RejectCacheSize, opts.ChannelCacheSize,
-		opts.BatchCommitInterval,
+		opts.BatchCommitInterval, opts.PreAllocCacheNumNodes,
 	)
 	if err != nil {
 		backendCleanup()
@@ -17,6 +17,12 @@ const (
 	// in order to reply to gossip queries. This produces a cache size of
 	// around 40MB.
 	DefaultChannelCacheSize = 20000
+
+	// DefaultPreAllocCacheNumNodes is the default number of channels we
+	// assume for mainnet for pre-allocating the graph cache. As of
+	// September 2021, there currently are 14k nodes in a strictly pruned
+	// graph, so we choose a number that is slightly higher.
+	DefaultPreAllocCacheNumNodes = 15000
 )
 
 // Options holds parameters for tuning and customizing a channeldb.DB.
@@ -35,6 +41,10 @@ type Options struct {
 	// wait before attempting to commit a pending set of updates.
 	BatchCommitInterval time.Duration
 
+	// PreAllocCacheNumNodes is the number of nodes we expect to be in the
+	// graph cache, so we can pre-allocate the map accordingly.
+	PreAllocCacheNumNodes int
+
 	// clock is the time source used by the database.
 	clock clock.Clock
 
@@ -52,9 +62,10 @@ func DefaultOptions() Options {
 			AutoCompactMinAge: kvdb.DefaultBoltAutoCompactMinAge,
 			DBTimeout:         kvdb.DefaultDBTimeout,
 		},
-		RejectCacheSize:  DefaultRejectCacheSize,
-		ChannelCacheSize: DefaultChannelCacheSize,
-		clock:            clock.NewDefaultClock(),
+		RejectCacheSize:       DefaultRejectCacheSize,
+		ChannelCacheSize:      DefaultChannelCacheSize,
+		PreAllocCacheNumNodes: DefaultPreAllocCacheNumNodes,
+		clock:                 clock.NewDefaultClock(),
 	}
 }
 
@@ -75,6 +86,13 @@ func OptionSetChannelCacheSize(n int) OptionModifier {
 	}
 }
 
+// OptionSetPreAllocCacheNumNodes sets the PreAllocCacheNumNodes to n.
+func OptionSetPreAllocCacheNumNodes(n int) OptionModifier {
+	return func(o *Options) {
+		o.PreAllocCacheNumNodes = n
+	}
+}
+
 // OptionSetSyncFreelist allows the database to sync its freelist.
 func OptionSetSyncFreelist(b bool) OptionModifier {
 	return func(o *Options) {
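As a usage note, here is a hedged sketch of how a caller could combine the new functional option with CreateWithBackend; the helper name, the doubled size, and the surrounding setup are illustrative assumptions rather than part of this commit, and the import paths follow the repository layout of this period:

package graphdbexample

import (
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/kvdb"
)

// openGraphDB is a hypothetical helper: it opens the channel DB on an
// already-created backend and requests a larger pre-allocated graph cache
// than the mainnet default.
func openGraphDB(backend kvdb.Backend) (*channeldb.DB, error) {
	return channeldb.CreateWithBackend(
		backend,
		// Example override: twice the mainnet default of 15k nodes.
		channeldb.OptionSetPreAllocCacheNumNodes(
			2*channeldb.DefaultPreAllocCacheNumNodes,
		),
	)
}

This mirrors what the lnd.go hunk below does, where the default of 15,000 nodes is applied only when the active network is mainnet.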
lnd.go (22 changed lines)
@@ -22,6 +22,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/btcsuite/btcd/chaincfg"
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcutil"
 	"github.com/btcsuite/btcwallet/wallet"
@@ -1679,14 +1680,27 @@ func initializeDatabases(ctx context.Context,
 			"instances")
 	}
 
-	// Otherwise, we'll open two instances, one for the state we only need
-	// locally, and the other for things we want to ensure are replicated.
-	dbs.graphDB, err = channeldb.CreateWithBackend(
-		databaseBackends.GraphDB,
+	dbOptions := []channeldb.OptionModifier{
 		channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize),
 		channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize),
 		channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval),
 		channeldb.OptionDryRunMigration(cfg.DryRunMigration),
+	}
+
+	// We want to pre-allocate the channel graph cache according to what we
+	// expect for mainnet to speed up memory allocation.
+	if cfg.ActiveNetParams.Name == chaincfg.MainNetParams.Name {
+		dbOptions = append(
+			dbOptions, channeldb.OptionSetPreAllocCacheNumNodes(
+				channeldb.DefaultPreAllocCacheNumNodes,
+			),
+		)
+	}
+
+	// Otherwise, we'll open two instances, one for the state we only need
+	// locally, and the other for things we want to ensure are replicated.
+	dbs.graphDB, err = channeldb.CreateWithBackend(
+		databaseBackends.GraphDB, dbOptions...,
 	)
 	switch {
 	// Give the DB a chance to dry run the migration. Since we know that
@@ -171,7 +171,7 @@ func makeTestGraph() (*channeldb.ChannelGraph, kvdb.Backend, func(), error) {
 	opts := channeldb.DefaultOptions()
 	graph, err := channeldb.NewChannelGraph(
 		backend, opts.RejectCacheSize, opts.ChannelCacheSize,
-		opts.BatchCommitInterval,
+		opts.BatchCommitInterval, opts.PreAllocCacheNumNodes,
 	)
 	if err != nil {
 		cleanUp()