multi: extract database initialization

Oliver Gugger 2021-09-23 16:54:41 +02:00
parent 0e279eb15a
commit f6d7e70f51
4 changed files with 265 additions and 195 deletions

config.go

@@ -1544,6 +1544,7 @@ func (c *Config) ImplementationConfig() *ImplementationCfg {
GrpcRegistrar: defaultImpl,
RestRegistrar: defaultImpl,
ExternalValidator: defaultImpl,
DatabaseBuilder: NewDefaultDatabaseBuilder(c, ltndLog),
}
}
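Because ImplementationCfg is a plain struct of interface values, any single component can be swapped out after this constructor runs. A hypothetical override of just the database wiring, shown from inside package lnd where the ltndLog logger lives (the loggingBuilder type is sketched after the DatabaseBuilder interface below, and is not part of this commit):

implCfg := cfg.ImplementationConfig()

// Replace only the database bootstrap; all other components keep
// their default btcwallet-based implementations.
implCfg.DatabaseBuilder = &loggingBuilder{
	inner:  implCfg.DatabaseBuilder,
	logger: ltndLog,
}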

config_builder.go

@@ -3,9 +3,20 @@ package lnd
import (
"context"
"fmt"
"path/filepath"
"time"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btclog"
proxy "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnwallet/btcwallet"
"github.com/lightningnetwork/lnd/macaroons"
"github.com/lightningnetwork/lnd/watchtower"
"github.com/lightningnetwork/lnd/watchtower/wtclient"
"github.com/lightningnetwork/lnd/watchtower/wtdb"
"google.golang.org/grpc"
"gopkg.in/macaroon-bakery.v2/bakery"
)
@@ -46,6 +57,15 @@ type ExternalValidator interface {
Permissions() map[string][]bakery.Op
}
// DatabaseBuilder is an interface that must be satisfied by the implementation
// that provides lnd's main database backend instances.
type DatabaseBuilder interface {
// BuildDatabase extracts the current databases that we'll use for
// normal operation in the daemon. A function closure that closes all
// opened databases is also returned.
BuildDatabase(ctx context.Context) (*DatabaseInstances, func(), error)
}
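Any type with a matching BuildDatabase method satisfies this interface. A minimal sketch of a custom implementation, assuming it lives in package lnd with context, time, and btclog already imported (the loggingBuilder type itself is hypothetical, not part of this commit):

// loggingBuilder is a hypothetical DatabaseBuilder that wraps another
// builder and reports how long the database bootstrap took.
type loggingBuilder struct {
	inner  DatabaseBuilder
	logger btclog.Logger
}

// BuildDatabase satisfies DatabaseBuilder by delegating to the wrapped
// builder and timing the call.
func (l *loggingBuilder) BuildDatabase(
	ctx context.Context) (*DatabaseInstances, func(), error) {

	start := time.Now()
	dbs, cleanUp, err := l.inner.BuildDatabase(ctx)
	if err != nil {
		return nil, nil, err
	}

	l.logger.Infof("Databases built in %v", time.Since(start))
	return dbs, cleanUp, nil
}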
// ImplementationCfg is a struct that holds all configuration items for
// components that can be implemented outside lnd itself.
type ImplementationCfg struct {
@@ -60,6 +80,10 @@ type ImplementationCfg struct {
// ExternalValidator is a type that can provide external macaroon
// validation.
ExternalValidator
// DatabaseBuilder is a type that can provide lnd's main database
// backend instances.
DatabaseBuilder
}
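Note that DatabaseBuilder is embedded rather than stored in a named field, so its method is promoted onto ImplementationCfg itself. Both call forms below reach the same implementation; lnd.go (further down) uses the explicit one for readability:

// Promoted form and explicit form are equivalent; in practice only
// one of these calls would be made, since each opens the databases.
dbs, cleanUp, err := implCfg.BuildDatabase(ctx)
dbs, cleanUp, err = implCfg.DatabaseBuilder.BuildDatabase(ctx)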
// DefaultWalletImpl is the default implementation of our normal, btcwallet
@@ -113,3 +137,209 @@ func (d *DefaultWalletImpl) ValidateMacaroon(ctx context.Context,
func (d *DefaultWalletImpl) Permissions() map[string][]bakery.Op {
return nil
}
// DatabaseInstances is a struct that holds all instances to the actual
// databases that are used in lnd.
type DatabaseInstances struct {
// GraphDB is the database that stores the channel graph used for path
// finding.
//
// NOTE/TODO: This currently _needs_ to be the same instance as the
// ChanStateDB below until the separation of the two databases is fully
// complete!
GraphDB *channeldb.DB
// ChanStateDB is the database that stores all of our node's channel
// state.
//
// NOTE/TODO: This currently _needs_ to be the same instance as the
// GraphDB above until the separation of the two databases is fully
// complete!
ChanStateDB *channeldb.DB
// HeightHintDB is the database that stores height hints for spends.
HeightHintDB kvdb.Backend
// MacaroonDB is the database that stores macaroon root keys.
MacaroonDB kvdb.Backend
// DecayedLogDB is the database that stores p2p related encryption
// information.
DecayedLogDB kvdb.Backend
// TowerClientDB is the database that stores the watchtower client's
// configuration.
TowerClientDB wtclient.DB
// TowerServerDB is the database that stores the watchtower server's
// configuration.
TowerServerDB watchtower.DB
// WalletDB is the configuration for loading the wallet database using
// the btcwallet's loader.
WalletDB btcwallet.LoaderOption
}
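As the NOTE/TODO comments stress, GraphDB and ChanStateDB must refer to one and the same *channeldb.DB until the separation is complete. A hedged sanity-check sketch (this helper is illustrative only and does not exist in the commit):

// assertSharedDB enforces the documented invariant that the graph and
// channel state databases still share a single instance.
func assertSharedDB(dbs *DatabaseInstances) error {
	if dbs.GraphDB != dbs.ChanStateDB {
		return fmt.Errorf("graph and channel state DB must be " +
			"the same instance until their separation is " +
			"complete")
	}

	return nil
}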
// DefaultDatabaseBuilder is a type that builds the default database backends
// for lnd, using the given configuration to decide what actual implementation
// to use.
type DefaultDatabaseBuilder struct {
cfg *Config
logger btclog.Logger
}
// NewDefaultDatabaseBuilder returns a new instance of the default database
// builder.
func NewDefaultDatabaseBuilder(cfg *Config,
logger btclog.Logger) *DefaultDatabaseBuilder {
return &DefaultDatabaseBuilder{
cfg: cfg,
logger: logger,
}
}
// BuildDatabase extracts the current databases that we'll use for normal
// operation in the daemon. A function closure that closes all opened databases
// is also returned.
func (d *DefaultDatabaseBuilder) BuildDatabase(
ctx context.Context) (*DatabaseInstances, func(), error) {
d.logger.Infof("Opening the main database, this might take a few " +
"minutes...")
cfg := d.cfg
if cfg.DB.Backend == lncfg.BoltBackend {
d.logger.Infof("Opening bbolt database, sync_freelist=%v, "+
"auto_compact=%v", !cfg.DB.Bolt.NoFreelistSync,
cfg.DB.Bolt.AutoCompact)
}
startOpenTime := time.Now()
databaseBackends, err := cfg.DB.GetBackends(
ctx, cfg.graphDatabaseDir(), cfg.networkDir, filepath.Join(
cfg.Watchtower.TowerDir,
cfg.registeredChains.PrimaryChain().String(),
lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name),
), cfg.WtClient.Active, cfg.Watchtower.Active,
)
if err != nil {
return nil, nil, fmt.Errorf("unable to obtain database "+
"backends: %v", err)
}
// With the full remote mode we made sure both the graph and channel
// state DB point to the same local or remote DB and the same namespace
// within that DB.
dbs := &DatabaseInstances{
HeightHintDB: databaseBackends.HeightHintDB,
MacaroonDB: databaseBackends.MacaroonDB,
DecayedLogDB: databaseBackends.DecayedLogDB,
WalletDB: databaseBackends.WalletDB,
}
cleanUp := func() {
// We can just close the returned close functions directly. Even
// if we decorate the channel DB with an additional struct, its
// close function still just points to the kvdb backend.
for name, closeFunc := range databaseBackends.CloseFuncs {
if err := closeFunc(); err != nil {
d.logger.Errorf("Error closing %s "+
"database: %v", name, err)
}
}
}
if databaseBackends.Remote {
d.logger.Infof("Using remote %v database! Creating "+
"graph and channel state DB instances", cfg.DB.Backend)
} else {
d.logger.Infof("Creating local graph and channel state DB " +
"instances")
}
dbOptions := []channeldb.OptionModifier{
channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize),
channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize),
channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval),
channeldb.OptionDryRunMigration(cfg.DryRunMigration),
}
// We want to pre-allocate the channel graph cache according to what we
// expect for mainnet to speed up memory allocation.
if cfg.ActiveNetParams.Name == chaincfg.MainNetParams.Name {
dbOptions = append(
dbOptions, channeldb.OptionSetPreAllocCacheNumNodes(
channeldb.DefaultPreAllocCacheNumNodes,
),
)
}
// Otherwise, we'll open two instances, one for the state we only need
// locally, and the other for things we want to ensure are replicated.
dbs.GraphDB, err = channeldb.CreateWithBackend(
databaseBackends.GraphDB, dbOptions...,
)
switch {
// Give the DB a chance to dry run the migration. Since we know that
// both the channel state and graph DBs are still always behind the same
// backend, we know this would be applied to both of those DBs.
case err == channeldb.ErrDryRunMigrationOK:
d.logger.Infof("Graph DB dry run migration successful")
return nil, nil, err
case err != nil:
cleanUp()
err := fmt.Errorf("unable to open graph DB: %v", err)
d.logger.Error(err)
return nil, nil, err
}
// For now, we don't _actually_ split the graph and channel state DBs on
// the code level. Since they both are based upon the *channeldb.DB
// struct it will require more refactoring to fully separate them. With
// the full remote mode we at least know for now that they both point to
// the same DB backend (and also namespace within that) so we only need
// to apply any migration once.
//
// TODO(guggero): Once the full separation of anything graph related
// from the channeldb.DB is complete, the decorated instance of the
// channel state DB should be created here individually instead of just
// using the same struct (and DB backend) instance.
dbs.ChanStateDB = dbs.GraphDB
// Wrap the watchtower client DB and make sure we clean up.
if cfg.WtClient.Active {
dbs.TowerClientDB, err = wtdb.OpenClientDB(
databaseBackends.TowerClientDB,
)
if err != nil {
cleanUp()
err := fmt.Errorf("unable to open %s database: %v",
lncfg.NSTowerClientDB, err)
d.logger.Error(err)
return nil, nil, err
}
}
// Wrap the watchtower server DB and make sure we clean up.
if cfg.Watchtower.Active {
dbs.TowerServerDB, err = wtdb.OpenTowerDB(
databaseBackends.TowerServerDB,
)
if err != nil {
cleanUp()
err := fmt.Errorf("unable to open %s database: %v",
lncfg.NSTowerServerDB, err)
d.logger.Error(err)
return nil, nil, err
}
}
openTime := time.Since(startOpenTime)
d.logger.Infof("Database(s) now open (time_to_open=%v)!", openTime)
return dbs, cleanUp, nil
}
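Putting it together, a caller drives the builder as follows. This is a minimal usage sketch, not code from the commit; it mirrors how Main treats channeldb.ErrDryRunMigrationOK as an intentional early exit rather than a hard failure:

// openDatabases is a hypothetical helper showing the intended call
// pattern for the default builder.
func openDatabases(ctx context.Context, cfg *Config,
	logger btclog.Logger) (*DatabaseInstances, func(), error) {

	builder := NewDefaultDatabaseBuilder(cfg, logger)

	dbs, cleanUp, err := builder.BuildDatabase(ctx)
	switch {
	// A dry-run migration aborts startup on purpose; no instances
	// (and therefore no cleanup function) are returned.
	case err == channeldb.ErrDryRunMigrationOK:
		return nil, nil, err

	case err != nil:
		return nil, nil, err
	}

	// The caller is responsible for invoking cleanUp, typically via
	// defer, once the daemon shuts down.
	return dbs, cleanUp, nil
}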

lnd.go (179 lines changed)

@@ -22,7 +22,6 @@ import (
"sync"
"time"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwallet/wallet"
@@ -45,7 +44,6 @@ import (
"github.com/lightningnetwork/lnd/chanacceptor"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/kvdb"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnwallet"
@@ -57,8 +55,6 @@ import (
"github.com/lightningnetwork/lnd/tor"
"github.com/lightningnetwork/lnd/walletunlocker"
"github.com/lightningnetwork/lnd/watchtower"
"github.com/lightningnetwork/lnd/watchtower/wtclient"
"github.com/lightningnetwork/lnd/watchtower/wtdb"
)
const (
@@ -409,7 +405,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
ltndLog.Infof("Elected as leader (%v)", cfg.Cluster.ID)
}
dbs, cleanUp, err := initializeDatabases(ctx, cfg)
dbs, cleanUp, err := implCfg.DatabaseBuilder.BuildDatabase(ctx)
switch {
case err == channeldb.ErrDryRunMigrationOK:
ltndLog.Infof("%v, exiting", err)
@@ -420,8 +416,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
defer cleanUp()
pwService.SetLoaderOpts([]btcwallet.LoaderOption{dbs.walletDB})
pwService.SetMacaroonDB(dbs.macaroonDB)
pwService.SetLoaderOpts([]btcwallet.LoaderOption{dbs.WalletDB})
pwService.SetMacaroonDB(dbs.MacaroonDB)
walletExists, err := pwService.WalletExists()
if err != nil {
return err
@@ -500,7 +496,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
}
params, err := waitForWalletPassword(
cfg, pwService, []btcwallet.LoaderOption{dbs.walletDB},
cfg, pwService, []btcwallet.LoaderOption{dbs.WalletDB},
interceptor.ShutdownChannel(),
)
if err != nil {
@@ -530,7 +526,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
if !cfg.NoMacaroons {
// Create the macaroon authentication/authorization service.
macaroonService, err = macaroons.NewService(
dbs.macaroonDB, "lnd", walletInitParams.StatelessInit,
dbs.MacaroonDB, "lnd", walletInitParams.StatelessInit,
macaroons.IPLockChecker,
macaroons.CustomChecker(interceptorChain),
)
@@ -660,8 +656,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
LitecoindMode: cfg.LitecoindMode,
BtcdMode: cfg.BtcdMode,
LtcdMode: cfg.LtcdMode,
HeightHintDB: dbs.heightHintDB,
ChanStateDB: dbs.chanStateDB.ChannelStateDB(),
HeightHintDB: dbs.HeightHintDB,
ChanStateDB: dbs.ChanStateDB.ChannelStateDB(),
NeutrinoCS: neutrinoCS,
ActiveNetParams: cfg.ActiveNetParams,
FeeURL: cfg.FeeURL,
@@ -695,7 +691,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
NetParams: cfg.ActiveNetParams.Params,
CoinType: cfg.ActiveNetParams.CoinType,
Wallet: walletInitParams.Wallet,
LoaderOptions: []btcwallet.LoaderOption{dbs.walletDB},
LoaderOptions: []btcwallet.LoaderOption{dbs.WalletDB},
}
// Parse coin selection strategy.
@@ -797,7 +793,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
wtCfg := &watchtower.Config{
BlockFetcher: activeChainControl.ChainIO,
DB: dbs.towerServerDB,
DB: dbs.TowerServerDB,
EpochRegistrar: activeChainControl.ChainNotifier,
Net: cfg.net,
NewAddress: func() (btcutil.Address, error) {
@@ -1560,163 +1556,6 @@ func waitForWalletPassword(cfg *Config,
}
}
// databaseInstances is a struct that holds all instances to the actual
// databases that are used in lnd.
type databaseInstances struct {
graphDB *channeldb.DB
chanStateDB *channeldb.DB
heightHintDB kvdb.Backend
macaroonDB kvdb.Backend
decayedLogDB kvdb.Backend
towerClientDB wtclient.DB
towerServerDB watchtower.DB
walletDB btcwallet.LoaderOption
}
// initializeDatabases extracts the current databases that we'll use for normal
// operation in the daemon. A function closure that closes all opened databases
// is also returned.
func initializeDatabases(ctx context.Context,
cfg *Config) (*databaseInstances, func(), error) {
ltndLog.Infof("Opening the main database, this might take a few " +
"minutes...")
if cfg.DB.Backend == lncfg.BoltBackend {
ltndLog.Infof("Opening bbolt database, sync_freelist=%v, "+
"auto_compact=%v", !cfg.DB.Bolt.NoFreelistSync,
cfg.DB.Bolt.AutoCompact)
}
startOpenTime := time.Now()
databaseBackends, err := cfg.DB.GetBackends(
ctx, cfg.graphDatabaseDir(), cfg.networkDir, filepath.Join(
cfg.Watchtower.TowerDir,
cfg.registeredChains.PrimaryChain().String(),
lncfg.NormalizeNetwork(cfg.ActiveNetParams.Name),
), cfg.WtClient.Active, cfg.Watchtower.Active,
)
if err != nil {
return nil, nil, fmt.Errorf("unable to obtain database "+
"backends: %v", err)
}
// With the full remote mode we made sure both the graph and channel
// state DB point to the same local or remote DB and the same namespace
// within that DB.
dbs := &databaseInstances{
heightHintDB: databaseBackends.HeightHintDB,
macaroonDB: databaseBackends.MacaroonDB,
decayedLogDB: databaseBackends.DecayedLogDB,
walletDB: databaseBackends.WalletDB,
}
cleanUp := func() {
// We can just close the returned close functions directly. Even
// if we decorate the channel DB with an additional struct, its
// close function still just points to the kvdb backend.
for name, closeFunc := range databaseBackends.CloseFuncs {
if err := closeFunc(); err != nil {
ltndLog.Errorf("Error closing %s "+
"database: %v", name, err)
}
}
}
if databaseBackends.Remote {
ltndLog.Infof("Using remote %v database! Creating "+
"graph and channel state DB instances", cfg.DB.Backend)
} else {
ltndLog.Infof("Creating local graph and channel state DB " +
"instances")
}
dbOptions := []channeldb.OptionModifier{
channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize),
channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize),
channeldb.OptionSetBatchCommitInterval(cfg.DB.BatchCommitInterval),
channeldb.OptionDryRunMigration(cfg.DryRunMigration),
}
// We want to pre-allocate the channel graph cache according to what we
// expect for mainnet to speed up memory allocation.
if cfg.ActiveNetParams.Name == chaincfg.MainNetParams.Name {
dbOptions = append(
dbOptions, channeldb.OptionSetPreAllocCacheNumNodes(
channeldb.DefaultPreAllocCacheNumNodes,
),
)
}
// Otherwise, we'll open two instances, one for the state we only need
// locally, and the other for things we want to ensure are replicated.
dbs.graphDB, err = channeldb.CreateWithBackend(
databaseBackends.GraphDB, dbOptions...,
)
switch {
// Give the DB a chance to dry run the migration. Since we know that
// both the channel state and graph DBs are still always behind the same
// backend, we know this would be applied to both of those DBs.
case err == channeldb.ErrDryRunMigrationOK:
ltndLog.Infof("Graph DB dry run migration successful")
return nil, nil, err
case err != nil:
cleanUp()
err := fmt.Errorf("unable to open graph DB: %v", err)
ltndLog.Error(err)
return nil, nil, err
}
// For now, we don't _actually_ split the graph and channel state DBs on
// the code level. Since they both are based upon the *channeldb.DB
// struct it will require more refactoring to fully separate them. With
// the full remote mode we at least know for now that they both point to
// the same DB backend (and also namespace within that) so we only need
// to apply any migration once.
//
// TODO(guggero): Once the full separation of anything graph related
// from the channeldb.DB is complete, the decorated instance of the
// channel state DB should be created here individually instead of just
// using the same struct (and DB backend) instance.
dbs.chanStateDB = dbs.graphDB
// Wrap the watchtower client DB and make sure we clean up.
if cfg.WtClient.Active {
dbs.towerClientDB, err = wtdb.OpenClientDB(
databaseBackends.TowerClientDB,
)
if err != nil {
cleanUp()
err := fmt.Errorf("unable to open %s database: %v",
lncfg.NSTowerClientDB, err)
ltndLog.Error(err)
return nil, nil, err
}
}
// Wrap the watchtower server DB and make sure we clean up.
if cfg.Watchtower.Active {
dbs.towerServerDB, err = wtdb.OpenTowerDB(
databaseBackends.TowerServerDB,
)
if err != nil {
cleanUp()
err := fmt.Errorf("unable to open %s database: %v",
lncfg.NSTowerServerDB, err)
ltndLog.Error(err)
return nil, nil, err
}
}
openTime := time.Since(startOpenTime)
ltndLog.Infof("Database(s) now open (time_to_open=%v)!", openTime)
return dbs, cleanUp, nil
}
// initNeutrinoBackend inits a new instance of the neutrino light client
// backend given a target chain directory to store the chain state.
func initNeutrinoBackend(cfg *Config, chainDir string,

server.go

@@ -445,7 +445,7 @@ func noiseDial(idKey keychain.SingleKeyECDH,
// newServer creates a new instance of the server which is to listen using the
// passed listener address.
func newServer(cfg *Config, listenAddrs []net.Addr,
dbs *databaseInstances, cc *chainreg.ChainControl,
dbs *DatabaseInstances, cc *chainreg.ChainControl,
nodeKeyDesc *keychain.KeyDescriptor,
chansToRestore walletunlocker.ChannelsToRecover,
chanPredicate chanacceptor.ChannelAcceptor,
@@ -480,7 +480,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
// Initialize the sphinx router.
replayLog := htlcswitch.NewDecayedLog(
dbs.decayedLogDB, cc.ChainNotifier,
dbs.DecayedLogDB, cc.ChainNotifier,
)
sphinxRouter := sphinx.NewRouter(
nodeKeyECDH, cfg.ActiveNetParams.Params, replayLog,
@@ -527,10 +527,10 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
s := &server{
cfg: cfg,
graphDB: dbs.graphDB.ChannelGraph(),
chanStateDB: dbs.chanStateDB.ChannelStateDB(),
addrSource: dbs.chanStateDB,
miscDB: dbs.chanStateDB,
graphDB: dbs.GraphDB.ChannelGraph(),
chanStateDB: dbs.ChanStateDB.ChannelStateDB(),
addrSource: dbs.ChanStateDB,
miscDB: dbs.ChanStateDB,
cc: cc,
sigPool: lnwallet.NewSigPool(cfg.Workers.Sig, cc.Signer),
writePool: writePool,
@@ -538,7 +538,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
chansToRestore: chansToRestore,
channelNotifier: channelnotifier.New(
dbs.chanStateDB.ChannelStateDB(),
dbs.ChanStateDB.ChannelStateDB(),
),
identityECDH: nodeKeyECDH,
@@ -573,7 +573,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
}
s.witnessBeacon = &preimageBeacon{
wCache: dbs.chanStateDB.NewWitnessCache(),
wCache: dbs.ChanStateDB.NewWitnessCache(),
subscribers: make(map[uint64]*preimageSubscriber),
}
@@ -587,7 +587,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
uint32(currentHeight), currentHash, cc.ChainNotifier,
)
s.invoices = invoices.NewRegistry(
dbs.chanStateDB, expiryWatcher, &registryConfig,
dbs.ChanStateDB, expiryWatcher, &registryConfig,
)
s.htlcNotifier = htlcswitch.NewHtlcNotifier(time.Now)
@@ -596,7 +596,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
thresholdMSats := lnwire.NewMSatFromSatoshis(thresholdSats)
s.htlcSwitch, err = htlcswitch.New(htlcswitch.Config{
DB: dbs.chanStateDB,
DB: dbs.ChanStateDB,
FetchAllOpenChannels: s.chanStateDB.FetchAllOpenChannels,
FetchClosedChannels: s.chanStateDB.FetchClosedChannels,
LocalChannelClose: func(pubKey []byte,
@@ -613,7 +613,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
peer.HandleLocalCloseChanReqs(request)
},
FwdingLog: dbs.chanStateDB.ForwardingLog(),
FwdingLog: dbs.ChanStateDB.ForwardingLog(),
SwitchPackager: channeldb.NewSwitchPackager(),
ExtractErrorEncrypter: s.sphinx.ExtractErrorEncrypter,
FetchLastChannelUpdate: s.fetchLastChanUpdate(),
@@ -643,7 +643,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
IsChannelActive: s.htlcSwitch.HasActiveLink,
ApplyChannelUpdate: s.applyChannelUpdate,
DB: s.chanStateDB,
Graph: dbs.graphDB.ChannelGraph(),
Graph: dbs.GraphDB.ChannelGraph(),
}
chanStatusMgr, err := netann.NewChanStatusManager(chanStatusMgrCfg)
@@ -735,7 +735,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
// As the graph can be obtained at anytime from the network, we won't
// replicate it, and instead it'll only be stored locally.
chanGraph := dbs.graphDB.ChannelGraph()
chanGraph := dbs.GraphDB.ChannelGraph()
// We'll now reconstruct a node announcement based on our current
// configuration so we can send it out as a sort of heart beat within
@@ -802,7 +802,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
// The router will get access to the payment ID sequencer, such that it
// can generate unique payment IDs.
sequencer, err := htlcswitch.NewPersistentSequencer(dbs.chanStateDB)
sequencer, err := htlcswitch.NewPersistentSequencer(dbs.ChanStateDB)
if err != nil {
return nil, err
}
@@ -847,7 +847,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
}
s.missionControl, err = routing.NewMissionControl(
dbs.chanStateDB, selfNode.PubKeyBytes,
dbs.ChanStateDB, selfNode.PubKeyBytes,
&routing.MissionControlConfig{
ProbabilityEstimatorCfg: estimatorCfg,
MaxMcHistory: routingConfig.MaxMcHistory,
@@ -884,7 +884,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
PathFindingConfig: pathFindingConfig,
}
paymentControl := channeldb.NewPaymentControl(dbs.chanStateDB)
paymentControl := channeldb.NewPaymentControl(dbs.ChanStateDB)
s.controlTower = routing.NewControlTower(paymentControl)
@@ -914,11 +914,11 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
}
chanSeries := discovery.NewChanSeries(s.graphDB)
gossipMessageStore, err := discovery.NewMessageStore(dbs.chanStateDB)
gossipMessageStore, err := discovery.NewMessageStore(dbs.ChanStateDB)
if err != nil {
return nil, err
}
waitingProofStore, err := channeldb.NewWaitingProofStore(dbs.chanStateDB)
waitingProofStore, err := channeldb.NewWaitingProofStore(dbs.ChanStateDB)
if err != nil {
return nil, err
}
@@ -960,7 +960,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
}
utxnStore, err := contractcourt.NewNurseryStore(
s.cfg.ActiveNetParams.GenesisHash, dbs.chanStateDB,
s.cfg.ActiveNetParams.GenesisHash, dbs.ChanStateDB,
)
if err != nil {
srvrLog.Errorf("unable to create nursery store: %v", err)
@@ -971,7 +971,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
sweep.DefaultBatchWindowDuration)
sweeperStore, err := sweep.NewSweeperStore(
dbs.chanStateDB, s.cfg.ActiveNetParams.GenesisHash,
dbs.ChanStateDB, s.cfg.ActiveNetParams.GenesisHash,
)
if err != nil {
srvrLog.Errorf("unable to create sweeper store: %v", err)
@@ -1121,7 +1121,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
PaymentsExpirationGracePeriod: cfg.PaymentsExpirationGracePeriod,
IsForwardedHTLC: s.htlcSwitch.IsForwardedHTLC,
Clock: clock.NewDefaultClock(),
}, dbs.chanStateDB)
}, dbs.ChanStateDB)
s.breachArbiter = contractcourt.NewBreachArbiter(&contractcourt.BreachConfig{
CloseLink: closeLink,
@@ -1133,7 +1133,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
ContractBreaches: contractBreaches,
Signer: cc.Wallet.Cfg.Signer,
Store: contractcourt.NewRetributionStore(
dbs.chanStateDB,
dbs.ChanStateDB,
),
})
@@ -1347,7 +1347,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
// static backup of the latest channel state.
chanNotifier := &channelNotifier{
chanNotifier: s.channelNotifier,
addrs: dbs.chanStateDB,
addrs: dbs.ChanStateDB,
}
backupFile := chanbackup.NewMultiFile(cfg.BackupFilePath)
startingChans, err := chanbackup.FetchStaticChanBackups(
@@ -1415,7 +1415,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
SecretKeyRing: s.cc.KeyRing,
Dial: cfg.net.Dial,
AuthDial: authDial,
DB: dbs.towerClientDB,
DB: dbs.TowerClientDB,
Policy: policy,
ChainHash: *s.cfg.ActiveNetParams.GenesisHash,
MinBackoff: 10 * time.Second,
@@ -1438,7 +1438,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
SecretKeyRing: s.cc.KeyRing,
Dial: cfg.net.Dial,
AuthDial: authDial,
DB: dbs.towerClientDB,
DB: dbs.TowerClientDB,
Policy: anchorPolicy,
ChainHash: *s.cfg.ActiveNetParams.GenesisHash,
MinBackoff: 10 * time.Second,