mirror of
https://github.com/lightningnetwork/lnd.git
synced 2025-03-13 11:09:23 +01:00
Merge pull request #6214 from yyforyongyu/server-start-order
server: start htlcswitch early in the pipeline
This commit is contained in:
commit
10fba3d859
20 changed files with 34 additions and 22 deletions
|
@ -137,7 +137,7 @@ func NewSubSwapper(startingChans []Single, chanNotifier ChannelNotifier,
|
|||
func (s *SubSwapper) Start() error {
|
||||
var startErr error
|
||||
s.started.Do(func() {
|
||||
log.Infof("Starting chanbackup.SubSwapper")
|
||||
log.Infof("chanbackup.SubSwapper starting")
|
||||
|
||||
// Before we enter our main loop, we'll update the on-disk
|
||||
// state with the latest Single state, as nodes may have new
|
||||
|
|
|
@ -142,6 +142,8 @@ func NewChannelEventStore(config *Config) *ChannelEventStore {
|
|||
// information from the store. If this function fails, it cancels its existing
|
||||
// subscriptions and returns an error.
|
||||
func (c *ChannelEventStore) Start() error {
|
||||
log.Info("ChannelEventStore starting")
|
||||
|
||||
// Create a subscription to channel events.
|
||||
channelClient, err := c.cfg.SubscribeChannelEvents()
|
||||
if err != nil {
|
||||
|
|
|
@ -87,7 +87,7 @@ func New(chanDB *channeldb.ChannelStateDB) *ChannelNotifier {
|
|||
func (c *ChannelNotifier) Start() error {
|
||||
var err error
|
||||
c.started.Do(func() {
|
||||
log.Trace("ChannelNotifier starting")
|
||||
log.Info("ChannelNotifier starting")
|
||||
err = c.ntfnServer.Start()
|
||||
})
|
||||
return err
|
||||
|
|
|
@ -206,14 +206,13 @@ func NewBreachArbiter(cfg *BreachConfig) *BreachArbiter {
|
|||
func (b *BreachArbiter) Start() error {
|
||||
var err error
|
||||
b.started.Do(func() {
|
||||
brarLog.Info("Breach arbiter starting")
|
||||
err = b.start()
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *BreachArbiter) start() error {
|
||||
brarLog.Tracef("Starting breach arbiter")
|
||||
|
||||
// Load all retributions currently persisted in the retribution store.
|
||||
var breachRetInfos map[wire.OutPoint]retributionInfo
|
||||
if err := b.cfg.Store.ForAll(func(ret *retributionInfo) error {
|
||||
|
|
|
@ -487,7 +487,7 @@ func (c *ChainArbitrator) Start() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
log.Tracef("Starting ChainArbitrator")
|
||||
log.Info("ChainArbitrator starting")
|
||||
|
||||
// First, we'll fetch all the channels that are still open, in order to
|
||||
// collect them within our set of active contracts.
|
||||
|
|
|
@ -241,7 +241,7 @@ func (u *UtxoNursery) Start() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
utxnLog.Tracef("Starting UTXO nursery")
|
||||
utxnLog.Info("UTXO nursery starting")
|
||||
|
||||
// Retrieve the currently best known block. This is needed to have the
|
||||
// state machine catch up with the blocks we missed when we were down.
|
||||
|
|
|
@ -498,14 +498,13 @@ func (d *AuthenticatedGossiper) PropagateChanPolicyUpdate(
|
|||
func (d *AuthenticatedGossiper) Start() error {
|
||||
var err error
|
||||
d.started.Do(func() {
|
||||
log.Info("Authenticated Gossiper starting")
|
||||
err = d.start()
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *AuthenticatedGossiper) start() error {
|
||||
log.Info("Authenticated Gossiper is starting")
|
||||
|
||||
// First we register for new notifications of newly discovered blocks.
|
||||
// We do this immediately so we'll later be able to consume any/all
|
||||
// blocks which were discovered.
|
||||
|
|
|
@ -36,6 +36,10 @@
|
|||
could result in an "invoice too large" error when creating invoices. Hints
|
||||
are now properly limited to our maximum of 20.
|
||||
|
||||
* [Fixed an edge case where lnd might get stuck at startup due to the channel
|
||||
arbitrator relying on htlcswitch to be started
|
||||
first](https://github.com/lightningnetwork/lnd/pull/6214).
|
||||
|
||||
## Misc
|
||||
|
||||
* [An example systemd service file](https://github.com/lightningnetwork/lnd/pull/6033)
|
||||
|
|
|
@ -588,14 +588,13 @@ func NewFundingManager(cfg Config) (*Manager, error) {
|
|||
func (f *Manager) Start() error {
|
||||
var err error
|
||||
f.started.Do(func() {
|
||||
log.Info("Funding manager starting")
|
||||
err = f.start()
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *Manager) start() error {
|
||||
log.Tracef("Funding manager running")
|
||||
|
||||
// Upon restart, the Funding Manager will check the database to load any
|
||||
// channels that were waiting for their funding transactions to be
|
||||
// confirmed on the blockchain at the time when the daemon last went
|
||||
|
@ -1235,7 +1234,7 @@ func (f *Manager) handleFundingOpen(peer lnpeer.Peer,
|
|||
if amt < f.cfg.MinChanSize {
|
||||
f.failFundingFlow(
|
||||
peer, msg.PendingChannelID,
|
||||
lnwallet.ErrChanTooSmall(amt, btcutil.Amount(f.cfg.MinChanSize)),
|
||||
lnwallet.ErrChanTooSmall(amt, f.cfg.MinChanSize),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -132,6 +132,7 @@ func NewOnionProcessor(router *sphinx.Router) *OnionProcessor {
|
|||
|
||||
// Start spins up the onion processor's sphinx router.
|
||||
func (p *OnionProcessor) Start() error {
|
||||
log.Info("Onion processor starting")
|
||||
return p.router.Start()
|
||||
}
|
||||
|
||||
|
|
|
@ -81,7 +81,7 @@ func NewHtlcNotifier(now func() time.Time) *HtlcNotifier {
|
|||
func (h *HtlcNotifier) Start() error {
|
||||
var err error
|
||||
h.started.Do(func() {
|
||||
log.Trace("HtlcNotifier starting")
|
||||
log.Info("HtlcNotifier starting")
|
||||
err = h.ntfnServer.Start()
|
||||
})
|
||||
return err
|
||||
|
|
|
@ -1846,7 +1846,7 @@ func (s *Switch) Start() error {
|
|||
return errors.New("htlc switch already started")
|
||||
}
|
||||
|
||||
log.Infof("Starting HTLC Switch")
|
||||
log.Infof("HTLC Switch starting")
|
||||
|
||||
blockEpochStream, err := s.cfg.Notifier.RegisterBlockEpochNtfn(nil)
|
||||
if err != nil {
|
||||
|
|
|
@ -238,6 +238,8 @@ func (i *InvoiceRegistry) Start() error {
|
|||
return err
|
||||
}
|
||||
|
||||
log.Info("InvoiceRegistry starting")
|
||||
|
||||
i.wg.Add(1)
|
||||
go i.invoiceEventLoop()
|
||||
|
||||
|
|
|
@ -155,6 +155,7 @@ func NewSigPool(numWorkers int, signer input.Signer) *SigPool {
|
|||
// carry out its duties.
|
||||
func (s *SigPool) Start() error {
|
||||
s.started.Do(func() {
|
||||
walletLog.Info("SigPool starting")
|
||||
for i := 0; i < s.numWorkers; i++ {
|
||||
s.wg.Add(1)
|
||||
go s.poolWorker()
|
||||
|
|
|
@ -172,6 +172,7 @@ func NewChanStatusManager(cfg *ChanStatusConfig) (*ChanStatusManager, error) {
|
|||
func (m *ChanStatusManager) Start() error {
|
||||
var err error
|
||||
m.started.Do(func() {
|
||||
log.Info("Channel Status Manager starting")
|
||||
err = m.start()
|
||||
})
|
||||
return err
|
||||
|
|
|
@ -58,6 +58,7 @@ func NewHostAnnouncer(cfg HostAnnouncerConfig) *HostAnnouncer {
|
|||
// Start starts the HostAnnouncer.
|
||||
func (h *HostAnnouncer) Start() error {
|
||||
h.startOnce.Do(func() {
|
||||
log.Info("HostAnnouncer starting")
|
||||
h.wg.Add(1)
|
||||
go h.hostWatcher()
|
||||
})
|
||||
|
|
|
@ -490,7 +490,7 @@ func (r *ChannelRouter) Start() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
log.Tracef("Channel Router starting")
|
||||
log.Info("Channel Router starting")
|
||||
|
||||
bestHash, bestHeight, err := r.cfg.Chain.GetBestBlock()
|
||||
if err != nil {
|
||||
|
|
15
server.go
15
server.go
|
@ -1777,6 +1777,15 @@ func (s *server) Start() error {
|
|||
}
|
||||
cleanup = cleanup.add(s.fundingMgr.Stop)
|
||||
|
||||
// htlcSwitch must be started before chainArb since the latter
|
||||
// relies on htlcSwitch to deliver resolution message upon
|
||||
// start.
|
||||
if err := s.htlcSwitch.Start(); err != nil {
|
||||
startErr = err
|
||||
return
|
||||
}
|
||||
cleanup = cleanup.add(s.htlcSwitch.Stop)
|
||||
|
||||
if err := s.chainArb.Start(); err != nil {
|
||||
startErr = err
|
||||
return
|
||||
|
@ -1807,12 +1816,6 @@ func (s *server) Start() error {
|
|||
}
|
||||
cleanup = cleanup.add(s.sphinx.Stop)
|
||||
|
||||
if err := s.htlcSwitch.Start(); err != nil {
|
||||
startErr = err
|
||||
return
|
||||
}
|
||||
cleanup = cleanup.add(s.htlcSwitch.Stop)
|
||||
|
||||
if err := s.chanStatusMgr.Start(); err != nil {
|
||||
startErr = err
|
||||
return
|
||||
|
|
|
@ -338,7 +338,7 @@ func (s *UtxoSweeper) Start() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
log.Tracef("Sweeper starting")
|
||||
log.Info("Sweeper starting")
|
||||
|
||||
// Retrieve last published tx from database.
|
||||
lastTx, err := s.cfg.Store.GetLastPublishedTx()
|
||||
|
|
|
@ -439,7 +439,7 @@ func (c *TowerClient) buildHighestCommitHeights() {
|
|||
func (c *TowerClient) Start() error {
|
||||
var err error
|
||||
c.started.Do(func() {
|
||||
c.log.Infof("Starting watchtower client")
|
||||
c.log.Infof("Watchtower client starting")
|
||||
|
||||
// First, restart a session queue for any sessions that have
|
||||
// committed but unacked state updates. This ensures that these
|
||||
|
|
Loading…
Add table
Reference in a new issue