Merge pull request #6588 from C-Otto/peer-logging

peer: add pubkey to log messages
Oliver Gugger 2022-07-05 09:36:26 +02:00 committed by GitHub
commit 4cb68e4de3
3 changed files with 95 additions and 89 deletions
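
The change swaps the package-level peerLog for a per-peer logger that stamps every message with the peer's public key, so call sites no longer have to thread the pubkey through each format string. A minimal sketch of that prefix-logger pattern, as a standalone illustration using only the standard library (lnd's actual helper is build.NewPrefixLog, shown in the diff below):

package main

import (
	"fmt"
	"log"
	"os"
)

// prefixLogger wraps a logger and prepends a fixed prefix, here the
// peer's hex-encoded public key, to every message it emits.
type prefixLogger struct {
	prefix string
	log    *log.Logger
}

// Infof attaches the peer prefix so call sites no longer need to pass
// the pubkey with every format string.
func (p *prefixLogger) Infof(format string, args ...interface{}) {
	p.log.Printf("%s %s", p.prefix, fmt.Sprintf(format, args...))
}

func main() {
	pubKey := []byte{0x02, 0xab, 0xcd} // stand-in for a 33-byte compressed key
	l := &prefixLogger{
		prefix: fmt.Sprintf("Peer(%x):", pubKey),
		log:    log.New(os.Stderr, "", log.LstdFlags),
	}
	// Emits, e.g.: 2022/07/05 09:36:26 Peer(02abcd): Loaded 4 active channels
	l.Infof("Loaded %v active channels from database", 4)
}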

docs/release-notes/release-notes-0.15.1.md

@@ -25,6 +25,7 @@
 * [Add minor comment](https://github.com/lightningnetwork/lnd/pull/6559) on
   subscribe/cancel/lookup invoice parameter encoding.
+* [Log pubkey for peer related messages](https://github.com/lightningnetwork/lnd/pull/6588).

 ## RPC Server
@@ -57,6 +58,7 @@
 # Contributors (Alphabetical Order)

+* Carsten Otto
 * Elle Mouton
 * ErikEk
 * Eugene Siegel

peer/brontide.go

@@ -15,8 +15,10 @@ import (
 	"github.com/btcsuite/btcd/connmgr"
 	"github.com/btcsuite/btcd/txscript"
 	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btclog"
 	"github.com/davecgh/go-spew/spew"
 	"github.com/lightningnetwork/lnd/buffer"
+	"github.com/lightningnetwork/lnd/build"
 	"github.com/lightningnetwork/lnd/chainntnfs"
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/channelnotifier"
@@ -450,6 +452,9 @@ type Brontide struct {
 	queueQuit chan struct{}
 	quit      chan struct{}
 	wg        sync.WaitGroup
+
+	// log is a peer-specific logging instance.
+	log btclog.Logger
 }

 // A compile-time check to ensure that Brontide satisfies the lnpeer.Peer interface.
@@ -457,6 +462,8 @@ var _ lnpeer.Peer = (*Brontide)(nil)
 // NewBrontide creates a new Brontide from a peer.Config struct.
 func NewBrontide(cfg Config) *Brontide {
+	logPrefix := fmt.Sprintf("Peer(%x):", cfg.PubKeyBytes)
+
 	p := &Brontide{
 		cfg:          cfg,
 		activeSignal: make(chan struct{}),
@@ -474,6 +481,7 @@ func NewBrontide(cfg Config) *Brontide {
 		resentChanSyncMsg: make(map[lnwire.ChannelID]struct{}),
 		queueQuit:         make(chan struct{}),
 		quit:              make(chan struct{}),
+		log:               build.NewPrefixLog(logPrefix, peerLog),
 	}

 	return p
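
For context on the prefix built above: Go's %x verb hex-encodes the pubkey byte array, so every log line carries the full compressed key. A quick illustration (the 4-byte key is a shortened stand-in for the real 33-byte PubKeyBytes):

package main

import "fmt"

func main() {
	pubKeyBytes := [4]byte{0x03, 0x1b, 0x84, 0xc5} // real keys are 33 bytes
	logPrefix := fmt.Sprintf("Peer(%x):", pubKeyBytes)
	fmt.Println(logPrefix) // prints: Peer(031b84c5):
}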
@@ -486,7 +494,7 @@ func (p *Brontide) Start() error {
 		return nil
 	}

-	peerLog.Tracef("Peer %v starting with conn[%v->%v]", p,
+	p.log.Tracef("starting with conn[%v->%v]",
 		p.cfg.Conn.LocalAddr(), p.cfg.Conn.RemoteAddr())

 	// Fetch and then load all the active channels we have with this remote
@@ -495,8 +503,8 @@ func (p *Brontide) Start() error {
 		p.cfg.Addr.IdentityKey,
 	)
 	if err != nil {
-		peerLog.Errorf("Unable to fetch active chans "+
-			"for peer %v: %v", p, err)
+		p.log.Errorf("Unable to fetch active chans "+
+			"for peer: %v", err)
 		return err
 	}
@@ -569,8 +577,8 @@ func (p *Brontide) Start() error {
 	// Next, load all the active channels we have with this peer,
 	// registering them with the switch and launching the necessary
 	// goroutines required to operate them.
-	peerLog.Debugf("Loaded %v active channels from database with "+
-		"NodeKey(%x)", len(activeChans), p.PubKey())
+	p.log.Debugf("Loaded %v active channels from database",
+		len(activeChans))

 	msgs, err := p.loadActiveChannels(activeChans)
 	if err != nil {
@@ -592,11 +600,11 @@ func (p *Brontide) Start() error {
 	// Now that the peer has started up, we send any channel sync messages
 	// that must be resent for borked channels.
 	if len(msgs) > 0 {
-		peerLog.Infof("Sending %d channel sync messages to peer after "+
+		p.log.Infof("Sending %d channel sync messages to peer after "+
 			"loading active channels", len(msgs))
 		if err := p.SendMessage(true, msgs...); err != nil {
-			peerLog.Warnf("Failed sending channel sync "+
-				"messages to peer %v: %v", p, err)
+			p.log.Warnf("Failed sending channel sync "+
+				"messages to peer: %v", err)
 		}
 	}
@@ -622,8 +630,7 @@ func (p *Brontide) initGossipSync() {
 	// If the remote peer knows of the new gossip queries feature, then
 	// we'll create a new gossipSyncer in the AuthenticatedGossiper for it.
 	if p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional) {
-		peerLog.Infof("Negotiated chan series queries with %x",
-			p.cfg.PubKeyBytes[:])
+		p.log.Info("Negotiated chan series queries")

 		// Register the peer's gossip syncer with the gossiper.
 		// This blocks synchronously to ensure the gossip syncer is
@@ -671,15 +678,14 @@ func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) (
 		chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

-		peerLog.Infof("NodeKey(%x) loading ChannelPoint(%v)",
-			p.PubKey(), chanPoint)
+		p.log.Infof("loading ChannelPoint(%v)", chanPoint)

 		// Skip adding any permanently irreconcilable channels to the
 		// htlcswitch.
 		if !dbChan.HasChanStatus(channeldb.ChanStatusDefault) &&
 			!dbChan.HasChanStatus(channeldb.ChanStatusRestored) {

-			peerLog.Warnf("ChannelPoint(%v) has status %v, won't "+
+			p.log.Warnf("ChannelPoint(%v) has status %v, won't "+
 				"start.", chanPoint, dbChan.ChanStatus())

 			// To help our peer recover from a potential data loss,
@@ -690,7 +696,7 @@ func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) (
 			// marking the channel borked.
 			chanSync, err := dbChan.ChanSyncMsg()
 			if err != nil {
-				peerLog.Errorf("Unable to create channel "+
+				p.log.Errorf("Unable to create channel "+
 					"reestablish message for channel %v: "+
 					"%v", chanPoint, err)
 				continue
@@ -707,7 +713,7 @@ func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) (
 			shutdownMsg, err := p.restartCoopClose(lnChan)
 			if err != nil {
-				peerLog.Errorf("Unable to restart "+
+				p.log.Errorf("Unable to restart "+
 					"coop close for channel: %v",
 					err)
 				continue
@@ -763,13 +769,13 @@ func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) (
 				TimeLockDelta: uint32(selfPolicy.TimeLockDelta),
 			}
 		} else {
-			peerLog.Warnf("Unable to find our forwarding policy "+
+			p.log.Warnf("Unable to find our forwarding policy "+
 				"for channel %v, using default values",
 				chanPoint)
 			forwardingPolicy = &p.cfg.RoutingPolicy
 		}

-		peerLog.Tracef("Using link policy of: %v",
+		p.log.Tracef("Using link policy of: %v",
 			spew.Sdump(forwardingPolicy))

 		// If the channel is pending, set the value to nil in the
@@ -930,13 +936,12 @@ func (p *Brontide) maybeSendNodeAnn(channels []*channeldb.OpenChannel) {
 	ourNodeAnn, err := p.cfg.GenNodeAnnouncement(false)
 	if err != nil {
-		peerLog.Debugf("Unable to retrieve node announcement: %v", err)
+		p.log.Debugf("Unable to retrieve node announcement: %v", err)
 		return
 	}

 	if err := p.SendMessageLazy(false, &ourNodeAnn); err != nil {
-		peerLog.Debugf("Unable to resend node announcement to %x: %v",
-			p.cfg.PubKeyBytes, err)
+		p.log.Debugf("Unable to resend node announcement: %v", err)
 	}
 }
@@ -968,7 +973,7 @@ func (p *Brontide) Disconnect(reason error) {
 	err := fmt.Errorf("disconnecting %s, reason: %v", p, reason)
 	p.storeError(err)

-	peerLog.Infof(err.Error())
+	p.log.Infof(err.Error())

 	// Ensure that the TCP connection is properly closed before continuing.
 	p.cfg.Conn.Close()
@@ -1366,8 +1371,7 @@ out:
 			}
 		}

 		if err != nil {
-			peerLog.Infof("unable to read message from %v: %v",
-				p, err)
+			p.log.Infof("unable to read message from peer: %v", err)

 			// If we could not read our peer's message due to an
 			// unknown type or invalid alias, we continue processing
@@ -1477,7 +1481,7 @@ out:
 				err := p.resendChanSyncMsg(targetChan)
 				if err != nil {
 					// TODO(halseth): send error to peer?
-					peerLog.Errorf("resend failed: %v",
+					p.log.Errorf("resend failed: %v",
 						err)
 				}
 			}
@@ -1502,7 +1506,7 @@ out:
 			err := p.handleCustomMessage(msg)
 			if err != nil {
 				p.storeError(err)
-				peerLog.Errorf("peer: %v, %v", p, err)
+				p.log.Errorf("%v", err)
 			}

 		default:
@@ -1512,7 +1516,7 @@ out:
 				uint16(msg.MsgType()))
 			p.storeError(err)

-			peerLog.Errorf("peer: %v, %v", p, err)
+			p.log.Errorf("%v", err)
 		}

 		if isLinkUpdate {
@@ -1539,7 +1543,7 @@ out:
 	p.Disconnect(errors.New("read handler closed"))

-	peerLog.Tracef("readHandler for peer %v done", p)
+	p.log.Trace("readHandler for peer done")
 }

 // handleCustomMessage handles the given custom message if a handler is
@@ -1584,7 +1588,7 @@ func (p *Brontide) storeError(err error) {
 	// If we do not have any active channels with the peer, we do not store
 	// errors as a dos mitigation.
 	if !haveChannels {
-		peerLog.Tracef("no channels with peer: %v, not storing err", p)
+		p.log.Trace("no channels with peer, not storing err")
 		return
 	}
@@ -1773,7 +1777,7 @@ func (p *Brontide) logWireMessage(msg lnwire.Message, read bool) {
 		summaryPrefix = "Sending"
 	}

-	peerLog.Debugf("%v", newLogClosure(func() string {
+	p.log.Debugf("%v", newLogClosure(func() string {
 		// Debug summary of message.
 		summary := messageSummary(msg)
 		if len(summary) > 0 {
@@ -1796,12 +1800,12 @@ func (p *Brontide) logWireMessage(msg lnwire.Message, read bool) {
 			msgType, summary, preposition, p)
 	}))

-	prefix := "readMessage from"
+	prefix := "readMessage from peer"
 	if !read {
-		prefix = "writeMessage to"
+		prefix = "writeMessage to peer"
 	}

-	peerLog.Tracef(prefix+" %v: %v", p, newLogClosure(func() string {
+	p.log.Tracef(prefix+": %v", newLogClosure(func() string {
 		return spew.Sdump(msg)
 	}))
 }
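
The newLogClosure wrapper kept in these call sites defers the expensive spew.Sdump until the backend actually formats the message, so trace-level dumps cost nothing when tracing is off. A minimal sketch of that pattern (this mirrors the fmt.Stringer-based helper btcd and lnd use, reimplemented here for illustration):

package main

import "fmt"

// logClosure defers building an expensive log string until the logger
// actually calls String(), i.e. only when the log level is enabled.
type logClosure func() string

func (c logClosure) String() string {
	return c()
}

func newLogClosure(c func() string) logClosure {
	return logClosure(c)
}

func main() {
	msg := newLogClosure(func() string {
		// Imagine spew.Sdump(msg) here: it only runs if printed.
		return "expensive dump"
	})
	// The %v verb triggers String() and therefore the closure.
	fmt.Printf("readMessage from peer: %v\n", msg)
}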
@@ -1919,9 +1923,9 @@ out:
 			// slow to process messages from the wire.
 			err := p.writeMessage(outMsg.msg)
 			if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
-				peerLog.Debugf("Write timeout detected for "+
-					"peer %s, first write for message "+
-					"attempted %v ago", p,
+				p.log.Debugf("Write timeout detected for "+
+					"peer, first write for message "+
+					"attempted %v ago",
 					time.Since(startTime))

 				// If we received a timeout error, this implies
@@ -1970,7 +1974,7 @@ out:
 	p.Disconnect(exitErr)

-	peerLog.Tracef("writeHandler for peer %v done", p)
+	p.log.Trace("writeHandler for peer done")
 }

 // queueHandler is responsible for accepting messages from outside subsystems
@@ -2056,7 +2060,7 @@ func (p *Brontide) pingHandler() {
 	blockEpochs, err := p.cfg.ChainNotifier.RegisterBlockEpochNtfn(nil)
 	if err != nil {
-		peerLog.Errorf("unable to establish block epoch "+
+		p.log.Errorf("unable to establish block epoch "+
 			"subscription: %v", err)
 		return
 	}
@@ -2077,7 +2081,7 @@ out:
 		// types of eclipse attacks.
 		case epoch, ok := <-blockEpochs.Epochs:
 			if !ok {
-				peerLog.Debugf("block notifications " +
+				p.log.Debugf("block notifications " +
 					"canceled")
 				return
 			}
@@ -2086,7 +2090,7 @@ out:
 			headerBuf := bytes.NewBuffer(pingPayload[0:0])
 			err := blockHeader.Serialize(headerBuf)
 			if err != nil {
-				peerLog.Errorf("unable to encode header: %v",
+				p.log.Errorf("unable to encode header: %v",
 					err)
 			}
@@ -2132,7 +2136,7 @@ func (p *Brontide) queue(priority bool, msg lnwire.Message,
 	select {
 	case p.outgoingQueue <- outgoingMsg{priority, msg, errChan}:
 	case <-p.quit:
-		peerLog.Tracef("Peer shutting down, could not enqueue msg: %v.",
+		p.log.Tracef("Peer shutting down, could not enqueue msg: %v.",
 			spew.Sdump(msg))
 		if errChan != nil {
 			errChan <- lnpeer.ErrPeerExiting
@@ -2175,7 +2179,7 @@ func (p *Brontide) genDeliveryScript() ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	peerLog.Infof("Delivery addr for channel close: %v",
+	p.log.Infof("Delivery addr for channel close: %v",
 		deliveryAddr)

 	return txscript.PayToAddrScript(deliveryAddr)
@@ -2211,7 +2215,7 @@ out:
 			p.activeChanMtx.Lock()
 			currentChan, ok := p.activeChannels[chanID]
 			if ok && currentChan != nil {
-				peerLog.Infof("Already have ChannelPoint(%v), "+
+				p.log.Infof("Already have ChannelPoint(%v), "+
 					"ignoring.", chanPoint)

 				p.activeChanMtx.Unlock()
@@ -2225,14 +2229,14 @@ out:
 					continue
 				}

-				peerLog.Infof("Processing retransmitted "+
+				p.log.Infof("Processing retransmitted "+
 					"FundingLocked for ChannelPoint(%v)",
 					chanPoint)

 				nextRevoke := newChan.RemoteNextRevocation
 				err := currentChan.InitNextRevocation(nextRevoke)
 				if err != nil {
-					peerLog.Errorf("unable to init chan "+
+					p.log.Errorf("unable to init chan "+
 						"revocation: %v", err)
 					continue
 				}
@@ -2250,7 +2254,7 @@ out:
 				p.activeChanMtx.Unlock()
 				err := fmt.Errorf("unable to create "+
 					"LightningChannel: %v", err)
-				peerLog.Errorf(err.Error())
+				p.log.Errorf(err.Error())
 				newChanReq.err <- err
 				continue
@@ -2262,8 +2266,8 @@ out:
 			p.addedChannels[chanID] = struct{}{}
 			p.activeChanMtx.Unlock()

-			peerLog.Infof("New channel active ChannelPoint(%v) "+
-				"with NodeKey(%x)", chanPoint, p.PubKey())
+			p.log.Infof("New channel active ChannelPoint(%v) "+
+				"with peer", chanPoint)

 			// Next, we'll assemble a ChannelLink along with the
 			// necessary items it needs to function.
@@ -2275,7 +2279,7 @@ out:
 			if err != nil {
 				err := fmt.Errorf("unable to subscribe to "+
 					"chain events: %v", err)
-				peerLog.Errorf(err.Error())
+				p.log.Errorf(err.Error())
 				newChanReq.err <- err
 				continue
@@ -2311,9 +2315,8 @@ out:
 			)
 			if err != nil {
 				err := fmt.Errorf("can't register new channel "+
-					"link(%v) with NodeKey(%x)", chanPoint,
-					p.PubKey())
-				peerLog.Errorf(err.Error())
+					"link(%v) with peer", chanPoint)
+				p.log.Errorf(err.Error())
 				newChanReq.err <- err
 				continue
@@ -2419,10 +2422,10 @@ func (p *Brontide) reenableActiveChannels() {
 	for _, chanPoint := range activePublicChans {
 		err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint, false)
 		if err == netann.ErrEnableManuallyDisabledChan {
-			peerLog.Debugf("Channel(%v) was manually disabled, ignoring "+
+			p.log.Debugf("Channel(%v) was manually disabled, ignoring "+
 				"automatic enable request", chanPoint)
 		} else if err != nil {
-			peerLog.Errorf("Unable to enable channel %v: %v",
+			p.log.Errorf("Unable to enable channel %v: %v",
 				chanPoint, err)
 		}
 	}
@@ -2458,7 +2461,7 @@ func (p *Brontide) fetchActiveChanCloser(chanID lnwire.ChannelID) (
 		// Optimistically try a link shutdown, erroring out if it failed.
 		if err := p.tryLinkShutdown(chanID); err != nil {
-			peerLog.Errorf("failed link shutdown: %v", err)
+			p.log.Errorf("failed link shutdown: %v", err)
 			return nil, err
 		}
@@ -2475,7 +2478,7 @@ func (p *Brontide) fetchActiveChanCloser(chanID lnwire.ChannelID) (
 			var err error
 			deliveryScript, err = p.genDeliveryScript()
 			if err != nil {
-				peerLog.Errorf("unable to gen delivery script: %v",
+				p.log.Errorf("unable to gen delivery script: %v",
 					err)
 				return nil, fmt.Errorf("close addr unavailable")
 			}
@@ -2487,7 +2490,7 @@ func (p *Brontide) fetchActiveChanCloser(chanID lnwire.ChannelID) (
 			p.cfg.CoopCloseTargetConfs,
 		)
 		if err != nil {
-			peerLog.Errorf("unable to query fee estimator: %v", err)
+			p.log.Errorf("unable to query fee estimator: %v", err)
 			return nil, fmt.Errorf("unable to estimate fee")
 		}
@@ -2495,7 +2498,7 @@ func (p *Brontide) fetchActiveChanCloser(chanID lnwire.ChannelID) (
 			channel, deliveryScript, feePerKw, nil, false,
 		)
 		if err != nil {
-			peerLog.Errorf("unable to create chan closer: %v", err)
+			p.log.Errorf("unable to create chan closer: %v", err)
 			return nil, fmt.Errorf("unable to create chan closer")
 		}
@@ -2564,7 +2567,7 @@ func (p *Brontide) restartCoopClose(lnChan *lnwallet.LightningChannel) (
 		var err error
 		deliveryScript, err = p.genDeliveryScript()
 		if err != nil {
-			peerLog.Errorf("unable to gen delivery script: %v",
+			p.log.Errorf("unable to gen delivery script: %v",
 				err)
 			return nil, fmt.Errorf("close addr unavailable")
 		}
@@ -2575,7 +2578,7 @@ func (p *Brontide) restartCoopClose(lnChan *lnwallet.LightningChannel) (
 		p.cfg.CoopCloseTargetConfs,
 	)
 	if err != nil {
-		peerLog.Errorf("unable to query fee estimator: %v", err)
+		p.log.Errorf("unable to query fee estimator: %v", err)
 		return nil, fmt.Errorf("unable to estimate fee")
 	}
@@ -2589,7 +2592,7 @@ func (p *Brontide) restartCoopClose(lnChan *lnwallet.LightningChannel) (
 		lnChan, deliveryScript, feePerKw, nil, locallyInitiated,
 	)
 	if err != nil {
-		peerLog.Errorf("unable to create chan closer: %v", err)
+		p.log.Errorf("unable to create chan closer: %v", err)
 		return nil, fmt.Errorf("unable to create chan closer")
 	}
@@ -2602,7 +2605,7 @@ func (p *Brontide) restartCoopClose(lnChan *lnwallet.LightningChannel) (
 	// Create the Shutdown message.
 	shutdownMsg, err := chanCloser.ShutdownChan()
 	if err != nil {
-		peerLog.Errorf("unable to create shutdown message: %v", err)
+		p.log.Errorf("unable to create shutdown message: %v", err)
 		delete(p.activeChanCloses, chanID)
 		return nil, err
 	}
@@ -2619,7 +2622,7 @@ func (p *Brontide) createChanCloser(channel *lnwallet.LightningChannel,
 	_, startingHeight, err := p.cfg.ChainIO.GetBestBlock()
 	if err != nil {
-		peerLog.Errorf("unable to obtain best block: %v", err)
+		p.log.Errorf("unable to obtain best block: %v", err)
 		return nil, fmt.Errorf("cannot obtain best block")
 	}
@@ -2661,7 +2664,7 @@ func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 	if !ok || channel == nil {
 		err := fmt.Errorf("unable to close channel, ChannelID(%v) is "+
 			"unknown", chanID)
-		peerLog.Errorf(err.Error())
+		p.log.Errorf(err.Error())
 		req.Err <- err
 		return
 	}
@@ -2683,7 +2686,7 @@ func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 			channel.LocalUpfrontShutdownScript(), req.DeliveryScript,
 		)
 		if err != nil {
-			peerLog.Errorf("cannot close channel %v: %v", req.ChanPoint, err)
+			p.log.Errorf("cannot close channel %v: %v", req.ChanPoint, err)
 			req.Err <- err
 			return
 		}
@@ -2693,7 +2696,7 @@ func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 		if len(deliveryScript) == 0 {
 			deliveryScript, err = p.genDeliveryScript()
 			if err != nil {
-				peerLog.Errorf(err.Error())
+				p.log.Errorf(err.Error())
 				req.Err <- err
 				return
 			}
@@ -2702,7 +2705,7 @@ func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 		// Optimistically try a link shutdown, erroring out if it
 		// failed.
 		if err := p.tryLinkShutdown(chanID); err != nil {
-			peerLog.Errorf("failed link shutdown: %v", err)
+			p.log.Errorf("failed link shutdown: %v", err)
 			req.Err <- err
 			return
 		}
@@ -2711,7 +2714,7 @@ func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 			channel, deliveryScript, req.TargetFeePerKw, req, true,
 		)
 		if err != nil {
-			peerLog.Errorf(err.Error())
+			p.log.Errorf(err.Error())
 			req.Err <- err
 			return
 		}
@@ -2723,7 +2726,7 @@ func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 		// party to kick things off.
 		shutdownMsg, err := chanCloser.ShutdownChan()
 		if err != nil {
-			peerLog.Errorf(err.Error())
+			p.log.Errorf(err.Error())
 			req.Err <- err
 			delete(p.activeChanCloses, chanID)
@@ -2739,7 +2742,7 @@ func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 	// the channel therefore we need to clean up our local state.
 	case contractcourt.CloseBreach:
 		// TODO(roasbeef): no longer need with newer beach logic?
-		peerLog.Infof("ChannelPoint(%v) has been breached, wiping "+
+		p.log.Infof("ChannelPoint(%v) has been breached, wiping "+
 			"channel", req.ChanPoint)
 		p.WipeChannel(req.ChanPoint)
 	}
@@ -2780,17 +2783,17 @@ func (p *Brontide) handleLinkFailure(failure linkFailureReport) {
 	// If the error encountered was severe enough, we'll now force close the
 	// channel to prevent reading it to the switch in the future.
 	if failure.linkErr.ForceClose {
-		peerLog.Warnf("Force closing link(%v)",
+		p.log.Warnf("Force closing link(%v)",
 			failure.shortChanID)

 		closeTx, err := p.cfg.ChainArb.ForceCloseContract(
 			failure.chanPoint,
 		)
 		if err != nil {
-			peerLog.Errorf("unable to force close "+
+			p.log.Errorf("unable to force close "+
 				"link(%v): %v", failure.shortChanID, err)
 		} else {
-			peerLog.Infof("channel(%v) force "+
+			p.log.Infof("channel(%v) force "+
 				"closed with txid %v",
 				failure.shortChanID, closeTx.TxHash())
 		}
@@ -2798,11 +2801,11 @@ func (p *Brontide) handleLinkFailure(failure linkFailureReport) {
 	// If this is a permanent failure, we will mark the channel borked.
 	if failure.linkErr.PermanentFailure && lnChan != nil {
-		peerLog.Warnf("Marking link(%v) borked due to permanent "+
+		p.log.Warnf("Marking link(%v) borked due to permanent "+
 			"failure", failure.shortChanID)

 		if err := lnChan.State().MarkBorked(); err != nil {
-			peerLog.Errorf("Unable to mark channel %v borked: %v",
+			p.log.Errorf("Unable to mark channel %v borked: %v",
 				failure.shortChanID, err)
 		}
 	}
@@ -2822,7 +2825,7 @@ func (p *Brontide) handleLinkFailure(failure linkFailureReport) {
 			Data:   data,
 		})
 		if err != nil {
-			peerLog.Errorf("unable to send msg to "+
+			p.log.Errorf("unable to send msg to "+
 				"remote peer: %v", err)
 		}
 	}
@@ -2909,7 +2912,7 @@ func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
 	closingTx, err := chanCloser.ClosingTx()
 	if err != nil {
 		if closeReq != nil {
-			peerLog.Error(err)
+			p.log.Error(err)
 			closeReq.Err <- err
 		}
 	}
@@ -3063,9 +3066,9 @@ func (p *Brontide) sendInitMsg(legacyChan bool) error {
 	// connection because it does not understand a required feature bit, and
 	// our channel will be unusable.
 	if legacyChan && features.RequiresFeature(lnwire.StaticRemoteKeyRequired) {
-		peerLog.Infof("Legacy channel open with peer: %x, "+
-			"downgrading static remote required feature bit to "+
-			"optional", p.PubKey())
+		p.log.Infof("Legacy channel open with peer, " +
+			"downgrading static remote required feature bit to " +
+			"optional")

 		// Unset and set in both the local and global features to
 		// ensure both sets are consistent and merge able by old and
@@ -3111,16 +3114,16 @@ func (p *Brontide) resendChanSyncMsg(cid lnwire.ChannelID) error {
 			"peer=%x", p.IdentityKey().SerializeCompressed())
 	}

-	peerLog.Debugf("Re-sending channel sync message for channel %v to "+
-		"peer %v", cid, p)
+	p.log.Debugf("Re-sending channel sync message for channel %v to "+
+		"peer", cid)

 	if err := p.SendMessage(true, c.LastChanSyncMsg); err != nil {
 		return fmt.Errorf("failed resending channel sync "+
 			"message to peer %v: %v", p, err)
 	}

-	peerLog.Debugf("Re-sent channel sync message for channel %v to peer "+
-		"%v", cid, p)
+	p.log.Debugf("Re-sent channel sync message for channel %v to peer",
+		cid)

 	// Note down that we sent the message, so we won't resend it again for
 	// this connection.
@@ -3264,7 +3267,7 @@ func (p *Brontide) handleCloseMsg(msg *closeMsg) {
 			return
 		}

-		peerLog.Errorf("Unable to respond to remote close msg: %v", err)
+		p.log.Errorf("Unable to respond to remote close msg: %v", err)

 		errMsg := &lnwire.Error{
 			ChanID: msg.cid,
@@ -3281,7 +3284,7 @@ func (p *Brontide) handleCloseMsg(msg *closeMsg) {
 	)
 	if err != nil {
 		err := fmt.Errorf("unable to process close msg: %v", err)
-		peerLog.Error(err)
+		p.log.Error(err)

 		// As the negotiations failed, we'll reset the channel state machine to
 		// ensure we act to on-chain events as normal.
@@ -3318,11 +3321,11 @@ func (p *Brontide) handleCloseMsg(msg *closeMsg) {
 func (p *Brontide) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) {
 	select {
 	case p.localCloseChanReqs <- req:
-		peerLog.Infof("Local close channel request delivered to "+
-			"peer: %x", p.PubKey())
+		p.log.Info("Local close channel request delivered to " +
+			"peer")

 	case <-p.quit:
-		peerLog.Infof("Unable to deliver local close channel request "+
-			"to peer %x", p.PubKey())
+		p.log.Info("Unable to deliver local close channel request " +
+			"to peer")
 	}
 }

peer/brontide_test.go

@@ -961,6 +961,7 @@ func TestStaticRemoteDowngrade(t *testing.T) {
 			WritePool: writePool,
 			PongBuf:   make([]byte, lnwire.MaxPongBytes),
 		},
+		log: peerLog,
 	}

 	var b bytes.Buffer
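
Because the logger is now an injected field rather than a hard-wired package global, tests can substitute any btclog.Logger; the test above simply passes the package-level peerLog. A small standalone sketch of why field injection helps, using the standard-library logger for illustration (the peer type here is hypothetical, not lnd's Brontide):

package main

import (
	"io"
	"log"
	"os"
)

// peer carries its own logger, mirroring the new Brontide.log field.
type peer struct {
	log *log.Logger
}

func (p *peer) start() {
	p.log.Println("starting")
}

func main() {
	// Production: a prefixed logger writing to stderr.
	noisy := &peer{log: log.New(os.Stderr, "Peer(02ab): ", log.LstdFlags)}
	noisy.start()

	// Test: inject a silent logger instead of muting a global.
	quiet := &peer{log: log.New(io.Discard, "", 0)}
	quiet.start() // produces no output
}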