multi: enhance logging and fix logging format

Also adds a TODO for a possible bug.
yyforyongyu 2022-11-20 11:14:05 +08:00
parent d793a53bba
commit 6618ab493a
7 changed files with 48 additions and 11 deletions

View File

@@ -497,10 +497,10 @@ func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, error) {
"chan_point=%v", chanState.FundingOutpoint)
}
log.Debugf("ChannelPoint(%v): local_commit_type=%v, local_commit=%v",
log.Tracef("ChannelPoint(%v): local_commit_type=%v, local_commit=%v",
chanState.FundingOutpoint, chanState.ChanType,
spew.Sdump(localCommit))
log.Debugf("ChannelPoint(%v): remote_commit_type=%v, remote_commit=%v",
log.Tracef("ChannelPoint(%v): remote_commit_type=%v, remote_commit=%v",
chanState.FundingOutpoint, chanState.ChanType,
spew.Sdump(remoteCommit))
@@ -527,7 +527,7 @@ func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, error) {
var remotePendingCommit *channeldb.ChannelCommitment
if remoteChainTip != nil {
remotePendingCommit = &remoteChainTip.Commitment
log.Debugf("ChannelPoint(%v): remote_pending_commit_type=%v, "+
log.Tracef("ChannelPoint(%v): remote_pending_commit_type=%v, "+
"remote_pending_commit=%v", chanState.FundingOutpoint,
chanState.ChanType,
spew.Sdump(remoteChainTip.Commitment))

View File

@@ -398,6 +398,7 @@ func (m *ChanStatusManager) processEnableRequest(outpoint wire.OutPoint,
// Channel is already enabled, nothing to do.
case ChanStatusEnabled:
log.Debugf("Channel(%v) already enabled, skipped announcement")
return nil
// The channel is enabled, though we are now canceling the scheduled

View File

@@ -1898,7 +1898,7 @@ func messageSummary(msg lnwire.Message) string {
return fmt.Sprintf("type=%d", msg.Type)
}
return ""
return fmt.Sprintf("unknown msg type=%T", msg)
}
// logWireMessage logs the receipt or sending of a particular wire message. This

View File

@@ -291,6 +291,8 @@ lifecycle:
continue lifecycle
}
log.Tracef("Found route: %s", spew.Sdump(rt.Hops))
// If this route will consume the last remaining amount to send
// to the receiver, this will be our last shard (for now).
lastShard := rt.ReceiverAmt() == currentState.remainingAmt

View File

@@ -105,6 +105,8 @@ func (u *unifiedEdge) amtInRange(amt lnwire.MilliSatoshi) bool {
if u.capacity > 0 &&
amt > lnwire.NewMSatFromSatoshis(u.capacity) {
log.Tracef("Not enough capacity: amt=%v, capacity=%v",
amt, u.capacity)
return false
}
@@ -112,11 +114,15 @@ func (u *unifiedEdge) amtInRange(amt lnwire.MilliSatoshi) bool {
if u.policy.MessageFlags.HasMaxHtlc() &&
amt > u.policy.MaxHTLC {
log.Tracef("Exceeds policy's MaxHTLC: amt=%v, MaxHTLC=%v",
amt, u.policy.MaxHTLC)
return false
}
// Skip channels for which this htlc is too small.
if amt < u.policy.MinHTLC {
log.Tracef("below policy's MinHTLC: amt=%v, MinHTLC=%v",
amt, u.policy.MinHTLC)
return false
}
@@ -155,6 +161,8 @@ func (u *edgeUnifier) getEdgeLocal(amt lnwire.MilliSatoshi,
for _, edge := range u.edges {
// Check valid amount range for the channel.
if !edge.amtInRange(amt) {
log.Debugf("Amount %v not in range for edge %v",
amt, edge.policy.ChannelID)
continue
}
@@ -173,11 +181,26 @@ func (u *edgeUnifier) getEdgeLocal(amt lnwire.MilliSatoshi,
edge.policy.ChannelID, amt,
)
if !ok {
log.Debugf("Cannot get bandwidth for edge %v, use max "+
"instead", edge.policy.ChannelID)
bandwidth = lnwire.MaxMilliSatoshi
}
+ // TODO(yy): if the above `!ok` branch is taken, `bandwidth` is set
+ // to the max value, which causes `maxBandwidth` to end up with the
+ // largest possible value, making this edge the chosen one. This is
+ // wrong in two ways:
+ // 1. we need to understand why `availableChanBandwidth` cannot find
+ // bandwidth for this edge, as something is wrong with this channel,
+ // and,
+ // 2. this edge is likely NOT the local channel with the highest
+ // available bandwidth.
+ //
// Skip channels that can't carry the payment.
if amt > bandwidth {
log.Debugf("Skipped edge %v: not enough bandwidth, "+
"bandwidth=%v, amt=%v", edge.policy.ChannelID,
bandwidth, amt)
continue
}
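The TODO above is easier to see in isolation. Below is a minimal, runnable Go sketch of the selection loop under simplified stand-in types (`msat`, `edge` and `queryBandwidth` are illustrative names, not lnd's API): when the bandwidth query fails and the code falls back to the maximum value, the unknown-bandwidth edge always wins the max-bandwidth comparison.

package main

import "fmt"

// msat stands in for lnwire.MilliSatoshi.
type msat uint64

// maxMsat stands in for lnwire.MaxMilliSatoshi (21M BTC in msat).
const maxMsat msat = 21_000_000 * 100_000_000 * 1_000

// edge models just enough of a local channel edge for the loop.
type edge struct {
	chanID    uint64
	bandwidth msat
	known     bool // whether a bandwidth hint exists for this edge
}

// queryBandwidth plays the role of availableChanBandwidth: the
// lookup can fail for a channel.
func queryBandwidth(e edge) (msat, bool) {
	return e.bandwidth, e.known
}

func main() {
	edges := []edge{
		{chanID: 1, bandwidth: 5_000_000, known: true},
		{chanID: 2, known: false}, // hint lookup will fail
	}

	var (
		best         *edge
		maxBandwidth msat
	)
	for i := range edges {
		bandwidth, ok := queryBandwidth(edges[i])
		if !ok {
			// The fallback the TODO flags: an edge whose real
			// bandwidth is unknown is treated as having the
			// largest possible bandwidth...
			bandwidth = maxMsat
		}
		if bandwidth < maxBandwidth {
			continue
		}
		maxBandwidth = bandwidth
		best = &edges[i]
	}

	// ...so edge 2 always wins here, even though edge 1 may be the
	// local channel with the higher usable bandwidth.
	fmt.Printf("chosen edge: %d\n", best.chanID) // chosen edge: 2
}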
@@ -187,6 +210,9 @@ func (u *edgeUnifier) getEdgeLocal(amt lnwire.MilliSatoshi,
// querying the bandwidth hints and sending out the
// htlc.
if bandwidth < maxBandwidth {
log.Debugf("Skipped edge %v: not max bandwidth, "+
"bandwidth=%v, maxBandwidth=%v",
bandwidth, maxBandwidth)
continue
}
maxBandwidth = bandwidth
@@ -213,6 +239,8 @@ func (u *edgeUnifier) getEdgeNetwork(amt lnwire.MilliSatoshi) *unifiedEdge {
for _, edge := range u.edges {
// Check valid amount range for the channel.
if !edge.amtInRange(amt) {
log.Debugf("Amount %v not in range for edge %v",
amt, edge.policy.ChannelID)
continue
}
@@ -220,6 +248,8 @@ func (u *edgeUnifier) getEdgeNetwork(amt lnwire.MilliSatoshi) *unifiedEdge {
edgeFlags := edge.policy.ChannelFlags
isDisabled := edgeFlags&lnwire.ChanUpdateDisabled != 0
if isDisabled {
log.Debugf("Skipped edge %v due to it being disabled",
edge.policy.ChannelID)
continue
}
@@ -245,6 +275,9 @@ func (u *edgeUnifier) getEdgeNetwork(amt lnwire.MilliSatoshi) *unifiedEdge {
// specific amount.
fee := edge.policy.ComputeFee(amt)
if fee < maxFee {
log.Debugf("Skipped edge %v due to it produces less "+
"fee: fee=%v, maxFee=%v",
edge.policy.ChannelID, fee, maxFee)
continue
}
maxFee = fee

View File

@@ -198,7 +198,7 @@ func (v *ValidationBarrier) WaitForDependants(job interface{}) error {
vertex := route.Vertex(msg.PubKeyBytes)
signals, ok = v.nodeAnnDependencies[vertex]
jobDesc = fmt.Sprintf("job=channeldb.LightningNode, pub=%x",
jobDesc = fmt.Sprintf("job=channeldb.LightningNode, pub=%s",
vertex)
case *lnwire.ChannelUpdate:
@@ -210,7 +210,7 @@ func (v *ValidationBarrier) WaitForDependants(job interface{}) error {
case *lnwire.NodeAnnouncement:
vertex := route.Vertex(msg.NodeID)
signals, ok = v.nodeAnnDependencies[vertex]
jobDesc = fmt.Sprintf("job=lnwire.NodeAnnouncement, pub=%x",
jobDesc = fmt.Sprintf("job=lnwire.NodeAnnouncement, pub=%s",
vertex)
// Other types of jobs can be executed immediately, so we'll just
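What makes the `%x` to `%s` switch a format fix rather than a cosmetic one: `route.Vertex` implements fmt.Stringer (its String method already renders the key as hex), and Go's fmt package applies Stringer to the %s, %q, %x and %X verbs alike, so `%x` hex-encodes the already-hex string a second time. A minimal sketch, with a stand-in Vertex type mirroring lnd's:

package main

import "fmt"

// Vertex mimics route.Vertex: a 33-byte array whose String method
// renders the compressed public key as hex.
type Vertex [33]byte

func (v Vertex) String() string {
	return fmt.Sprintf("%x", v[:])
}

func main() {
	var v Vertex
	v[0] = 0x02

	// %x sees the Stringer, calls String(), then hex-encodes the
	// resulting string, so the key comes out double-encoded:
	fmt.Printf("pub=%x\n", v) // pub=3032303030303030...
	// %s prints String()'s output once:
	fmt.Printf("pub=%s\n", v) // pub=0200000000000000...
}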

View File

@@ -3188,18 +3188,16 @@ func (s *server) prunePersistentPeerConnection(compressedPubKey [33]byte) {
func (s *server) BroadcastMessage(skips map[route.Vertex]struct{},
msgs ...lnwire.Message) error {
srvrLog.Debugf("Broadcasting %v messages", len(msgs))
// Filter out peers found in the skips map. We synchronize access to
// peersByPub throughout this process to ensure we deliver messages to
// the exact set of peers present at the time of invocation.
s.mu.RLock()
peers := make([]*peer.Brontide, 0, len(s.peersByPub))
- for _, sPeer := range s.peersByPub {
+ for pubStr, sPeer := range s.peersByPub {
if skips != nil {
if _, ok := skips[sPeer.PubKey()]; ok {
srvrLog.Tracef("Skipping %x in broadcast",
sPeer.PubKey())
srvrLog.Debugf("Skipping %x in broadcast with "+
"pubStr=%x", sPeer.PubKey(), pubStr)
continue
}
}
@@ -3212,6 +3210,9 @@ func (s *server) BroadcastMessage(skips map[route.Vertex]struct{},
// all messages to each of peers.
var wg sync.WaitGroup
for _, sPeer := range peers {
srvrLog.Debugf("Sending %v messages to peer %x", len(msgs),
sPeer.PubKey())
// Dispatch a go routine to enqueue all messages to this peer.
wg.Add(1)
s.wg.Add(1)
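For context on the hunk above: BroadcastMessage follows a snapshot-under-read-lock pattern, holding the lock only while copying out the peer set and doing the slow sends outside it. A generic, runnable sketch of that pattern (the types and names below are illustrative, not lnd's API; lnd additionally tracks the sends on the server's own WaitGroup for clean shutdown):

package main

import (
	"fmt"
	"sync"
)

type peer struct{ id string }

func (p *peer) send(msg string) { fmt.Printf("%s <- %s\n", p.id, msg) }

type server struct {
	mu         sync.RWMutex
	peersByPub map[string]*peer
}

// broadcast delivers msgs to every peer present at the time of the
// call, except those listed in skips. The read lock guards only the
// snapshot, so slow sends never block connects or disconnects.
func (s *server) broadcast(skips map[string]struct{}, msgs ...string) {
	s.mu.RLock()
	peers := make([]*peer, 0, len(s.peersByPub))
	for pubStr, p := range s.peersByPub {
		if _, ok := skips[pubStr]; ok {
			continue
		}
		peers = append(peers, p)
	}
	s.mu.RUnlock()

	// Fan out: one goroutine per peer enqueues all messages.
	var wg sync.WaitGroup
	for _, p := range peers {
		wg.Add(1)
		go func(p *peer) {
			defer wg.Done()
			for _, m := range msgs {
				p.send(m)
			}
		}(p)
	}
	wg.Wait()
}

func main() {
	s := &server{peersByPub: map[string]*peer{
		"alice": {id: "alice"},
		"bob":   {id: "bob"},
	}}
	s.broadcast(map[string]struct{}{"bob": {}}, "ping")
}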