Mirror of https://github.com/ACINQ/eclair.git

Add more path finding logs (#1597)

Since Phoenix has a low number of channels and discards the whole public
network graph, we can be more aggressive with what we log without flooding
the logs.

This information will be very helpful for troubleshooting issues when users
send multipart payments.
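
For a sense of what the chattiest of the new logs produces: the line added in RouteCalculation renders every edge of the routing graph as shortChannelId->balance/capacity. The standalone Scala sketch below mimics that formatting with simplified placeholder types; Edge, RoutingGraphLogSketch and the sample ids/amounts are illustrative stand-ins, not eclair's real GraphEdge/ShortChannelId API.

// Standalone sketch of the "routing graph: ..." log formatting added in this commit.
// Edge is a simplified stand-in for eclair's graph edge type; all values are made up.
case class Edge(shortChannelId: String, balanceOpt: Option[Long], capacity: Long)

object RoutingGraphLogSketch extends App {
  val edges = Seq(
    Edge("565643x1216x0", Some(120000L), 200000L), // local channel: balance known
    Edge("565789x2341x1", None, 500000L)           // remote channel: balance unknown
  )

  // Same shape as the added log line: s"${e.desc.shortChannelId}->${e.balance_opt}/${e.capacity}"
  val rendered = edges.map(e => s"${e.shortChannelId}->${e.balanceOpt}/${e.capacity}").mkString(", ")
  println(s"routing graph: $rendered")
  // prints: routing graph: 565643x1216x0->Some(120000)/200000, 565789x2341x1->None/500000
}

The output grows linearly with the number of edges, which is why such a dump is only reasonable for a wallet like Phoenix whose local graph stays tiny.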
Bastien Teinturier 2020-11-12 11:05:00 +01:00 committed by GitHub
parent 71f0afcdc0
commit 68209b804c
4 changed files with 17 additions and 14 deletions

MultiPartPaymentLifecycle.scala

@@ -70,7 +70,7 @@ class MultiPartPaymentLifecycle(nodeParams: NodeParams, cfg: SendPaymentConfig,
     case Event(r: SendMultiPartPayment, _) =>
       val routeParams = r.getRouteParams(nodeParams, randomize = false) // we don't randomize the first attempt, regardless of configuration choices
       val maxFee = routeParams.getMaxFee(r.totalAmount)
-      log.debug("sending {} with maximum fee {}", r.totalAmount, maxFee)
+      log.info("sending {} with maximum fee {}", r.totalAmount, maxFee)
       val d = PaymentProgress(sender, r, r.maxAttempts, Map.empty, Ignore.empty, Nil)
       router ! createRouteRequest(nodeParams, r.totalAmount, maxFee, routeParams, d, cfg)
       goto(WAIT_FOR_ROUTES) using d
@@ -106,7 +106,7 @@ class MultiPartPaymentLifecycle(nodeParams: NodeParams, cfg: SendPaymentConfig,
       // the payment. When we're retrying an MPP split, it may make sense to retry those ignored channels because with
       // a different split, they may have enough balance to forward the payment.
       val (toSend, maxFee) = remainingToSend(nodeParams, d.request, d.pending.values)
-      log.debug("retry sending {} with maximum fee {} without ignoring channels ({})", toSend, maxFee, d.ignore.channels.map(_.shortChannelId).mkString(","))
+      log.info("retry sending {} with maximum fee {} without ignoring channels ({})", toSend, maxFee, d.ignore.channels.map(_.shortChannelId).mkString(","))
       val routeParams = d.request.getRouteParams(nodeParams, randomize = true) // we randomize route selection when we retry
       router ! createRouteRequest(nodeParams, toSend, maxFee, routeParams, d, cfg).copy(ignore = d.ignore.emptyChannels())
       retriedFailedChannels = true
@@ -152,7 +152,7 @@ class MultiPartPaymentLifecycle(nodeParams: NodeParams, cfg: SendPaymentConfig,
       val ignore1 = PaymentFailure.updateIgnored(pf.failures, d.ignore)
       val stillPending = d.pending - pf.id
       val (toSend, maxFee) = remainingToSend(nodeParams, d.request, stillPending.values)
-      log.debug("child payment failed, retry sending {} with maximum fee {}", toSend, maxFee)
+      log.info("child payment failed, retry sending {} with maximum fee {}", toSend, maxFee)
       val routeParams = d.request.getRouteParams(nodeParams, randomize = true) // we randomize route selection when we retry
       val d1 = d.copy(pending = stillPending, ignore = ignore1, failures = d.failures ++ pf.failures)
       router ! createRouteRequest(nodeParams, toSend, maxFee, routeParams, d1, cfg)

PaymentLifecycle.scala

@@ -75,7 +75,7 @@ class PaymentLifecycle(nodeParams: NodeParams, cfg: SendPaymentConfig, router: A
       span.tag(Tags.Amount, c.finalPayload.amount.toLong)
       span.tag(Tags.TotalAmount, c.finalPayload.totalAmount.toLong)
       span.tag(Tags.Expiry, c.finalPayload.expiry.toLong)
-      log.debug("sending {} to route {}", c.finalPayload.amount, c.printRoute())
+      log.info("sending {} to route {}", c.finalPayload.amount, c.printRoute())
       val send = SendPayment(c.targetNodeId, c.finalPayload, maxAttempts = 1, assistedRoutes = c.assistedRoutes)
       c.route.fold(
         hops => router ! FinalizeRoute(c.finalPayload.amount, hops, c.assistedRoutes, paymentContext = Some(cfg.paymentContext)),
@@ -91,7 +91,7 @@ class PaymentLifecycle(nodeParams: NodeParams, cfg: SendPaymentConfig, router: A
       span.tag(Tags.Amount, c.finalPayload.amount.toLong)
       span.tag(Tags.TotalAmount, c.finalPayload.totalAmount.toLong)
       span.tag(Tags.Expiry, c.finalPayload.expiry.toLong)
-      log.debug("sending {} to {}", c.finalPayload.amount, c.targetNodeId)
+      log.info("sending {} to {}", c.finalPayload.amount, c.targetNodeId)
       router ! RouteRequest(nodeParams.nodeId, c.targetNodeId, c.finalPayload.amount, c.getMaxFee(nodeParams), c.assistedRoutes, routeParams = c.routeParams, paymentContext = Some(cfg.paymentContext))
       if (cfg.storeInDb) {
         paymentsDb.addOutgoingPayment(OutgoingPayment(id, cfg.parentId, cfg.externalId, paymentHash, PaymentType.Standard, c.finalPayload.amount, cfg.recipientAmount, cfg.recipientNodeId, System.currentTimeMillis, cfg.paymentRequest, OutgoingPaymentStatus.Pending))

RouteCalculation.scala

@@ -87,6 +87,7 @@ object RouteCalculation {
     log.info(s"finding routes ${r.source}->${r.target} with assistedChannels={} ignoreNodes={} ignoreChannels={} excludedChannels={}", assistedChannels.keys.mkString(","), r.ignore.nodes.map(_.value).mkString(","), r.ignore.channels.mkString(","), d.excludedChannels.mkString(","))
     log.info("finding routes with randomize={} params={}", params.randomize, params)
+    log.info("routing graph: {}", d.graph.edgeSet().map(e => s"${e.desc.shortChannelId}->${e.balance_opt}/${e.capacity}").mkString(", "))
     val tags = TagSet.Empty.withTag(Tags.MultiPart, r.allowMultiPart).withTag(Tags.Amount, Tags.amountBucket(r.amount))
     KamonExt.time(Metrics.FindRouteDuration.withTags(tags.withTag(Tags.NumberOfRoutes, routesToFind.toLong))) {
       val result = if (r.allowMultiPart) {

Validation.scala

@@ -267,7 +267,9 @@ object Validation {
   def handleChannelUpdate(d: Data, db: NetworkDb, routerConf: RouterConf, update: Either[LocalChannelUpdate, RemoteChannelUpdate], wasStashed: Boolean = false)(implicit ctx: ActorContext, log: LoggingAdapter): Data = {
     implicit val sender: ActorRef = ctx.self // necessary to preserve origin when sending messages to other actors
     val (uOriginal: ChannelUpdate, origins: Set[GossipOrigin]) = update match {
-      case Left(lcu) => (lcu.channelUpdate, Set(LocalGossip))
+      case Left(lcu) =>
+        log.info("received local channel update for shortChannelId={}, balance={}", lcu.shortChannelId, lcu.commitments.availableBalanceForSend)
+        (lcu.channelUpdate, Set(LocalGossip))
       case Right(rcu) =>
         rcu.origins.collect {
           case RemoteGossip(peerConnection, _) if !wasStashed => // stashed changes have already been acknowledged
@@ -350,11 +352,11 @@ object Validation {
       val pc = d.privateChannels(u.shortChannelId)
       val desc = getDesc(u, pc)
       if (StaleChannels.isStale(u)) {
-        log.debug("ignoring {} (stale)", u)
+        log.info("ignoring {} (stale)", u)
         sendDecision(origins, GossipDecision.Stale(u))
         d
       } else if (pc.getChannelUpdateSameSideAs(u).exists(_.timestamp >= u.timestamp)) {
-        log.debug("ignoring {} (already know same or newer)", u)
+        log.info("ignoring {} (already know same or newer)", u)
         sendDecision(origins, GossipDecision.Duplicate(u))
         d
       } else if (!Announcements.checkSig(u, desc.a)) {
@@ -362,7 +364,7 @@ object Validation {
         sendDecision(origins, GossipDecision.InvalidSignature(u))
         d
       } else if (pc.getChannelUpdateSameSideAs(u).isDefined) {
-        log.debug("updated channel_update for shortChannelId={} public={} flags={} {}", u.shortChannelId, publicChannel, u.channelFlags, u)
+        log.info("updated channel_update for shortChannelId={} public={} flags={} {}", u.shortChannelId, publicChannel, u.channelFlags, u)
         Metrics.channelUpdateRefreshed(u, pc.getChannelUpdateSameSideAs(u).get, publicChannel)
         sendDecision(origins, GossipDecision.Accepted(u))
         ctx.system.eventStream.publish(ChannelUpdatesReceived(u :: Nil))
@@ -375,7 +377,7 @@ object Validation {
         }
         d.copy(privateChannels = d.privateChannels + (u.shortChannelId -> pc1), graph = graph1)
       } else {
-        log.debug("added channel_update for shortChannelId={} public={} flags={} {}", u.shortChannelId, publicChannel, u.channelFlags, u)
+        log.info("added channel_update for shortChannelId={} public={} flags={} {}", u.shortChannelId, publicChannel, u.channelFlags, u)
         sendDecision(origins, GossipDecision.Accepted(u))
         ctx.system.eventStream.publish(ChannelUpdatesReceived(u :: Nil))
         // we also need to update the graph
@@ -444,7 +446,7 @@ object Validation {
       case None =>
         // channel isn't announced and we never heard of it (maybe it is a private channel or maybe it is a public channel that doesn't yet have 6 confirmations)
         // let's create a corresponding private channel and process the channel_update
-        log.debug("adding unannounced local channel to remote={} shortChannelId={}", lcu.remoteNodeId, lcu.shortChannelId)
+        log.info("adding unannounced local channel to remote={} shortChannelId={}", lcu.remoteNodeId, lcu.shortChannelId)
         val pc = PrivateChannel(localNodeId, lcu.remoteNodeId, None, None, ChannelMeta(0 msat, 0 msat)).updateBalances(lcu.commitments)
         val d1 = d.copy(privateChannels = d.privateChannels + (lcu.shortChannelId -> pc))
         handleChannelUpdate(d1, db, routerConf, Left(lcu))
@@ -461,7 +463,7 @@ object Validation {
       d
     } else if (d.privateChannels.contains(shortChannelId)) {
       // the channel was private or public-but-not-yet-announced, let's do the clean up
-      log.debug("removing private local channel and channel_update for channelId={} shortChannelId={}", channelId, shortChannelId)
+      log.info("removing private local channel and channel_update for channelId={} shortChannelId={}", channelId, shortChannelId)
       val desc1 = ChannelDesc(shortChannelId, localNodeId, remoteNodeId)
       val desc2 = ChannelDesc(shortChannelId, remoteNodeId, localNodeId)
       // we remove the corresponding updates from the graph
@@ -480,7 +482,7 @@ object Validation {
     val (publicChannels1, graph1) = d.channels.get(e.shortChannelId) match {
       case Some(pc) =>
         val pc1 = pc.updateBalances(e.commitments)
-        log.debug("public channel balance updated: {}", pc1)
+        log.info("public channel balance updated: {} -> {}", pc1.ann.shortChannelId, pc1.meta_opt)
         val update_opt = if (e.commitments.localParams.nodeId == pc1.ann.nodeId1) pc1.update_1_opt else pc1.update_2_opt
         val graph1 = update_opt.map(u => d.graph.addEdge(desc, u, pc1.capacity, pc1.getBalanceSameSideAs(u))).getOrElse(d.graph)
         (d.channels + (e.shortChannelId -> pc1), graph1)
@@ -490,7 +492,7 @@ object Validation {
     val (privateChannels1, graph2) = d.privateChannels.get(e.shortChannelId) match {
       case Some(pc) =>
         val pc1 = pc.updateBalances(e.commitments)
-        log.debug("private channel balance updated: {}", pc1)
+        log.info("private channel balance updated: {} -> {}", pc1.update_1_opt.orElse(pc1.update_2_opt).map(_.shortChannelId), pc1.meta)
         val update_opt = if (e.commitments.localParams.nodeId == pc1.nodeId1) pc1.update_1_opt else pc1.update_2_opt
         val graph2 = update_opt.map(u => graph1.addEdge(desc, u, pc1.capacity, pc1.getBalanceSameSideAs(u))).getOrElse(graph1)
         (d.privateChannels + (e.shortChannelId -> pc1), graph2)