routing: fix typos and wrap errors

parent 68494fd91d
commit 06bff6f81a
@@ -149,14 +149,14 @@ var (
     DefaultRouterMacFilename = "router.macaroon"
 )
 
-// ServerShell a is shell struct holding a reference to the actual sub-server.
+// ServerShell is a shell struct holding a reference to the actual sub-server.
 // It is used to register the gRPC sub-server with the root server before we
 // have the necessary dependencies to populate the actual sub-server.
 type ServerShell struct {
     RouterServer
 }
 
-// Server is a stand alone sub RPC server which exposes functionality that
+// Server is a stand-alone sub RPC server which exposes functionality that
 // allows clients to route arbitrary payment through the Lightning Network.
 type Server struct {
     started int32 // To be used atomically.
@@ -181,7 +181,7 @@ var _ RouterServer = (*Server)(nil)
 // that contains all external dependencies. If the target macaroon exists, and
 // we're unable to create it, then an error will be returned. We also return
 // the set of permissions that we require as a server. At the time of writing
-// of this documentation, this is the same macaroon as as the admin macaroon.
+// of this documentation, this is the same macaroon as the admin macaroon.
 func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, error) {
     // If the path of the router macaroon wasn't generated, then we'll
     // assume that it's found at the default network directory.
@@ -986,9 +986,8 @@ func (s *Server) SetMissionControlConfig(ctx context.Context,
         AprioriHopProbability: float64(
             req.Config.HopProbability,
         ),
-        AprioriWeight: float64(req.Config.Weight),
-        CapacityFraction: float64(
-            routing.DefaultCapacityFraction),
+        AprioriWeight: float64(req.Config.Weight),
+        CapacityFraction: routing.DefaultCapacityFraction, //nolint:lll
     }
 }
 
@@ -1032,8 +1031,8 @@ func (s *Server) SetMissionControlConfig(ctx context.Context,
 
 // QueryMissionControl exposes the internal mission control state to callers. It
 // is a development feature.
-func (s *Server) QueryMissionControl(ctx context.Context,
-    req *QueryMissionControlRequest) (*QueryMissionControlResponse, error) {
+func (s *Server) QueryMissionControl(_ context.Context,
+    _ *QueryMissionControlRequest) (*QueryMissionControlResponse, error) {
 
     snapshot := s.cfg.RouterBackend.MissionControl.GetHistorySnapshot()
 
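A note on the `_ context.Context` and `_ *QueryMissionControlRequest` renames above: they use Go's blank identifier for parameters the method must accept, because the gRPC interface dictates the signature, but never reads. A minimal sketch of the convention; the `Greeter` interface and types are hypothetical stand-ins, not lnd code:

package main

import "fmt"

// Greeter is a hypothetical interface whose implementations must accept a
// language tag, whether or not they use it.
type Greeter interface {
    Greet(lang, name string) string
}

// englishGreeter satisfies Greeter but has no use for the language tag, so
// it names the parameter _ to make that explicit to readers and linters.
type englishGreeter struct{}

func (englishGreeter) Greet(_, name string) string {
    return fmt.Sprintf("hello, %s", name)
}

func main() {
    var g Greeter = englishGreeter{}
    fmt.Println(g.Greet("en", "world"))
}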
@@ -1080,7 +1079,7 @@ func toRPCPairData(data *routing.TimedPairResult) *PairData {
 
 // XImportMissionControl imports the state provided to our internal mission
 // control. Only entries that are fresher than our existing state will be used.
-func (s *Server) XImportMissionControl(ctx context.Context,
+func (s *Server) XImportMissionControl(_ context.Context,
     req *XImportMissionControlRequest) (*XImportMissionControlResponse,
     error) {
 
@@ -1273,8 +1272,9 @@ func (s *Server) subscribePayment(identifier lntypes.Hash) (
     sub, err := router.Tower.SubscribePayment(identifier)
 
     switch {
-    case err == channeldb.ErrPaymentNotInitiated:
+    case errors.Is(err, channeldb.ErrPaymentNotInitiated):
         return nil, status.Error(codes.NotFound, err.Error())
+
     case err != nil:
         return nil, err
     }
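This hunk is the heart of the "wrap errors" half of the commit: a bare `==` against a sentinel such as channeldb.ErrPaymentNotInitiated matches only that exact value, so it silently breaks as soon as any layer wraps the error with fmt.Errorf's %w verb. errors.Is walks the whole wrap chain instead. A self-contained sketch; the sentinel and the wrapping layer are illustrative, not lnd's actual call path:

package main

import (
    "errors"
    "fmt"
)

// errNotInitiated stands in for a sentinel like channeldb.ErrPaymentNotInitiated.
var errNotInitiated = errors.New("payment not initiated")

// fetch simulates an intermediate layer that wraps the sentinel with %w.
func fetch() error {
    return fmt.Errorf("fetching payment: %w", errNotInitiated)
}

func main() {
    err := fetch()

    // Direct comparison sees only the outer wrapper and fails.
    fmt.Println(err == errNotInitiated) // false

    // errors.Is unwraps the chain and finds the sentinel.
    fmt.Println(errors.Is(err, errNotInitiated)) // true
}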
@@ -1385,7 +1385,7 @@ func (s *Server) trackPaymentStream(context context.Context,
 }
 
 // BuildRoute builds a route from a list of hop addresses.
-func (s *Server) BuildRoute(ctx context.Context,
+func (s *Server) BuildRoute(_ context.Context,
     req *BuildRouteRequest) (*BuildRouteResponse, error) {
 
     // Unmarshall hop list.
@@ -1446,7 +1446,7 @@ func (s *Server) BuildRoute(ctx context.Context,
 
 // SubscribeHtlcEvents creates a uni-directional stream from the server to
 // the client which delivers a stream of htlc events.
-func (s *Server) SubscribeHtlcEvents(req *SubscribeHtlcEventsRequest,
+func (s *Server) SubscribeHtlcEvents(_ *SubscribeHtlcEventsRequest,
     stream Router_SubscribeHtlcEventsServer) error {
 
     htlcClient, err := s.cfg.RouterBackend.SubscribeHtlcEvents()
@@ -1495,7 +1495,7 @@ func (s *Server) SubscribeHtlcEvents(req *SubscribeHtlcEventsRequest,
 
 // HtlcInterceptor is a bidirectional stream for streaming interception
 // requests to the caller.
-// Upon connection it does the following:
+// Upon connection, it does the following:
 // 1. Check if there is already a live stream, if yes it rejects the request.
 // 2. Registered a ForwardInterceptor
 // 3. Delivers to the caller every √√ and detect his answer.
@@ -1525,7 +1525,7 @@ func extractOutPoint(req *UpdateChanStatusRequest) (*wire.OutPoint, error) {
 }
 
 // UpdateChanStatus allows channel state to be set manually.
-func (s *Server) UpdateChanStatus(ctx context.Context,
+func (s *Server) UpdateChanStatus(_ context.Context,
     req *UpdateChanStatusRequest) (*UpdateChanStatusResponse, error) {
 
     outPoint, err := extractOutPoint(req)
@@ -368,7 +368,7 @@ func (p *paymentLifecycle) requestRoute(
     log.Warnf("Failed to find route for payment %v: %v", p.identifier, err)
 
     // If the error belongs to `noRouteError` set, it means a non-critical
-    // error has happened during path finding and we will mark the payment
+    // error has happened during path finding, and we will mark the payment
     // failed with this reason. Otherwise, we'll return the critical error
     // found to abort the lifecycle.
     var routeErr noRouteError
@@ -377,9 +377,9 @@ func (p *paymentLifecycle) requestRoute(
     }
 
     // It's the `paymentSession`'s responsibility to find a route for us
-    // with best effort. When it cannot find a path, we need to treat it as
-    // a terminal condition and fail the payment no matter it has inflight
-    // HTLCs or not.
+    // with the best effort. When it cannot find a path, we need to treat it
+    // as a terminal condition and fail the payment no matter it has
+    // inflight HTLCs or not.
     failureCode := routeErr.FailureReason()
     log.Warnf("Marking payment %v permanently failed with no route: %v",
         p.identifier, failureCode)
@@ -415,7 +415,7 @@ type attemptResult struct {
 
 // collectResultAsync launches a goroutine that will wait for the result of the
 // given HTLC attempt to be available then handle its result. Once received, it
-// will send a nil error to channel `resultCollected` to indicate there's an
+// will send a nil error to channel `resultCollected` to indicate there's a
 // result.
 func (p *paymentLifecycle) collectResultAsync(attempt *channeldb.HTLCAttempt) {
     log.Debugf("Collecting result for attempt %v in payment %v",
@@ -484,7 +484,7 @@ func (p *paymentLifecycle) collectResult(attempt *channeldb.HTLCAttempt) (
         return p.failAttempt(attempt.AttemptID, err)
     }
 
-    // Using the created circuit, initialize the error decrypter so we can
+    // Using the created circuit, initialize the error decrypter, so we can
     // parse+decode any failures incurred by this payment within the
     // switch.
     errorDecryptor := &htlcswitch.SphinxErrorDecrypter{
@@ -786,7 +786,7 @@ func (p *paymentLifecycle) handleSwitchErr(attempt *channeldb.HTLCAttempt,
         return p.failAttempt(attemptID, sendErr)
     }
 
-    if sendErr == htlcswitch.ErrUnreadableFailureMessage {
+    if errors.Is(sendErr, htlcswitch.ErrUnreadableFailureMessage) {
         log.Warn("Unreadable failure when sending htlc: id=%v, hash=%v",
             attempt.AttemptID, attempt.Hash)
 
@@ -801,7 +801,8 @@ func (p *paymentLifecycle) handleSwitchErr(attempt *channeldb.HTLCAttempt,
     // down the route. If the error is not related to the propagation of
     // our payment, we can stop trying because an internal error has
     // occurred.
-    rtErr, ok := sendErr.(htlcswitch.ClearTextError)
+    var rtErr htlcswitch.ClearTextError
+    ok := errors.As(sendErr, &rtErr)
     if !ok {
         return p.failPaymentAndAttempt(
             attemptID, &internalErrorReason, sendErr,
@@ -815,7 +816,8 @@ func (p *paymentLifecycle) handleSwitchErr(attempt *channeldb.HTLCAttempt,
     // ForwardingError, it did not originate at our node, so we set
     // failureSourceIdx to the index of the node where the failure occurred.
     failureSourceIdx := 0
-    source, ok := rtErr.(*htlcswitch.ForwardingError)
+    var source *htlcswitch.ForwardingError
+    ok = errors.As(rtErr, &source)
     if ok {
         failureSourceIdx = source.FailureSourceIdx
     }
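errors.As is the typed counterpart used in these two hunks: the old type assertions `sendErr.(htlcswitch.ClearTextError)` and `rtErr.(*htlcswitch.ForwardingError)` inspect only the outermost error, while errors.As searches the wrap chain for the first value assignable to the target, which may be an interface or a concrete pointer type. A hedged sketch with a stand-in type, not the real htlcswitch definitions:

package main

import (
    "errors"
    "fmt"
)

// forwardingError stands in for a concrete error type such as
// *htlcswitch.ForwardingError, carrying structured data.
type forwardingError struct {
    sourceIdx int
}

func (e *forwardingError) Error() string {
    return fmt.Sprintf("forwarding failed at hop %d", e.sourceIdx)
}

func main() {
    // The typed error arrives wrapped, so a plain type assertion on err
    // would report !ok even though the typed error is in the chain.
    err := fmt.Errorf("payment attempt: %w", &forwardingError{sourceIdx: 2})

    var fErr *forwardingError
    if errors.As(err, &fErr) {
        // errors.As assigned the embedded typed error to fErr, so its
        // fields are directly usable here.
        fmt.Println("failure source index:", fErr.sourceIdx)
    }
}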
@@ -863,7 +865,7 @@ func (p *paymentLifecycle) handleFailureMessage(rt *route.Route,
 
     // Parse pubkey to allow validation of the channel update. This should
     // always succeed, otherwise there is something wrong in our
-    // implementation. Therefore return an error.
+    // implementation. Therefore, return an error.
     errVertex := rt.Hops[errorSourceIdx-1].PubKeyBytes
     errSource, err := btcec.ParsePubKey(errVertex[:])
     if err != nil {
@@ -951,17 +953,18 @@ func marshallError(sendError error, time time.Time) *channeldb.HTLCFailInfo {
         FailTime: time,
     }
 
-    switch sendError {
-    case htlcswitch.ErrPaymentIDNotFound:
+    switch {
+    case errors.Is(sendError, htlcswitch.ErrPaymentIDNotFound):
         response.Reason = channeldb.HTLCFailInternal
         return response
 
-    case htlcswitch.ErrUnreadableFailureMessage:
+    case errors.Is(sendError, htlcswitch.ErrUnreadableFailureMessage):
         response.Reason = channeldb.HTLCFailUnreadable
         return response
     }
 
-    rtErr, ok := sendError.(htlcswitch.ClearTextError)
+    var rtErr htlcswitch.ClearTextError
+    ok := errors.As(sendError, &rtErr)
     if !ok {
         response.Reason = channeldb.HTLCFailInternal
         return response
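The switch rewrite in this hunk exists for the same reason as the earlier `==` fixes: `switch sendError { case sentinel: }` compares each case with `==`, so it has the identical blind spot for wrapped errors. Converting to an expressionless `switch { case errors.Is(...): }` keeps the tidy multi-case shape while matching through wrap chains. A standalone illustration with stand-in sentinels:

package main

import (
    "errors"
    "fmt"
)

// Stand-ins for sentinels like htlcswitch.ErrPaymentIDNotFound and
// htlcswitch.ErrUnreadableFailureMessage.
var (
    errIDNotFound = errors.New("payment ID not found")
    errUnreadable = errors.New("unreadable failure message")
)

// classify mirrors the marshallError pattern: each case is a boolean
// expression, so errors.Is can look through wrapping.
func classify(err error) string {
    switch {
    case errors.Is(err, errIDNotFound):
        return "internal"
    case errors.Is(err, errUnreadable):
        return "unreadable"
    default:
        return "unknown"
    }
}

func main() {
    wrapped := fmt.Errorf("htlc result: %w", errUnreadable)
    fmt.Println(classify(wrapped)) // unreadable
}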
@@ -981,7 +984,8 @@ func marshallError(sendError error, time time.Time) *channeldb.HTLCFailInfo {
     // failure occurred. If the error is not a ForwardingError, the failure
     // occurred at our node, so we leave the index as 0 to indicate that
     // we failed locally.
-    fErr, ok := rtErr.(*htlcswitch.ForwardingError)
+    var fErr *htlcswitch.ForwardingError
+    ok = errors.As(rtErr, &fErr)
     if ok {
         response.FailureSourceIndex = uint32(fErr.FailureSourceIdx)
     }
@@ -45,11 +45,11 @@ const (
     // DefaultPayAttemptTimeout is the default payment attempt timeout. The
     // payment attempt timeout defines the duration after which we stop
     // trying more routes for a payment.
-    DefaultPayAttemptTimeout = time.Duration(time.Second * 60)
+    DefaultPayAttemptTimeout = time.Second * 60
 
     // DefaultChannelPruneExpiry is the default duration used to determine
     // if a channel should be pruned or not.
-    DefaultChannelPruneExpiry = time.Duration(time.Hour * 24 * 14)
+    DefaultChannelPruneExpiry = time.Hour * 24 * 14
 
     // DefaultFirstTimePruneDelay is the time we'll wait after startup
     // before attempting to prune the graph for zombie channels. We don't
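The dropped `time.Duration(...)` wrappers were no-ops: time.Second is already a time.Duration, and multiplying a Duration by an untyped constant yields a Duration, so the conversion only added noise. A quick demonstration (constant names are illustrative):

package main

import (
    "fmt"
    "time"
)

const (
    // Multiplying time.Second, itself a time.Duration, by the untyped
    // constant 60 already produces a time.Duration.
    payAttemptTimeout = time.Second * 60

    // The explicit conversion is legal but redundant.
    payAttemptTimeoutVerbose = time.Duration(time.Second * 60)
)

func main() {
    fmt.Println(payAttemptTimeout == payAttemptTimeoutVerbose) // true
    fmt.Printf("%T\n", payAttemptTimeout)                      // time.Duration
}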
@@ -376,9 +376,9 @@ type Config struct {
     FirstTimePruneDelay time.Duration
 
     // QueryBandwidth is a method that allows the router to query the lower
-    // link layer to determine the up to date available bandwidth at a
+    // link layer to determine the up-to-date available bandwidth at a
     // prospective link to be traversed. If the link isn't available, then
-    // a value of zero should be returned. Otherwise, the current up to
+    // a value of zero should be returned. Otherwise, the current up-to-
     // date knowledge of the available bandwidth of the link should be
     // returned.
     GetLink getLinkQuery
@@ -389,7 +389,7 @@ type Config struct {
     // the switch can properly handle the HTLC.
     NextPaymentID func() (uint64, error)
 
-    // AssumeChannelValid toggles whether or not the router will check for
+    // AssumeChannelValid toggles whether the router will check for
     // spentness of channel outpoints. For neutrino, this saves long rescans
     // from blocking initial usage of the daemon.
     AssumeChannelValid bool
@@ -423,7 +423,7 @@ type EdgeLocator struct {
     Direction uint8
 }
 
-// String returns a human readable version of the edgeLocator values.
+// String returns a human-readable version of the edgeLocator values.
 func (e *EdgeLocator) String() string {
     return fmt.Sprintf("%v:%v", e.ChannelID, e.Direction)
 }
@@ -551,9 +551,10 @@ func (r *ChannelRouter) Start() error {
         // then we don't treat this as an explicit error.
         if _, _, err := r.cfg.Graph.PruneTip(); err != nil {
             switch {
-            case err == channeldb.ErrGraphNeverPruned:
+            case errors.Is(err, channeldb.ErrGraphNeverPruned):
                 fallthrough
-            case err == channeldb.ErrGraphNotFound:
+
+            case errors.Is(err, channeldb.ErrGraphNotFound):
                 // If the graph has never been pruned, then we'll set
                 // the prune height to the current best height of the
                 // chain backend.
@@ -563,6 +564,7 @@ func (r *ChannelRouter) Start() error {
                 if err != nil {
                     return err
                 }
+
             default:
                 return err
             }
@@ -603,7 +605,10 @@ func (r *ChannelRouter) Start() error {
         // we may miss on-chain events as the filter hasn't properly
         // been applied.
         channelView, err := r.cfg.Graph.ChannelView()
-        if err != nil && err != channeldb.ErrGraphNoEdgesFound {
+        if err != nil && !errors.Is(
+            err, channeldb.ErrGraphNoEdgesFound,
+        ) {
+
             return err
         }
 
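The guard pattern introduced here, `err != nil && !errors.Is(err, sentinel)`, treats one specific error as benign and propagates everything else; the blank line after the multi-line condition follows lnd's formatting convention for wrapped conditions. A small sketch of the guard with illustrative names:

package main

import (
    "errors"
    "fmt"
)

// errNoEdges stands in for a benign sentinel such as
// channeldb.ErrGraphNoEdgesFound.
var errNoEdges = errors.New("no edges found")

// channelView simulates a lookup that reports an empty graph through the
// sentinel rather than through a real failure.
func channelView() ([]string, error) {
    return nil, errNoEdges
}

func start() error {
    view, err := channelView()
    // Propagate only errors other than the benign sentinel; an empty
    // view simply means zero channels to process.
    if err != nil && !errors.Is(err, errNoEdges) {
        return err
    }

    fmt.Println("channels in view:", len(view))
    return nil
}

func main() {
    if err := start(); err != nil {
        fmt.Println("start failed:", err)
    }
}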
@@ -638,7 +643,10 @@ func (r *ChannelRouter) Start() error {
         // from the graph in order to ensure we maintain a tight graph
         // of "useful" nodes.
         err = r.cfg.Graph.PruneGraphNodes()
-        if err != nil && err != channeldb.ErrGraphNodesNotFound {
+        if err != nil && !errors.Is(
+            err, channeldb.ErrGraphNodesNotFound,
+        ) {
+
             return err
         }
     }
@@ -776,8 +784,8 @@ func (r *ChannelRouter) syncGraphWithChain() error {
         switch {
         // If the graph has never been pruned, or hasn't fully been
         // created yet, then we don't treat this as an explicit error.
-        case err == channeldb.ErrGraphNeverPruned:
-        case err == channeldb.ErrGraphNotFound:
+        case errors.Is(err, channeldb.ErrGraphNeverPruned):
+        case errors.Is(err, channeldb.ErrGraphNotFound):
         default:
             return err
         }
@@ -826,10 +834,12 @@ func (r *ChannelRouter) syncGraphWithChain() error {
             // can exit as this entails we are back to the point
             // where it hasn't seen any block or created channels,
             // alas there's nothing left to prune.
-            case err == channeldb.ErrGraphNeverPruned:
+            case errors.Is(err, channeldb.ErrGraphNeverPruned):
                 return nil
-            case err == channeldb.ErrGraphNotFound:
+
+            case errors.Is(err, channeldb.ErrGraphNotFound):
                 return nil
+
             default:
                 return err
             }
@@ -1047,7 +1057,10 @@ func (r *ChannelRouter) pruneZombieChans() error {
     }
 
     for _, u := range oldEdges {
-        filterPruneChans(u.Info, u.Policy1, u.Policy2)
+        err = filterPruneChans(u.Info, u.Policy1, u.Policy2)
+        if err != nil {
+            log.Warnf("Filter pruning channels: %w\n", err)
+        }
     }
 
     log.Infof("Pruning %v zombie channels", len(chansToPrune))
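One caveat worth flagging on this hunk: the %w verb is special-cased by fmt.Errorf, which uses it to build a wrapped error; plain Printf-style formatters do not recognize it and render it as a bad-verb marker. Assuming lnd's log.Warnf follows Printf semantics, %v would be the conventional verb here, with %w reserved for places that construct a new error. A demonstration using only the standard library:

package main

import (
    "errors"
    "fmt"
)

var errPrune = errors.New("stale policy")

func main() {
    // fmt.Errorf understands %w and produces a wrapped error.
    wrapped := fmt.Errorf("filter pruning channels: %w", errPrune)
    fmt.Println(errors.Is(wrapped, errPrune)) // true

    // Plain Printf-style formatting does not understand %w; it prints a
    // bad-verb marker instead of the error text:
    //   filter pruning channels: %!w(*errors.errorString=&{stale policy})
    fmt.Printf("filter pruning channels: %w\n", errPrune)

    // %v is the idiomatic verb when only printing the error.
    fmt.Printf("filter pruning channels: %v\n", errPrune)
}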
@@ -1072,7 +1085,7 @@ func (r *ChannelRouter) pruneZombieChans() error {
     // With the channels pruned, we'll also attempt to prune any nodes that
     // were a part of them.
     err = r.cfg.Graph.PruneGraphNodes()
-    if err != nil && err != channeldb.ErrGraphNodesNotFound {
+    if err != nil && !errors.Is(err, channeldb.ErrGraphNodesNotFound) {
         return fmt.Errorf("unable to prune graph nodes: %w", err)
     }
 
@@ -1125,7 +1138,7 @@ func (r *ChannelRouter) handleNetworkUpdate(vb *ValidationBarrier,
     if err != nil {
         // We now decide to log an error or not. If allowDependents is
         // false, it means there is an error and the error is neither
-        // ErrIgnored or ErrOutdated. In this case, we'll log an error.
+        // ErrIgnored nor ErrOutdated. In this case, we'll log an error.
         // Otherwise, we'll add debug log only.
         if allowDependents {
             log.Debugf("process network updates got: %v", err)
@@ -1225,7 +1238,7 @@ func (r *ChannelRouter) networkHandler() {
 
             // Since this block is stale, we update our best height
             // to the previous block.
-            blockHeight := uint32(chainUpdate.Height)
+            blockHeight := chainUpdate.Height
             atomic.StoreUint32(&r.bestHeight, blockHeight-1)
 
             // Update the channel graph to reflect that this block
@@ -1485,9 +1498,9 @@ func (r *ChannelRouter) assertNodeAnnFreshness(node route.Vertex,
 }
 
 // addZombieEdge adds a channel that failed complete validation into the zombie
-// index so we can avoid having to re-validate it in the future.
+// index, so we can avoid having to re-validate it in the future.
 func (r *ChannelRouter) addZombieEdge(chanID uint64) error {
-    // If the edge fails validation we'll mark the edge itself as a zombie
+    // If the edge fails validation we'll mark the edge itself as a zombie,
     // so we don't continue to request it. We use the "zero key" for both
     // node pubkeys so this edge can't be resurrected.
     var zeroKey [33]byte
@@ -1598,7 +1611,10 @@ func (r *ChannelRouter) processUpdate(msg interface{},
         _, _, exists, isZombie, err := r.cfg.Graph.HasChannelEdge(
             msg.ChannelID,
         )
-        if err != nil && err != channeldb.ErrGraphNoEdgesFound {
+        if err != nil && !errors.Is(
+            err, channeldb.ErrGraphNoEdgesFound,
+        ) {
+
             return errors.Errorf("unable to check for edge "+
                 "existence: %v", err)
         }
@@ -1655,7 +1671,7 @@ func (r *ChannelRouter) processUpdate(msg interface{},
             case strings.Contains(err.Error(), "out of range"):
                 // If the funding transaction isn't found at
                 // all, then we'll mark the edge itself as a
-                // zombie so we don't continue to request it.
+                // zombie, so we don't continue to request it.
                 // We use the "zero key" for both node pubkeys
                 // so this edge can't be resurrected.
                 zErr := r.addZombieEdge(msg.ChannelID)
@@ -1681,8 +1697,8 @@ func (r *ChannelRouter) processUpdate(msg interface{},
             return err
         }
 
-        // Next we'll validate that this channel is actually well
-        // formed. If this check fails, then this channel either
+        // Next we'll validate that this channel is actually
+        // well-formed. If this check fails, then this channel either
         // doesn't exist, or isn't the one that was meant to be created
         // according to the passed channel proofs.
         fundingPoint, err := chanvalidate.Validate(&chanvalidate.Context{
@@ -1693,7 +1709,7 @@ func (r *ChannelRouter) processUpdate(msg interface{},
             FundingTx: fundingTx,
         })
         if err != nil {
-            // Mark the edge as a zombie so we won't try to
+            // Mark the edge as a zombie, so we won't try to
             // re-validate it on start up.
             if err := r.addZombieEdge(msg.ChannelID); err != nil {
                 return err
@@ -1705,7 +1721,7 @@ func (r *ChannelRouter) processUpdate(msg interface{},
 
         // Now that we have the funding outpoint of the channel, ensure
         // that it hasn't yet been spent. If so, then this channel has
-        // been closed so we'll ignore it.
+        // been closed, so we'll ignore it.
         chanUtxo, err := r.cfg.Chain.GetUtxo(
             fundingPoint, fundingPkScript, channelID.BlockHeight,
             r.quit,
@@ -1740,7 +1756,7 @@ func (r *ChannelRouter) processUpdate(msg interface{},
 
         // As a new edge has been added to the channel graph, we'll
         // update the current UTXO filter within our active
-        // FilteredChainView so we are notified if/when this channel is
+        // FilteredChainView, so we are notified if/when this channel is
         // closed.
         filterUpdate := []channeldb.EdgePoint{
             {
@@ -1768,7 +1784,10 @@ func (r *ChannelRouter) processUpdate(msg interface{},
 
         edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
             r.cfg.Graph.HasChannelEdge(msg.ChannelID)
-        if err != nil && err != channeldb.ErrGraphNoEdgesFound {
+        if err != nil && !errors.Is(
+            err, channeldb.ErrGraphNoEdgesFound,
+        ) {
+
             return errors.Errorf("unable to check for edge "+
                 "existence: %v", err)
 
@@ -1795,8 +1814,8 @@ func (r *ChannelRouter) processUpdate(msg interface{},
         }
 
         // As edges are directional edge node has a unique policy for
-        // the direction of the edge they control. Therefore we first
-        // check if we already have the most up to date information for
+        // the direction of the edge they control. Therefore, we first
+        // check if we already have the most up-to-date information for
         // that edge. If this message has a timestamp not strictly
         // newer than what we already know of we can exit early.
         switch {
@@ -2099,8 +2118,8 @@ func (r *ChannelRouter) FindRoute(req *RouteRequest) (*route.Route, float64,
         return nil, 0, err
     }
 
-    // We'll fetch the current block height so we can properly calculate the
-    // required HTLC time locks within the route.
+    // We'll fetch the current block height, so we can properly calculate
+    // the required HTLC time locks within the route.
     _, currentHeight, err := r.cfg.Chain.GetBestBlock()
     if err != nil {
         return nil, 0, err
@@ -2172,7 +2191,7 @@ func generateSphinxPacket(rt *route.Route, paymentHash []byte,
     // Now that we know we have an actual route, we'll map the route into a
     // sphinx payment path which includes per-hop payloads for each hop
     // that give each node within the route the necessary information
-    // (fees, CLTV value, etc) to properly forward the payment.
+    // (fees, CLTV value, etc.) to properly forward the payment.
     sphinxPath, err := rt.ToSphinxPath()
     if err != nil {
         return nil, nil, err
@@ -2285,11 +2304,11 @@ type LightningPayment struct {
     LastHop *route.Vertex
 
     // DestFeatures specifies the set of features we assume the final node
-    // has for pathfinding. Typically these will be taken directly from an
+    // has for pathfinding. Typically, these will be taken directly from an
     // invoice, but they can also be manually supplied or assumed by the
     // sender. If a nil feature vector is provided, the router will try to
-    // fallback to the graph in order to load a feature vector for a node in
-    // the public graph.
+    // fall back to the graph in order to load a feature vector for a node
+    // in the public graph.
     DestFeatures *lnwire.FeatureVector
 
     // PaymentAddr is the payment address specified by the receiver. This
@@ -2520,8 +2539,8 @@ func (r *ChannelRouter) sendToRoute(htlcHash lntypes.Hash, rt *route.Route,
     // Calculate amount paid to receiver.
     amt := rt.ReceiverAmt()
 
-    // If this is meant as a MP payment shard, we set the amount
-    // for the creating info to the total amount of the payment.
+    // If this is meant as an MP payment shard, we set the amount for the
+    // creating info to the total amount of the payment.
     finalHop := rt.Hops[len(rt.Hops)-1]
     mpp := finalHop.MPP
     if mpp != nil {
@@ -2575,7 +2594,7 @@ func (r *ChannelRouter) sendToRoute(htlcHash lntypes.Hash, rt *route.Route,
 
     // Since the HTLC hashes and preimages are specified manually over the
     // RPC for SendToRoute requests, we don't have to worry about creating
-    // a ShardTracker that can generate hashes for AMP payments. Instead we
+    // a ShardTracker that can generate hashes for AMP payments. Instead, we
     // create a simple tracker that can just return the hash for the single
     // shard we'll now launch.
     shardTracker := shards.NewSimpleShardTracker(htlcHash, nil)
@@ -2608,7 +2627,7 @@ func (r *ChannelRouter) sendToRoute(htlcHash lntypes.Hash, rt *route.Route,
         return nil, err
     }
 
-    // We now lookup the payment to see if it's already failed.
+    // We now look up the payment to see if it's already failed.
     payment, err := p.router.cfg.Control.FetchPayment(p.identifier)
     if err != nil {
         return result.attempt, err
@@ -2677,7 +2696,7 @@ func (r *ChannelRouter) sendToRoute(htlcHash lntypes.Hash, rt *route.Route,
 // returned.
 //
 // This method relies on the ControlTower's internal payment state machine to
-// carry out its execution. After restarts it is safe, and assumed, that the
+// carry out its execution. After restarts, it is safe, and assumed, that the
 // router will call this method for every payment still in-flight according to
 // the ControlTower.
 func (r *ChannelRouter) sendPayment(feeLimit lnwire.MilliSatoshi,
@@ -2685,7 +2704,7 @@ func (r *ChannelRouter) sendPayment(feeLimit lnwire.MilliSatoshi,
     paySession PaymentSession,
     shardTracker shards.ShardTracker) ([32]byte, *route.Route, error) {
 
-    // We'll also fetch the current block height so we can properly
+    // We'll also fetch the current block height, so we can properly
     // calculate the required HTLC time locks within the route.
     _, currentHeight, err := r.cfg.Chain.GetBestBlock()
     if err != nil {
@@ -3022,8 +3041,8 @@ func (r *ChannelRouter) IsStaleEdgePolicy(chanID lnwire.ShortChannelID,
     }
 
     // As edges are directional edge node has a unique policy for the
-    // direction of the edge they control. Therefore we first check if we
-    // already have the most up to date information for that edge. If so,
+    // direction of the edge they control. Therefore, we first check if we
+    // already have the most up-to-date information for that edge. If so,
     // then we can exit early.
     switch {
     // A flag set of 0 indicates this is an announcement for the "first"
@@ -3054,7 +3073,7 @@ type ErrNoChannel struct {
     fromNode route.Vertex
 }
 
-// Error returns a human readable string describing the error.
+// Error returns a human-readable string describing the error.
 func (e ErrNoChannel) Error() string {
     return fmt.Sprintf("no matching outgoing channel available for "+
         "node %v (%v)", e.position, e.fromNode)