trivial: Fix spelling errors

- Fixes some spelling in code comments and a couple of function names
Michael Rooke 2023-09-20 11:37:32 -04:00
parent ec585431a9
commit 78d9996620
GPG Key ID: 461B76D5649DC759
30 changed files with 65 additions and 52 deletions

View File

@@ -141,7 +141,7 @@ type Agent struct {
// time.
chanOpenFailures chan *chanOpenFailureUpdate
// heuristicUpdates is a channel where updates from active heurstics
// heuristicUpdates is a channel where updates from active heuristics
// will be sent.
heuristicUpdates chan *heuristicUpdate

View File

@@ -31,7 +31,7 @@ type AgentConstraints interface {
MaxChanSize() btcutil.Amount
}
// agenConstraints is an implementation of the AgentConstraints interface that
// agentConstraints is an implementation of the AgentConstraints interface that
// indicate the constraints the autopilot agent must adhere to when opening
// channels.
type agentConstraints struct {

View File

@@ -293,7 +293,7 @@ func (m *Manager) queryHeuristics(nodes map[NodeID]struct{}, localState bool) (
HeuristicScores, error) {
// If we want to take the local state into action when querying the
// heuristics, we fetch it. If not we'll just pass an emply slice to
// heuristics, we fetch it. If not we'll just pass an empty slice to
// the heuristic.
var totalChans []LocalChannel
var err error

View File

@@ -7,7 +7,7 @@ package autopilot
// where 0 is maximally costly.
const diameterCutoff = 0.75
// SimpleGraph stores a simplifed adj graph of a channel graph to speed
// SimpleGraph stores a simplified adj graph of a channel graph to speed
// up graph processing by eliminating all unnecessary hashing and map access.
type SimpleGraph struct {
// Nodes is a map from node index to NodeID.
@@ -26,7 +26,7 @@ func NewSimpleGraph(g ChannelGraph) (*SimpleGraph, error) {
nextIndex := 0
// getNodeIndex returns the integer index of the passed node.
// The returned index is then used to create a simplifed adjacency list
// The returned index is then used to create a simplified adjacency list
// where each node is identified by its index instead of its pubkey, and
// also to create a mapping from node index to node pubkey.
getNodeIndex := func(node Node) int {

View File

@@ -66,7 +66,7 @@ func testTopCentrality(t *testing.T, graph testGraph,
}
}
// TestTopCentrality tests that we return the correct normalized centralitiy
// TestTopCentrality tests that we return the correct normalized centrality
// values given a non empty graph, and given our node has an increasing amount
// of channels from 0 to N-1 simulating the whole range from non-connected to
// fully connected.

View File

@@ -79,7 +79,7 @@ func TestIsOutdatedCert(t *testing.T) {
}
// TestIsOutdatedPermutation tests that the order of listed IPs or DNS names,
// nor dulicates in the lists, matter for whether we consider the certificate
// nor duplicates in the lists, matter for whether we consider the certificate
// outdated.
func TestIsOutdatedPermutation(t *testing.T) {
tempDir := t.TempDir()

View File

@@ -67,7 +67,7 @@ func (b *BtcWallet) FundPsbt(packet *psbt.Packet, minConfs int32,
switch accountName {
// For default accounts and single imported public keys, we'll provide a
// nil key scope to FundPsbt, allowing it to select nputs from all
// nil key scope to FundPsbt, allowing it to select inputs from all
// scopes (NP2WKH, P2WKH, P2TR). By default, the change key scope for
// these accounts will be P2WKH.
case lnwallet.DefaultAccountName:

View File

@@ -189,7 +189,7 @@ func TestWebAPIFeeEstimator(t *testing.T) {
expectedErr: "",
},
{
// When requested target is smaller than the min cahced
// When requested target is smaller than the min cached
// target, the fee rate of the min cached target is
// returned.
name: "API-omitted_target",

View File

@@ -39,10 +39,10 @@ var (
ErrUpfrontShutdownScriptMismatch = fmt.Errorf("shutdown script does not " +
"match upfront shutdown script")
// ErrProposalExeceedsMaxFee is returned when as the initiator, the
// ErrProposalExceedsMaxFee is returned when as the initiator, the
// latest fee proposal sent by the responder exceed our max fee.
// responder.
ErrProposalExeceedsMaxFee = fmt.Errorf("latest fee proposal exceeds " +
ErrProposalExceedsMaxFee = fmt.Errorf("latest fee proposal exceeds " +
"max fee")
// ErrInvalidShutdownScript is returned when we receive an address from
@@ -526,7 +526,7 @@ func (c *ChanCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message,
// use this when we craft the closure transaction.
c.remoteDeliveryScript = shutdownMsg.Address
// Now that we know their desried delivery script, we can
// Now that we know their desired delivery script, we can
// compute what our max/ideal fee will be.
c.initFeeBaseline()
@@ -722,7 +722,7 @@ func (c *ChanCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message,
)
if c.cfg.Channel.IsInitiator() && feeProposal > c.maxFee {
return nil, false, fmt.Errorf("%w: %v > %v",
ErrProposalExeceedsMaxFee, feeProposal,
ErrProposalExceedsMaxFee, feeProposal,
c.maxFee)
}

View File

@@ -402,12 +402,16 @@ func TestMaxFeeBailOut(t *testing.T) {
// If we're the initiator, then we expect an error at
// this point.
case true:
require.ErrorIs(t, err, ErrProposalExeceedsMaxFee)
require.ErrorIs(
t, err, ErrProposalExceedsMaxFee,
)
// Otherwise, we expect things to fail for some other
// reason (invalid sig, etc).
case false:
require.NotErrorIs(t, err, ErrProposalExeceedsMaxFee)
require.NotErrorIs(
t, err, ErrProposalExceedsMaxFee,
)
}
})
}

View File

@@ -271,7 +271,7 @@ func CoinSelectUpToAmount(feeRate chainfee.SatPerKWeight, minAmount, maxAmount,
)
// Get total balance from coins which we need for reserve considerations
// and fee santiy checks.
// and fee sanity checks.
var totalBalance btcutil.Amount
for _, coin := range coins {
totalBalance += btcutil.Amount(coin.Value)

View File

@@ -35,7 +35,7 @@ const (
// DataLossProtectRequired is a feature bit that indicates that a peer
// *requires* the other party know about the data-loss-protect optional
// feature. If the remote peer does not know of such a feature, then
// the sending peer SHOLUD disconnect them. The data-loss-protect
// the sending peer SHOULD disconnect them. The data-loss-protect
// feature allows a peer that's lost partial data to recover their
// settled funds of the latest commitment state.
DataLossProtectRequired FeatureBit = 0

View File

@@ -254,7 +254,7 @@ func BenchmarkReadMessage(b *testing.B) {
// type.
//
// TODO(yy): the following testing messages are created somewhat arbitrary. We
// should standardlize each of the testing messages so that a better baseline
// should standardize each of the testing messages so that a better baseline
// can be used.
func makeAllMessages(t testing.TB, r *rand.Rand) []lnwire.Message {
msgAll := []lnwire.Message{}

View File

@@ -589,14 +589,14 @@ func (f *FailInvalidOnionKey) Error() string {
return fmt.Sprintf("InvalidOnionKey(onion_sha=%x)", f.OnionSHA256[:])
}
// parseChannelUpdateCompatabilityMode will attempt to parse a channel updated
// parseChannelUpdateCompatibilityMode will attempt to parse a channel updated
// encoded into an onion error payload in two ways. First, we'll try the
// compatibility oriented version wherein we'll _skip_ the length prefixing on
// the channel update message. Older versions of c-lighting do this so we'll
// attempt to parse these messages in order to retain compatibility. If we're
// unable to pull out a fully valid version, then we'll fall back to the
// regular parsing mechanism which includes the length prefix an NO type byte.
func parseChannelUpdateCompatabilityMode(reader io.Reader, length uint16,
func parseChannelUpdateCompatibilityMode(reader io.Reader, length uint16,
chanUpdate *ChannelUpdate, pver uint32) error {
// Instantiate a LimitReader because there may be additional data
@@ -683,7 +683,8 @@ func (f *FailTemporaryChannelFailure) Decode(r io.Reader, pver uint32) error {
if length != 0 {
f.Update = &ChannelUpdate{}
return parseChannelUpdateCompatabilityMode(
return parseChannelUpdateCompatibilityMode(
r, length, f.Update, pver,
)
}
@@ -767,7 +768,8 @@ func (f *FailAmountBelowMinimum) Decode(r io.Reader, pver uint32) error {
}
f.Update = ChannelUpdate{}
return parseChannelUpdateCompatabilityMode(
return parseChannelUpdateCompatibilityMode(
r, length, &f.Update, pver,
)
}
@@ -835,7 +837,8 @@ func (f *FailFeeInsufficient) Decode(r io.Reader, pver uint32) error {
}
f.Update = ChannelUpdate{}
return parseChannelUpdateCompatabilityMode(
return parseChannelUpdateCompatibilityMode(
r, length, &f.Update, pver,
)
}
@@ -903,7 +906,8 @@ func (f *FailIncorrectCltvExpiry) Decode(r io.Reader, pver uint32) error {
}
f.Update = ChannelUpdate{}
return parseChannelUpdateCompatabilityMode(
return parseChannelUpdateCompatibilityMode(
r, length, &f.Update, pver,
)
}
@@ -960,7 +964,8 @@ func (f *FailExpiryTooSoon) Decode(r io.Reader, pver uint32) error {
}
f.Update = ChannelUpdate{}
return parseChannelUpdateCompatabilityMode(
return parseChannelUpdateCompatibilityMode(
r, length, &f.Update, pver,
)
}
@@ -1024,7 +1029,8 @@ func (f *FailChannelDisabled) Decode(r io.Reader, pver uint32) error {
}
f.Update = ChannelUpdate{}
return parseChannelUpdateCompatabilityMode(
return parseChannelUpdateCompatibilityMode(
r, length, &f.Update, pver,
)
}

View File

@@ -120,10 +120,10 @@ func testEncodeDecodeTlv(t *testing.T, testFailure FailureMessage) {
require.Equal(t, testFailure, failure)
}
// TestChannelUpdateCompatabilityParsing tests that we're able to properly read
// TestChannelUpdateCompatibilityParsing tests that we're able to properly read
// out channel update messages encoded in an onion error payload that was
// written in the legacy (type prefixed) format.
func TestChannelUpdateCompatabilityParsing(t *testing.T) {
func TestChannelUpdateCompatibilityParsing(t *testing.T) {
t.Parallel()
// We'll start by taking out test channel update, and encoding it into
@@ -137,7 +137,7 @@ func TestChannelUpdateCompatabilityParsing(t *testing.T) {
// able to decode it using our compatibility method, as it's a regular
// encoded channel update message.
var newChanUpdate ChannelUpdate
err := parseChannelUpdateCompatabilityMode(
err := parseChannelUpdateCompatibilityMode(
&b, uint16(b.Len()), &newChanUpdate, 0,
)
require.NoError(t, err, "unable to parse channel update")
@@ -164,7 +164,7 @@ func TestChannelUpdateCompatabilityParsing(t *testing.T) {
// We should be able to properly parse the encoded channel update
// message even with the extra two bytes.
var newChanUpdate2 ChannelUpdate
err = parseChannelUpdateCompatabilityMode(
err = parseChannelUpdateCompatibilityMode(
&b, uint16(b.Len()), &newChanUpdate2, 0,
)
require.NoError(t, err, "unable to parse channel update")

View File

@@ -125,7 +125,7 @@ func submitNonblockingGeneric(t *testing.T, p interface{}, nWorkers int) {
// Now, unblock them all simultaneously. All of the tasks should then be
// processed in parallel. Afterward, no more errors should come through.
close(semChan)
pullParllel(t, nUnblocked, errChan)
pullParallel(t, nUnblocked, errChan)
pullNothing(t, errChan)
}
@@ -149,7 +149,7 @@ func submitBlockingGeneric(t *testing.T, p interface{}, nWorkers int) {
// Now, pull each blocking task sequentially from the pool. Afterwards,
// no more errors should come through.
pullSequntial(t, nBlocked, errChan, semChan)
pullSequential(t, nBlocked, errChan, semChan)
pullNothing(t, errChan)
}
@@ -187,12 +187,12 @@ func submitPartialBlockingGeneric(t *testing.T, p interface{}, nWorkers int) {
// Now, unblock the unblocked task and pull all of them. After they have
// been pulled, we should see no more tasks.
close(semChanNB)
pullParllel(t, nUnblocked, errChan)
pullParallel(t, nUnblocked, errChan)
pullNothing(t, errChan)
// Finally, unblock each the blocked tasks we added initially, and
// assert that no further errors come through.
pullSequntial(t, nBlocked, errChan, semChan)
pullSequential(t, nBlocked, errChan, semChan)
pullNothing(t, errChan)
}
@@ -208,7 +208,7 @@ func pullNothing(t *testing.T, errChan chan error) {
}
}
func pullParllel(t *testing.T, n int, errChan chan error) {
func pullParallel(t *testing.T, n int, errChan chan error) {
t.Helper()
for i := 0; i < n; i++ {
@@ -224,7 +224,10 @@ func pullParllel(t *testing.T, n int, errChan chan error) {
}
}
func pullSequntial(t *testing.T, n int, errChan chan error, semChan chan struct{}) {
func pullSequential(
t *testing.T, n int, errChan chan error, semChan chan struct{},
) {
t.Helper()
for i := 0; i < n; i++ {

View File

@@ -355,7 +355,7 @@ type RestrictParams struct {
}
// PathFindingConfig defines global parameters that control the trade-off in
// path finding between fees and probabiity.
// path finding between fees and probability.
type PathFindingConfig struct {
// AttemptCost is the fixed virtual cost in path finding of a failed
// payment attempt. It is used to trade off potentially better routes
@@ -1035,7 +1035,7 @@ func getProbabilityBasedDist(weight int64, probability float64,
// Avoid cast if an overflow would occur. The maxFloat constant is
// chosen to stay well below the maximum float64 value that is still
// convertable to int64.
// convertible to int64.
const maxFloat = 9000000000000000000
if dist > maxFloat {
return infinity

View File

@@ -519,7 +519,7 @@ func (g *testGraphInstance) getLink(chanID lnwire.ShortChannelID) (
// createTestGraphFromChannels returns a fully populated ChannelGraph based on a set of
// test channels. Additional required information like keys are derived in
// a deterministical way and added to the channel graph. A list of nodes is
// a deterministic way and added to the channel graph. A list of nodes is
// not required and derived from the channel data. The goal is to keep
// instantiating a test channel graph as light weight as possible.
func createTestGraphFromChannels(t *testing.T, useCache bool,

View File

@@ -31,7 +31,7 @@ type paymentLifecycle struct {
currentHeight int32
}
// payemntState holds a number of key insights learned from a given MPPayment
// paymentState holds a number of key insights learned from a given MPPayment
// that we use to determine what to do on each payment loop iteration.
type paymentState struct {
numShardsInFlight int

View File

@@ -175,7 +175,7 @@ type paymentSession struct {
getRoutingGraph func() (routingGraph, func(), error)
// pathFindingConfig defines global parameters that control the
// trade-off in path finding between fees and probabiity.
// trade-off in path finding between fees and probability.
pathFindingConfig PathFindingConfig
missionControl MissionController

View File

@@ -39,7 +39,7 @@ type SessionSource struct {
MissionControl MissionController
// PathFindingConfig defines global parameters that control the
// trade-off in path finding between fees and probabiity.
// trade-off in path finding between fees and probability.
PathFindingConfig PathFindingConfig
}

View File

@@ -32,7 +32,7 @@ const (
// minCapacityFactor is the minimal value the capacityFactor can take.
// Having a too low value can lead to discarding of paths due to the
// enforced minimal proability or to too high pathfinding weights.
// enforced minimal probability or to too high pathfinding weights.
minCapacityFactor = 0.5
// minCapacityFraction is the minimum allowed value for

View File

@@ -18,7 +18,7 @@ const (
// untriedNode is a node id for which we don't record any results in
// this test. This can be used to assert the probability for untried
// ndoes.
// nodes.
untriedNode = 255
// Define test estimator parameters.

View File

@@ -400,7 +400,7 @@ func NewRouteFromHops(amtToSend lnwire.MilliSatoshi, timeLock uint32,
}
// ToSphinxPath converts a complete route into a sphinx PaymentPath that
// contains the per-hop paylods used to encoding the HTLC routing data for each
// contains the per-hop payloads used to encoding the HTLC routing data for each
// hop in the route. This method also accepts an optional EOB payload for the
// final hop.
func (r *Route) ToSphinxPath() (*sphinx.PaymentPath, error) {

View File

@@ -68,7 +68,7 @@ const (
//
// NOTE: For payment requests, BOLT 11 stipulates that a final CLTV
// delta of 9 should be used when no value is decoded. This however
// leads to inflexiblity in upgrading this default parameter, since it
// leads to inflexibility in upgrading this default parameter, since it
// can create inconsistencies around the assumed value between sender
// and receiver. Specifically, if the receiver assumes a higher value
// than the sender, the receiver will always see the received HTLCs as

View File

@@ -72,7 +72,7 @@ type clientUpdate struct {
clientID uint64
// client is the new client that will receive updates. Will be nil in
// case this is a cancallation update.
// case this is a cancellation update.
client *Client
}

View File

@@ -517,7 +517,7 @@ func (q *DiskOverflowQueue[T]) feedMemQueue() {
// instead persist the task to disk. After the producer,
// drainInputList, has pushed an item to inputChan, it is
// guaranteed to await a response on the task's success channel
// before quiting. Therefore, it is not required to listen on
// before quitting. Therefore, it is not required to listen on
// the quit channel here.
case task := <-q.inputChan:
select {

View File

@@ -281,7 +281,7 @@ func PaymentAddr(addr [32]byte) func(*Invoice) {
}
// Metadata is a functional option that allows callers of NewInvoice to set
// the desired payment Metadata tht is advertised on the invoice.
// the desired payment Metadata that is advertised on the invoice.
func Metadata(metadata []byte) func(*Invoice) {
return func(i *Invoice) {
i.Metadata = metadata