multi: Fix typos [skip ci]

Author: Dimitris Apostolou, 2022-01-13 18:29:43 +02:00
parent f50950640f
commit 530a2059e5
GPG Key ID: 4B5D20E938204A8A
139 changed files with 254 additions and 254 deletions

@@ -498,7 +498,7 @@ func decipherCipherSeed(cipherSeedBytes [EncipheredCipherSeedSize]byte,
 }
 // Decipher attempts to decipher the encoded mnemonic by first mapping to the
-// original chipertext, then applying our deciphering scheme. ErrInvalidPass
+// original ciphertext, then applying our deciphering scheme. ErrInvalidPass
 // will be returned if the passphrase is incorrect.
 func (m *Mnemonic) Decipher(pass []byte) ([DecipheredCipherSeedSize]byte, error) {

@@ -339,7 +339,7 @@ func TestAgentHeuristicUpdateSignal(t *testing.T) {
 // initial check.
 respondMoreChans(t, testCtx, moreChansResp{0, 0})
-// Next we'll signal that one of the heuristcs have been updated.
+// Next we'll signal that one of the heuristics have been updated.
 testCtx.agent.OnHeuristicUpdate(testCtx.heuristic)
 // The update should trigger the agent to ask for a channel budget.so
@@ -1256,7 +1256,7 @@ func TestAgentChannelSizeAllocation(t *testing.T) {
 "had %v", len(arg.chans))
 }
 if arg.balance != testCtx.walletBalance {
-t.Fatalf("expectd agent to have %v balance, had %v",
+t.Fatalf("expected agent to have %v balance, had %v",
 testCtx.walletBalance, arg.balance)
 }
 case <-time.After(time.Second * 3):

@@ -51,7 +51,7 @@ func (q *queue) empty() bool {
 // BetweennessCentrality is a NodeMetric that calculates node betweenness
 // centrality using Brandes' algorithm. Betweenness centrality for each node
-// is the number of shortest paths passing trough that node, not counting
+// is the number of shortest paths passing through that node, not counting
 // shortest paths starting or ending at that node. This is a useful metric
 // to measure control of individual nodes over the whole network.
 type BetweennessCentrality struct {
@@ -167,7 +167,7 @@ func betweennessCentrality(g *SimpleGraph, s int, centrality []float64) {
 }
 }
-// Refresh recaculates and stores centrality values.
+// Refresh recalculates and stores centrality values.
 func (bc *BetweennessCentrality) Refresh(graph ChannelGraph) error {
 cache, err := NewSimpleGraph(graph)
 if err != nil {
@@ -186,7 +186,7 @@ func (bc *BetweennessCentrality) Refresh(graph ChannelGraph) error {
 partial := make([]float64, len(cache.Nodes))
 // Consume the next node, update centrality
-// parital to avoid unnecessary synchronizaton.
+// parital to avoid unnecessary synchronization.
 for node := range work {
 betweennessCentrality(cache, node, partial)
 }

@@ -35,7 +35,7 @@ var normalizedTestGraphCentrality = []float64{
 0.2, 0.0, 0.2, 1.0, 0.4, 0.4, 7.0 / 15.0, 0.0, 0.0,
 }
-// buildTestGraph builds a test graph from a passed graph desriptor.
+// buildTestGraph builds a test graph from a passed graph descriptor.
 func buildTestGraph(t *testing.T,
 graph testGraph, desc testGraphDesc) map[int]*btcec.PublicKey {

@@ -11,7 +11,7 @@ import (
 var ErrNoPositive = errors.New("no positive weights left")
 // weightedChoice draws a random index from the slice of weights, with a
-// probability propotional to the weight at the given index.
+// probability proportional to the weight at the given index.
 func weightedChoice(w []float64) (int, error) {
 // Calculate the sum of weights.
 var sum float64
@@ -25,7 +25,7 @@ func weightedChoice(w []float64) (int, error) {
 // Pick a random number in the range [0.0, 1.0) and multiply it with
 // the sum of weights. Then we'll iterate the weights until the number
-// goes below 0. This means that each index is picked with a probablity
+// goes below 0. This means that each index is picked with a probability
 // equal to their normalized score.
 //
 // Example:

@@ -92,7 +92,7 @@ func (c *WeightedCombAttachment) NodeScores(g ChannelGraph, chans []LocalChannel
 }
 // We combine the scores given by the sub-heuristics by using the
-// heruistics' given weight factor.
+// heuristics' given weight factor.
 scores := make(map[NodeID]*NodeScore)
 for nID := range nodes {
 score := &NodeScore{

@@ -32,7 +32,7 @@ func (m *mockSubLogger) SetLogLevels(logLevel string) {
 m.globalLogLevel = logLevel
 }
-// TestParseAndSetDebugLevels tests tha we can properly set the log levels for
+// TestParseAndSetDebugLevels tests that we can properly set the log levels for
 // all andspecified subsystems.
 func TestParseAndSetDebugLevels(t *testing.T) {
 testCases := []struct {

@@ -26,7 +26,7 @@ var (
 RawTags string
 // GoVersion stores the go version that the executable was compiled
-// with. This hsould be set using -ldflags during compilation.
+// with. This should be set using -ldflags during compilation.
 GoVersion string
 )

@@ -254,7 +254,7 @@ type ConfNtfn struct {
 dispatched bool
 }
-// HistoricalConfDispatch parameterizes a manual rescan for a particular
+// HistoricalConfDispatch parametrizes a manual rescan for a particular
 // transaction/output script. The parameters include the start and end block
 // heights specifying the range of blocks to scan.
 type HistoricalConfDispatch struct {
@@ -408,12 +408,12 @@ type SpendNtfn struct {
 // an entry for it.
 HeightHint uint32
-// dispatched signals whether a spend notification has been disptached
+// dispatched signals whether a spend notification has been dispatched
 // to the client.
 dispatched bool
 }
-// HistoricalSpendDispatch parameterizes a manual rescan to determine the
+// HistoricalSpendDispatch parametrizes a manual rescan to determine the
 // spending details (if any) of an outpoint/output script. The parameters
 // include the start and end block heights specifying the range of blocks to
 // scan.
@@ -523,7 +523,7 @@ type TxNotifier struct {
 // NewTxNotifier creates a TxNotifier. The current height of the blockchain is
 // accepted as a parameter. The different hint caches (confirm and spend) are
 // used as an optimization in order to retrieve a better starting point when
-// dispatching a recan for a historical event in the chain.
+// dispatching a rescan for a historical event in the chain.
 func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32,
 confirmHintCache ConfirmHintCache,
 spendHintCache SpendHintCache) *TxNotifier {

@@ -282,7 +282,7 @@ func TestTxNotifierFutureConfDispatch(t *testing.T) {
 t.Fatal("Expected confirmation update for tx1")
 }
-// A confirmation notification for this tranaction should be dispatched,
+// A confirmation notification for this transaction should be dispatched,
 // as it only required one confirmation.
 select {
 case txConf := <-ntfn1.Event.Confirmed:
@@ -2148,7 +2148,7 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
 op2Height = 203
 )
-// Intiialize our TxNotifier instance backed by a height hint cache.
+// Initialize our TxNotifier instance backed by a height hint cache.
 hintCache := newMockHintCache()
 n := chainntnfs.NewTxNotifier(
 startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
@@ -2331,7 +2331,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
 reorgSafety = 10
 )
-// Intiialize our TxNotifier instance backed by a height hint cache.
+// Initialize our TxNotifier instance backed by a height hint cache.
 hintCache := newMockHintCache()
 n := chainntnfs.NewTxNotifier(
 startingHeight, reorgSafety, hintCache, hintCache,
@@ -2511,7 +2511,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
 // matured.
 err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil)
 if err == nil {
-t.Fatalf("expcted updating spend details to fail")
+t.Fatalf("expected updating spend details to fail")
 }
 // Finally, check that the height hint is still there, unchanged.

@@ -243,7 +243,7 @@ func (r *RPCAcceptor) sendAcceptRequests(errChan chan error,
 defer close(r.done)
 // Create a map of pending channel IDs to our original open channel
-// request and a response channel. We keep the original chanel open
+// request and a response channel. We keep the original channel open
 // message so that we can validate our response against it.
 acceptRequests := make(map[[32]byte]*chanAcceptInfo)

@@ -287,7 +287,7 @@ func (c ChannelType) HasFundingTx() bool {
 return c&NoFundingTxBit == 0
 }
-// HasAnchors returns true if this channel type has anchor ouputs on its
+// HasAnchors returns true if this channel type has anchor outputs on its
 // commitment.
 func (c ChannelType) HasAnchors() bool {
 return c&AnchorOutputsBit == AnchorOutputsBit
@@ -3192,7 +3192,7 @@ func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
 // The RemoteNextRevocation field is optional, as it's possible for a
 // channel to be closed before we learn of the next unrevoked
-// revocation point for the remote party. Write a boolen indicating
+// revocation point for the remote party. Write a boolean indicating
 // whether this field is present or not.
 if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
 return err
@@ -3311,7 +3311,7 @@ func writeChanConfig(b io.Writer, c *ChannelConfig) error {
 }
 // fundingTxPresent returns true if expect the funding transcation to be found
-// on disk or already populated within the passed oen chanel struct.
+// on disk or already populated within the passed open channel struct.
 func fundingTxPresent(channel *OpenChannel) bool {
 chanType := channel.ChanType

@@ -1037,7 +1037,7 @@ func (c *ChannelStateDB) pruneLinkNode(openChannels []*OpenChannel,
 return c.linkNodeDB.DeleteLinkNode(remotePub)
 }
-// PruneLinkNodes attempts to prune all link nodes found within the databse with
+// PruneLinkNodes attempts to prune all link nodes found within the database with
 // whom we no longer have any open channels with.
 func (c *ChannelStateDB) PruneLinkNodes() error {
 allLinkNodes, err := c.linkNodeDB.FetchAllLinkNodes()
@@ -1189,7 +1189,7 @@ func (c *ChannelStateDB) AbandonChannel(chanPoint *wire.OutPoint,
 }
 // If the channel was already closed, then we don't return an
-// error as we'd like fro this step to be repeatable.
+// error as we'd like this step to be repeatable.
 return nil
 case err != nil:
 return err

@@ -448,7 +448,7 @@ func TestRestoreChannelShells(t *testing.T) {
 // The node should have the same address, as specified in the channel
 // shell.
 if reflect.DeepEqual(linkNode.Addresses, channelShell.NodeAddrs) {
-t.Fatalf("addr mismach: expected %v, got %v",
+t.Fatalf("addr mismatch: expected %v, got %v",
 linkNode.Addresses, channelShell.NodeAddrs)
 }
 }
@@ -738,7 +738,7 @@ func TestFetchHistoricalChannel(t *testing.T) {
 histChannel, err := cdb.FetchHistoricalChannel(&channel.FundingOutpoint)
 if err != nil {
-t.Fatalf("unexepected error getting channel: %v", err)
+t.Fatalf("unexpected error getting channel: %v", err)
 }
 // FetchHistoricalChannel will attach the cdb to channel.Db, we set it

@@ -47,7 +47,7 @@ func (d *DB) ForwardingLog() *ForwardingLog {
 }
 }
-// ForwardingLog is a time series database that logs the fulfilment of payment
+// ForwardingLog is a time series database that logs the fulfillment of payment
 // circuits by a lightning network daemon. The log contains a series of
 // forwarding events which map a timestamp to a forwarding event. A forwarding
 // event describes which channels were used to create+settle a circuit, and the
@@ -204,7 +204,7 @@ type ForwardingEventQuery struct {
 // ForwardingLogTimeSlice is the response to a forwarding query. It includes
 // the original query, the set events that match the query, and an integer
-// which represents the offset index of the last item in the set of retuned
+// which represents the offset index of the last item in the set of returned
 // events. This integer allows callers to resume their query using this offset
 // in the event that the query's response exceeds the max number of returnable
 // events.

@@ -895,7 +895,7 @@ func validateInvoice(i *Invoice, paymentHash lntypes.Hash) error {
 return nil
 }
-// IsPending returns ture if the invoice is in ContractOpen state.
+// IsPending returns true if the invoice is in ContractOpen state.
 func (i *Invoice) IsPending() bool {
 return i.State == ContractOpen || i.State == ContractAccepted
 }
@@ -1189,7 +1189,7 @@ func fetchInvoiceNumByRef(invoiceIndex, payAddrIndex, setIDIndex kvdb.RBucket,
 }
 }
-// ScanInvoices scans trough all invoices and calls the passed scanFunc for
+// ScanInvoices scans through all invoices and calls the passed scanFunc for
 // for each invoice with its respective payment hash. Additionally a reset()
 // closure is passed which is used to reset/initialize partial results and also
 // to signal if the kvdb.View transaction has been retried.
@@ -3020,7 +3020,7 @@ func updateInvoiceState(invoice *Invoice, hash *lntypes.Hash,
 }
 }
-// cancelSingleHtlc validates cancelation of a single htlc and update its state.
+// cancelSingleHtlc validates cancellation of a single htlc and update its state.
 func cancelSingleHtlc(resolveTime time.Time, htlc *InvoiceHTLC,
 invState ContractState) error {

@@ -75,6 +75,6 @@ func (g *GossipTimestampRange) MsgType() MessageType {
 func (g *GossipTimestampRange) MaxPayloadLength(uint32) uint32 {
 // 32 + 4 + 4
 //
-// TODO(roasbeef): update to 8 byte timestmaps?
+// TODO(roasbeef): update to 8 byte timestamps?
 return 40
 }

@@ -280,7 +280,7 @@ func SerializeChannelCloseSummary(w io.Writer, cs *common.ChannelCloseSummary) e
 // The RemoteNextRevocation field is optional, as it's possible for a
 // channel to be closed before we learn of the next unrevoked
-// revocation point for the remote party. Write a boolen indicating
+// revocation point for the remote party. Write a boolean indicating
 // whether this field is present or not.
 if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
 return err

@@ -379,7 +379,7 @@ func SerializeChannelCloseSummary(w io.Writer, cs *common.ChannelCloseSummary) e
 // The RemoteNextRevocation field is optional, as it's possible for a
 // channel to be closed before we learn of the next unrevoked
-// revocation point for the remote party. Write a boolen indicating
+// revocation point for the remote party. Write a boolean indicating
 // whether this field is present or not.
 if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
 return err

@@ -221,7 +221,7 @@ func migrateOpenChanBucket(tx kvdb.RwTx) error {
 }
 }
-// Remote unsiged updates as well.
+// Remote unsigned updates as well.
 updateBytes = chanBucket.Get(remoteUnsignedLocalUpdatesKey)
 if updateBytes != nil {
 legacyUnsignedUpdates, err := legacy.DeserializeLogUpdates(
@@ -316,7 +316,7 @@ func migrateForwardingPackages(tx kvdb.RwTx) error {
 return err
 }
-// Now load all forwading packages using the legacy encoding.
+// Now load all forwarding packages using the legacy encoding.
 var pkgsToMigrate []*common.FwdPkg
 for _, source := range sources {
 packager := legacy.NewChannelPackager(source)

@@ -622,7 +622,7 @@ func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
 // The RemoteNextRevocation field is optional, as it's possible for a
 // channel to be closed before we learn of the next unrevoked
-// revocation point for the remote party. Write a boolen indicating
+// revocation point for the remote party. Write a boolean indicating
 // whether this field is present or not.
 if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
 return err

@@ -626,7 +626,7 @@ func TestOutgoingPaymentsMigration(t *testing.T) {
 // Order of payments should be be preserved.
 old := oldPayments[i]
-// Check the individial fields.
+// Check the individual fields.
 if p.Info.Value != old.Terms.Value {
 t.Fatalf("value mismatch")
 }

@@ -266,7 +266,7 @@ func Features(features *lnwire.FeatureVector) func(*Invoice) {
 }
 // PaymentAddr is a functional option that allows callers of NewInvoice to set
-// the desired payment address tht is advertised on the invoice.
+// the desired payment address that is advertised on the invoice.
 func PaymentAddr(addr [32]byte) func(*Invoice) {
 return func(i *Invoice) {
 i.PaymentAddr = &addr

@@ -157,7 +157,7 @@ func OptionClock(clock clock.Clock) OptionModifier {
 }
 }
-// OptionDryRunMigration controls whether or not to intentially fail to commit a
+// OptionDryRunMigration controls whether or not to intentionally fail to commit a
 // successful migration that occurs when opening the database.
 func OptionDryRunMigration(dryRun bool) OptionModifier {
 return func(o *Options) {

@@ -64,11 +64,11 @@ var (
 "amount")
 // ErrNonMPPayment is returned if we try to register an MPP attempt for
-// a payment that already has a non-MPP attempt regitered.
+// a payment that already has a non-MPP attempt registered.
 ErrNonMPPayment = errors.New("payment has non-MPP attempts")
 // ErrMPPayment is returned if we try to register a non-MPP attempt for
-// a payment that already has an MPP attempt regitered.
+// a payment that already has an MPP attempt registered.
 ErrMPPayment = errors.New("payment has MPP attempts")
 // ErrMPPPaymentAddrMismatch is returned if we try to register an MPP
@@ -106,7 +106,7 @@ func NewPaymentControl(db *DB) *PaymentControl {
 // InitPayment checks or records the given PaymentCreationInfo with the DB,
 // making sure it does not already exist as an in-flight payment. When this
-// method returns successfully, the payment is guranteeed to be in the InFlight
+// method returns successfully, the payment is guaranteed to be in the InFlight
 // state.
 func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash,
 info *PaymentCreationInfo) error {

@@ -551,7 +551,7 @@ func TestPaymentControlDeleteNonInFligt(t *testing.T) {
 require.Equal(t, 1, indexCount)
 }
-// TestPaymentControlDeletePayments tests that DeletePayments correcly deletes
+// TestPaymentControlDeletePayments tests that DeletePayments correctly deletes
 // information about completed payments from the database.
 func TestPaymentControlDeletePayments(t *testing.T) {
 t.Parallel()
@@ -605,7 +605,7 @@ func TestPaymentControlDeletePayments(t *testing.T) {
 assertPayments(t, db, payments[2:])
 }
-// TestPaymentControlDeleteSinglePayment tests that DeletePayment correcly
+// TestPaymentControlDeleteSinglePayment tests that DeletePayment correctly
 // deletes information about a completed payment from the database.
 func TestPaymentControlDeleteSinglePayment(t *testing.T) {
 t.Parallel()

@@ -382,7 +382,7 @@ func fetchPayment(bucket kvdb.RBucket) (*MPPayment, error) {
 }, nil
 }
-// fetchHtlcAttempts retrives all htlc attempts made for the payment found in
+// fetchHtlcAttempts retrieves all htlc attempts made for the payment found in
 // the given bucket.
 func fetchHtlcAttempts(bucket kvdb.RBucket) ([]HTLCAttempt, error) {
 htlcsMap := make(map[uint64]*HTLCAttempt)

@@ -23,7 +23,7 @@ const (
 // Enforce that etcdLeaderElector implements the LeaderElector interface.
 var _ LeaderElector = (*etcdLeaderElector)(nil)
-// etcdLeaderElector is an implemetation of LeaderElector using etcd as the
+// etcdLeaderElector is an implementation of LeaderElector using etcd as the
 // election governor.
 type etcdLeaderElector struct {
 id string

@@ -23,7 +23,7 @@ func RegisterLeaderElectorFactory(id string, factory leaderElectorFactoryFunc) {
 leaderElectorFactories[id] = factory
 }
-// MakeLeaderElector will constuct a LeaderElector identified by id with the
+// MakeLeaderElector will construct a LeaderElector identified by id with the
 // passed arguments.
 func MakeLeaderElector(ctx context.Context, id string, args ...interface{}) (
 LeaderElector, error) {

@@ -21,8 +21,8 @@ var secondsPer = map[string]int64{
 "y": 31557600, // 365.25 days
 }
-// parseTime parses UNIX timestamps or short timeranges inspired by sytemd (when starting with "-"),
-// e.g. "-1M" for one month (30.44 days) ago.
+// parseTime parses UNIX timestamps or short timeranges inspired by systemd
+// (when starting with "-"), e.g. "-1M" for one month (30.44 days) ago.
 func parseTime(s string, base time.Time) (uint64, error) {
 if reTimeRange.MatchString(s) {
 last := len(s) - 1

@@ -256,7 +256,7 @@ type Config struct {
 LetsEncryptDir string `long:"letsencryptdir" description:"The directory to store Let's Encrypt certificates within"`
 LetsEncryptListen string `long:"letsencryptlisten" description:"The IP:port on which lnd will listen for Let's Encrypt challenges. Let's Encrypt will always try to contact on port 80. Often non-root processes are not allowed to bind to ports lower than 1024. This configuration option allows a different port to be used, but must be used in combination with port forwarding from port 80. This configuration can also be used to specify another IP address to listen on, for example an IPv6 address."`
-LetsEncryptDomain string `long:"letsencryptdomain" description:"Request a Let's Encrypt certificate for this domain. Note that the certicate is only requested and stored when the first rpc connection comes in."`
+LetsEncryptDomain string `long:"letsencryptdomain" description:"Request a Let's Encrypt certificate for this domain. Note that the certificate is only requested and stored when the first rpc connection comes in."`
 // We'll parse these 'raw' string arguments into real net.Addrs in the
 // loadConfig function. We need to expose the 'raw' strings so the
@@ -326,7 +326,7 @@ type Config struct {
 PaymentsExpirationGracePeriod time.Duration `long:"payments-expiration-grace-period" description:"A period to wait before force closing channels with outgoing htlcs that have timed-out and are a result of this node initiated payments."`
 TrickleDelay int `long:"trickledelay" description:"Time in milliseconds between each release of announcements to the network"`
-ChanEnableTimeout time.Duration `long:"chan-enable-timeout" description:"The duration that a peer connection must be stable before attempting to send a channel update to reenable or cancel a pending disables of the peer's channels on the network."`
+ChanEnableTimeout time.Duration `long:"chan-enable-timeout" description:"The duration that a peer connection must be stable before attempting to send a channel update to re-enable or cancel a pending disables of the peer's channels on the network."`
 ChanDisableTimeout time.Duration `long:"chan-disable-timeout" description:"The duration that must elapse after first detecting that an already active channel is actually inactive and sending channel update disabling it to the network. The pending disable can be canceled if the peer reconnects and becomes stable for chan-enable-timeout before the disable update is sent."`
 ChanStatusSampleInterval time.Duration `long:"chan-status-sample-interval" description:"The polling interval between attempts to detect if an active channel has become inactive due to its peer going offline."`
 HeightHintCacheQueryDisable bool `long:"height-hint-cache-query-disable" description:"Disable queries from the height-hint cache to try to recover channels stuck in the pending close state. Disabling height hint queries may cause longer chain rescans, resulting in a performance hit. Unset this after channels are unstuck so you can get better performance again."`
@@ -900,7 +900,7 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
 )
 }
-// Don't allow superflous --maxchansize greater than
+// Don't allow superfluous --maxchansize greater than
 // BOLT 02 soft-limit for non-wumbo channel
 if !cfg.ProtocolOptions.Wumbo() &&
 cfg.MaxChanSize > int64(MaxFundingAmount) {

@@ -651,7 +651,7 @@ func (b *BreachArbiter) exactRetribution(confChan *chainntnfs.ConfirmationEvent,
 // We may have to wait for some of the HTLC outputs to be spent to the
 // second level before broadcasting the justice tx. We'll store the
-// SpendEvents between each attempt to not re-register uneccessarily.
+// SpendEvents between each attempt to not re-register unnecessarily.
 spendNtfns := make(map[wire.OutPoint]*chainntnfs.SpendEvent)
 // Compute both the total value of funds being swept and the

@@ -1263,7 +1263,7 @@ func TestBreachCreateJusticeTx(t *testing.T) {
 // The spendCommitOuts tx should be spending the 4 typed of commit outs
 // (note that in practice there will be at most two commit outputs per
-// commmit, but we test all 4 types here).
+// commit, but we test all 4 types here).
 require.Len(t, justiceTxs.spendCommitOuts.TxIn, 4)
 // Finally check that the spendHTLCs tx are spending the two revoked
@@ -1279,7 +1279,7 @@ type breachTest struct {
 // spend2ndLevel requests that second level htlcs be spent *again*, as
 // if by a remote party or watchtower. The outpoint of the second level
-// htlc is in effect "readded" to the set of inputs.
+// htlc is in effect "re-added" to the set of inputs.
 spend2ndLevel bool
 // sweepHtlc tests that the HTLC output is swept using the revocation

@@ -364,7 +364,7 @@ var (
 // chain resolutions.
 errNoResolutions = fmt.Errorf("no contract resolutions exist")
-// errNoActions is retuned when the log doesn't contain any stored
+// errNoActions is returned when the log doesn't contain any stored
 // chain actions.
 errNoActions = fmt.Errorf("no chain actions exist")
@@ -1098,7 +1098,7 @@ func (b *boltArbitratorLog) checkpointContract(c ContractResolver,
 }, func() {})
 }
-// encodeSignDetails encodes the gived SignDetails struct to the writer.
+// encodeSignDetails encodes the given SignDetails struct to the writer.
 // SignDetails is allowed to be nil, in which we will encode that it is not
 // present.
 func encodeSignDetails(w io.Writer, s *input.SignDetails) error {

@@ -102,7 +102,7 @@ type ChainArbitratorConfig struct {
 // ContractBreach is a function closure that the ChainArbitrator will
 // use to notify the breachArbiter about a contract breach. A callback
 // should be passed that when called will mark the channel pending
-// close in the databae. It should only return a non-nil error when the
+// close in the database. It should only return a non-nil error when the
 // breachArbiter has preserved the necessary breach info for this
 // channel point, and the callback has succeeded, meaning it is safe to
 // stop watching the channel.
@@ -846,8 +846,8 @@ func (c *ChainArbitrator) publishClosingTxs(
 // rebroadcast is a helper method which will republish the unilateral or
 // cooperative close transaction or a channel in a particular state.
 //
-// NOTE: There is no risk to caling this method if the channel isn't in either
-// CommimentBroadcasted or CoopBroadcasted, but the logs will be misleading.
+// NOTE: There is no risk to calling this method if the channel isn't in either
+// CommitmentBroadcasted or CoopBroadcasted, but the logs will be misleading.
 func (c *ChainArbitrator) rebroadcast(channel *channeldb.OpenChannel,
 state channeldb.ChannelStatus) error {

@@ -750,7 +750,7 @@ func TestHtlcTimeoutSingleStageRemoteSpend(t *testing.T) {
 witnessBeacon := ctx.resolver.(*htlcTimeoutResolver).PreimageDB.(*mockWitnessBeacon)
-// The remote spends the output direcly with
+// The remote spends the output directly with
 // the preimage.
 ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
 SpendingTx: spendTx,
@@ -978,7 +978,7 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
 }
 // twoStageResolution is a resolution for a htlc on the local
-// party's commitment, where the timout tx can be re-signed.
+// party's commitment, where the timeout tx can be re-signed.
 twoStageResolution := lnwallet.OutgoingHtlcResolution{
 ClaimOutpoint: htlcOutpoint,
 SignedTimeoutTx: timeoutTx,
@@ -1041,7 +1041,7 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
 preCheckpoint: func(ctx *htlcResolverTestContext,
 resumed bool) error {
-// If we are resuming from a checkpoing, we
+// If we are resuming from a checkpoint, we
 // expect the resolver to re-subscribe to a
 // spend, hence we must resend it.
 if resumed {
@@ -1070,7 +1070,7 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
 Height: 13,
 }
-// The timout tx output should now be given to
+// The timeout tx output should now be given to
 // the sweeper.
 resolver := ctx.resolver.(*htlcTimeoutResolver)
 inp := <-resolver.Sweeper.(*mockSweeper).sweptInputs
@@ -1161,7 +1161,7 @@ func TestHtlcTimeoutSecondStageSweeperRemoteSpend(t *testing.T) {
 spendTxHash := spendTx.TxHash()
 // twoStageResolution is a resolution for a htlc on the local
-// party's commitment, where the timout tx can be re-signed.
+// party's commitment, where the timeout tx can be re-signed.
 twoStageResolution := lnwallet.OutgoingHtlcResolution{
 ClaimOutpoint: htlcOutpoint,
 SignedTimeoutTx: timeoutTx,
@@ -1214,7 +1214,7 @@ func TestHtlcTimeoutSecondStageSweeperRemoteSpend(t *testing.T) {
 preCheckpoint: func(ctx *htlcResolverTestContext,
 resumed bool) error {
-// If we are resuming from a checkpoing, we
+// If we are resuming from a checkpoint, we
 // expect the resolver to re-subscribe to a
 // spend, hence we must resend it.
 if resumed {

@@ -313,7 +313,7 @@ func NewDNSSeedBootstrapper(
 // fallBackSRVLookup attempts to manually query for SRV records we need to
 // properly bootstrap. We do this by querying the special record at the "soa."
-// sub-domain of supporting DNS servers. The retuned IP address will be the IP
+// sub-domain of supporting DNS servers. The returned IP address will be the IP
 // address of the authoritative DNS server. Once we have this IP address, we'll
 // connect manually over TCP to request the SRV record. This is necessary as
 // the records we return are currently too large for a class of resolvers,
@@ -361,7 +361,7 @@ func (d *DNSSeedBootstrapper) fallBackSRVLookup(soaShim string,
 "received: %v", resp.Rcode)
 }
-// Retrieve the RR(s) of the Answer section, and covert to the format
+// Retrieve the RR(s) of the Answer section, and convert to the format
 // that net.LookupSRV would normally return.
 var rrs []*net.SRV
 for _, rr := range resp.Answer {

@@ -286,7 +286,7 @@ type cachedNetworkMsg struct {
 }
 // Size returns the "size" of an entry. We return the number of items as we
-// just want to limit the total amount of entires rather than do accurate size
+// just want to limit the total amount of entries rather than do accurate size
 // accounting.
 func (c *cachedNetworkMsg) Size() (uint64, error) {
 return uint64(len(c.msgs)), nil

@@ -2329,8 +2329,8 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {
 }
 // We'll also add the edge to our zombie index, provide a blank pubkey
-// for the first node as we're simulating the sitaution where the first
-// ndoe is updating but the second node isn't. In this case we only
+// for the first node as we're simulating the situation where the first
+// node is updating but the second node isn't. In this case we only
 // want to allow a new update from the second node to allow the entire
 // edge to be resurrected.
 chanID := batch.chanAnn.ShortChannelID
@@ -2350,7 +2350,7 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {
 }
 processAnnouncement(batch.chanUpdAnn1, true, true)
-// At this point, the channel should still be consiered a zombie.
+// At this point, the channel should still be considered a zombie.
 _, _, _, err = ctx.router.GetChannelByID(chanID)
 if err != channeldb.ErrZombieEdge {
 t.Fatalf("channel should still be a zombie")
@@ -2448,7 +2448,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
 remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
 // Override NotifyWhenOnline to return the remote peer which we expect
-// meesages to be sent to.
+// messages to be sent to.
 ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte,
 peerChan chan<- lnpeer.Peer) {
@@ -2645,7 +2645,7 @@ func TestExtraDataChannelAnnouncementValidation(t *testing.T) {
 // We'll now create an announcement that contains an extra set of bytes
 // that we don't know of ourselves, but should still include in the
 // final signature check.
-extraBytes := []byte("gotta validate this stil!")
+extraBytes := []byte("gotta validate this still!")
 ca, err := createRemoteChannelAnnouncement(0, extraBytes)
 if err != nil {
 t.Fatalf("can't create channel announcement: %v", err)
@@ -2828,7 +2828,7 @@ func TestRetransmit(t *testing.T) {
 }
 remotePeer := &mockPeer{remoteKey, nil, nil}
-// Process a local channel annoucement, channel update and node
+// Process a local channel announcement, channel update and node
 // announcement. No messages should be broadcasted yet, since no proof
 // has been exchanged.
 assertProcessAnnouncement(
@@ -2961,7 +2961,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
 }
 // Now add the node's channel to the graph by processing the channel
-// announement and channel update.
+// announcement and channel update.
 select {
 case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanAnn,
 remotePeer):
@@ -3002,7 +3002,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
 }
 }
-// Processing the same node announement again should be ignored, as it
+// Processing the same node announcement again should be ignored, as it
 // is stale.
 select {
 case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2,

@@ -220,7 +220,7 @@ func (m *SyncManager) syncerHandler() {
 initialHistoricalSyncer *GossipSyncer
 // initialHistoricalSyncSignal is a signal that will fire once
-// the intiial historical sync has been completed. This is
+// the initial historical sync has been completed. This is
 // crucial to ensure that another historical sync isn't
 // attempted just because the initialHistoricalSyncer was
 // disconnected.
@@ -232,7 +232,7 @@ func (m *SyncManager) syncerHandler() {
 initialHistoricalSyncSignal = s.ResetSyncedSignal()
 // Restart the timer for our new historical sync peer. This will
-// ensure that all initial syncers recevie an equivalent
+// ensure that all initial syncers receive an equivalent
 // duration before attempting the next sync. Without doing so we
 // might attempt two historical sync back to back if a peer
 // disconnects just before the ticker fires.
@@ -362,7 +362,7 @@ func (m *SyncManager) syncerHandler() {
 // Otherwise, our initialHistoricalSyncer corresponds to
 // the peer being disconnected, so we'll have to find a
 // replacement.
-log.Debug("Finding replacement for intitial " +
+log.Debug("Finding replacement for initial " +
 "historical sync")
 s := m.forceHistoricalSync()

@@ -459,7 +459,7 @@ below):
 `lnd` plus any application that consumes the RPC could cause `lnd` to miss
 crucial updates from the backend.
 - The default fee estimate mode in `bitcoind` is CONSERVATIVE. You can set
-`bitcoind.estimatemode=ECONOMICAL` to change it into ECONOMICAL. Futhermore,
+`bitcoind.estimatemode=ECONOMICAL` to change it into ECONOMICAL. Furthermore,
 if you start `bitcoind` in `regtest`, this configuration won't take any effect.

@@ -179,7 +179,7 @@ Arguments:
 `unit-cover`
 ------------
-Runs the unit test suite with test coverage, compiling the statisitics in
+Runs the unit test suite with test coverage, compiling the statistics in
 `profile.cov`.
 Arguments:

@@ -12,7 +12,7 @@
 1. [Code Spacing](#code-spacing)
 1. [Protobuf Compilation](#protobuf-compilation)
 1. [Additional Style Constraints On Top of gofmt](#additional-style-constraints-on-top-of-gofmt)
-1. [Pointing to Remote Dependant Branches in Go Modules](#pointing-to-remote-dependant-branches-in-go-modules)
+1. [Pointing to Remote Dependent Branches in Go Modules](#pointing-to-remote-dependent-branches-in-go-modules)
 1. [Use of Log Levels](#use-of-log-levels)
 5. [Code Approval Process](#code-approval-process)
 1. [Code Review](#code-review)
@@ -546,7 +546,7 @@ to `gofmt` we've opted to enforce the following style guidelines.
 log and error messages, committers should attempt to minimize the number of
 lines utilized, while still adhering to the 80-character column limit.
-## Pointing to Remote Dependant Branches in Go Modules
+## Pointing to Remote Dependent Branches in Go Modules
 It's common that a developer may need to make a change in a dependent project
 of `lnd` such as `btcd`, `neutrino`, `btcwallet`, etc. In order to test changes

@@ -2,7 +2,7 @@
 With the recent introduction of the `kvdb` interface LND can support multiple
 database backends allowing experimentation with the storage model as well as
-improving robustness trough eg. replicating essential data.
+improving robustness through eg. replicating essential data.
 Building on `kvdb` in v0.11.0 we're adding experimental [etcd](https://etcd.io)
 support to LND. As this is an unstable feature heavily in development, it still
@@ -71,7 +71,7 @@ db.etcd.keyfile=/home/user/etcd/bin/default.etcd/fixtures/client/key.pem
 db.etcd.insecure_skip_verify=true
 ```
-Optionally users can specifiy `db.etcd.user` and `db.etcd.pass` for db user
+Optionally users can specify `db.etcd.user` and `db.etcd.pass` for db user
 authentication. If the database is shared, it is possible to separate our data
 from other users by setting `db.etcd.namespace` to an (already existing) etcd
 namespace. In order to test without TLS, users are able to set `db.etcd.disabletls`

@@ -1,4 +1,4 @@
-# Increasing LND reliablity by clustering
+# Increasing LND reliability by clustering
 Normally LND nodes use the embedded bbolt database to store all important states.
 This method of running has been proven to work well in a variety of environments,
@@ -9,7 +9,7 @@ do updates and be more resilient to datacenter failures.
 It is now possible to store all essential state in a replicated etcd DB and to
 run multiple LND nodes on different machines where only one of them (the leader)
 is able to read and mutate the database. In such setup if the leader node fails
-or decomissioned, a follower node will be elected as the new leader and will
+or decommissioned, a follower node will be elected as the new leader and will
 quickly come online to minimize downtime.
 The leader election feature currently relies on etcd to work both for the election

View File

@ -286,7 +286,7 @@ the same format.
#### Streaming Updates via `SubscribeChannelBackups` #### Streaming Updates via `SubscribeChannelBackups`
Using the gRPC interace directly, [a new call: Using the gRPC interface directly, [a new call:
`SubscribeChannelBackups`](https://api.lightning.community/#subscribechannelbackups). `SubscribeChannelBackups`](https://api.lightning.community/#subscribechannelbackups).
This call allows users to receive a new notification each time the underlying This call allows users to receive a new notification each time the underlying
SCB state changes. This can be used to implement more complex backup SCB state changes. This can be used to implement more complex backup
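As a rough sketch of consuming that stream from Go, the fragment below subscribes and logs each snapshot as it arrives. It assumes `conn` is an already-authenticated gRPC connection to lnd (TLS and macaroon setup omitted) and uses the `lnrpc` message names as published in the API docs; treat it as an outline rather than production code.

```go
import (
	"context"
	"log"

	"github.com/lightningnetwork/lnd/lnrpc"
	"google.golang.org/grpc"
)

// watchBackups streams SCB snapshots until the context is cancelled or the
// stream fails. `conn` is assumed to be an authenticated *grpc.ClientConn.
func watchBackups(ctx context.Context, conn *grpc.ClientConn) error {
	client := lnrpc.NewLightningClient(conn)

	stream, err := client.SubscribeChannelBackups(
		ctx, &lnrpc.ChannelBackupSubscription{},
	)
	if err != nil {
		return err
	}

	for {
		// A new message is pushed each time the underlying SCB state
		// changes, e.g. when a channel is opened or closed.
		snapshot, err := stream.Recv()
		if err != nil {
			return err
		}
		log.Printf("received updated channel backup snapshot: %v", snapshot)
	}
}
```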

View File

@ -53,7 +53,7 @@ ws.onmessage = function (event) {
console.log(JSON.parse(event.data).result); console.log(JSON.parse(event.data).result);
} }
ws.onerror = function (event) { ws.onerror = function (event) {
// An error occured, let's log it to the console. // An error occurred, let's log it to the console.
console.log(event); console.log(event);
} }
``` ```

View File

@ -33,7 +33,7 @@ type Config struct {
// feature sets. // feature sets.
type Manager struct { type Manager struct {
// fsets is a static map of feature set to raw feature vectors. Requests // fsets is a static map of feature set to raw feature vectors. Requests
// are fulfilled by cloning these interal feature vectors. // are fulfilled by cloning these internal feature vectors.
fsets map[Set]*lnwire.RawFeatureVector fsets map[Set]*lnwire.RawFeatureVector
} }
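The comment above describes a defensive-copy pattern: the manager's internal vectors are treated as read-only, and every request is answered with a clone so callers cannot mutate shared state. A generic sketch of that idea, using hypothetical `Set` and `featureVector` stand-ins rather than the real `lnwire` types:

```go
// Hypothetical stand-ins for the real Set and lnwire.RawFeatureVector types.
type Set int

type featureVector struct {
	bits map[int]bool
}

// clone returns an independent copy so callers cannot mutate shared state.
func (v *featureVector) clone() *featureVector {
	out := &featureVector{bits: make(map[int]bool, len(v.bits))}
	for bit, set := range v.bits {
		out.bits[bit] = set
	}
	return out
}

type manager struct {
	// fsets is populated once at startup and treated as read-only after.
	fsets map[Set]*featureVector
}

// get fulfills a request by handing out a clone of the internal vector.
func (m *manager) get(s Set) *featureVector {
	if v, ok := m.fsets[s]; ok {
		return v.clone()
	}
	return &featureVector{bits: make(map[int]bool)}
}
```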

View File

@ -112,7 +112,7 @@ func explicitNegotiateCommitmentType(channelType lnwire.ChannelType,
// implicitNegotiateCommitmentType negotiates the commitment type of a channel // implicitNegotiateCommitmentType negotiates the commitment type of a channel
// implicitly by choosing the latest type supported by the local and remote // implicitly by choosing the latest type supported by the local and remote
// fetures. // features.
func implicitNegotiateCommitmentType(local, func implicitNegotiateCommitmentType(local,
remote *lnwire.FeatureVector) (*lnwire.ChannelType, lnwallet.CommitmentType) { remote *lnwire.FeatureVector) (*lnwire.ChannelType, lnwallet.CommitmentType) {
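In words, implicit negotiation walks a newest-first list of commitment types and picks the first one whose feature bit both sides advertise, falling back to the legacy type. A minimal sketch of that selection loop with hypothetical stand-in types (not the real `lnwire`/`lnwallet` API):

```go
// Hypothetical stand-ins; the real code works on lnwire feature vectors.
type featureBit int

type commitmentType struct {
	name string
	bit  featureBit
}

// Ordered newest-first, so the first mutually supported entry wins.
var knownTypes = []commitmentType{
	{name: "anchors-zero-fee-htlc", bit: 22},
	{name: "static-remote-key", bit: 12},
	{name: "legacy", bit: -1}, // always-supported fallback
}

func implicitNegotiate(local, remote map[featureBit]bool) commitmentType {
	for _, ct := range knownTypes {
		// The legacy fallback needs no feature bit at all.
		if ct.bit < 0 {
			return ct
		}
		if local[ct.bit] && remote[ct.bit] {
			return ct
		}
	}
	// Unreachable because of the fallback entry, but keeps the compiler happy.
	return knownTypes[len(knownTypes)-1]
}
```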

View File

@ -2405,7 +2405,7 @@ func (f *Manager) waitForTimeout(completeChan *channeldb.OpenChannel,
} }
// Close the timeout channel and exit if the block is // Close the timeout channel and exit if the block is
// aboce the max height. // above the max height.
if uint32(epoch.Height) >= maxHeight { if uint32(epoch.Height) >= maxHeight {
log.Warnf("Waited for %v blocks without "+ log.Warnf("Waited for %v blocks without "+
"seeing funding transaction confirmed,"+ "seeing funding transaction confirmed,"+

View File

@ -2584,7 +2584,7 @@ func TestFundingManagerPrivateRestart(t *testing.T) {
} }
// TestFundingManagerCustomChannelParameters checks that custom requirements we // TestFundingManagerCustomChannelParameters checks that custom requirements we
// specify during the channel funding flow is preserved correcly on both sides. // specify during the channel funding flow is preserved correctly on both sides.
func TestFundingManagerCustomChannelParameters(t *testing.T) { func TestFundingManagerCustomChannelParameters(t *testing.T) {
t.Parallel() t.Parallel()
@ -2780,7 +2780,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) {
} }
// The max value in flight Alice can have should be maxValueAcceptChannel, // The max value in flight Alice can have should be maxValueAcceptChannel,
// which is the default value and the maxium Bob can offer should be // which is the default value and the maximum Bob can offer should be
// maxValueInFlight. // maxValueInFlight.
if err := assertMaxHtlc(resCtx, if err := assertMaxHtlc(resCtx,
maxValueAcceptChannel, maxValueInFlight); err != nil { maxValueAcceptChannel, maxValueInFlight); err != nil {

View File

@ -438,7 +438,7 @@ func (cm *circuitMap) cleanClosedChannels() error {
return ErrCorruptedCircuitMap return ErrCorruptedCircuitMap
} }
// Delete the ciruit. // Delete the circuit.
for inKey := range circuitKeySet { for inKey := range circuitKeySet {
if err := circuitBkt.Delete(inKey.Bytes()); err != nil { if err := circuitBkt.Delete(inKey.Bytes()); err != nil {
return err return err

View File

@ -85,7 +85,7 @@ func (s *InterceptableSwitch) interceptForward(packet *htlcPacket,
switch htlc := packet.htlc.(type) { switch htlc := packet.htlc.(type) {
case *lnwire.UpdateAddHTLC: case *lnwire.UpdateAddHTLC:
// We are not interested in intercepting initated payments. // We are not interested in intercepting initiated payments.
if packet.incomingChanID == hop.Source { if packet.incomingChanID == hop.Source {
return false return false
} }

View File

@ -3507,7 +3507,7 @@ func TestChannelRetransmission(t *testing.T) {
{"alice", "bob", &lnwire.RevokeAndAck{}, false}, {"alice", "bob", &lnwire.RevokeAndAck{}, false},
// Proceed the payment farther by sending the // Proceed the payment farther by sending the
// fulfilment message and trigger the state // fulfillment message and trigger the state
// update. // update.
{"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false}, {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
{"bob", "alice", &lnwire.CommitSig{}, false}, {"bob", "alice", &lnwire.CommitSig{}, false},
@ -3548,7 +3548,7 @@ func TestChannelRetransmission(t *testing.T) {
{"bob", "alice", &lnwire.CommitSig{}, false}, {"bob", "alice", &lnwire.CommitSig{}, false},
// Proceed the payment farther by sending the // Proceed the payment farther by sending the
// fulfilment message and trigger the state // fulfillment message and trigger the state
// update. // update.
{"alice", "bob", &lnwire.RevokeAndAck{}, false}, {"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false}, {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
@ -5980,7 +5980,7 @@ func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) {
t.Fatal("timeout") t.Fatal("timeout")
case h := <-receiver.registry.settleChan: case h := <-receiver.registry.settleChan:
if hash != h { if hash != h {
t.Fatal("unexpect invoice settled") t.Fatal("unexpected invoice settled")
} }
} }

View File

@ -91,7 +91,7 @@ type mailBoxConfig struct {
// belongs to. // belongs to.
shortChanID lnwire.ShortChannelID shortChanID lnwire.ShortChannelID
// fetchUpdate retreives the most recent channel update for the channel // fetchUpdate retrieves the most recent channel update for the channel
// this mailbox belongs to. // this mailbox belongs to.
fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error)
@ -803,7 +803,7 @@ type mailOrchestrator struct {
// chan_id -> short_chan_id // chan_id -> short_chan_id
// short_chan_id -> mailbox // short_chan_id -> mailbox
// so that Deliver can lookup mailbox directly once live, // so that Deliver can lookup mailbox directly once live,
// but still queriable by channel_id. // but still queryable by channel_id.
// unclaimedPackets maps a live short chan id to queue of packets if no // unclaimedPackets maps a live short chan id to queue of packets if no
// mailbox has been created. // mailbox has been created.
@ -816,7 +816,7 @@ type mailOrchConfig struct {
// properly exit during shutdown. // properly exit during shutdown.
forwardPackets func(chan struct{}, ...*htlcPacket) error forwardPackets func(chan struct{}, ...*htlcPacket) error
// fetchUpdate retreives the most recent channel update for the channel // fetchUpdate retrieves the most recent channel update for the channel
// this mailbox belongs to. // this mailbox belongs to.
fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error)
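The comment sketches a two-level index: channel ID resolves to short channel ID, and short channel ID resolves to the mailbox, so a delivery can be routed by either identifier once the channel is live. A rough illustration of that lookup structure with hypothetical types (not the actual htlcswitch code):

```go
// Hypothetical stand-ins for the real lnwire/htlcswitch types.
type chanID string
type shortChanID uint64

type mailbox struct {
	packets []string
}

type orchestrator struct {
	// chanIndex lets us resolve a channel ID to its short channel ID
	// once the channel is live on chain.
	chanIndex map[chanID]shortChanID

	// mailboxes is keyed by short channel ID, the identifier used on
	// the forwarding path.
	mailboxes map[shortChanID]*mailbox
}

// lookup resolves a mailbox by channel ID, mirroring the idea that delivery
// stays queryable by channel_id even after the channel is live.
func (o *orchestrator) lookup(c chanID) (*mailbox, bool) {
	scid, ok := o.chanIndex[c]
	if !ok {
		return nil, false
	}
	mb, ok := o.mailboxes[scid]
	return mb, ok
}
```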

View File

@ -89,7 +89,7 @@ type htlcPacket struct {
incomingTimeout uint32 incomingTimeout uint32
// outgoingTimeout is the timeout of the proposed outgoing HTLC. This // outgoingTimeout is the timeout of the proposed outgoing HTLC. This
// will be extraced from the hop payload recevived by the incoming // will be extracted from the hop payload received by the incoming
// link. // link.
outgoingTimeout uint32 outgoingTimeout uint32

View File

@ -290,7 +290,7 @@ type Switch struct {
// blockEpochStream is an active block epoch event stream backed by an // blockEpochStream is an active block epoch event stream backed by an
// active ChainNotifier instance. This will be used to retrieve the // active ChainNotifier instance. This will be used to retrieve the
// lastest height of the chain. // latest height of the chain.
blockEpochStream *chainntnfs.BlockEpochEvent blockEpochStream *chainntnfs.BlockEpochEvent
// pendingSettleFails is the set of settle/fail entries that we need to // pendingSettleFails is the set of settle/fail entries that we need to

View File

@ -1546,7 +1546,7 @@ func TestCheckCircularForward(t *testing.T) {
} }
// TestSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes // TestSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes
// along, then we won't attempt to froward it down al ink that isn't yet able // along, then we won't attempt to forward it down a link that isn't yet able
// to forward any HTLC's. // to forward any HTLC's.
func TestSkipIneligibleLinksMultiHopForward(t *testing.T) { func TestSkipIneligibleLinksMultiHopForward(t *testing.T) {
tests := []multiHopFwdTest{ tests := []multiHopFwdTest{
@ -1601,7 +1601,7 @@ func TestSkipIneligibleLinksMultiHopForward(t *testing.T) {
} }
// testSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes // testSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes
// along, then we won't attempt to froward it down al ink that isn't yet able // along, then we won't attempt to forward it down a link that isn't yet able
// to forward any HTLC's. // to forward any HTLC's.
func testSkipIneligibleLinksMultiHopForward(t *testing.T, func testSkipIneligibleLinksMultiHopForward(t *testing.T,
testCase *multiHopFwdTest) { testCase *multiHopFwdTest) {
@ -2563,7 +2563,7 @@ func TestSwitchGetPaymentResult(t *testing.T) {
t.Fatalf("unable to store result: %v", err) t.Fatalf("unable to store result: %v", err)
} }
// The result should be availble. // The result should be available.
select { select {
case res, ok := <-resultChan: case res, ok := <-resultChan:
if !ok { if !ok {

View File

@ -328,7 +328,7 @@ func SenderHTLCScript(senderHtlcKey, receiverHtlcKey,
// HTLC to claim the output with knowledge of the revocation private key in the // HTLC to claim the output with knowledge of the revocation private key in the
// scenario that the sender of the HTLC broadcasts a previously revoked // scenario that the sender of the HTLC broadcasts a previously revoked
// commitment transaction. A valid spend requires knowledge of the private key // commitment transaction. A valid spend requires knowledge of the private key
// that corresponds to their revocation base point and also the private key fro // that corresponds to their revocation base point and also the private key from
// the per commitment point, and a valid signature under the combined public // the per commitment point, and a valid signature under the combined public
// key. // key.
func SenderHtlcSpendRevokeWithKey(signer Signer, signDesc *SignDescriptor, func SenderHtlcSpendRevokeWithKey(signer Signer, signDesc *SignDescriptor,

View File

@ -84,7 +84,7 @@ const (
// us to sweep an HTLC output that we extended to a party, but was // us to sweep an HTLC output that we extended to a party, but was
// never fulfilled. This _is_ the HTLC output directly on our // never fulfilled. This _is_ the HTLC output directly on our
// commitment transaction, and the input to the second-level HTLC // commitment transaction, and the input to the second-level HTLC
// tiemout transaction. It can only be spent after CLTV expiry, and // timeout transaction. It can only be spent after CLTV expiry, and
// commitment confirmation. // commitment confirmation.
HtlcOfferedTimeoutSecondLevelInputConfirmed StandardWitnessType = 15 HtlcOfferedTimeoutSecondLevelInputConfirmed StandardWitnessType = 15

View File

@ -31,7 +31,7 @@ type invoiceExpiryTs struct {
} }
// Less implements PriorityQueueItem.Less such that the top item in the // Less implements PriorityQueueItem.Less such that the top item in the
// priorty queue will be the one that expires next. // priority queue will be the one that expires next.
func (e invoiceExpiryTs) Less(other queue.PriorityQueueItem) bool { func (e invoiceExpiryTs) Less(other queue.PriorityQueueItem) bool {
return e.Expiry.Before(other.(*invoiceExpiryTs).Expiry) return e.Expiry.Before(other.(*invoiceExpiryTs).Expiry)
} }
@ -58,10 +58,10 @@ func (b invoiceExpiryHeight) expired(currentHeight, delta uint32) bool {
return currentHeight+delta >= b.expiryHeight return currentHeight+delta >= b.expiryHeight
} }
// InvoiceExpiryWatcher handles automatic invoice cancellation of expried // InvoiceExpiryWatcher handles automatic invoice cancellation of expired
// invoices. Upon start InvoiceExpiryWatcher will retrieve all pending (not yet // invoices. Upon start InvoiceExpiryWatcher will retrieve all pending (not yet
// settled or canceled) invoices invoices to its watcing queue. When a new // settled or canceled) invoices to its watching queue. When a new
// invoice is added to the InvoiceRegistry, it'll be forarded to the // invoice is added to the InvoiceRegistry, it'll be forwarded to the
// InvoiceExpiryWatcher and will end up in the watching queue as well. // InvoiceExpiryWatcher and will end up in the watching queue as well.
// If any of the watched invoices expire, they'll be removed from the watching // If any of the watched invoices expire, they'll be removed from the watching
// queue and will be cancelled through InvoiceRegistry.CancelInvoice(). // queue and will be cancelled through InvoiceRegistry.CancelInvoice().
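The `Less` method above orders the queue so the invoice expiring soonest is always at the top, which is what lets the watcher cancel expired invoices in order. A self-contained sketch of the same ordering using the standard library's `container/heap` (a generic example, not the real `queue` package):

```go
package main

import (
	"container/heap"
	"fmt"
	"time"
)

// expiryItem mirrors the idea of invoiceExpiryTs: an invoice reference
// paired with the timestamp at which it should be cancelled.
type expiryItem struct {
	invoiceID string
	expiry    time.Time
}

// expiryHeap orders items so the soonest expiry is popped first.
type expiryHeap []expiryItem

func (h expiryHeap) Len() int            { return len(h) }
func (h expiryHeap) Less(i, j int) bool  { return h[i].expiry.Before(h[j].expiry) }
func (h expiryHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *expiryHeap) Push(x interface{}) { *h = append(*h, x.(expiryItem)) }
func (h *expiryHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

func main() {
	h := &expiryHeap{}
	heap.Init(h)
	heap.Push(h, expiryItem{"inv-b", time.Now().Add(2 * time.Hour)})
	heap.Push(h, expiryItem{"inv-a", time.Now().Add(30 * time.Minute)})

	// inv-a expires first, so it is the next candidate for cancellation.
	next := heap.Pop(h).(expiryItem)
	fmt.Println("next to expire:", next.invoiceID)
}
```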

View File

@ -630,7 +630,7 @@ func (i *InvoiceRegistry) cancelSingleHtlc(invoiceRef channeldb.InvoiceRef,
updateInvoice := func(invoice *channeldb.Invoice) ( updateInvoice := func(invoice *channeldb.Invoice) (
*channeldb.InvoiceUpdateDesc, error) { *channeldb.InvoiceUpdateDesc, error) {
// Only allow individual htlc cancelation on open invoices. // Only allow individual htlc cancellation on open invoices.
if invoice.State != channeldb.ContractOpen { if invoice.State != channeldb.ContractOpen {
log.Debugf("cancelSingleHtlc: invoice %v no longer "+ log.Debugf("cancelSingleHtlc: invoice %v no longer "+
"open", invoiceRef) "open", invoiceRef)
@ -669,7 +669,7 @@ func (i *InvoiceRegistry) cancelSingleHtlc(invoiceRef channeldb.InvoiceRef,
htlcState = htlc.State htlcState = htlc.State
} }
// Cancelation is only possible if the htlc wasn't already // Cancellation is only possible if the htlc wasn't already
// resolved. // resolved.
if htlcState != channeldb.HtlcStateAccepted { if htlcState != channeldb.HtlcStateAccepted {
log.Debugf("cancelSingleHtlc: htlc %v on invoice %v "+ log.Debugf("cancelSingleHtlc: htlc %v on invoice %v "+
@ -1248,7 +1248,7 @@ func shouldCancel(state channeldb.ContractState, cancelAccepted bool) bool {
} }
// If the invoice is accepted, we should only cancel if we want to // If the invoice is accepted, we should only cancel if we want to
// force cancelation of accepted invoices. // force cancellation of accepted invoices.
return cancelAccepted return cancelAccepted
} }
@ -1396,7 +1396,7 @@ type InvoiceSubscription struct {
// StartingInvoiceIndex field. // StartingInvoiceIndex field.
NewInvoices chan *channeldb.Invoice NewInvoices chan *channeldb.Invoice
// SettledInvoices is a channel that we'll use to send all setted // SettledInvoices is a channel that we'll use to send all settled
// invoices with an invoices index greater than the specified // invoices with an invoices index greater than the specified
// StartingInvoiceIndex field. // StartingInvoiceIndex field.
SettledInvoices chan *channeldb.Invoice SettledInvoices chan *channeldb.Invoice

View File

@ -183,10 +183,10 @@ func TestSettleInvoice(t *testing.T) {
// Try to cancel. // Try to cancel.
err = ctx.registry.CancelInvoice(testInvoicePaymentHash) err = ctx.registry.CancelInvoice(testInvoicePaymentHash)
if err != channeldb.ErrInvoiceAlreadySettled { if err != channeldb.ErrInvoiceAlreadySettled {
t.Fatal("expected cancelation of a settled invoice to fail") t.Fatal("expected cancellation of a settled invoice to fail")
} }
// As this is a direct sette, we expect nothing on the hodl chan. // As this is a direct settle, we expect nothing on the hodl chan.
select { select {
case <-hodlChan: case <-hodlChan:
t.Fatal("unexpected resolution") t.Fatal("unexpected resolution")
@ -325,10 +325,10 @@ func testCancelInvoice(t *testing.T, gc bool) {
require.Equal(t, testCurrentHeight, failResolution.AcceptHeight) require.Equal(t, testCurrentHeight, failResolution.AcceptHeight)
} }
// TestCancelInvoice tests cancelation of an invoice and related notifications. // TestCancelInvoice tests cancellation of an invoice and related notifications.
func TestCancelInvoice(t *testing.T) { func TestCancelInvoice(t *testing.T) {
// Test cancellation both with garbage collection (meaning that canceled // Test cancellation both with garbage collection (meaning that canceled
// invoice will be deleted) and without (meain it'll be kept). // invoice will be deleted) and without (meaning it'll be kept).
t.Run("garbage collect", func(t *testing.T) { t.Run("garbage collect", func(t *testing.T) {
testCancelInvoice(t, true) testCancelInvoice(t, true)
}) })
@ -507,7 +507,7 @@ func TestSettleHoldInvoice(t *testing.T) {
// Try to cancel. // Try to cancel.
err = registry.CancelInvoice(testInvoicePaymentHash) err = registry.CancelInvoice(testInvoicePaymentHash)
if err == nil { if err == nil {
t.Fatal("expected cancelation of a settled invoice to fail") t.Fatal("expected cancellation of a settled invoice to fail")
} }
} }
@ -1074,7 +1074,7 @@ func TestOldInvoiceRemovalOnStart(t *testing.T) {
i := 0 i := 0
for paymentHash, invoice := range existingInvoices.expiredInvoices { for paymentHash, invoice := range existingInvoices.expiredInvoices {
// Mark half of the invoices as settled, the other hald as // Mark half of the invoices as settled, the other half as
// canceled. // canceled.
if i%2 == 0 { if i%2 == 0 {
invoice.State = channeldb.ContractSettled invoice.State = channeldb.ContractSettled
@ -1225,7 +1225,7 @@ func testHeightExpiryWithRegistry(t *testing.T, numParts int, settle bool) {
} }
// If we did not settle the invoice before its expiry, we now expect // If we did not settle the invoice before its expiry, we now expect
// a cancelation. // a cancellation.
expectedState := channeldb.ContractSettled expectedState := channeldb.ContractSettled
if !settle { if !settle {
expectedState = channeldb.ContractCanceled expectedState = channeldb.ContractCanceled

View File

@ -286,7 +286,7 @@ type HTLCPreimages = map[channeldb.CircuitKey]lntypes.Preimage
// verifies that all derived child hashes match the payment hashes of the HTLCs // verifies that all derived child hashes match the payment hashes of the HTLCs
// in the set. This method is meant to be called after receiving the full amount // in the set. This method is meant to be called after receiving the full amount
// committed to via mpp_total_msat. This method will return a fail resolution if // committed to via mpp_total_msat. This method will return a fail resolution if
// any of the child hashes fail to matche theire corresponding HTLCs. // any of the child hashes fail to match their corresponding HTLCs.
func reconstructAMPPreimages(ctx *invoiceUpdateCtx, func reconstructAMPPreimages(ctx *invoiceUpdateCtx,
htlcSet HTLCSet) (HTLCPreimages, *HtlcFailResolution) { htlcSet HTLCSet) (HTLCPreimages, *HtlcFailResolution) {

View File

@ -9,7 +9,7 @@ package lncfg
type ProtocolOptions struct { type ProtocolOptions struct {
// LegacyProtocol is a sub-config that houses all the legacy protocol // LegacyProtocol is a sub-config that houses all the legacy protocol
// options. These are mostly used for integration tests as most modern // options. These are mostly used for integration tests as most modern
// nodes shuld always run with them on by default. // nodes should always run with them on by default.
LegacyProtocol `group:"legacy" namespace:"legacy"` LegacyProtocol `group:"legacy" namespace:"legacy"`
// ExperimentalProtocol is a sub-config that houses any experimental // ExperimentalProtocol is a sub-config that houses any experimental

View File

@ -4,7 +4,7 @@
package lncfg package lncfg
// Legacy is a sub-config that houses all the legacy protocol options. These // Legacy is a sub-config that houses all the legacy protocol options. These
// are mostly used for integration tests as most modern nodes shuld always run // are mostly used for integration tests as most modern nodes should always run
// with them on by default. // with them on by default.
type LegacyProtocol struct { type LegacyProtocol struct {
} }

View File

@ -4,7 +4,7 @@
package lncfg package lncfg
// Legacy is a sub-config that houses all the legacy protocol options. These // Legacy is a sub-config that houses all the legacy protocol options. These
// are mostly used for integration tests as most modern nodes shuld always run // are mostly used for integration tests as most modern nodes should always run
// with them on by default. // with them on by default.
type LegacyProtocol struct { type LegacyProtocol struct {
// LegacyOnionFormat if set to true, then we won't signal // LegacyOnionFormat if set to true, then we won't signal

View File

@ -9,7 +9,7 @@ package lncfg
type ProtocolOptions struct { type ProtocolOptions struct {
// LegacyProtocol is a sub-config that houses all the legacy protocol // LegacyProtocol is a sub-config that houses all the legacy protocol
// options. These are mostly used for integration tests as most modern // options. These are mostly used for integration tests as most modern
// nodes shuld always run with them on by default. // nodes should always run with them on by default.
LegacyProtocol `group:"legacy" namespace:"legacy"` LegacyProtocol `group:"legacy" namespace:"legacy"`
// ExperimentalProtocol is a sub-config that houses any experimental // ExperimentalProtocol is a sub-config that houses any experimental

lnd.go
View File

@ -645,7 +645,7 @@ func getTLSConfig(cfg *Config) ([]grpc.ServerOption, []grpc.DialOption,
return nil, nil, nil, nil, err return nil, nil, nil, nil, err
} }
// We check whether the certifcate we have on disk match the IPs and // We check whether the certificate we have on disk match the IPs and
// domains specified by the config. If the extra IPs or domains have // domains specified by the config. If the extra IPs or domains have
// changed from when the certificate was created, we will refresh the // changed from when the certificate was created, we will refresh the
// certificate if auto refresh is active. // certificate if auto refresh is active.

View File

@ -36,7 +36,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
} }
// Before we try to make the new service instance, we'll perform // Before we try to make the new service instance, we'll perform
// some sanity checks on the arguments to ensure that they're useable. // some sanity checks on the arguments to ensure that they're usable.
switch { switch {
case config.Manager == nil: case config.Manager == nil:
return nil, nil, fmt.Errorf("Manager must be set to create " + return nil, nil, fmt.Errorf("Manager must be set to create " +

View File

@ -33,7 +33,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
} }
// Before we try to make the new router service instance, we'll perform // Before we try to make the new router service instance, we'll perform
// some sanity checks on the arguments to ensure that they're useable. // some sanity checks on the arguments to ensure that they're usable.
switch { switch {
case config.Router == nil: case config.Router == nil:
return nil, nil, fmt.Errorf("Router must be set to create " + return nil, nil, fmt.Errorf("Router must be set to create " +

View File

@ -59,7 +59,7 @@ func newForwardInterceptor(server *Server, stream Router_HtlcInterceptorServer)
} }
// run sends the intercepted packets to the client and receives the // run sends the intercepted packets to the client and receives the
// corersponding responses. On one hand it regsitered itself as an interceptor // corresponding responses. On one hand it registered itself as an interceptor
// that receives the switch packets and on the other hand launches a go routine // that receives the switch packets and on the other hand launches a go routine
// to read from the client stream. // to read from the client stream.
// To coordinate all this and make sure it is safe for concurrent access all // To coordinate all this and make sure it is safe for concurrent access all

View File

@ -132,7 +132,7 @@ type MissionControl interface {
// QueryRoutes attempts to query the daemons' Channel Router for a possible // QueryRoutes attempts to query the daemons' Channel Router for a possible
// route to a target destination capable of carrying a specific amount of // route to a target destination capable of carrying a specific amount of
// satoshis within the route's flow. The retuned route contains the full // satoshis within the route's flow. The returned route contains the full
// details required to craft and send an HTLC, also including the necessary // details required to craft and send an HTLC, also including the necessary
// information that should be present within the Sphinx packet encapsulated // information that should be present within the Sphinx packet encapsulated
// within the HTLC. // within the HTLC.
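To make that description concrete, here is a rough sketch of calling `QueryRoutes` over gRPC from Go. It assumes `conn` is an authenticated `*grpc.ClientConn`, the destination public key is a placeholder, and only a couple of request fields are shown for illustration.

```go
import (
	"context"
	"log"

	"github.com/lightningnetwork/lnd/lnrpc"
	"google.golang.org/grpc"
)

// findRoute asks the daemon's router for possible routes to a destination.
// Sketch only: TLS/macaroon setup for `conn` is omitted.
func findRoute(ctx context.Context, conn *grpc.ClientConn) error {
	client := lnrpc.NewLightningClient(conn)

	resp, err := client.QueryRoutes(ctx, &lnrpc.QueryRoutesRequest{
		// Hex-encoded public key of the destination node (placeholder).
		PubKey: "02abc...",
		// Amount to send, in satoshis.
		Amt: 10_000,
	})
	if err != nil {
		return err
	}

	for _, route := range resp.Routes {
		log.Printf("route with %d hops, total fees %d msat",
			len(route.Hops), route.TotalFeesMsat)
	}
	return nil
}
```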

View File

@ -666,7 +666,7 @@ func getMsatPairValue(msatValue lnwire.MilliSatoshi,
return msatValue, nil return msatValue, nil
} }
// If we have no msatValue, we can just return our sate value even if // If we have no msatValue, we can just return our state value even if
// it is zero, because it's impossible that we have mismatched values. // it is zero, because it's impossible that we have mismatched values.
if msatValue == 0 { if msatValue == 0 {
return lnwire.MilliSatoshi(satValue * 1000), nil return lnwire.MilliSatoshi(satValue * 1000), nil
@ -879,7 +879,7 @@ func (s *Server) SubscribeHtlcEvents(req *SubscribeHtlcEventsRequest,
// requests to the caller. // requests to the caller.
// Upon connection it does the following: // Upon connection it does the following:
// 1. Check if there is already a live stream, if yes it rejects the request. // 1. Check if there is already a live stream, if yes it rejects the request.
// 2. Regsitered a ForwardInterceptor // 2. Registered a ForwardInterceptor
// 3. Delivers to the caller every √√ and detect his answer. // 3. Delivers to the caller every √√ and detect his answer.
// It uses a local implementation of holdForwardsStore to keep all the hold // It uses a local implementation of holdForwardsStore to keep all the hold
// forwards and find them when manual resolution is later needed. // forwards and find them when manual resolution is later needed.

View File

@ -36,7 +36,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
} }
// Before we try to make the new signer service instance, we'll perform // Before we try to make the new signer service instance, we'll perform
// some sanity checks on the arguments to ensure that they're useable. // some sanity checks on the arguments to ensure that they're usable.
switch { switch {
// If the macaroon service is set (we should use macaroons), then // If the macaroon service is set (we should use macaroons), then

View File

@ -37,7 +37,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
// Before we try to make the new WalletKit service instance, we'll // Before we try to make the new WalletKit service instance, we'll
// perform some sanity checks on the arguments to ensure that they're // perform some sanity checks on the arguments to ensure that they're
// useable. // usable.
switch { switch {
case config.MacService != nil && config.NetworkDir == "": case config.MacService != nil && config.NetworkDir == "":
return nil, nil, fmt.Errorf("NetworkDir must be set to " + return nil, nil, fmt.Errorf("NetworkDir must be set to " +

View File

@ -175,7 +175,7 @@ func (c *Handler) GetInfo(ctx context.Context,
} }
// isActive returns nil if the tower backend is initialized, and the Handler can // isActive returns nil if the tower backend is initialized, and the Handler can
// proccess RPC requests. // process RPC requests.
func (c *Handler) isActive() error { func (c *Handler) isActive() error {
if c.cfg.Active { if c.cfg.Active {
return nil return nil

View File

@ -34,7 +34,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
} }
// Before we try to make the new service instance, we'll perform // Before we try to make the new service instance, we'll perform
// some sanity checks on the arguments to ensure that they're useable. // some sanity checks on the arguments to ensure that they're usable.
switch { switch {
case config.Resolver == nil: case config.Resolver == nil:
return nil, nil, errors.New("a lncfg.TCPResolver is required") return nil, nil, errors.New("a lncfg.TCPResolver is required")

View File

@ -819,7 +819,7 @@ func (hn *HarnessNode) Init(
initReq.StatelessInit, response.AdminMacaroon, initReq.StatelessInit, response.AdminMacaroon,
) )
if err != nil { if err != nil {
return nil, fmt.Errorf("faied to init: %w", err) return nil, fmt.Errorf("failed to init: %w", err)
} }
return response, nil return response, nil

View File

@ -163,7 +163,7 @@ func testSendPaymentAMPInvoiceCase(net *lntest.NetworkHarness, t *harnessTest,
require.NoError(t.t, err) require.NoError(t.t, err)
// Also fetch Bob's invoice from ListInvoices and assert it is equal to // Also fetch Bob's invoice from ListInvoices and assert it is equal to
// the one recevied via the subscription. // the one received via the subscription.
invoiceResp, err := ctx.bob.ListInvoices( invoiceResp, err := ctx.bob.ListInvoices(
ctxb, &lnrpc.ListInvoiceRequest{}, ctxb, &lnrpc.ListInvoiceRequest{},
) )
@ -695,7 +695,7 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
require.NoError(t.t, err) require.NoError(t.t, err)
// Also fetch Bob's invoice from ListInvoices and assert it is equal to // Also fetch Bob's invoice from ListInvoices and assert it is equal to
// the one recevied via the subscription. // the one received via the subscription.
invoiceResp, err := ctx.bob.ListInvoices( invoiceResp, err := ctx.bob.ListInvoices(
ctxb, &lnrpc.ListInvoiceRequest{}, ctxb, &lnrpc.ListInvoiceRequest{},
) )

View File

@ -1199,7 +1199,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
} }
} }
// Check that each HTLC output was spent exactly onece. // Check that each HTLC output was spent exactly once.
for op, num := range htlcTxOutpointSet { for op, num := range htlcTxOutpointSet {
if num != 1 { if num != 1 {
t.Fatalf("HTLC outpoint %v was spent %v times", op, num) t.Fatalf("HTLC outpoint %v was spent %v times", op, num)

View File

@ -393,7 +393,7 @@ func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned b
// Bob stimmy. // Bob stimmy.
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, bob) net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, bob)
// Assert that Bob has the correct sync type before proceeeding. // Assert that Bob has the correct sync type before proceeding.
if pinned { if pinned {
assertSyncType(t, alice, bobPubkey, lnrpc.Peer_PINNED_SYNC) assertSyncType(t, alice, bobPubkey, lnrpc.Peer_PINNED_SYNC)
} else { } else {
@ -426,7 +426,7 @@ func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned b
// Ensure that a new update for both created edges is properly // Ensure that a new update for both created edges is properly
// dispatched to our registered client. // dispatched to our registered client.
case graphUpdate := <-graphSub.updateChan: case graphUpdate := <-graphSub.updateChan:
// Process all channel updates prsented in this update // Process all channel updates presented in this update
// message. // message.
for _, chanUpdate := range graphUpdate.ChannelUpdates { for _, chanUpdate := range graphUpdate.ChannelUpdates {
switch chanUpdate.AdvertisingNode { switch chanUpdate.AdvertisingNode {

View File

@ -807,7 +807,7 @@ func testUpdateChannelPolicyForPrivateChannel(net *lntest.NetworkHarness,
assertAmountPaid(t, "Bob(local) [private=>] Carol(remote)", assertAmountPaid(t, "Bob(local) [private=>] Carol(remote)",
net.Bob, bobFundPoint, paymentAmt, 0) net.Bob, bobFundPoint, paymentAmt, 0)
// Calcuate the amount in satoshis. // Calculate the amount in satoshis.
amtExpected := int64(paymentAmt + baseFeeMSat/1000) amtExpected := int64(paymentAmt + baseFeeMSat/1000)
// Bob should have received 20k satoshis + fee from Alice. // Bob should have received 20k satoshis + fee from Alice.

View File

@ -424,7 +424,7 @@ func testForwardInterceptorBasic(net *lntest.NetworkHarness, t *harnessTest) {
}) })
return err == nil && len(channels.Channels) > 0 return err == nil && len(channels.Channels) > 0
}, defaultTimeout) }, defaultTimeout)
require.NoError(t.t, err, "alice <> bob channel didnt re-activate") require.NoError(t.t, err, "alice <> bob channel didn't re-activate")
} }

View File

@ -213,7 +213,7 @@ func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness,
require.NoError(t.t, err, "bob didn't report channel") require.NoError(t.t, err, "bob didn't report channel")
cType, err := channelCommitType(alice, chanPoint) cType, err := channelCommitType(alice, chanPoint)
require.NoError(t.t, err, "unable to get channnel type") require.NoError(t.t, err, "unable to get channel type")
// With the channel open, ensure that the amount specified above has // With the channel open, ensure that the amount specified above has
// properly been pushed to Bob. // properly been pushed to Bob.

View File

@ -14,7 +14,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
// testHoldInvoiceForceClose tests cancelation of accepted hold invoices which // testHoldInvoiceForceClose tests cancellation of accepted hold invoices which
// would otherwise trigger force closes when they expire. // would otherwise trigger force closes when they expire.
func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) { func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) {
ctxb, cancel := context.WithCancel(context.Background()) ctxb, cancel := context.WithCancel(context.Background())

View File

@ -289,7 +289,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
payment.Status) payment.Status)
} }
case <-time.After(5 * time.Second): case <-time.After(5 * time.Second):
t.Fatalf("in flight status not recevied") t.Fatalf("in flight status not received")
} }
} }
@ -341,7 +341,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
payment = p payment = p
case <-time.After(5 * time.Second): case <-time.After(5 * time.Second):
t.Fatalf("in flight status not recevied") t.Fatalf("in flight status not received")
} }
} }

View File

@ -1094,7 +1094,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
daveBalance := daveBalResp.ConfirmedBalance daveBalance := daveBalResp.ConfirmedBalance
if daveBalance <= daveStartingBalance { if daveBalance <= daveStartingBalance {
return fmt.Errorf("expected dave to have balance "+ return fmt.Errorf("expected dave to have balance "+
"above %d, intead had %v", daveStartingBalance, "above %d, instead had %v", daveStartingBalance,
daveBalance) daveBalance)
} }

View File

@ -168,7 +168,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
err = restartBob() err = restartBob()
require.NoError(t.t, err) require.NoError(t.t, err)
// After the force close transacion is mined, transactions will be // After the force close transaction is mined, transactions will be
// broadcast by both Bob and Carol. // broadcast by both Bob and Carol.
switch c { switch c {
// Carol will broadcast her second level HTLC transaction and Bob will // Carol will broadcast her second level HTLC transaction and Bob will

View File

@ -184,7 +184,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
err = restartBob() err = restartBob()
require.NoError(t.t, err) require.NoError(t.t, err)
// After the force close transacion is mined, we should expect Bob and // After the force close transaction is mined, we should expect Bob and
// Carol to broadcast some transactions depending on the channel // Carol to broadcast some transactions depending on the channel
// commitment type. // commitment type.
switch c { switch c {

View File

@ -52,7 +52,7 @@ func testWipeForwardingPackages(net *lntest.NetworkHarness,
// close channel should now become pending force closed channel. // close channel should now become pending force closed channel.
pendingChan = assertPendingForceClosedChannel(t.t, net.Bob) pendingChan = assertPendingForceClosedChannel(t.t, net.Bob)
// Check the forwarding pacakges are deleted. // Check the forwarding packages are deleted.
require.Zero(t.t, pendingChan.NumForwardingPackages) require.Zero(t.t, pendingChan.NumForwardingPackages)
// For Alice, the forwarding packages should have been wiped too. // For Alice, the forwarding packages should have been wiped too.
@ -87,7 +87,7 @@ func testWipeForwardingPackages(net *lntest.NetworkHarness,
// really contains is channels whose closing tx has been broadcast. // really contains is channels whose closing tx has been broadcast.
pendingChan = assertPendingForceClosedChannel(t.t, net.Bob) pendingChan = assertPendingForceClosedChannel(t.t, net.Bob)
// Check the forwarding pacakges are deleted. // Check the forwarding packages are deleted.
require.Zero(t.t, pendingChan.NumForwardingPackages) require.Zero(t.t, pendingChan.NumForwardingPackages)
// Mine a block to confirm sweep transactions such that they // Mine a block to confirm sweep transactions such that they

View File

@ -2024,7 +2024,7 @@ func (lc *LightningChannel) restorePendingRemoteUpdates(
logIdx) logIdx)
} }
// We previously restored Adds along with all the other upates, // We previously restored Adds along with all the other updates,
// but this Add restoration was a no-op as every single one of // but this Add restoration was a no-op as every single one of
// these Adds was already restored since they're all incoming // these Adds was already restored since they're all incoming
// htlcs on the local commitment. // htlcs on the local commitment.
@ -2143,7 +2143,7 @@ func (lc *LightningChannel) restorePendingLocalUpdates(
} }
// At this point the restored update's logIndex must be equal // At this point the restored update's logIndex must be equal
// to the update log, otherwise somthing is horribly wrong. // to the update log, otherwise something is horribly wrong.
if payDesc.LogIndex != lc.localUpdateLog.logIndex { if payDesc.LogIndex != lc.localUpdateLog.logIndex {
panic(fmt.Sprintf("log index mismatch: "+ panic(fmt.Sprintf("log index mismatch: "+
"%v vs %v", payDesc.LogIndex, "%v vs %v", payDesc.LogIndex,
@ -3460,7 +3460,7 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
} }
// Now that we know the total value of added HTLCs, we check // Now that we know the total value of added HTLCs, we check
// that this satisfy the MaxPendingAmont contraint. // that this satisfies the MaxPendingAmount constraint.
if amtInFlight > constraints.MaxPendingAmount { if amtInFlight > constraints.MaxPendingAmount {
return ErrMaxPendingAmount return ErrMaxPendingAmount
} }
@ -6780,7 +6780,7 @@ func (lc *LightningChannel) availableCommitmentBalance(view *htlcView,
// than the htlcCommitFee, where we could still be sending dust // than the htlcCommitFee, where we could still be sending dust
// HTLCs, but we return 0 in this case. This is to avoid // HTLCs, but we return 0 in this case. This is to avoid
// lowering our balance even further, as this takes us into a // lowering our balance even further, as this takes us into a
// bad state wehere neither we nor our channel counterparty can // bad state where neither we nor our channel counterparty can
// add HTLCs. // add HTLCs.
if ourBalance < htlcCommitFee { if ourBalance < htlcCommitFee {
return 0, commitWeight return 0, commitWeight

View File

@ -654,7 +654,7 @@ func testCommitHTLCSigTieBreak(t *testing.T, restart bool) {
lastIndex = htlc.OutputIndex lastIndex = htlc.OutputIndex
} }
// If requsted, restart Alice so that we can test that the necessary // If requested, restart Alice so that we can test that the necessary
// indexes can be reconstructed before needing to validate the // indexes can be reconstructed before needing to validate the
// signatures from Bob. // signatures from Bob.
if restart { if restart {
@ -1172,7 +1172,7 @@ func TestForceCloseDustOutput(t *testing.T) {
defer cleanUp() defer cleanUp()
// We set both node's channel reserves to 0, to make sure // We set both node's channel reserves to 0, to make sure
// they can create small dust ouputs without going under // they can create small dust outputs without going under
// their channel reserves. // their channel reserves.
aliceChannel.channelState.LocalChanCfg.ChanReserve = 0 aliceChannel.channelState.LocalChanCfg.ChanReserve = 0
bobChannel.channelState.LocalChanCfg.ChanReserve = 0 bobChannel.channelState.LocalChanCfg.ChanReserve = 0
@ -5427,7 +5427,7 @@ func TestChanCommitWeightDustHtlcs(t *testing.T) {
return w return w
} }
// Start by getting the initial remote commitment wight seen from // Start by getting the initial remote commitment weight seen from
// Alice's perspective. At this point there are no HTLCs on the // Alice's perspective. At this point there are no HTLCs on the
// commitment. // commitment.
weight1 := remoteCommitWeight(aliceChannel) weight1 := remoteCommitWeight(aliceChannel)
@ -5438,7 +5438,7 @@ func TestChanCommitWeightDustHtlcs(t *testing.T) {
bobDustHtlc := bobDustlimit + htlcSuccessFee - 1 bobDustHtlc := bobDustlimit + htlcSuccessFee - 1
preimg := addHtlc(bobDustHtlc) preimg := addHtlc(bobDustHtlc)
// Now get the current wight of the remote commitment. We expect it to // Now get the current weight of the remote commitment. We expect it to
// not have changed, since the HTLC we added is considered dust. // not have changed, since the HTLC we added is considered dust.
weight2 := remoteCommitWeight(aliceChannel) weight2 := remoteCommitWeight(aliceChannel)
require.Equal(t, weight1, weight2) require.Equal(t, weight1, weight2)
@ -5711,7 +5711,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
t.Fatalf("unable to restart bob: %v", err) t.Fatalf("unable to restart bob: %v", err)
} }
// Readd the Fail to both Alice and Bob's channels, as the non-committed // Re-add the Fail to both Alice and Bob's channels, as the non-committed
// update will not have survived the restart. // update will not have survived the restart.
err = bobChannel.FailHTLC(htlc2.ID, []byte("failreason"), nil, nil, nil) err = bobChannel.FailHTLC(htlc2.ID, []byte("failreason"), nil, nil, nil)
if err != nil { if err != nil {
@ -5723,7 +5723,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
} }
// Have Alice initiate a state transition, which does not include the // Have Alice initiate a state transition, which does not include the
// HTLCs just readded to the channel state. // HTLCs just re-added to the channel state.
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment() aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -6808,7 +6808,7 @@ func TestChanReserveRemoteInitiator(t *testing.T) {
// Set Alice's channel reserve to be 5 BTC-commitfee. This means she // Set Alice's channel reserve to be 5 BTC-commitfee. This means she
// has just enough balance to cover the comitment fee, but not enough // has just enough balance to cover the comitment fee, but not enough
// to add any more HTLCs to the commitment. Although a reserve this // to add any more HTLCs to the commitment. Although a reserve this
// high is unrealistic, a channel can easiliy get into a situation // high is unrealistic, a channel can easily get into a situation
// where the initiator cannot pay for the fee of any more HTLCs. // where the initiator cannot pay for the fee of any more HTLCs.
commitFee := aliceChannel.channelState.LocalCommitment.CommitFee commitFee := aliceChannel.channelState.LocalCommitment.CommitFee
aliceMinReserve := 5*btcutil.SatoshiPerBitcoin - commitFee aliceMinReserve := 5*btcutil.SatoshiPerBitcoin - commitFee
@ -7203,7 +7203,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) {
// signature from Bob yet. // signature from Bob yet.
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil { if err != nil {
t.Fatalf("unable to recive revocation: %v", err) t.Fatalf("unable to receive revocation: %v", err)
} }
// Now make Alice send and sign an additional HTLC. We don't let Bob // Now make Alice send and sign an additional HTLC. We don't let Bob
@ -7709,7 +7709,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
// Alice receives the revocation, ACKing her pending commitment. // Alice receives the revocation, ACKing her pending commitment.
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil { if err != nil {
t.Fatalf("unable to recive revocation: %v", err) t.Fatalf("unable to receive revocation: %v", err)
} }
// However, the HTLC is still not locked into her local commitment, so // However, the HTLC is still not locked into her local commitment, so
@ -7726,7 +7726,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
} }
// At this stage Bob has a pending remote commitment. Make sure // At this stage Bob has a pending remote commitment. Make sure
// restoring at this stage correcly restores the HTLC add commit // restoring at this stage correctly restores the HTLC add commit
// heights. // heights.
bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 0, 1, 1) bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 0, 1, 1)
@ -7746,7 +7746,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil { if err != nil {
t.Fatalf("unable to recive revocation: %v", err) t.Fatalf("unable to receive revocation: %v", err)
} }
// Alice ACKing Bob's pending commitment shouldn't change the heights // Alice ACKing Bob's pending commitment shouldn't change the heights
@ -7790,7 +7790,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
} }
// Since Bob just revoked another commitment, a restoration should // Since Bob just revoked another commitment, a restoration should
// increase the add height of the firt HTLC to 2, as we only keep the // increase the add height of the first HTLC to 2, as we only keep the
// last unrevoked commitment. The new HTLC will also have a local add // last unrevoked commitment. The new HTLC will also have a local add
// height of 2. // height of 2.
bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 0, 2, 1) bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 0, 2, 1)

View File

@ -193,7 +193,7 @@ type ScriptInfo struct {
// CommitScriptToSelf constructs the public key script for the output on the // CommitScriptToSelf constructs the public key script for the output on the
// commitment transaction paying to the "owner" of said commitment transaction. // commitment transaction paying to the "owner" of said commitment transaction.
// The `initiator` argument should correspond to the owner of the commitment // The `initiator` argument should correspond to the owner of the commitment
// tranasction which we are generating the to_local script for. If the other // transaction which we are generating the to_local script for. If the other
// party learns of the preimage to the revocation hash, then they can claim all // party learns of the preimage to the revocation hash, then they can claim all
// the settled funds in the channel, plus the unsettled funds. // the settled funds in the channel, plus the unsettled funds.
func CommitScriptToSelf(chanType channeldb.ChannelType, initiator bool, func CommitScriptToSelf(chanType channeldb.ChannelType, initiator bool,
@ -234,7 +234,7 @@ func CommitScriptToSelf(chanType channeldb.ChannelType, initiator bool,
// CommitScriptToRemote derives the appropriate to_remote script based on the // CommitScriptToRemote derives the appropriate to_remote script based on the
// channel's commitment type. The `initiator` argument should correspond to the // channel's commitment type. The `initiator` argument should correspond to the
// owner of the commitment tranasction which we are generating the to_remote // owner of the commitment transaction which we are generating the to_remote
// script for. The second return value is the CSV delay of the output script, // script for. The second return value is the CSV delay of the output script,
// what must be satisfied in order to spend the output. // what must be satisfied in order to spend the output.
func CommitScriptToRemote(chanType channeldb.ChannelType, initiator bool, func CommitScriptToRemote(chanType channeldb.ChannelType, initiator bool,
@ -340,7 +340,7 @@ func HtlcSecondLevelInputSequence(chanType channeldb.ChannelType) uint32 {
// output for the second-level HTLC transactions. The second level transaction // output for the second-level HTLC transactions. The second level transaction
// act as a sort of covenant, ensuring that a 2-of-2 multi-sig output can only // act as a sort of covenant, ensuring that a 2-of-2 multi-sig output can only
// be spent in a particular way, and to a particular output. The `initiator` // be spent in a particular way, and to a particular output. The `initiator`
// argument should correspond to the owner of the commitment tranasction which // argument should correspond to the owner of the commitment transaction which
// we are generating the to_local script for. // we are generating the to_local script for.
func SecondLevelHtlcScript(chanType channeldb.ChannelType, initiator bool, func SecondLevelHtlcScript(chanType channeldb.ChannelType, initiator bool,
revocationKey, delayKey *btcec.PublicKey, revocationKey, delayKey *btcec.PublicKey,
@ -725,7 +725,7 @@ func (cb *CommitmentBuilder) createUnsignedCommitmentTx(ourBalance,
// spent after a relative block delay or revocation event, and a remote output // spent after a relative block delay or revocation event, and a remote output
// paying the counterparty within the channel, which can be spent immediately // paying the counterparty within the channel, which can be spent immediately
// or after a delay depending on the commitment type. The `initiator` argument // or after a delay depending on the commitment type. The `initiator` argument
// should correspond to the owner of the commitment tranasction we are creating. // should correspond to the owner of the commitment transaction we are creating.
func CreateCommitTx(chanType channeldb.ChannelType, func CreateCommitTx(chanType channeldb.ChannelType,
fundingOutput wire.TxIn, keyRing *CommitmentKeyRing, fundingOutput wire.TxIn, keyRing *CommitmentKeyRing,
localChanCfg, remoteChanCfg *channeldb.ChannelConfig, localChanCfg, remoteChanCfg *channeldb.ChannelConfig,

View File

@ -1221,7 +1221,7 @@ func (l *LightningWallet) handleFundingCancelRequest(req *fundingReserveCancelMs
pendingReservation.Lock() pendingReservation.Lock()
defer pendingReservation.Unlock() defer pendingReservation.Unlock()
// Mark all previously locked outpoints as useable for future funding // Mark all previously locked outpoints as usable for future funding
// requests. // requests.
for _, unusedInput := range pendingReservation.ourContribution.Inputs { for _, unusedInput := range pendingReservation.ourContribution.Inputs {
delete(l.lockedOutPoints, unusedInput.PreviousOutPoint) delete(l.lockedOutPoints, unusedInput.PreviousOutPoint)

View File

@ -223,7 +223,7 @@ If Android Studio tells you that the `aar` file cannot be included into the `app
![separate_gradle_module](docs/separate_gradle_module.png) ![separate_gradle_module](docs/separate_gradle_module.png)
3. Gradle file should countain only these lines: 3. Gradle file should contain only these lines:
```shell ```shell
configurations.maybeCreate("default") configurations.maybeCreate("default")

View File

@ -16,7 +16,7 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
) )
// lndStarted will be used atomically to ensure only a singel lnd instance is // lndStarted will be used atomically to ensure only a single lnd instance is
// attempted to be started at once. // attempted to be started at once.
var lndStarted int32 var lndStarted int32

View File

@ -7,7 +7,7 @@ import (
var ( var (
// private24BitBlock contains the set of private IPv4 addresses within // private24BitBlock contains the set of private IPv4 addresses within
// the 10.0.0.0/8 adddress space. // the 10.0.0.0/8 address space.
private24BitBlock *net.IPNet private24BitBlock *net.IPNet
// private20BitBlock contains the set of private IPv4 addresses within // private20BitBlock contains the set of private IPv4 addresses within

View File

@ -69,12 +69,12 @@ type ChanStatusConfig struct {
Graph ChannelGraph Graph ChannelGraph
// ChanEnableTimeout is the duration a peer's connect must remain stable // ChanEnableTimeout is the duration a peer's connect must remain stable
// before attempting to reenable the channel. // before attempting to re-enable the channel.
// //
// NOTE: This value is only used to verify that the relation between // NOTE: This value is only used to verify that the relation between
// itself, ChanDisableTimeout, and ChanStatusSampleInterval is correct. // itself, ChanDisableTimeout, and ChanStatusSampleInterval is correct.
// The user is still responsible for ensuring that the same duration // The user is still responsible for ensuring that the same duration
// elapses before attempting to reenable a channel. // elapses before attempting to re-enable a channel.
ChanEnableTimeout time.Duration ChanEnableTimeout time.Duration
// ChanDisableTimeout is the duration the manager will wait after // ChanDisableTimeout is the duration the manager will wait after
@ -138,7 +138,7 @@ func NewChanStatusManager(cfg *ChanStatusConfig) (*ChanStatusManager, error) {
// enable_timeout + sample_interval to be less than or equal to the // enable_timeout + sample_interval to be less than or equal to the
// disable_timeout and that all are positive values. A peer that // disable_timeout and that all are positive values. A peer that
// disconnects and reconnects quickly may cause a disable update to be // disconnects and reconnects quickly may cause a disable update to be
// sent, shortly followed by a reenable. Ensuring a healthy separation // sent, shortly followed by a re-enable. Ensuring a healthy separation
// helps dampen the possibility of spamming updates that toggle the // helps dampen the possibility of spamming updates that toggle the
// disable bit for such events. // disable bit for such events.
if cfg.ChanStatusSampleInterval <= 0 { if cfg.ChanStatusSampleInterval <= 0 {
@ -492,7 +492,7 @@ func (m *ChanStatusManager) processAutoRequest(outpoint wire.OutPoint) error {
// scheduled. Once an active channel is determined to be pending-inactive, one // scheduled. Once an active channel is determined to be pending-inactive, one
// of two transitions can follow. Either the channel is disabled because no // of two transitions can follow. Either the channel is disabled because no
// request to enable is received before the scheduled disable is broadcast, or // request to enable is received before the scheduled disable is broadcast, or
// the channel is successfully reenabled and channel is returned to an active // the channel is successfully re-enabled and channel is returned to an active
// state from the POV of the ChanStatusManager. // state from the POV of the ChanStatusManager.
func (m *ChanStatusManager) markPendingInactiveChannels() { func (m *ChanStatusManager) markPendingInactiveChannels() {
channels, err := m.fetchChannels() channels, err := m.fetchChannels()
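The validation described earlier in this file enforces that all three durations are positive and that `ChanEnableTimeout + ChanStatusSampleInterval` does not exceed `ChanDisableTimeout`, so a quick reconnect cannot produce a disable update immediately followed by a re-enable. A small stand-alone sketch of that check, using a hypothetical config struct rather than the real `ChanStatusConfig`:

```go
import (
	"errors"
	"time"
)

// statusTimingConfig is a hypothetical stand-in for the timing fields of
// ChanStatusConfig, used only to illustrate the relation between them.
type statusTimingConfig struct {
	ChanEnableTimeout        time.Duration
	ChanDisableTimeout       time.Duration
	ChanStatusSampleInterval time.Duration
}

// validate mirrors the relation described above: every duration must be
// positive, and enable_timeout + sample_interval must not exceed
// disable_timeout.
func (c statusTimingConfig) validate() error {
	switch {
	case c.ChanStatusSampleInterval <= 0:
		return errors.New("sample interval must be positive")
	case c.ChanEnableTimeout <= 0:
		return errors.New("enable timeout must be positive")
	case c.ChanDisableTimeout <= 0:
		return errors.New("disable timeout must be positive")
	case c.ChanEnableTimeout+c.ChanStatusSampleInterval > c.ChanDisableTimeout:
		return errors.New("enable timeout + sample interval must not " +
			"exceed disable timeout")
	}
	return nil
}
```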

View File

@ -651,7 +651,7 @@ var stateMachineTests = []stateMachineTest{
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
// Simulate reconnect by making channels active. // Simulate reconnect by making channels active.
h.markActive(h.graph.chans()) h.markActive(h.graph.chans())
// Request that all channels be reenabled. // Request that all channels be re-enabled.
h.assertEnables(h.graph.chans(), nil, false) h.assertEnables(h.graph.chans(), nil, false)
// Pending disable should have been canceled, and // Pending disable should have been canceled, and
// no updates sent. Channels remain enabled on the // no updates sent. Channels remain enabled on the

Some files were not shown because too many files have changed in this diff.