mirror of https://github.com/lightningnetwork/lnd.git
synced 2024-11-19 01:43:16 +01:00
commit 1482e5e20a
@@ -498,7 +498,7 @@ func decipherCipherSeed(cipherSeedBytes [EncipheredCipherSeedSize]byte,
 }
 
 // Decipher attempts to decipher the encoded mnemonic by first mapping to the
-// original chipertext, then applying our deciphering scheme. ErrInvalidPass
+// original ciphertext, then applying our deciphering scheme. ErrInvalidPass
 // will be returned if the passphrase is incorrect.
 func (m *Mnemonic) Decipher(pass []byte) ([DecipheredCipherSeedSize]byte, error) {
@@ -339,7 +339,7 @@ func TestAgentHeuristicUpdateSignal(t *testing.T) {
 	// initial check.
 	respondMoreChans(t, testCtx, moreChansResp{0, 0})
 
-	// Next we'll signal that one of the heuristcs have been updated.
+	// Next we'll signal that one of the heuristics have been updated.
 	testCtx.agent.OnHeuristicUpdate(testCtx.heuristic)
 
 	// The update should trigger the agent to ask for a channel budget.so
@@ -1256,7 +1256,7 @@ func TestAgentChannelSizeAllocation(t *testing.T) {
 				"had %v", len(arg.chans))
 		}
 		if arg.balance != testCtx.walletBalance {
-			t.Fatalf("expectd agent to have %v balance, had %v",
+			t.Fatalf("expected agent to have %v balance, had %v",
 				testCtx.walletBalance, arg.balance)
 		}
 	case <-time.After(time.Second * 3):
@@ -51,7 +51,7 @@ func (q *queue) empty() bool {
 
 // BetweennessCentrality is a NodeMetric that calculates node betweenness
 // centrality using Brandes' algorithm. Betweenness centrality for each node
-// is the number of shortest paths passing trough that node, not counting
+// is the number of shortest paths passing through that node, not counting
 // shortest paths starting or ending at that node. This is a useful metric
 // to measure control of individual nodes over the whole network.
 type BetweennessCentrality struct {
@@ -167,7 +167,7 @@ func betweennessCentrality(g *SimpleGraph, s int, centrality []float64) {
 	}
 }
 
-// Refresh recaculates and stores centrality values.
+// Refresh recalculates and stores centrality values.
func (bc *BetweennessCentrality) Refresh(graph ChannelGraph) error {
 	cache, err := NewSimpleGraph(graph)
 	if err != nil {
@@ -186,7 +186,7 @@ func (bc *BetweennessCentrality) Refresh(graph ChannelGraph) error {
 		partial := make([]float64, len(cache.Nodes))
 
 		// Consume the next node, update centrality
-		// parital to avoid unnecessary synchronizaton.
+		// parital to avoid unnecessary synchronization.
 		for node := range work {
 			betweennessCentrality(cache, node, partial)
 		}
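Aside on the two centrality hunks above: the "partial" comment describes a fan-out/reduce pattern in which each worker goroutine accumulates into a private slice and the results are merged once at the end, so the hot loop needs no locks. A minimal, self-contained sketch of that pattern; every name here is invented for illustration, none of this is lnd's code:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const numNodes = 8
	const numWorkers = 4

	work := make(chan int)
	results := make(chan []float64, numWorkers)

	var wg sync.WaitGroup
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			// Each worker owns a private partial slice, so no
			// synchronization is needed while it runs.
			partial := make([]float64, numNodes)
			for node := range work {
				// Stand-in for a per-source centrality pass
				// such as betweennessCentrality(cache, node,
				// partial).
				partial[node] += 1.0
			}
			results <- partial
		}()
	}

	for node := 0; node < numNodes; node++ {
		work <- node
	}
	close(work)
	wg.Wait()
	close(results)

	// Single-threaded reduce step: merge all partials.
	centrality := make([]float64, numNodes)
	for partial := range results {
		for i, v := range partial {
			centrality[i] += v
		}
	}
	fmt.Println(centrality)
}
```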
@@ -35,7 +35,7 @@ var normalizedTestGraphCentrality = []float64{
 	0.2, 0.0, 0.2, 1.0, 0.4, 0.4, 7.0 / 15.0, 0.0, 0.0,
 }
 
-// buildTestGraph builds a test graph from a passed graph desriptor.
+// buildTestGraph builds a test graph from a passed graph descriptor.
 func buildTestGraph(t *testing.T,
 	graph testGraph, desc testGraphDesc) map[int]*btcec.PublicKey {
 
@@ -11,7 +11,7 @@ import (
 var ErrNoPositive = errors.New("no positive weights left")
 
 // weightedChoice draws a random index from the slice of weights, with a
-// probability propotional to the weight at the given index.
+// probability proportional to the weight at the given index.
 func weightedChoice(w []float64) (int, error) {
 	// Calculate the sum of weights.
 	var sum float64
@@ -25,7 +25,7 @@ func weightedChoice(w []float64) (int, error) {
 
 	// Pick a random number in the range [0.0, 1.0) and multiply it with
 	// the sum of weights. Then we'll iterate the weights until the number
-	// goes below 0. This means that each index is picked with a probablity
+	// goes below 0. This means that each index is picked with a probability
 	// equal to their normalized score.
 	//
 	// Example:
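The scheme this comment documents is standard roulette-wheel sampling. A short sketch of how such a weightedChoice could look; this is a reimplementation for illustration, not necessarily lnd's exact code:

```go
package main

import (
	"errors"
	"fmt"
	"math/rand"
)

func weightedChoice(w []float64) (int, error) {
	var sum float64
	for _, weight := range w {
		sum += weight
	}
	if sum <= 0 {
		return 0, errors.New("no positive weights left")
	}

	// r is uniform in [0, sum). Index i is hit with probability
	// w[i]/sum, i.e. its normalized score.
	r := rand.Float64() * sum
	for i, weight := range w {
		r -= weight
		if r < 0 {
			return i, nil
		}
	}
	return len(w) - 1, nil // Guard against float rounding.
}

func main() {
	idx, err := weightedChoice([]float64{0.1, 0.6, 0.3})
	if err != nil {
		panic(err)
	}
	fmt.Println("picked index:", idx)
}
```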
@@ -92,7 +92,7 @@ func (c *WeightedCombAttachment) NodeScores(g ChannelGraph, chans []LocalChannel
 	}
 
 	// We combine the scores given by the sub-heuristics by using the
-	// heruistics' given weight factor.
+	// heuristics' given weight factor.
 	scores := make(map[NodeID]*NodeScore)
 	for nID := range nodes {
 		score := &NodeScore{
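A weighted sum of per-node scores is one natural reading of "combine the scores by using the heuristics' given weight factor". The sketch below assumes that combination rule and invents its own types; it is not lnd's implementation:

```go
package main

import "fmt"

type subHeuristic struct {
	weight float64
	scores map[string]float64 // node ID -> score in [0, 1]
}

func combine(heuristics []subHeuristic) map[string]float64 {
	combined := make(map[string]float64)
	for _, h := range heuristics {
		for node, s := range h.scores {
			// Each sub-heuristic contributes its score scaled
			// by the heuristic's weight factor.
			combined[node] += h.weight * s
		}
	}
	return combined
}

func main() {
	hs := []subHeuristic{
		{weight: 0.75, scores: map[string]float64{"alice": 0.9, "bob": 0.2}},
		{weight: 0.25, scores: map[string]float64{"alice": 0.1, "bob": 0.8}},
	}
	fmt.Println(combine(hs)) // map[alice:0.7 bob:0.35]
}
```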
@@ -32,7 +32,7 @@ func (m *mockSubLogger) SetLogLevels(logLevel string) {
 	m.globalLogLevel = logLevel
 }
 
-// TestParseAndSetDebugLevels tests tha we can properly set the log levels for
+// TestParseAndSetDebugLevels tests that we can properly set the log levels for
 // all andspecified subsystems.
 func TestParseAndSetDebugLevels(t *testing.T) {
 	testCases := []struct {
@@ -26,7 +26,7 @@ var (
 	RawTags string
 
 	// GoVersion stores the go version that the executable was compiled
-	// with. This hsould be set using -ldflags during compilation.
+	// with. This should be set using -ldflags during compilation.
 	GoVersion string
 )
 
@@ -254,7 +254,7 @@ type ConfNtfn struct {
 	dispatched bool
 }
 
-// HistoricalConfDispatch parameterizes a manual rescan for a particular
+// HistoricalConfDispatch parametrizes a manual rescan for a particular
 // transaction/output script. The parameters include the start and end block
 // heights specifying the range of blocks to scan.
 type HistoricalConfDispatch struct {
@@ -408,12 +408,12 @@ type SpendNtfn struct {
 	// an entry for it.
 	HeightHint uint32
 
-	// dispatched signals whether a spend notification has been disptached
+	// dispatched signals whether a spend notification has been dispatched
 	// to the client.
 	dispatched bool
 }
 
-// HistoricalSpendDispatch parameterizes a manual rescan to determine the
+// HistoricalSpendDispatch parametrizes a manual rescan to determine the
 // spending details (if any) of an outpoint/output script. The parameters
 // include the start and end block heights specifying the range of blocks to
 // scan.
@@ -523,7 +523,7 @@ type TxNotifier struct {
 // NewTxNotifier creates a TxNotifier. The current height of the blockchain is
 // accepted as a parameter. The different hint caches (confirm and spend) are
 // used as an optimization in order to retrieve a better starting point when
-// dispatching a recan for a historical event in the chain.
+// dispatching a rescan for a historical event in the chain.
 func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32,
 	confirmHintCache ConfirmHintCache,
 	spendHintCache SpendHintCache) *TxNotifier {
@@ -282,7 +282,7 @@ func TestTxNotifierFutureConfDispatch(t *testing.T) {
 		t.Fatal("Expected confirmation update for tx1")
 	}
 
-	// A confirmation notification for this tranaction should be dispatched,
+	// A confirmation notification for this transaction should be dispatched,
 	// as it only required one confirmation.
 	select {
 	case txConf := <-ntfn1.Event.Confirmed:
@@ -2148,7 +2148,7 @@ func TestTxNotifierSpendHintCache(t *testing.T) {
 		op2Height = 203
 	)
 
-	// Intiialize our TxNotifier instance backed by a height hint cache.
+	// Initialize our TxNotifier instance backed by a height hint cache.
 	hintCache := newMockHintCache()
 	n := chainntnfs.NewTxNotifier(
 		startingHeight, chainntnfs.ReorgSafetyLimit, hintCache,
@@ -2331,7 +2331,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
 		reorgSafety = 10
 	)
 
-	// Intiialize our TxNotifier instance backed by a height hint cache.
+	// Initialize our TxNotifier instance backed by a height hint cache.
 	hintCache := newMockHintCache()
 	n := chainntnfs.NewTxNotifier(
 		startingHeight, reorgSafety, hintCache, hintCache,
@@ -2511,7 +2511,7 @@ func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) {
 	// matured.
 	err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil)
 	if err == nil {
-		t.Fatalf("expcted updating spend details to fail")
+		t.Fatalf("expected updating spend details to fail")
 	}
 
 	// Finally, check that the height hint is still there, unchanged.
@@ -243,7 +243,7 @@ func (r *RPCAcceptor) sendAcceptRequests(errChan chan error,
 	defer close(r.done)
 
 	// Create a map of pending channel IDs to our original open channel
-	// request and a response channel. We keep the original chanel open
+	// request and a response channel. We keep the original channel open
 	// message so that we can validate our response against it.
 	acceptRequests := make(map[[32]byte]*chanAcceptInfo)
 
@@ -287,7 +287,7 @@ func (c ChannelType) HasFundingTx() bool {
 	return c&NoFundingTxBit == 0
 }
 
-// HasAnchors returns true if this channel type has anchor ouputs on its
+// HasAnchors returns true if this channel type has anchor outputs on its
 // commitment.
 func (c ChannelType) HasAnchors() bool {
 	return c&AnchorOutputsBit == AnchorOutputsBit
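HasAnchors and HasFundingTx show the usual bit-flag idiom: a property is tested by masking the channel type with the relevant bit. A tiny self-contained illustration; the bit values below are assumptions for the example, not lnd's actual constants:

```go
package main

import "fmt"

type ChannelType uint8

const (
	NoFundingTxBit   ChannelType = 1 << 0 // Assumed value for illustration.
	AnchorOutputsBit ChannelType = 1 << 1 // Assumed value for illustration.
)

// HasFundingTx is true when the "no funding tx" bit is absent.
func (c ChannelType) HasFundingTx() bool {
	return c&NoFundingTxBit == 0
}

// HasAnchors is true when the anchor-outputs bit is set.
func (c ChannelType) HasAnchors() bool {
	return c&AnchorOutputsBit == AnchorOutputsBit
}

func main() {
	c := AnchorOutputsBit
	fmt.Println(c.HasFundingTx(), c.HasAnchors()) // true true
}
```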
@@ -3192,7 +3192,7 @@ func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
 
 	// The RemoteNextRevocation field is optional, as it's possible for a
 	// channel to be closed before we learn of the next unrevoked
-	// revocation point for the remote party. Write a boolen indicating
+	// revocation point for the remote party. Write a boolean indicating
 	// whether this field is present or not.
 	if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
 		return err
@@ -3311,7 +3311,7 @@ func writeChanConfig(b io.Writer, c *ChannelConfig) error {
 }
 
 // fundingTxPresent returns true if expect the funding transcation to be found
-// on disk or already populated within the passed oen chanel struct.
+// on disk or already populated within the passed open channel struct.
 func fundingTxPresent(channel *OpenChannel) bool {
 	chanType := channel.ChanType
 
@@ -1037,7 +1037,7 @@ func (c *ChannelStateDB) pruneLinkNode(openChannels []*OpenChannel,
 	return c.linkNodeDB.DeleteLinkNode(remotePub)
 }
 
-// PruneLinkNodes attempts to prune all link nodes found within the databse with
+// PruneLinkNodes attempts to prune all link nodes found within the database with
 // whom we no longer have any open channels with.
 func (c *ChannelStateDB) PruneLinkNodes() error {
 	allLinkNodes, err := c.linkNodeDB.FetchAllLinkNodes()
@@ -1189,7 +1189,7 @@ func (c *ChannelStateDB) AbandonChannel(chanPoint *wire.OutPoint,
 		}
 
 		// If the channel was already closed, then we don't return an
-		// error as we'd like fro this step to be repeatable.
+		// error as we'd like this step to be repeatable.
 		return nil
 	case err != nil:
 		return err
@@ -448,7 +448,7 @@ func TestRestoreChannelShells(t *testing.T) {
 	// The node should have the same address, as specified in the channel
 	// shell.
 	if reflect.DeepEqual(linkNode.Addresses, channelShell.NodeAddrs) {
-		t.Fatalf("addr mismach: expected %v, got %v",
+		t.Fatalf("addr mismatch: expected %v, got %v",
 			linkNode.Addresses, channelShell.NodeAddrs)
 	}
 }
@@ -738,7 +738,7 @@ func TestFetchHistoricalChannel(t *testing.T) {
 
 	histChannel, err := cdb.FetchHistoricalChannel(&channel.FundingOutpoint)
 	if err != nil {
-		t.Fatalf("unexepected error getting channel: %v", err)
+		t.Fatalf("unexpected error getting channel: %v", err)
 	}
 
 	// FetchHistoricalChannel will attach the cdb to channel.Db, we set it
@@ -47,7 +47,7 @@ func (d *DB) ForwardingLog() *ForwardingLog {
 	}
 }
 
-// ForwardingLog is a time series database that logs the fulfilment of payment
+// ForwardingLog is a time series database that logs the fulfillment of payment
 // circuits by a lightning network daemon. The log contains a series of
 // forwarding events which map a timestamp to a forwarding event. A forwarding
 // event describes which channels were used to create+settle a circuit, and the
@@ -204,7 +204,7 @@ type ForwardingEventQuery struct {
 
 // ForwardingLogTimeSlice is the response to a forwarding query. It includes
 // the original query, the set events that match the query, and an integer
-// which represents the offset index of the last item in the set of retuned
+// which represents the offset index of the last item in the set of returned
 // events. This integer allows callers to resume their query using this offset
 // in the event that the query's response exceeds the max number of returnable
 // events.
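The offset described here enables simple cursor-style pagination: feed the returned last-index offset back in as the next query's offset until no events come back. A hypothetical sketch of that loop; the types and query helper below are stand-ins, not lnd's API:

```go
package main

import "fmt"

type event struct{ timestamp int64 }

type timeSlice struct {
	Events          []event
	LastIndexOffset uint32
}

// query returns up to max events starting at offset (stand-in storage).
func query(all []event, offset, max uint32) timeSlice {
	end := offset + max
	if end > uint32(len(all)) {
		end = uint32(len(all))
	}
	return timeSlice{Events: all[offset:end], LastIndexOffset: end}
}

func main() {
	log := make([]event, 10)
	const maxEvents = 4

	// Resume each query at the offset returned by the previous one.
	var offset uint32
	for {
		slice := query(log, offset, maxEvents)
		if len(slice.Events) == 0 {
			break
		}
		fmt.Printf("got %d events, next offset %d\n",
			len(slice.Events), slice.LastIndexOffset)
		offset = slice.LastIndexOffset
	}
}
```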
@@ -895,7 +895,7 @@ func validateInvoice(i *Invoice, paymentHash lntypes.Hash) error {
 	return nil
 }
 
-// IsPending returns ture if the invoice is in ContractOpen state.
+// IsPending returns true if the invoice is in ContractOpen state.
 func (i *Invoice) IsPending() bool {
 	return i.State == ContractOpen || i.State == ContractAccepted
 }
@@ -1189,7 +1189,7 @@ func fetchInvoiceNumByRef(invoiceIndex, payAddrIndex, setIDIndex kvdb.RBucket,
 	}
 }
 
-// ScanInvoices scans trough all invoices and calls the passed scanFunc for
+// ScanInvoices scans through all invoices and calls the passed scanFunc for
 // for each invoice with its respective payment hash. Additionally a reset()
 // closure is passed which is used to reset/initialize partial results and also
 // to signal if the kvdb.View transaction has been retried.
@@ -3020,7 +3020,7 @@ func updateInvoiceState(invoice *Invoice, hash *lntypes.Hash,
 	}
 }
 
-// cancelSingleHtlc validates cancelation of a single htlc and update its state.
+// cancelSingleHtlc validates cancellation of a single htlc and update its state.
 func cancelSingleHtlc(resolveTime time.Time, htlc *InvoiceHTLC,
 	invState ContractState) error {
 
@@ -75,6 +75,6 @@ func (g *GossipTimestampRange) MsgType() MessageType {
 func (g *GossipTimestampRange) MaxPayloadLength(uint32) uint32 {
 	// 32 + 4 + 4
 	//
-	// TODO(roasbeef): update to 8 byte timestmaps?
+	// TODO(roasbeef): update to 8 byte timestamps?
 	return 40
 }
 
@@ -280,7 +280,7 @@ func SerializeChannelCloseSummary(w io.Writer, cs *common.ChannelCloseSummary) e
 
 	// The RemoteNextRevocation field is optional, as it's possible for a
 	// channel to be closed before we learn of the next unrevoked
-	// revocation point for the remote party. Write a boolen indicating
+	// revocation point for the remote party. Write a boolean indicating
 	// whether this field is present or not.
 	if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
 		return err
@@ -379,7 +379,7 @@ func SerializeChannelCloseSummary(w io.Writer, cs *common.ChannelCloseSummary) e
 
 	// The RemoteNextRevocation field is optional, as it's possible for a
 	// channel to be closed before we learn of the next unrevoked
-	// revocation point for the remote party. Write a boolen indicating
+	// revocation point for the remote party. Write a boolean indicating
 	// whether this field is present or not.
 	if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
 		return err
@@ -221,7 +221,7 @@ func migrateOpenChanBucket(tx kvdb.RwTx) error {
 		}
 	}
 
-	// Remote unsiged updates as well.
+	// Remote unsigned updates as well.
 	updateBytes = chanBucket.Get(remoteUnsignedLocalUpdatesKey)
 	if updateBytes != nil {
 		legacyUnsignedUpdates, err := legacy.DeserializeLogUpdates(
@@ -316,7 +316,7 @@ func migrateForwardingPackages(tx kvdb.RwTx) error {
 		return err
 	}
 
-	// Now load all forwading packages using the legacy encoding.
+	// Now load all forwarding packages using the legacy encoding.
 	var pkgsToMigrate []*common.FwdPkg
 	for _, source := range sources {
 		packager := legacy.NewChannelPackager(source)
@@ -622,7 +622,7 @@ func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
 
 	// The RemoteNextRevocation field is optional, as it's possible for a
 	// channel to be closed before we learn of the next unrevoked
-	// revocation point for the remote party. Write a boolen indicating
+	// revocation point for the remote party. Write a boolean indicating
 	// whether this field is present or not.
 	if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
 		return err
@@ -626,7 +626,7 @@ func TestOutgoingPaymentsMigration(t *testing.T) {
 		// Order of payments should be be preserved.
 		old := oldPayments[i]
 
-		// Check the individial fields.
+		// Check the individual fields.
 		if p.Info.Value != old.Terms.Value {
 			t.Fatalf("value mismatch")
 		}
@@ -266,7 +266,7 @@ func Features(features *lnwire.FeatureVector) func(*Invoice) {
 }
 
 // PaymentAddr is a functional option that allows callers of NewInvoice to set
-// the desired payment address tht is advertised on the invoice.
+// the desired payment address that is advertised on the invoice.
 func PaymentAddr(addr [32]byte) func(*Invoice) {
 	return func(i *Invoice) {
 		i.PaymentAddr = &addr
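PaymentAddr is an instance of the functional-option pattern: an option is just a closure that mutates the value under construction, applied by the constructor. A compact, self-contained illustration; the Invoice fields and constructor here are invented for the example:

```go
package main

import "fmt"

type Invoice struct {
	Memo        string
	PaymentAddr *[32]byte
}

// PaymentAddr returns an option that sets the payment address.
func PaymentAddr(addr [32]byte) func(*Invoice) {
	return func(i *Invoice) {
		i.PaymentAddr = &addr
	}
}

// NewInvoice applies each option to a freshly built invoice.
func NewInvoice(memo string, opts ...func(*Invoice)) *Invoice {
	inv := &Invoice{Memo: memo}
	for _, opt := range opts {
		opt(inv)
	}
	return inv
}

func main() {
	var addr [32]byte
	addr[0] = 0x01
	inv := NewInvoice("coffee", PaymentAddr(addr))
	fmt.Println(inv.Memo, inv.PaymentAddr != nil) // coffee true
}
```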
@@ -157,7 +157,7 @@ func OptionClock(clock clock.Clock) OptionModifier {
 	}
 }
 
-// OptionDryRunMigration controls whether or not to intentially fail to commit a
+// OptionDryRunMigration controls whether or not to intentionally fail to commit a
 // successful migration that occurs when opening the database.
 func OptionDryRunMigration(dryRun bool) OptionModifier {
 	return func(o *Options) {
@@ -64,11 +64,11 @@ var (
 		"amount")
 
 	// ErrNonMPPayment is returned if we try to register an MPP attempt for
-	// a payment that already has a non-MPP attempt regitered.
+	// a payment that already has a non-MPP attempt registered.
 	ErrNonMPPayment = errors.New("payment has non-MPP attempts")
 
 	// ErrMPPayment is returned if we try to register a non-MPP attempt for
-	// a payment that already has an MPP attempt regitered.
+	// a payment that already has an MPP attempt registered.
 	ErrMPPayment = errors.New("payment has MPP attempts")
 
 	// ErrMPPPaymentAddrMismatch is returned if we try to register an MPP
@@ -106,7 +106,7 @@ func NewPaymentControl(db *DB) *PaymentControl {
 
 // InitPayment checks or records the given PaymentCreationInfo with the DB,
 // making sure it does not already exist as an in-flight payment. When this
-// method returns successfully, the payment is guranteeed to be in the InFlight
+// method returns successfully, the payment is guaranteed to be in the InFlight
 // state.
 func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash,
 	info *PaymentCreationInfo) error {
@@ -551,7 +551,7 @@ func TestPaymentControlDeleteNonInFligt(t *testing.T) {
 	require.Equal(t, 1, indexCount)
 }
 
-// TestPaymentControlDeletePayments tests that DeletePayments correcly deletes
+// TestPaymentControlDeletePayments tests that DeletePayments correctly deletes
 // information about completed payments from the database.
 func TestPaymentControlDeletePayments(t *testing.T) {
 	t.Parallel()
@@ -605,7 +605,7 @@ func TestPaymentControlDeletePayments(t *testing.T) {
 	assertPayments(t, db, payments[2:])
 }
 
-// TestPaymentControlDeleteSinglePayment tests that DeletePayment correcly
+// TestPaymentControlDeleteSinglePayment tests that DeletePayment correctly
 // deletes information about a completed payment from the database.
 func TestPaymentControlDeleteSinglePayment(t *testing.T) {
 	t.Parallel()
@@ -382,7 +382,7 @@ func fetchPayment(bucket kvdb.RBucket) (*MPPayment, error) {
 	}, nil
 }
 
-// fetchHtlcAttempts retrives all htlc attempts made for the payment found in
+// fetchHtlcAttempts retrieves all htlc attempts made for the payment found in
 // the given bucket.
 func fetchHtlcAttempts(bucket kvdb.RBucket) ([]HTLCAttempt, error) {
 	htlcsMap := make(map[uint64]*HTLCAttempt)
@@ -23,7 +23,7 @@ const (
 // Enforce that etcdLeaderElector implements the LeaderElector interface.
 var _ LeaderElector = (*etcdLeaderElector)(nil)
 
-// etcdLeaderElector is an implemetation of LeaderElector using etcd as the
+// etcdLeaderElector is an implementation of LeaderElector using etcd as the
 // election governor.
 type etcdLeaderElector struct {
 	id string
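For context, leader election on etcd is typically built from a lease-backed session plus a campaign on a key prefix, as provided by the upstream clientv3/concurrency package. A hedged sketch of that general pattern; the endpoints and keys are placeholders, and this is not lnd's actual wiring:

```go
package main

import (
	"context"
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints: []string{"127.0.0.1:2379"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// A session holds a lease; if this process dies, the lease
	// expires and leadership is released automatically.
	session, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Campaign blocks until we become the leader for this prefix.
	election := concurrency.NewElection(session, "/example-leader/")
	if err := election.Campaign(context.Background(), "node-1"); err != nil {
		log.Fatal(err)
	}
	log.Println("elected leader, safe to open the database")
}
```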
@@ -23,7 +23,7 @@ func RegisterLeaderElectorFactory(id string, factory leaderElectorFactoryFunc) {
 	leaderElectorFactories[id] = factory
 }
 
-// MakeLeaderElector will constuct a LeaderElector identified by id with the
+// MakeLeaderElector will construct a LeaderElector identified by id with the
 // passed arguments.
 func MakeLeaderElector(ctx context.Context, id string, args ...interface{}) (
 	LeaderElector, error) {
@@ -21,8 +21,8 @@ var secondsPer = map[string]int64{
 	"y": 31557600, // 365.25 days
 }
 
-// parseTime parses UNIX timestamps or short timeranges inspired by sytemd (when starting with "-"),
-// e.g. "-1M" for one month (30.44 days) ago.
+// parseTime parses UNIX timestamps or short timeranges inspired by systemd
+// (when starting with "-"), e.g. "-1M" for one month (30.44 days) ago.
 func parseTime(s string, base time.Time) (uint64, error) {
 	if reTimeRange.MatchString(s) {
 		last := len(s) - 1
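A rough reimplementation of the behavior this comment documents, for illustration only: a plain number parses as a UNIX timestamp, while "-&lt;n&gt;&lt;unit&gt;" subtracts n units from the base time. The prefix check below stands in for the regular-expression match the real code uses:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

var secondsPer = map[string]int64{
	"s": 1,
	"m": 60,
	"h": 3600,
	"d": 86400,
	"w": 604800,
	"M": 2630016,  // 30.44 days
	"y": 31557600, // 365.25 days
}

func parseTime(s string, base time.Time) (uint64, error) {
	if len(s) > 1 && s[0] == '-' {
		last := len(s) - 1
		unit, ok := secondsPer[string(s[last])]
		if !ok {
			return 0, fmt.Errorf("unknown unit %q", s[last])
		}
		n, err := strconv.ParseInt(s[1:last], 10, 64)
		if err != nil {
			return 0, err
		}
		return uint64(base.Unix() - n*unit), nil
	}
	return strconv.ParseUint(s, 10, 64)
}

func main() {
	ts, err := parseTime("-1M", time.Now()) // roughly one month ago
	if err != nil {
		panic(err)
	}
	fmt.Println(ts)
}
```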
@@ -256,7 +256,7 @@ type Config struct {
 
 	LetsEncryptDir string `long:"letsencryptdir" description:"The directory to store Let's Encrypt certificates within"`
 	LetsEncryptListen string `long:"letsencryptlisten" description:"The IP:port on which lnd will listen for Let's Encrypt challenges. Let's Encrypt will always try to contact on port 80. Often non-root processes are not allowed to bind to ports lower than 1024. This configuration option allows a different port to be used, but must be used in combination with port forwarding from port 80. This configuration can also be used to specify another IP address to listen on, for example an IPv6 address."`
-	LetsEncryptDomain string `long:"letsencryptdomain" description:"Request a Let's Encrypt certificate for this domain. Note that the certicate is only requested and stored when the first rpc connection comes in."`
+	LetsEncryptDomain string `long:"letsencryptdomain" description:"Request a Let's Encrypt certificate for this domain. Note that the certificate is only requested and stored when the first rpc connection comes in."`
 
 	// We'll parse these 'raw' string arguments into real net.Addrs in the
 	// loadConfig function. We need to expose the 'raw' strings so the
@@ -326,7 +326,7 @@ type Config struct {
 
 	PaymentsExpirationGracePeriod time.Duration `long:"payments-expiration-grace-period" description:"A period to wait before force closing channels with outgoing htlcs that have timed-out and are a result of this node initiated payments."`
 	TrickleDelay int `long:"trickledelay" description:"Time in milliseconds between each release of announcements to the network"`
-	ChanEnableTimeout time.Duration `long:"chan-enable-timeout" description:"The duration that a peer connection must be stable before attempting to send a channel update to reenable or cancel a pending disables of the peer's channels on the network."`
+	ChanEnableTimeout time.Duration `long:"chan-enable-timeout" description:"The duration that a peer connection must be stable before attempting to send a channel update to re-enable or cancel a pending disables of the peer's channels on the network."`
 	ChanDisableTimeout time.Duration `long:"chan-disable-timeout" description:"The duration that must elapse after first detecting that an already active channel is actually inactive and sending channel update disabling it to the network. The pending disable can be canceled if the peer reconnects and becomes stable for chan-enable-timeout before the disable update is sent."`
 	ChanStatusSampleInterval time.Duration `long:"chan-status-sample-interval" description:"The polling interval between attempts to detect if an active channel has become inactive due to its peer going offline."`
 	HeightHintCacheQueryDisable bool `long:"height-hint-cache-query-disable" description:"Disable queries from the height-hint cache to try to recover channels stuck in the pending close state. Disabling height hint queries may cause longer chain rescans, resulting in a performance hit. Unset this after channels are unstuck so you can get better performance again."`
@@ -900,7 +900,7 @@ func ValidateConfig(cfg Config, interceptor signal.Interceptor, fileParser,
 		)
 	}
 
-	// Don't allow superflous --maxchansize greater than
+	// Don't allow superfluous --maxchansize greater than
 	// BOLT 02 soft-limit for non-wumbo channel
 	if !cfg.ProtocolOptions.Wumbo() &&
 		cfg.MaxChanSize > int64(MaxFundingAmount) {
@@ -651,7 +651,7 @@ func (b *BreachArbiter) exactRetribution(confChan *chainntnfs.ConfirmationEvent,
 
 	// We may have to wait for some of the HTLC outputs to be spent to the
 	// second level before broadcasting the justice tx. We'll store the
-	// SpendEvents between each attempt to not re-register uneccessarily.
+	// SpendEvents between each attempt to not re-register unnecessarily.
 	spendNtfns := make(map[wire.OutPoint]*chainntnfs.SpendEvent)
 
 	// Compute both the total value of funds being swept and the
@@ -1263,7 +1263,7 @@ func TestBreachCreateJusticeTx(t *testing.T) {
 
 	// The spendCommitOuts tx should be spending the 4 typed of commit outs
 	// (note that in practice there will be at most two commit outputs per
-	// commmit, but we test all 4 types here).
+	// commit, but we test all 4 types here).
 	require.Len(t, justiceTxs.spendCommitOuts.TxIn, 4)
 
 	// Finally check that the spendHTLCs tx are spending the two revoked
@@ -1279,7 +1279,7 @@ type breachTest struct {
 
 	// spend2ndLevel requests that second level htlcs be spent *again*, as
 	// if by a remote party or watchtower. The outpoint of the second level
-	// htlc is in effect "readded" to the set of inputs.
+	// htlc is in effect "re-added" to the set of inputs.
 	spend2ndLevel bool
 
 	// sweepHtlc tests that the HTLC output is swept using the revocation
@@ -364,7 +364,7 @@ var (
 	// chain resolutions.
 	errNoResolutions = fmt.Errorf("no contract resolutions exist")
 
-	// errNoActions is retuned when the log doesn't contain any stored
+	// errNoActions is returned when the log doesn't contain any stored
 	// chain actions.
 	errNoActions = fmt.Errorf("no chain actions exist")
 
@@ -1098,7 +1098,7 @@ func (b *boltArbitratorLog) checkpointContract(c ContractResolver,
 	}, func() {})
 }
 
-// encodeSignDetails encodes the gived SignDetails struct to the writer.
+// encodeSignDetails encodes the given SignDetails struct to the writer.
 // SignDetails is allowed to be nil, in which we will encode that it is not
 // present.
 func encodeSignDetails(w io.Writer, s *input.SignDetails) error {
@@ -102,7 +102,7 @@ type ChainArbitratorConfig struct {
 	// ContractBreach is a function closure that the ChainArbitrator will
 	// use to notify the breachArbiter about a contract breach. A callback
 	// should be passed that when called will mark the channel pending
-	// close in the databae. It should only return a non-nil error when the
+	// close in the database. It should only return a non-nil error when the
 	// breachArbiter has preserved the necessary breach info for this
 	// channel point, and the callback has succeeded, meaning it is safe to
 	// stop watching the channel.
@@ -846,8 +846,8 @@ func (c *ChainArbitrator) publishClosingTxs(
 // rebroadcast is a helper method which will republish the unilateral or
 // cooperative close transaction or a channel in a particular state.
 //
-// NOTE: There is no risk to caling this method if the channel isn't in either
-// CommimentBroadcasted or CoopBroadcasted, but the logs will be misleading.
+// NOTE: There is no risk to calling this method if the channel isn't in either
+// CommitmentBroadcasted or CoopBroadcasted, but the logs will be misleading.
 func (c *ChainArbitrator) rebroadcast(channel *channeldb.OpenChannel,
 	state channeldb.ChannelStatus) error {
 
@@ -750,7 +750,7 @@ func TestHtlcTimeoutSingleStageRemoteSpend(t *testing.T) {
 
 			witnessBeacon := ctx.resolver.(*htlcTimeoutResolver).PreimageDB.(*mockWitnessBeacon)
 
-			// The remote spends the output direcly with
+			// The remote spends the output directly with
 			// the preimage.
 			ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{
 				SpendingTx: spendTx,
@@ -978,7 +978,7 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
 	}
 
 	// twoStageResolution is a resolution for a htlc on the local
-	// party's commitment, where the timout tx can be re-signed.
+	// party's commitment, where the timeout tx can be re-signed.
 	twoStageResolution := lnwallet.OutgoingHtlcResolution{
 		ClaimOutpoint:   htlcOutpoint,
 		SignedTimeoutTx: timeoutTx,
@@ -1041,7 +1041,7 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
 			preCheckpoint: func(ctx *htlcResolverTestContext,
 				resumed bool) error {
 
-				// If we are resuming from a checkpoing, we
+				// If we are resuming from a checkpoint, we
 				// expect the resolver to re-subscribe to a
 				// spend, hence we must resend it.
 				if resumed {
@@ -1070,7 +1070,7 @@ func TestHtlcTimeoutSecondStageSweeper(t *testing.T) {
 					Height: 13,
 				}
 
-				// The timout tx output should now be given to
+				// The timeout tx output should now be given to
 				// the sweeper.
 				resolver := ctx.resolver.(*htlcTimeoutResolver)
 				inp := <-resolver.Sweeper.(*mockSweeper).sweptInputs
@@ -1161,7 +1161,7 @@ func TestHtlcTimeoutSecondStageSweeperRemoteSpend(t *testing.T) {
 	spendTxHash := spendTx.TxHash()
 
 	// twoStageResolution is a resolution for a htlc on the local
-	// party's commitment, where the timout tx can be re-signed.
+	// party's commitment, where the timeout tx can be re-signed.
 	twoStageResolution := lnwallet.OutgoingHtlcResolution{
 		ClaimOutpoint:   htlcOutpoint,
 		SignedTimeoutTx: timeoutTx,
@@ -1214,7 +1214,7 @@ func TestHtlcTimeoutSecondStageSweeperRemoteSpend(t *testing.T) {
 			preCheckpoint: func(ctx *htlcResolverTestContext,
 				resumed bool) error {
 
-				// If we are resuming from a checkpoing, we
+				// If we are resuming from a checkpoint, we
 				// expect the resolver to re-subscribe to a
 				// spend, hence we must resend it.
 				if resumed {
@@ -313,7 +313,7 @@ func NewDNSSeedBootstrapper(
 
 // fallBackSRVLookup attempts to manually query for SRV records we need to
 // properly bootstrap. We do this by querying the special record at the "soa."
-// sub-domain of supporting DNS servers. The retuned IP address will be the IP
+// sub-domain of supporting DNS servers. The returned IP address will be the IP
 // address of the authoritative DNS server. Once we have this IP address, we'll
 // connect manually over TCP to request the SRV record. This is necessary as
 // the records we return are currently too large for a class of resolvers,
@@ -361,7 +361,7 @@ func (d *DNSSeedBootstrapper) fallBackSRVLookup(soaShim string,
 			"received: %v", resp.Rcode)
 	}
 
-	// Retrieve the RR(s) of the Answer section, and covert to the format
+	// Retrieve the RR(s) of the Answer section, and convert to the format
 	// that net.LookupSRV would normally return.
 	var rrs []*net.SRV
 	for _, rr := range resp.Answer {
@@ -286,7 +286,7 @@ type cachedNetworkMsg struct {
 }
 
 // Size returns the "size" of an entry. We return the number of items as we
-// just want to limit the total amount of entires rather than do accurate size
+// just want to limit the total amount of entries rather than do accurate size
 // accounting.
 func (c *cachedNetworkMsg) Size() (uint64, error) {
 	return uint64(len(c.msgs)), nil
@@ -2329,8 +2329,8 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {
 	}
 
 	// We'll also add the edge to our zombie index, provide a blank pubkey
-	// for the first node as we're simulating the sitaution where the first
-	// ndoe is updating but the second node isn't. In this case we only
+	// for the first node as we're simulating the situation where the first
+	// node is updating but the second node isn't. In this case we only
 	// want to allow a new update from the second node to allow the entire
 	// edge to be resurrected.
 	chanID := batch.chanAnn.ShortChannelID
@@ -2350,7 +2350,7 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {
 	}
 	processAnnouncement(batch.chanUpdAnn1, true, true)
 
-	// At this point, the channel should still be consiered a zombie.
+	// At this point, the channel should still be considered a zombie.
 	_, _, _, err = ctx.router.GetChannelByID(chanID)
 	if err != channeldb.ErrZombieEdge {
 		t.Fatalf("channel should still be a zombie")
@@ -2448,7 +2448,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
 	remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
 
 	// Override NotifyWhenOnline to return the remote peer which we expect
-	// meesages to be sent to.
+	// messages to be sent to.
 	ctx.gossiper.reliableSender.cfg.NotifyWhenOnline = func(peer [33]byte,
 		peerChan chan<- lnpeer.Peer) {
 
@@ -2645,7 +2645,7 @@ func TestExtraDataChannelAnnouncementValidation(t *testing.T) {
 	// We'll now create an announcement that contains an extra set of bytes
 	// that we don't know of ourselves, but should still include in the
 	// final signature check.
-	extraBytes := []byte("gotta validate this stil!")
+	extraBytes := []byte("gotta validate this still!")
 	ca, err := createRemoteChannelAnnouncement(0, extraBytes)
 	if err != nil {
 		t.Fatalf("can't create channel announcement: %v", err)
@@ -2828,7 +2828,7 @@ func TestRetransmit(t *testing.T) {
 	}
 	remotePeer := &mockPeer{remoteKey, nil, nil}
 
-	// Process a local channel annoucement, channel update and node
+	// Process a local channel announcement, channel update and node
 	// announcement. No messages should be broadcasted yet, since no proof
 	// has been exchanged.
 	assertProcessAnnouncement(
@@ -2961,7 +2961,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
 	}
 
 	// Now add the node's channel to the graph by processing the channel
-	// announement and channel update.
+	// announcement and channel update.
 	select {
 	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanAnn,
 		remotePeer):
@@ -3002,7 +3002,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
 		}
 	}
 
-	// Processing the same node announement again should be ignored, as it
+	// Processing the same node announcement again should be ignored, as it
 	// is stale.
 	select {
 	case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.nodeAnn2,
@@ -220,7 +220,7 @@ func (m *SyncManager) syncerHandler() {
 		initialHistoricalSyncer *GossipSyncer
 
 		// initialHistoricalSyncSignal is a signal that will fire once
-		// the intiial historical sync has been completed. This is
+		// the initial historical sync has been completed. This is
 		// crucial to ensure that another historical sync isn't
 		// attempted just because the initialHistoricalSyncer was
 		// disconnected.
@@ -232,7 +232,7 @@ func (m *SyncManager) syncerHandler() {
 		initialHistoricalSyncSignal = s.ResetSyncedSignal()
 
 		// Restart the timer for our new historical sync peer. This will
-		// ensure that all initial syncers recevie an equivalent
+		// ensure that all initial syncers receive an equivalent
 		// duration before attempting the next sync. Without doing so we
 		// might attempt two historical sync back to back if a peer
 		// disconnects just before the ticker fires.
@@ -362,7 +362,7 @@ func (m *SyncManager) syncerHandler() {
 			// Otherwise, our initialHistoricalSyncer corresponds to
 			// the peer being disconnected, so we'll have to find a
 			// replacement.
-			log.Debug("Finding replacement for intitial " +
+			log.Debug("Finding replacement for initial " +
 				"historical sync")
 
 			s := m.forceHistoricalSync()
@@ -459,7 +459,7 @@ below):
   `lnd` plus any application that consumes the RPC could cause `lnd` to miss
   crucial updates from the backend.
 - The default fee estimate mode in `bitcoind` is CONSERVATIVE. You can set
-  `bitcoind.estimatemode=ECONOMICAL` to change it into ECONOMICAL. Futhermore,
+  `bitcoind.estimatemode=ECONOMICAL` to change it into ECONOMICAL. Furthermore,
   if you start `bitcoind` in `regtest`, this configuration won't take any effect.
 
@@ -179,7 +179,7 @@ Arguments:
 
 `unit-cover`
 ------------
-Runs the unit test suite with test coverage, compiling the statisitics in
+Runs the unit test suite with test coverage, compiling the statistics in
 `profile.cov`.
 
 Arguments:
@@ -12,7 +12,7 @@
    1. [Code Spacing](#code-spacing)
    1. [Protobuf Compilation](#protobuf-compilation)
    1. [Additional Style Constraints On Top of gofmt](#additional-style-constraints-on-top-of-gofmt)
-   1. [Pointing to Remote Dependant Branches in Go Modules](#pointing-to-remote-dependant-branches-in-go-modules)
+   1. [Pointing to Remote Dependent Branches in Go Modules](#pointing-to-remote-dependent-branches-in-go-modules)
    1. [Use of Log Levels](#use-of-log-levels)
 5. [Code Approval Process](#code-approval-process)
    1. [Code Review](#code-review)
@@ -546,7 +546,7 @@ to `gofmt` we've opted to enforce the following style guidelines.
   log and error messages, committers should attempt to minimize the number of
   lines utilized, while still adhering to the 80-character column limit.
 
-## Pointing to Remote Dependant Branches in Go Modules
+## Pointing to Remote Dependent Branches in Go Modules
 
 It's common that a developer may need to make a change in a dependent project
 of `lnd` such as `btcd`, `neutrino`, `btcwallet`, etc. In order to test changes
@@ -2,7 +2,7 @@
 
 With the recent introduction of the `kvdb` interface LND can support multiple
 database backends allowing experimentation with the storage model as well as
-improving robustness trough eg. replicating essential data.
+improving robustness through eg. replicating essential data.
 
 Building on `kvdb` in v0.11.0 we're adding experimental [etcd](https://etcd.io)
 support to LND. As this is an unstable feature heavily in development, it still
@@ -71,7 +71,7 @@ db.etcd.keyfile=/home/user/etcd/bin/default.etcd/fixtures/client/key.pem
 db.etcd.insecure_skip_verify=true
 ```
 
-Optionally users can specifiy `db.etcd.user` and `db.etcd.pass` for db user
+Optionally users can specify `db.etcd.user` and `db.etcd.pass` for db user
 authentication. If the database is shared, it is possible to separate our data
 from other users by setting `db.etcd.namespace` to an (already existing) etcd
 namespace. In order to test without TLS, users are able to set `db.etcd.disabletls`
@@ -1,4 +1,4 @@
-# Increasing LND reliablity by clustering
+# Increasing LND reliability by clustering
 
 Normally LND nodes use the embedded bbolt database to store all important states.
 This method of running has been proven to work well in a variety of environments,
@@ -9,7 +9,7 @@ do updates and be more resilient to datacenter failures.
 It is now possible to store all essential state in a replicated etcd DB and to
 run multiple LND nodes on different machines where only one of them (the leader)
 is able to read and mutate the database. In such setup if the leader node fails
-or decomissioned, a follower node will be elected as the new leader and will
+or decommissioned, a follower node will be elected as the new leader and will
 quickly come online to minimize downtime.
 
 The leader election feature currently relies on etcd to work both for the election
@@ -286,7 +286,7 @@ the same format.
 
 #### Streaming Updates via `SubscribeChannelBackups`
 
-Using the gRPC interace directly, [a new call:
+Using the gRPC interface directly, [a new call:
 `SubscribeChannelBackups`](https://api.lightning.community/#subscribechannelbackups).
 This call allows users to receive a new notification each time the underlying
 SCB state changes. This can be used to implement more complex backup
@@ -53,7 +53,7 @@ ws.onmessage = function (event) {
     console.log(JSON.parse(event.data).result);
 }
 ws.onerror = function (event) {
-    // An error occured, let's log it to the console.
+    // An error occurred, let's log it to the console.
     console.log(event);
 }
 ```
@@ -33,7 +33,7 @@ type Config struct {
 // feature sets.
 type Manager struct {
 	// fsets is a static map of feature set to raw feature vectors. Requests
-	// are fulfilled by cloning these interal feature vectors.
+	// are fulfilled by cloning these internal feature vectors.
 	fsets map[Set]*lnwire.RawFeatureVector
 }
 
@@ -112,7 +112,7 @@ func explicitNegotiateCommitmentType(channelType lnwire.ChannelType,
 
 // implicitNegotiateCommitmentType negotiates the commitment type of a channel
 // implicitly by choosing the latest type supported by the local and remote
-// fetures.
+// features.
 func implicitNegotiateCommitmentType(local,
 	remote *lnwire.FeatureVector) (*lnwire.ChannelType, lnwallet.CommitmentType) {
 
@@ -2405,7 +2405,7 @@ func (f *Manager) waitForTimeout(completeChan *channeldb.OpenChannel,
 			}
 
 			// Close the timeout channel and exit if the block is
-			// aboce the max height.
+			// above the max height.
 			if uint32(epoch.Height) >= maxHeight {
 				log.Warnf("Waited for %v blocks without "+
 					"seeing funding transaction confirmed,"+
@@ -2584,7 +2584,7 @@ func TestFundingManagerPrivateRestart(t *testing.T) {
 }
 
 // TestFundingManagerCustomChannelParameters checks that custom requirements we
-// specify during the channel funding flow is preserved correcly on both sides.
+// specify during the channel funding flow is preserved correctly on both sides.
 func TestFundingManagerCustomChannelParameters(t *testing.T) {
 	t.Parallel()
 
@@ -2780,7 +2780,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) {
 	}
 
 	// The max value in flight Alice can have should be maxValueAcceptChannel,
-	// which is the default value and the maxium Bob can offer should be
+	// which is the default value and the maximum Bob can offer should be
 	// maxValueInFlight.
 	if err := assertMaxHtlc(resCtx,
 		maxValueAcceptChannel, maxValueInFlight); err != nil {
@@ -438,7 +438,7 @@ func (cm *circuitMap) cleanClosedChannels() error {
 			return ErrCorruptedCircuitMap
 		}
 
-		// Delete the ciruit.
+		// Delete the circuit.
 		for inKey := range circuitKeySet {
 			if err := circuitBkt.Delete(inKey.Bytes()); err != nil {
 				return err
@@ -85,7 +85,7 @@ func (s *InterceptableSwitch) interceptForward(packet *htlcPacket,
 
 	switch htlc := packet.htlc.(type) {
 	case *lnwire.UpdateAddHTLC:
-		// We are not interested in intercepting initated payments.
+		// We are not interested in intercepting initiated payments.
 		if packet.incomingChanID == hop.Source {
 			return false
 		}
@@ -3507,7 +3507,7 @@ func TestChannelRetransmission(t *testing.T) {
 			{"alice", "bob", &lnwire.RevokeAndAck{}, false},
 
 			// Proceed the payment farther by sending the
-			// fulfilment message and trigger the state
+			// fulfillment message and trigger the state
 			// update.
 			{"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
 			{"bob", "alice", &lnwire.CommitSig{}, false},
@@ -3548,7 +3548,7 @@ func TestChannelRetransmission(t *testing.T) {
 			{"bob", "alice", &lnwire.CommitSig{}, false},
 
 			// Proceed the payment farther by sending the
-			// fulfilment message and trigger the state
+			// fulfillment message and trigger the state
 			// update.
 			{"alice", "bob", &lnwire.RevokeAndAck{}, false},
 			{"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
@@ -5980,7 +5980,7 @@ func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) {
 		t.Fatal("timeout")
 	case h := <-receiver.registry.settleChan:
 		if hash != h {
-			t.Fatal("unexpect invoice settled")
+			t.Fatal("unexpected invoice settled")
 		}
 	}
 
@@ -91,7 +91,7 @@ type mailBoxConfig struct {
 	// belongs to.
 	shortChanID lnwire.ShortChannelID
 
-	// fetchUpdate retreives the most recent channel update for the channel
+	// fetchUpdate retrieves the most recent channel update for the channel
 	// this mailbox belongs to.
 	fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error)
 
@@ -803,7 +803,7 @@ type mailOrchestrator struct {
 	// chan_id -> short_chan_id
 	// short_chan_id -> mailbox
 	// so that Deliver can lookup mailbox directly once live,
-	// but still queriable by channel_id.
+	// but still queryable by channel_id.
 
 	// unclaimedPackets maps a live short chan id to queue of packets if no
 	// mailbox has been created.
@@ -816,7 +816,7 @@ type mailOrchConfig struct {
 	// properly exit during shutdown.
 	forwardPackets func(chan struct{}, ...*htlcPacket) error
 
-	// fetchUpdate retreives the most recent channel update for the channel
+	// fetchUpdate retrieves the most recent channel update for the channel
 	// this mailbox belongs to.
 	fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error)
 
@@ -89,7 +89,7 @@ type htlcPacket struct {
 	incomingTimeout uint32
 
 	// outgoingTimeout is the timeout of the proposed outgoing HTLC. This
-	// will be extraced from the hop payload recevived by the incoming
+	// will be extracted from the hop payload received by the incoming
 	// link.
 	outgoingTimeout uint32
 
@@ -290,7 +290,7 @@ type Switch struct {
 
 	// blockEpochStream is an active block epoch event stream backed by an
 	// active ChainNotifier instance. This will be used to retrieve the
-	// lastest height of the chain.
+	// latest height of the chain.
 	blockEpochStream *chainntnfs.BlockEpochEvent
 
 	// pendingSettleFails is the set of settle/fail entries that we need to
@@ -1546,7 +1546,7 @@ func TestCheckCircularForward(t *testing.T) {
 }
 
 // TestSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes
-// along, then we won't attempt to froward it down al ink that isn't yet able
+// along, then we won't attempt to forward it down al ink that isn't yet able
 // to forward any HTLC's.
 func TestSkipIneligibleLinksMultiHopForward(t *testing.T) {
 	tests := []multiHopFwdTest{
@@ -1601,7 +1601,7 @@ func TestSkipIneligibleLinksMultiHopForward(t *testing.T) {
 }
 
 // testSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes
-// along, then we won't attempt to froward it down al ink that isn't yet able
+// along, then we won't attempt to forward it down al ink that isn't yet able
 // to forward any HTLC's.
 func testSkipIneligibleLinksMultiHopForward(t *testing.T,
 	testCase *multiHopFwdTest) {
@@ -2563,7 +2563,7 @@ func TestSwitchGetPaymentResult(t *testing.T) {
 		t.Fatalf("unable to store result: %v", err)
 	}
 
-	// The result should be availble.
+	// The result should be available.
 	select {
 	case res, ok := <-resultChan:
 		if !ok {
@@ -328,7 +328,7 @@ func SenderHTLCScript(senderHtlcKey, receiverHtlcKey,
 // HTLC to claim the output with knowledge of the revocation private key in the
 // scenario that the sender of the HTLC broadcasts a previously revoked
 // commitment transaction. A valid spend requires knowledge of the private key
-// that corresponds to their revocation base point and also the private key fro
+// that corresponds to their revocation base point and also the private key from
 // the per commitment point, and a valid signature under the combined public
 // key.
 func SenderHtlcSpendRevokeWithKey(signer Signer, signDesc *SignDescriptor,
@@ -84,7 +84,7 @@ const (
 	// us to sweep an HTLC output that we extended to a party, but was
 	// never fulfilled. This _is_ the HTLC output directly on our
 	// commitment transaction, and the input to the second-level HTLC
-	// tiemout transaction. It can only be spent after CLTV expiry, and
+	// timeout transaction. It can only be spent after CLTV expiry, and
 	// commitment confirmation.
 	HtlcOfferedTimeoutSecondLevelInputConfirmed StandardWitnessType = 15
 
@@ -31,7 +31,7 @@ type invoiceExpiryTs struct {
 }
 
 // Less implements PriorityQueueItem.Less such that the top item in the
-// priorty queue will be the one that expires next.
+// priority queue will be the one that expires next.
 func (e invoiceExpiryTs) Less(other queue.PriorityQueueItem) bool {
 	return e.Expiry.Before(other.(*invoiceExpiryTs).Expiry)
 }
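The Less method above is the classic min-heap ordering trick: compare by expiry so the soonest-to-expire item surfaces first. A small container/heap sketch of the same idea, with invented stand-in types rather than lnd's queue package:

```go
package main

import (
	"container/heap"
	"fmt"
	"time"
)

type expiryItem struct {
	id     string
	expiry time.Time
}

type expiryQueue []expiryItem

func (q expiryQueue) Len() int      { return len(q) }
func (q expiryQueue) Swap(i, j int) { q[i], q[j] = q[j], q[i] }

// Less orders by expiry so the next-to-expire item surfaces first.
func (q expiryQueue) Less(i, j int) bool { return q[i].expiry.Before(q[j].expiry) }

func (q *expiryQueue) Push(x interface{}) { *q = append(*q, x.(expiryItem)) }
func (q *expiryQueue) Pop() interface{} {
	old := *q
	n := len(old)
	item := old[n-1]
	*q = old[:n-1]
	return item
}

func main() {
	now := time.Now()
	q := &expiryQueue{
		{"a", now.Add(3 * time.Hour)},
		{"b", now.Add(1 * time.Hour)},
		{"c", now.Add(2 * time.Hour)},
	}
	heap.Init(q)
	for q.Len() > 0 {
		fmt.Println(heap.Pop(q).(expiryItem).id) // b, c, a
	}
}
```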
@@ -58,10 +58,10 @@ func (b invoiceExpiryHeight) expired(currentHeight, delta uint32) bool {
 	return currentHeight+delta >= b.expiryHeight
 }
 
-// InvoiceExpiryWatcher handles automatic invoice cancellation of expried
+// InvoiceExpiryWatcher handles automatic invoice cancellation of expired
 // invoices. Upon start InvoiceExpiryWatcher will retrieve all pending (not yet
-// settled or canceled) invoices invoices to its watcing queue. When a new
-// invoice is added to the InvoiceRegistry, it'll be forarded to the
+// settled or canceled) invoices invoices to its watching queue. When a new
+// invoice is added to the InvoiceRegistry, it'll be forwarded to the
 // InvoiceExpiryWatcher and will end up in the watching queue as well.
 // If any of the watched invoices expire, they'll be removed from the watching
 // queue and will be cancelled through InvoiceRegistry.CancelInvoice().
@@ -630,7 +630,7 @@ func (i *InvoiceRegistry) cancelSingleHtlc(invoiceRef channeldb.InvoiceRef,
 	updateInvoice := func(invoice *channeldb.Invoice) (
 		*channeldb.InvoiceUpdateDesc, error) {
 
-		// Only allow individual htlc cancelation on open invoices.
+		// Only allow individual htlc cancellation on open invoices.
 		if invoice.State != channeldb.ContractOpen {
 			log.Debugf("cancelSingleHtlc: invoice %v no longer "+
 				"open", invoiceRef)
@@ -669,7 +669,7 @@ func (i *InvoiceRegistry) cancelSingleHtlc(invoiceRef channeldb.InvoiceRef,
 			htlcState = htlc.State
 		}
 
-		// Cancelation is only possible if the htlc wasn't already
+		// Cancellation is only possible if the htlc wasn't already
 		// resolved.
 		if htlcState != channeldb.HtlcStateAccepted {
 			log.Debugf("cancelSingleHtlc: htlc %v on invoice %v "+
@@ -1248,7 +1248,7 @@ func shouldCancel(state channeldb.ContractState, cancelAccepted bool) bool {
 	}
 
 	// If the invoice is accepted, we should only cancel if we want to
-	// force cancelation of accepted invoices.
+	// force cancellation of accepted invoices.
 	return cancelAccepted
 }
 
@@ -1396,7 +1396,7 @@ type InvoiceSubscription struct {
 	// StartingInvoiceIndex field.
 	NewInvoices chan *channeldb.Invoice
 
-	// SettledInvoices is a channel that we'll use to send all setted
+	// SettledInvoices is a channel that we'll use to send all settled
 	// invoices with an invoices index greater than the specified
 	// StartingInvoiceIndex field.
 	SettledInvoices chan *channeldb.Invoice
 
@@ -183,10 +183,10 @@ func TestSettleInvoice(t *testing.T) {
 	// Try to cancel.
 	err = ctx.registry.CancelInvoice(testInvoicePaymentHash)
 	if err != channeldb.ErrInvoiceAlreadySettled {
-		t.Fatal("expected cancelation of a settled invoice to fail")
+		t.Fatal("expected cancellation of a settled invoice to fail")
 	}
 
-	// As this is a direct sette, we expect nothing on the hodl chan.
+	// As this is a direct settle, we expect nothing on the hodl chan.
 	select {
 	case <-hodlChan:
 		t.Fatal("unexpected resolution")
@@ -325,10 +325,10 @@ func testCancelInvoice(t *testing.T, gc bool) {
 	require.Equal(t, testCurrentHeight, failResolution.AcceptHeight)
 }
 
-// TestCancelInvoice tests cancelation of an invoice and related notifications.
+// TestCancelInvoice tests cancellation of an invoice and related notifications.
 func TestCancelInvoice(t *testing.T) {
 	// Test cancellation both with garbage collection (meaning that canceled
-	// invoice will be deleted) and without (meain it'll be kept).
+	// invoice will be deleted) and without (meaning it'll be kept).
 	t.Run("garbage collect", func(t *testing.T) {
 		testCancelInvoice(t, true)
 	})
@@ -507,7 +507,7 @@ func TestSettleHoldInvoice(t *testing.T) {
 	// Try to cancel.
 	err = registry.CancelInvoice(testInvoicePaymentHash)
 	if err == nil {
-		t.Fatal("expected cancelation of a settled invoice to fail")
+		t.Fatal("expected cancellation of a settled invoice to fail")
 	}
 }
 
@@ -1074,7 +1074,7 @@ func TestOldInvoiceRemovalOnStart(t *testing.T) {
 
 	i := 0
 	for paymentHash, invoice := range existingInvoices.expiredInvoices {
-		// Mark half of the invoices as settled, the other hald as
+		// Mark half of the invoices as settled, the other half as
 		// canceled.
 		if i%2 == 0 {
 			invoice.State = channeldb.ContractSettled
@ -1225,7 +1225,7 @@ func testHeightExpiryWithRegistry(t *testing.T, numParts int, settle bool) {
|
||||
}
|
||||
|
||||
// If we did not settle the invoice before its expiry, we now expect
|
||||
// a cancelation.
|
||||
// a cancellation.
|
||||
expectedState := channeldb.ContractSettled
|
||||
if !settle {
|
||||
expectedState = channeldb.ContractCanceled
|
||||
|
@ -286,7 +286,7 @@ type HTLCPreimages = map[channeldb.CircuitKey]lntypes.Preimage
// verifies that all derived child hashes match the payment hashes of the HTLCs
// in the set. This method is meant to be called after receiving the full amount
// committed to via mpp_total_msat. This method will return a fail resolution if
// any of the child hashes fail to matche theire corresponding HTLCs.
// any of the child hashes fail to match their corresponding HTLCs.
func reconstructAMPPreimages(ctx *invoiceUpdateCtx,
htlcSet HTLCSet) (HTLCPreimages, *HtlcFailResolution) {
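
For context on what "derived child hashes" means here, the following is a self-contained sketch of the base AMP construction: the root seed is the XOR of all HTLC shares, and a child preimage is derived per HTLC index. lnd's production derivation (its amp package) differs in detail, so treat this as conceptual only.

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// share is one HTLC's 32-byte AMP share.
type share = [32]byte

// reconstructRoot XORs all shares together, recovering the root seed once
// every HTLC in the set has arrived.
func reconstructRoot(shares []share) share {
	var root share
	for _, s := range shares {
		for i := range root {
			root[i] ^= s[i]
		}
	}
	return root
}

// childPreimage derives the i-th child preimage as H(root || i), following
// the original AMP proposal; lnd's real derivation differs in detail.
func childPreimage(root share, i uint32) [32]byte {
	var idx [4]byte
	binary.BigEndian.PutUint32(idx[:], i)
	return sha256.Sum256(append(root[:], idx[:]...))
}

func main() {
	shares := []share{{1}, {2}, {3}}
	root := reconstructRoot(shares)

	// Verification step: the hash of each derived child preimage must
	// equal the corresponding HTLC's payment hash, otherwise the whole
	// set is failed back.
	pre := childPreimage(root, 0)
	hash := sha256.Sum256(pre[:])
	fmt.Printf("child 0 hash: %x\n", hash)
}
```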
@ -9,7 +9,7 @@ package lncfg
type ProtocolOptions struct {
// LegacyProtocol is a sub-config that houses all the legacy protocol
// options. These are mostly used for integration tests as most modern
// nodes shuld always run with them on by default.
// nodes should always run with them on by default.
LegacyProtocol `group:"legacy" namespace:"legacy"`

// ExperimentalProtocol is a sub-config that houses any experimental

@ -4,7 +4,7 @@
package lncfg

// Legacy is a sub-config that houses all the legacy protocol options. These
// are mostly used for integration tests as most modern nodes shuld always run
// are mostly used for integration tests as most modern nodes should always run
// with them on by default.
type LegacyProtocol struct {
}

@ -4,7 +4,7 @@
package lncfg

// Legacy is a sub-config that houses all the legacy protocol options. These
// are mostly used for integration tests as most modern nodes shuld always run
// are mostly used for integration tests as most modern nodes should always run
// with them on by default.
type LegacyProtocol struct {
// LegacyOnionFormat if set to true, then we won't signal

@ -9,7 +9,7 @@ package lncfg
type ProtocolOptions struct {
// LegacyProtocol is a sub-config that houses all the legacy protocol
// options. These are mostly used for integration tests as most modern
// nodes shuld always run with them on by default.
// nodes should always run with them on by default.
LegacyProtocol `group:"legacy" namespace:"legacy"`

// ExperimentalProtocol is a sub-config that houses any experimental
@ -645,7 +645,7 @@ func getTLSConfig(cfg *Config) ([]grpc.ServerOption, []grpc.DialOption,
return nil, nil, nil, nil, err
}

// We check whether the certifcate we have on disk match the IPs and
// We check whether the certificate we have on disk match the IPs and
// domains specified by the config. If the extra IPs or domains have
// changed from when the certificate was created, we will refresh the
// certificate if auto refresh is active.
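
A minimal sketch of what such a SAN comparison can look like, under hypothetical helper names; lnd's real implementation lives in its cert package and also handles the auto-refresh flow.

```go
package main

import (
	"crypto/x509"
	"fmt"
	"net"
)

// sansChanged reports whether the certificate on disk is missing any of the
// DNS names or IPs the config currently asks for, meaning a refresh is due.
func sansChanged(cert *x509.Certificate, wantDNS []string,
	wantIPs []net.IP) bool {

	have := make(map[string]struct{})
	for _, d := range cert.DNSNames {
		have[d] = struct{}{}
	}
	for _, ip := range cert.IPAddresses {
		have[ip.String()] = struct{}{}
	}

	for _, d := range wantDNS {
		if _, ok := have[d]; !ok {
			return true
		}
	}
	for _, ip := range wantIPs {
		if _, ok := have[ip.String()]; !ok {
			return true
		}
	}
	return false
}

func main() {
	cert := &x509.Certificate{
		DNSNames:    []string{"localhost"},
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
	}
	fmt.Println(sansChanged(cert, []string{"localhost"}, nil))   // false
	fmt.Println(sansChanged(cert, []string{"example.org"}, nil)) // true
}
```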
@ -36,7 +36,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
}

// Before we try to make the new service instance, we'll perform
// some sanity checks on the arguments to ensure that they're useable.
// some sanity checks on the arguments to ensure that they're usable.
switch {
case config.Manager == nil:
return nil, nil, fmt.Errorf("Manager must be set to create " +

@ -33,7 +33,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
}

// Before we try to make the new router service instance, we'll perform
// some sanity checks on the arguments to ensure that they're useable.
// some sanity checks on the arguments to ensure that they're usable.
switch {
case config.Router == nil:
return nil, nil, fmt.Errorf("Router must be set to create " +
@ -59,7 +59,7 @@ func newForwardInterceptor(server *Server, stream Router_HtlcInterceptorServer)
}

// run sends the intercepted packets to the client and receives the
// corersponding responses. On one hand it regsitered itself as an interceptor
// corersponding responses. On one hand it registered itself as an interceptor
// that receives the switch packets and on the other hand launches a go routine
// to read from the client stream.
// To coordinate all this and make sure it is safe for concurrent access all
@ -132,7 +132,7 @@ type MissionControl interface {

// QueryRoutes attempts to query the daemons' Channel Router for a possible
// route to a target destination capable of carrying a specific amount of
// satoshis within the route's flow. The retuned route contains the full
// satoshis within the route's flow. The returned route contains the full
// details required to craft and send an HTLC, also including the necessary
// information that should be present within the Sphinx packet encapsulated
// within the HTLC.
@ -666,7 +666,7 @@ func getMsatPairValue(msatValue lnwire.MilliSatoshi,
return msatValue, nil
}

// If we have no msatValue, we can just return our sate value even if
// If we have no msatValue, we can just return our state value even if
// it is zero, because it's impossible that we have mismatched values.
if msatValue == 0 {
return lnwire.MilliSatoshi(satValue * 1000), nil
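
A self-contained sketch of the reconciliation this function performs, with plain uint64 values standing in for lnwire.MilliSatoshi; the exact branch order in lnd may differ.

```go
package main

import (
	"errors"
	"fmt"
)

// msatPairValue reconciles a millisatoshi/satoshi pair supplied over RPC,
// preferring the more precise msat value when both fields agree.
func msatPairValue(msat, sat uint64) (uint64, error) {
	// If the msat value expressed in sats matches the sat value, the
	// pair is consistent and the msat value can be used directly.
	if msat/1000 == sat {
		return msat, nil
	}

	// With no msat value we can return the sat value even when it is
	// zero, because mismatched values are impossible in that case.
	if msat == 0 {
		return sat * 1000, nil
	}

	// Likewise, a zero sat value means only the msat field was set.
	if sat == 0 {
		return msat, nil
	}

	return 0, errors.New("mismatched msat/sat values")
}

func main() {
	v, err := msatPairValue(0, 5)
	fmt.Println(v, err) // 5000 <nil>
	_, err = msatPairValue(2000, 3)
	fmt.Println(err) // mismatched msat/sat values
}
```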
@ -879,7 +879,7 @@ func (s *Server) SubscribeHtlcEvents(req *SubscribeHtlcEventsRequest,
// requests to the caller.
// Upon connection it does the following:
// 1. Check if there is already a live stream, if yes it rejects the request.
// 2. Regsitered a ForwardInterceptor
// 2. Registered a ForwardInterceptor
// 3. Delivers to the caller every √√ and detect his answer.
// It uses a local implementation of holdForwardsStore to keep all the hold
// forwards and find them when manual resolution is later needed.
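
As a usage illustration of this bidirectional stream, here is a hypothetical client fragment that registers as the interceptor and resumes every forward unchanged; the routerrpc message and action names are recalled from lnd's proto of this era and should be double-checked against the generated code before use.

```go
package interceptdemo

import (
	"context"

	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
)

// resumeAllForwards registers as the node's HTLC interceptor and resumes
// every intercepted forward. A real policy could SETTLE or FAIL instead.
func resumeAllForwards(ctx context.Context,
	client routerrpc.RouterClient) error {

	stream, err := client.HtlcInterceptor(ctx)
	if err != nil {
		return err
	}
	for {
		// Each Recv delivers one intercepted switch packet.
		htlc, err := stream.Recv()
		if err != nil {
			return err
		}

		// Echo back the circuit key with a RESUME action so the
		// forward continues as if it were never intercepted.
		err = stream.Send(&routerrpc.ForwardHtlcInterceptResponse{
			IncomingCircuitKey: htlc.IncomingCircuitKey,
			Action:             routerrpc.ResolveHoldForwardAction_RESUME,
		})
		if err != nil {
			return err
		}
	}
}
```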
@ -36,7 +36,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
}

// Before we try to make the new signer service instance, we'll perform
// some sanity checks on the arguments to ensure that they're useable.
// some sanity checks on the arguments to ensure that they're usable.

switch {
// If the macaroon service is set (we should use macaroons), then

@ -37,7 +37,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (

// Before we try to make the new WalletKit service instance, we'll
// perform some sanity checks on the arguments to ensure that they're
// useable.
// usable.
switch {
case config.MacService != nil && config.NetworkDir == "":
return nil, nil, fmt.Errorf("NetworkDir must be set to " +

@ -175,7 +175,7 @@ func (c *Handler) GetInfo(ctx context.Context,
}

// isActive returns nil if the tower backend is initialized, and the Handler can
// proccess RPC requests.
// process RPC requests.
func (c *Handler) isActive() error {
if c.cfg.Active {
return nil

@ -34,7 +34,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
}

// Before we try to make the new service instance, we'll perform
// some sanity checks on the arguments to ensure that they're useable.
// some sanity checks on the arguments to ensure that they're usable.
switch {
case config.Resolver == nil:
return nil, nil, errors.New("a lncfg.TCPResolver is required")
@ -819,7 +819,7 @@ func (hn *HarnessNode) Init(
initReq.StatelessInit, response.AdminMacaroon,
)
if err != nil {
return nil, fmt.Errorf("faied to init: %w", err)
return nil, fmt.Errorf("failed to init: %w", err)
}

return response, nil
@ -163,7 +163,7 @@ func testSendPaymentAMPInvoiceCase(net *lntest.NetworkHarness, t *harnessTest,
require.NoError(t.t, err)

// Also fetch Bob's invoice from ListInvoices and assert it is equal to
// the one recevied via the subscription.
// the one received via the subscription.
invoiceResp, err := ctx.bob.ListInvoices(
ctxb, &lnrpc.ListInvoiceRequest{},
)

@ -695,7 +695,7 @@ func testSendToRouteAMP(net *lntest.NetworkHarness, t *harnessTest) {
require.NoError(t.t, err)

// Also fetch Bob's invoice from ListInvoices and assert it is equal to
// the one recevied via the subscription.
// the one received via the subscription.
invoiceResp, err := ctx.bob.ListInvoices(
ctxb, &lnrpc.ListInvoiceRequest{},
)

@ -1199,7 +1199,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
}
}

// Check that each HTLC output was spent exactly onece.
// Check that each HTLC output was spent exactly once.
for op, num := range htlcTxOutpointSet {
if num != 1 {
t.Fatalf("HTLC outpoint %v was spent %v times", op, num)
@ -393,7 +393,7 @@ func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned b
// Bob stimmy.
net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, bob)

// Assert that Bob has the correct sync type before proceeeding.
// Assert that Bob has the correct sync type before proceeding.
if pinned {
assertSyncType(t, alice, bobPubkey, lnrpc.Peer_PINNED_SYNC)
} else {

@ -426,7 +426,7 @@ func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned b
// Ensure that a new update for both created edges is properly
// dispatched to our registered client.
case graphUpdate := <-graphSub.updateChan:
// Process all channel updates prsented in this update
// Process all channel updates presented in this update
// message.
for _, chanUpdate := range graphUpdate.ChannelUpdates {
switch chanUpdate.AdvertisingNode {

@ -807,7 +807,7 @@ func testUpdateChannelPolicyForPrivateChannel(net *lntest.NetworkHarness,
assertAmountPaid(t, "Bob(local) [private=>] Carol(remote)",
net.Bob, bobFundPoint, paymentAmt, 0)

// Calcuate the amount in satoshis.
// Calculate the amount in satoshis.
amtExpected := int64(paymentAmt + baseFeeMSat/1000)

// Bob should have received 20k satoshis + fee from Alice.
@ -424,7 +424,7 @@ func testForwardInterceptorBasic(net *lntest.NetworkHarness, t *harnessTest) {
})
return err == nil && len(channels.Channels) > 0
}, defaultTimeout)
require.NoError(t.t, err, "alice <> bob channel didnt re-activate")
require.NoError(t.t, err, "alice <> bob channel didn't re-activate")
}

@ -213,7 +213,7 @@ func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness,
require.NoError(t.t, err, "bob didn't report channel")

cType, err := channelCommitType(alice, chanPoint)
require.NoError(t.t, err, "unable to get channnel type")
require.NoError(t.t, err, "unable to get channel type")

// With the channel open, ensure that the amount specified above has
// properly been pushed to Bob.

@ -14,7 +14,7 @@ import (
"github.com/stretchr/testify/require"
)

// testHoldInvoiceForceClose tests cancelation of accepted hold invoices which
// testHoldInvoiceForceClose tests cancellation of accepted hold invoices which
// would otherwise trigger force closes when they expire.
func testHoldInvoiceForceClose(net *lntest.NetworkHarness, t *harnessTest) {
ctxb, cancel := context.WithCancel(context.Background())

@ -289,7 +289,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
payment.Status)
}
case <-time.After(5 * time.Second):
t.Fatalf("in flight status not recevied")
t.Fatalf("in flight status not received")
}
}

@ -341,7 +341,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {

payment = p
case <-time.After(5 * time.Second):
t.Fatalf("in flight status not recevied")
t.Fatalf("in flight status not received")
}
}

@ -1094,7 +1094,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
daveBalance := daveBalResp.ConfirmedBalance
if daveBalance <= daveStartingBalance {
return fmt.Errorf("expected dave to have balance "+
"above %d, intead had %v", daveStartingBalance,
"above %d, instead had %v", daveStartingBalance,
daveBalance)
}
@ -168,7 +168,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
err = restartBob()
require.NoError(t.t, err)

// After the force close transacion is mined, transactions will be
// After the force close transaction is mined, transactions will be
// broadcast by both Bob and Carol.
switch c {
// Carol will broadcast her second level HTLC transaction and Bob will

@ -184,7 +184,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
err = restartBob()
require.NoError(t.t, err)

// After the force close transacion is mined, we should expect Bob and
// After the force close transaction is mined, we should expect Bob and
// Carol to broadcast some transactions depending on the channel
// commitment type.
switch c {

@ -52,7 +52,7 @@ func testWipeForwardingPackages(net *lntest.NetworkHarness,
// close channel should now become pending force closed channel.
pendingChan = assertPendingForceClosedChannel(t.t, net.Bob)

// Check the forwarding pacakges are deleted.
// Check the forwarding packages are deleted.
require.Zero(t.t, pendingChan.NumForwardingPackages)

// For Alice, the forwarding packages should have been wiped too.

@ -87,7 +87,7 @@ func testWipeForwardingPackages(net *lntest.NetworkHarness,
// really contains is channels whose closing tx has been broadcast.
pendingChan = assertPendingForceClosedChannel(t.t, net.Bob)

// Check the forwarding pacakges are deleted.
// Check the forwarding packages are deleted.
require.Zero(t.t, pendingChan.NumForwardingPackages)

// Mine a block to confirm sweep transactions such that they
@ -2024,7 +2024,7 @@ func (lc *LightningChannel) restorePendingRemoteUpdates(
logIdx)
}

// We previously restored Adds along with all the other upates,
// We previously restored Adds along with all the other updates,
// but this Add restoration was a no-op as every single one of
// these Adds was already restored since they're all incoming
// htlcs on the local commitment.

@ -2143,7 +2143,7 @@ func (lc *LightningChannel) restorePendingLocalUpdates(
}

// At this point the restored update's logIndex must be equal
// to the update log, otherwise somthing is horribly wrong.
// to the update log, otherwise something is horribly wrong.
if payDesc.LogIndex != lc.localUpdateLog.logIndex {
panic(fmt.Sprintf("log index mismatch: "+
"%v vs %v", payDesc.LogIndex,
@ -3460,7 +3460,7 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
}

// Now that we know the total value of added HTLCs, we check
// that this satisfy the MaxPendingAmont contraint.
// that this satisfy the MaxPendingAmont constraint.
if amtInFlight > constraints.MaxPendingAmount {
return ErrMaxPendingAmount
}
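
In isolation, this constraint is a simple sum-and-compare. A toy version for orientation (the real accounting tracks both directions and fee buffers, which this sketch does not reproduce):

```go
package main

import (
	"errors"
	"fmt"
)

var errMaxPendingAmount = errors.New("max pending amount exceeded")

// checkMaxPending sums the in-flight HTLC amounts (in msat) and rejects the
// set once it crosses the counterparty's max_htlc_value_in_flight_msat.
func checkMaxPending(htlcAmts []uint64, maxPending uint64) error {
	var amtInFlight uint64
	for _, amt := range htlcAmts {
		amtInFlight += amt
	}
	if amtInFlight > maxPending {
		return errMaxPendingAmount
	}
	return nil
}

func main() {
	fmt.Println(checkMaxPending([]uint64{1000, 2000}, 5000)) // <nil>
	fmt.Println(checkMaxPending([]uint64{4000, 2000}, 5000)) // error
}
```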
@ -6780,7 +6780,7 @@ func (lc *LightningChannel) availableCommitmentBalance(view *htlcView,
// than the htlcCommitFee, where we could still be sending dust
// HTLCs, but we return 0 in this case. This is to avoid
// lowering our balance even further, as this takes us into a
// bad state wehere neither we nor our channel counterparty can
// bad state where neither we nor our channel counterparty can
// add HTLCs.
if ourBalance < htlcCommitFee {
return 0, commitWeight
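
A toy rendering of just this guard; the real function returns the balance alongside the commitment weight and has several more branches.

```go
package main

import "fmt"

// spendableSat illustrates the guard: once the local balance cannot cover
// the fee of a commitment carrying one more non-dust HTLC, report zero
// rather than a small positive figure that would invite further adds.
func spendableSat(ourBalance, htlcCommitFee int64) int64 {
	if ourBalance < htlcCommitFee {
		return 0
	}
	return ourBalance
}

func main() {
	fmt.Println(spendableSat(400, 500)) // 0
	fmt.Println(spendableSat(900, 500)) // 900
}
```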
@ -654,7 +654,7 @@ func testCommitHTLCSigTieBreak(t *testing.T, restart bool) {
lastIndex = htlc.OutputIndex
}

// If requsted, restart Alice so that we can test that the necessary
// If requested, restart Alice so that we can test that the necessary
// indexes can be reconstructed before needing to validate the
// signatures from Bob.
if restart {

@ -1172,7 +1172,7 @@ func TestForceCloseDustOutput(t *testing.T) {
defer cleanUp()

// We set both node's channel reserves to 0, to make sure
// they can create small dust ouputs without going under
// they can create small dust outputs without going under
// their channel reserves.
aliceChannel.channelState.LocalChanCfg.ChanReserve = 0
bobChannel.channelState.LocalChanCfg.ChanReserve = 0

@ -5427,7 +5427,7 @@ func TestChanCommitWeightDustHtlcs(t *testing.T) {
return w
}

// Start by getting the initial remote commitment wight seen from
// Start by getting the initial remote commitment weight seen from
// Alice's perspective. At this point there are no HTLCs on the
// commitment.
weight1 := remoteCommitWeight(aliceChannel)

@ -5438,7 +5438,7 @@ func TestChanCommitWeightDustHtlcs(t *testing.T) {
bobDustHtlc := bobDustlimit + htlcSuccessFee - 1
preimg := addHtlc(bobDustHtlc)

// Now get the current wight of the remote commitment. We expect it to
// Now get the current weight of the remote commitment. We expect it to
// not have changed, since the HTLC we added is considered dust.
weight2 := remoteCommitWeight(aliceChannel)
require.Equal(t, weight1, weight2)

@ -5711,7 +5711,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
t.Fatalf("unable to restart bob: %v", err)
}

// Readd the Fail to both Alice and Bob's channels, as the non-committed
// Re-add the Fail to both Alice and Bob's channels, as the non-committed
// update will not have survived the restart.
err = bobChannel.FailHTLC(htlc2.ID, []byte("failreason"), nil, nil, nil)
if err != nil {

@ -5723,7 +5723,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
}

// Have Alice initiate a state transition, which does not include the
// HTLCs just readded to the channel state.
// HTLCs just re-added to the channel state.
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
if err != nil {
t.Fatal(err)

@ -6808,7 +6808,7 @@ func TestChanReserveRemoteInitiator(t *testing.T) {
// Set Alice's channel reserve to be 5 BTC-commitfee. This means she
// has just enough balance to cover the comitment fee, but not enough
// to add any more HTLCs to the commitment. Although a reserve this
// high is unrealistic, a channel can easiliy get into a situation
// high is unrealistic, a channel can easily get into a situation
// where the initiator cannot pay for the fee of any more HTLCs.
commitFee := aliceChannel.channelState.LocalCommitment.CommitFee
aliceMinReserve := 5*btcutil.SatoshiPerBitcoin - commitFee

@ -7203,7 +7203,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) {
// signature from Bob yet.
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("unable to recive revocation: %v", err)
t.Fatalf("unable to receive revocation: %v", err)
}

// Now make Alice send and sign an additional HTLC. We don't let Bob

@ -7709,7 +7709,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
// Alice receives the revocation, ACKing her pending commitment.
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("unable to recive revocation: %v", err)
t.Fatalf("unable to receive revocation: %v", err)
}

// However, the HTLC is still not locked into her local commitment, so

@ -7726,7 +7726,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
}

// At this stage Bob has a pending remote commitment. Make sure
// restoring at this stage correcly restores the HTLC add commit
// restoring at this stage correctly restores the HTLC add commit
// heights.
bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 0, 1, 1)

@ -7746,7 +7746,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {

_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("unable to recive revocation: %v", err)
t.Fatalf("unable to receive revocation: %v", err)
}

// Alice ACKing Bob's pending commitment shouldn't change the heights

@ -7790,7 +7790,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
}

// Since Bob just revoked another commitment, a restoration should
// increase the add height of the firt HTLC to 2, as we only keep the
// increase the add height of the first HTLC to 2, as we only keep the
// last unrevoked commitment. The new HTLC will also have a local add
// height of 2.
bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 0, 2, 1)
@ -193,7 +193,7 @@ type ScriptInfo struct {
// CommitScriptToSelf constructs the public key script for the output on the
// commitment transaction paying to the "owner" of said commitment transaction.
// The `initiator` argument should correspond to the owner of the commitment
// tranasction which we are generating the to_local script for. If the other
// transaction which we are generating the to_local script for. If the other
// party learns of the preimage to the revocation hash, then they can claim all
// the settled funds in the channel, plus the unsettled funds.
func CommitScriptToSelf(chanType channeldb.ChannelType, initiator bool,
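
For reference, the to_local script this kind of function produces for legacy (non-anchor) commitments follows BOLT #3. A sketch using btcd's script builder; the key derivation and anchor-channel variant are omitted, and the helper name is illustrative.

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/txscript"
)

// toLocalScript builds the BOLT #3 to_local script: the revocation branch is
// spendable immediately by the counterparty after a breach, while the owner
// must wait csvDelay blocks on the normal branch.
func toLocalScript(revokeKey, delayKey *btcec.PublicKey,
	csvDelay uint32) ([]byte, error) {

	b := txscript.NewScriptBuilder()
	b.AddOp(txscript.OP_IF)
	// Breach path: the revocation key spends right away.
	b.AddData(revokeKey.SerializeCompressed())
	b.AddOp(txscript.OP_ELSE)
	// Normal path: the owner waits out the CSV delay.
	b.AddInt64(int64(csvDelay))
	b.AddOp(txscript.OP_CHECKSEQUENCEVERIFY)
	b.AddOp(txscript.OP_DROP)
	b.AddData(delayKey.SerializeCompressed())
	b.AddOp(txscript.OP_ENDIF)
	b.AddOp(txscript.OP_CHECKSIG)
	return b.Script()
}

func main() {
	revPriv, _ := btcec.NewPrivateKey(btcec.S256())
	delayPriv, _ := btcec.NewPrivateKey(btcec.S256())
	script, err := toLocalScript(revPriv.PubKey(), delayPriv.PubKey(), 144)
	fmt.Println(len(script), err)
}
```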
@ -234,7 +234,7 @@ func CommitScriptToSelf(chanType channeldb.ChannelType, initiator bool,

// CommitScriptToRemote derives the appropriate to_remote script based on the
// channel's commitment type. The `initiator` argument should correspond to the
// owner of the commitment tranasction which we are generating the to_remote
// owner of the commitment transaction which we are generating the to_remote
// script for. The second return value is the CSV delay of the output script,
// what must be satisfied in order to spend the output.
func CommitScriptToRemote(chanType channeldb.ChannelType, initiator bool,

@ -340,7 +340,7 @@ func HtlcSecondLevelInputSequence(chanType channeldb.ChannelType) uint32 {
// output for the second-level HTLC transactions. The second level transaction
// act as a sort of covenant, ensuring that a 2-of-2 multi-sig output can only
// be spent in a particular way, and to a particular output. The `initiator`
// argument should correspond to the owner of the commitment tranasction which
// argument should correspond to the owner of the commitment transaction which
// we are generating the to_local script for.
func SecondLevelHtlcScript(chanType channeldb.ChannelType, initiator bool,
revocationKey, delayKey *btcec.PublicKey,

@ -725,7 +725,7 @@ func (cb *CommitmentBuilder) createUnsignedCommitmentTx(ourBalance,
// spent after a relative block delay or revocation event, and a remote output
// paying the counterparty within the channel, which can be spent immediately
// or after a delay depending on the commitment type. The `initiator` argument
// should correspond to the owner of the commitment tranasction we are creating.
// should correspond to the owner of the commitment transaction we are creating.
func CreateCommitTx(chanType channeldb.ChannelType,
fundingOutput wire.TxIn, keyRing *CommitmentKeyRing,
localChanCfg, remoteChanCfg *channeldb.ChannelConfig,
@ -1221,7 +1221,7 @@ func (l *LightningWallet) handleFundingCancelRequest(req *fundingReserveCancelMs
pendingReservation.Lock()
defer pendingReservation.Unlock()

// Mark all previously locked outpoints as useable for future funding
// Mark all previously locked outpoints as usable for future funding
// requests.
for _, unusedInput := range pendingReservation.ourContribution.Inputs {
delete(l.lockedOutPoints, unusedInput.PreviousOutPoint)
@ -223,7 +223,7 @@ If Android Studio tells you that the `aar` file cannot be included into the `app

![separate_gradle_module](docs/separate_gradle_module.png)

3. Gradle file should countain only these lines:
3. Gradle file should contain only these lines:

```shell
configurations.maybeCreate("default")
@ -16,7 +16,7 @@ import (
"google.golang.org/grpc"
)

// lndStarted will be used atomically to ensure only a singel lnd instance is
// lndStarted will be used atomically to ensure only a single lnd instance is
// attempted to be started at once.
var lndStarted int32
@ -7,7 +7,7 @@ import (

var (
// private24BitBlock contains the set of private IPv4 addresses within
// the 10.0.0.0/8 adddress space.
// the 10.0.0.0/8 address space.
private24BitBlock *net.IPNet

// private20BitBlock contains the set of private IPv4 addresses within
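
Such block variables are typically populated once at startup. A self-contained sketch: the 10.0.0.0/8 block is named in the comment above, while the 172.16.0.0/12 companion is the standard RFC 1918 "20-bit block" and is an assumption here.

```go
package main

import (
	"fmt"
	"net"
)

var (
	// private24BitBlock covers the 10.0.0.0/8 space (RFC 1918 "24-bit block").
	private24BitBlock *net.IPNet

	// private20BitBlock covers the 172.16.0.0/12 space ("20-bit block",
	// assumed here from RFC 1918).
	private20BitBlock *net.IPNet
)

func init() {
	// The CIDR literals are constants, so the parse errors are ignored.
	_, private24BitBlock, _ = net.ParseCIDR("10.0.0.0/8")
	_, private20BitBlock, _ = net.ParseCIDR("172.16.0.0/12")
}

func main() {
	ip := net.ParseIP("10.1.2.3")
	fmt.Println(private24BitBlock.Contains(ip)) // true
	fmt.Println(private20BitBlock.Contains(ip)) // false
}
```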
@ -69,12 +69,12 @@ type ChanStatusConfig struct {
Graph ChannelGraph

// ChanEnableTimeout is the duration a peer's connect must remain stable
// before attempting to reenable the channel.
// before attempting to re-enable the channel.
//
// NOTE: This value is only used to verify that the relation between
// itself, ChanDisableTimeout, and ChanStatusSampleInterval is correct.
// The user is still responsible for ensuring that the same duration
// elapses before attempting to reenable a channel.
// elapses before attempting to re-enable a channel.
ChanEnableTimeout time.Duration

// ChanDisableTimeout is the duration the manager will wait after
@ -138,7 +138,7 @@ func NewChanStatusManager(cfg *ChanStatusConfig) (*ChanStatusManager, error) {
// enable_timeout + sample_interval to be less than or equal to the
// disable_timeout and that all are positive values. A peer that
// disconnects and reconnects quickly may cause a disable update to be
// sent, shortly followed by a reenable. Ensuring a healthy separation
// sent, shortly followed by a re-enable. Ensuring a healthy separation
// helps dampen the possibility of spamming updates that toggle the
// disable bit for such events.
if cfg.ChanStatusSampleInterval <= 0 {
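
The relation being enforced can be summarized as a standalone check: enable_timeout + sample_interval <= disable_timeout, with all durations positive. A sketch whose error texts are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// validateTimeouts enforces enable + sample <= disable with all durations
// positive, the same relation the constructor above verifies.
func validateTimeouts(enable, disable, sample time.Duration) error {
	switch {
	case sample <= 0:
		return errors.New("sample interval must be positive")
	case enable <= 0:
		return errors.New("enable timeout must be positive")
	case disable <= 0:
		return errors.New("disable timeout must be positive")
	case enable+sample > disable:
		return fmt.Errorf("enable timeout (%v) + sample interval "+
			"(%v) must not exceed disable timeout (%v)",
			enable, sample, disable)
	}
	return nil
}

func main() {
	fmt.Println(validateTimeouts(
		19*time.Minute, 20*time.Minute, time.Minute)) // <nil>
	fmt.Println(validateTimeouts(
		19*time.Minute, 20*time.Minute, 2*time.Minute)) // error
}
```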
@ -492,7 +492,7 @@ func (m *ChanStatusManager) processAutoRequest(outpoint wire.OutPoint) error {
// scheduled. Once an active channel is determined to be pending-inactive, one
// of two transitions can follow. Either the channel is disabled because no
// request to enable is received before the scheduled disable is broadcast, or
// the channel is successfully reenabled and channel is returned to an active
// the channel is successfully re-enabled and channel is returned to an active
// state from the POV of the ChanStatusManager.
func (m *ChanStatusManager) markPendingInactiveChannels() {
channels, err := m.fetchChannels()

@ -651,7 +651,7 @@ var stateMachineTests = []stateMachineTest{
time.Sleep(100 * time.Millisecond)
// Simulate reconnect by making channels active.
h.markActive(h.graph.chans())
// Request that all channels be reenabled.
// Request that all channels be re-enabled.
h.assertEnables(h.graph.chans(), nil, false)
// Pending disable should have been canceled, and
// no updates sent. Channels remain enabled on the