Mirror of https://github.com/lightningnetwork/lnd.git (synced 2025-02-23 14:40:30 +01:00)

Merge pull request #5745 from Roasbeef/contract-court-move

    multi: move breach arbiter and utxo nursery into contractcourt package

Commit 42ce27f663: 15 changed files with 388 additions and 348 deletions
@@ -1,4 +1,4 @@
-package lnd
+package contractcourt

 import (
     "bytes"
@@ -17,7 +17,6 @@ import (

     "github.com/lightningnetwork/lnd/chainntnfs"
     "github.com/lightningnetwork/lnd/channeldb"
-    "github.com/lightningnetwork/lnd/htlcswitch"
     "github.com/lightningnetwork/lnd/input"
     "github.com/lightningnetwork/lnd/kvdb"
     "github.com/lightningnetwork/lnd/labels"
@@ -59,7 +58,7 @@ var (
     errBrarShuttingDown = errors.New("breacharbiter shutting down")
 )

-// ContractBreachEvent is an event the breachArbiter will receive in case a
+// ContractBreachEvent is an event the BreachArbiter will receive in case a
 // contract breach is observed on-chain. It contains the necessary information
 // to handle the breach, and a ProcessACK closure we will use to ACK the event
 // when we have safely stored all the necessary information.
@@ -80,6 +79,51 @@ type ContractBreachEvent struct {
     BreachRetribution *lnwallet.BreachRetribution
 }

+// ChannelCloseType is an enum which signals the type of channel closure the
+// peer should execute.
+type ChannelCloseType uint8
+
+const (
+    // CloseRegular indicates a regular cooperative channel closure
+    // should be attempted.
+    CloseRegular ChannelCloseType = iota
+
+    // CloseBreach indicates that a channel breach has been detected, and
+    // the link should immediately be marked as unavailable.
+    CloseBreach
+)
+
+// RetributionStorer provides an interface for managing a persistent map from
+// wire.OutPoint -> retributionInfo. Upon learning of a breach, a BreachArbiter
+// should record the retributionInfo for the breached channel, which serves a
+// checkpoint in the event that retribution needs to be resumed after failure.
+// A RetributionStore provides an interface for managing the persisted set, as
+// well as mapping user defined functions over the entire on-disk contents.
+//
+// Calls to RetributionStore may occur concurrently. A concrete instance of
+// RetributionStore should use appropriate synchronization primitives, or
+// be otherwise safe for concurrent access.
+type RetributionStorer interface {
+    // Add persists the retributionInfo to disk, using the information's
+    // chanPoint as the key. This method should overwrite any existing
+    // entries found under the same key, and an error should be raised if
+    // the addition fails.
+    Add(retInfo *retributionInfo) error
+
+    // IsBreached queries the retribution store to see if the breach arbiter
+    // is aware of any breaches for the provided channel point.
+    IsBreached(chanPoint *wire.OutPoint) (bool, error)
+
+    // Remove deletes the retributionInfo from disk, if any exists, under
+    // the given key. An error should be re raised if the removal fails.
+    Remove(key *wire.OutPoint) error
+
+    // ForAll iterates over the existing on-disk contents and applies a
+    // chosen, read-only callback to each. This method should ensure that it
+    // immediately propagate any errors generated by the callback.
+    ForAll(cb func(*retributionInfo) error, reset func()) error
+}
+
 // BreachConfig bundles the required subsystems used by the breach arbiter. An
 // instance of BreachConfig is passed to newBreachArbiter during instantiation.
 type BreachConfig struct {
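
For orientation, the RetributionStorer interface added above can be satisfied by a very small map-backed store; the package's own tests use a mock in the same spirit. A minimal sketch, not part of the diff, assuming it lives inside contractcourt so it can reach the unexported retributionInfo type, and assuming retributionInfo carries a chanPoint wire.OutPoint field as the method comments imply:

// memRetributionStore is a hypothetical, map-backed RetributionStorer used
// purely for illustration. The mutex satisfies the interface's requirement
// that implementations be safe for concurrent access.
type memRetributionStore struct {
    mu    sync.Mutex
    state map[wire.OutPoint]*retributionInfo
}

func newMemRetributionStore() *memRetributionStore {
    return &memRetributionStore{
        state: make(map[wire.OutPoint]*retributionInfo),
    }
}

// Add overwrites any existing entry stored under the same channel point.
func (m *memRetributionStore) Add(retInfo *retributionInfo) error {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.state[retInfo.chanPoint] = retInfo
    return nil
}

// IsBreached reports whether a breach was recorded for the channel point.
func (m *memRetributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
    m.mu.Lock()
    defer m.mu.Unlock()
    _, ok := m.state[*chanPoint]
    return ok, nil
}

// Remove deletes the entry for the given channel point, if one exists.
func (m *memRetributionStore) Remove(key *wire.OutPoint) error {
    m.mu.Lock()
    defer m.mu.Unlock()
    delete(m.state, *key)
    return nil
}

// ForAll calls reset, then applies cb to every stored entry, propagating the
// first error immediately.
func (m *memRetributionStore) ForAll(cb func(*retributionInfo) error,
    reset func()) error {

    m.mu.Lock()
    defer m.mu.Unlock()

    reset()
    for _, retInfo := range m.state {
        if err := cb(retInfo); err != nil {
            return err
        }
    }
    return nil
}

A compile-time assertion such as var _ RetributionStorer = (*memRetributionStore)(nil) then verifies the interface is satisfied, mirroring the check the nursery store uses at the end of its file below.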
@@ -87,7 +131,7 @@ type BreachConfig struct {
     // which it detects a breach, ensuring now further activity will
     // continue across the link. The method accepts link's channel point and
     // a close type to be included in the channel close summary.
-    CloseLink func(*wire.OutPoint, htlcswitch.ChannelCloseType)
+    CloseLink func(*wire.OutPoint, ChannelCloseType)

     // DB provides access to the user's channels, allowing the breach
     // arbiter to determine the current state of a user's channels, and how
@@ -110,9 +154,9 @@ type BreachConfig struct {
     // transaction to the network.
     PublishTransaction func(*wire.MsgTx, string) error

-    // ContractBreaches is a channel where the breachArbiter will receive
+    // ContractBreaches is a channel where the BreachArbiter will receive
     // notifications in the event of a contract breach being observed. A
-    // ContractBreachEvent must be ACKed by the breachArbiter, such that
+    // ContractBreachEvent must be ACKed by the BreachArbiter, such that
     // the sending subsystem knows that the event is properly handed off.
     ContractBreaches <-chan *ContractBreachEvent

@@ -124,10 +168,10 @@ type BreachConfig struct {
     // Store is a persistent resource that maintains information regarding
     // breached channels. This is used in conjunction with DB to recover
     // from crashes, restarts, or other failures.
-    Store RetributionStore
+    Store RetributionStorer
 }

-// breachArbiter is a special subsystem which is responsible for watching and
+// BreachArbiter is a special subsystem which is responsible for watching and
 // acting on the detection of any attempted uncooperative channel breaches by
 // channel counterparties. This file essentially acts as deterrence code for
 // those attempting to launch attacks against the daemon. In practice it's
@@ -135,7 +179,7 @@ type BreachConfig struct {
 // important to have it in place just in case we encounter cheating channel
 // counterparties.
 // TODO(roasbeef): closures in config for subsystem pointers to decouple?
-type breachArbiter struct {
+type BreachArbiter struct {
     started sync.Once
     stopped sync.Once

@@ -146,18 +190,18 @@ type breachArbiter struct {
     sync.Mutex
 }

-// newBreachArbiter creates a new instance of a breachArbiter initialized with
+// NewBreachArbiter creates a new instance of a BreachArbiter initialized with
 // its dependent objects.
-func newBreachArbiter(cfg *BreachConfig) *breachArbiter {
-    return &breachArbiter{
+func NewBreachArbiter(cfg *BreachConfig) *BreachArbiter {
+    return &BreachArbiter{
         cfg:  cfg,
         quit: make(chan struct{}),
     }
 }

-// Start is an idempotent method that officially starts the breachArbiter along
+// Start is an idempotent method that officially starts the BreachArbiter along
 // with all other goroutines it needs to perform its functions.
-func (b *breachArbiter) Start() error {
+func (b *BreachArbiter) Start() error {
     var err error
     b.started.Do(func() {
         err = b.start()
@@ -165,7 +209,7 @@ func (b *breachArbiter) Start() error {
     return err
 }

-func (b *breachArbiter) start() error {
+func (b *BreachArbiter) start() error {
     brarLog.Tracef("Starting breach arbiter")

     // Load all retributions currently persisted in the retribution store.
@@ -259,10 +303,10 @@ func (b *breachArbiter) start() error {
     return nil
 }

-// Stop is an idempotent method that signals the breachArbiter to execute a
+// Stop is an idempotent method that signals the BreachArbiter to execute a
 // graceful shutdown. This function will block until all goroutines spawned by
-// the breachArbiter have gracefully exited.
-func (b *breachArbiter) Stop() error {
+// the BreachArbiter have gracefully exited.
+func (b *BreachArbiter) Stop() error {
     b.stopped.Do(func() {
         brarLog.Infof("Breach arbiter shutting down")

@@ -274,11 +318,11 @@ func (b *breachArbiter) Stop() error {

 // IsBreached queries the breach arbiter's retribution store to see if it is
 // aware of any channel breaches for a particular channel point.
-func (b *breachArbiter) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
+func (b *BreachArbiter) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
     return b.cfg.Store.IsBreached(chanPoint)
 }

-// contractObserver is the primary goroutine for the breachArbiter. This
+// contractObserver is the primary goroutine for the BreachArbiter. This
 // goroutine is responsible for handling breach events coming from the
 // contractcourt on the ContractBreaches channel. If a channel breach is
 // detected, then the contractObserver will execute the retribution logic
@@ -286,7 +330,7 @@ func (b *breachArbiter) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
 // wallet.
 //
 // NOTE: This MUST be run as a goroutine.
-func (b *breachArbiter) contractObserver() {
+func (b *BreachArbiter) contractObserver() {
     defer b.wg.Done()

     brarLog.Infof("Starting contract observer, watching for breaches.")
@@ -307,45 +351,6 @@ func (b *breachArbiter) contractObserver() {
     }
 }

-// convertToSecondLevelRevoke takes a breached output, and a transaction that
-// spends it to the second level, and mutates the breach output into one that
-// is able to properly sweep that second level output. We'll use this function
-// when we go to sweep a breached commitment transaction, but the cheating
-// party has already attempted to take it to the second level
-func convertToSecondLevelRevoke(bo *breachedOutput, breachInfo *retributionInfo,
-    spendDetails *chainntnfs.SpendDetail) {
-
-    // In this case, we'll modify the witness type of this output to
-    // actually prepare for a second level revoke.
-    bo.witnessType = input.HtlcSecondLevelRevoke
-
-    // We'll also redirect the outpoint to this second level output, so the
-    // spending transaction updates it inputs accordingly.
-    spendingTx := spendDetails.SpendingTx
-    oldOp := bo.outpoint
-    bo.outpoint = wire.OutPoint{
-        Hash:  spendingTx.TxHash(),
-        Index: 0,
-    }
-
-    // Next, we need to update the amount so we can do fee estimation
-    // properly, and also so we can generate a valid signature as we need
-    // to know the new input value (the second level transactions shaves
-    // off some funds to fees).
-    newAmt := spendingTx.TxOut[0].Value
-    bo.amt = btcutil.Amount(newAmt)
-    bo.signDesc.Output.Value = newAmt
-    bo.signDesc.Output.PkScript = spendingTx.TxOut[0].PkScript
-
-    // Finally, we'll need to adjust the witness program in the
-    // SignDescriptor.
-    bo.signDesc.WitnessScript = bo.secondLevelWitnessScript
-
-    brarLog.Warnf("HTLC(%v) for ChannelPoint(%v) has been spent to the "+
-        "second-level, adjusting -> %v", oldOp, breachInfo.chanPoint,
-        bo.outpoint)
-}
-
 // spend is used to wrap the index of the retributionInfo output that gets
 // spent together with the spend details.
 type spend struct {
@@ -357,7 +362,7 @@ type spend struct {
 // returns the spend details for those outputs. The spendNtfns map is a cache
 // used to store registered spend subscriptions, in case we must call this
 // method multiple times.
-func (b *breachArbiter) waitForSpendEvent(breachInfo *retributionInfo,
+func (b *BreachArbiter) waitForSpendEvent(breachInfo *retributionInfo,
     spendNtfns map[wire.OutPoint]*chainntnfs.SpendEvent) ([]spend, error) {

     inputs := breachInfo.breachedOutputs
@@ -480,6 +485,46 @@ func (b *breachArbiter) waitForSpendEvent(breachInfo *retributionInfo,
     }
 }

+// convertToSecondLevelRevoke takes a breached output, and a transaction that
+// spends it to the second level, and mutates the breach output into one that
+// is able to properly sweep that second level output. We'll use this function
+// when we go to sweep a breached commitment transaction, but the cheating
+// party has already attempted to take it to the second level
+func convertToSecondLevelRevoke(bo *breachedOutput, breachInfo *retributionInfo,
+    spendDetails *chainntnfs.SpendDetail) {
+
+    // In this case, we'll modify the witness type of this output to
+    // actually prepare for a second level revoke.
+    bo.witnessType = input.HtlcSecondLevelRevoke
+
+    // We'll also redirect the outpoint to this second level output, so the
+    // spending transaction updates it inputs accordingly.
+    spendingTx := spendDetails.SpendingTx
+    spendInputIndex := spendDetails.SpenderInputIndex
+    oldOp := bo.outpoint
+    bo.outpoint = wire.OutPoint{
+        Hash:  spendingTx.TxHash(),
+        Index: spendInputIndex,
+    }
+
+    // Next, we need to update the amount so we can do fee estimation
+    // properly, and also so we can generate a valid signature as we need
+    // to know the new input value (the second level transactions shaves
+    // off some funds to fees).
+    newAmt := spendingTx.TxOut[spendInputIndex].Value
+    bo.amt = btcutil.Amount(newAmt)
+    bo.signDesc.Output.Value = newAmt
+    bo.signDesc.Output.PkScript = spendingTx.TxOut[spendInputIndex].PkScript
+
+    // Finally, we'll need to adjust the witness program in the
+    // SignDescriptor.
+    bo.signDesc.WitnessScript = bo.secondLevelWitnessScript
+
+    brarLog.Warnf("HTLC(%v) for ChannelPoint(%v) has been spent to the "+
+        "second-level, adjusting -> %v", oldOp, breachInfo.chanPoint,
+        bo.outpoint)
+}
+
 // updateBreachInfo mutates the passed breachInfo by removing or converting any
 // outputs among the spends. It also counts the total and revoked funds swept
 // by our justice spends.
@@ -581,7 +626,7 @@ func updateBreachInfo(breachInfo *retributionInfo, spends []spend) (
 // the lingering funds within the channel into the daemon's wallet.
 //
 // NOTE: This MUST be run as a goroutine.
-func (b *breachArbiter) exactRetribution(confChan *chainntnfs.ConfirmationEvent,
+func (b *BreachArbiter) exactRetribution(confChan *chainntnfs.ConfirmationEvent,
     breachInfo *retributionInfo) {

     defer b.wg.Done()
@@ -797,7 +842,7 @@ Loop:

 // cleanupBreach marks the given channel point as fully resolved and removes the
 // retribution for that the channel from the retribution store.
-func (b *breachArbiter) cleanupBreach(chanPoint *wire.OutPoint) error {
+func (b *BreachArbiter) cleanupBreach(chanPoint *wire.OutPoint) error {
     // With the channel closed, mark it in the database as such.
     err := b.cfg.DB.MarkChanFullyClosed(chanPoint)
     if err != nil {
@@ -816,15 +861,15 @@ func (b *breachArbiter) cleanupBreach(chanPoint *wire.OutPoint) error {
 }

 // handleBreachHandoff handles a new breach event, by writing it to disk, then
-// notifies the breachArbiter contract observer goroutine that a channel's
+// notifies the BreachArbiter contract observer goroutine that a channel's
 // contract has been breached by the prior counterparty. Once notified the
-// breachArbiter will attempt to sweep ALL funds within the channel using the
+// BreachArbiter will attempt to sweep ALL funds within the channel using the
 // information provided within the BreachRetribution generated due to the
 // breach of channel contract. The funds will be swept only after the breaching
 // transaction receives a necessary number of confirmations.
 //
 // NOTE: This MUST be run as a goroutine.
-func (b *breachArbiter) handleBreachHandoff(breachEvent *ContractBreachEvent) {
+func (b *BreachArbiter) handleBreachHandoff(breachEvent *ContractBreachEvent) {
     defer b.wg.Done()

     chanPoint := breachEvent.ChanPoint
@@ -844,7 +889,7 @@ func (b *breachArbiter) handleBreachHandoff(breachEvent *ContractBreachEvent) {
         // breached in order to ensure any incoming or outgoing
         // multi-hop HTLCs aren't sent over this link, nor any other
         // links associated with this peer.
-        b.cfg.CloseLink(&chanPoint, htlcswitch.CloseBreach)
+        b.cfg.CloseLink(&chanPoint, CloseBreach)

         // TODO(roasbeef): need to handle case of remote broadcast
         // mid-local initiated state-transition, possible
@@ -1185,7 +1230,7 @@ type justiceTxVariants struct {
 // the funds within the channel which we are now entitled to due to a breach of
 // the channel's contract by the counterparty. This function returns a *fully*
 // signed transaction with the witness for each input fully in place.
-func (b *breachArbiter) createJusticeTx(
+func (b *BreachArbiter) createJusticeTx(
     breachedOutputs []breachedOutput) (*justiceTxVariants, error) {

     var (
@@ -1235,7 +1280,7 @@ func (b *breachArbiter) createJusticeTx(
 }

 // createSweepTx creates a tx that sweeps the passed inputs back to our wallet.
-func (b *breachArbiter) createSweepTx(inputs []input.Input) (*wire.MsgTx,
+func (b *BreachArbiter) createSweepTx(inputs []input.Input) (*wire.MsgTx,
     error) {
     if len(inputs) == 0 {
         return nil, nil
@@ -1289,7 +1334,7 @@ func (b *breachArbiter) createSweepTx(inputs []input.Input) (*wire.MsgTx,

 // sweepSpendableOutputsTxn creates a signed transaction from a sequence of
 // spendable outputs by sweeping the funds into a single p2wkh output.
-func (b *breachArbiter) sweepSpendableOutputsTxn(txWeight int64,
+func (b *BreachArbiter) sweepSpendableOutputsTxn(txWeight int64,
     inputs ...input.Input) (*wire.MsgTx, error) {

     // First, we obtain a new public key script from the wallet which we'll
@@ -1382,55 +1427,24 @@ func (b *breachArbiter) sweepSpendableOutputsTxn(txWeight int64,
     return txn, nil
 }

-// RetributionStore provides an interface for managing a persistent map from
-// wire.OutPoint -> retributionInfo. Upon learning of a breach, a BreachArbiter
-// should record the retributionInfo for the breached channel, which serves a
-// checkpoint in the event that retribution needs to be resumed after failure.
-// A RetributionStore provides an interface for managing the persisted set, as
-// well as mapping user defined functions over the entire on-disk contents.
-//
-// Calls to RetributionStore may occur concurrently. A concrete instance of
-// RetributionStore should use appropriate synchronization primitives, or
-// be otherwise safe for concurrent access.
-type RetributionStore interface {
-    // Add persists the retributionInfo to disk, using the information's
-    // chanPoint as the key. This method should overwrite any existing
-    // entries found under the same key, and an error should be raised if
-    // the addition fails.
-    Add(retInfo *retributionInfo) error
-
-    // IsBreached queries the retribution store to see if the breach arbiter
-    // is aware of any breaches for the provided channel point.
-    IsBreached(chanPoint *wire.OutPoint) (bool, error)
-
-    // Remove deletes the retributionInfo from disk, if any exists, under
-    // the given key. An error should be re raised if the removal fails.
-    Remove(key *wire.OutPoint) error
-
-    // ForAll iterates over the existing on-disk contents and applies a
-    // chosen, read-only callback to each. This method should ensure that it
-    // immediately propagate any errors generated by the callback.
-    ForAll(cb func(*retributionInfo) error, reset func()) error
-}
-
-// retributionStore handles persistence of retribution states to disk and is
+// RetributionStore handles persistence of retribution states to disk and is
 // backed by a boltdb bucket. The primary responsibility of the retribution
 // store is to ensure that we can recover from a restart in the middle of a
 // breached contract retribution.
-type retributionStore struct {
+type RetributionStore struct {
     db *channeldb.DB
 }

-// newRetributionStore creates a new instance of a retributionStore.
-func newRetributionStore(db *channeldb.DB) *retributionStore {
-    return &retributionStore{
+// NewRetributionStore creates a new instance of a RetributionStore.
+func NewRetributionStore(db *channeldb.DB) *RetributionStore {
+    return &RetributionStore{
         db: db,
     }
 }

-// Add adds a retribution state to the retributionStore, which is then persisted
+// Add adds a retribution state to the RetributionStore, which is then persisted
 // to disk.
-func (rs *retributionStore) Add(ret *retributionInfo) error {
+func (rs *RetributionStore) Add(ret *retributionInfo) error {
     return kvdb.Update(rs.db, func(tx kvdb.RwTx) error {
         // If this is our first contract breach, the retributionBucket
         // won't exist, in which case, we just create a new bucket.
@@ -1457,7 +1471,7 @@ func (rs *retributionStore) Add(ret *retributionInfo) error {
 // previously breached. This is used when connecting to a peer to determine if
 // it is safe to add a link to the htlcswitch, as we should never add a channel
 // that has already been breached.
-func (rs *retributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
+func (rs *RetributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
     var found bool
     err := kvdb.View(rs.db, func(tx kvdb.RTx) error {
         retBucket := tx.ReadBucket(retributionBucket)
@@ -1485,7 +1499,7 @@ func (rs *retributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) {

 // Remove removes a retribution state and finalized justice transaction by
 // channel point from the retribution store.
-func (rs *retributionStore) Remove(chanPoint *wire.OutPoint) error {
+func (rs *RetributionStore) Remove(chanPoint *wire.OutPoint) error {
     return kvdb.Update(rs.db, func(tx kvdb.RwTx) error {
         retBucket := tx.ReadWriteBucket(retributionBucket)

@@ -1524,7 +1538,7 @@ func (rs *retributionStore) Remove(chanPoint *wire.OutPoint) error {

 // ForAll iterates through all stored retributions and executes the passed
 // callback function on each retribution.
-func (rs *retributionStore) ForAll(cb func(*retributionInfo) error,
+func (rs *RetributionStore) ForAll(cb func(*retributionInfo) error,
     reset func()) error {

     return kvdb.View(rs.db, func(tx kvdb.RTx) error {
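
The hunks above finish the breach arbiter file; the ones that follow update its tests. For orientation, a hedged sketch of how an external caller might now wire up the exported type, using only BreachConfig fields that appear in this diff (a real caller, such as lnd's server, sets several more fields, which are omitted here):

package lndsketch

import (
    "github.com/btcsuite/btcd/wire"
    "github.com/lightningnetwork/lnd/channeldb"
    "github.com/lightningnetwork/lnd/contractcourt"
)

// startBreachArbiter is illustrative only: db is assumed to be an open
// *channeldb.DB and breachCh the channel on which ContractBreachEvents are
// delivered by the rest of contractcourt.
func startBreachArbiter(db *channeldb.DB,
    breachCh <-chan *contractcourt.ContractBreachEvent) (*contractcourt.BreachArbiter, error) {

    brar := contractcourt.NewBreachArbiter(&contractcourt.BreachConfig{
        // React to a detected breach by marking the link unavailable; a
        // real implementation would call into the switch here.
        CloseLink: func(chanPoint *wire.OutPoint,
            closeType contractcourt.ChannelCloseType) {
        },
        DB:               db,
        ContractBreaches: breachCh,
        // The kvdb-backed store is exported now as well.
        Store: contractcourt.NewRetributionStore(db),
        // The remaining fields shown in this diff (Estimator,
        // GenSweepScript, PublishTransaction, ...) are elided.
    })

    if err := brar.Start(); err != nil {
        return nil, err
    }
    return brar, nil
}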
@@ -1,6 +1,7 @@
+//go:build !rpctest
 // +build !rpctest

-package lnd
+package contractcourt

 import (
     "bytes"
@@ -26,7 +27,6 @@ import (
     "github.com/go-errors/errors"
     "github.com/lightningnetwork/lnd/chainntnfs"
     "github.com/lightningnetwork/lnd/channeldb"
-    "github.com/lightningnetwork/lnd/htlcswitch"
     "github.com/lightningnetwork/lnd/input"
     "github.com/lightningnetwork/lnd/keychain"
     "github.com/lightningnetwork/lnd/lntest/channels"
@@ -338,7 +338,7 @@ func init() {
 // modifications to the entries are made between calls or through side effects,
 // and (2) that the database is actually being persisted between actions.
 type FailingRetributionStore interface {
-    RetributionStore
+    RetributionStorer

     Restart()
 }
@@ -350,18 +350,18 @@ type FailingRetributionStore interface {
 type failingRetributionStore struct {
     mu sync.Mutex

-    rs RetributionStore
+    rs RetributionStorer

     nextAddErr error

-    restart func() RetributionStore
+    restart func() RetributionStorer
 }

 // newFailingRetributionStore creates a new failing retribution store. The given
 // restart closure should ensure that it is reloading its contents from the
 // persistent source.
 func newFailingRetributionStore(
-    restart func() RetributionStore) *failingRetributionStore {
+    restart func() RetributionStorer) *failingRetributionStore {

     return &failingRetributionStore{
         mu: sync.Mutex{},
@@ -631,7 +631,7 @@ func TestMockRetributionStore(t *testing.T) {
             func(tt *testing.T) {
                 mrs := newMockRetributionStore()
                 frs := newFailingRetributionStore(
-                    func() RetributionStore { return mrs },
+                    func() RetributionStorer { return mrs },
                 )
                 test.test(frs, tt)
             },
@@ -677,7 +677,7 @@ func TestChannelDBRetributionStore(t *testing.T) {
     defer db.Close()
     defer cleanUp()

-    restartDb := func() RetributionStore {
+    restartDb := func() RetributionStorer {
         // Close and reopen channeldb
         if err = db.Close(); err != nil {
             t.Fatalf("unable to close "+
@@ -691,7 +691,7 @@ func TestChannelDBRetributionStore(t *testing.T) {
                 "channeldb: %v", err)
         }

-        return newRetributionStore(db)
+        return NewRetributionStore(db)
     }

     frs := newFailingRetributionStore(restartDb)
@@ -703,7 +703,7 @@ func TestChannelDBRetributionStore(t *testing.T) {

 // countRetributions uses a retribution store's ForAll to count the number of
 // elements emitted from the store.
-func countRetributions(t *testing.T, rs RetributionStore) int {
+func countRetributions(t *testing.T, rs RetributionStorer) int {
     count := 0
     err := rs.ForAll(func(_ *retributionInfo) error {
         count++
@@ -971,7 +971,7 @@ restartCheck:
     }
 }

-func initBreachedState(t *testing.T) (*breachArbiter,
+func initBreachedState(t *testing.T) (*BreachArbiter,
     *lnwallet.LightningChannel, *lnwallet.LightningChannel,
     *lnwallet.LocalForceCloseSummary, chan *ContractBreachEvent,
     func(), func()) {
@@ -2035,7 +2035,7 @@ func findInputIndex(t *testing.T, op wire.OutPoint, tx *wire.MsgTx) int {

 // assertArbiterBreach checks that the breach arbiter has persisted the breach
 // information for a particular channel.
-func assertArbiterBreach(t *testing.T, brar *breachArbiter,
+func assertArbiterBreach(t *testing.T, brar *BreachArbiter,
     chanPoint *wire.OutPoint) {

     t.Helper()
@@ -2055,7 +2055,7 @@ func assertArbiterBreach(t *testing.T, brar *breachArbiter,

 // assertNoArbiterBreach checks that the breach arbiter has not persisted the
 // breach information for a particular channel.
-func assertNoArbiterBreach(t *testing.T, brar *breachArbiter,
+func assertNoArbiterBreach(t *testing.T, brar *BreachArbiter,
     chanPoint *wire.OutPoint) {

     t.Helper()
@@ -2074,7 +2074,7 @@ func assertNoArbiterBreach(t *testing.T, brar *breachArbiter,

 // assertBrarCleanup blocks until the given channel point has been removed the
 // retribution store and the channel is fully closed in the database.
-func assertBrarCleanup(t *testing.T, brar *breachArbiter,
+func assertBrarCleanup(t *testing.T, brar *BreachArbiter,
     chanPoint *wire.OutPoint, db *channeldb.DB) {

     t.Helper()
@@ -2159,11 +2159,11 @@ func assertNotPendingClosed(t *testing.T, c *lnwallet.LightningChannel) {
 // createTestArbiter instantiates a breach arbiter with a failing retribution
 // store, so that controlled failures can be tested.
 func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent,
-    db *channeldb.DB) (*breachArbiter, func(), error) {
+    db *channeldb.DB) (*BreachArbiter, func(), error) {

     // Create a failing retribution store, that wraps a normal one.
-    store := newFailingRetributionStore(func() RetributionStore {
-        return newRetributionStore(db)
+    store := newFailingRetributionStore(func() RetributionStorer {
+        return NewRetributionStore(db)
     })

     aliceKeyPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(),
@@ -2172,8 +2172,8 @@ func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent,

     // Assemble our test arbiter.
     notifier := mock.MakeMockSpendNotifier()
-    ba := newBreachArbiter(&BreachConfig{
-        CloseLink: func(_ *wire.OutPoint, _ htlcswitch.ChannelCloseType) {},
+    ba := NewBreachArbiter(&BreachConfig{
+        CloseLink: func(_ *wire.OutPoint, _ ChannelCloseType) {},
         DB:        db,
         Estimator: chainfee.NewStaticEstimator(12500, 0),
         GenSweepScript: func() ([]byte, error) { return nil, nil },
@@ -28,8 +28,6 @@ import (
 )

 const (
-    defaultTimeout = time.Second * 5
-
     // stateTimeout is the timeout we allow when waiting for state
     // transitions.
     stateTimeout = time.Second * 15
@@ -5,14 +5,24 @@ import (
     "github.com/lightningnetwork/lnd/build"
 )

-// log is a logger that is initialized with no output filters. This
-// means the package will not perform any logging by default until the caller
-// requests it.
-var log btclog.Logger
+var (
+    // log is a logger that is initialized with no output filters. This
+    // means the package will not perform any logging by default until the caller
+    // requests it.
+    log btclog.Logger
+
+    // brarLog is the logger used by the breach arb.
+    brarLog btclog.Logger
+
+    // utxnLog is the logger used by the utxo nursary.
+    utxnLog btclog.Logger
+)

 // The default amount of logging is none.
 func init() {
     UseLogger(build.NewSubLogger("CNCT", nil))
+    UseBreachLogger(build.NewSubLogger("BRAR", nil))
+    UseNurseryLogger(build.NewSubLogger("UTXN", nil))
 }

 // DisableLog disables all library log output. Logging output is disabled
@@ -28,6 +38,20 @@ func UseLogger(logger btclog.Logger) {
     log = logger
 }

+// UseBreachLogger uses a specified Logger to output package logging info.
+// This should be used in preference to SetLogWriter if the caller is also
+// using btclog.
+func UseBreachLogger(logger btclog.Logger) {
+    brarLog = logger
+}
+
+// UseNurseryLogger uses a specified Logger to output package logging info.
+// This should be used in preference to SetLogWriter if the caller is also
+// using btclog.
+func UseNurseryLogger(logger btclog.Logger) {
+    utxnLog = logger
+}
+
 // logClosure is used to provide a closure over expensive logging operations so
 // don't have to be performed when the logging level doesn't warrant it.
 type logClosure func() string
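
The two new hooks, UseBreachLogger and UseNurseryLogger, mirror the existing UseLogger so that callers can keep routing the BRAR and UTXN subsystems to their own logging backend after the move. A small sketch of that wiring using the stock btclog backend (the choice of writer is arbitrary):

package main

import (
    "os"

    "github.com/btcsuite/btclog"
    "github.com/lightningnetwork/lnd/contractcourt"
)

func main() {
    // Any btclog.Logger works; here a plain backend writing to stderr.
    backend := btclog.NewBackend(os.Stderr)

    // Route the package's general log plus the two sub-loggers that this
    // change adds for the breach arbiter and the utxo nursery.
    contractcourt.UseLogger(backend.Logger("CNCT"))
    contractcourt.UseBreachLogger(backend.Logger("BRAR"))
    contractcourt.UseNurseryLogger(backend.Logger("UTXN"))
}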
@@ -1,4 +1,4 @@
-package lnd
+package contractcourt

 import (
     "bytes"
@@ -79,7 +79,7 @@ import (
 // Concretely, it stores commitment and htlc outputs until any time-bounded
 // constraints have fully matured. The store exposes methods for enumerating its
 // contents, and persisting state transitions detected by the utxo nursery.
-type NurseryStore interface {
+type NurseryStorer interface {
     // Incubate registers a set of CSV delayed outputs (incoming HTLC's on
     // our commitment transaction, or a commitment output), and a slice of
     // outgoing htlc outputs to be swept back into the user's wallet. The
@@ -229,20 +229,20 @@ func prefixOutputKey(statePrefix []byte,
     return pfxOutputBuffer.Bytes(), nil
 }

-// nurseryStore is a concrete instantiation of a NurseryStore that is backed by
+// NurseryStore is a concrete instantiation of a NurseryStore that is backed by
 // a channeldb.DB instance.
-type nurseryStore struct {
+type NurseryStore struct {
     chainHash chainhash.Hash
     db        *channeldb.DB

     pfxChainKey []byte
 }

-// newNurseryStore accepts a chain hash and a channeldb.DB instance, returning
-// an instance of nurseryStore who's database is properly segmented for the
+// NewNurseryStore accepts a chain hash and a channeldb.DB instance, returning
+// an instance of NurseryStore who's database is properly segmented for the
 // given chain.
-func newNurseryStore(chainHash *chainhash.Hash,
-    db *channeldb.DB) (*nurseryStore, error) {
+func NewNurseryStore(chainHash *chainhash.Hash,
+    db *channeldb.DB) (*NurseryStore, error) {

     // Prefix the provided chain hash with "utxn" to create the key for the
     // nursery store's root bucket, ensuring each one has proper chain
@@ -252,7 +252,7 @@ func newNurseryStore(chainHash *chainhash.Hash,
         return nil, err
     }

-    return &nurseryStore{
+    return &NurseryStore{
         chainHash:   *chainHash,
         db:          db,
         pfxChainKey: pfxChainKey,
@@ -262,7 +262,7 @@ func newNurseryStore(chainHash *chainhash.Hash,
 // Incubate persists the beginning of the incubation process for the
 // CSV-delayed outputs (commitment and incoming HTLC's), commitment output and
 // a list of outgoing two-stage htlc outputs.
-func (ns *nurseryStore) Incubate(kids []kidOutput, babies []babyOutput) error {
+func (ns *NurseryStore) Incubate(kids []kidOutput, babies []babyOutput) error {
     return kvdb.Update(ns.db, func(tx kvdb.RwTx) error {
         // If we have any kid outputs to incubate, then we'll attempt
         // to add each of them to the nursery store. Any duplicate
@@ -289,7 +289,7 @@ func (ns *nurseryStore) Incubate(kids []kidOutput, babies []babyOutput) error {
 // CribToKinder atomically moves a babyOutput in the crib bucket to the
 // kindergarten bucket. The now mature kidOutput contained in the babyOutput
 // will be stored as it waits out the kidOutput's CSV delay.
-func (ns *nurseryStore) CribToKinder(bby *babyOutput) error {
+func (ns *NurseryStore) CribToKinder(bby *babyOutput) error {
     return kvdb.Update(ns.db, func(tx kvdb.RwTx) error {

         // First, retrieve or create the channel bucket corresponding to
@@ -371,7 +371,7 @@ func (ns *nurseryStore) CribToKinder(bby *babyOutput) error {
 // PreschoolToKinder atomically moves a kidOutput from the preschool bucket to
 // the kindergarten bucket. This transition should be executed after receiving
 // confirmation of the preschool output's commitment transaction.
-func (ns *nurseryStore) PreschoolToKinder(kid *kidOutput,
+func (ns *NurseryStore) PreschoolToKinder(kid *kidOutput,
     lastGradHeight uint32) error {

     return kvdb.Update(ns.db, func(tx kvdb.RwTx) error {
@@ -470,7 +470,7 @@ func (ns *nurseryStore) PreschoolToKinder(kid *kidOutput,
 // graduated status. This involves removing the kindergarten entries from both
 // the height and channel indexes. The height bucket will be opportunistically
 // pruned from the height index as outputs are removed.
-func (ns *nurseryStore) GraduateKinder(height uint32, kid *kidOutput) error {
+func (ns *NurseryStore) GraduateKinder(height uint32, kid *kidOutput) error {
     return kvdb.Update(ns.db, func(tx kvdb.RwTx) error {

         hghtBucket := ns.getHeightBucket(tx, height)
@@ -532,8 +532,8 @@ func (ns *nurseryStore) GraduateKinder(height uint32, kid *kidOutput) error {
 // delay expires at the provided block height.
 // FetchClass returns a list of the kindergarten and crib outputs whose timeouts
 // are expiring
-func (ns *nurseryStore) FetchClass(
-    height uint32) ([]kidOutput, []babyOutput, error) {
+func (ns *NurseryStore) FetchClass(
+    height uint32) ([]kidOutput, []babyOutput, error) { // nolint:golint

     // Construct list of all crib and kindergarten outputs that need to be
     // processed at the provided block height.
@@ -594,7 +594,7 @@ func (ns *nurseryStore) FetchClass(

 // FetchPreschools returns a list of all outputs currently stored in the
 // preschool bucket.
-func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) {
+func (ns *NurseryStore) FetchPreschools() ([]kidOutput, error) { // nolint:golint
     var kids []kidOutput
     if err := kvdb.View(ns.db, func(tx kvdb.RTx) error {

@@ -669,7 +669,7 @@ func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) {

 // HeightsBelowOrEqual returns a slice of all non-empty heights in the height
 // index at or below the provided upper bound.
-func (ns *nurseryStore) HeightsBelowOrEqual(height uint32) ([]uint32, error) {
+func (ns *NurseryStore) HeightsBelowOrEqual(height uint32) ([]uint32, error) {
     var activeHeights []uint32
     err := kvdb.View(ns.db, func(tx kvdb.RTx) error {
         // Ensure that the chain bucket for this nursery store exists.
@@ -715,7 +715,7 @@ func (ns *nurseryStore) HeightsBelowOrEqual(height uint32) ([]uint32, error) {
 // inferred from the key's prefix.
 // NOTE: The callback should not modify the provided byte slices and is
 // preferably non-blocking.
-func (ns *nurseryStore) ForChanOutputs(chanPoint *wire.OutPoint,
+func (ns *NurseryStore) ForChanOutputs(chanPoint *wire.OutPoint,
     callback func([]byte, []byte) error, reset func()) error {

     return kvdb.View(ns.db, func(tx kvdb.RTx) error {
@@ -724,7 +724,7 @@ func (ns *nurseryStore) ForChanOutputs(chanPoint *wire.OutPoint,
 }

 // ListChannels returns all channels the nursery is currently tracking.
-func (ns *nurseryStore) ListChannels() ([]wire.OutPoint, error) {
+func (ns *NurseryStore) ListChannels() ([]wire.OutPoint, error) {
     var activeChannels []wire.OutPoint
     if err := kvdb.View(ns.db, func(tx kvdb.RTx) error {
         // Retrieve the existing chain bucket for this nursery store.
@@ -761,7 +761,7 @@ func (ns *nurseryStore) ListChannels() ([]wire.OutPoint, error) {

 // IsMatureChannel determines the whether or not all of the outputs in a
 // particular channel bucket have been marked as graduated.
-func (ns *nurseryStore) IsMatureChannel(chanPoint *wire.OutPoint) (bool, error) {
+func (ns *NurseryStore) IsMatureChannel(chanPoint *wire.OutPoint) (bool, error) {
     err := kvdb.View(ns.db, func(tx kvdb.RTx) error {
         // Iterate over the contents of the channel bucket, computing
         // both total number of outputs, and those that have the grad
@@ -790,7 +790,7 @@ var ErrImmatureChannel = errors.New("cannot remove immature channel, " +
 // RemoveChannel channel erases all entries from the channel bucket for the
 // provided channel point.
 // NOTE: The channel's entries in the height index are assumed to be removed.
-func (ns *nurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error {
+func (ns *NurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error {
     return kvdb.Update(ns.db, func(tx kvdb.RwTx) error {
         // Retrieve the existing chain bucket for this nursery store.
         chainBucket := tx.ReadWriteBucket(ns.pfxChainKey)
@@ -853,7 +853,7 @@ func (ns *nurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error {
 // its two-stage process of sweeping funds back to the user's wallet. These
 // outputs are persisted in the nursery store in the crib state, and will be
 // revisited after the first-stage output's CLTV has expired.
-func (ns *nurseryStore) enterCrib(tx kvdb.RwTx, baby *babyOutput) error {
+func (ns *NurseryStore) enterCrib(tx kvdb.RwTx, baby *babyOutput) error {
     // First, retrieve or create the channel bucket corresponding to the
     // baby output's origin channel point.
     chanPoint := baby.OriginChanPoint()
@@ -910,7 +910,7 @@ func (ns *nurseryStore) enterCrib(tx kvdb.RwTx, baby *babyOutput) error {
 // through a single stage before sweeping. Outputs are stored in the preschool
 // bucket until the commitment transaction has been confirmed, at which point
 // they will be moved to the kindergarten bucket.
-func (ns *nurseryStore) enterPreschool(tx kvdb.RwTx, kid *kidOutput) error {
+func (ns *NurseryStore) enterPreschool(tx kvdb.RwTx, kid *kidOutput) error {
     // First, retrieve or create the channel bucket corresponding to the
     // baby output's origin channel point.
     chanPoint := kid.OriginChanPoint()
@@ -943,7 +943,7 @@ func (ns *nurseryStore) enterPreschool(tx kvdb.RwTx, kid *kidOutput) error {

 // createChannelBucket creates or retrieves a channel bucket for the provided
 // channel point.
-func (ns *nurseryStore) createChannelBucket(tx kvdb.RwTx,
+func (ns *NurseryStore) createChannelBucket(tx kvdb.RwTx,
     chanPoint *wire.OutPoint) (kvdb.RwBucket, error) {

     // Ensure that the chain bucket for this nursery store exists.
@@ -974,7 +974,7 @@ func (ns *nurseryStore) createChannelBucket(tx kvdb.RwTx,
 // getChannelBucket retrieves an existing channel bucket from the nursery store,
 // using the given channel point. If the bucket does not exist, or any bucket
 // along its path does not exist, a nil value is returned.
-func (ns *nurseryStore) getChannelBucket(tx kvdb.RTx,
+func (ns *NurseryStore) getChannelBucket(tx kvdb.RTx,
     chanPoint *wire.OutPoint) kvdb.RBucket {

     // Retrieve the existing chain bucket for this nursery store.
@@ -1002,7 +1002,7 @@ func (ns *nurseryStore) getChannelBucket(tx kvdb.RTx,
 // getChannelBucketWrite retrieves an existing channel bucket from the nursery store,
 // using the given channel point. If the bucket does not exist, or any bucket
 // along its path does not exist, a nil value is returned.
-func (ns *nurseryStore) getChannelBucketWrite(tx kvdb.RwTx,
+func (ns *NurseryStore) getChannelBucketWrite(tx kvdb.RwTx,
     chanPoint *wire.OutPoint) kvdb.RwBucket {

     // Retrieve the existing chain bucket for this nursery store.
@@ -1029,7 +1029,7 @@ func (ns *nurseryStore) getChannelBucketWrite(tx kvdb.RwTx,

 // createHeightBucket creates or retrieves an existing bucket from the height
 // index, corresponding to the provided height.
-func (ns *nurseryStore) createHeightBucket(tx kvdb.RwTx,
+func (ns *NurseryStore) createHeightBucket(tx kvdb.RwTx,
     height uint32) (kvdb.RwBucket, error) {

     // Ensure that the chain bucket for this nursery store exists.
@@ -1057,7 +1057,7 @@ func (ns *nurseryStore) createHeightBucket(tx kvdb.RwTx,
 // getHeightBucketPath retrieves an existing height bucket from the nursery
 // store, using the provided block height. If the bucket does not exist, or any
 // bucket along its path does not exist, a nil value is returned.
-func (ns *nurseryStore) getHeightBucketPath(tx kvdb.RTx,
+func (ns *NurseryStore) getHeightBucketPath(tx kvdb.RTx,
     height uint32) (kvdb.RBucket, kvdb.RBucket, kvdb.RBucket) {

     // Retrieve the existing chain bucket for this nursery store.
@@ -1083,7 +1083,7 @@ func (ns *nurseryStore) getHeightBucketPath(tx kvdb.RTx,
 // getHeightBucketPathWrite retrieves an existing height bucket from the nursery
 // store, using the provided block height. If the bucket does not exist, or any
 // bucket along its path does not exist, a nil value is returned.
-func (ns *nurseryStore) getHeightBucketPathWrite(tx kvdb.RwTx,
+func (ns *NurseryStore) getHeightBucketPathWrite(tx kvdb.RwTx,
     height uint32) (kvdb.RwBucket, kvdb.RwBucket, kvdb.RwBucket) {

     // Retrieve the existing chain bucket for this nursery store.
@@ -1111,7 +1111,7 @@ func (ns *nurseryStore) getHeightBucketPathWrite(tx kvdb.RwTx,
 // getHeightBucket retrieves an existing height bucket from the nursery store,
 // using the provided block height. If the bucket does not exist, or any bucket
 // along its path does not exist, a nil value is returned.
-func (ns *nurseryStore) getHeightBucket(tx kvdb.RTx,
+func (ns *NurseryStore) getHeightBucket(tx kvdb.RTx,
     height uint32) kvdb.RBucket {
     _, _, hghtBucket := ns.getHeightBucketPath(tx, height)

@@ -1121,7 +1121,7 @@ func (ns *nurseryStore) getHeightBucket(tx kvdb.RTx,
 // getHeightBucketWrite retrieves an existing height bucket from the nursery store,
 // using the provided block height. If the bucket does not exist, or any bucket
 // along its path does not exist, a nil value is returned.
-func (ns *nurseryStore) getHeightBucketWrite(tx kvdb.RwTx,
+func (ns *NurseryStore) getHeightBucketWrite(tx kvdb.RwTx,
     height uint32) kvdb.RwBucket {

     _, _, hghtBucket := ns.getHeightBucketPathWrite(tx, height)
@@ -1132,7 +1132,7 @@ func (ns *nurseryStore) getHeightBucketWrite(tx kvdb.RwTx,
 // createHeightChanBucket creates or retrieves an existing height-channel bucket
 // for the provided block height and channel point. This method will attempt to
 // instantiate all buckets along the path if required.
-func (ns *nurseryStore) createHeightChanBucket(tx kvdb.RwTx,
+func (ns *NurseryStore) createHeightChanBucket(tx kvdb.RwTx,
     height uint32, chanPoint *wire.OutPoint) (kvdb.RwBucket, error) {

     // Ensure that the height bucket for this nursery store exists.
@@ -1158,7 +1158,7 @@ func (ns *nurseryStore) createHeightChanBucket(tx kvdb.RwTx,
 // nursery store, using the provided block height and channel point. if the
 // bucket does not exist, or any bucket along its path does not exist, a nil
 // value is returned.
-func (ns *nurseryStore) getHeightChanBucketWrite(tx kvdb.RwTx,
+func (ns *NurseryStore) getHeightChanBucketWrite(tx kvdb.RwTx,
     height uint32, chanPoint *wire.OutPoint) kvdb.RwBucket {

     // Retrieve the existing height bucket from this nursery store.
@@ -1185,7 +1185,7 @@ func (ns *nurseryStore) getHeightChanBucketWrite(tx kvdb.RwTx,
 // enumerate crib and kindergarten outputs at a particular height. The callback
 // is invoked with serialized bytes retrieved for each output of interest,
 // allowing the caller to deserialize them into the appropriate type.
-func (ns *nurseryStore) forEachHeightPrefix(tx kvdb.RTx, prefix []byte,
+func (ns *NurseryStore) forEachHeightPrefix(tx kvdb.RTx, prefix []byte,
     height uint32, callback func([]byte) error) error {

     // Start by retrieving the height bucket corresponding to the provided
@@ -1273,7 +1273,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx kvdb.RTx, prefix []byte,
 // provided callback. The callback accepts a key-value pair of byte slices
 // corresponding to the prefixed-output key and the serialized output,
 // respectively.
-func (ns *nurseryStore) forChanOutputs(tx kvdb.RTx, chanPoint *wire.OutPoint,
+func (ns *NurseryStore) forChanOutputs(tx kvdb.RTx, chanPoint *wire.OutPoint,
     callback func([]byte, []byte) error) error {

     chanBucket := ns.getChannelBucket(tx, chanPoint)
@@ -1291,7 +1291,7 @@ var errBucketNotEmpty = errors.New("bucket is not empty, cannot be pruned")
 // removeOutputFromHeight will delete the given output from the specified
 // height-channel bucket, and attempt to prune the upstream directories if they
 // are empty.
-func (ns *nurseryStore) removeOutputFromHeight(tx kvdb.RwTx, height uint32,
+func (ns *NurseryStore) removeOutputFromHeight(tx kvdb.RwTx, height uint32,
     chanPoint *wire.OutPoint, pfxKey []byte) error {

     // Retrieve the height-channel bucket and delete the prefixed output.
@@ -1343,7 +1343,7 @@ func (ns *nurseryStore) removeOutputFromHeight(tx kvdb.RwTx, height uint32,
 // all active outputs at this height have been removed from their respective
 // height-channel buckets. The returned boolean value indicated whether or not
 // this invocation successfully pruned the height bucket.
-func (ns *nurseryStore) pruneHeight(tx kvdb.RwTx, height uint32) (bool, error) {
+func (ns *NurseryStore) pruneHeight(tx kvdb.RwTx, height uint32) (bool, error) {
     // Fetch the existing height index and height bucket.
     _, hghtIndex, hghtBucket := ns.getHeightBucketPathWrite(tx, height)
     if hghtBucket == nil {
@@ -1427,5 +1427,5 @@ func isBucketEmpty(parent kvdb.RBucket) error {
     })
 }

-// Compile-time constraint to ensure nurseryStore implements NurseryStore.
-var _ NurseryStore = (*nurseryStore)(nil)
+// Compile-time constraint to ensure NurseryStore implements NurseryStorer.
+var _ NurseryStorer = (*NurseryStore)(nil)
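
With the store exported, callers outside the package construct it through NewNurseryStore, keyed by a chain hash, much as the updated tests below do with a package-level chainHash. A hedged sketch (the open *channeldb.DB is assumed to exist already):

package lndsketch

import (
    "github.com/btcsuite/btcd/chaincfg"
    "github.com/btcsuite/btcd/wire"
    "github.com/lightningnetwork/lnd/channeldb"
    "github.com/lightningnetwork/lnd/contractcourt"
)

// listNurseryChannels opens a nursery store segmented for Bitcoin testnet and
// returns the channels it is currently tracking. The same *NurseryStore value
// also satisfies NurseryStorer (per the compile-time assertion above), so it
// can be handed to NurseryConfig.Store.
func listNurseryChannels(db *channeldb.DB) ([]wire.OutPoint, error) {
    ns, err := contractcourt.NewNurseryStore(
        chaincfg.TestNet3Params.GenesisHash, db,
    )
    if err != nil {
        return nil, err
    }

    return ns.ListChannels()
}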
@@ -1,13 +1,14 @@
+//go:build !rpctest
 // +build !rpctest

-package lnd
+package contractcourt

 import (
     "reflect"
     "testing"

+    "github.com/btcsuite/btcd/chaincfg/chainhash"
     "github.com/btcsuite/btcd/wire"
-    "github.com/lightningnetwork/lnd/chainreg"
     "github.com/lightningnetwork/lnd/channeldb"
 )

@@ -23,6 +24,8 @@ type incubateTest struct {
 // outputs stored in the nursery store.
 var incubateTests []incubateTest

+var chainHash chainhash.Hash
+
 // initIncubateTests instantiates the test vectors during package init, which
 // properly captures the sign descriptors and public keys.
 func initIncubateTests() {
@@ -55,7 +58,7 @@ func TestNurseryStoreInit(t *testing.T) {
     }
     defer cleanUp()

-    ns, err := newNurseryStore(&chainreg.BitcoinTestnetGenesis, cdb)
+    ns, err := NewNurseryStore(&chainHash, cdb)
     if err != nil {
         t.Fatalf("unable to open nursery store: %v", err)
     }
@@ -75,7 +78,7 @@ func TestNurseryStoreIncubate(t *testing.T) {
     }
     defer cleanUp()

-    ns, err := newNurseryStore(&chainreg.BitcoinTestnetGenesis, cdb)
+    ns, err := NewNurseryStore(&chainHash, cdb)
     if err != nil {
         t.Fatalf("unable to open nursery store: %v", err)
     }
@@ -316,7 +319,7 @@ func TestNurseryStoreGraduate(t *testing.T) {
     }
     defer cleanUp()

-    ns, err := newNurseryStore(&chainreg.BitcoinTestnetGenesis, cdb)
+    ns, err := NewNurseryStore(&chainHash, cdb)
     if err != nil {
         t.Fatalf("unable to open nursery store: %v", err)
     }
@@ -363,7 +366,7 @@ func TestNurseryStoreGraduate(t *testing.T) {

 // assertNumChanOutputs checks that the channel bucket has the expected number
 // of outputs.
-func assertNumChanOutputs(t *testing.T, ns NurseryStore,
+func assertNumChanOutputs(t *testing.T, ns NurseryStorer,
     chanPoint *wire.OutPoint, expectedNum int) {

     var count int
@@ -389,7 +392,7 @@ func assertNumChanOutputs(t *testing.T, ns NurseryStore,

 // assertNumPreschools loads all preschool outputs and verifies their count
 // matches the expected number.
-func assertNumPreschools(t *testing.T, ns NurseryStore, expected int) {
+func assertNumPreschools(t *testing.T, ns NurseryStorer, expected int) {
     psclOutputs, err := ns.FetchPreschools()
     if err != nil {
         t.Fatalf("unable to retrieve preschool outputs: %v", err)
@@ -403,7 +406,7 @@ func assertNumPreschools(t *testing.T, ns NurseryStore, expected int) {

 // assertNumChannels checks that the nursery has a given number of active
 // channels.
-func assertNumChannels(t *testing.T, ns NurseryStore, expected int) {
+func assertNumChannels(t *testing.T, ns NurseryStorer, expected int) {
     channels, err := ns.ListChannels()
     if err != nil {
         t.Fatalf("unable to fetch channels from nursery store: %v",
@@ -418,7 +421,7 @@ func assertNumChannels(t *testing.T, ns NurseryStore, expected int) {

 // assertHeightIsPurged checks that the finalized transaction, kindergarten, and
 // htlc outputs at a particular height are all nil.
-func assertHeightIsPurged(t *testing.T, ns NurseryStore,
+func assertHeightIsPurged(t *testing.T, ns NurseryStorer,
     height uint32) {

     kndrOutputs, cribOutputs, err := ns.FetchClass(height)
@@ -438,7 +441,7 @@ func assertHeightIsPurged(t *testing.T, ns NurseryStore,

 // assertCribAtExpiryHeight loads the class at the given height, and verifies
 // that the given htlc output is one of the crib outputs.
-func assertCribAtExpiryHeight(t *testing.T, ns NurseryStore,
+func assertCribAtExpiryHeight(t *testing.T, ns NurseryStorer,
     htlcOutput *babyOutput) {

     expiryHeight := htlcOutput.expiry
@@ -460,7 +463,7 @@ func assertCribAtExpiryHeight(t *testing.T, ns NurseryStore,

 // assertCribNotAtExpiryHeight loads the class at the given height, and verifies
 // that the given htlc output is not one of the crib outputs.
-func assertCribNotAtExpiryHeight(t *testing.T, ns NurseryStore,
+func assertCribNotAtExpiryHeight(t *testing.T, ns NurseryStorer,
     htlcOutput *babyOutput) {

     expiryHeight := htlcOutput.expiry
@@ -481,7 +484,7 @@ func assertCribNotAtExpiryHeight(t *testing.T, ns NurseryStore,
 // assertKndrAtMaturityHeight loads the class at the provided height and
 // verifies that the provided kid output is one of the kindergarten outputs
 // returned.
-func assertKndrAtMaturityHeight(t *testing.T, ns NurseryStore,
+func assertKndrAtMaturityHeight(t *testing.T, ns NurseryStorer,
     kndrOutput *kidOutput) {

     maturityHeight := kndrOutput.ConfHeight() +
@@ -505,7 +508,7 @@ func assertKndrAtMaturityHeight(t *testing.T, ns NurseryStore,
 // assertKndrNotAtMaturityHeight loads the class at the provided height and
 // verifies that the provided kid output is not one of the kindergarten outputs
 // returned.
-func assertKndrNotAtMaturityHeight(t *testing.T, ns NurseryStore,
+func assertKndrNotAtMaturityHeight(t *testing.T, ns NurseryStorer,
     kndrOutput *kidOutput) {

     maturityHeight := kndrOutput.ConfHeight() +
@@ -527,7 +530,7 @@ func assertKndrNotAtMaturityHeight(t *testing.T, ns NurseryStore,

 // assertChannelMaturity queries the nursery store for the maturity of the given
 // channel, failing if the result does not match the expectedMaturity.
-func assertChannelMaturity(t *testing.T, ns NurseryStore,
+func assertChannelMaturity(t *testing.T, ns NurseryStorer,
     chanPoint *wire.OutPoint, expectedMaturity bool) {

     isMature, err := ns.IsMatureChannel(chanPoint)
@@ -543,7 +546,7 @@ func assertChannelMaturity(t *testing.T, ns NurseryStore,

 // assertCanRemoveChannel tries to remove a channel from the nursery store,
 // failing if the result does match expected canRemove.
-func assertCanRemoveChannel(t *testing.T, ns NurseryStore,
+func assertCanRemoveChannel(t *testing.T, ns NurseryStorer,
     chanPoint *wire.OutPoint, canRemove bool) {

     err := ns.RemoveChannel(chanPoint)
@@ -1,4 +1,4 @@
-package lnd
+package contractcourt

import (
	"bytes"

@@ -198,13 +198,13 @@ type NurseryConfig struct {

	// Store provides access to and modification of the persistent state
	// maintained about the utxo nursery's incubating outputs.
-	Store NurseryStore
+	Store NurseryStorer

	// Sweep sweeps an input back to the wallet.
	SweepInput func(input.Input, sweep.Params) (chan sweep.Result, error)
}

-// utxoNursery is a system dedicated to incubating time-locked outputs created
+// UtxoNursery is a system dedicated to incubating time-locked outputs created
// by the broadcast of a commitment transaction either by us, or the remote
// peer. The nursery accepts outputs and "incubates" them until they've reached
// maturity, then sweep the outputs into the source wallet. An output is

@@ -212,7 +212,7 @@ type NurseryConfig struct {
// passed. As outputs reach their maturity age, they're swept in batches into
// the source wallet, returning the outputs so they can be used within future
// channels, or regular Bitcoin transactions.
-type utxoNursery struct {
+type UtxoNursery struct {
	started uint32 // To be used atomically.
	stopped uint32 // To be used atomically.

@@ -225,18 +225,18 @@ type utxoNursery struct {
	wg sync.WaitGroup
}

-// newUtxoNursery creates a new instance of the utxoNursery from a
+// NewUtxoNursery creates a new instance of the UtxoNursery from a
// ChainNotifier and LightningWallet instance.
-func newUtxoNursery(cfg *NurseryConfig) *utxoNursery {
-	return &utxoNursery{
+func NewUtxoNursery(cfg *NurseryConfig) *UtxoNursery {
+	return &UtxoNursery{
		cfg:  cfg,
		quit: make(chan struct{}),
	}
}

-// Start launches all goroutines the utxoNursery needs to properly carry out
+// Start launches all goroutines the UtxoNursery needs to properly carry out
// its duties.
-func (u *utxoNursery) Start() error {
+func (u *UtxoNursery) Start() error {
	if !atomic.CompareAndSwapUint32(&u.started, 0, 1) {
		return nil
	}
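With the move into `contractcourt`, the nursery is constructed through the exported `NewUtxoNursery` and `NurseryConfig`. A hedged sketch of the wiring from a caller's point of view, using only config fields that appear in this diff (the real `NurseryConfig` carries more fields than shown, and the placeholder values here are assumptions):

```go
// Sketch only: nurseryStore satisfies contractcourt.NurseryStorer, and
// sweepInput is a caller-supplied closure matching
// func(input.Input, sweep.Params) (chan sweep.Result, error).
cfg := &contractcourt.NurseryConfig{
	ChainIO:    chainIO, // chain backend, as wired up in server.go below
	ConfDepth:  1,
	Store:      nurseryStore,
	SweepInput: sweepInput,
	// ... remaining fields elided.
}

nursery := contractcourt.NewUtxoNursery(cfg)
if err := nursery.Start(); err != nil {
	// handle startup failure
}
defer nursery.Stop()
```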
@@ -311,8 +311,8 @@ func (u *utxoNursery) Start() error {
}

// Stop gracefully shuts down any lingering goroutines launched during normal
-// operation of the utxoNursery.
-func (u *utxoNursery) Stop() error {
+// operation of the UtxoNursery.
+func (u *UtxoNursery) Stop() error {
	if !atomic.CompareAndSwapUint32(&u.stopped, 0, 1) {
		return nil
	}

@@ -325,11 +325,11 @@ func (u *utxoNursery) Stop() error {
	return nil
}

-// IncubateOutputs sends a request to the utxoNursery to incubate a set of
+// IncubateOutputs sends a request to the UtxoNursery to incubate a set of
// outputs from an existing commitment transaction. Outputs need to incubate if
// they're CLTV absolute time locked, or if they're CSV relative time locked.
// Once all outputs reach maturity, they'll be swept back into the wallet.
-func (u *utxoNursery) IncubateOutputs(chanPoint wire.OutPoint,
+func (u *UtxoNursery) IncubateOutputs(chanPoint wire.OutPoint,
	outgoingHtlcs []lnwallet.OutgoingHtlcResolution,
	incomingHtlcs []lnwallet.IncomingHtlcResolution,
	broadcastHeight uint32) error {
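The doc comment above spells out when incubation is needed: outputs that are CLTV or CSV time locked. A hedged sketch of how a caller might hand resolutions to the nursery after a force close; `outgoingRes`, `incomingRes` and `broadcastHeight` are placeholders rather than the actual arbitrator plumbing:

```go
// Sketch only: the resolutions stand in for values taken from the
// force-close summary; broadcastHeight is the height at which the
// commitment transaction hit the chain.
err := nursery.IncubateOutputs(
	chanPoint,
	[]lnwallet.OutgoingHtlcResolution{outgoingRes},
	[]lnwallet.IncomingHtlcResolution{incomingRes},
	broadcastHeight,
)
if err != nil {
	// The request could not be queued; the caller should retry or escalate.
}
```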
@@ -468,8 +468,8 @@ func (u *utxoNursery) IncubateOutputs(chanPoint wire.OutPoint,
// outpoint. A nursery report details the maturity/sweeping progress for a
// contract that was previously force closed. If a report entry for the target
// chanPoint is unable to be constructed, then an error will be returned.
-func (u *utxoNursery) NurseryReport(
-	chanPoint *wire.OutPoint) (*contractMaturityReport, error) {
+func (u *UtxoNursery) NurseryReport(
+	chanPoint *wire.OutPoint) (*ContractMaturityReport, error) {

	u.mu.Lock()
	defer u.mu.Unlock()

@@ -477,7 +477,7 @@ func (u *utxoNursery) NurseryReport(
	utxnLog.Debugf("NurseryReport: building nursery report for channel %v",
		chanPoint)

-	var report *contractMaturityReport
+	var report *ContractMaturityReport

	if err := u.cfg.Store.ForChanOutputs(chanPoint, func(k, v []byte) error {
		switch {

@@ -577,7 +577,7 @@ func (u *utxoNursery) NurseryReport(
		return nil
	}, func() {
-		report = &contractMaturityReport{}
+		report = &ContractMaturityReport{}
	}); err != nil {
		return nil, err
	}
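Since the report type and its fields are now exported, callers outside the package (such as the RPC server, in the rpcserver.go hunks further down) can consume them directly. A minimal, hedged sketch of a caller reading a report; imports and surrounding function are elided:

```go
report, err := nursery.NurseryReport(chanPoint)
if err == contractcourt.ErrContractNotFound {
	// The nursery knows of no time-locked outputs for this channel.
	return nil
}
if err != nil {
	return err
}

// Exported fields replace the old unexported ones.
fmt.Printf("limbo=%v recovered=%v\n",
	report.LimboBalance, report.RecoveredBalance)
for _, h := range report.Htlcs {
	fmt.Printf("htlc %v: stage=%d, matures at height %d\n",
		h.Outpoint, h.Stage, h.MaturityHeight)
}
```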
@@ -587,7 +587,7 @@ func (u *utxoNursery) NurseryReport(

// reloadPreschool re-initializes the chain notifier with all of the outputs
// that had been saved to the "preschool" database bucket prior to shutdown.
-func (u *utxoNursery) reloadPreschool() error {
+func (u *UtxoNursery) reloadPreschool() error {
	psclOutputs, err := u.cfg.Store.FetchPreschools()
	if err != nil {
		return err

@@ -634,7 +634,7 @@ func (u *utxoNursery) reloadPreschool() error {
// This allows the nursery to reinitialize all state to continue sweeping
// outputs, even in the event that we missed blocks while offline. reloadClasses
// is called during the startup of the UTXO Nursery.
-func (u *utxoNursery) reloadClasses(bestHeight uint32) error {
+func (u *UtxoNursery) reloadClasses(bestHeight uint32) error {
	// Loading all active heights up to and including the current block.
	activeHeights, err := u.cfg.Store.HeightsBelowOrEqual(
		uint32(bestHeight))

@@ -674,7 +674,7 @@ func (u *utxoNursery) reloadClasses(bestHeight uint32) error {
// confirmation of these spends will either 1) move a crib output into the
// kindergarten bucket or 2) move a kindergarten output into the graduated
// bucket.
-func (u *utxoNursery) incubator(newBlockChan *chainntnfs.BlockEpochEvent) {
+func (u *UtxoNursery) incubator(newBlockChan *chainntnfs.BlockEpochEvent) {
	defer u.wg.Done()
	defer newBlockChan.Cancel()

@@ -722,7 +722,7 @@ func (u *utxoNursery) incubator(newBlockChan *chainntnfs.BlockEpochEvent) {
// CLTV delay expires at the nursery's current height. This method is called
// each time a new block arrives, or during startup to catch up on heights we
// may have missed while the nursery was offline.
-func (u *utxoNursery) graduateClass(classHeight uint32) error {
+func (u *UtxoNursery) graduateClass(classHeight uint32) error {
	// Record this height as the nursery's current best height.
	u.mu.Lock()
	defer u.mu.Unlock()

@@ -769,7 +769,7 @@ func (u *utxoNursery) graduateClass(classHeight uint32) error {
// control of funds from a prior channel commitment transaction to the user's
// wallet. The outputs swept were previously time locked (either absolute or
// relative), but are not mature enough to sweep into the wallet.
-func (u *utxoNursery) sweepMatureOutputs(classHeight uint32,
+func (u *UtxoNursery) sweepMatureOutputs(classHeight uint32,
	kgtnOutputs []kidOutput) error {

	utxnLog.Infof("Sweeping %v CSV-delayed outputs with sweep tx for "+

@@ -799,7 +799,7 @@ func (u *utxoNursery) sweepMatureOutputs(classHeight uint32,
// received, the nursery will mark those outputs as fully graduated, and proceed
// to mark any mature channels as fully closed in channeldb.
// NOTE(conner): this method MUST be called as a go routine.
-func (u *utxoNursery) waitForSweepConf(classHeight uint32,
+func (u *UtxoNursery) waitForSweepConf(classHeight uint32,
	output *kidOutput, resultChan chan sweep.Result) {

	defer u.wg.Done()

@@ -834,7 +834,7 @@ func (u *utxoNursery) waitForSweepConf(classHeight uint32,
	u.mu.Lock()
	defer u.mu.Unlock()

-	// TODO(conner): add retry logic?
+	// TODO(conner): add retry utxnLogic?

	// Mark the confirmed kindergarten output as graduated.
	if err := u.cfg.Store.GraduateKinder(classHeight, output); err != nil {

@@ -859,7 +859,7 @@ func (u *utxoNursery) waitForSweepConf(classHeight uint32,
// sweepCribOutput broadcasts the crib output's htlc timeout txn, and sets up a
// notification that will advance it to the kindergarten bucket upon
// confirmation.
-func (u *utxoNursery) sweepCribOutput(classHeight uint32, baby *babyOutput) error {
+func (u *UtxoNursery) sweepCribOutput(classHeight uint32, baby *babyOutput) error {
	utxnLog.Infof("Publishing CLTV-delayed HTLC output using timeout tx "+
		"(txid=%v): %v", baby.timeoutTx.TxHash(),
		newLogClosure(func() string {

@@ -884,7 +884,7 @@ func (u *utxoNursery) sweepCribOutput(classHeight uint32, baby *babyOutput) erro
// notification for an htlc timeout transaction. If successful, a goroutine
// will be spawned that will transition the provided baby output into the
// kindergarten state within the nursery store.
-func (u *utxoNursery) registerTimeoutConf(baby *babyOutput, heightHint uint32) error {
+func (u *UtxoNursery) registerTimeoutConf(baby *babyOutput, heightHint uint32) error {

	birthTxID := baby.timeoutTx.TxHash()

@@ -909,7 +909,7 @@ func (u *utxoNursery) registerTimeoutConf(baby *babyOutput, heightHint uint32) e
// waitForTimeoutConf watches for the confirmation of an htlc timeout
// transaction, and attempts to move the htlc output from the crib bucket to the
// kindergarten bucket upon success.
-func (u *utxoNursery) waitForTimeoutConf(baby *babyOutput,
+func (u *UtxoNursery) waitForTimeoutConf(baby *babyOutput,
	confChan *chainntnfs.ConfirmationEvent) {

	defer u.wg.Done()

@@ -932,7 +932,7 @@ func (u *utxoNursery) waitForTimeoutConf(baby *babyOutput,
	u.mu.Lock()
	defer u.mu.Unlock()

-	// TODO(conner): add retry logic?
+	// TODO(conner): add retry utxnLogic?

	err := u.cfg.Store.CribToKinder(baby)
	if err != nil {

@@ -950,7 +950,7 @@ func (u *utxoNursery) waitForTimeoutConf(baby *babyOutput,
// HTLC on our commitment transaction.. If successful, the provided preschool
// output will be moved persistently into the kindergarten state within the
// nursery store.
-func (u *utxoNursery) registerPreschoolConf(kid *kidOutput, heightHint uint32) error {
+func (u *UtxoNursery) registerPreschoolConf(kid *kidOutput, heightHint uint32) error {
	txID := kid.OutPoint().Hash

	// TODO(roasbeef): ensure we don't already have one waiting, need to

@@ -988,7 +988,7 @@ func (u *utxoNursery) registerPreschoolConf(kid *kidOutput, heightHint uint32) e
// will delete the output from the "preschool" database bucket and atomically
// add it to the "kindergarten" database bucket. This is the second step in
// the output incubation process.
-func (u *utxoNursery) waitForPreschoolConf(kid *kidOutput,
+func (u *UtxoNursery) waitForPreschoolConf(kid *kidOutput,
	confChan *chainntnfs.ConfirmationEvent) {

	defer u.wg.Done()

@@ -1011,7 +1011,7 @@ func (u *utxoNursery) waitForPreschoolConf(kid *kidOutput,
	u.mu.Lock()
	defer u.mu.Unlock()

-	// TODO(conner): add retry logic?
+	// TODO(conner): add retry utxnLogic?

	var outputType string
	if kid.isHtlc {

@@ -1030,113 +1030,120 @@ func (u *utxoNursery) waitForPreschoolConf(kid *kidOutput,
	}
}

-// contractMaturityReport is a report that details the maturity progress of a
+// RemoveChannel channel erases all entries from the channel bucket for the
+// provided channel point.
+func (u *UtxoNursery) RemoveChannel(op *wire.OutPoint) error {
+	return u.cfg.Store.RemoveChannel(op)
+}
+
+// ContractMaturityReport is a report that details the maturity progress of a
// particular force closed contract.
-type contractMaturityReport struct {
+type ContractMaturityReport struct {
	// limboBalance is the total number of frozen coins within this
	// contract.
-	limboBalance btcutil.Amount
+	LimboBalance btcutil.Amount

	// recoveredBalance is the total value that has been successfully swept
	// back to the user's wallet.
-	recoveredBalance btcutil.Amount
+	RecoveredBalance btcutil.Amount

	// htlcs records a maturity report for each htlc output in this channel.
-	htlcs []htlcMaturityReport
+	Htlcs []HtlcMaturityReport
}

-// htlcMaturityReport provides a summary of a single htlc output, and is
-// embedded as party of the overarching contractMaturityReport
-type htlcMaturityReport struct {
-	// outpoint is the final output that will be swept back to the wallet.
-	outpoint wire.OutPoint
+// HtlcMaturityReport provides a summary of a single htlc output, and is
+// embedded as party of the overarching ContractMaturityReport
+type HtlcMaturityReport struct {
+	// Outpoint is the final output that will be swept back to the wallet.
+	Outpoint wire.OutPoint

-	// amount is the final value that will be swept in back to the wallet.
-	amount btcutil.Amount
+	// Amount is the final value that will be swept in back to the wallet.
+	Amount btcutil.Amount

-	// maturityHeight is the absolute block height that this output will
+	// MaturityHeight is the absolute block height that this output will
	// mature at.
-	maturityHeight uint32
+	MaturityHeight uint32

-	// stage indicates whether the htlc is in the CLTV-timeout stage (1) or
+	// Stage indicates whether the htlc is in the CLTV-timeout stage (1) or
	// the CSV-delay stage (2). A stage 1 htlc's maturity height will be set
	// to its expiry height, while a stage 2 htlc's maturity height will be
	// set to its confirmation height plus the maturity requirement.
-	stage uint32
+	Stage uint32
}

// AddLimboStage1TimeoutHtlc adds an htlc crib output to the maturity report's
// htlcs, and contributes its amount to the limbo balance.
-func (c *contractMaturityReport) AddLimboStage1TimeoutHtlc(baby *babyOutput) {
-	c.limboBalance += baby.Amount()
+func (c *ContractMaturityReport) AddLimboStage1TimeoutHtlc(baby *babyOutput) {
+	c.LimboBalance += baby.Amount()

	// TODO(roasbeef): bool to indicate stage 1 vs stage 2?
-	c.htlcs = append(c.htlcs, htlcMaturityReport{
-		outpoint:       *baby.OutPoint(),
-		amount:         baby.Amount(),
-		maturityHeight: baby.expiry,
-		stage:          1,
+	c.Htlcs = append(c.Htlcs, HtlcMaturityReport{
+		Outpoint:       *baby.OutPoint(),
+		Amount:         baby.Amount(),
+		MaturityHeight: baby.expiry,
+		Stage:          1,
	})
}

// AddLimboDirectHtlc adds a direct HTLC on the commitment transaction of the
// remote party to the maturity report. This a CLTV time-locked output that
// has or hasn't expired yet.
-func (c *contractMaturityReport) AddLimboDirectHtlc(kid *kidOutput) {
-	c.limboBalance += kid.Amount()
+func (c *ContractMaturityReport) AddLimboDirectHtlc(kid *kidOutput) {
+	c.LimboBalance += kid.Amount()

-	htlcReport := htlcMaturityReport{
-		outpoint:       *kid.OutPoint(),
-		amount:         kid.Amount(),
-		maturityHeight: kid.absoluteMaturity,
-		stage:          2,
+	htlcReport := HtlcMaturityReport{
+		Outpoint:       *kid.OutPoint(),
+		Amount:         kid.Amount(),
+		MaturityHeight: kid.absoluteMaturity,
+		Stage:          2,
	}

-	c.htlcs = append(c.htlcs, htlcReport)
+	c.Htlcs = append(c.Htlcs, htlcReport)
}

// AddLimboStage1SuccessHtlcHtlc adds an htlc crib output to the maturity
// report's set of HTLC's. We'll use this to report any incoming HTLC sweeps
// where the second level transaction hasn't yet confirmed.
-func (c *contractMaturityReport) AddLimboStage1SuccessHtlc(kid *kidOutput) {
-	c.limboBalance += kid.Amount()
+func (c *ContractMaturityReport) AddLimboStage1SuccessHtlc(kid *kidOutput) {
+	c.LimboBalance += kid.Amount()

-	c.htlcs = append(c.htlcs, htlcMaturityReport{
-		outpoint: *kid.OutPoint(),
-		amount:   kid.Amount(),
-		stage:    1,
+	c.Htlcs = append(c.Htlcs, HtlcMaturityReport{
+		Outpoint: *kid.OutPoint(),
+		Amount:   kid.Amount(),
+		Stage:    1,
	})
}

// AddLimboStage2Htlc adds an htlc kindergarten output to the maturity report's
// htlcs, and contributes its amount to the limbo balance.
-func (c *contractMaturityReport) AddLimboStage2Htlc(kid *kidOutput) {
-	c.limboBalance += kid.Amount()
+func (c *ContractMaturityReport) AddLimboStage2Htlc(kid *kidOutput) {
+	c.LimboBalance += kid.Amount()

-	htlcReport := htlcMaturityReport{
-		outpoint: *kid.OutPoint(),
-		amount:   kid.Amount(),
-		stage:    2,
+	htlcReport := HtlcMaturityReport{
+		Outpoint: *kid.OutPoint(),
+		Amount:   kid.Amount(),
+		Stage:    2,
	}

	// If the confirmation height is set, then this means the first stage
	// has been confirmed, and we know the final maturity height of the CSV
	// delay.
	if kid.ConfHeight() != 0 {
-		htlcReport.maturityHeight = kid.ConfHeight() + kid.BlocksToMaturity()
+		htlcReport.MaturityHeight = kid.ConfHeight() + kid.BlocksToMaturity()
	}

-	c.htlcs = append(c.htlcs, htlcReport)
+	c.Htlcs = append(c.Htlcs, htlcReport)
}

// AddRecoveredHtlc adds a graduate output to the maturity report's htlcs, and
// contributes its amount to the recovered balance.
-func (c *contractMaturityReport) AddRecoveredHtlc(kid *kidOutput) {
-	c.recoveredBalance += kid.Amount()
+func (c *ContractMaturityReport) AddRecoveredHtlc(kid *kidOutput) {
+	c.RecoveredBalance += kid.Amount()

-	c.htlcs = append(c.htlcs, htlcMaturityReport{
-		outpoint:       *kid.OutPoint(),
-		amount:         kid.Amount(),
-		maturityHeight: kid.ConfHeight() + kid.BlocksToMaturity(),
+	c.Htlcs = append(c.Htlcs, HtlcMaturityReport{
+		Outpoint:       *kid.OutPoint(),
+		Amount:         kid.Amount(),
+		MaturityHeight: kid.ConfHeight() + kid.BlocksToMaturity(),
	})
}
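The Stage and MaturityHeight semantics described in the comments above (a stage 1 HTLC matures at its expiry height, a stage 2 HTLC at its confirmation height plus the CSV requirement) can be turned into a "blocks remaining" figure by a consumer of the report. A hedged sketch, assuming `bestHeight` comes from the caller's chain backend:

```go
// Sketch only: blocksUntilMature reports how many blocks remain before an
// HTLC in the report can be swept, based on the exported fields above.
func blocksUntilMature(h contractcourt.HtlcMaturityReport, bestHeight uint32) uint32 {
	// A zero MaturityHeight means the stage 1 transaction has not yet
	// confirmed, so the final CSV maturity height is still unknown.
	if h.MaturityHeight == 0 || bestHeight >= h.MaturityHeight {
		return 0
	}
	return h.MaturityHeight - bestHeight
}
```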
@@ -1144,7 +1151,7 @@ func (c *contractMaturityReport) AddRecoveredHtlc(kid *kidOutput) {
// if and only if all of its outputs have been marked graduated. If the channel
// still has ungraduated outputs, the method will succeed without altering the
// database state.
-func (u *utxoNursery) closeAndRemoveIfMature(chanPoint *wire.OutPoint) error {
+func (u *UtxoNursery) closeAndRemoveIfMature(chanPoint *wire.OutPoint) error {
	isMature, err := u.cfg.Store.IsMatureChannel(chanPoint)
	if err == ErrContractNotFound {
		return nil

@@ -1,6 +1,7 @@
+//go:build !rpctest
// +build !rpctest

-package lnd
+package contractcourt

import (
	"bytes"

@@ -397,14 +398,14 @@ func TestBabyOutputSerialization(t *testing.T) {
}

type nurseryTestContext struct {
-	nursery     *utxoNursery
+	nursery     *UtxoNursery
	notifier    *sweep.MockNotifier
	chainIO     *mock.ChainIO
	publishChan chan wire.MsgTx
	store       *nurseryStoreInterceptor
	restart     func() bool
	receiveTx   func() wire.MsgTx
-	sweeper     *mockSweeper
+	sweeper     *mockSweeperFull
	timeoutChan chan chan time.Time
	t           *testing.T
	dbCleanup   func()

@@ -422,7 +423,7 @@ func createNurseryTestContext(t *testing.T,
		t.Fatalf("unable to open channeldb: %v", err)
	}

-	store, err := newNurseryStore(&chainhash.Hash{}, cdb)
+	store, err := NewNurseryStore(&chainhash.Hash{}, cdb)
	if err != nil {
		t.Fatal(err)
	}

@@ -435,7 +436,7 @@ func createNurseryTestContext(t *testing.T,

	publishChan := make(chan wire.MsgTx, 1)
	publishFunc := func(tx *wire.MsgTx, source string) error {
-		utxnLog.Tracef("Publishing tx %v by %v", tx.TxHash(), source)
+		log.Tracef("Publishing tx %v by %v", tx.TxHash(), source)
		publishChan <- *tx
		return nil
	}

@@ -446,7 +447,7 @@ func createNurseryTestContext(t *testing.T,
		BestHeight: 0,
	}

-	sweeper := newMockSweeper(t)
+	sweeper := newMockSweeperFull(t)

	nurseryCfg := NurseryConfig{
		Notifier: notifier,

@@ -468,7 +469,7 @@ func createNurseryTestContext(t *testing.T,
		},
	}

-	nursery := newUtxoNursery(&nurseryCfg)
+	nursery := NewUtxoNursery(&nurseryCfg)
	nursery.Start()

	ctx := &nurseryTestContext{

@@ -487,7 +488,7 @@ func createNurseryTestContext(t *testing.T,
		var tx wire.MsgTx
		select {
		case tx = <-ctx.publishChan:
-			utxnLog.Debugf("Published tx %v", tx.TxHash())
+			log.Debugf("Published tx %v", tx.TxHash())
			return tx
		case <-time.After(defaultTestTimeout):
			pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)

@@ -499,16 +500,16 @@ func createNurseryTestContext(t *testing.T,

	ctx.restart = func() bool {
		return checkStartStop(func() {
-			utxnLog.Tracef("Restart sweeper and nursery")
+			log.Tracef("Restart sweeper and nursery")
			// Simulate lnd restart.
			ctx.nursery.Stop()

			// Restart sweeper.
-			ctx.sweeper = newMockSweeper(t)
+			ctx.sweeper = newMockSweeperFull(t)

			/// Restart nursery.
			nurseryCfg.SweepInput = ctx.sweeper.sweepInput
-			ctx.nursery = newUtxoNursery(&nurseryCfg)
+			ctx.nursery = NewUtxoNursery(&nurseryCfg)
			ctx.nursery.Start()

		})

@@ -627,7 +628,7 @@ func createOutgoingRes(onLocalCommitment bool) *lnwallet.OutgoingHtlcResolution
	return &outgoingRes
}

-func incubateTestOutput(t *testing.T, nursery *utxoNursery,
+func incubateTestOutput(t *testing.T, nursery *UtxoNursery,
	onLocalCommitment bool) *lnwallet.OutgoingHtlcResolution {

	outgoingRes := createOutgoingRes(onLocalCommitment)

@@ -653,7 +654,7 @@ func incubateTestOutput(t *testing.T, nursery *utxoNursery,
	return outgoingRes
}

-func assertNurseryReport(t *testing.T, nursery *utxoNursery,
+func assertNurseryReport(t *testing.T, nursery *UtxoNursery,
	expectedNofHtlcs int, expectedStage uint32,
	expectedLimboBalance btcutil.Amount) {
	report, err := nursery.NurseryReport(&testChanPoint)

@@ -661,27 +662,27 @@ func assertNurseryReport(t *testing.T, nursery *utxoNursery,
		t.Fatal(err)
	}

-	if len(report.htlcs) != expectedNofHtlcs {
+	if len(report.Htlcs) != expectedNofHtlcs {
		t.Fatalf("expected %v outputs to be reported, but report "+
-			"only contains %v", expectedNofHtlcs, len(report.htlcs))
+			"only contains %v", expectedNofHtlcs, len(report.Htlcs))
	}

	if expectedNofHtlcs != 0 {
-		htlcReport := report.htlcs[0]
-		if htlcReport.stage != expectedStage {
+		htlcReport := report.Htlcs[0]
+		if htlcReport.Stage != expectedStage {
			t.Fatalf("expected htlc be advanced to stage %v, but "+
				"it is reported in stage %v",
-				expectedStage, htlcReport.stage)
+				expectedStage, htlcReport.Stage)
		}
	}

-	if report.limboBalance != expectedLimboBalance {
+	if report.LimboBalance != expectedLimboBalance {
		t.Fatalf("expected limbo balance to be %v, but it is %v instead",
-			expectedLimboBalance, report.limboBalance)
+			expectedLimboBalance, report.LimboBalance)
	}
}

-func assertNurseryReportUnavailable(t *testing.T, nursery *utxoNursery) {
+func assertNurseryReportUnavailable(t *testing.T, nursery *UtxoNursery) {
	_, err := nursery.NurseryReport(&testChanPoint)
	if err != ErrContractNotFound {
		t.Fatal("expected report to be unavailable")

@@ -711,7 +712,7 @@ func testRestartLoop(t *testing.T, test func(*testing.T,

			return true
		}
-		utxnLog.Debugf("Skipping restart point %v",
+		log.Debugf("Skipping restart point %v",
			currentStartStopIdx)
		return false
	}

@@ -863,7 +864,7 @@ func testSweep(t *testing.T, ctx *nurseryTestContext,
}

type nurseryStoreInterceptor struct {
-	ns NurseryStore
+	ns NurseryStorer

	// TODO(joostjager): put more useful info through these channels.
	cribToKinderChan chan struct{}

@@ -872,7 +873,7 @@ type nurseryStoreInterceptor struct {
	preschoolToKinderChan chan struct{}
}

-func newNurseryStoreInterceptor(ns NurseryStore) *nurseryStoreInterceptor {
+func newNurseryStoreInterceptor(ns NurseryStorer) *nurseryStoreInterceptor {
	return &nurseryStoreInterceptor{
		ns:               ns,
		cribToKinderChan: make(chan struct{}),

@@ -950,7 +951,7 @@ func (i *nurseryStoreInterceptor) RemoveChannel(chanPoint *wire.OutPoint) error
	return i.ns.RemoveChannel(chanPoint)
}
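The interceptor wraps a real `NurseryStorer` and signals state transitions over channels such as `cribToKinderChan`, which lets a test block until a store transition has actually been executed. A hedged sketch of that synchronization pattern, reusing only the identifiers visible in these hunks (`ctx.store`, `cribToKinderChan`, `defaultTestTimeout`):

```go
// Sketch only: wait until the intercepted CribToKinder call completes, or
// fail the test after a bounded wait.
select {
case <-ctx.store.cribToKinderChan:
	// The crib output was promoted to the kindergarten bucket.
case <-time.After(defaultTestTimeout):
	t.Fatalf("crib output was never promoted to kindergarten")
}
```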
-type mockSweeper struct {
+type mockSweeperFull struct {
	lock sync.Mutex

	resultChans map[wire.OutPoint]chan sweep.Result

@@ -959,18 +960,18 @@ type mockSweeper struct {
	sweepChan chan input.Input
}

-func newMockSweeper(t *testing.T) *mockSweeper {
-	return &mockSweeper{
+func newMockSweeperFull(t *testing.T) *mockSweeperFull {
+	return &mockSweeperFull{
		resultChans: make(map[wire.OutPoint]chan sweep.Result),
		sweepChan:   make(chan input.Input, 1),
		t:           t,
	}
}

-func (s *mockSweeper) sweepInput(input input.Input,
+func (s *mockSweeperFull) sweepInput(input input.Input,
	_ sweep.Params) (chan sweep.Result, error) {

-	utxnLog.Debugf("mockSweeper sweepInput called for %v", *input.OutPoint())
+	log.Debugf("mockSweeper sweepInput called for %v", *input.OutPoint())

	select {
	case s.sweepChan <- input:

@@ -987,7 +988,7 @@ func (s *mockSweeper) sweepInput(input input.Input,
	return c, nil
}

-func (s *mockSweeper) expectSweep() {
+func (s *mockSweeperFull) expectSweep() {
	s.t.Helper()

	select {

@@ -997,7 +998,7 @@ func (s *mockSweeper) expectSweep() {
	}
}

-func (s *mockSweeper) sweepAll() {
+func (s *mockSweeperFull) sweepAll() {
	s.t.Helper()

	s.lock.Lock()

@@ -1006,7 +1007,7 @@ func (s *mockSweeper) sweepAll() {
	s.lock.Unlock()

	for o, c := range currentChans {
-		utxnLog.Debugf("mockSweeper signal swept for %v", o)
+		log.Debugf("mockSweeper signal swept for %v", o)

		select {
		case c <- sweep.Result{}:
@@ -229,6 +229,8 @@ you.

* [Refactor the interaction between the `htlcswitch` and `peer` packages for cleaner separation.](https://github.com/lightningnetwork/lnd/pull/5603)

+* [Moved the original breach handling and timelock UTXO handling into the contract court package](https://github.com/lightningnetwork/lnd/pull/5745)
+
* [Unused error check
  removed](https://github.com/lightningnetwork/lnd/pull/5537).
@@ -80,26 +80,12 @@ type plexPacket struct {
	err chan error
}

-// ChannelCloseType is an enum which signals the type of channel closure the
-// peer should execute.
-type ChannelCloseType uint8
-
-const (
-	// CloseRegular indicates a regular cooperative channel closure
-	// should be attempted.
-	CloseRegular ChannelCloseType = iota
-
-	// CloseBreach indicates that a channel breach has been detected, and
-	// the link should immediately be marked as unavailable.
-	CloseBreach
-)
-
// ChanClose represents a request which close a particular channel specified by
// its id.
type ChanClose struct {
	// CloseType is a variable which signals the type of channel closure the
	// peer should execute.
-	CloseType ChannelCloseType
+	CloseType contractcourt.ChannelCloseType

	// ChanPoint represent the id of the channel which should be closed.
	ChanPoint *wire.OutPoint

@@ -1454,7 +1440,8 @@ func (s *Switch) teardownCircuit(pkt *htlcPacket) error {
// a starting point for close negotiation. The deliveryScript parameter is an
// optional parameter which sets a user specified script to close out to.
func (s *Switch) CloseLink(chanPoint *wire.OutPoint,
-	closeType ChannelCloseType, targetFeePerKw chainfee.SatPerKWeight,
+	closeType contractcourt.ChannelCloseType,
+	targetFeePerKw chainfee.SatPerKWeight,
	deliveryScript lnwire.DeliveryAddress) (chan interface{}, chan error) {

	// TODO(roasbeef) abstract out the close updates.
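Callers of `CloseLink` now import the closure type from `contractcourt`, as the rpcserver.go hunks further down show. A hedged sketch of the call-site shape after this change; `htlcSwitch`, `chanPoint`, `feeRate` and `deliveryScript` are assumed to be in scope:

```go
// Sketch only: request a cooperative close and drain the two channels that
// CloseLink returns.
updateChan, errChan := htlcSwitch.CloseLink(
	chanPoint, contractcourt.CloseRegular, feeRate, deliveryScript,
)

for {
	select {
	case update := <-updateChan:
		_ = update // progress updates for the close negotiation
	case err := <-errChan:
		if err != nil {
			// the switch could not carry out the close
		}
		return
	}
}
```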
log.go
@@ -81,8 +81,6 @@ var (
	ltndLog = addLndPkgLogger("LTND")
	rpcsLog = addLndPkgLogger("RPCS")
	srvrLog = addLndPkgLogger("SRVR")
-	utxnLog = addLndPkgLogger("UTXN")
-	brarLog = addLndPkgLogger("BRAR")
	atplLog = addLndPkgLogger("ATPL")
)

@@ -132,6 +130,8 @@ func SetupLoggers(root *build.RotatingLogWriter, interceptor signal.Interceptor)
	AddSubLogger(root, "CMGR", interceptor, connmgr.UseLogger)
	AddSubLogger(root, "BTCN", interceptor, neutrino.UseLogger)
	AddSubLogger(root, "CNCT", interceptor, contractcourt.UseLogger)
+	AddSubLogger(root, "UTXN", interceptor, contractcourt.UseNurseryLogger)
+	AddSubLogger(root, "BRAR", interceptor, contractcourt.UseBreachLogger)
	AddSubLogger(root, "SPHX", interceptor, sphinx.UseLogger)
	AddSubLogger(root, "SWPR", interceptor, sweep.UseLogger)
	AddSubLogger(root, "SGNR", interceptor, signrpc.UseLogger)
@@ -2519,7 +2519,7 @@ func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
	// A type of CloseRegular indicates that the user has opted to close
	// out this channel on-chain, so we execute the cooperative channel
	// closure workflow.
-	case htlcswitch.CloseRegular:
+	case contractcourt.CloseRegular:
		// First, we'll choose a delivery address that we'll use to send the
		// funds to in the case of a successful negotiation.

@@ -2604,7 +2604,7 @@ func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {

	// A type of CloseBreach indicates that the counterparty has breached
	// the channel therefore we need to clean up our local state.
-	case htlcswitch.CloseBreach:
+	case contractcourt.CloseBreach:
		// TODO(roasbeef): no longer need with newer beach logic?
		peerLog.Infof("ChannelPoint(%v) has been breached, wiping "+
			"channel", req.ChanPoint)
@@ -11,6 +11,7 @@ import (
	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
+	"github.com/lightningnetwork/lnd/contractcourt"
	"github.com/lightningnetwork/lnd/htlcswitch"
	"github.com/lightningnetwork/lnd/lntest/mock"
	"github.com/lightningnetwork/lnd/lnwallet/chancloser"

@@ -165,7 +166,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) {
	updateChan := make(chan interface{}, 1)
	errChan := make(chan error, 1)
	closeCommand := &htlcswitch.ChanClose{
-		CloseType:      htlcswitch.CloseRegular,
+		CloseType:      contractcourt.CloseRegular,
		ChanPoint:      bobChan.ChannelPoint(),
		Updates:        updateChan,
		TargetFeePerKw: 12500,

@@ -491,7 +492,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
	updateChan := make(chan interface{}, 1)
	errChan := make(chan error, 1)
	closeCommand := &htlcswitch.ChanClose{
-		CloseType:      htlcswitch.CloseRegular,
+		CloseType:      contractcourt.CloseRegular,
		ChanPoint:      bobChan.ChannelPoint(),
		Updates:        updateChan,
		TargetFeePerKw: 12500,

@@ -835,7 +836,7 @@ func TestCustomShutdownScript(t *testing.T) {
	updateChan := make(chan interface{}, 1)
	errChan := make(chan error, 1)
	closeCommand := htlcswitch.ChanClose{
-		CloseType:      htlcswitch.CloseRegular,
+		CloseType:      contractcourt.CloseRegular,
		ChanPoint:      chanPoint,
		Updates:        updateChan,
		TargetFeePerKw: 12500,
rpcserver.go
@@ -2401,7 +2401,8 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
		}

		updateChan, errChan = r.server.htlcSwitch.CloseLink(
-			chanPoint, htlcswitch.CloseRegular, feeRate, deliveryScript,
+			chanPoint, contractcourt.CloseRegular, feeRate,
+			deliveryScript,
		)
	}
out:

@@ -2518,8 +2519,8 @@ func (r *rpcServer) abandonChan(chanPoint *wire.OutPoint,
	// close, then it's possible that the nursery is hanging on to some
	// state. To err on the side of caution, we'll now attempt to wipe any
	// state for this channel from the nursery.
-	err = r.server.utxoNursery.cfg.Store.RemoveChannel(chanPoint)
-	if err != nil && err != ErrContractNotFound {
+	err = r.server.utxoNursery.RemoveChannel(chanPoint)
+	if err != nil && err != contractcourt.ErrContractNotFound {
		return err
	}

@@ -3495,7 +3496,7 @@ func (r *rpcServer) nurseryPopulateForceCloseResp(chanPoint *wire.OutPoint,
	// didn't have any time-locked outputs, then the nursery may not know of
	// the contract.
	nurseryInfo, err := r.server.utxoNursery.NurseryReport(chanPoint)
-	if err == ErrContractNotFound {
+	if err == contractcourt.ErrContractNotFound {
		return nil
	}
	if err != nil {

@@ -3508,18 +3509,18 @@ func (r *rpcServer) nurseryPopulateForceCloseResp(chanPoint *wire.OutPoint,
	// information detailing exactly how much funds are time locked and also
	// the height in which we can ultimately sweep the funds into the
	// wallet.
-	forceClose.LimboBalance = int64(nurseryInfo.limboBalance)
-	forceClose.RecoveredBalance = int64(nurseryInfo.recoveredBalance)
+	forceClose.LimboBalance = int64(nurseryInfo.LimboBalance)
+	forceClose.RecoveredBalance = int64(nurseryInfo.RecoveredBalance)

-	for _, htlcReport := range nurseryInfo.htlcs {
+	for _, htlcReport := range nurseryInfo.Htlcs {
		// TODO(conner) set incoming flag appropriately after handling
		// incoming incubation
		htlc := &lnrpc.PendingHTLC{
			Incoming:       false,
-			Amount:         int64(htlcReport.amount),
-			Outpoint:       htlcReport.outpoint.String(),
-			MaturityHeight: htlcReport.maturityHeight,
-			Stage:          htlcReport.stage,
+			Amount:         int64(htlcReport.Amount),
+			Outpoint:       htlcReport.Outpoint.String(),
+			MaturityHeight: htlcReport.MaturityHeight,
+			Stage:          htlcReport.Stage,
		}

		if htlc.MaturityHeight != 0 {
server.go
@@ -238,7 +238,7 @@ type server struct {

	witnessBeacon contractcourt.WitnessBeacon

-	breachArbiter *breachArbiter
+	breachArbiter *contractcourt.BreachArbiter

	missionControl *routing.MissionControl

@@ -250,7 +250,7 @@ type server struct {

	localChanMgr *localchans.Manager

-	utxoNursery *utxoNursery
+	utxoNursery *contractcourt.UtxoNursery

	sweeper *sweep.UtxoSweeper

@@ -848,7 +848,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
		FetchChannel: s.chanStateDB.FetchChannel,
	}

-	utxnStore, err := newNurseryStore(
+	utxnStore, err := contractcourt.NewNurseryStore(
		s.cfg.ActiveNetParams.GenesisHash, dbs.chanStateDB,
	)
	if err != nil {

@@ -884,7 +884,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
		FeeRateBucketSize: sweep.DefaultFeeRateBucketSize,
	})

-	s.utxoNursery = newUtxoNursery(&NurseryConfig{
+	s.utxoNursery = contractcourt.NewUtxoNursery(&contractcourt.NurseryConfig{
		ChainIO:             cc.ChainIO,
		ConfDepth:           1,
		FetchClosedChannels: dbs.chanStateDB.FetchClosedChannels,

@@ -897,7 +897,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,

	// Construct a closure that wraps the htlcswitch's CloseLink method.
	closeLink := func(chanPoint *wire.OutPoint,
-		closureType htlcswitch.ChannelCloseType) {
+		closureType contractcourt.ChannelCloseType) {
		// TODO(conner): Properly respect the update and error channels
		// returned by CloseLink.

@@ -909,7 +909,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,

	// We will use the following channel to reliably hand off contract
	// breach events from the ChannelArbitrator to the breachArbiter,
-	contractBreaches := make(chan *ContractBreachEvent, 1)
+	contractBreaches := make(chan *contractcourt.ContractBreachEvent, 1)

	s.chainArb = contractcourt.NewChainArbitrator(contractcourt.ChainArbitratorConfig{
		ChainHash: *s.cfg.ActiveNetParams.GenesisHash,

@@ -976,7 +976,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
			finalErr <- markClosed()
		}

-		event := &ContractBreachEvent{
+		event := &contractcourt.ContractBreachEvent{
			ChanPoint:         chanPoint,
			ProcessACK:        processACK,
			BreachRetribution: breachRet,

@@ -1012,7 +1012,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
		Clock: clock.NewDefaultClock(),
	}, dbs.chanStateDB)

-	s.breachArbiter = newBreachArbiter(&BreachConfig{
+	s.breachArbiter = contractcourt.NewBreachArbiter(&contractcourt.BreachConfig{
		CloseLink: closeLink,
		DB:        dbs.chanStateDB,
		Estimator: s.cc.FeeEstimator,

@@ -1021,7 +1021,9 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
		PublishTransaction: cc.Wallet.PublishTransaction,
		ContractBreaches:   contractBreaches,
		Signer:             cc.Wallet.Cfg.Signer,
-		Store:              newRetributionStore(dbs.chanStateDB),
+		Store: contractcourt.NewRetributionStore(
+			dbs.chanStateDB,
+		),
	})

	// Select the configuration and furnding parameters for Bitcoin or
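The server now hands breach notifications to the exported breach arbiter through a channel of `contractcourt.ContractBreachEvent` values, matching the construction in the hunks above. A hedged sketch of the producer side of that hand-off; `processACK`, `breachRet` and `quit` are assumed to be prepared by the caller exactly as the server.go hunks suggest:

```go
// Sketch only: package up the breach details and deliver them to the
// breach arbiter via the channel passed in as BreachConfig.ContractBreaches.
event := &contractcourt.ContractBreachEvent{
	ChanPoint:         chanPoint,
	ProcessACK:        processACK,
	BreachRetribution: breachRet,
}

select {
case contractBreaches <- event:
	// The arbiter will invoke ProcessACK once the event is safely stored.
case <-quit:
	// Shutting down; the event is not delivered.
}
```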