Merge pull request #1248 from halseth/close-channel-fix

[bugfix] Wait for confirmation before marking channel cooperatively closed
Olaoluwa Osuntokun 2018-05-24 18:28:31 -07:00 committed by GitHub
commit fc3d711cf0
9 changed files with 94 additions and 309 deletions
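
In short, the flow after this change is: sign and broadcast the cooperative closing transaction, record only that it was broadcast (MarkCommitmentBroadcasted), and let the chain watcher mark the channel fully closed once the transaction actually confirms. A rough, self-contained sketch of that ordering (the types below are illustrative stand-ins, not lnd APIs):

package main

import "fmt"

type chanStatus int

const (
	statusOpen chanStatus = iota
	statusCommitBroadcasted // closing tx handed to the network ("waiting close")
	statusFullyClosed       // closing tx confirmed on-chain
)

type channel struct{ status chanStatus }

// broadcastCoopClose mirrors what the chan closer now does: broadcast the
// closing tx and only record that it was broadcast.
func (c *channel) broadcastCoopClose() { c.status = statusCommitBroadcasted }

// onCloseConfirmed mirrors what the chain watcher now does on the first
// confirmation: write the close summary with IsPending=false, i.e. the
// channel goes straight to fully closed, since a co-op close leaves no
// encumbered outputs to sweep.
func (c *channel) onCloseConfirmed() { c.status = statusFullyClosed }

func main() {
	c := &channel{status: statusOpen}
	c.broadcastCoopClose()
	fmt.Println("waiting close:", c.status == statusCommitBroadcasted)
	c.onCloseConfirmed()
	fmt.Println("fully closed:", c.status == statusFullyClosed)
}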


@@ -165,6 +165,8 @@ func (b *breachArbiter) Start() error {
     // finished our responsibilities. If the removal is successful, we also
     // remove the entry from our in-memory map, to avoid any further action
     // for this channel.
+    // TODO(halseth): no need to continue on IsPending once closed channels
+    // actually mean the close transaction is confirmed.
     for _, chanSummary := range closedChans {
         if chanSummary.IsPending {
             continue


@@ -4,8 +4,6 @@ import (
     "fmt"

     "github.com/davecgh/go-spew/spew"
-    "github.com/lightningnetwork/lnd/channeldb"
-    "github.com/lightningnetwork/lnd/contractcourt"
     "github.com/lightningnetwork/lnd/htlcswitch"
     "github.com/lightningnetwork/lnd/lnwallet"
     "github.com/lightningnetwork/lnd/lnwire"
@@ -135,8 +133,6 @@ type channelCloser struct {
     // TODO(roasbeef): abstract away
     closeReq *htlcswitch.ChanClose

-    closeCtx *contractcourt.CooperativeCloseCtx
-
     // localDeliveryScript is the script that we'll send our settled
     // channel funds to.
     localDeliveryScript []byte
@@ -151,8 +147,7 @@ type channelCloser struct {
// only be populated iff, we're the initiator of this closing request.
func newChannelCloser(cfg chanCloseCfg, deliveryScript []byte,
    idealFeePerKw lnwallet.SatPerKWeight, negotiationHeight uint32,
-    closeReq *htlcswitch.ChanClose,
-    closeCtx *contractcourt.CooperativeCloseCtx) *channelCloser {
+    closeReq *htlcswitch.ChanClose) *channelCloser {

    // Given the target fee-per-kw, we'll compute what our ideal _total_
    // fee will be starting at for this fee negotiation.
@@ -186,7 +181,6 @@ func newChannelCloser(cfg chanCloseCfg, deliveryScript []byte,
        cfg:                 cfg,
        negotiationHeight:   negotiationHeight,
        idealFeeSat:         idealFeeSat,
-        closeCtx:            closeCtx,
        localDeliveryScript: deliveryScript,
        priorFeeOffers:      make(map[btcutil.Amount]*lnwire.ClosingSigned),
    }
@@ -420,7 +414,7 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b
        remoteSigBytes := closeSignedMsg.Signature.ToSignatureBytes()
        remoteSig := append(remoteSigBytes, byte(txscript.SigHashAll))

-        closeTx, finalLocalBalance, err := c.cfg.channel.CompleteCooperativeClose(
+        closeTx, _, err := c.cfg.channel.CompleteCooperativeClose(
            localSig, remoteSig, c.localDeliveryScript,
            c.remoteDeliveryScript, remoteProposedFee,
        )
@@ -438,33 +432,16 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b
        if err := c.cfg.broadcastTx(closeTx); err != nil {
            return nil, false, err
        }
-
-        // Clear out the current channel state, marking the channel as
-        // being closed within the database.
-        closingTxid := closeTx.TxHash()
-        chanInfo := c.cfg.channel.StateSnapshot()
-        c.closeCtx.Finalize(&channeldb.ChannelCloseSummary{
-            ChanPoint:      c.chanPoint,
-            ChainHash:      chanInfo.ChainHash,
-            ClosingTXID:    closingTxid,
-            CloseHeight:    c.negotiationHeight,
-            RemotePub:      &chanInfo.RemoteIdentity,
-            Capacity:       chanInfo.Capacity,
-            SettledBalance: finalLocalBalance,
-            CloseType:      channeldb.CooperativeClose,
-            ShortChanID:    c.cfg.channel.ShortChanID(),
-            IsPending:      true,
-        })
-
-        // TODO(roasbeef): don't need, ChainWatcher will handle
-        c.state = closeFinished
+        if err := c.cfg.channel.MarkCommitmentBroadcasted(); err != nil {
+            return nil, false, err
+        }

        // Finally, we'll transition to the closeFinished state, and
        // also return the final close signed message we sent.
        // Additionally, we return true for the second argument to
        // indicate we're finished with the channel closing
        // negotiation.
+        c.state = closeFinished
        matchingOffer := c.priorFeeOffers[remoteProposedFee]
        return []lnwire.Message{matchingOffer}, true, nil
@@ -493,7 +470,7 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b
// current compromise fee.
func (c *channelCloser) proposeCloseSigned(fee btcutil.Amount) (*lnwire.ClosingSigned, error) {
-    rawSig, txid, localAmt, err := c.cfg.channel.CreateCloseProposal(
+    rawSig, _, _, err := c.cfg.channel.CreateCloseProposal(
        fee, c.localDeliveryScript, c.remoteDeliveryScript,
    )
    if err != nil {
@@ -521,20 +498,6 @@ func (c *channelCloser) proposeCloseSigned(fee btcutil.Amount) (*lnwire.ClosingS
    // accepts our offer. This way, we don't have to re-sign.
    c.priorFeeOffers[fee] = closeSignedMsg

-    chanInfo := c.cfg.channel.StateSnapshot()
-    c.closeCtx.LogPotentialClose(&channeldb.ChannelCloseSummary{
-        ChanPoint:      c.chanPoint,
-        ChainHash:      chanInfo.ChainHash,
-        ClosingTXID:    *txid,
-        CloseHeight:    c.negotiationHeight,
-        RemotePub:      &chanInfo.RemoteIdentity,
-        Capacity:       chanInfo.Capacity,
-        SettledBalance: localAmt,
-        CloseType:      channeldb.CooperativeClose,
-        ShortChanID:    c.cfg.channel.ShortChanID(),
-        IsPending:      true,
-    })
-
    return closeSignedMsg, nil
}


@@ -1711,12 +1711,12 @@ type ChannelCloseSummary struct {
    // IsPending indicates whether this channel is in the 'pending close'
    // state, which means the channel closing transaction has been
-    // broadcast, but not confirmed yet or has not yet been fully resolved.
-    // In the case of a channel that has been cooperatively closed, it will
-    // no longer be considered pending as soon as the closing transaction
-    // has been confirmed. However, for channel that have been force
-    // closed, they'll stay marked as "pending" until _all_ the pending
-    // funds have been swept.
+    // confirmed, but not yet fully resolved. In the case of a channel
+    // that has been cooperatively closed, it will go straight into the
+    // fully resolved state as soon as the closing transaction has been
+    // confirmed. However, for channels that have been force closed,
+    // they'll stay marked as "pending" until _all_ the pending funds
+    // have been swept.
    IsPending bool

    // RemoteCurrentRevocation is the current revocation for their
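
Under the updated semantics, a summary with IsPending set only ever corresponds to a force close that is still sweeping funds; a cooperative close is fully resolved at its first confirmation. A minimal sketch of consuming that flag (the struct below is a stand-in for channeldb.ChannelCloseSummary, which carries many more fields):

package main

import "fmt"

// closeSummary is a stand-in carrying just the field discussed above.
type closeSummary struct {
	chanPoint string
	isPending bool
}

// pendingCloses returns only the summaries that still have funds pending
// sweep (force closes); fully resolved channels are skipped, mirroring the
// breacharbiter loop above that ignores summaries with IsPending set.
func pendingCloses(chans []closeSummary) []closeSummary {
	var pending []closeSummary
	for _, c := range chans {
		if c.isPending {
			pending = append(pending, c)
		}
	}
	return pending
}

func main() {
	chans := []closeSummary{
		{chanPoint: "coopchan:0", isPending: false}, // resolved at first conf
		{chanPoint: "forcechan:1", isPending: true}, // still sweeping outputs
	}
	fmt.Println("still pending:", len(pendingCloses(chans)))
}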


@@ -341,9 +341,22 @@ func (c *ChainArbitrator) Start() error {
            pCache:    c.cfg.PreimageDB,
            signer:    c.cfg.Signer,
            isOurAddr: c.cfg.IsOurAddress,
-            markChanClosed: func() error {
-                // TODO(roasbeef): also need to pass in log?
-                return c.resolveContract(chanPoint, nil)
+            notifyChanClosed: func() error {
+                c.Lock()
+                delete(c.activeChannels, chanPoint)
+
+                chainWatcher, ok := c.activeWatchers[chanPoint]
+                if ok {
+                    // Since the chainWatcher is
+                    // calling notifyChanClosed, we
+                    // must stop it in a goroutine
+                    // to not deadlock.
+                    go chainWatcher.Stop()
+                }
+                delete(c.activeWatchers, chanPoint)
+                c.Unlock()
+
+                return nil
            },
            contractBreach: func(retInfo *lnwallet.BreachRetribution) error {
                return c.cfg.ContractBreach(chanPoint, retInfo)
@@ -379,11 +392,17 @@ func (c *ChainArbitrator) Start() error {
    }

    // Next, for each channel is the closing state, we'll launch a
-    // corresponding more restricted resolver.
+    // corresponding more restricted resolver, as we don't have to watch
+    // the chain any longer, only resolve the contracts on the confirmed
+    // commitment.
    for _, closeChanInfo := range closingChannels {
        // If this is a pending cooperative close channel then we'll
        // simply launch a goroutine to wait until the closing
        // transaction has been confirmed.
+        // TODO(halseth): can remove this since no coop close channels
+        // should be "pending close" after the recent changes. Keeping
+        // it for a bit in case someone with a coop close channel in
+        // the pending close state upgrades to the new commit.
        if closeChanInfo.CloseType == channeldb.CooperativeClose {
            go c.watchForChannelClose(closeChanInfo)
@@ -677,8 +696,21 @@ func (c *ChainArbitrator) WatchNewChannel(newChan *channeldb.OpenChannel) error
        pCache:    c.cfg.PreimageDB,
        signer:    c.cfg.Signer,
        isOurAddr: c.cfg.IsOurAddress,
-        markChanClosed: func() error {
-            return c.resolveContract(chanPoint, nil)
+        notifyChanClosed: func() error {
+            c.Lock()
+            delete(c.activeChannels, chanPoint)
+
+            chainWatcher, ok := c.activeWatchers[chanPoint]
+            if ok {
+                // Since the chainWatcher is calling
+                // notifyChanClosed, we must stop it in
+                // a goroutine to not deadlock.
+                go chainWatcher.Stop()
+            }
+            delete(c.activeWatchers, chanPoint)
+            c.Unlock()
+
+            return nil
        },
        contractBreach: func(retInfo *lnwallet.BreachRetribution) error {
            return c.cfg.ContractBreach(chanPoint, retInfo)
@@ -737,20 +769,5 @@
    return watcher.SubscribeChannelEvents(), nil
}

-// BeginCoopChanClose allows the initiator or responder to a cooperative
-// channel closure to signal to the ChainArbitrator that we're starting close
-// negotiation. The caller can use this context to allow the underlying chain
-// watcher to be prepared to act if *any* of the transactions that may
-// potentially be signed off on during fee negotiation are confirmed.
-func (c *ChainArbitrator) BeginCoopChanClose(chanPoint wire.OutPoint) (*CooperativeCloseCtx, error) {
-    watcher, ok := c.activeWatchers[chanPoint]
-    if !ok {
-        return nil, fmt.Errorf("unable to find watcher for: %v",
-            chanPoint)
-    }
-
-    return watcher.BeginCooperativeClose(), nil
-}
-
// TODO(roasbeef): arbitration reports
//  * types: contested, waiting for success conf, etc
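
The notifyChanClosed closures above run on the chainWatcher's own goroutine, so the arbitrator defers chainWatcher.Stop() to a fresh goroutine; calling Stop() synchronously there would have the watcher wait on itself. A minimal, self-contained sketch of that deadlock-avoidance pattern (the watcher type here is hypothetical, not the lnd one):

package main

import (
	"fmt"
	"sync"
)

// watcher is a hypothetical stand-in for a component with its own goroutine
// and a stop method that waits for that goroutine to exit.
type watcher struct {
	wg       sync.WaitGroup
	quit     chan struct{}
	onClosed func()
}

func (w *watcher) start() {
	w.quit = make(chan struct{})
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		// Pretend we just saw the closing tx confirm and hand control
		// back to the arbitrator via the callback.
		w.onClosed()
		<-w.quit
	}()
}

// stop signals the watcher goroutine and waits for it. If the watcher
// goroutine itself called stop synchronously from onClosed, wg.Wait would
// be waiting for a goroutine that is blocked inside stop: a deadlock.
func (w *watcher) stop() {
	close(w.quit)
	w.wg.Wait()
}

func main() {
	w := &watcher{}
	done := make(chan struct{})
	w.onClosed = func() {
		// As in the diff: stop the watcher from a new goroutine so the
		// watcher goroutine can return and wg.Wait can complete.
		go func() {
			w.stop()
			close(done)
		}()
	}
	w.start()
	<-done
	fmt.Println("watcher stopped without deadlocking")
}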


@@ -10,7 +10,6 @@ import (
    "github.com/lightningnetwork/lnd/channeldb"
    "github.com/lightningnetwork/lnd/lnwallet"
    "github.com/roasbeef/btcd/chaincfg"
-    "github.com/roasbeef/btcd/chaincfg/chainhash"
    "github.com/roasbeef/btcd/txscript"
    "github.com/roasbeef/btcd/wire"
    "github.com/roasbeef/btcutil"
@@ -79,10 +78,12 @@ type chainWatcherConfig struct {
    // machine.
    signer lnwallet.Signer

-    // markChanClosed is a method that will be called by the watcher if it
-    // detects that a cooperative closure transaction has successfully been
-    // confirmed.
-    markChanClosed func() error
+    // notifyChanClosed is a method that will be called by the watcher when
+    // it has detected a close on-chain and performed all necessary
+    // actions, like marking the channel closed in the database and
+    // notifying all its subscribers. It lets the chain arbitrator know
+    // that the chain watcher can be stopped.
+    notifyChanClosed func() error

    // contractBreach is a method that will be called by the watcher if it
    // detects that a contract breach transaction has been confirmed. Only
@@ -123,12 +124,6 @@ type chainWatcher struct {
    // clientSubscriptions is a map that keeps track of all the active
    // client subscriptions for events related to this channel.
    clientSubscriptions map[uint64]*ChainEventSubscription
-
-    // possibleCloses is a map from cooperative closing transaction txid to
-    // a close summary that describes the nature of the channel closure.
-    // We'll use this map to keep track of all possible channel closures to
-    // ensure out db state is correct in the end.
-    possibleCloses map[chainhash.Hash]*channeldb.ChannelCloseSummary
}

// newChainWatcher returns a new instance of a chainWatcher for a channel given
@@ -158,7 +153,6 @@ func newChainWatcher(cfg chainWatcherConfig) (*chainWatcher, error) {
        stateHintObfuscator: stateHint,
        quit:                make(chan struct{}),
        clientSubscriptions: make(map[uint64]*ChainEventSubscription),
-        possibleCloses:      make(map[chainhash.Hash]*channeldb.ChannelCloseSummary),
    }, nil
}
@@ -461,8 +455,9 @@ func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDet
    // ours.
    localAmt := c.toSelfAmount(broadcastTx)

-    // Once this is known, we'll mark the state as pending close in the
-    // database.
+    // Once this is known, we'll mark the state as fully closed in the
+    // database. We can do this as a cooperatively closed channel has all
+    // its outputs resolved after only one confirmation.
    closeSummary := &channeldb.ChannelCloseSummary{
        ChanPoint:      c.cfg.chanState.FundingOutpoint,
        ChainHash:      c.cfg.chanState.ChainHash,
@@ -473,7 +468,7 @@ func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDet
        SettledBalance: localAmt,
        CloseType:      channeldb.CooperativeClose,
        ShortChanID:    c.cfg.chanState.ShortChanID(),
-        IsPending:      true,
+        IsPending:      false,
    }
    err := c.cfg.chanState.CloseChannel(closeSummary)
    if err != nil && err != channeldb.ErrNoActiveChannels &&
@@ -481,45 +476,10 @@ func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDet
        return fmt.Errorf("unable to close chan state: %v", err)
    }

-    // Finally, we'll launch a goroutine to mark the channel as fully
-    // closed once the transaction confirmed.
-    go func() {
-        confNtfn, err := c.cfg.notifier.RegisterConfirmationsNtfn(
-            commitSpend.SpenderTxHash, 1,
-            uint32(commitSpend.SpendingHeight),
-        )
-        if err != nil {
-            log.Errorf("unable to register for conf: %v", err)
-            return
-        }
-
-        log.Infof("closeObserver: waiting for txid=%v to close "+
-            "ChannelPoint(%v) on chain", commitSpend.SpenderTxHash,
-            c.cfg.chanState.FundingOutpoint)
-
-        select {
-        case confInfo, ok := <-confNtfn.Confirmed:
-            if !ok {
-                log.Errorf("notifier exiting")
-                return
-            }
-
-            log.Infof("closeObserver: ChannelPoint(%v) is fully "+
-                "closed, at height: %v",
-                c.cfg.chanState.FundingOutpoint,
-                confInfo.BlockHeight)
-
-            err := c.cfg.markChanClosed()
-            if err != nil {
-                log.Errorf("unable to mark chan fully "+
-                    "closed: %v", err)
-                return
-            }
-
-        case <-c.quit:
-            return
-        }
-    }()
+    log.Infof("closeObserver: ChannelPoint(%v) is fully "+
+        "closed, at height: %v",
+        c.cfg.chanState.FundingOutpoint,
+        commitSpend.SpendingHeight)

    c.Lock()
    for _, sub := range c.clientSubscriptions {
@@ -532,6 +492,14 @@ func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDet
    }
    c.Unlock()

+    // Now notify the ChainArbitrator that the watcher's job is done, such
+    // that it can shut it down and clean up.
+    if err := c.cfg.notifyChanClosed(); err != nil {
+        log.Errorf("unable to notify channel closed for "+
+            "ChannelPoint(%v): %v",
+            c.cfg.chanState.FundingOutpoint, err)
+    }
+
    return nil
}
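
For subscribers, the externally visible signal is the cooperative-closure event the watcher fans out above, delivered after the close summary has already been written with IsPending=false. A hedged sketch of a consumer (the subscription struct below only mirrors the CooperativeClosure channel seen in this file; it is not the real ChainEventSubscription):

package main

import "fmt"

// subscription is an illustrative stand-in exposing only a cooperative-close
// signal and a teardown channel.
type subscription struct {
	cooperativeClosure chan struct{}
	quit               chan struct{}
}

// awaitCoopClose blocks until the watcher signals that the closing tx
// confirmed (at which point the channel is already marked fully closed in
// the database), or until the subscription is torn down.
func awaitCoopClose(sub *subscription) bool {
	select {
	case <-sub.cooperativeClosure:
		return true
	case <-sub.quit:
		return false
	}
}

func main() {
	sub := &subscription{
		cooperativeClosure: make(chan struct{}, 1),
		quit:               make(chan struct{}),
	}
	// Simulate dispatchCooperativeClose notifying its subscribers.
	sub.cooperativeClosure <- struct{}{}
	fmt.Println("co-op close observed:", awaitCoopClose(sub))
}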
@@ -751,147 +719,3 @@ func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail

    return nil
}
-
-// CooperativeCloseCtx is a transactional object that's used by external
-// parties to initiate a cooperative closure negotiation. During the
-// negotiation, we sign multiple versions of a closing transaction, either of
-// which may be counter signed and broadcast by the remote party at any time.
-// As a result, we'll need to watch the chain to see if any of these confirm,
-// only afterwards will we mark the channel as fully closed.
-type CooperativeCloseCtx struct {
-    // potentialCloses is a channel will be used by the party negotiating
-    // the cooperative closure to send possible closing states to the chain
-    // watcher to ensure we detect all on-chain spends.
-    potentialCloses chan *channeldb.ChannelCloseSummary
-
-    // activeCloses keeps track of all the txid's that we're currently
-    // watching for.
-    activeCloses map[chainhash.Hash]struct{}
-
-    // watchCancel will be closed once *one* of the txid's in the map above
-    // is confirmed. This will cause all the lingering goroutines to exit.
-    watchCancel chan struct{}
-
-    watcher *chainWatcher
-
-    sync.Mutex
-}
-
-// BeginCooperativeClose should be called by the party negotiating the
-// cooperative closure before the first signature is sent to the remote party.
-// This will return a context that should be used to communicate possible
-// closing states so we can act on them.
-func (c *chainWatcher) BeginCooperativeClose() *CooperativeCloseCtx {
-    // We'll simply return a new close context that will be used be the
-    // caller to notify us of potential closes.
-    return &CooperativeCloseCtx{
-        potentialCloses: make(chan *channeldb.ChannelCloseSummary),
-        watchCancel:     make(chan struct{}),
-        activeCloses:    make(map[chainhash.Hash]struct{}),
-        watcher:         c,
-    }
-}
-
-// LogPotentialClose should be called by the party negotiating the cooperative
-// closure once they signed a new state, but *before* they transmit it to the
-// remote party. This will ensure that the chain watcher is able to log the new
-// state it should watch the chain for.
-func (c *CooperativeCloseCtx) LogPotentialClose(potentialClose *channeldb.ChannelCloseSummary) {
-    c.Lock()
-    defer c.Unlock()
-
-    // We'll check to see if we're already watching for a close of this
-    // channel, if so, then we'll exit early to avoid launching a duplicate
-    // goroutine.
-    if _, ok := c.activeCloses[potentialClose.ClosingTXID]; ok {
-        return
-    }
-
-    // Otherwise, we'll mark this txid as currently being watched.
-    c.activeCloses[potentialClose.ClosingTXID] = struct{}{}
-
-    // We'll take this potential close, and launch a goroutine which will
-    // wait until it's confirmed, then update the database state. When a
-    // potential close gets confirmed, we'll cancel out all other launched
-    // goroutines.
-    go func() {
-        confNtfn, err := c.watcher.cfg.notifier.RegisterConfirmationsNtfn(
-            &potentialClose.ClosingTXID, 1,
-            uint32(potentialClose.CloseHeight),
-        )
-        if err != nil {
-            log.Errorf("unable to register for conf: %v", err)
-            return
-        }
-
-        log.Infof("closeCtx: waiting for txid=%v to close "+
-            "ChannelPoint(%v) on chain", potentialClose.ClosingTXID,
-            c.watcher.cfg.chanState.FundingOutpoint)
-
-        select {
-        case confInfo, ok := <-confNtfn.Confirmed:
-            if !ok {
-                log.Errorf("notifier exiting")
-                return
-            }
-
-            log.Infof("closeCtx: ChannelPoint(%v) is fully closed, at "+
-                "height: %v", c.watcher.cfg.chanState.FundingOutpoint,
-                confInfo.BlockHeight)
-
-            close(c.watchCancel)
-
-            c.watcher.Lock()
-            for _, sub := range c.watcher.clientSubscriptions {
-                select {
-                case sub.CooperativeClosure <- struct{}{}:
-                case <-c.watcher.quit:
-                }
-            }
-            c.watcher.Unlock()
-
-            err := c.watcher.cfg.chanState.CloseChannel(potentialClose)
-            if err != nil {
-                log.Warnf("closeCtx: unable to update latest "+
-                    "close for ChannelPoint(%v): %v",
-                    c.watcher.cfg.chanState.FundingOutpoint, err)
-            }
-
-            err = c.watcher.cfg.markChanClosed()
-            if err != nil {
-                log.Errorf("closeCtx: unable to mark chan fully "+
-                    "closed: %v", err)
-                return
-            }
-
-        case <-c.watchCancel:
-            log.Debugf("Exiting watch for close of txid=%v for "+
-                "ChannelPoint(%v)", potentialClose.ClosingTXID,
-                c.watcher.cfg.chanState.FundingOutpoint)
-
-        case <-c.watcher.quit:
-            return
-        }
-    }()
-}
-
-// Finalize should be called once both parties agree on a final transaction to
-// close out the channel. This method will immediately mark the channel as
-// pending closed in the database, then launch a goroutine to mark the channel
-// fully closed upon confirmation.
-func (c *CooperativeCloseCtx) Finalize(preferredClose *channeldb.ChannelCloseSummary) error {
-    chanPoint := c.watcher.cfg.chanState.FundingOutpoint
-
-    log.Infof("Finalizing chan close for ChannelPoint(%v)", chanPoint)
-
-    err := c.watcher.cfg.chanState.CloseChannel(preferredClose)
-    if err != nil {
-        log.Errorf("closeCtx: unable to close ChannelPoint(%v): %v",
-            chanPoint, err)
-        return err
-    }
-
-    go c.LogPotentialClose(preferredClose)
-
-    return nil
-}


@@ -216,7 +216,7 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest,
    chanPointStr := fmt.Sprintf("%v:%v", txid, fundingChanPoint.OutputIndex)

    // If we didn't force close the transaction, at this point, the channel
-    // should now be marked as being in the state of "pending close".
+    // should now be marked as being in the state of "waiting close".
    if !force {
        pendingChansRequest := &lnrpc.PendingChannelsRequest{}
        pendingChanResp, err := node.PendingChannels(ctx, pendingChansRequest)
@@ -224,14 +224,14 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest,
            t.Fatalf("unable to query for pending channels: %v", err)
        }
        var found bool
-        for _, pendingClose := range pendingChanResp.PendingClosingChannels {
+        for _, pendingClose := range pendingChanResp.WaitingCloseChannels {
            if pendingClose.Channel.ChannelPoint == chanPointStr {
                found = true
                break
            }
        }
        if !found {
-            t.Fatalf("channel not marked as pending close")
+            t.Fatalf("channel not marked as waiting close")
        }
    }
@@ -247,7 +247,7 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest,
    assertTxInBlock(t, block, closingTxid)

-    // Finally, the transaction should no longer be in the pending close
+    // Finally, the transaction should no longer be in the waiting close
    // state as we've just mined a block that should include the closing
    // transaction. This only applies for co-op close channels though.
    if !force {
@@ -260,7 +260,7 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest,
            return false
        }
-        for _, pendingClose := range pendingChanResp.PendingClosingChannels {
+        for _, pendingClose := range pendingChanResp.WaitingCloseChannels {
            if pendingClose.Channel.ChannelPoint == chanPointStr {
                return false
            }
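
The same check can be expressed as a small standalone helper against the RPC: a channel whose closing transaction has been broadcast but not yet confirmed now shows up under WaitingCloseChannels rather than PendingClosingChannels. The method and field names below follow the test code in this diff; the gRPC connection setup is omitted and error handling is minimal.

package main

import (
	"context"
	"fmt"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// isWaitingClose reports whether the given channel point is currently listed
// in the "waiting close" bucket of the PendingChannels response.
func isWaitingClose(ctx context.Context, client lnrpc.LightningClient,
	chanPointStr string) (bool, error) {

	resp, err := client.PendingChannels(ctx, &lnrpc.PendingChannelsRequest{})
	if err != nil {
		return false, err
	}
	for _, waiting := range resp.WaitingCloseChannels {
		if waiting.Channel.ChannelPoint == chanPointStr {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// Wiring up an actual lnrpc.LightningClient is out of scope here.
	fmt.Println("see isWaitingClose for the WaitingCloseChannels lookup")
}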


@@ -5580,13 +5580,6 @@ func (lc *LightningChannel) CompleteCooperativeClose(localSig, remoteSig []byte,
    return closeTx, ourBalance, nil
}

-// DeleteState deletes all state concerning the channel from the underlying
-// database, only leaving a small summary describing metadata of the
-// channel's lifetime.
-func (lc *LightningChannel) DeleteState(c *channeldb.ChannelCloseSummary) error {
-    return lc.channelState.CloseChannel(c)
-}
-
// AvailableBalance returns the current available balance within the channel.
// By available balance, we mean that if at this very instance s new commitment
// were to be created which evals all the log entries, what would our available
@@ -5903,6 +5896,16 @@ func (lc *LightningChannel) State() *channeldb.OpenChannel {
    return lc.channelState
}

+// MarkCommitmentBroadcasted marks that a commitment transaction for this
+// channel has been broadcast, either our own or the remote's, and that we
+// should watch the chain for it to confirm before taking any further action.
+func (lc *LightningChannel) MarkCommitmentBroadcasted() error {
+    lc.Lock()
+    defer lc.Unlock()
+
+    return lc.channelState.MarkCommitmentBroadcasted()
+}
+
// ActiveHtlcs returns a slice of HTLC's which are currently active on *both*
// commitment transactions.
func (lc *LightningChannel) ActiveHtlcs() []channeldb.HTLC {

peer.go

@@ -1572,18 +1572,6 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, e
            return nil, fmt.Errorf("cannot obtain best block")
        }

-        // Before we create the chan closer, we'll start a new
-        // cooperative channel closure transaction from the chain arb.
-        // With this context, we'll ensure that we're able to respond
-        // if *any* of the transactions we sign off on are ever
-        // broadcast.
-        closeCtx, err := p.server.chainArb.BeginCoopChanClose(
-            *channel.ChannelPoint(),
-        )
-        if err != nil {
-            return nil, err
-        }
-
        chanCloser = newChannelCloser(
            chanCloseCfg{
                channel: channel,
@@ -1595,7 +1583,6 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, e
            targetFeePerKw,
            uint32(startingHeight),
            nil,
-            closeCtx,
        )
        p.activeChanCloses[chanID] = chanCloser
    }
@@ -1638,20 +1625,6 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
            return
        }

-        // Before we create the chan closer, we'll start a new
-        // cooperative channel closure transaction from the chain arb.
-        // With this context, we'll ensure that we're able to respond
-        // if *any* of the transactions we sign off on are ever
-        // broadcast.
-        closeCtx, err := p.server.chainArb.BeginCoopChanClose(
-            *channel.ChannelPoint(),
-        )
-        if err != nil {
-            peerLog.Errorf(err.Error())
-            req.Err <- err
-            return
-        }
-
        // Next, we'll create a new channel closer state machine to
        // handle the close negotiation.
        _, startingHeight, err := p.server.cc.chainIO.GetBestBlock()
@@ -1671,7 +1644,6 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
            req.TargetFeePerKw,
            uint32(startingHeight),
            req,
-            closeCtx,
        )
        p.activeChanCloses[chanID] = chanCloser


@@ -1455,6 +1455,10 @@ func (r *rpcServer) PendingChannels(ctx context.Context,
        // If the channel was closed cooperatively, then we'll only
        // need to tack on the closing txid.
+        // TODO(halseth): remove. After recent changes, a coop closed
+        // channel should never be in the "pending close" state.
+        // Keeping for now to let someone that upgraded in the middle
+        // of a close let their closing tx confirm.
        case channeldb.CooperativeClose:
            resp.PendingClosingChannels = append(
                resp.PendingClosingChannels,