itest+lntest: fix channel force close test

Also flatten the tests to make them easier to maintain.
yyforyongyu 2024-06-29 10:37:38 +08:00
parent 9d4a60d613
commit b7feeba008
GPG Key ID: 9BCD95C4FF296868
4 changed files with 160 additions and 245 deletions
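
The flattening replaces the single test that looped over commitment types with `ht.Run` subtests by one top-level itest case per commitment type, each delegating to a shared runner. A minimal sketch of the resulting shape, with bodies elided (simplified, not the literal diff):

package itest

import "github.com/lightningnetwork/lnd/lntest"

// Shared runner: every commitment-type variant funnels through here.
func runChannelForceClosureTest(ht *lntest.HarnessTest,
	cfgs [][]string, params lntest.OpenChannelParams) {

	// Shared setup and assertions (see the second file below).
}

// Thin per-type wrapper; this is what allTestCases registers.
func testChannelForceClosureAnchor(ht *lntest.HarnessTest) {
	var (
		cfgs   [][]string               // per-type node configs
		params lntest.OpenChannelParams // per-type channel params
	)
	runChannelForceClosureTest(ht, cfgs, params)
}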

View File

@ -210,8 +210,12 @@ var allTestCases = []*lntest.TestCase{
TestFunc: testChannelUnsettledBalance,
},
{
Name: "channel force closure",
TestFunc: testChannelForceClosure,
Name: "channel force closure anchor",
TestFunc: testChannelForceClosureAnchor,
},
{
Name: "channel force closure simple taproot",
TestFunc: testChannelForceClosureSimpleTaproot,
},
{
Name: "failing channel",

View File

@ -3,14 +3,12 @@ package itest
import (
"bytes"
"fmt"
"testing"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/chainreg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
@ -20,97 +18,82 @@ import (
"github.com/stretchr/testify/require"
)
// testChannelForceClosure performs a test to exercise the behavior of "force"
// closing a channel or unilaterally broadcasting the latest local commitment
// state on-chain. The test creates a new channel between Alice and Carol, then
// force closes the channel after some cursory assertions. Within the test, a
// total of 3 + n transactions will be broadcast, representing the commitment
// transaction, a transaction sweeping the local CSV delayed output, a
// transaction sweeping the CSV delayed 2nd-layer htlcs outputs, and n
// htlc timeout transactions, where n is the number of payments Alice attempted
const pushAmt = btcutil.Amount(5e5)
// testChannelForceClosureAnchor runs `runChannelForceClosureTest` with anchor
// channels.
func testChannelForceClosureAnchor(ht *lntest.HarnessTest) {
// Create a simple network: Alice -> Carol, using anchor channels.
//
// Prepare params.
openChannelParams := lntest.OpenChannelParams{
Amt: chanAmt,
PushAmt: pushAmt,
CommitmentType: lnrpc.CommitmentType_ANCHORS,
}
cfg := node.CfgAnchor
cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...)
cfgs := [][]string{cfg, cfgCarol}
runChannelForceClosureTest(ht, cfgs, openChannelParams)
}
// testChannelForceClosureSimpleTaproot runs `runChannelForceClosureTest` with
// simple taproot channels.
func testChannelForceClosureSimpleTaproot(ht *lntest.HarnessTest) {
// Create a simple network: Alice -> Carol, using simple taproot
// channels.
//
// Prepare params.
openChannelParams := lntest.OpenChannelParams{
Amt: chanAmt,
PushAmt: pushAmt,
// If the channel is a taproot channel, then we'll need to
// create a private channel.
//
// TODO(roasbeef): lift after G175
CommitmentType: lnrpc.CommitmentType_SIMPLE_TAPROOT,
Private: true,
}
cfg := node.CfgSimpleTaproot
cfgCarol := append([]string{"--hodl.exit-settle"}, cfg...)
cfgs := [][]string{cfg, cfgCarol}
runChannelForceClosureTest(ht, cfgs, openChannelParams)
}
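One detail worth noting in both wrappers above: `append([]string{"--hodl.exit-settle"}, cfg...)` copies the shared base config into a brand-new slice rather than appending to it, so `node.CfgAnchor` and `node.CfgSimpleTaproot` are never mutated. A self-contained sketch of the difference (hypothetical flag names):

package main

import "fmt"

func main() {
	base := []string{"--flag-a", "--flag-b"} // shared base config

	// Safe: prepend into a fresh slice; base stays untouched.
	carol := append([]string{"--hodl.exit-settle"}, base...)

	// Risky alternative: append(base, "--hodl.exit-settle") could
	// write into base's backing array if it has spare capacity,
	// silently changing configs built from base later.

	fmt.Println(base)  // [--flag-a --flag-b]
	fmt.Println(carol) // [--hodl.exit-settle --flag-a --flag-b]
}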
// runChannelForceClosureTest performs a test to exercise the behavior of
// "force" closing a channel or unilaterally broadcasting the latest local
// commitment state on-chain. The test creates a new channel between Alice and
// Carol, then force closes the channel after some cursory assertions. Within
// the test, a total of 3 + n transactions will be broadcast, representing the
// commitment transaction, a transaction sweeping the local CSV delayed output,
// a transaction sweeping the CSV delayed 2nd-layer htlc outputs, and n htlc
// timeout transactions, where n is the number of payments Alice attempted
// to send to Carol. This test includes several restarts to ensure that the
// transaction output states are persisted throughout the forced closure
// process.
//
// TODO(roasbeef): also add an unsettled HTLC before force closing.
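For concreteness, a worked count under the doc comment's assumption that the htlc timeouts broadcast individually (numInvoices = 6 in the runner below):

// commitment tx          1
// local CSV sweep        1
// 2nd-layer CSV sweep    1
// htlc timeout txns      n = 6
// total                  3 + n = 9 broadcasts
//
// Later assertions in this diff show the sweeper now aggregating the
// six timeouts into a single tx, so fewer distinct txns actually hit
// the mempool.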
func testChannelForceClosure(ht *lntest.HarnessTest) {
// We'll test the scenario for some of the commitment types, to ensure
// outputs can be swept.
commitTypes := []lnrpc.CommitmentType{
lnrpc.CommitmentType_ANCHORS,
lnrpc.CommitmentType_SIMPLE_TAPROOT,
}
for _, channelType := range commitTypes {
testName := fmt.Sprintf("committype=%v", channelType)
channelType := channelType
success := ht.Run(testName, func(t *testing.T) {
st := ht.Subtest(t)
args := lntest.NodeArgsForCommitType(channelType)
alice := st.NewNode("Alice", args)
defer st.Shutdown(alice)
// Since we'd like to test failure scenarios with
// outstanding htlcs, we'll introduce another node into
// our test network: Carol.
carolArgs := []string{"--hodl.exit-settle"}
carolArgs = append(carolArgs, args...)
carol := st.NewNode("Carol", carolArgs)
defer st.Shutdown(carol)
// Each time, we'll send Alice a new set of coins in order to
// fund the channel.
st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
// NOTE: Alice needs 3 more UTXOs to sweep her
// second-layer txns after a restart - after a restart
// all the time-sensitive sweeps are swept immediately
// without being aggregated.
//
// TODO(yy): remove this once the sweeper can recover its
// state from restart.
st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
st.FundCoins(btcutil.SatoshiPerBitcoin, alice)
// Also give Carol some coins to allow her to sweep her
// anchor.
st.FundCoins(btcutil.SatoshiPerBitcoin, carol)
channelForceClosureTest(st, alice, carol, channelType)
})
if !success {
return
}
}
}
func channelForceClosureTest(ht *lntest.HarnessTest,
alice, carol *node.HarnessNode, channelType lnrpc.CommitmentType) {
func runChannelForceClosureTest(ht *lntest.HarnessTest,
cfgs [][]string, params lntest.OpenChannelParams) {
const (
chanAmt = btcutil.Amount(10e6)
pushAmt = btcutil.Amount(5e6)
paymentAmt = 100000
numInvoices = 6
commitFeeRate = 20000
)
const commitFeeRate = 20000
ht.SetFeeEstimate(commitFeeRate)
// TODO(roasbeef): should check default value in config here
// instead, or make delay a param
defaultCLTV := uint32(chainreg.DefaultBitcoinTimeLockDelta)
// We must let Alice have an open channel before she can send a node
// announcement, so we open a channel with Carol,
ht.ConnectNodes(alice, carol)
// Create a simple network: Alice -> Carol.
chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
alice, carol := nodes[0], nodes[1]
chanPoint := chanPoints[0]
// We need one additional UTXO for sweeping the remote anchor.
ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
if ht.IsNeutrinoBackend() {
ht.FundCoins(btcutil.SatoshiPerBitcoin, alice)
}
// Before we start, obtain Carol's current wallet balance, we'll check
// to ensure that at the end of the force closure by Alice, Carol
@ -118,24 +101,6 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
carolBalResp := carol.RPC.WalletBalance()
carolStartingBalance := carolBalResp.ConfirmedBalance
// If the channel is a taproot channel, then we'll need to create a
// private channel.
//
// TODO(roasbeef): lift after G175
var privateChan bool
if channelType == lnrpc.CommitmentType_SIMPLE_TAPROOT {
privateChan = true
}
chanPoint := ht.OpenChannel(
alice, carol, lntest.OpenChannelParams{
Private: privateChan,
Amt: chanAmt,
PushAmt: pushAmt,
CommitmentType: channelType,
},
)
// Send payments from Alice to Carol. Since Carol is in hodl mode, the
// htlc outputs should be left unsettled, and should be swept by the
// utxo nursery.
@ -145,7 +110,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
Dest: carolPubKey,
Amt: int64(paymentAmt),
PaymentHash: ht.Random32Bytes(),
FinalCltvDelta: chainreg.DefaultBitcoinTimeLockDelta,
FinalCltvDelta: finalCltvDelta,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
@ -162,13 +127,13 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
curHeight := int32(ht.CurrentHeight())
// Using the current height of the chain, derive the relevant heights
// for incubating two-stage htlcs.
// for sweeping two-stage htlcs.
var (
startHeight = uint32(curHeight)
commCsvMaturityHeight = startHeight + 1 + defaultCSV
htlcExpiryHeight = padCLTV(startHeight + defaultCLTV)
htlcExpiryHeight = padCLTV(startHeight + finalCltvDelta)
htlcCsvMaturityHeight = padCLTV(
startHeight + defaultCLTV + 1 + defaultCSV,
startHeight + finalCltvDelta + 1 + defaultCSV,
)
)
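As a worked example of the maturity arithmetic above, with illustrative values only (the real defaultCSV, finalCltvDelta, and padCLTV padding are harness constants defined elsewhere and may differ):

package main

import "fmt"

func main() {
	const (
		startHeight    uint32 = 1000 // hypothetical current height
		defaultCSV     uint32 = 4    // assumed CSV delay
		finalCltvDelta uint32 = 18   // assumed final CLTV delta
		blockPadding   uint32 = 3    // assumed padCLTV slack
	)
	padCLTV := func(h uint32) uint32 { return h + blockPadding }

	// Commit output matures one block after confirmation plus the
	// CSV delay; htlcs expire at CLTV plus padding; 2nd-level
	// outputs mature a CSV delay after the timeout confirms.
	commCsv := startHeight + 1 + defaultCSV
	htlcExpiry := padCLTV(startHeight + finalCltvDelta)
	htlcCsv := padCLTV(startHeight + finalCltvDelta + 1 + defaultCSV)

	fmt.Println(commCsv, htlcExpiry, htlcCsv) // 1005 1021 1026
}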
@ -199,21 +164,15 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
)
// The several restarts in this test are intended to ensure that when a
// channel is force-closed, the UTXO nursery has persisted the state of
// the channel in the closure process and will recover the correct
// channel is force-closed, the contract court has persisted the state
// of the channel in the closure process and will recover the correct
// state when the system comes back on line. This restart tests state
// persistence at the beginning of the process, when the commitment
// transaction has been broadcast but not yet confirmed in a block.
ht.RestartNode(alice)
// To give the neutrino backend some time to catch up with the chain,
// we wait here until we have enough UTXOs to actually sweep the local
// and remote anchor.
const expectedUtxos = 6
ht.AssertNumUTXOs(alice, expectedUtxos)
// We expect to see Alice's force close tx in the mempool.
ht.GetNumTxsFromMempool(1)
ht.AssertNumTxsInMempool(1)
// Mine a block which should confirm the commitment transaction
// broadcast as a result of the force closure. Once mined, we also
@ -258,46 +217,34 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// The following restart is intended to ensure that outputs from the
// force close commitment transaction have been persisted once the
// transaction has been confirmed, but before the outputs are spendable
// (the "kindergarten" bucket.)
// transaction has been confirmed, but before the outputs are
// spendable.
ht.RestartNode(alice)
// Carol should offer her commit and anchor outputs to the sweeper.
sweepTxns := ht.AssertNumPendingSweeps(carol, 2)
// Find Carol's anchor sweep.
// Identify Carol's pending sweeps.
var carolAnchor, carolCommit = sweepTxns[0], sweepTxns[1]
if carolAnchor.AmountSat != uint32(anchorSize) {
carolAnchor, carolCommit = carolCommit, carolAnchor
}
// Mine a block to trigger Carol's sweeper to make decisions on the
// anchor sweeping.
ht.MineEmptyBlocks(1)
// Carol's sweep tx should be in the mempool already, as her output is
// not timelocked.
// not timelocked. This sweep tx should spend her to_local output as
// the anchor output is not economical to spend.
carolTx := ht.GetNumTxsFromMempool(1)[0]
// Carol's sweeping tx should have 2-input-1-output shape.
require.Len(ht, carolTx.TxIn, 2)
// Carol's sweeping tx should have 1-input-1-output shape.
require.Len(ht, carolTx.TxIn, 1)
require.Len(ht, carolTx.TxOut, 1)
// Calculate the total fee Carol paid.
totalFeeCarol := ht.CalculateTxFee(carolTx)
// If we have anchors, add an anchor resolution for carol.
op := fmt.Sprintf("%v:%v", carolAnchor.Outpoint.TxidStr,
carolAnchor.Outpoint.OutputIndex)
carolReports[op] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_ANCHOR,
Outcome: lnrpc.ResolutionOutcome_CLAIMED,
SweepTxid: carolTx.TxHash().String(),
AmountSat: anchorSize,
Outpoint: carolAnchor.Outpoint,
}
op = fmt.Sprintf("%v:%v", carolCommit.Outpoint.TxidStr,
// Carol's anchor report won't be created since it's uneconomical to
// sweep. So we expect to see only the commit sweep report.
op := fmt.Sprintf("%v:%v", carolCommit.Outpoint.TxidStr,
carolCommit.Outpoint.OutputIndex)
carolReports[op] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_COMMIT,
@ -319,9 +266,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Alice should still have the anchor sweeping request.
ht.AssertNumPendingSweeps(alice, 1)
// The following restart checks to ensure that outputs in the
// kindergarten bucket are persisted while waiting for the required
// number of confirmations to be reported.
// The following restart checks to ensure that outputs in the contract
// court are persisted while waiting for the required number of
// confirmations to be reported.
ht.RestartNode(alice)
// Alice should see the channel in her set of pending force closed
@ -344,12 +291,12 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
aliceBalance = forceClose.Channel.LocalBalance
// At this point, the nursery should show that the commitment
// output has 2 block left before its CSV delay expires. In
// output has 3 blocks left before its CSV delay expires. In
// total, we have mined exactly defaultCSV blocks, so the htlc
// outputs should also reflect that this many blocks have
// passed.
err = checkCommitmentMaturity(
forceClose, commCsvMaturityHeight, 2,
forceClose, commCsvMaturityHeight, 3,
)
if err != nil {
return err
@ -368,9 +315,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
}, defaultTimeout)
require.NoError(ht, err, "timeout while checking force closed channel")
// Generate an additional block, which should cause the CSV delayed
// output from the commitment txn to expire.
ht.MineEmptyBlocks(1)
// Generate two blocks, which should cause the CSV delayed output from
// the commitment txn to expire.
ht.MineBlocks(2)
// At this point, the CSV will expire in the next block, meaning that
// the output should be offered to the sweeper.
@ -380,14 +327,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
commitSweep, anchorSweep = anchorSweep, commitSweep
}
// Restart Alice to ensure that she resumes watching the finalized
// commitment sweep txid.
ht.RestartNode(alice)
// Mine one block and the sweeping transaction should now be broadcast.
// So we fetch the node's mempool to ensure it has been properly
// broadcast.
ht.MineEmptyBlocks(1)
sweepingTXID := ht.AssertNumTxsInMempool(1)[0]
// Fetch the sweep transaction; all inputs it spends should be from
@ -398,7 +340,12 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
"sweep transaction not spending from commit")
}
// We expect a resolution which spends our commit output.
// Restart Alice to ensure that she resumes watching the finalized
// commitment sweep txid.
ht.RestartNode(alice)
// Alice's anchor report won't be created since it's uneconomical to
// sweep. We expect a resolution which spends our commit output.
op = fmt.Sprintf("%v:%v", commitSweep.Outpoint.TxidStr,
commitSweep.Outpoint.OutputIndex)
aliceReports[op] = &lnrpc.Resolution{
@ -409,17 +356,6 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
AmountSat: uint64(aliceBalance),
}
// Add alice's anchor to our expected set of reports.
op = fmt.Sprintf("%v:%v", aliceAnchor.Outpoint.TxidStr,
aliceAnchor.Outpoint.OutputIndex)
aliceReports[op] = &lnrpc.Resolution{
ResolutionType: lnrpc.ResolutionType_ANCHOR,
Outcome: lnrpc.ResolutionOutcome_CLAIMED,
SweepTxid: sweepingTXID.String(),
Outpoint: aliceAnchor.Outpoint,
AmountSat: uint64(anchorSize),
}
// Check that we can find the commitment sweep in our set of known
// sweeps, using the simple transaction id ListSweeps output.
ht.AssertSweepFound(alice, sweepingTXID.String(), false, 0)
@ -489,17 +425,13 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Advance the blockchain until just before the CLTV expires, nothing
// exciting should have happened during this time.
ht.MineEmptyBlocks(cltvHeightDelta)
ht.MineBlocks(cltvHeightDelta)
// We now restart Alice to ensure that she will broadcast the
// presigned htlc timeout txns after the delay expires, having spent a
// while waiting for the htlc outputs to incubate.
ht.RestartNode(alice)
// To give the neutrino backend some time to catch up with the chain,
// we wait here until we have enough UTXOs to
// ht.AssertNumUTXOs(alice, expectedUtxos)
// Alice should now see the channel in her set of pending force closed
// channels with one pending HTLC.
err = wait.NoError(func() error {
@ -534,24 +466,23 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Now, generate the block which will cause Alice to offer the
// presigned htlc timeout txns to the sweeper.
ht.MineEmptyBlocks(1)
ht.MineBlocks(1)
// Since Alice had numInvoices (6) htlcs extended to Carol before force
// closing, we expect Alice to broadcast an htlc timeout txn for each
// one.
ht.AssertNumPendingSweeps(alice, numInvoices)
// one. We also expect Alice to still have her anchor since it's not
// swept.
ht.AssertNumPendingSweeps(alice, numInvoices+1)
// Wait for them all to show up in the mempool
//
// NOTE: after restart, all the htlc timeout txns will be offered to
// the sweeper with `Immediate` set to true, so they won't be
// aggregated.
htlcTxIDs := ht.AssertNumTxsInMempool(numInvoices)
htlcTxIDs := ht.AssertNumTxsInMempool(1)
// Retrieve each htlc timeout txn from the mempool, and ensure it is
// well-formed. This entails verifying that each only spends from
// output, and that output is from the commitment txn.
numInputs := 2
// well-formed. The sweeping tx should spend all the htlc outputs.
//
// NOTE: We also expect 1 extra input, as the outgoing HTLC is swept
// using twice its value as its budget, so a wallet utxo is used.
numInputs := 6 + 1
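
A back-of-the-envelope check on that extra input, assuming the budget policy stated in the NOTE (budget = twice the htlc value) and the runner's paymentAmt:

package main

import "fmt"

func main() {
	const (
		paymentAmt  = 100_000 // sats per htlc, from the test
		numInvoices = 6       // htlcs extended before force close
	)
	budget := 2 * paymentAmt // assumed fee budget per swept htlc

	// The fee budget exceeds what the htlc input alone is worth, so
	// the sweeper attaches one wallet UTXO to be able to fund fees.
	fmt.Println(budget > paymentAmt) // true
	fmt.Println(numInvoices + 1)     // expected inputs: 6 htlcs + 1 utxo
}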
// Construct a map of the already confirmed htlc timeout outpoints that
// will count the number of times each is spent by the sweep txn.
@ -560,6 +491,8 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
var htlcTxOutpointSet = make(map[wire.OutPoint]int)
var htlcLessFees uint64
//nolint:ll
for _, htlcTxID := range htlcTxIDs {
// Fetch the sweep transaction; all inputs it spends should
// be from the commitment transaction which was broadcast
@ -652,10 +585,10 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Generate a block that mines the htlc timeout txns. Doing so now
// activates the 2nd-stage CSV delayed outputs.
ht.MineBlocksAndAssertNumTxes(1, numInvoices)
ht.MineBlocksAndAssertNumTxes(1, 1)
// Alice is restarted here to ensure that she promptly moved the crib
// outputs to the kindergarten bucket after the htlc timeout txns were
// Alice is restarted here to ensure that her contract court properly
// handles the 2nd-stage sweeps after the htlc timeout txns were
// confirmed.
ht.RestartNode(alice)
@ -664,12 +597,19 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
currentHeight = int32(ht.CurrentHeight())
ht.Logf("current height: %v, htlcCsvMaturityHeight=%v", currentHeight,
htlcCsvMaturityHeight)
numBlocks := int(htlcCsvMaturityHeight - uint32(currentHeight) - 2)
ht.MineEmptyBlocks(numBlocks)
numBlocks := int(htlcCsvMaturityHeight - uint32(currentHeight) - 1)
ht.MineBlocks(numBlocks)
// Restart Alice to ensure that she can recover from a failure before
// having graduated the htlc outputs in the kindergarten bucket.
ht.RestartNode(alice)
ht.AssertNumPendingSweeps(alice, numInvoices+1)
// Restart Alice to ensure that she can recover from a failure.
//
// TODO(yy): Skip this step for neutrino as it cannot recover the
// sweeping txns from the mempool. We need to also store the txns in
// the sweeper store to make it work for the neutrino case.
if !ht.IsNeutrinoBackend() {
ht.RestartNode(alice)
}
// Now that the channel has been fully swept, it should no longer show
// incubated, check to see that Alice's node still reports the channel
@ -687,55 +627,13 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
}, defaultTimeout)
require.NoError(ht, err, "timeout while checking force closed channel")
// Generate a block that causes Alice to sweep the htlc outputs in the
// kindergarten bucket.
ht.MineEmptyBlocks(1)
ht.AssertNumPendingSweeps(alice, numInvoices)
// Mine a block to trigger the sweep.
ht.MineEmptyBlocks(1)
// A temp hack to ensure the CI is not blocking the current
// development. There's a known issue in block sync among different
// subsystems, which is scheduled to be fixed in 0.18.1.
if ht.IsNeutrinoBackend() {
// We expect the htlcs to be aggregated into one tx. However,
// due to block sync issue, they may end up in two txns. Here
// we assert that there are two txns found in the mempool - if
// succeeded, it means the aggregation failed, and we won't
// continue the test.
//
// NOTE: we don't check `len(mempool) == 1` because it will
// give us false positive.
err := wait.NoError(func() error {
mempool := ht.Miner().GetRawMempool()
if len(mempool) == 2 {
return nil
}
return fmt.Errorf("expected 2 txes in mempool, found "+
"%d", len(mempool))
}, lntest.DefaultTimeout)
ht.Logf("Assert num of txns got %v", err)
// If there are indeed two txns found in the mempool, we won't
// continue the test.
if err == nil {
ht.Log("Neutrino backend failed to aggregate htlc " +
"sweeps!")
// Clean the mempool.
ht.MineBlocksAndAssertNumTxes(1, 2)
return
}
}
ht.AssertNumPendingSweeps(alice, numInvoices+1)
// Wait for the single sweep txn to appear in the mempool.
htlcSweepTxID := ht.AssertNumTxsInMempool(1)[0]
htlcSweepTxid := ht.AssertNumTxsInMempool(1)[0]
// Fetch the htlc sweep transaction from the mempool.
htlcSweepTx := ht.GetRawTransaction(htlcSweepTxID)
htlcSweepTx := ht.GetRawTransaction(htlcSweepTxid)
// Ensure the htlc sweep transaction only has one input for each htlc
// Alice extended before force closing.
@ -747,6 +645,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Ensure that each input spends from exactly one htlc timeout output.
for _, txIn := range htlcSweepTx.MsgTx().TxIn {
outpoint := txIn.PreviousOutPoint
// Check that the input is a confirmed htlc timeout txn.
_, ok := htlcTxOutpointSet[outpoint]
require.Truef(ht, ok, "htlc sweep output not spending from "+
@ -784,11 +683,11 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Check that we can find the htlc sweep in our set of sweeps using
// the verbose output of the listsweeps output.
ht.AssertSweepFound(alice, htlcSweepTx.Hash().String(), true, 0)
ht.AssertSweepFound(alice, htlcSweepTxid.String(), true, 0)
// The following restart checks to ensure that the nursery store is
// storing the txid of the previously broadcast htlc sweep txn, and
// that it begins watching that txid after restarting.
// The following restart checks to ensure that the sweeper is storing
// the txid of the previously broadcast htlc sweep txn, and that it
// begins watching that txid after restarting.
ht.RestartNode(alice)
// Now that the channel has been fully swept, it should no longer show
@ -804,7 +703,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
}
err = checkPendingHtlcStageAndMaturity(
forceClose, 2, htlcCsvMaturityHeight-1, -1,
forceClose, 2, htlcCsvMaturityHeight-1, 0,
)
if err != nil {
return err
@ -817,7 +716,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
// Generate the final block that sweeps all htlc funds into the user's
// wallet, and make sure the sweep is in this block.
block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
ht.AssertTxInBlock(block, htlcSweepTxID)
ht.AssertTxInBlock(block, htlcSweepTxid)
// Now that the channel has been fully swept, it should no longer show
// up within the pending channels RPC.
@ -846,12 +745,6 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
carolExpectedBalance := btcutil.Amount(carolStartingBalance) +
pushAmt - totalFeeCarol
// In addition, if this is an anchor-enabled channel, further add the
// anchor size.
if lntest.CommitTypeHasAnchors(channelType) {
carolExpectedBalance += btcutil.Amount(anchorSize)
}
require.Equal(ht, carolExpectedBalance,
btcutil.Amount(carolBalResp.ConfirmedBalance),
"carol's balance is incorrect")

View File

@ -1660,6 +1660,22 @@ func (h *HarnessTest) CleanupForceClose(hn *node.HarnessNode) {
// Wait for the channel to be marked pending force close.
h.AssertNumPendingForceClose(hn, 1)
// Mine enough blocks for the node to sweep its funds from the force
// closed channel. The commit sweep resolver offers the input to the
// sweeper when the channel is force closed, and the sweep tx is
// broadcast at defaultCSV-1.
//
// NOTE: we may mine empty blocks here as we don't know the exact number
// of blocks needed; this can end up mining more blocks than necessary.
h.MineEmptyBlocks(node.DefaultCSV - 1)
// Assert there is one pending sweep.
h.AssertNumPendingSweeps(hn, 1)
// The node should now sweep the funds, clean up by mining the sweeping
// tx.
h.MineBlocksAndAssertNumTxes(1, 1)
// Mine blocks to get any second level HTLC resolved. If there are no
// HTLCs, this will behave like h.AssertNumPendingCloseChannels.
h.mineTillForceCloseResolved(hn)
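
Assuming the harness default of node.DefaultCSV = 4 (an assumption, not asserted by this diff), the helper's mining schedule works out roughly as follows:

// Hypothetical timeline for CleanupForceClose, with DefaultCSV = 4:
//
//	block N:         force close tx confirms
//	blocks N+1..N+3: MineEmptyBlocks(DefaultCSV - 1); the commit
//	                 sweep resolver hands the output to the sweeper
//	block N+4:       MineBlocksAndAssertNumTxes(1, 1) mines the sweep
//	afterwards:      mineTillForceCloseResolved mines until the
//	                 channel drops out of the pending-close set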
@ -2001,7 +2017,8 @@ func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
return nil
}
return fmt.Errorf("sweep tx %v not found", sweep)
return fmt.Errorf("sweep tx %v not found in resp %v", sweep,
sweepResp)
}, wait.DefaultTimeout)
require.NoError(h, err, "%s: timeout checking sweep tx", hn.Name())
}

View File

@ -196,7 +196,8 @@ func (h *HarnessTest) mineTillForceCloseResolved(hn *node.HarnessNode) {
return nil
}, DefaultTimeout)
require.NoErrorf(h, err, "assert force close resolved timeout")
require.NoErrorf(h, err, "%s: assert force close resolved timeout",
hn.Name())
}
// AssertTxInMempool asserts a given transaction can be found in the mempool.