itest+lntest: fix channel force close itest

yyforyongyu 2024-03-26 23:07:15 +08:00
parent 6933c5a86c
commit ce58175314
3 changed files with 189 additions and 200 deletions


@@ -16,6 +16,7 @@ import (
     "github.com/lightningnetwork/lnd/lntest"
     "github.com/lightningnetwork/lnd/lntest/node"
     "github.com/lightningnetwork/lnd/lntest/wait"
+    "github.com/lightningnetwork/lnd/lnwallet/chainfee"
     "github.com/lightningnetwork/lnd/routing"
     "github.com/stretchr/testify/require"
 )
@@ -168,9 +169,6 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
     // immediately execute a force closure of the channel. This will also
     // assert that the commitment transaction was immediately broadcast in
     // order to fulfill the force closure request.
-    const actualFeeRate = 30000
-    ht.SetFeeEstimate(actualFeeRate)
-
     ht.CloseChannelAssertPending(alice, chanPoint, true)

     // Now that the channel has been force closed, it should show up in the
@@ -197,54 +195,43 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
     // transaction has been broadcast but not yet confirmed in a block.
     ht.RestartNode(alice)

-    // To give the neutrino backend some time to catch up with the chain, we
-    // wait here until we have enough UTXOs to actually sweep the local and
-    // remote anchor.
+    // To give the neutrino backend some time to catch up with the chain,
+    // we wait here until we have enough UTXOs to actually sweep the local
+    // and remote anchor.
     const expectedUtxos = 2
     ht.AssertNumUTXOs(alice, expectedUtxos)

-    // Mine a block which should confirm the commitment transaction
-    // broadcast as a result of the force closure. If there are anchors, we
-    // also expect the anchor sweep tx to be in the mempool.
-    expectedTxes := 1
-    expectedFeeRate := commitFeeRate
-    if lntest.CommitTypeHasAnchors(channelType) {
-        expectedTxes = 2
-        expectedFeeRate = actualFeeRate
-    }
-
-    sweepTxns := ht.Miner.GetNumTxsFromMempool(expectedTxes)
-
-    // Verify fee rate of the commitment tx plus anchor if present.
-    feeRate := ht.CalculateTxesFeeRate(sweepTxns)
-
-    // Allow some deviation because weight estimates during tx generation
-    // are estimates.
-    require.InEpsilonf(ht, expectedFeeRate, feeRate, 0.005, "fee rate not "+
-        "match: want %v, got %v", expectedFeeRate, feeRate)
-
-    // Find alice's commit sweep and anchor sweep (if present) in the
-    // mempool.
-    aliceCloseTx := waitingClose.Commitments.LocalTxid
-    _, aliceAnchor := ht.FindCommitAndAnchor(sweepTxns, aliceCloseTx)
-
-    // If we expect anchors, add alice's anchor to our expected set of
-    // reports.
-    if lntest.CommitTypeHasAnchors(channelType) {
-        aliceReports[aliceAnchor.OutPoint.String()] = &lnrpc.Resolution{
-            ResolutionType: lnrpc.ResolutionType_ANCHOR,
-            Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
-            SweepTxid:      aliceAnchor.SweepTx.TxHash().String(),
-            Outpoint: &lnrpc.OutPoint{
-                TxidBytes:   aliceAnchor.OutPoint.Hash[:],
-                TxidStr:     aliceAnchor.OutPoint.Hash.String(),
-                OutputIndex: aliceAnchor.OutPoint.Index,
-            },
-            AmountSat: uint64(anchorSize),
-        }
-    }
-
-    ht.MineBlocks(1)
+    // We expect to see Alice's force close tx in the mempool.
+    ht.Miner.GetNumTxsFromMempool(1)
+
+    // Assert Alice has the pending anchor sweeps - one for the local
+    // commitment and the other for the remote (invalid) one.
+    sweeps := ht.AssertNumPendingSweeps(alice, 2)
+    aliceAnchor := sweeps[0]
+    if aliceAnchor.Outpoint.TxidStr != waitingClose.Commitments.LocalTxid {
+        aliceAnchor = sweeps[1]
+    }
+    require.Equal(ht, aliceAnchor.Outpoint.TxidStr,
+        waitingClose.Commitments.LocalTxid)
+
+    // Mine a block which should confirm the commitment transaction
+    // broadcast as a result of the force closure. Once mined, we also
+    // expect Alice's anchor sweeping tx to be published.
+    ht.MineBlocksAndAssertNumTxes(1, 1)
+
+    // Assert Alice's anchor sweeping tx is found in the mempool.
+    aliceSweepTxid := ht.Miner.AssertNumTxsInMempool(1)[0]
+
+    // Add Alice's anchor to our expected set of reports.
+    op := fmt.Sprintf("%v:%v", aliceAnchor.Outpoint.TxidStr,
+        aliceAnchor.Outpoint.OutputIndex)
+    aliceReports[op] = &lnrpc.Resolution{
+        ResolutionType: lnrpc.ResolutionType_ANCHOR,
+        Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
+        SweepTxid:      aliceSweepTxid.String(),
+        Outpoint:       aliceAnchor.Outpoint,
+        AmountSat:      uint64(anchorSize),
+    }

     // Now that the commitment has been confirmed, the channel should be
     // marked as force closed.
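
The local-anchor selection above depends only on comparing each pending sweep's outpoint txid against the local commitment txid. A standalone sketch of that pattern, with a hypothetical `pendingSweep` struct standing in for the `walletrpc.PendingSweep` values returned by `AssertNumPendingSweeps`:

```go
package main

import "fmt"

// pendingSweep is a minimal, hypothetical stand-in for
// walletrpc.PendingSweep, carrying only the fields the selection needs.
type pendingSweep struct {
	TxidStr     string
	OutputIndex uint32
}

// pickLocalAnchor returns the sweep whose outpoint txid matches the local
// commitment txid, mirroring the sweeps[0]/sweeps[1] check in the test.
func pickLocalAnchor(sweeps [2]pendingSweep, localTxid string) pendingSweep {
	anchor := sweeps[0]
	if anchor.TxidStr != localTxid {
		anchor = sweeps[1]
	}
	return anchor
}

func main() {
	sweeps := [2]pendingSweep{
		{TxidStr: "aaaa", OutputIndex: 0}, // remote (invalid) anchor
		{TxidStr: "bbbb", OutputIndex: 1}, // local anchor
	}
	fmt.Println(pickLocalAnchor(sweeps, "bbbb")) // {bbbb 1}
}
```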
@@ -262,20 +249,13 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
             return err
         }

-        // None of our outputs have been swept, so they should all be in
-        // limbo. For anchors, we expect the anchor amount to be
-        // recovered.
+        // None of our outputs have been swept, so they should all be
+        // in limbo.
         if forceClose.LimboBalance == 0 {
-            return errors.New("all funds should still be in " +
-                "limbo")
+            return errors.New("all funds should still be in limbo")
         }
-        expectedRecoveredBalance := int64(0)
-        if lntest.CommitTypeHasAnchors(channelType) {
-            expectedRecoveredBalance = anchorSize
-        }
-        if forceClose.RecoveredBalance != expectedRecoveredBalance {
-            return errors.New("no funds should yet be shown " +
-                "as recovered")
+        if forceClose.RecoveredBalance != 0 {
+            return errors.New("no funds should be recovered")
         }

         return nil
@@ -288,52 +268,61 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
     // (the "kindergarten" bucket.)
     ht.RestartNode(alice)

-    // Carol should have pending sweeps now.
-    ht.AssertNumPendingSweeps(carol, expectedTxes)
+    // Carol should offer her commit and anchor outputs to the sweeper.
+    sweepTxns := ht.AssertNumPendingSweeps(carol, 2)

-    // Mine a block to trigger the sweep transactions.
-    blocksMined := int32(1)
-    ht.MineBlocks(1)
+    // Find Carol's anchor sweep.
+    var carolAnchor, carolCommit = sweepTxns[0], sweepTxns[1]
+    if carolAnchor.AmountSat != uint32(anchorSize) {
+        carolAnchor, carolCommit = carolCommit, carolAnchor
+    }
+
+    // Mine a block to trigger Carol's sweeper to make decisions on the
+    // anchor sweeping. This block will also confirm Alice's anchor
+    // sweeping tx, as her anchor is used for CPFP because there are
+    // time-sensitive HTLCs.
+    ht.MineBlocksAndAssertNumTxes(1, 1)

     // Carol's sweep tx should be in the mempool already, as her output is
-    // not timelocked. If there are anchors, we also expect Carol's anchor
-    // sweep now.
-    sweepTxns = ht.Miner.GetNumTxsFromMempool(expectedTxes)
+    // not timelocked.
+    carolTx := ht.Miner.GetNumTxsFromMempool(1)[0]
+
+    // Carol's sweeping tx should have 2-input-1-output shape.
+    require.Len(ht, carolTx.TxIn, 2)
+    require.Len(ht, carolTx.TxOut, 1)

     // Calculate the total fee Carol paid.
-    var totalFeeCarol btcutil.Amount
-    for _, tx := range sweepTxns {
-        fee := ht.CalculateTxFee(tx)
-        totalFeeCarol += fee
-    }
-
-    // We look up the sweep txns we have found in mempool and create
-    // expected resolutions for carol.
-    carolCommit, carolAnchor := ht.FindCommitAndAnchor(
-        sweepTxns, aliceCloseTx,
-    )
+    totalFeeCarol := ht.CalculateTxFee(carolTx)

-    // If we have anchors, add an anchor resolution for carol.
-    if lntest.CommitTypeHasAnchors(channelType) {
-        carolReports[carolAnchor.OutPoint.String()] = &lnrpc.Resolution{
-            ResolutionType: lnrpc.ResolutionType_ANCHOR,
-            Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
-            SweepTxid:      carolAnchor.SweepTx.TxHash().String(),
-            AmountSat:      anchorSize,
-            Outpoint: &lnrpc.OutPoint{
-                TxidBytes:   carolAnchor.OutPoint.Hash[:],
-                TxidStr:     carolAnchor.OutPoint.Hash.String(),
-                OutputIndex: carolAnchor.OutPoint.Index,
-            },
-        }
-    }
+    // Add an anchor resolution for carol.
+    op = fmt.Sprintf("%v:%v", carolAnchor.Outpoint.TxidStr,
+        carolAnchor.Outpoint.OutputIndex)
+    carolReports[op] = &lnrpc.Resolution{
+        ResolutionType: lnrpc.ResolutionType_ANCHOR,
+        Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
+        SweepTxid:      carolTx.TxHash().String(),
+        AmountSat:      anchorSize,
+        Outpoint:       carolAnchor.Outpoint,
+    }
+
+    op = fmt.Sprintf("%v:%v", carolCommit.Outpoint.TxidStr,
+        carolCommit.Outpoint.OutputIndex)
+    carolReports[op] = &lnrpc.Resolution{
+        ResolutionType: lnrpc.ResolutionType_COMMIT,
+        Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
+        Outpoint:       carolCommit.Outpoint,
+        AmountSat:      uint64(pushAmt),
+        SweepTxid:      carolTx.TxHash().String(),
+    }

     // Currently within the codebase, the default CSV is 4 relative blocks.
-    // For the persistence test, we generate two blocks, then trigger
-    // a restart and then generate the final block that should trigger
-    // the creation of the sweep transaction.
-    ht.MineBlocks(1)
-    blocksMined++
+    // For the persistence test, we generate two blocks, then trigger a
+    // restart and then generate the final block that should trigger the
+    // creation of the sweep transaction.
+    //
+    // We also expect Carol to broadcast her sweeping tx which spends her
+    // commit and anchor outputs.
+    ht.MineBlocksAndAssertNumTxes(1, 1)

     // The following restart checks to ensure that outputs in the
     // kindergarten bucket are persisted while waiting for the required
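
Both report maps are keyed by an outpoint rendered as `txid:index` via `fmt.Sprintf("%v:%v", ...)`. A minimal sketch of that keying scheme, with a hypothetical `outpoint` struct standing in for `lnrpc.OutPoint`:

```go
package main

import "fmt"

// outpoint is a hypothetical stand-in for lnrpc.OutPoint.
type outpoint struct {
	TxidStr     string
	OutputIndex uint32
}

// opKey renders an outpoint as the "txid:index" string the test uses as a
// map key for expected resolutions.
func opKey(op outpoint) string {
	return fmt.Sprintf("%v:%v", op.TxidStr, op.OutputIndex)
}

func main() {
	reports := map[string]string{}
	op := outpoint{TxidStr: "deadbeef", OutputIndex: 1}
	reports[opKey(op)] = "ANCHOR/CLAIMED"
	fmt.Println(reports) // map[deadbeef:1:ANCHOR/CLAIMED]
}
```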
@@ -365,8 +354,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
         // outputs should also reflect that this many blocks have
         // passed.
         err = checkCommitmentMaturity(
-            forceClose, commCsvMaturityHeight,
-            defaultCSV-blocksMined,
+            forceClose, commCsvMaturityHeight, 2,
         )
         if err != nil {
             return err
@@ -377,13 +365,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
             return errors.New("all funds should still be in " +
                 "limbo")
         }
-        expectedRecoveredBalance := int64(0)
-        if lntest.CommitTypeHasAnchors(channelType) {
-            expectedRecoveredBalance = anchorSize
-        }
-        if forceClose.RecoveredBalance != expectedRecoveredBalance {
-            return errors.New("no funds should yet be shown " +
-                "as recovered")
+        if forceClose.RecoveredBalance != anchorSize {
+            return fmt.Errorf("expected %v to be recovered",
+                anchorSize)
         }

         return nil
@@ -392,16 +376,20 @@ func channelForceClosureTest(ht *lntest.HarnessTest,

     // Generate an additional block, which should cause the CSV delayed
     // output from the commitment txn to expire.
-    ht.MineBlocks(1)
+    ht.MineEmptyBlocks(1)

     // At this point, the CSV will expire in the next block, meaning that
     // the output should be offered to the sweeper.
-    ht.AssertNumPendingSweeps(alice, 1)
+    aliceCommit := ht.AssertNumPendingSweeps(alice, 1)[0]
+
+    // Restart Alice to ensure that she resumes watching the finalized
+    // commitment sweep txid.
+    ht.RestartNode(alice)

     // Mine one block and the sweeping transaction should now be broadcast.
     // So we fetch the node's mempool to ensure it has been properly
     // broadcast.
-    ht.MineBlocks(1)
+    ht.MineEmptyBlocks(1)
     sweepingTXID := ht.Miner.AssertNumTxsInMempool(1)[0]

     // Fetch the sweep transaction, all input it's spending should be from
@@ -413,44 +401,24 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
     }

     // We expect a resolution which spends our commit output.
-    output := sweepTx.MsgTx().TxIn[0].PreviousOutPoint
-    aliceReports[output.String()] = &lnrpc.Resolution{
+    op = fmt.Sprintf("%v:%v", aliceCommit.Outpoint.TxidStr,
+        aliceCommit.Outpoint.OutputIndex)
+    aliceReports[op] = &lnrpc.Resolution{
         ResolutionType: lnrpc.ResolutionType_COMMIT,
         Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
         SweepTxid:      sweepingTXID.String(),
-        Outpoint: &lnrpc.OutPoint{
-            TxidBytes:   output.Hash[:],
-            TxidStr:     output.Hash.String(),
-            OutputIndex: output.Index,
-        },
+        Outpoint:       aliceCommit.Outpoint,
         AmountSat:      uint64(aliceBalance),
     }

-    carolReports[carolCommit.OutPoint.String()] = &lnrpc.Resolution{
-        ResolutionType: lnrpc.ResolutionType_COMMIT,
-        Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
-        Outpoint: &lnrpc.OutPoint{
-            TxidBytes:   carolCommit.OutPoint.Hash[:],
-            TxidStr:     carolCommit.OutPoint.Hash.String(),
-            OutputIndex: carolCommit.OutPoint.Index,
-        },
-        AmountSat: uint64(pushAmt),
-        SweepTxid: carolCommit.SweepTx.TxHash().String(),
-    }
-
     // Check that we can find the commitment sweep in our set of known
     // sweeps, using the simple transaction id ListSweeps output.
     ht.AssertSweepFound(alice, sweepingTXID.String(), false, 0)

-    // Restart Alice to ensure that she resumes watching the finalized
-    // commitment sweep txid.
-    ht.RestartNode(alice)
-
     // Next, we mine an additional block which should include the sweep
     // transaction as the input scripts and the sequence locks on the
     // inputs should be properly met.
-    block := ht.MineBlocks(1)[0]
-    ht.Miner.AssertTxInBlock(block, sweepTx.Hash())
+    ht.MineBlocksAndAssertNumTxes(1, 1)

     // Update current height
     _, curHeight = ht.Miner.GetBestBlock()
@@ -499,8 +467,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,

         return nil
     }, defaultTimeout)
-    require.NoError(ht, err, "timeout checking pending "+
-        "force close channel")
+    require.NoError(ht, err, "timeout checking pending force close channel")

     // Compute the height preceding that which will cause the htlc CLTV
     // timeouts will expire. The outputs entered at the same height as the
@@ -508,24 +475,16 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
     // number of blocks we have generated since adding it to the nursery,
     // and take an additional block off so that we end up one block shy of
    // the expiry height, and add the block padding.
-    cltvHeightDelta := padCLTV(defaultCLTV - defaultCSV - 1 - 1 - 1)
+    _, currentHeight := ht.Miner.GetBestBlock()
+    cltvHeightDelta := int(htlcExpiryHeight - uint32(currentHeight) - 1)
+
+    // NOTE: the rest of the test would only pass if we remove the `Force`
+    // flag used in sweeping HTLCs, otherwise an immediate sweep will be
+    // attempted due to being forced. This flag will be removed once we can
+    // conditionally cancel back upstream htlcs to avoid cascading FCs.
+    ht.Shutdown(alice)
+    ht.Shutdown(carol)
+    ht.MineBlocksAndAssertNumTxes(1, 0)
+    ht.Skip("Skipping until force flags are removed")

     // Advance the blockchain until just before the CLTV expires, nothing
     // exciting should have happened during this time.
-    ht.MineBlocks(cltvHeightDelta)
+    ht.MineEmptyBlocks(cltvHeightDelta)

-    // We now restart Alice, to ensure that she will broadcast the presigned
-    // htlc timeout txns after the delay expires after experiencing a while
-    // waiting for the htlc outputs to incubate.
+    // We now restart Alice, to ensure that she will broadcast the
+    // presigned htlc timeout txns after the delay expires after
+    // experiencing a while waiting for the htlc outputs to incubate.
     ht.RestartNode(alice)

     // Alice should now see the channel in her set of pending force closed
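
The new delta computation replaces constant-based arithmetic with absolute heights: mine exactly enough blocks to land one block shy of the HTLC expiry height. A small sketch of the arithmetic, assuming the expiry height and the current tip height are known:

```go
package main

import "fmt"

// blocksUntilOneShyOf returns how many blocks to mine so the chain tip ends
// up one block before the target expiry height, matching the diff's
// htlcExpiryHeight - uint32(currentHeight) - 1 expression.
func blocksUntilOneShyOf(expiry uint32, current int32) int {
	return int(expiry - uint32(current) - 1)
}

func main() {
	// E.g. expiry at height 520 with the tip at 500: mine 19 blocks to
	// reach 519, one block shy of expiry.
	fmt.Println(blocksUntilOneShyOf(520, 500)) // 19
}
```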
@@ -553,8 +512,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
         // All htlc outputs are still left in limbo, so it should be
         // non-zero as well.
         if forceClose.LimboBalance == 0 {
-            return errors.New("htlc funds should still be in " +
-                "limbo")
+            return errors.New("htlc funds should still be in limbo")
         }

         return nil
@@ -563,34 +521,26 @@ func channelForceClosureTest(ht *lntest.HarnessTest,
     // Now, generate the block which will cause Alice to offer the
     // presigned htlc timeout txns to the sweeper.
-    ht.MineBlocks(1)
+    ht.MineEmptyBlocks(1)

     // Since Alice had numInvoices (6) htlcs extended to Carol before force
     // closing, we expect Alice to broadcast an htlc timeout txn for each
     // one.
-    expectedTxes = numInvoices
     ht.AssertNumPendingSweeps(alice, numInvoices)

-    // In case of anchors, the timeout txs will be aggregated into one.
-    if lntest.CommitTypeHasAnchors(channelType) {
-        expectedTxes = 1
-    }
-
     // Mine a block to trigger the sweeps.
-    ht.MineBlocks(1)
+    ht.MineEmptyBlocks(1)

-    // Wait for them all to show up in the mempool.
-    htlcTxIDs := ht.Miner.AssertNumTxsInMempool(expectedTxes)
+    // Wait for them all to show up in the mempool and expect the timeout
+    // txs to be aggregated into one.
+    htlcTxIDs := ht.Miner.AssertNumTxsInMempool(1)

     // Retrieve each htlc timeout txn from the mempool, and ensure it is
     // well-formed. This entails verifying that each only spends from
     // output, and that output is from the commitment txn. In case this is
     // an anchor channel, the transactions are aggregated by the sweeper
     // into one.
-    numInputs := 1
-    if lntest.CommitTypeHasAnchors(channelType) {
-        numInputs = numInvoices + 1
-    }
+    numInputs := numInvoices + 1

     // Construct a map of the already confirmed htlc timeout outpoints,
     // that will count the number of times each is spent by the sweep txn.
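
The spend-counting map the comment describes can be sketched on its own: seed a map with the expected outpoints, then tally every sweep input that matches, so each outpoint can be asserted to be spent exactly once. A hypothetical `outPoint` type stands in for `wire.OutPoint`:

```go
package main

import "fmt"

// outPoint is a hypothetical stand-in for wire.OutPoint.
type outPoint struct {
	Txid  string
	Index uint32
}

// countSpends tallies how many times each expected outpoint appears among a
// transaction's inputs; unrelated inputs are ignored.
func countSpends(expected, inputs []outPoint) map[outPoint]int {
	counts := make(map[outPoint]int, len(expected))
	for _, op := range expected {
		counts[op] = 0
	}
	for _, in := range inputs {
		if _, ok := counts[in]; ok {
			counts[in]++
		}
	}
	return counts
}

func main() {
	exp := []outPoint{{"c0ffee", 0}, {"c0ffee", 1}}
	ins := []outPoint{{"c0ffee", 0}, {"c0ffee", 1}, {"aaaa", 0}}
	fmt.Println(countSpends(exp, ins)) // both expected outpoints spent once
}
```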
@@ -691,7 +641,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,

     // Generate a block that mines the htlc timeout txns. Doing so now
     // activates the 2nd-stage CSV delayed outputs.
-    ht.MineBlocks(1)
+    ht.MineBlocksAndAssertNumTxes(1, 1)

     // Alice is restarted here to ensure that she promptly moved the crib
     // outputs to the kindergarten bucket after the htlc timeout txns were
@@ -700,11 +650,9 @@ func channelForceClosureTest(ht *lntest.HarnessTest,

     // Advance the chain until just before the 2nd-layer CSV delays expire.
     // For anchor channels this is one block earlier.
-    numBlocks := uint32(defaultCSV - 1)
-    if lntest.CommitTypeHasAnchors(channelType) {
-        numBlocks = defaultCSV - 2
-    }
-    ht.MineBlocks(numBlocks)
+    _, currentHeight = ht.Miner.GetBestBlock()
+    numBlocks := int(htlcCsvMaturityHeight - uint32(currentHeight) - 1)
+    ht.MineEmptyBlocks(numBlocks)

     // Restart Alice to ensure that she can recover from a failure before
     // having graduated the htlc outputs in the kindergarten bucket.
@@ -728,11 +676,11 @@ func channelForceClosureTest(ht *lntest.HarnessTest,

     // Generate a block that causes Alice to sweep the htlc outputs in the
     // kindergarten bucket.
-    ht.MineBlocks(1)
-    ht.AssertNumPendingSweeps(alice, 6)
+    ht.MineEmptyBlocks(1)
+    ht.AssertNumPendingSweeps(alice, numInvoices)

     // Mine a block to trigger the sweep.
-    ht.MineBlocks(1)
+    ht.MineEmptyBlocks(1)

     // Wait for the single sweep txn to appear in the mempool.
     htlcSweepTxID := ht.Miner.AssertNumTxsInMempool(1)[0]
@@ -819,7 +767,7 @@ func channelForceClosureTest(ht *lntest.HarnessTest,

     // Generate the final block that sweeps all htlc funds into the user's
     // wallet, and make sure the sweep is in this block.
-    block = ht.MineBlocksAndAssertNumTxes(1, 1)[0]
+    block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
     ht.Miner.AssertTxInBlock(block, htlcSweepTxID)

     // Now that the channel has been fully swept, it should no longer show
@@ -926,21 +874,31 @@ func testFailingChannel(ht *lntest.HarnessTest) {
     ht.AssertNumPendingForceClose(carol, 1)

     // Carol will use the correct preimage to resolve the HTLC on-chain.
+    ht.AssertNumPendingSweeps(carol, 1)
+
+    // Bring down the fee rate estimation, otherwise the following sweep
+    // won't happen.
+    ht.SetFeeEstimate(chainfee.FeePerKwFloor)
+
+    // Mine a block to trigger Carol's sweeper to broadcast the sweeping
+    // tx.
+    ht.MineEmptyBlocks(1)
+
+    // Carol should have broadcast her sweeping tx.
     ht.Miner.AssertNumTxsInMempool(1)

-    // Mine enough blocks for Alice to sweep her funds from the force
-    // closed channel.
-    ht.MineBlocks(defaultCSV - 1)
-
+    // Mine two blocks to confirm Carol's sweeping tx. By now Alice's
+    // commit output should be offered to her sweeper.
+    ht.MineBlocksAndAssertNumTxes(2, 1)
+
+    // Alice should have one pending sweep request for her commit output.
     ht.AssertNumPendingSweeps(alice, 1)

     // Mine a block to trigger the sweep.
-    ht.MineBlocks(1)
+    ht.MineEmptyBlocks(1)

-    // Wait for the sweeping tx to be broadcast.
-    ht.Miner.AssertNumTxsInMempool(1)
-
-    // Mine the sweep.
-    ht.MineBlocks(1)
+    // Mine Alice's sweeping tx.
+    ht.MineBlocksAndAssertNumTxes(1, 1)

     // No pending channels should be left.
     ht.AssertNumPendingForceClose(alice, 0)
@@ -966,18 +924,26 @@ func assertReports(ht *lntest.HarnessTest, hn *node.HarnessNode,
             break
         }
     }

     require.NotNil(ht, resolutions)
-    require.Equal(ht, len(expected), len(resolutions))
+
+    // Copy the expected resolutions so we can remove them as we find them.
+    notFound := make(map[string]*lnrpc.Resolution)
+    for k, v := range expected {
+        notFound[k] = v
+    }

     for _, res := range resolutions {
         outPointStr := fmt.Sprintf("%v:%v", res.Outpoint.TxidStr,
             res.Outpoint.OutputIndex)

-        expected, ok := expected[outPointStr]
-        require.True(ht, ok)
-        require.Equal(ht, expected, res)
+        require.Contains(ht, expected, outPointStr)
+        require.Equal(ht, expected[outPointStr], res)
+
+        delete(notFound, outPointStr)
     }
+
+    // We should have found all the resolutions.
+    require.Empty(ht, notFound)
 }

 // checkCommitmentMaturity checks that both the maturity height and blocks
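
The `notFound` bookkeeping turns the check into an exact set-equality assertion: every reported resolution must be expected, and every expected resolution must be consumed. A generic, self-contained sketch of the pattern:

```go
package main

import "fmt"

// assertSetEqual reports whether actual matches expected exactly, using the
// same copy-then-delete bookkeeping as assertReports.
func assertSetEqual(expected, actual map[string]string) error {
	notFound := make(map[string]string, len(expected))
	for k, v := range expected {
		notFound[k] = v
	}
	for k, v := range actual {
		want, ok := expected[k]
		if !ok {
			return fmt.Errorf("unexpected entry %q", k)
		}
		if want != v {
			return fmt.Errorf("entry %q: want %q, got %q", k, want, v)
		}
		delete(notFound, k)
	}
	if len(notFound) != 0 {
		return fmt.Errorf("missing entries: %v", notFound)
	}
	return nil
}

func main() {
	exp := map[string]string{"a:0": "COMMIT", "b:1": "ANCHOR"}
	got := map[string]string{"a:0": "COMMIT", "b:1": "ANCHOR"}
	fmt.Println(assertSetEqual(exp, got)) // <nil>
}
```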


@@ -42,6 +42,10 @@ const (
     // lndErrorChanSize specifies the buffer size used to receive errors
     // from lnd process.
     lndErrorChanSize = 10
+
+    // maxBlocksAllowed specifies the max allowed value to be used when
+    // mining blocks.
+    maxBlocksAllowed = 100
 )

 // TestCase defines a test case that's been used in the integration test.
@@ -1701,6 +1705,9 @@ func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
 // NOTE: this differs from miner's `MineBlocks` as it requires the nodes to be
 // synced.
 func (h *HarnessTest) MineBlocks(num uint32) []*wire.MsgBlock {
+    require.Less(h, num, uint32(maxBlocksAllowed),
+        "too many blocks to mine")
+
     // Mining the blocks slow to give `lnd` more time to sync.
     blocks := h.Miner.MineBlocksSlow(num)
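
The cap mainly guards against bad height arithmetic: now that block counts are computed from `GetBestBlock`, a stale or already-passed target can underflow an unsigned subtraction and ask the miner for billions of blocks. A sketch of the failure mode the guard catches, reusing the `maxBlocksAllowed` value from the diff:

```go
package main

import (
	"errors"
	"fmt"
)

const maxBlocksAllowed = 100

// checkMineCount rejects block counts at or above the cap, equivalent in
// spirit to the require.Less guard added to the harness.
func checkMineCount(num uint32) error {
	if num >= maxBlocksAllowed {
		return errors.New("too many blocks to mine")
	}
	return nil
}

func main() {
	// Target height already behind the tip: the uint32 subtraction
	// underflows to a huge count, which the guard rejects.
	var current, target uint32 = 510, 500
	fmt.Println(checkMineCount(target - current - 1))
}
```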
@@ -1789,6 +1796,8 @@ func (h *HarnessTest) CleanShutDown() {
 // NOTE: this differs from miner's `MineEmptyBlocks` as it requires the nodes
 // to be synced.
 func (h *HarnessTest) MineEmptyBlocks(num int) []*wire.MsgBlock {
+    require.Less(h, num, maxBlocksAllowed, "too many blocks to mine")
+
     blocks := h.Miner.MineEmptyBlocks(num)

     // Finally, make sure all the active nodes are synced.
@@ -2087,6 +2096,7 @@ func (h *HarnessTest) FindCommitAndAnchor(sweepTxns []*wire.MsgTx,
 func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
     sweep string, verbose bool, startHeight int32) {

+    err := wait.NoError(func() error {
         // List all sweeps that alice's node had broadcast.
         sweepResp := hn.RPC.ListSweeps(verbose, startHeight)
@@ -2097,7 +2107,13 @@ func (h *HarnessTest) AssertSweepFound(hn *node.HarnessNode,
             found = findSweepInTxids(h, sweep, sweepResp)
         }

-    require.Truef(h, found, "%s: sweep: %v not found", sweep, hn.Name())
+        if found {
+            return nil
+        }
+
+        return fmt.Errorf("sweep tx %v not found", sweep)
+    }, wait.DefaultTimeout)
+
+    require.NoError(h, err, "%s: timeout checking sweep tx", hn.Name())
 }

 func findSweepInTxids(ht *HarnessTest, sweepTxid string,
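
Wrapping the lookup in `wait.NoError` converts a one-shot assertion into a poll that retries until the timeout elapses. A self-contained sketch of that retry loop, with a hypothetical `waitNoError` mirroring what `lntest/wait.NoError` provides (the real helper's internals may differ):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitNoError polls check until it returns nil or the timeout elapses,
// returning the last error on timeout.
func waitNoError(check func() error, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	var lastErr error
	for time.Now().Before(deadline) {
		if lastErr = check(); lastErr == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("timeout: %w", lastErr)
}

func main() {
	tries := 0
	err := waitNoError(func() error {
		tries++
		if tries < 3 {
			return errors.New("sweep tx not found")
		}
		return nil
	}, time.Second)
	fmt.Println(err, tries) // <nil> 3
}
```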


@@ -2594,12 +2594,17 @@ func (h *HarnessTest) AssertWalletLockedBalance(hn *node.HarnessNode,

 // AssertNumPendingSweeps asserts the number of pending sweeps for the given
 // node.
-func (h *HarnessTest) AssertNumPendingSweeps(hn *node.HarnessNode, n int) {
+func (h *HarnessTest) AssertNumPendingSweeps(hn *node.HarnessNode,
+    n int) []*walletrpc.PendingSweep {
+
+    results := make([]*walletrpc.PendingSweep, 0, n)
+
     err := wait.NoError(func() error {
         resp := hn.RPC.PendingSweeps()
         num := len(resp.PendingSweeps)

         if num == n {
+            results = resp.PendingSweeps
             return nil
         }
@@ -2614,6 +2619,8 @@ func (h *HarnessTest) AssertNumPendingSweeps(hn *node.HarnessNode, n int) {
     }, DefaultTimeout)

     require.NoErrorf(h, err, "%s: check pending sweeps timeout", hn.Name())
+
+    return results
 }

 // FindSweepingTxns asserts the expected number of sweeping txns are found in
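
With the new `[]*walletrpc.PendingSweep` return value, callers can assert the count and inspect the returned sweeps in one call, as the force close test now does. A hypothetical helper sketching that usage; the name and the classify-by-amount step are illustrative, not part of the commit:

```go
package lntest

import (
	"github.com/btcsuite/btcd/btcutil"
	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/lightningnetwork/lnd/lntest/node"
)

// assertAnchorAndCommit is a hypothetical helper: assert a node has exactly
// two pending sweeps, then classify them as anchor vs commit by amount,
// mirroring the carolAnchor/carolCommit swap in the force close test.
func assertAnchorAndCommit(ht *HarnessTest, hn *node.HarnessNode,
	anchorSize btcutil.Amount) (anchor, commit *walletrpc.PendingSweep) {

	sweeps := ht.AssertNumPendingSweeps(hn, 2)

	anchor, commit = sweeps[0], sweeps[1]
	if anchor.AmountSat != uint32(anchorSize) {
		anchor, commit = commit, anchor
	}

	return anchor, commit
}
```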