From 5a7c6cea0236b6a7317b955708d81c5f02bae3b5 Mon Sep 17 00:00:00 2001
From: yyforyongyu
Date: Fri, 29 Jul 2022 01:24:54 +0800
Subject: [PATCH] lntest+lntemp: refactor `testMultiHopHtlcLocalTimeout`

---
 lntemp/harness.go                            |  29 ++-
 lntemp/harness_assertion.go                  |  78 ++++++++
 lntemp/harness_miner.go                      |  34 ++++
 .../lnd_multi-hop_htlc_local_timeout_test.go | 174 ++++++------------
 lntest/itest/lnd_multi-hop_test.go           |  12 +-
 5 files changed, 206 insertions(+), 121 deletions(-)

diff --git a/lntemp/harness.go b/lntemp/harness.go
index 3c0afdae6..6b26b1d20 100644
--- a/lntemp/harness.go
+++ b/lntemp/harness.go
@@ -434,7 +434,14 @@ func (h *HarnessTest) SuspendNode(node *node.HarnessNode) func() error {
 	err := node.Stop()
 	require.NoErrorf(h, err, "failed to stop %s", node.Name())
 
-	return func() error { return node.Start(h.runCtx) }
+	// Remove the node from active nodes.
+	delete(h.manager.activeNodes, node.Cfg.NodeID)
+
+	return func() error {
+		h.manager.registerNode(node)
+
+		return node.Start(h.runCtx)
+	}
 }
 
 // RestartNode restarts a given node and asserts.
@@ -1160,3 +1167,23 @@ func (h *HarnessTest) RestartNodeAndRestoreDB(hn *node.HarnessNode) {
 	// continue with the tests.
 	h.WaitForBlockchainSync(hn)
 }
+
+// MineBlocksAssertNodesSync mines blocks and asserts all active nodes have
+// synced to the chain. Use this method when more than 3 blocks are mined to
+// make sure the nodes stay synced.
+//
+// TODO(yy): replace direct mining with this one.
+func (h *HarnessTest) MineBlocksAssertNodesSync(num uint32) {
+	// If we are mining more than 3 blocks, use the slow mining.
+	if num > 3 {
+		h.Miner.MineBlocksSlow(num)
+	} else {
+		// Mine the blocks.
+		h.Miner.MineBlocks(num)
+	}
+
+	// Make sure all the active nodes are synced.
+	for _, node := range h.manager.activeNodes {
+		h.WaitForBlockchainSync(node)
+	}
+}
diff --git a/lntemp/harness_assertion.go b/lntemp/harness_assertion.go
index c0758a134..f8a602e18 100644
--- a/lntemp/harness_assertion.go
+++ b/lntemp/harness_assertion.go
@@ -3,6 +3,7 @@ package lntemp
 import (
 	"context"
 	"crypto/rand"
+	"encoding/hex"
 	"fmt"
 	"math"
 	"strings"
@@ -961,3 +962,80 @@ func (h *HarnessTest) AssertChannelNumUpdates(hn *node.HarnessNode,
 	}, DefaultTimeout)
 	require.NoError(h, err, "timeout while checking for num of updates")
 }
+
+// AssertNumActiveHtlcs asserts that a given number of HTLCs are seen in the
+// node's channels.
+func (h *HarnessTest) AssertNumActiveHtlcs(hn *node.HarnessNode, num int) {
+	old := hn.State.HTLC
+
+	err := wait.NoError(func() error {
+		// We require the RPC call to succeed, and won't wait for it
+		// since a failure is unexpected behavior.
+		req := &lnrpc.ListChannelsRequest{}
+		nodeChans := hn.RPC.ListChannels(req)
+
+		total := 0
+		for _, channel := range nodeChans.Channels {
+			total += len(channel.PendingHtlcs)
+		}
+		if total-old != num {
+			return errNumNotMatched(hn.Name(), "active HTLCs",
+				num, total-old, total, old)
+		}
+
+		return nil
+	}, DefaultTimeout)
+
+	require.NoErrorf(h, err, "%s timeout checking num active htlcs",
+		hn.Name())
+}
+
+// AssertActiveHtlcs makes sure the node has the _exact_ HTLCs matching
+// payHashes on _all_ its channels.
+func (h *HarnessTest) AssertActiveHtlcs(hn *node.HarnessNode,
+	payHashes ...[]byte) {
+
+	err := wait.NoError(func() error {
+		// We require the RPC call to succeed, and won't wait for it
+		// since a failure is unexpected behavior.
+		req := &lnrpc.ListChannelsRequest{}
+		nodeChans := hn.RPC.ListChannels(req)
+
+		for _, ch := range nodeChans.Channels {
+			// Record all payment hashes active for this channel.
+			htlcHashes := make(map[string]struct{})
+
+			for _, htlc := range ch.PendingHtlcs {
+				h := hex.EncodeToString(htlc.HashLock)
+				_, ok := htlcHashes[h]
+				if ok {
+					return fmt.Errorf("duplicate HashLock")
+				}
+				htlcHashes[h] = struct{}{}
+			}
+
+			// Channel should have exactly the payHashes active.
+			if len(payHashes) != len(htlcHashes) {
+				return fmt.Errorf("node [%s:%x] had %v "+
+					"htlcs active, expected %v",
+					hn.Name(), hn.PubKey[:],
+					len(htlcHashes), len(payHashes))
+			}
+
+			// Make sure all the payHashes are active.
+			for _, payHash := range payHashes {
+				h := hex.EncodeToString(payHash)
+				if _, ok := htlcHashes[h]; ok {
+					continue
+				}
+
+				return fmt.Errorf("node [%s:%x] didn't have: "+
+					"the payHash %v active", hn.Name(),
+					hn.PubKey[:], h)
+			}
+		}
+
+		return nil
+	}, DefaultTimeout)
+	require.NoError(h, err, "timeout checking active HTLCs")
+}
diff --git a/lntemp/harness_miner.go b/lntemp/harness_miner.go
index 8509d3e17..82392aa3a 100644
--- a/lntemp/harness_miner.go
+++ b/lntemp/harness_miner.go
@@ -327,3 +327,37 @@ func (h *HarnessMiner) MineBlocksSlow(num uint32) []*wire.MsgBlock {
 
 	return blocks
 }
+
+// AssertOutpointInMempool asserts a given outpoint can be found in the mempool.
+func (h *HarnessMiner) AssertOutpointInMempool(op wire.OutPoint) *wire.MsgTx {
+	var msgTx *wire.MsgTx
+
+	err := wait.NoError(func() error {
+		// We require the RPC call to succeed, and won't wait for it
+		// since a failure is unexpected behavior.
+		mempool := h.GetRawMempool()
+
+		if len(mempool) == 0 {
+			return fmt.Errorf("empty mempool")
+		}
+
+		for _, txid := range mempool {
+			// We require the RPC call to succeed, and won't wait
+			// for it since a failure is unexpected behavior.
+			tx := h.GetRawTransaction(txid)
+
+			msgTx = tx.MsgTx()
+			for _, txIn := range msgTx.TxIn {
+				if txIn.PreviousOutPoint == op {
+					return nil
+				}
+			}
+		}
+
+		return fmt.Errorf("outpoint %v not found in mempool", op)
+	}, lntest.MinerMempoolTimeout)
+
+	require.NoError(h, err, "timeout checking mempool")
+
+	return msgTx
+}
diff --git a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go
index ecb85a2da..edb2acc54 100644
--- a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go
+++ b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go
@@ -1,16 +1,13 @@
 package itest
 
 import (
-	"context"
-	"time"
-
 	"github.com/btcsuite/btcd/btcutil"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/lightningnetwork/lnd/lncfg"
 	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
-	"github.com/lightningnetwork/lnd/lntest"
-	"github.com/lightningnetwork/lnd/lntest/wait"
+	"github.com/lightningnetwork/lnd/lntemp"
+	"github.com/lightningnetwork/lnd/lntemp/node"
 	"github.com/stretchr/testify/require"
 )
 
@@ -19,24 +16,16 @@
 // it using the HTLC timeout transaction. Any dust HTLC's should be immediately
 // canceled backwards. Once the timeout has been reached, then we should sweep
 // it on-chain, and cancel the HTLC backwards.
-func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
-	alice, bob *lntest.HarnessNode, c lnrpc.CommitmentType,
-	zeroConf bool) {
-
-	ctxb := context.Background()
+func testMultiHopHtlcLocalTimeout(ht *lntemp.HarnessTest,
+	alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
 
 	// First, we'll create a three hop network: Alice -> Bob -> Carol, with
 	// Carol refusing to actually settle or directly cancel any HTLC's
 	// self.
-	aliceChanPoint, bobChanPoint, carol := createThreeHopNetworkOld(
-		t, net, alice, bob, true, c, zeroConf,
+	aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
+		ht, alice, bob, true, c, zeroConf,
 	)
 
-	// Clean up carol's node when the test finishes.
-	defer shutdownAndAssert(net, t, carol)
-
-	time.Sleep(time.Second * 1)
-
 	// Now that our channels are set up, we'll send two HTLC's from Alice
 	// to Carol. The first HTLC will be universally considered "dust",
 	// while the second will be a proper fully valued HTLC.
@@ -46,50 +35,39 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 		finalCltvDelta = 40
 	)
 
-	ctx, cancel := context.WithCancel(ctxb)
-	defer cancel()
-
 	// We'll create two random payment hashes unknown to carol, then send
 	// each of them by manually specifying the HTLC details.
 	carolPubKey := carol.PubKey[:]
-	dustPayHash := makeFakePayHash(t)
-	payHash := makeFakePayHash(t)
+	dustPayHash := ht.Random32Bytes()
+	payHash := ht.Random32Bytes()
 
-	_, err := alice.RouterClient.SendPaymentV2(
-		ctx, &routerrpc.SendPaymentRequest{
-			Dest:           carolPubKey,
-			Amt:            int64(dustHtlcAmt),
-			PaymentHash:    dustPayHash,
-			FinalCltvDelta: finalCltvDelta,
-			TimeoutSeconds: 60,
-			FeeLimitMsat:   noFeeLimitMsat,
-		},
-	)
-	require.NoError(t.t, err)
+	alice.RPC.SendPayment(&routerrpc.SendPaymentRequest{
+		Dest:           carolPubKey,
+		Amt:            int64(dustHtlcAmt),
+		PaymentHash:    dustPayHash,
+		FinalCltvDelta: finalCltvDelta,
+		TimeoutSeconds: 60,
+		FeeLimitMsat:   noFeeLimitMsat,
+	})
 
-	_, err = alice.RouterClient.SendPaymentV2(
-		ctx, &routerrpc.SendPaymentRequest{
-			Dest:           carolPubKey,
-			Amt:            int64(htlcAmt),
-			PaymentHash:    payHash,
-			FinalCltvDelta: finalCltvDelta,
-			TimeoutSeconds: 60,
-			FeeLimitMsat:   noFeeLimitMsat,
-		},
-	)
-	require.NoError(t.t, err)
+	alice.RPC.SendPayment(&routerrpc.SendPaymentRequest{
+		Dest:           carolPubKey,
+		Amt:            int64(htlcAmt),
+		PaymentHash:    payHash,
+		FinalCltvDelta: finalCltvDelta,
+		TimeoutSeconds: 60,
+		FeeLimitMsat:   noFeeLimitMsat,
+	})
 
 	// Verify that all nodes in the path now have two HTLC's with the
 	// proper parameters.
-	nodes := []*lntest.HarnessNode{alice, bob, carol}
-	err = wait.NoError(func() error {
-		return assertActiveHtlcs(nodes, dustPayHash, payHash)
-	}, defaultTimeout)
-	require.NoError(t.t, err)
+	ht.AssertActiveHtlcs(alice, dustPayHash, payHash)
+	ht.AssertActiveHtlcs(bob, dustPayHash, payHash)
+	ht.AssertActiveHtlcs(carol, dustPayHash, payHash)
 
 	// Increase the fee estimate so that the following force close tx will
 	// be cpfp'ed.
-	net.SetFeeEstimate(30000)
+	ht.SetFeeEstimate(30000)
 
 	// We'll now mine enough blocks to trigger Bob's broadcast of his
 	// commitment transaction due to the fact that the HTLC is about to
@@ -98,8 +76,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 	numBlocks := padCLTV(
 		uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
 	)
-	_, err = net.Miner.Client.Generate(numBlocks)
-	require.NoError(t.t, err)
+	ht.MineBlocksAssertNodesSync(numBlocks)
 
 	// Bob's force close transaction should now be found in the mempool. If
 	// there are anchors, we also expect Bob's anchor sweep.
@@ -108,74 +85,51 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 	if hasAnchors {
 		expectedTxes = 2
 	}
-	_, err = waitForNTxsInMempool(
-		net.Miner.Client, expectedTxes, minerMempoolTimeout,
-	)
-	require.NoError(t.t, err)
+	ht.Miner.AssertNumTxsInMempool(expectedTxes)
 
-	bobFundingTxid, err := lnrpc.GetChanPointFundingTxid(bobChanPoint)
-	require.NoError(t.t, err)
-	bobChanOutpoint := wire.OutPoint{
-		Hash:  *bobFundingTxid,
-		Index: bobChanPoint.OutputIndex,
-	}
-	closeTxid := assertSpendingTxInMempool(
-		t, net.Miner.Client, minerMempoolTimeout, bobChanOutpoint,
-	)
+	op := ht.OutPointFromChannelPoint(bobChanPoint)
+	closeTx := ht.Miner.AssertOutpointInMempool(op)
 
 	// Mine a block to confirm the closing transaction.
-	mineBlocks(t, net, 1, expectedTxes)
+	ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)
 
 	// At this point, Bob should have canceled backwards the dust HTLC
 	// that we sent earlier. This means Alice should now only have a single
 	// HTLC on her channel.
-	nodes = []*lntest.HarnessNode{alice}
-	err = wait.NoError(func() error {
-		return assertActiveHtlcs(nodes, payHash)
-	}, defaultTimeout)
-	require.NoError(t.t, err)
+	ht.AssertActiveHtlcs(alice, payHash)
 
 	// With the closing transaction confirmed, we should expect Bob's HTLC
 	// timeout transaction to be broadcast due to the expiry being reached.
 	// If there are anchors, we also expect Carol's anchor sweep now.
-	_, err = getNTxsFromMempool(
-		net.Miner.Client, expectedTxes, minerMempoolTimeout,
-	)
-	require.NoError(t.t, err)
+	ht.Miner.AssertNumTxsInMempool(expectedTxes)
 
 	// We'll also obtain the expected HTLC timeout transaction hash.
-	htlcOutpoint := wire.OutPoint{Hash: closeTxid, Index: 0}
-	commitOutpoint := wire.OutPoint{Hash: closeTxid, Index: 1}
+	htlcOutpoint := wire.OutPoint{Hash: closeTx.TxHash(), Index: 0}
+	commitOutpoint := wire.OutPoint{Hash: closeTx.TxHash(), Index: 1}
 	if hasAnchors {
 		htlcOutpoint.Index = 2
 		commitOutpoint.Index = 3
 	}
-	htlcTimeoutTxid := assertSpendingTxInMempool(
-		t, net.Miner.Client, minerMempoolTimeout, htlcOutpoint,
-	)
+	htlcTimeoutTxid := ht.Miner.AssertOutpointInMempool(
+		htlcOutpoint,
+	).TxHash()
 
 	// Mine a block to confirm the expected transactions.
-	_ = mineBlocks(t, net, 1, expectedTxes)
+	ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)
 
 	// With Bob's HTLC timeout transaction confirmed, there should be no
 	// active HTLC's on the commitment transaction from Alice -> Bob.
-	err = wait.NoError(func() error {
-		return assertNumActiveHtlcs([]*lntest.HarnessNode{alice}, 0)
-	}, defaultTimeout)
-	require.NoError(t.t, err)
+	ht.AssertNumActiveHtlcs(alice, 0)
 
 	// At this point, Bob should show that the pending HTLC has advanced to
 	// the second stage and is ready to be swept once the timelock is up.
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-	pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest)
-	require.NoError(t.t, err)
-	require.Equal(t.t, 1, len(pendingChanResp.PendingForceClosingChannels))
+	pendingChanResp := bob.RPC.PendingChannels()
+	require.Equal(ht, 1, len(pendingChanResp.PendingForceClosingChannels))
 	forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-	require.NotZero(t.t, forceCloseChan.LimboBalance)
-	require.Positive(t.t, forceCloseChan.BlocksTilMaturity)
-	require.Equal(t.t, 1, len(forceCloseChan.PendingHtlcs))
-	require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
+	require.NotZero(ht, forceCloseChan.LimboBalance)
+	require.Positive(ht, forceCloseChan.BlocksTilMaturity)
+	require.Equal(ht, 1, len(forceCloseChan.PendingHtlcs))
+	require.Equal(ht, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
 
 	htlcTimeoutOutpoint := wire.OutPoint{Hash: htlcTimeoutTxid, Index: 0}
 	if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
@@ -184,48 +138,40 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 		// CLTV on top of the usual CSV delay on any outputs that he can
 		// sweep back to his wallet.
 		blocksTilMaturity := uint32(forceCloseChan.BlocksTilMaturity)
-		mineBlocks(t, net, blocksTilMaturity, 0)
+		ht.MineBlocksAssertNodesSync(blocksTilMaturity)
 
 		// Check that the sweep spends the expected inputs.
-		_ = assertSpendingTxInMempool(
-			t, net.Miner.Client, minerMempoolTimeout,
-			commitOutpoint, htlcTimeoutOutpoint,
-		)
+		ht.Miner.AssertOutpointInMempool(commitOutpoint)
+		ht.Miner.AssertOutpointInMempool(htlcTimeoutOutpoint)
 	} else {
 		// Since Bob force closed the channel between him and Carol, he
 		// will incur the usual CSV delay on any outputs that he can
 		// sweep back to his wallet. We'll subtract one block from our
 		// current maturity period to assert on the mempool.
-		mineBlocks(t, net, uint32(forceCloseChan.BlocksTilMaturity-1), 0)
+		numBlocks := uint32(forceCloseChan.BlocksTilMaturity - 1)
+		ht.MineBlocksAssertNodesSync(numBlocks)
 
 		// Check that the sweep spends from the mined commitment.
-		_ = assertSpendingTxInMempool(
-			t, net.Miner.Client, minerMempoolTimeout, commitOutpoint,
-		)
+		ht.Miner.AssertOutpointInMempool(commitOutpoint)
 
 		// Mine a block to confirm Bob's commit sweep tx and assert it
 		// was in fact mined.
-		_ = mineBlocks(t, net, 1, 1)[0]
+		ht.Miner.MineBlocksAndAssertNumTxes(1, 1)
 
 		// Mine an additional block to prompt Bob to broadcast their
 		// second layer sweep due to the CSV on the HTLC timeout output.
-		mineBlocks(t, net, 1, 0)
-		_ = assertSpendingTxInMempool(
-			t, net.Miner.Client, minerMempoolTimeout,
-			htlcTimeoutOutpoint,
-		)
+		ht.Miner.MineBlocksAndAssertNumTxes(1, 0)
+		ht.Miner.AssertOutpointInMempool(htlcTimeoutOutpoint)
 	}
 
 	// Next, we'll mine a final block that should confirm the sweeping
 	// transactions left.
-	_, err = net.Miner.Client.Generate(1)
-	require.NoError(t.t, err)
+	ht.MineBlocksAssertNodesSync(1)
 
 	// Once this transaction has been confirmed, Bob should detect that he
 	// no longer has any pending channels.
-	err = waitForNumChannelPendingForceClose(bob, 0, nil)
-	require.NoError(t.t, err)
+	ht.AssertNumPendingForceClose(bob, 0)
 
 	// Coop close channel, expect no anchors.
-	closeChannelAndAssertType(t, net, alice, aliceChanPoint, false, false)
+	ht.CloseChannel(alice, aliceChanPoint)
 }
diff --git a/lntest/itest/lnd_multi-hop_test.go b/lntest/itest/lnd_multi-hop_test.go
index 1852ff4b3..be3bcb5c3 100644
--- a/lntest/itest/lnd_multi-hop_test.go
+++ b/lntest/itest/lnd_multi-hop_test.go
@@ -27,12 +27,12 @@ func testMultiHopHtlcClaims(ht *lntemp.HarnessTest) {
 	}
 
 	subTests := []testCase{
-		// {
-		// 	// bob: outgoing our commit timeout
-		// 	// carol: incoming their commit watch and see timeout
-		// 	name: "local force close immediate expiry",
-		// 	test: testMultiHopHtlcLocalTimeout,
-		// },
+		{
+			// bob: outgoing our commit timeout
+			// carol: incoming their commit watch and see timeout
+			name: "local force close immediate expiry",
+			test: testMultiHopHtlcLocalTimeout,
+		},
 		// {
 		// 	// bob: outgoing watch and see, they sweep on chain
 		// 	// carol: incoming our commit, know preimage