itest: refactor testMultiHopRemoteForceCloseOnChainHtlcTimeout

This commit is contained in:
yyforyongyu 2022-07-29 12:26:58 +08:00
parent 6b5f4f407c
commit f65002255c
No known key found for this signature in database
GPG key ID: 9BCD95C4FF296868
2 changed files with 66 additions and 132 deletions

View file

@ -1,16 +1,13 @@
package itest
import (
"context"
"fmt"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
@ -20,22 +17,16 @@ import (
// channel, then we properly timeout the HTLC directly on *their* commitment
// transaction once the timeout has expired. Once we sweep the transaction, we
// should also cancel back the initial HTLC.
func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
t *harnessTest, alice, bob *lntest.HarnessNode, c lnrpc.CommitmentType,
zeroConf bool) {
ctxb := context.Background()
func testMultiHopRemoteForceCloseOnChainHtlcTimeout(ht *lntemp.HarnessTest,
alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLC's
herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetworkOld(
t, net, alice, bob, true, c, zeroConf,
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
ht, alice, bob, true, c, zeroConf,
)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
// With our channels set up, we'll then send a single HTLC from Alice
// to Carol. As Carol is in hodl mode, she won't settle this HTLC which
opens up the base for our tests.
@ -44,56 +35,52 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
htlcAmt = btcutil.Amount(30000)
)
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
// We'll now send a single HTLC across our multi-hop network.
preimage := lntypes.Preimage{1, 2, 3}
var preimage lntypes.Preimage
copy(preimage[:], ht.Random32Bytes())
payHash := preimage.Hash()
invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
Value: int64(htlcAmt),
CltvExpiry: 40,
Hash: payHash[:],
}
carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
require.NoError(t.t, err)
// Subscribe the invoice.
stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
_, err = alice.RouterClient.SendPaymentV2(
ctx, &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
require.NoError(t.t, err)
req := &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
alice.RPC.SendPayment(req)
// Once the HTLC has cleared, all the nodes in our mini network should
// show that the HTLC has been locked in.
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash[:])
}, defaultTimeout)
require.NoError(t.t, err)
ht.AssertActiveHtlcs(alice, payHash[:])
ht.AssertActiveHtlcs(bob, payHash[:])
ht.AssertActiveHtlcs(carol, payHash[:])
// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
net.SetFeeEstimate(30000)
ht.SetFeeEstimate(30000)
// At this point, we'll now instruct Carol to force close the
// transaction. This will let us exercise that Bob is able to sweep the
// expired HTLC on Carol's version of the commitment transaction. If
// Carol has an anchor, it will be swept too.
hasAnchors := commitTypeHasAnchors(c)
closeTx := closeChannelAndAssertType(
t, net, carol, bobChanPoint, hasAnchors, true,
closeStream, _ := ht.CloseChannelAssertPending(
carol, bobChanPoint, true,
)
closeTx := ht.AssertStreamChannelForceClosed(
carol, bobChanPoint, hasAnchors, closeStream,
)
// At this point, Bob should have a pending force close channel as
// Carol has gone directly to chain.
err = waitForNumChannelPendingForceClose(bob, 1, nil)
require.NoError(t.t, err)
ht.AssertNumPendingForceClose(bob, 1)
var expectedTxes int
switch c {
@ -112,134 +99,81 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
expectedTxes = 1
default:
t.Fatalf("unhandled commitment type %v", c)
ht.Fatalf("unhandled commitment type %v", c)
}
_, err = waitForNTxsInMempool(
net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
ht.Miner.AssertNumTxsInMempool(expectedTxes)
// Next, we'll mine enough blocks for the HTLC to expire. At this
// point, Bob should hand off the output to his internal utxo nursery,
// which will broadcast a sweep transaction.
numBlocks := padCLTV(finalCltvDelta - 1)
_, err = net.Miner.Client.Generate(numBlocks)
require.NoError(t.t, err)
ht.MineBlocksAssertNodesSync(numBlocks)
// If we check Bob's pending channel report, it should show that he has
a single HTLC that's now in the second stage, as we skip the initial
// first stage since this is a direct HTLC.
err = waitForNumChannelPendingForceClose(
bob, 1, func(c *lnrpcForceCloseChannel) error {
if len(c.PendingHtlcs) != 1 {
return fmt.Errorf("bob should have pending " +
"htlc but doesn't")
}
if c.PendingHtlcs[0].Stage != 2 {
return fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
}
return nil
},
)
require.NoError(t.t, err)
ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2)
// We need to generate an additional block to trigger the sweep.
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)
ht.MineBlocksAssertNodesSync(1)
// Bob's sweeping transaction should now be found in the mempool at
// this point.
sweepTx, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
if err != nil {
// If Bob's transaction isn't yet in the mempool, then due to
// internal message passing and the low period between blocks
// being mined, it may have been detected as a late
// registration. As a result, we'll mine another block and
// repeat the check. If it doesn't go through this time, then
// we'll fail.
// TODO(halseth): can we use waitForChannelPendingForceClose to
// avoid this hack?
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)
sweepTx, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
}
sweepTx := ht.Miner.AssertNumTxsInMempool(1)[0]
// The following issue is believed to have been resolved. Keep the
// original comments here for future reference in case anything goes
// wrong.
//
// If Bob's transaction isn't yet in the mempool, then due to
// internal message passing and the low period between blocks
// being mined, it may have been detected as a late
// registration. As a result, we'll mine another block and
// repeat the check. If it doesn't go through this time, then
// we'll fail.
// TODO(halseth): can we use waitForChannelPendingForceClose to
// avoid this hack?
// If we mine an additional block, then this should confirm Bob's
// transaction which sweeps the direct HTLC output.
block := mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, sweepTx)
block := ht.Miner.MineBlocksAndAssertNumTxes(1, 1)[0]
ht.Miner.AssertTxInBlock(block, sweepTx)
// Now that the sweeping transaction has been confirmed, Bob should
// cancel back that HTLC. As a result, Alice should not know of any
// active HTLC's.
nodes = []*lntest.HarnessNode{alice}
err = wait.NoError(func() error {
return assertNumActiveHtlcs(nodes, 0)
}, defaultTimeout)
require.NoError(t.t, err)
ht.AssertNumActiveHtlcs(alice, 0)
// Now we'll check Bob's pending channel report. Since this was Carol's
// commitment, he doesn't have to wait for any CSV delays, but he may
// still need to wait for a CLTV on his commit output to expire
// depending on the commitment type.
if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
resp, err := bob.PendingChannels(
ctxt, &lnrpc.PendingChannelsRequest{},
)
require.NoError(t.t, err)
resp := bob.RPC.PendingChannels()
require.Len(t.t, resp.PendingForceClosingChannels, 1)
require.Len(ht, resp.PendingForceClosingChannels, 1)
forceCloseChan := resp.PendingForceClosingChannels[0]
require.Positive(t.t, forceCloseChan.BlocksTilMaturity)
require.Positive(ht, forceCloseChan.BlocksTilMaturity)
numBlocks := uint32(forceCloseChan.BlocksTilMaturity)
_, err = net.Miner.Client.Generate(numBlocks)
require.NoError(t.t, err)
ht.MineBlocksAssertNodesSync(numBlocks)
bobCommitOutpoint := wire.OutPoint{Hash: *closeTx, Index: 3}
bobCommitSweep := assertSpendingTxInMempool(
t, net.Miner.Client, minerMempoolTimeout,
bobCommitSweep := ht.Miner.AssertOutpointInMempool(
bobCommitOutpoint,
)
block := mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, &bobCommitSweep)
bobCommitSweepTxid := bobCommitSweep.TxHash()
block := ht.Miner.MineBlocksAndAssertNumTxes(1, 1)[0]
ht.Miner.AssertTxInBlock(block, &bobCommitSweepTxid)
}
err = waitForNumChannelPendingForceClose(bob, 0, nil)
require.NoError(t.t, err)
ht.AssertNumPendingForceClose(bob, 0)
// While we're here, we assert that our expired invoice's state is
// correctly updated, and can no longer be settled.
assertOnChainInvoiceState(ctxb, t, carol, preimage)
ht.AssertInvoiceState(stream, lnrpc.Invoice_CANCELED)
// We'll close out the test by closing the channel from Alice to Bob,
and then shutting down the new node we created as it's no longer
// needed. Coop close, no anchors.
closeChannelAndAssertType(t, net, alice, aliceChanPoint, false, false)
}
// assertOnChainInvoiceState asserts that we have the correct state for a hold
// invoice that has expired on chain, and that it can't be settled.
func assertOnChainInvoiceState(ctx context.Context, t *harnessTest,
node *lntest.HarnessNode, preimage lntypes.Preimage) {
hash := preimage.Hash()
inv, err := node.LookupInvoice(ctx, &lnrpc.PaymentHash{
RHash: hash[:],
})
require.NoError(t.t, err)
for _, htlc := range inv.Htlcs {
require.Equal(t.t, lnrpc.InvoiceHTLCState_CANCELED, htlc.State)
}
require.Equal(t.t, lnrpc.Invoice_CANCELED, inv.State)
_, err = node.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
require.Error(t.t, err, "should not be able to settle invoice")
ht.CloseChannel(alice, aliceChanPoint)
}

View file

@ -45,12 +45,12 @@ func testMultiHopHtlcClaims(ht *lntemp.HarnessTest) {
name: "local force close on-chain htlc timeout",
test: testMultiHopLocalForceCloseOnChainHtlcTimeout,
},
// {
// // bob: outgoing their commit watch and see timeout
// // carol: incoming our commit watch and see timeout
// name: "remote force close on-chain htlc timeout",
// test: testMultiHopRemoteForceCloseOnChainHtlcTimeout,
// },
{
// bob: outgoing their commit watch and see timeout
// carol: incoming our commit watch and see timeout
name: "remote force close on-chain htlc timeout",
test: testMultiHopRemoteForceCloseOnChainHtlcTimeout,
},
// {
// // bob: outgoing our commit watch and see, they sweep
// // on chain