Mirror of https://github.com/lightningnetwork/lnd.git (synced 2025-03-04 09:48:19 +01:00)
itest: refactor testMultiHopRemoteForceCloseOnChainHtlcTimeout
This commit is contained in:
parent 6b5f4f407c
commit f65002255c
2 changed files with 66 additions and 132 deletions
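The diff below migrates this itest from the old lntest/NetworkHarness style to the new lntemp harness. The recurring change: explicit wait.NoError polling plus require.NoError boilerplate collapses into single harness assertions that poll and fail the test internally. As a rough, self-contained sketch of the polling idiom being folded away (waitNoError is a hypothetical stand-in for lntest/wait.NoError, not lnd code):

package main

import (
    "errors"
    "fmt"
    "time"
)

// waitNoError polls check until it returns nil or the timeout expires.
// The old test spelled this out at every assertion site; the lntemp
// assertions in the diff below are assumed to wrap an equivalent loop.
func waitNoError(check func() error, timeout time.Duration) error {
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        if err := check(); err == nil {
            return nil
        }
        time.Sleep(100 * time.Millisecond)
    }
    return errors.New("timeout waiting for condition")
}

func main() {
    start := time.Now()
    err := waitNoError(func() error {
        if time.Since(start) < 300*time.Millisecond {
            return errors.New("not yet")
        }
        return nil
    }, time.Second)
    fmt.Println("result:", err) // result: <nil>
}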
@@ -1,16 +1,13 @@
 package itest
 
 import (
-    "context"
-    "fmt"
-
     "github.com/btcsuite/btcd/btcutil"
     "github.com/btcsuite/btcd/wire"
     "github.com/lightningnetwork/lnd/lnrpc"
     "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
     "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
-    "github.com/lightningnetwork/lnd/lntest"
-    "github.com/lightningnetwork/lnd/lntest/wait"
+    "github.com/lightningnetwork/lnd/lntemp"
+    "github.com/lightningnetwork/lnd/lntemp/node"
     "github.com/lightningnetwork/lnd/lntypes"
    "github.com/stretchr/testify/require"
 )
@@ -20,22 +17,16 @@ import (
 // channel, then we properly timeout the HTLC directly on *their* commitment
 // transaction once the timeout has expired. Once we sweep the transaction, we
 // should also cancel back the initial HTLC.
-func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
-    t *harnessTest, alice, bob *lntest.HarnessNode, c lnrpc.CommitmentType,
-    zeroConf bool) {
-
-    ctxb := context.Background()
+func testMultiHopRemoteForceCloseOnChainHtlcTimeout(ht *lntemp.HarnessTest,
+    alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
 
     // First, we'll create a three hop network: Alice -> Bob -> Carol, with
     // Carol refusing to actually settle or directly cancel any HTLC's
     // self.
-    aliceChanPoint, bobChanPoint, carol := createThreeHopNetworkOld(
-        t, net, alice, bob, true, c, zeroConf,
+    aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
+        ht, alice, bob, true, c, zeroConf,
     )
 
-    // Clean up carol's node when the test finishes.
-    defer shutdownAndAssert(net, t, carol)
-
     // With our channels set up, we'll then send a single HTLC from Alice
     // to Carol. As Carol is in hodl mode, she won't settle this HTLC which
     // opens up the base for out tests.
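Note the dropped defer shutdownAndAssert(net, t, carol): the migrated test no longer tears Carol down explicitly, which reads as the new harness registering cleanup when the node is created (an inference from this diff, not something the commit states). The standard Go idiom for that is t.Cleanup; a minimal sketch with a hypothetical fakeNode type:

package itest

import "testing"

// fakeNode is a stand-in for a harness node, used only in this sketch.
type fakeNode struct{ name string }

func (n *fakeNode) shutdown() {}

// newNodeWithCleanup registers teardown at creation time, so call sites
// need no explicit defer. This mirrors the behavior the dropped defer
// implies for lntemp-managed nodes.
func newNodeWithCleanup(t *testing.T, name string) *fakeNode {
    n := &fakeNode{name: name}
    t.Cleanup(n.shutdown)
    return n
}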
@@ -44,56 +35,52 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
         htlcAmt = btcutil.Amount(30000)
     )
 
-    ctx, cancel := context.WithCancel(ctxb)
-    defer cancel()
-
     // We'll now send a single HTLC across our multi-hop network.
-    preimage := lntypes.Preimage{1, 2, 3}
+    var preimage lntypes.Preimage
+    copy(preimage[:], ht.Random32Bytes())
     payHash := preimage.Hash()
     invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
         Value:      int64(htlcAmt),
         CltvExpiry: 40,
         Hash:       payHash[:],
     }
+    carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
 
-    ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-    carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
-    require.NoError(t.t, err)
+    // Subscribe the invoice.
+    stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
 
-    _, err = alice.RouterClient.SendPaymentV2(
-        ctx, &routerrpc.SendPaymentRequest{
-            PaymentRequest: carolInvoice.PaymentRequest,
-            TimeoutSeconds: 60,
-            FeeLimitMsat:   noFeeLimitMsat,
-        },
-    )
-    require.NoError(t.t, err)
+    req := &routerrpc.SendPaymentRequest{
+        PaymentRequest: carolInvoice.PaymentRequest,
+        TimeoutSeconds: 60,
+        FeeLimitMsat:   noFeeLimitMsat,
+    }
+    alice.RPC.SendPayment(req)
 
     // Once the HTLC has cleared, all the nodes in our mini network should
     // show that the HTLC has been locked in.
-    nodes := []*lntest.HarnessNode{alice, bob, carol}
-    err = wait.NoError(func() error {
-        return assertActiveHtlcs(nodes, payHash[:])
-    }, defaultTimeout)
-    require.NoError(t.t, err)
+    ht.AssertActiveHtlcs(alice, payHash[:])
+    ht.AssertActiveHtlcs(bob, payHash[:])
+    ht.AssertActiveHtlcs(carol, payHash[:])
 
     // Increase the fee estimate so that the following force close tx will
     // be cpfp'ed.
-    net.SetFeeEstimate(30000)
+    ht.SetFeeEstimate(30000)
 
     // At this point, we'll now instruct Carol to force close the
     // transaction. This will let us exercise that Bob is able to sweep the
     // expired HTLC on Carol's version of the commitment transaction. If
     // Carol has an anchor, it will be swept too.
     hasAnchors := commitTypeHasAnchors(c)
-    closeTx := closeChannelAndAssertType(
-        t, net, carol, bobChanPoint, hasAnchors, true,
+    closeStream, _ := ht.CloseChannelAssertPending(
+        carol, bobChanPoint, true,
+    )
+    closeTx := ht.AssertStreamChannelForceClosed(
+        carol, bobChanPoint, hasAnchors, closeStream,
     )
 
     // At this point, Bob should have a pending force close channel as
     // Carol has gone directly to chain.
-    err = waitForNumChannelPendingForceClose(bob, 1, nil)
-    require.NoError(t.t, err)
+    ht.AssertNumPendingForceClose(bob, 1)
 
     var expectedTxes int
     switch c {
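One substantive change in this hunk: the preimage is now random (ht.Random32Bytes()) rather than the fixed {1, 2, 3}, and the hold invoice is locked to payHash = preimage.Hash(). A Lightning payment hash is the SHA-256 of a 32-byte preimage, which is what lntypes.Preimage.Hash() computes; a standalone illustration:

package main

import (
    "crypto/rand"
    "crypto/sha256"
    "fmt"
)

func main() {
    // Draw a random 32-byte preimage, as ht.Random32Bytes() does in
    // the test, then derive the payment hash the hold invoice is
    // locked to.
    var preimage [32]byte
    if _, err := rand.Read(preimage[:]); err != nil {
        panic(err)
    }
    payHash := sha256.Sum256(preimage[:])
    fmt.Printf("preimage: %x\npayHash:  %x\n", preimage, payHash)
}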
@@ -112,48 +99,32 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
         expectedTxes = 1
 
     default:
-        t.Fatalf("unhandled commitment type %v", c)
+        ht.Fatalf("unhandled commitment type %v", c)
     }
-    _, err = waitForNTxsInMempool(
-        net.Miner.Client, expectedTxes, minerMempoolTimeout,
-    )
-    require.NoError(t.t, err)
+    ht.Miner.AssertNumTxsInMempool(expectedTxes)
 
     // Next, we'll mine enough blocks for the HTLC to expire. At this
     // point, Bob should hand off the output to his internal utxo nursery,
     // which will broadcast a sweep transaction.
     numBlocks := padCLTV(finalCltvDelta - 1)
-    _, err = net.Miner.Client.Generate(numBlocks)
-    require.NoError(t.t, err)
+    ht.MineBlocksAssertNodesSync(numBlocks)
 
     // If we check Bob's pending channel report, it should show that he has
     // a single HTLC that's now in the second stage, as skip the initial
     // first stage since this is a direct HTLC.
-    err = waitForNumChannelPendingForceClose(
-        bob, 1, func(c *lnrpcForceCloseChannel) error {
-            if len(c.PendingHtlcs) != 1 {
-                return fmt.Errorf("bob should have pending " +
-                    "htlc but doesn't")
-            }
-
-            if c.PendingHtlcs[0].Stage != 2 {
-                return fmt.Errorf("bob's htlc should have "+
-                    "advanced to the second stage: %v", err)
-            }
-
-            return nil
-        },
-    )
-    require.NoError(t.t, err)
+    ht.AssertNumHTLCsAndStage(bob, bobChanPoint, 1, 2)
 
     // We need to generate an additional block to trigger the sweep.
-    _, err = net.Miner.Client.Generate(1)
-    require.NoError(t.t, err)
+    ht.MineBlocksAssertNodesSync(1)
 
     // Bob's sweeping transaction should now be found in the mempool at
     // this point.
-    sweepTx, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
-    if err != nil {
-        // If Bob's transaction isn't yet in the mempool, then due to
-        // internal message passing and the low period between blocks
-        // being mined, it may have been detected as a late
+    sweepTx := ht.Miner.AssertNumTxsInMempool(1)[0]
+
+    // The following issue is believed to have been resolved. Keep the
+    // original comments here for future reference in case anything goes
+    // wrong.
+    //
+    // If Bob's transaction isn't yet in the mempool, then due to
+    // internal message passing and the low period between blocks
+    // being mined, it may have been detected as a late
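The deleted closure above checked that Bob has exactly one pending HTLC and that it sits at stage two (a direct HTLC on the remote commitment skips the stage-one timeout transaction). The replacement one-liner ht.AssertNumHTLCsAndStage presumably polls for the same condition; a sketch of that predicate against the lnrpc types the old closure used (the helper name checkHTLCStage is mine):

package itest

import (
    "fmt"

    "github.com/lightningnetwork/lnd/lnrpc"
)

// checkHTLCStage re-expresses the deleted inline predicate: the force
// closed channel must report exactly numHTLCs pending HTLCs, each at
// the given stage. ht.AssertNumHTLCsAndStage is assumed to poll for an
// equivalent condition.
func checkHTLCStage(c *lnrpc.PendingChannelsResponse_ForceClosedChannel,
    numHTLCs int, stage uint32) error {

    if len(c.PendingHtlcs) != numHTLCs {
        return fmt.Errorf("expected %d pending htlcs, got %d",
            numHTLCs, len(c.PendingHtlcs))
    }
    for _, htlc := range c.PendingHtlcs {
        if htlc.Stage != stage {
            return fmt.Errorf("htlc at stage %d, want %d",
                htlc.Stage, stage)
        }
    }
    return nil
}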
@@ -162,84 +133,47 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
-        // we'll fail.
-        // TODO(halseth): can we use waitForChannelPendingForceClose to
-        // avoid this hack?
-        _, err = net.Miner.Client.Generate(1)
-        require.NoError(t.t, err)
-        sweepTx, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
-        require.NoError(t.t, err)
-    }
+    // we'll fail.
+    // TODO(halseth): can we use waitForChannelPendingForceClose to
+    // avoid this hack?
 
     // If we mine an additional block, then this should confirm Bob's
     // transaction which sweeps the direct HTLC output.
-    block := mineBlocks(t, net, 1, 1)[0]
-    assertTxInBlock(t, block, sweepTx)
+    block := ht.Miner.MineBlocksAndAssertNumTxes(1, 1)[0]
+    ht.Miner.AssertTxInBlock(block, sweepTx)
 
     // Now that the sweeping transaction has been confirmed, Bob should
     // cancel back that HTLC. As a result, Alice should not know of any
     // active HTLC's.
-    nodes = []*lntest.HarnessNode{alice}
-    err = wait.NoError(func() error {
-        return assertNumActiveHtlcs(nodes, 0)
-    }, defaultTimeout)
-    require.NoError(t.t, err)
+    ht.AssertNumActiveHtlcs(alice, 0)
 
     // Now we'll check Bob's pending channel report. Since this was Carol's
     // commitment, he doesn't have to wait for any CSV delays, but he may
     // still need to wait for a CLTV on his commit output to expire
     // depending on the commitment type.
     if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
-        ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-        resp, err := bob.PendingChannels(
-            ctxt, &lnrpc.PendingChannelsRequest{},
-        )
-        require.NoError(t.t, err)
+        resp := bob.RPC.PendingChannels()
 
-        require.Len(t.t, resp.PendingForceClosingChannels, 1)
+        require.Len(ht, resp.PendingForceClosingChannels, 1)
         forceCloseChan := resp.PendingForceClosingChannels[0]
-        require.Positive(t.t, forceCloseChan.BlocksTilMaturity)
+        require.Positive(ht, forceCloseChan.BlocksTilMaturity)
 
         numBlocks := uint32(forceCloseChan.BlocksTilMaturity)
-        _, err = net.Miner.Client.Generate(numBlocks)
-        require.NoError(t.t, err)
+        ht.MineBlocksAssertNodesSync(numBlocks)
 
         bobCommitOutpoint := wire.OutPoint{Hash: *closeTx, Index: 3}
-        bobCommitSweep := assertSpendingTxInMempool(
-            t, net.Miner.Client, minerMempoolTimeout,
+        bobCommitSweep := ht.Miner.AssertOutpointInMempool(
             bobCommitOutpoint,
         )
-        block := mineBlocks(t, net, 1, 1)[0]
-        assertTxInBlock(t, block, &bobCommitSweep)
+        bobCommitSweepTxid := bobCommitSweep.TxHash()
+        block := ht.Miner.MineBlocksAndAssertNumTxes(1, 1)[0]
+        ht.Miner.AssertTxInBlock(block, &bobCommitSweepTxid)
     }
-    err = waitForNumChannelPendingForceClose(bob, 0, nil)
-    require.NoError(t.t, err)
+    ht.AssertNumPendingForceClose(bob, 0)
 
     // While we're here, we assert that our expired invoice's state is
     // correctly updated, and can no longer be settled.
-    assertOnChainInvoiceState(ctxb, t, carol, preimage)
+    ht.AssertInvoiceState(stream, lnrpc.Invoice_CANCELED)
 
     // We'll close out the test by closing the channel from Alice to Bob,
     // and then shutting down the new node we created as its no longer
     // needed. Coop close, no anchors.
-    closeChannelAndAssertType(t, net, alice, aliceChanPoint, false, false)
-}
-
-// assertOnChainInvoiceState asserts that we have the correct state for a hold
-// invoice that has expired on chain, and that it can't be settled.
-func assertOnChainInvoiceState(ctx context.Context, t *harnessTest,
-    node *lntest.HarnessNode, preimage lntypes.Preimage) {
-
-    hash := preimage.Hash()
-    inv, err := node.LookupInvoice(ctx, &lnrpc.PaymentHash{
-        RHash: hash[:],
-    })
-    require.NoError(t.t, err)
-
-    for _, htlc := range inv.Htlcs {
-        require.Equal(t.t, lnrpc.InvoiceHTLCState_CANCELED, htlc.State)
-    }
-    require.Equal(t.t, lnrpc.Invoice_CANCELED, inv.State)
-
-    _, err = node.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
-        Preimage: preimage[:],
-    })
-    require.Error(t.t, err, "should not be able to settle invoice")
+    ht.CloseChannel(alice, aliceChanPoint)
 }
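The deleted assertOnChainInvoiceState helper looked the invoice up, checked that every HTLC and the invoice itself were CANCELED, and verified that settling now fails; the migrated test instead watches the SubscribeSingleInvoice stream opened earlier and asserts that the Invoice_CANCELED state arrives. A self-contained sketch of that stream-draining shape (all types here are stand-ins, not lnd APIs):

package main

import (
    "errors"
    "fmt"
    "time"
)

type invoiceState int

const (
    stateOpen invoiceState = iota
    stateAccepted
    stateCanceled
)

// waitForState drains a stream of state updates until the wanted state
// arrives or the timeout fires; this is the shape of check that
// ht.AssertInvoiceState is assumed to run against the subscription.
func waitForState(updates <-chan invoiceState, want invoiceState,
    timeout time.Duration) error {

    deadline := time.After(timeout)
    for {
        select {
        case s := <-updates:
            if s == want {
                return nil
            }
        case <-deadline:
            return errors.New("timeout waiting for invoice state")
        }
    }
}

func main() {
    updates := make(chan invoiceState, 3)
    updates <- stateOpen
    updates <- stateAccepted
    updates <- stateCanceled
    fmt.Println(waitForState(updates, stateCanceled, time.Second))
}

The remaining hunk is in the second changed file: the test table in testMultiHopHtlcClaims.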
@@ -45,12 +45,12 @@ func testMultiHopHtlcClaims(ht *lntemp.HarnessTest) {
         name: "local force close on-chain htlc timeout",
         test: testMultiHopLocalForceCloseOnChainHtlcTimeout,
     },
-    // {
-    //     // bob: outgoing their commit watch and see timeout
-    //     // carol: incoming our commit watch and see timeout
-    //     name: "remote force close on-chain htlc timeout",
-    //     test: testMultiHopRemoteForceCloseOnChainHtlcTimeout,
-    // },
+    {
+        // bob: outgoing their commit watch and see timeout
+        // carol: incoming our commit watch and see timeout
+        name: "remote force close on-chain htlc timeout",
+        test: testMultiHopRemoteForceCloseOnChainHtlcTimeout,
+    },
     // {
     //     // bob: outgoing our commit watch and see, they sweep
     //     // on chain
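This last hunk re-enables the migrated test by uncommenting its entry in the table; the cases still commented out are the ones not yet ported. A minimal sketch of the table-driven pattern in play (the harness type here is a stand-in, and the real runner presumably also supplies the commitment type and zeroConf flag the test function takes):

package itest

import "testing"

type sketchHarness struct{ t *testing.T }

type claimTestCase struct {
    name string
    test func(h *sketchHarness)
}

func TestClaimsSketch(t *testing.T) {
    cases := []claimTestCase{
        {
            name: "remote force close on-chain htlc timeout",
            test: func(h *sketchHarness) { h.t.Log("migrated") },
        },
    }
    for _, tc := range cases {
        tc := tc // capture range variable for the subtest closure
        t.Run(tc.name, func(st *testing.T) {
            tc.test(&sketchHarness{t: st})
        })
    }
}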