itest: refactor testMultiHopHtlcAggregation

parent 0115ec8719
commit b463147595

3 changed files with 131 additions and 458 deletions
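
The commit moves this itest from the legacy lntest.NetworkHarness/harnessTest helpers, where each RPC call returns an error that the test must check with require.NoError, to the lntemp.HarnessTest wrappers (alice.RPC.*, ht.Assert*) that do the error handling and assertions internally. A minimal, self-contained Go sketch of that wrapper pattern follows; the types are hypothetical stand-ins for illustration only, not lnd code.

package sketch

import "testing"

// rawClient stands in for an RPC client whose methods return errors
// (hypothetical type, for illustration only).
type rawClient struct{}

func (c *rawClient) AddInvoice(amtSat int64) (string, error) {
	return "lnbc1...", nil
}

// harnessRPC wraps rawClient and fails the test itself on error, so a call
// site shrinks from three lines (call, err check, use) to one, the same
// shape as the carol.RPC.AddInvoice(...) calls in the refactored test below.
type harnessRPC struct {
	t   *testing.T
	raw *rawClient
}

func (h *harnessRPC) AddInvoice(amtSat int64) string {
	h.t.Helper()

	invoice, err := h.raw.AddInvoice(amtSat)
	if err != nil {
		h.t.Fatalf("AddInvoice failed: %v", err)
	}

	return invoice
}

In the diff this same shape appears where carol.RPC.AddInvoice(invoice) replaces the AddInvoice call plus its require.NoError check.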

@@ -2,7 +2,6 @@ package itest

import (
	"context"
	"crypto/rand"
	"fmt"
	"testing"
	"time"

@@ -15,7 +14,6 @@ import (
	"github.com/lightningnetwork/lnd/labels"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/signrpc"
	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/lightningnetwork/lnd/lntemp"
	"github.com/lightningnetwork/lnd/lntemp/node"
	"github.com/lightningnetwork/lnd/lntest"

@@ -717,115 +715,6 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
	closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
}

// deriveFundingShim creates a channel funding shim by deriving the necessary
// keys on both sides.
// TODO(yy): remove.
func deriveFundingShimOld(net *lntest.NetworkHarness, t *harnessTest,
	carol, dave *lntest.HarnessNode, chanSize btcutil.Amount,
	thawHeight uint32, publish bool) (*lnrpc.FundingShim,
	*lnrpc.ChannelPoint, *chainhash.Hash) {

	ctxb := context.Background()
	keyLoc := &walletrpc.KeyReq{KeyFamily: 9999}
	carolFundingKey, err := carol.WalletKitClient.DeriveNextKey(ctxb, keyLoc)
	require.NoError(t.t, err)
	daveFundingKey, err := dave.WalletKitClient.DeriveNextKey(ctxb, keyLoc)
	require.NoError(t.t, err)

	// Now that we have the multi-sig keys for each party, we can manually
	// construct the funding transaction. We'll instruct the backend to
	// immediately create and broadcast a transaction paying out an exact
	// amount. Normally this would reside in the mempool, but we just
	// confirm it now for simplicity.
	_, fundingOutput, err := input.GenFundingPkScript(
		carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes,
		int64(chanSize),
	)
	require.NoError(t.t, err)

	var txid *chainhash.Hash
	targetOutputs := []*wire.TxOut{fundingOutput}
	if publish {
		txid, err = net.Miner.SendOutputsWithoutChange(
			targetOutputs, 5,
		)
		require.NoError(t.t, err)
	} else {
		tx, err := net.Miner.CreateTransaction(targetOutputs, 5, false)
		require.NoError(t.t, err)

		txHash := tx.TxHash()
		txid = &txHash
	}

	// At this point, we can begin our external channel funding workflow.
	// We'll start by generating a pending channel ID externally that will
	// be used to track this new funding type.
	var pendingChanID [32]byte
	_, err = rand.Read(pendingChanID[:])
	require.NoError(t.t, err)

	// Now that we have the pending channel ID, Dave (our responder) will
	// register the intent to receive a new channel funding workflow using
	// the pending channel ID.
	chanPoint := &lnrpc.ChannelPoint{
		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
			FundingTxidBytes: txid[:],
		},
	}
	chanPointShim := &lnrpc.ChanPointShim{
		Amt: int64(chanSize),
		ChanPoint: chanPoint,
		LocalKey: &lnrpc.KeyDescriptor{
			RawKeyBytes: daveFundingKey.RawKeyBytes,
			KeyLoc: &lnrpc.KeyLocator{
				KeyFamily: daveFundingKey.KeyLoc.KeyFamily,
				KeyIndex: daveFundingKey.KeyLoc.KeyIndex,
			},
		},
		RemoteKey: carolFundingKey.RawKeyBytes,
		PendingChanId: pendingChanID[:],
		ThawHeight: thawHeight,
	}
	fundingShim := &lnrpc.FundingShim{
		Shim: &lnrpc.FundingShim_ChanPointShim{
			ChanPointShim: chanPointShim,
		},
	}
	_, err = dave.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
			ShimRegister: fundingShim,
		},
	})
	require.NoError(t.t, err)

	// If we attempt to register the same shim (has the same pending chan
	// ID), then we should get an error.
	_, err = dave.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{
			ShimRegister: fundingShim,
		},
	})
	if err == nil {
		t.Fatalf("duplicate pending channel ID funding shim " +
			"registration should trigger an error")
	}

	// We'll take the chan point shim we just registered for Dave (the
	// responder), and swap the local/remote keys before we feed it in as
	// Carol's funding shim as the initiator.
	fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{
		RawKeyBytes: carolFundingKey.RawKeyBytes,
		KeyLoc: &lnrpc.KeyLocator{
			KeyFamily: carolFundingKey.KeyLoc.KeyFamily,
			KeyIndex: carolFundingKey.KeyLoc.KeyIndex,
		},
	}
	fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes

	return fundingShim, chanPoint, txid
}

// testBatchChanFunding makes sure multiple channels can be opened in one batch
// transaction in an atomic way.
func testBatchChanFunding(net *lntest.NetworkHarness, t *harnessTest) {

@@ -1,18 +1,16 @@
package itest

import (
	"context"
	"fmt"
	"time"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/lncfg"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/lightningnetwork/lnd/lntemp"
	"github.com/lightningnetwork/lnd/lntemp/node"
	"github.com/lightningnetwork/lnd/lntemp/rpc"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/stretchr/testify/require"
)

@@ -22,41 +20,35 @@ import (
// resolve them using the second level timeout and success transactions. In
// case of anchor channels, the second-level spends can also be aggregated and
// properly feebumped, so we'll check that as well.
func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
	alice, bob *lntest.HarnessNode, c lnrpc.CommitmentType,
	zeroConf bool) {
func testMultiHopHtlcAggregation(ht *lntemp.HarnessTest,
	alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {

	const finalCltvDelta = 40
	ctxb := context.Background()

	// First, we'll create a three hop network: Alice -> Bob -> Carol.
	aliceChanPoint, bobChanPoint, carol := createThreeHopNetworkOld(
		t, net, alice, bob, false, c, zeroConf,
	aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
		ht, alice, bob, false, c, zeroConf,
	)
	defer shutdownAndAssert(net, t, carol)

	// For neutrino backend, we need one additional UTXO to create
	// the sweeping tx for the second-level success txes.
	if ht.IsNeutrinoBackend() {
		ht.FundCoins(btcutil.SatoshiPerBitcoin, bob)
	}

	// To ensure we have capacity in both directions of the route, we'll
	// make a fairly large payment Alice->Carol and settle it.
	const reBalanceAmt = 500_000
	invoice := &lnrpc.Invoice{
		Value: reBalanceAmt,
	}
	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	resp, err := carol.AddInvoice(ctxt, invoice)
	require.NoError(t.t, err)
	invoice := &lnrpc.Invoice{Value: reBalanceAmt}
	resp := carol.RPC.AddInvoice(invoice)

	sendReq := &routerrpc.SendPaymentRequest{
		PaymentRequest: resp.PaymentRequest,
		TimeoutSeconds: 60,
		FeeLimitMsat: noFeeLimitMsat,
	}
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	stream, err := alice.RouterClient.SendPaymentV2(ctxt, sendReq)
	require.NoError(t.t, err)

	result, err := getPaymentResult(stream)
	require.NoError(t.t, err)
	require.Equal(t.t, result.Status, lnrpc.Payment_SUCCEEDED)
	stream := alice.RPC.SendPayment(sendReq)
	ht.AssertPaymentStatusFromStream(stream, lnrpc.Payment_SUCCEEDED)

	// With the network active, we'll now add new hodl invoices at both
	// Alice's and Carol's end. Make sure the cltv expiry delta is large

@@ -69,112 +61,101 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
		aliceInvoices []*invoicesrpc.AddHoldInvoiceResp
		alicePreimages []lntypes.Preimage
		payHashes [][]byte
		alicePayHashes [][]byte
		carolPayHashes [][]byte
		invoiceStreamsCarol []rpc.SingleInvoiceClient
		invoiceStreamsAlice []rpc.SingleInvoiceClient
	)

	// Add Carol invoices.
	for i := 0; i < numInvoices; i++ {
		preimage := lntypes.Preimage{1, 1, 1, byte(i)}
		var preimage lntypes.Preimage
		copy(preimage[:], ht.Random32Bytes())
		payHash := preimage.Hash()
		invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
			Value: invoiceAmt,
			CltvExpiry: finalCltvDelta,
			Hash: payHash[:],
		}
		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
		defer cancel()
		carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
		require.NoError(t.t, err)
		carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)

		carolInvoices = append(carolInvoices, carolInvoice)
		payHashes = append(payHashes, payHash[:])
		carolPayHashes = append(carolPayHashes, payHash[:])

		// Subscribe the invoice.
		stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
		invoiceStreamsCarol = append(invoiceStreamsCarol, stream)
	}

	// We'll give Alice's invoices a longer CLTV expiry, to ensure the
	// channel Bob<->Carol will be closed first.
	for i := 0; i < numInvoices; i++ {
		preimage := lntypes.Preimage{2, 2, 2, byte(i)}
		var preimage lntypes.Preimage
		copy(preimage[:], ht.Random32Bytes())
		payHash := preimage.Hash()
		invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
			Value: invoiceAmt,
			CltvExpiry: 2 * finalCltvDelta,
			Hash: payHash[:],
		}
		ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
		defer cancel()
		aliceInvoice, err := alice.AddHoldInvoice(ctxt, invoiceReq)
		require.NoError(t.t, err)
		aliceInvoice := alice.RPC.AddHoldInvoice(invoiceReq)

		aliceInvoices = append(aliceInvoices, aliceInvoice)
		alicePreimages = append(alicePreimages, preimage)
		payHashes = append(payHashes, payHash[:])
		alicePayHashes = append(alicePayHashes, payHash[:])

		// Subscribe the invoice.
		stream := alice.RPC.SubscribeSingleInvoice(payHash[:])
		invoiceStreamsAlice = append(invoiceStreamsAlice, stream)
	}

	// Now that we've created the invoices, we'll pay them all from
	// Alice<->Carol, going through Bob. We won't wait for the response
	// however, as neither will immediately settle the payment.
	ctx, cancel := context.WithCancel(ctxb)
	defer cancel()

	// Alice will pay all of Carol's invoices.
	for _, carolInvoice := range carolInvoices {
		_, err = alice.RouterClient.SendPaymentV2(
			ctx, &routerrpc.SendPaymentRequest{
		req := &routerrpc.SendPaymentRequest{
			PaymentRequest: carolInvoice.PaymentRequest,
			TimeoutSeconds: 60,
			FeeLimitMsat: noFeeLimitMsat,
			},
		)
		require.NoError(t.t, err)
	}
		alice.RPC.SendPayment(req)
	}

	// And Carol will pay Alice's.
	for _, aliceInvoice := range aliceInvoices {
		_, err = carol.RouterClient.SendPaymentV2(
			ctx, &routerrpc.SendPaymentRequest{
		req := &routerrpc.SendPaymentRequest{
			PaymentRequest: aliceInvoice.PaymentRequest,
			TimeoutSeconds: 60,
			FeeLimitMsat: noFeeLimitMsat,
			},
		)
		require.NoError(t.t, err)
	}
		carol.RPC.SendPayment(req)
	}

	// At this point, all 3 nodes should now have the HTLCs active on their
	// channels.
	nodes := []*lntest.HarnessNode{alice, bob, carol}
	err = wait.NoError(func() error {
		return assertActiveHtlcs(nodes, payHashes...)
	}, defaultTimeout)
	require.NoError(t.t, err)
	ht.AssertActiveHtlcs(alice, payHashes...)
	ht.AssertActiveHtlcs(bob, payHashes...)
	ht.AssertActiveHtlcs(carol, payHashes...)

	// Wait for Alice and Carol to mark the invoices as accepted. There is
	// a small gap to bridge between adding the htlc to the channel and
	// executing the exit hop logic.
	for _, payHash := range carolPayHashes {
		h := lntypes.Hash{}
		copy(h[:], payHash)
		waitForInvoiceAccepted(t, carol, h)
	for _, stream := range invoiceStreamsCarol {
		ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
	}

	for _, payHash := range alicePayHashes {
		h := lntypes.Hash{}
		copy(h[:], payHash)
		waitForInvoiceAccepted(t, alice, h)
	for _, stream := range invoiceStreamsAlice {
		ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
	}

	// Increase the fee estimate so that the following force close tx will
	// be cpfp'ed.
	net.SetFeeEstimate(30000)
	ht.SetFeeEstimate(30000)

	// We want Carol's htlcs to expire off-chain to demonstrate bob's force
	// close. However, Carol will cancel her invoices to prevent force
	// closes, so we shut her down for now.
	restartCarol, err := net.SuspendNode(carol)
	require.NoError(t.t, err)
	restartCarol := ht.SuspendNode(carol)

	// We'll now mine enough blocks to trigger Bob's broadcast of his
	// commitment transaction due to the fact that Carol's HTLCs are

@@ -183,8 +164,7 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
	numBlocks := padCLTV(
		uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
	)
	_, err = net.Miner.Client.Generate(numBlocks)
	require.NoError(t.t, err)
	ht.MineBlocksAssertNodesSync(numBlocks)

	// Bob's force close transaction should now be found in the mempool. If
	// there are anchors, we also expect Bob's anchor sweep.

@@ -193,22 +173,15 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
	if hasAnchors {
		expectedTxes = 2
	}
	ht.Miner.AssertNumTxsInMempool(expectedTxes)

	bobFundingTxid, err := lnrpc.GetChanPointFundingTxid(bobChanPoint)
	require.NoError(t.t, err)
	_, err = waitForNTxsInMempool(
		net.Miner.Client, expectedTxes, minerMempoolTimeout,
	)
	require.NoError(t.t, err)
	closeTx := getSpendingTxInMempool(
		t, net.Miner.Client, minerMempoolTimeout, wire.OutPoint{
			Hash: *bobFundingTxid,
			Index: bobChanPoint.OutputIndex,
		},
	closeTx := ht.Miner.AssertOutpointInMempool(
		ht.OutPointFromChannelPoint(bobChanPoint),
	)
	closeTxid := closeTx.TxHash()

	// Go through the closing transaction outputs, and make an index for the HTLC outputs.
	// Go through the closing transaction outputs, and make an index for
	// the HTLC outputs.
	successOuts := make(map[wire.OutPoint]struct{})
	timeoutOuts := make(map[wire.OutPoint]struct{})
	for i, txOut := range closeTx.TxOut {

@@ -224,33 +197,26 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
		case invoiceAmt:
			timeoutOuts[op] = struct{}{}

		// If the HTLC has direction towards Alice, Bob will
		// claim it with the success TX when he learns the preimage. In
		// this case one extra sat will be on the output, because of
		// the routing fee.
		// If the HTLC has direction towards Alice, Bob will claim it
		// with the success TX when he learns the preimage. In this
		// case one extra sat will be on the output, because of the
		// routing fee.
		case invoiceAmt + 1:
			successOuts[op] = struct{}{}
		}
	}

	// Once bob has force closed, we can restart carol.
	require.NoError(t.t, restartCarol())
	require.NoError(ht, restartCarol())

	// Mine a block to confirm the closing transaction.
	mineBlocks(t, net, 1, expectedTxes)

	time.Sleep(1 * time.Second)
	ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)

	// Let Alice settle her invoices. When Bob now gets the preimages, he
	// has no other option than to broadcast his second-level transactions
	// to claim the money.
	for _, preimage := range alicePreimages {
		ctx, cancel = context.WithTimeout(ctxb, defaultTimeout)
		defer cancel()
		_, err = alice.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
			Preimage: preimage[:],
		})
		require.NoError(t.t, err)
		alice.RPC.SettleInvoice(preimage[:])
	}

	switch c {

@@ -271,13 +237,9 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
		expectedTxes = 4

	default:
		t.Fatalf("unhandled commitment type %v", c)
		ht.Fatalf("unhandled commitment type %v", c)
	}

	txes, err := getNTxsFromMempool(
		net.Miner.Client, expectedTxes, minerMempoolTimeout,
	)
	require.NoError(t.t, err)
	txes := ht.Miner.GetNumTxsFromMempool(expectedTxes)

	// Since Bob can aggregate the transactions, we expect a single
	// transaction, that has multiple spends from the commitment.

@@ -306,81 +268,53 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
	// levels to be aggregated into one tx. For earlier channel types, they
	// will be separate transactions.
	if hasAnchors {
		require.Len(t.t, timeoutTxs, 1)
		require.Len(t.t, successTxs, 1)
		require.Len(ht, timeoutTxs, 1)
		require.Len(ht, successTxs, 1)
	} else {
		require.Len(t.t, timeoutTxs, numInvoices)
		require.Len(t.t, successTxs, numInvoices)
		require.Len(ht, timeoutTxs, numInvoices)
		require.Len(ht, successTxs, numInvoices)
	}

	// All mempool transactions should be spending from the commitment
	// transaction.
	assertAllTxesSpendFrom(t, txes, closeTxid)
	ht.AssertAllTxesSpendFrom(txes, closeTxid)

	// Mine a block to confirm the transactions.
	block := mineBlocks(t, net, 1, expectedTxes)[0]
	require.Len(t.t, block.Transactions, expectedTxes+1)
	// Mine a block to confirm all the transactions, including Carol's
	// commitment tx, anchor tx (optional), and the second-level timeout and
	// success txes.
	block := ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)[0]
	require.Len(ht, block.Transactions, expectedTxes+1)

	// At this point, Bob should have broadcast his second layer success
	// transaction, and should have sent it to the nursery for incubation,
	// or to the sweeper for sweeping.
	err = waitForNumChannelPendingForceClose(
		bob, 1, func(c *lnrpcForceCloseChannel) error {
			if c.Channel.LocalBalance != 0 {
				return nil
			}
	ht.AssertNumPendingForceClose(bob, 1)

			if len(c.PendingHtlcs) != 1 {
				return fmt.Errorf("bob should have pending " +
					"htlc but doesn't")
			}

			if c.PendingHtlcs[0].Stage != 1 {
				return fmt.Errorf("bob's htlc should have "+
					"advanced to the first stage but was "+
					"stage: %v", c.PendingHtlcs[0].Stage)
			}

			return nil
		},
	)
	require.NoError(t.t, err)
	// For this channel, we also check the number of HTLCs and the stage
	// are correct.
	ht.AssertNumHTLCsAndStage(bob, bobChanPoint, numInvoices*2, 2)

	if c != lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
		// If we then mine additional blocks, Bob can sweep his commitment
		// output.
		_, err = net.Miner.Client.Generate(defaultCSV - 2)
		require.NoError(t.t, err)
		// If we then mine additional blocks, Bob can sweep his
		// commitment output.
		ht.MineBlocksAssertNodesSync(defaultCSV - 2)

		// Find the commitment sweep.
		bobCommitSweepHash, err := waitForTxInMempool(
			net.Miner.Client, minerMempoolTimeout,
		)
		require.NoError(t.t, err)
		bobCommitSweep, err := net.Miner.Client.GetRawTransaction(
			bobCommitSweepHash,
		)
		require.NoError(t.t, err)

		require.Equal(
			t.t, closeTxid,
			bobCommitSweep.MsgTx().TxIn[0].PreviousOutPoint.Hash,
		)
		bobCommitSweep := ht.Miner.GetNumTxsFromMempool(1)[0]
		ht.AssertTxSpendFrom(bobCommitSweep, closeTxid)

		// Also ensure it is not spending from any of the HTLC output.
		for _, txin := range bobCommitSweep.MsgTx().TxIn {
		for _, txin := range bobCommitSweep.TxIn {
			for _, timeoutTx := range timeoutTxs {
				if *timeoutTx == txin.PreviousOutPoint.Hash {
					t.Fatalf("found unexpected spend of " +
						"timeout tx")
				}
				require.NotEqual(ht, *timeoutTx,
					txin.PreviousOutPoint.Hash,
					"found unexpected spend of timeout tx")
			}

			for _, successTx := range successTxs {
				if *successTx == txin.PreviousOutPoint.Hash {
					t.Fatalf("found unexpected spend of " +
						"success tx")
				}
				require.NotEqual(ht, *successTx,
					txin.PreviousOutPoint.Hash,
					"found unexpected spend of success tx")
			}
		}
	}

@@ -390,43 +324,42 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
	// the nursery waits an extra block before sweeping. Before the blocks
	// are mined, we should expect to see Bob's commit sweep in the mempool.
	case lnrpc.CommitmentType_LEGACY:
		_ = mineBlocks(t, net, 2, 1)
		ht.Miner.MineBlocksAndAssertNumTxes(2, 1)

	// Mining one additional block, Bob's second level tx is mature, and he
	// can sweep the output. Before the blocks are mined, we should expect
	// to see Bob's commit sweep in the mempool.
	case lnrpc.CommitmentType_ANCHORS:
		_ = mineBlocks(t, net, 1, 1)
		ht.Miner.MineBlocksAndAssertNumTxes(1, 1)

	// Since Bob is the initiator of the Bob-Carol script-enforced leased
	// channel, he incurs an additional CLTV when sweeping outputs back to
	// his wallet. We'll need to mine enough blocks for the timelock to
	// expire to prompt his broadcast.
	case lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE:
		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
		resp, err := bob.PendingChannels(
			ctxt, &lnrpc.PendingChannelsRequest{},
		)
		require.NoError(t.t, err)
		require.Len(t.t, resp.PendingForceClosingChannels, 1)
		resp := bob.RPC.PendingChannels()
		require.Len(ht, resp.PendingForceClosingChannels, 1)
		forceCloseChan := resp.PendingForceClosingChannels[0]
		require.Positive(t.t, forceCloseChan.BlocksTilMaturity)
		_ = mineBlocks(t, net, uint32(forceCloseChan.BlocksTilMaturity), 0)
		require.Positive(ht, forceCloseChan.BlocksTilMaturity)
		numBlocks := uint32(forceCloseChan.BlocksTilMaturity)

		// Add debug log.
		_, height := ht.Miner.GetBestBlock()
		bob.AddToLogf("itest: now mine %d blocks at height %d",
			numBlocks, height)
		ht.MineBlocksAssertNodesSync(numBlocks)

	default:
		t.Fatalf("unhandled commitment type %v", c)
		ht.Fatalf("unhandled commitment type %v", c)
	}

	bobSweep, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
	require.NoError(t.t, err)

	// Make sure it spends from the second level tx.
	secondLevelSweep, err := net.Miner.Client.GetRawTransaction(bobSweep)
	require.NoError(t.t, err)
	secondLevelSweep := ht.Miner.GetNumTxsFromMempool(1)[0]
	bobSweep := secondLevelSweep.TxHash()

	// It should be sweeping all the second-level outputs.
	var secondLvlSpends int
	for _, txin := range secondLevelSweep.MsgTx().TxIn {
	for _, txin := range secondLevelSweep.TxIn {
		for _, timeoutTx := range timeoutTxs {
			if *timeoutTx == txin.PreviousOutPoint.Hash {
				secondLvlSpends++

@@ -440,25 +373,22 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
		}
	}

	require.Equal(t.t, 2*numInvoices, secondLvlSpends)
	require.Equal(ht, 2*numInvoices, secondLvlSpends)

	// When we mine one additional block, that will confirm Bob's second
	// level sweep. Now Bob should have no pending channels anymore, as
	// this just resolved it by the confirmation of the sweep transaction.
	block = mineBlocks(t, net, 1, 1)[0]
	assertTxInBlock(t, block, bobSweep)

	err = waitForNumChannelPendingForceClose(bob, 0, nil)
	require.NoError(t.t, err)
	block = ht.Miner.MineBlocksAndAssertNumTxes(1, 1)[0]
	ht.Miner.AssertTxInBlock(block, &bobSweep)
	ht.AssertNumPendingForceClose(bob, 0)

	// The channel with Alice is still open.
	assertNodeNumChannels(t, bob, 1)
	ht.AssertNodeNumChannels(bob, 1)

	// Carol should have no channels left (open nor pending).
	err = waitForNumChannelPendingForceClose(carol, 0, nil)
	require.NoError(t.t, err)
	assertNodeNumChannels(t, carol, 0)
	ht.AssertNumPendingForceClose(carol, 0)
	ht.AssertNodeNumChannels(carol, 0)

	// Coop close channel, expect no anchors.
	closeChannelAndAssertType(t, net, alice, aliceChanPoint, false, false)
	// Coop close, no anchors.
	ht.CloseChannel(alice, aliceChanPoint)
}

@@ -15,7 +15,6 @@ import (
	"github.com/lightningnetwork/lnd/lntemp/rpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/stretchr/testify/require"
)

func testMultiHopHtlcClaims(ht *lntemp.HarnessTest) {

@@ -67,11 +66,11 @@ func testMultiHopHtlcClaims(ht *lntemp.HarnessTest) {
			name: "remote chain claim",
			test: testMultiHopHtlcRemoteChainClaim,
		},
		// {
		// 	// bob: outgoing and incoming, sweep all on chain
		// 	name: "local htlc aggregation",
		// 	test: testMultiHopHtlcAggregation,
		// },
		{
			// bob: outgoing and incoming, sweep all on chain
			name: "local htlc aggregation",
			test: testMultiHopHtlcAggregation,
		},
	}

	commitWithZeroConf := []struct {

@@ -237,151 +236,6 @@ func checkPaymentStatus(node *lntest.HarnessNode, preimage lntypes.Preimage,
	return nil
}

// TODO(yy): delete.
func createThreeHopNetworkOld(t *harnessTest, net *lntest.NetworkHarness,
	alice, bob *lntest.HarnessNode, carolHodl bool, c lnrpc.CommitmentType,
	zeroConf bool) (
	*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) {

	net.EnsureConnected(t.t, alice, bob)

	// Make sure there are enough utxos for anchoring.
	for i := 0; i < 2; i++ {
		net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, alice)
		net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, bob)
	}

	// We'll start the test by creating a channel between Alice and Bob,
	// which will act as the first leg for our multi-hop HTLC.
	const chanAmt = 1000000
	var aliceFundingShim *lnrpc.FundingShim
	var thawHeight uint32
	if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
		_, minerHeight, err := net.Miner.Client.GetBestBlock()
		require.NoError(t.t, err)
		thawHeight = uint32(minerHeight + 144)
		aliceFundingShim, _, _ = deriveFundingShimOld(
			net, t, alice, bob, chanAmt, thawHeight, true,
		)
	}

	// If a zero-conf channel is being opened, the nodes are signalling the
	// zero-conf feature bit. Setup a ChannelAcceptor for the fundee.
	ctxb := context.Background()

	var (
		cancel context.CancelFunc
		ctxc context.Context
	)

	if zeroConf {
		ctxc, cancel = context.WithCancel(ctxb)
		acceptStream, err := bob.ChannelAcceptor(ctxc)
		require.NoError(t.t, err)
		go acceptChannel(t.t, true, acceptStream)
	}

	aliceChanPoint := openChannelAndAssert(
		t, net, alice, bob,
		lntest.OpenChannelParams{
			Amt: chanAmt,
			CommitmentType: c,
			FundingShim: aliceFundingShim,
			ZeroConf: zeroConf,
		},
	)

	// Remove the ChannelAcceptor for Bob.
	if zeroConf {
		cancel()
	}

	err := alice.WaitForNetworkChannelOpen(aliceChanPoint)
	if err != nil {
		t.Fatalf("alice didn't report channel: %v", err)
	}

	err = bob.WaitForNetworkChannelOpen(aliceChanPoint)
	if err != nil {
		t.Fatalf("bob didn't report channel: %v", err)
	}

	// Next, we'll create a new node "carol" and have Bob connect to her. If
	// the carolHodl flag is set, we'll make carol always hold onto the
	// HTLC, this way it'll force Bob to go to chain to resolve the HTLC.
	carolFlags := nodeArgsForCommitType(c)
	if carolHodl {
		carolFlags = append(carolFlags, "--hodl.exit-settle")
	}

	if zeroConf {
		carolFlags = append(
			carolFlags, "--protocol.option-scid-alias",
			"--protocol.zero-conf",
		)
	}

	carol := net.NewNode(t.t, "Carol", carolFlags)

	net.ConnectNodes(t.t, bob, carol)

	// Make sure Carol has enough utxos for anchoring. Because the anchor by
	// itself often doesn't meet the dust limit, a utxo from the wallet
	// needs to be attached as an additional input. This can still lead to a
	// positively-yielding transaction.
	for i := 0; i < 2; i++ {
		net.SendCoins(t.t, btcutil.SatoshiPerBitcoin, carol)
	}

	// We'll then create a channel from Bob to Carol. After this channel is
	// open, our topology looks like: A -> B -> C.
	var bobFundingShim *lnrpc.FundingShim
	if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
		bobFundingShim, _, _ = deriveFundingShimOld(
			net, t, bob, carol, chanAmt, thawHeight, true,
		)
	}

	// Setup a ChannelAcceptor for Carol if a zero-conf channel open is
	// being attempted.
	if zeroConf {
		ctxc, cancel = context.WithCancel(ctxb)
		acceptStream, err := carol.ChannelAcceptor(ctxc)
		require.NoError(t.t, err)
		go acceptChannel(t.t, true, acceptStream)
	}

	bobChanPoint := openChannelAndAssert(
		t, net, bob, carol,
		lntest.OpenChannelParams{
			Amt: chanAmt,
			CommitmentType: c,
			FundingShim: bobFundingShim,
			ZeroConf: zeroConf,
		},
	)

	// Remove the ChannelAcceptor for Carol.
	if zeroConf {
		cancel()
	}

	err = bob.WaitForNetworkChannelOpen(bobChanPoint)
	if err != nil {
		t.Fatalf("alice didn't report channel: %v", err)
	}
	err = carol.WaitForNetworkChannelOpen(bobChanPoint)
	if err != nil {
		t.Fatalf("bob didn't report channel: %v", err)
	}
	err = alice.WaitForNetworkChannelOpen(bobChanPoint)
	if err != nil {
		t.Fatalf("bob didn't report channel: %v", err)
	}

	return aliceChanPoint, bobChanPoint, carol
}

// assertAllTxesSpendFrom asserts that all txes in the list spend from the given
// tx.
func assertAllTxesSpendFrom(t *harnessTest, txes []*wire.MsgTx,