itest: refactor testMultiHopReceiverChainClaim

yyforyongyu 2022-07-29 11:38:13 +08:00
parent 5a7c6cea02
commit d51f98efc6
GPG key ID: 9BCD95C4FF296868
6 changed files with 366 additions and 146 deletions


@@ -671,9 +671,15 @@ type OpenChannelParams struct {
// destNode with the passed channel funding parameters. Once the `OpenChannel`
// is called, it will consume the first event it receives from the open channel
// client and asserts it's a channel pending event.
func (h *HarnessTest) OpenChannelAssertPending(
srcNode, destNode *node.HarnessNode,
p OpenChannelParams) rpc.OpenChanClient {
func (h *HarnessTest) OpenChannelAssertPending(srcNode,
destNode *node.HarnessNode, p OpenChannelParams) rpc.OpenChanClient {
// Wait until srcNode and destNode have the latest chain synced.
// Otherwise, we may run into a check within the funding manager that
// prevents any funding workflows from being kicked off if the chain
// isn't yet synced.
h.WaitForBlockchainSync(srcNode)
h.WaitForBlockchainSync(destNode)
// Specify the minimal confirmations of the UTXOs used for channel
// funding.
@@ -722,13 +728,6 @@ func (h *HarnessTest) OpenChannelAssertPending(
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
p OpenChannelParams) *lnrpc.ChannelPoint {
// Wait until srcNode and destNode have the latest chain synced.
// Otherwise, we may run into a check within the funding manager that
// prevents any funding workflows from being kicked off if the chain
// isn't yet synced.
h.WaitForBlockchainSync(alice)
h.WaitForBlockchainSync(bob)
chanOpenUpdate := h.OpenChannelAssertPending(alice, bob, p)
// Mine 6 blocks, then wait for Alice's node to notify us that the

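For illustration, a hedged sketch of a caller after this change (not part of the diff); alice, bob, and chanAmt are assumed to exist in the calling test, and the Amt field of OpenChannelParams is assumed from the existing struct:
// OpenChannelAssertPending now waits for both nodes to be synced to the
// chain itself, so callers no longer need a separate WaitForBlockchainSync.
pendingStream := ht.OpenChannelAssertPending(
	alice, bob, lntemp.OpenChannelParams{Amt: chanAmt},
)
// Mining the confirmation blocks and consuming pendingStream is left to
// the caller, e.g. via the higher-level OpenChannel helper shown above.
_ = pendingStream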

@@ -12,6 +12,7 @@ import (
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
@@ -1039,3 +1040,227 @@ func (h *HarnessTest) AssertActiveHtlcs(hn *node.HarnessNode,
}, DefaultTimeout)
require.NoError(h, err, "timeout checking active HTLCs")
}
// ReceiveSingleInvoice waits until a message is received on the subscribe
// single invoice stream or the timeout is reached.
func (h *HarnessTest) ReceiveSingleInvoice(
stream rpc.SingleInvoiceClient) *lnrpc.Invoice {
chanMsg := make(chan *lnrpc.Invoice, 1)
errChan := make(chan error, 1)
go func() {
// Consume one message. This will block until the message is
// received.
resp, err := stream.Recv()
if err != nil {
errChan <- err
return
}
chanMsg <- resp
}()
select {
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout receiving single invoice")
case err := <-errChan:
require.Failf(h, "err from stream",
"received err from stream: %v", err)
case updateMsg := <-chanMsg:
return updateMsg
}
return nil
}
// AssertInvoiceState takes a single invoice subscription stream and asserts
// that the invoice has reached the desired state before the timeout, and
// returns the invoice found.
func (h *HarnessTest) AssertInvoiceState(stream rpc.SingleInvoiceClient,
state lnrpc.Invoice_InvoiceState) *lnrpc.Invoice {
var invoice *lnrpc.Invoice
err := wait.NoError(func() error {
invoice = h.ReceiveSingleInvoice(stream)
if invoice.State == state {
return nil
}
return fmt.Errorf("mismatched invoice state, want %v, got %v",
state, invoice.State)
}, DefaultTimeout)
require.NoError(h, err, "timeout waiting for invoice state: %v", state)
return invoice
}
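For illustration, a hedged usage sketch of the two new stream helpers (not part of the diff); carol, ht, and payHash are assumed from the calling test:
// Subscribe to the invoice using the RPC wrapper added in this commit,
// then block until it reaches the ACCEPTED state, failing on timeout.
stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
invoice := ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
require.Equal(ht, lnrpc.Invoice_ACCEPTED, invoice.State)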
// AssertAllTxesSpendFrom asserts that all txes in the list spend from the
// given tx.
func (h *HarnessTest) AssertAllTxesSpendFrom(txes []*wire.MsgTx,
prevTxid chainhash.Hash) {
for _, tx := range txes {
if tx.TxIn[0].PreviousOutPoint.Hash != prevTxid {
require.Failf(h, "", "tx %v did not spend from %v",
tx.TxHash(), prevTxid)
}
}
}
// AssertTxSpendFrom asserts that the given tx spends from the specified
// previous tx.
func (h *HarnessTest) AssertTxSpendFrom(tx *wire.MsgTx,
prevTxid chainhash.Hash) {
if tx.TxIn[0].PreviousOutPoint.Hash != prevTxid {
require.Failf(h, "", "tx %v did not spend from %v",
tx.TxHash(), prevTxid)
}
}
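A hedged sketch (not part of the diff) showing these asserts together with the GetNumTxsFromMempool helper added later in this commit; expectedTxes and closingTxid are assumed from the calling test:
// Fetch the expected transactions from the mempool and assert that every
// one of them spends from the commitment (closing) transaction.
txes := ht.Miner.GetNumTxsFromMempool(expectedTxes)
ht.AssertAllTxesSpendFrom(txes, closingTxid)
// The single-tx variant can be used when only one sweep is expected.
ht.AssertTxSpendFrom(txes[0], closingTxid)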
type PendingForceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel
// AssertChannelPendingForceClose asserts that the given channel found in the
// node is pending force close. Returns the PendingForceClose if found.
func (h *HarnessTest) AssertChannelPendingForceClose(hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint) PendingForceClose {
var target PendingForceClose
op := h.OutPointFromChannelPoint(chanPoint)
err := wait.NoError(func() error {
resp := hn.RPC.PendingChannels()
forceCloseChans := resp.PendingForceClosingChannels
for _, ch := range forceCloseChans {
if ch.Channel.ChannelPoint == op.String() {
target = ch
return nil
}
}
return fmt.Errorf("%v: channel %s not found in pending "+
"force close", hn.Name(), chanPoint)
}, DefaultTimeout)
require.NoError(h, err, "assert pending force close timed out")
return target
}
// AssertNumHTLCsAndStage takes a pending force close channel's channel point
// and asserts that the expected number of pending HTLCs and the expected HTLC
// stage are found.
func (h *HarnessTest) AssertNumHTLCsAndStage(hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint, num int, stage uint32) {
// Get the channel output point.
cp := h.OutPointFromChannelPoint(chanPoint)
var target PendingForceClose
checkStage := func() error {
resp := hn.RPC.PendingChannels()
if len(resp.PendingForceClosingChannels) == 0 {
return fmt.Errorf("zero pending force closing channels")
}
for _, ch := range resp.PendingForceClosingChannels {
if ch.Channel.ChannelPoint == cp.String() {
target = ch
break
}
}
if target == nil {
return fmt.Errorf("cannot find pending force closing "+
"channel using %v", cp)
}
if target.LimboBalance == 0 {
return fmt.Errorf("zero limbo balance")
}
if len(target.PendingHtlcs) != num {
return fmt.Errorf("got %d pending htlcs, want %d",
len(target.PendingHtlcs), num)
}
for i, htlc := range target.PendingHtlcs {
if htlc.Stage == stage {
continue
}
return fmt.Errorf("HTLC %d got stage: %v, "+
"want stage: %v", i, htlc.Stage, stage)
}
return nil
}
require.NoErrorf(h, wait.NoError(checkStage, DefaultTimeout),
"timeout waiting for htlc stage")
}
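A hedged sketch (not part of the diff) of how a test might combine these pending-force-close asserts; carol and bobChanPoint are assumed from the calling test:
// Carol's force-closed channel should report exactly one pending HTLC
// that has advanced to stage 2.
ht.AssertNumHTLCsAndStage(carol, bobChanPoint, 1, 2)
// The raw pending entry is also available for inspecting other fields,
// e.g. the limbo balance.
forceClose := ht.AssertChannelPendingForceClose(carol, bobChanPoint)
require.NotZero(ht, forceClose.LimboBalance)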
// findPayment queries the node's ListPayments for the payment that matches
// the specified preimage hash.
func (h *HarnessTest) findPayment(hn *node.HarnessNode,
preimage lntypes.Preimage) *lnrpc.Payment {
req := &lnrpc.ListPaymentsRequest{IncludeIncomplete: true}
paymentsResp := hn.RPC.ListPayments(req)
payHash := preimage.Hash()
for _, p := range paymentsResp.Payments {
if p.PaymentHash != payHash.String() {
continue
}
return p
}
require.Fail(h, "payment: %v not found", payHash)
return nil
}
// AssertPaymentStatus asserts that the payment with the given preimage, as
// listed by the given node, has the expected status. It also checks that the
// payment has the expected preimage, which is empty when the payment is not
// settled and matches the given preimage when it has succeeded.
func (h *HarnessTest) AssertPaymentStatus(hn *node.HarnessNode,
preimage lntypes.Preimage,
status lnrpc.Payment_PaymentStatus) *lnrpc.Payment {
var target *lnrpc.Payment
err := wait.NoError(func() error {
p := h.findPayment(hn, preimage)
if status == p.Status {
target = p
return nil
}
return fmt.Errorf("payment: %v status not match, want %s "+
"got %s", preimage, status, p.Status)
}, DefaultTimeout)
require.NoError(h, err, "timeout checking payment status")
switch status {
// If this expected status is SUCCEEDED, we expect the final
// preimage.
case lnrpc.Payment_SUCCEEDED:
require.Equal(h, preimage.String(), target.PaymentPreimage,
"preimage not match")
// Otherwise we expect an all-zero preimage.
default:
require.Equal(h, (lntypes.Preimage{}).String(),
target.PaymentPreimage, "expected zero preimage")
}
return target
}
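A hedged usage sketch (not part of the diff); alice and preimage are assumed from the calling test:
// A SUCCEEDED payment must carry the full preimage; an in-flight or
// failed payment reports an all-zero preimage instead.
payment := ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
require.Equal(ht, preimage.String(), payment.PaymentPreimage)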


@@ -361,3 +361,17 @@ func (h *HarnessMiner) AssertOutpointInMempool(op wire.OutPoint) *wire.MsgTx {
return msgTx
}
// GetNumTxsFromMempool polls until finding the desired number of transactions
// in the miner's mempool and returns the full transactions to the caller.
func (h *HarnessMiner) GetNumTxsFromMempool(n int) []*wire.MsgTx {
txids := h.AssertNumTxsInMempool(n)
var txes []*wire.MsgTx
for _, txid := range txids {
tx := h.GetRawTransaction(txid)
txes = append(txes, tx.MsgTx())
}
return txes
}
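A hedged usage sketch (not part of the diff); ht and expectedTxes are assumed from the calling test:
// Pull the full sweep transactions out of the mempool, then confirm them
// in a single block and assert the block contains that many txes.
txes := ht.Miner.GetNumTxsFromMempool(expectedTxes)
ht.Miner.MineBlocksAndAssertNumTxes(1, len(txes))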


@@ -24,3 +24,61 @@ func (h *HarnessRPC) LookupInvoiceV2(
return resp
}
// AddHoldInvoice adds a hold invoice for the given node and asserts.
func (h *HarnessRPC) AddHoldInvoice(
r *invoicesrpc.AddHoldInvoiceRequest) *invoicesrpc.AddHoldInvoiceResp {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
invoice, err := h.Invoice.AddHoldInvoice(ctxt, r)
h.NoError(err, "AddHoldInvoice")
return invoice
}
// SettleInvoice settles a given invoice and asserts.
func (h *HarnessRPC) SettleInvoice(
preimage []byte) *invoicesrpc.SettleInvoiceResp {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &invoicesrpc.SettleInvoiceMsg{Preimage: preimage}
resp, err := h.Invoice.SettleInvoice(ctxt, req)
h.NoError(err, "SettleInvoice")
return resp
}
// CancelInvoice cancels a given invoice and asserts.
func (h *HarnessRPC) CancelInvoice(
payHash []byte) *invoicesrpc.CancelInvoiceResp {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
req := &invoicesrpc.CancelInvoiceMsg{PaymentHash: payHash}
resp, err := h.Invoice.CancelInvoice(ctxt, req)
h.NoError(err, "CancelInvoice")
return resp
}
type SingleInvoiceClient invoicesrpc.Invoices_SubscribeSingleInvoiceClient
// SubscribeSingleInvoice creates a subscription client for the given invoice
// and asserts its creation.
func (h *HarnessRPC) SubscribeSingleInvoice(rHash []byte) SingleInvoiceClient {
req := &invoicesrpc.SubscribeSingleInvoiceRequest{RHash: rHash}
// SubscribeSingleInvoice needs to keep the context alive for the
// entire test case as the returned client will be used to send and
// receive events over the stream. Thus we use runCtx here instead of
// a timeout context.
client, err := h.Invoice.SubscribeSingleInvoice(h.runCtx, req)
h.NoError(err, "SubscribeSingleInvoice")
return client
}
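A hedged sketch (not part of the diff) of the hold-invoice flow built from these wrappers; alice, carol, ht, preimage, and noFeeLimitMsat are assumed from the calling test:
// Carol creates a hold invoice for the hash of a preimage only she knows.
payHash := preimage.Hash()
carolInvoice := carol.RPC.AddHoldInvoice(&invoicesrpc.AddHoldInvoiceRequest{
	Value:      100000,
	CltvExpiry: 40,
	Hash:       payHash[:],
})
// Subscribe to the invoice before paying it so no state update is missed.
stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
// Alice pays the invoice; it stays ACCEPTED until Carol reveals the
// preimage.
alice.RPC.SendPayment(&routerrpc.SendPaymentRequest{
	PaymentRequest: carolInvoice.PaymentRequest,
	TimeoutSeconds: 60,
	FeeLimitMsat:   noFeeLimitMsat,
})
ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
// Settling with the preimage moves the invoice to SETTLED.
carol.RPC.SettleInvoice(preimage[:])
ht.AssertInvoiceState(stream, lnrpc.Invoice_SETTLED)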


@@ -1,16 +1,13 @@
package itest
import (
"context"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntemp"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
@@ -21,85 +18,66 @@ import (
// transaction. In this scenario, the node that sent the outgoing HTLC should
// extract the preimage from the sweep transaction, and finish settling the
// HTLC backwards into the route.
func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
alice, bob *lntest.HarnessNode, c lnrpc.CommitmentType,
zeroConf bool) {
ctxb := context.Background()
func testMultiHopReceiverChainClaim(ht *lntemp.HarnessTest,
alice, bob *node.HarnessNode, c lnrpc.CommitmentType, zeroConf bool) {
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetworkOld(
t, net, alice, bob, false, c, zeroConf,
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
ht, alice, bob, false, c, zeroConf,
)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
// With the network active, we'll now add a new hodl invoice at Carol's
// end. Make sure the cltv expiry delta is large enough, otherwise Bob
// won't send out the outgoing htlc.
const invoiceAmt = 100000
preimage := lntypes.Preimage{1, 2, 4}
var preimage lntypes.Preimage
copy(preimage[:], ht.Random32Bytes())
payHash := preimage.Hash()
invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
Value: invoiceAmt,
CltvExpiry: 40,
Hash: payHash[:],
}
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
require.NoError(t.t, err)
carolInvoice := carol.RPC.AddHoldInvoice(invoiceReq)
// Subscribe the invoice.
stream := carol.RPC.SubscribeSingleInvoice(payHash[:])
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
// will not immediately settle the payment.
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
_, err = alice.RouterClient.SendPaymentV2(
ctx, &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
require.NoError(t.t, err)
req := &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
}
alice.RPC.SendPayment(req)
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash[:])
}, defaultTimeout)
require.NoError(t.t, err)
ht.AssertActiveHtlcs(alice, payHash[:])
ht.AssertActiveHtlcs(bob, payHash[:])
ht.AssertActiveHtlcs(carol, payHash[:])
// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
// hop logic.
waitForInvoiceAccepted(t, carol, payHash)
ht.AssertInvoiceState(stream, lnrpc.Invoice_ACCEPTED)
restartBob, err := net.SuspendNode(bob)
require.NoError(t.t, err)
restartBob := ht.SuspendNode(bob)
// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
// this test, it is important to actually settle and not leave the
// invoice in the accepted state, because without a known preimage, the
// channel arbitrator won't go to chain.
ctx, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
require.NoError(t.t, err)
carol.RPC.SettleInvoice(preimage[:])
// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
net.SetFeeEstimate(30000)
ht.SetFeeEstimate(30000)
// Now we'll mine enough blocks to prompt carol to actually go to the
// chain in order to sweep her HTLC since the value is high enough.
@@ -107,8 +85,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
numBlocks := padCLTV(uint32(
invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta,
))
_, err = net.Miner.Client.Generate(numBlocks)
require.NoError(t.t, err)
ht.MineBlocksAssertNodesSync(numBlocks)
// At this point, Carol should broadcast her active commitment
// transaction in order to go to the chain and sweep her HTLC. If there
@@ -118,32 +95,18 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
if hasAnchors {
expectedTxes = 2
}
_, err = getNTxsFromMempool(
net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
ht.Miner.AssertNumTxsInMempool(expectedTxes)
bobFundingTxid, err := lnrpc.GetChanPointFundingTxid(bobChanPoint)
require.NoError(t.t, err)
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
}
// The commitment transaction should be spending from the funding
// transaction.
closingTx := getSpendingTxInMempool(
t, net.Miner.Client, minerMempoolTimeout, carolFundingPoint,
closingTx := ht.Miner.AssertOutpointInMempool(
ht.OutPointFromChannelPoint(bobChanPoint),
)
closingTxid := closingTx.TxHash()
// Confirm the commitment.
mineBlocks(t, net, 1, expectedTxes)
ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)
// Restart bob again.
err = restartBob()
require.NoError(t.t, err)
require.NoError(ht, restartBob())
// After the force close transaction is mined, a series of transactions
// should be broadcast by Bob and Carol. When Bob notices Carol's second
@@ -170,97 +133,61 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
expectedTxes = 2
default:
t.Fatalf("unhandled commitment type %v", c)
ht.Fatalf("unhandled commitment type %v", c)
}
txes, err := getNTxsFromMempool(
net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
// All transactions should be spending from the commitment transaction.
assertAllTxesSpendFrom(t, txes, closingTxid)
txes := ht.Miner.GetNumTxsFromMempool(expectedTxes)
ht.AssertAllTxesSpendFrom(txes, closingTxid)
// We'll now mine an additional block which should confirm both the
// second layer transactions.
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)
time.Sleep(time.Second * 4)
ht.MineBlocksAssertNodesSync(1)
// TODO(roasbeef): assert bob pending state as well
// Carol's pending channel report should now show two outputs under
// limbo: her commitment output, as well as the second-layer claim
// output.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(ctxt, pendingChansRequest)
require.NoError(t.t, err)
require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels))
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
require.NotZero(t.t, forceCloseChan.LimboBalance)
// The pending HTLC carol has should also now be in stage 2.
require.Len(t.t, forceCloseChan.PendingHtlcs, 1)
require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
// output, and the pending HTLC should also now be in stage 2.
ht.AssertNumHTLCsAndStage(carol, bobChanPoint, 1, 2)
// Once the second-level transaction confirmed, Bob should have
// extracted the preimage from the chain, and sent it back to Alice,
// clearing the HTLC off-chain.
nodes = []*lntest.HarnessNode{alice}
err = wait.NoError(func() error {
return assertNumActiveHtlcs(nodes, 0)
}, defaultTimeout)
require.NoError(t.t, err)
ht.AssertNumActiveHtlcs(alice, 0)
// If we mine 4 additional blocks, then Carol can sweep the second level
// HTLC output.
_, err = net.Miner.Client.Generate(defaultCSV)
require.NoError(t.t, err)
ht.MineBlocksAssertNodesSync(defaultCSV)
// We should have a new transaction in the mempool.
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
ht.Miner.AssertNumTxsInMempool(1)
// Finally, if we mine an additional block to confirm these two sweep
// transactions, Carol should not show a pending channel in her report
// afterwards.
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)
err = waitForNumChannelPendingForceClose(carol, 0, nil)
require.NoError(t.t, err)
ht.MineBlocksAssertNodesSync(1)
ht.AssertNumPendingForceClose(carol, 0)
// The invoice should show as settled for Carol, indicating that it was
// swept on-chain.
invoicesReq := &lnrpc.ListInvoiceRequest{}
invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
require.NoError(t.t, err)
require.Len(t.t, invoicesResp.Invoices, 1)
invoice := invoicesResp.Invoices[0]
require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State)
require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat)
ht.AssertInvoiceSettled(carol, carolInvoice.PaymentAddr)
// Finally, check that Alice's payment is correctly marked as
// succeeded.
err = checkPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
require.NoError(t.t, err)
ht.AssertPaymentStatus(alice, preimage, lnrpc.Payment_SUCCEEDED)
if c == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
// Bob still has his commit output to sweep since he incurred
// an additional CLTV from being the channel initiator of a
// script-enforced leased channel, regardless of whether he
// force closed the channel or not.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, &lnrpc.PendingChannelsRequest{},
)
require.NoError(t.t, err)
pendingChanResp := bob.RPC.PendingChannels()
require.Len(t.t, pendingChanResp.PendingForceClosingChannels, 1)
require.Len(ht, pendingChanResp.PendingForceClosingChannels, 1)
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
require.Positive(t.t, forceCloseChan.LimboBalance)
require.Positive(t.t, forceCloseChan.BlocksTilMaturity)
require.Positive(ht, forceCloseChan.LimboBalance)
require.Positive(ht, forceCloseChan.BlocksTilMaturity)
// TODO: Bob still shows a pending HTLC at this point when he
// shouldn't, as he already extracted the preimage from Carol's
@@ -269,19 +196,16 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// Mine enough blocks for Bob's commit output's CLTV to expire
// and sweep it.
_ = mineBlocks(t, net, uint32(forceCloseChan.BlocksTilMaturity), 0)
numBlocks := uint32(forceCloseChan.BlocksTilMaturity)
ht.MineBlocksAssertNodesSync(numBlocks)
commitOutpoint := wire.OutPoint{Hash: closingTxid, Index: 3}
assertSpendingTxInMempool(
t, net.Miner.Client, minerMempoolTimeout, commitOutpoint,
)
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)
ht.Miner.AssertOutpointInMempool(commitOutpoint)
ht.MineBlocksAssertNodesSync(1)
}
err = waitForNumChannelPendingForceClose(bob, 0, nil)
require.NoError(t.t, err)
ht.AssertNumPendingForceClose(bob, 0)
// We'll close out the channel between Alice and Bob, then shutdown
// carol to conclude the test.
closeChannelAndAssertType(t, net, alice, aliceChanPoint, false, false)
ht.CloseChannel(alice, aliceChanPoint)
}


@@ -33,12 +33,12 @@ func testMultiHopHtlcClaims(ht *lntemp.HarnessTest) {
name: "local force close immediate expiry",
test: testMultiHopHtlcLocalTimeout,
},
// {
// // bob: outgoing watch and see, they sweep on chain
// // carol: incoming our commit, know preimage
// name: "receiver chain claim",
// test: testMultiHopReceiverChainClaim,
// },
{
// bob: outgoing watch and see, they sweep on chain
// carol: incoming our commit, know preimage
name: "receiver chain claim",
test: testMultiHopReceiverChainClaim,
},
// {
// // bob: outgoing our commit watch and see timeout
// // carol: incoming their commit watch and see timeout