package itest

import (
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/chanbackup"
	"github.com/lightningnetwork/lnd/funding"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/node"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/stretchr/testify/require"
)
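
// NOTE: several helpers referenced throughout this section, such as
// chanRestoreViaRPC, assertDLPExecuted, assertTimeLockSwept, deriveFundingShim
// and acceptChannel, as well as the defaultTimeout and thawHeightDelta
// constants, are assumed to be defined elsewhere in this itest package.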

type (
	// nodeRestorer is a function closure that allows each test case to
	// control exactly *how* the prior node is restored. This might be
	// using a backup obtained over RPC, or the file system, etc.
	nodeRestorer func() *node.HarnessNode

	// restoreMethodType takes an old node, then returns a function
	// closure that'll return the same node, but with its state restored
	// via a custom method. We use this to abstract away _how_ a node is
	// restored from our assertions once the node has been fully restored
	// itself.
	restoreMethodType func(ht *lntest.HarnessTest,
		oldNode *node.HarnessNode, backupFilePath string,
		password []byte, mnemonic []string) nodeRestorer
)

// revocationWindow is the revocation window we specify when restoring a node.
const revocationWindow = 100
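
// Note: revocationWindow is passed below as the recovery window argument to
// RestoreNodeWithSeed, i.e. (assuming the usual lnd semantics) the address
// look-ahead the restored wallet uses when scanning for its on-chain funds.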

// chanRestoreScenario represents a test case used for testing the channel
// restore methods.
type chanRestoreScenario struct {
	carol    *node.HarnessNode
	dave     *node.HarnessNode
	password []byte
	mnemonic []string
	params   lntest.OpenChannelParams
}

// newChanRestoreScenario creates a new scenario that has two nodes, Carol and
// Dave, connected and funded.
func newChanRestoreScenario(ht *lntest.HarnessTest, ct lnrpc.CommitmentType,
	zeroConf bool) *chanRestoreScenario {

	const (
		chanAmt = btcutil.Amount(10000000)
		pushAmt = btcutil.Amount(5000000)
	)

	password := []byte("El Psy Kongroo")
	nodeArgs := []string{
		"--minbackoff=50ms",
		"--maxbackoff=1s",
	}

	if ct != lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE {
		args := lntest.NodeArgsForCommitType(ct)
		nodeArgs = append(nodeArgs, args...)
	}

	if zeroConf {
		nodeArgs = append(
			nodeArgs, "--protocol.option-scid-alias",
			"--protocol.zero-conf",
		)
	}

	// First, we'll create a brand new node we'll use within the test. If
	// we have a custom backup file specified, then we'll also create that
	// for use.
	dave, mnemonic, _ := ht.NewNodeWithSeed(
		"dave", nodeArgs, password, false,
	)
	carol := ht.NewNode("carol", nodeArgs)

	// Now that our new nodes are created, we'll give them some coins for
	// channel opening and anchor sweeping.
	ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, carol)
	ht.FundCoinsUnconfirmed(btcutil.SatoshiPerBitcoin, dave)

	// Mine a block to confirm the funds.
	ht.MineBlocks(1)

	// For the anchor output case we need two UTXOs for Carol so she can
	// sweep both the local and remote anchor.
	if lntest.CommitTypeHasAnchors(ct) {
		ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
	}

	// Next, we'll connect Dave to Carol, and open a new channel to her
	// with a portion pushed.
	ht.ConnectNodes(dave, carol)

	return &chanRestoreScenario{
		carol:    carol,
		dave:     dave,
		mnemonic: mnemonic,
		password: password,
		params: lntest.OpenChannelParams{
			Amt:            chanAmt,
			PushAmt:        pushAmt,
			ZeroConf:       zeroConf,
			CommitmentType: ct,
		},
	}
}
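
// The run* helpers below all build on this scenario in roughly the same way:
// open a channel from Dave to Carol using crs.params, derive a nodeRestorer
// for the restore method under test, and hand it to crs.testScenario. A
// minimal sketch of that flow (where multi holds the packed multi-channel
// backup bytes, read from channel.backup or exported over RPC):
//
//	crs := newChanRestoreScenario(ht, ct, zeroConf)
//	ht.OpenChannel(crs.dave, crs.carol, crs.params)
//	restoredNodeFunc := chanRestoreViaRPC(
//		ht, crs.password, crs.mnemonic, multi, crs.dave,
//	)
//	crs.testScenario(ht, restoredNodeFunc)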

// restoreDave will call the `nodeRestorer` and assert that Dave is restored by
// checking that his wallet balance is above zero.
func (c *chanRestoreScenario) restoreDave(ht *lntest.HarnessTest,
	restoredNodeFunc nodeRestorer) *node.HarnessNode {

	// Next, we'll make a new Dave and start the bulk of our recovery
	// workflow.
	dave := restoredNodeFunc()

	// First ensure that the on-chain balance is restored.
	err := wait.NoError(func() error {
		daveBalResp := dave.RPC.WalletBalance()
		daveBal := daveBalResp.ConfirmedBalance
		if daveBal <= 0 {
			return fmt.Errorf("expected positive balance, had %v",
				daveBal)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "On-chain balance not restored")

	return dave
}

// testScenario runs a test case with a given setup and asserts the DLP (data
// loss protection) protocol is executed as expected. In detail, it will:
// 1. shutdown Dave.
// 2. suspend Carol.
// 3. restore Dave.
// 4. validate pending channel state and check we cannot force close it.
// 5. validate Carol's UTXOs.
// 6. assert DLP is executed.
func (c *chanRestoreScenario) testScenario(ht *lntest.HarnessTest,
	restoredNodeFunc nodeRestorer) {

	carol, dave := c.carol, c.dave

	// Before we start the recovery, we'll record the balances of both
	// Carol and Dave to ensure they both sweep their coins at the end.
	carolBalResp := carol.RPC.WalletBalance()
	carolStartingBalance := carolBalResp.ConfirmedBalance

	daveBalance := dave.RPC.WalletBalance()
	daveStartingBalance := daveBalance.ConfirmedBalance

	// Now that we're able to make our restored node, we'll shutdown the
	// old Dave node as we'll be restoring it shortly below.
	ht.Shutdown(dave)

	// To make sure the channel state is advanced correctly if the channel
	// peer is not online at first, we also shutdown Carol.
	restartCarol := ht.SuspendNode(carol)

	// We now restore Dave.
	dave = c.restoreDave(ht, restoredNodeFunc)

	// We now check that the restored channel is in the proper state. It
	// should not yet be force closing as no connection with the remote
	// peer was established yet. We should also not be able to close the
	// channel.
	channel := ht.AssertNumWaitingClose(dave, 1)[0]
	chanPointStr := channel.Channel.ChannelPoint

	// We also want to make sure we cannot force close in this state. That
	// would get the state machine in a weird state.
	chanPointParts := strings.Split(chanPointStr, ":")
	chanPointIndex, _ := strconv.ParseUint(chanPointParts[1], 10, 32)

	// We don't get an error directly but only when reading the first
	// message of the stream.
	err := ht.CloseChannelAssertErr(
		dave, &lnrpc.ChannelPoint{
			FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
				FundingTxidStr: chanPointParts[0],
			},
			OutputIndex: uint32(chanPointIndex),
		}, true,
	)
	require.Contains(ht, err.Error(), "cannot close channel with state: ")
	require.Contains(ht, err.Error(), "ChanStatusRestored")

	// Increase the fee estimate so that the following force close tx will
	// be cpfp'ed in case of anchor commitments.
	ht.SetFeeEstimate(30000)

	// Now that we have ensured that the channels restored by the backup
	// are in the correct state even without the remote peer telling us so,
	// let's start up Carol again.
	require.NoError(ht, restartCarol(), "restart carol failed")

	if lntest.CommitTypeHasAnchors(c.params.CommitmentType) {
		ht.AssertNumUTXOs(carol, 2)
	} else {
		ht.AssertNumUTXOs(carol, 1)
	}

	// Now we'll assert that both sides properly execute the DLP protocol.
	// We grab their balances now to ensure that they're made whole at the
	// end of the protocol.
	assertDLPExecuted(
		ht, carol, carolStartingBalance, dave,
		daveStartingBalance, c.params.CommitmentType,
	)
}

// testChannelBackupRestoreBasic tests that we're able to recover from, and
// initiate the DLP protocol via: the RPC restore command, restoring on unlock,
// and restoring from initial wallet creation. We'll also alternate between
// restoring from the on-disk file, and restoring from the exported RPC command
// as well.
func testChannelBackupRestoreBasic(ht *lntest.HarnessTest) {
	var testCases = []struct {
		name          string
		restoreMethod restoreMethodType
	}{
		// Restore from backups obtained via the RPC interface. Dave
		// was the initiator of the non-advertised channel.
		{
			name: "restore from RPC backup",
			restoreMethod: func(st *lntest.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				password []byte,
				mnemonic []string) nodeRestorer {

				// For this restoration method, we'll grab the
				// current multi-channel backup from the old
				// node, and use it to restore a new node
				// within the closure.
				chanBackup := oldNode.RPC.ExportAllChanBackups()

				multi := chanBackup.MultiChanBackup.
					MultiChanBackup

				// In our nodeRestorer function, we'll restore
				// the node from seed, then manually recover
				// the channel backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface.
		{
			name: "restore from backup file",
			restoreMethod: func(st *lntest.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				password []byte,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that will restore
				// using the on-disk channel.backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore the backup as part of node initialization with the
		// prior mnemonic and new backup seed.
		{
			name: "restore during creation",
			restoreMethod: func(st *lntest.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				password []byte,
				mnemonic []string) nodeRestorer {

				// First, fetch the current backup state as is,
				// to obtain our latest Multi.
				chanBackup := oldNode.RPC.ExportAllChanBackups()
				backupSnapshot := &lnrpc.ChanBackupSnapshot{
					MultiChanBackup: chanBackup.
						MultiChanBackup,
				}

				// Create a new nodeRestorer that will restore
				// the node using the Multi backup we just
				// obtained above.
				return func() *node.HarnessNode {
					return st.RestoreNodeWithSeed(
						"dave", nil, password, mnemonic,
						"", revocationWindow,
						backupSnapshot,
					)
				}
			},
		},

		// Restore the backup once the node has already been
		// re-created, using the Unlock call.
		{
			name: "restore during unlock",
			restoreMethod: func(st *lntest.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				password []byte,
				mnemonic []string) nodeRestorer {

				// First, fetch the current backup state as is,
				// to obtain our latest Multi.
				chanBackup := oldNode.RPC.ExportAllChanBackups()
				backupSnapshot := &lnrpc.ChanBackupSnapshot{
					MultiChanBackup: chanBackup.
						MultiChanBackup,
				}

				// Create a new nodeRestorer that will restore
				// the node with its seed, but no channel
				// backup, shutdown this initialized node, then
				// restart it again using Unlock.
				return func() *node.HarnessNode {
					newNode := st.RestoreNodeWithSeed(
						"dave", nil, password, mnemonic,
						"", revocationWindow, nil,
					)
					st.RestartNodeWithChanBackups(
						newNode, backupSnapshot,
					)

					return newNode
				}
			},
		},

		// Restore the backup from the on-disk file a second time to
		// make sure imports can be canceled and later resumed.
		{
			name: "restore from backup file twice",
			restoreMethod: func(st *lntest.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				password []byte,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that will restore
				// using the on-disk channel.backup.
				//
				//nolint:lll
				backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
					MultiChanBackup: multi,
				}

				return func() *node.HarnessNode {
					newNode := st.RestoreNodeWithSeed(
						"dave", nil, password, mnemonic,
						"", revocationWindow, nil,
					)

					req := &lnrpc.RestoreChanBackupRequest{
						Backup: backup,
					}
					newNode.RPC.RestoreChanBackups(req)

					req = &lnrpc.RestoreChanBackupRequest{
						Backup: backup,
					}
					newNode.RPC.RestoreChanBackups(req)

					return newNode
				}
			},
		},
	}

	for _, testCase := range testCases {
		tc := testCase
		success := ht.Run(tc.name, func(t *testing.T) {
			h := ht.Subtest(t)

			runChanRestoreScenarioBasic(h, tc.restoreMethod)
		})
		if !success {
			break
		}
	}
}

// runChanRestoreScenarioBasic executes a given test case from end to end,
// ensuring that after Dave restores his channel state according to the
// testCase, the DLP protocol is executed properly and both nodes are made
// whole.
func runChanRestoreScenarioBasic(ht *lntest.HarnessTest,
	restoreMethod restoreMethodType) {

	// Create a new restore scenario.
	crs := newChanRestoreScenario(
		ht, lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE, false,
	)
	carol, dave := crs.carol, crs.dave

	// Open a channel from Dave to Carol.
	ht.OpenChannel(dave, carol, crs.params)

	// At this point, we'll now execute the restore method to give us the
	// new node we should attempt our assertions against.
	backupFilePath := dave.Cfg.ChanBackupPath()
	restoredNodeFunc := restoreMethod(
		ht, dave, backupFilePath, crs.password, crs.mnemonic,
	)

	// Test the scenario.
	crs.testScenario(ht, restoredNodeFunc)
}

// testChannelBackupRestoreUnconfirmed tests that we're able to restore from
// the on-disk file and from the exported RPC command for an unconfirmed
// channel.
func testChannelBackupRestoreUnconfirmed(ht *lntest.HarnessTest) {
	// Use the channel backup file that contains an unconfirmed channel and
	// make sure recovery works as well.
	ht.Run("restore unconfirmed channel file", func(t *testing.T) {
		st := ht.Subtest(t)
		runChanRestoreScenarioUnConfirmed(st, true)
	})

	// Create a backup using RPC that contains an unconfirmed channel and
	// make sure recovery works as well.
	ht.Run("restore unconfirmed channel RPC", func(t *testing.T) {
		st := ht.Subtest(t)
		runChanRestoreScenarioUnConfirmed(st, false)
	})
}

// runChanRestoreScenarioUnConfirmed checks that Dave is able to restore for an
// unconfirmed channel.
func runChanRestoreScenarioUnConfirmed(ht *lntest.HarnessTest, useFile bool) {
	// Create a new restore scenario.
	crs := newChanRestoreScenario(
		ht, lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE, false,
	)
	carol, dave := crs.carol, crs.dave

	// Open a pending channel.
	ht.OpenChannelAssertPending(dave, carol, crs.params)

	// Give the pubsub some time to update the channel backup.
	err := wait.NoError(func() error {
		fi, err := os.Stat(dave.Cfg.ChanBackupPath())
		if err != nil {
			return err
		}
		if fi.Size() <= chanbackup.NilMultiSizePacked {
			return fmt.Errorf("backup file empty")
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "channel backup not updated in time")

	// At this point, we'll now execute the restore method to give us the
	// new node we should attempt our assertions against.
	var multi []byte
	if useFile {
		backupFilePath := dave.Cfg.ChanBackupPath()
		// Read the entire Multi backup stored within this node's
		// channel.backup file.
		multi, err = ioutil.ReadFile(backupFilePath)
		require.NoError(ht, err)
	} else {
		// For this restoration method, we'll grab the current
		// multi-channel backup from the old node. The channel should
		// be included, even if it is not confirmed yet.
		chanBackup := dave.RPC.ExportAllChanBackups()
		chanPoints := chanBackup.MultiChanBackup.ChanPoints
		require.NotEmpty(ht, chanPoints,
			"unconfirmed channel not found")
		multi = chanBackup.MultiChanBackup.MultiChanBackup
	}

	// Let's assume time passes, the channel confirms in the meantime but
	// for some reason the backup we made while it was still unconfirmed is
	// the only backup we have. We should still be able to restore it. To
	// simulate time passing, we mine some blocks to get the channel
	// confirmed _after_ we saved the backup.
	ht.MineBlocksAndAssertNumTxes(6, 1)

	// In our nodeRestorer function, we'll restore the node from seed, then
	// manually recover the channel backup.
	restoredNodeFunc := chanRestoreViaRPC(
		ht, crs.password, crs.mnemonic, multi, dave,
	)

	// Test the scenario.
	crs.testScenario(ht, restoredNodeFunc)
}

// testChannelBackupRestoreCommitTypes tests that we're able to recover from,
// and initiate the DLP protocol for different channel commitment types and
// zero-conf channels.
func testChannelBackupRestoreCommitTypes(ht *lntest.HarnessTest) {
	var testCases = []struct {
		name     string
		ct       lnrpc.CommitmentType
		zeroConf bool
	}{
		// Restore the backup from the on-disk file, using the RPC
		// interface, for anchor commitment channels.
		{
			name: "restore from backup file anchors",
			ct:   lnrpc.CommitmentType_ANCHORS,
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface, for script-enforced leased channels.
		{
			name: "restore from backup file script " +
				"enforced lease",
			ct: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface, for zero-conf anchor channels.
		{
			name: "restore from backup file for zero-conf " +
				"anchors channel",
			ct:       lnrpc.CommitmentType_ANCHORS,
			zeroConf: true,
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface for a zero-conf script-enforced leased channel.
		{
			name: "restore from backup file zero-conf " +
				"script-enforced leased channel",
			ct:       lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
			zeroConf: true,
		},
	}

	for _, testCase := range testCases {
		tc := testCase
		success := ht.Run(tc.name, func(t *testing.T) {
			h := ht.Subtest(t)

			runChanRestoreScenarioCommitTypes(
				h, tc.ct, tc.zeroConf,
			)
		})
		if !success {
			break
		}
	}
}

// runChanRestoreScenarioCommitTypes tests that the DLP is applied for
// different channel commitment types and zero-conf channels.
func runChanRestoreScenarioCommitTypes(ht *lntest.HarnessTest,
	ct lnrpc.CommitmentType, zeroConf bool) {

	// Create a new restore scenario.
	crs := newChanRestoreScenario(ht, ct, zeroConf)
	carol, dave := crs.carol, crs.dave

	// If we are testing zero-conf channels, setup a ChannelAcceptor for
	// the fundee.
	var cancelAcceptor context.CancelFunc
	if zeroConf {
		// Setup a ChannelAcceptor.
		acceptStream, cancel := carol.RPC.ChannelAcceptor()
		cancelAcceptor = cancel
		go acceptChannel(ht.T, true, acceptStream)
	}

	var fundingShim *lnrpc.FundingShim
	if ct == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
		_, minerHeight := ht.Miner.GetBestBlock()
		thawHeight := uint32(minerHeight + thawHeightDelta)

		fundingShim, _ = deriveFundingShim(
			ht, dave, carol, crs.params.Amt, thawHeight, true,
		)
		crs.params.FundingShim = fundingShim
	}
	ht.OpenChannel(dave, carol, crs.params)

	// Remove the ChannelAcceptor.
	if zeroConf {
		cancelAcceptor()
	}

	// At this point, we'll now execute the restore method to give us the
	// new node we should attempt our assertions against.
	backupFilePath := dave.Cfg.ChanBackupPath()

	// Read the entire Multi backup stored within this node's
	// channel.backup file.
	multi, err := ioutil.ReadFile(backupFilePath)
	require.NoError(ht, err)

	// Now that we have Dave's backup file, we'll create a new nodeRestorer
	// that we'll restore using the on-disk channel.backup.
	restoredNodeFunc := chanRestoreViaRPC(
		ht, crs.password, crs.mnemonic, multi, dave,
	)

	// Test the scenario.
	crs.testScenario(ht, restoredNodeFunc)
}

// testChannelBackupRestoreLegacy checks a channel with the legacy revocation
// producer format and makes sure old SCBs can still be recovered.
func testChannelBackupRestoreLegacy(ht *lntest.HarnessTest) {
	// Create a new restore scenario.
	crs := newChanRestoreScenario(
		ht, lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE, false,
	)
	carol, dave := crs.carol, crs.dave

	createLegacyRevocationChannel(
		ht, crs.params.Amt, crs.params.PushAmt, dave, carol,
	)

	// For this restoration method, we'll grab the current multi-channel
	// backup from the old node, and use it to restore a new node within
	// the closure.
	chanBackup := dave.RPC.ExportAllChanBackups()
	multi := chanBackup.MultiChanBackup.MultiChanBackup

	// In our nodeRestorer function, we'll restore the node from seed, then
	// manually recover the channel backup.
	restoredNodeFunc := chanRestoreViaRPC(
		ht, crs.password, crs.mnemonic, multi, dave,
	)

	// Test the scenario.
	crs.testScenario(ht, restoredNodeFunc)
}

// testChannelBackupRestoreForceClose checks that Dave can restore from force
// closed channels.
func testChannelBackupRestoreForceClose(ht *lntest.HarnessTest) {
	// Restore a channel that was force closed by Dave just before going
	// offline.
	success := ht.Run("from backup file anchors", func(t *testing.T) {
		st := ht.Subtest(t)
		runChanRestoreScenarioForceClose(st, false)
	})

	// Only run the second test if the first passed.
	if !success {
		return
	}

	// Restore a zero-conf anchors channel that was force closed by Dave
	// just before going offline.
	ht.Run("from backup file anchors w/ zero-conf", func(t *testing.T) {
		st := ht.Subtest(t)
		runChanRestoreScenarioForceClose(st, true)
	})
}

// runChanRestoreScenarioForceClose creates anchor-enabled force close channels
// and checks that Dave is able to restore from them.
func runChanRestoreScenarioForceClose(ht *lntest.HarnessTest, zeroConf bool) {
	crs := newChanRestoreScenario(
		ht, lnrpc.CommitmentType_ANCHORS, zeroConf,
	)
	carol, dave := crs.carol, crs.dave

	// For the neutrino backend, we give Dave one more UTXO to fund the
	// anchor sweep.
	if ht.IsNeutrinoBackend() {
		ht.FundCoins(btcutil.SatoshiPerBitcoin, dave)
	}

	// If we are testing zero-conf channels, setup a ChannelAcceptor for
	// the fundee.
	var cancelAcceptor context.CancelFunc
	if zeroConf {
		// Setup a ChannelAcceptor.
		acceptStream, cancel := carol.RPC.ChannelAcceptor()
		cancelAcceptor = cancel
		go acceptChannel(ht.T, true, acceptStream)
	}

	chanPoint := ht.OpenChannel(dave, carol, crs.params)

	// Remove the ChannelAcceptor.
	if zeroConf {
		cancelAcceptor()
	}

	// If we're testing that locally force closed channels can be restored
	// then we issue the force close now.
	ht.CloseChannelAssertPending(dave, chanPoint, true)

	// Dave should see one waiting close channel.
	ht.AssertNumWaitingClose(dave, 1)

	// Now we need to make sure that the channel is still in the backup.
	// Otherwise restoring won't work later.
	dave.RPC.ExportChanBackup(chanPoint)

	// Before we start the recovery, we'll record the balances of both
	// Carol and Dave to ensure they both sweep their coins at the end.
	carolBalResp := carol.RPC.WalletBalance()
	carolStartingBalance := carolBalResp.ConfirmedBalance

	daveBalance := dave.RPC.WalletBalance()
	daveStartingBalance := daveBalance.ConfirmedBalance

	// At this point, we'll now execute the restore method to give us the
	// new node we should attempt our assertions against.
	backupFilePath := dave.Cfg.ChanBackupPath()

	// Read the entire Multi backup stored within this node's
	// channel.backup file.
	multi, err := ioutil.ReadFile(backupFilePath)
	require.NoError(ht, err)

	// Now that we have Dave's backup file, we'll create a new nodeRestorer
	// that will restore using the on-disk channel.backup.
	restoredNodeFunc := chanRestoreViaRPC(
		ht, crs.password, crs.mnemonic, multi, dave,
	)

	// We now wait until both Dave's closing tx and sweep tx have shown in
	// the mempool.
	ht.Miner.AssertNumTxsInMempool(2)

	// Now that we're able to make our restored node, we'll shutdown the
	// old Dave node as we'll be restoring it shortly below.
	ht.Shutdown(dave)

	// Mine a block to confirm the closing tx from Dave.
	ht.MineBlocksAndAssertNumTxes(1, 2)

	// To make sure the channel state is advanced correctly if the channel
	// peer is not online at first, we also shutdown Carol.
	restartCarol := ht.SuspendNode(carol)

	dave = crs.restoreDave(ht, restoredNodeFunc)

	// For our force close scenario we don't need the channel to be closed
	// by Carol since it was already force closed before we started the
	// recovery. All we need is for Carol to send us over the commit height
	// so we can sweep the time locked output with the correct commit
	// point.
	ht.AssertNumPendingForceClose(dave, 1)

	require.NoError(ht, restartCarol(), "restart carol failed")

	// Now that we have our new node up, we expect that it'll re-connect to
	// Carol automatically based on the restored backup.
	ht.EnsureConnected(dave, carol)

	assertTimeLockSwept(
		ht, carol, dave, carolStartingBalance, daveStartingBalance,
	)
}

// testChannelBackupUpdates tests that both the streaming channel update RPC,
// and the on-disk channel.backup are updated each time a channel is
// opened/closed.
func testChannelBackupUpdates(ht *lntest.HarnessTest) {
	alice := ht.Alice

	// First, we'll make a temp directory that we'll use to store our
	// backup file, so we can check in on it during the test easily.
	backupDir := ht.T.TempDir()

	// First, we'll create a new node, Carol. We'll also create a temporary
	// file that Carol will use to store her channel backups.
	backupFilePath := filepath.Join(
		backupDir, chanbackup.DefaultBackupFileName,
	)
	carolArgs := fmt.Sprintf("--backupfilepath=%v", backupFilePath)
	carol := ht.NewNode("carol", []string{carolArgs})

	// Next, we'll register for streaming notifications for changes to the
	// backup file.
	backupStream := carol.RPC.SubscribeChannelBackups()

	// We'll use this goroutine to proxy any updates to a channel we can
	// easily use below.
	var wg sync.WaitGroup
	backupUpdates := make(chan *lnrpc.ChanBackupSnapshot)
	streamErr := make(chan error)
	streamQuit := make(chan struct{})

	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			snapshot, err := backupStream.Recv()
			if err != nil {
				select {
				case streamErr <- err:
				case <-streamQuit:
					return
				}
			}

			select {
			case backupUpdates <- snapshot:
			case <-streamQuit:
				return
			}
		}
	}()
	defer close(streamQuit)

	// With Carol up, we'll now connect her to Alice, and open a channel
	// between them.
	ht.ConnectNodes(carol, alice)

	// Next, we'll open two channels between Alice and Carol back to back.
	var chanPoints []*lnrpc.ChannelPoint
	numChans := 2
	chanAmt := btcutil.Amount(1000000)
	for i := 0; i < numChans; i++ {
		chanPoint := ht.OpenChannel(
			alice, carol, lntest.OpenChannelParams{Amt: chanAmt},
		)
		chanPoints = append(chanPoints, chanPoint)
	}

	// Using this helper function, we'll maintain a pointer to the latest
	// channel backup so we can compare it to the on disk state.
	var currentBackup *lnrpc.ChanBackupSnapshot
	assertBackupNtfns := func(numNtfns int) {
		for i := 0; i < numNtfns; i++ {
			select {
			case err := <-streamErr:
				require.Failf(ht, "stream err",
					"error with backup stream: %v", err)

			case currentBackup = <-backupUpdates:

			case <-time.After(time.Second * 5):
				require.Failf(ht, "timeout", "didn't "+
					"receive channel backup "+
					"notification %v", i+1)
			}
		}
	}

	// assertBackupFileState is a helper function that we'll use to compare
	// the on disk backup file to our currentBackup pointer above.
	assertBackupFileState := func() {
		err := wait.NoError(func() error {
			packedBackup, err := ioutil.ReadFile(backupFilePath)
			if err != nil {
				return fmt.Errorf("unable to read backup "+
					"file: %v", err)
			}

			// As each backup file will be encrypted with a fresh
			// nonce, we can't compare them directly, so instead
			// we'll compare the length which is a proxy for the
			// number of channels that the multi-backup contains.
			backup := currentBackup.MultiChanBackup.MultiChanBackup
			if len(backup) != len(packedBackup) {
				return fmt.Errorf("backup files don't match: "+
					"expected %x got %x", backup,
					packedBackup)
			}

			// Additionally, we'll assert that both backups
			// returned are valid.
			for _, backup := range [][]byte{backup, packedBackup} {
				snapshot := &lnrpc.ChanBackupSnapshot{
					MultiChanBackup: &lnrpc.MultiChanBackup{
						MultiChanBackup: backup,
					},
				}

				carol.RPC.VerifyChanBackup(snapshot)
			}

			return nil
		}, defaultTimeout)
		require.NoError(ht, err, "timeout while checking "+
			"backup state: %v", err)
	}

	// As these two channels were just opened, we should've got two times
	// the pending and open notifications for channel backups.
	assertBackupNtfns(2 * 2)

	// The on disk file should also exactly match the latest backup that we
	// have.
	assertBackupFileState()

	// Next, we'll close the channels one by one. After each channel
	// closure, we should get a notification, and the on-disk state should
	// match this state as well.
	for i := 0; i < numChans; i++ {
		// To ensure force closes also trigger an update, we'll force
		// close half of the channels.
		forceClose := i%2 == 0

		chanPoint := chanPoints[i]

		// If we force closed the channel, then we'll mine enough
		// blocks to ensure all outputs have been swept.
		if forceClose {
			ht.ForceCloseChannel(alice, chanPoint)

			// A local force closed channel will trigger a
			// notification once the commitment TX confirms on
			// chain. But that won't remove the channel from the
			// backup just yet, that will only happen once the time
			// locked contract was fully resolved on chain.
			assertBackupNtfns(1)

			// Now that the channel's been fully resolved, we
			// expect another notification.
			assertBackupNtfns(1)
			assertBackupFileState()
		} else {
			ht.CloseChannel(alice, chanPoint)

			// We should get a single notification after closing,
			// and the on-disk state should match this latest
			// notification.
			assertBackupNtfns(1)
			assertBackupFileState()
		}
	}
}

// testExportChannelBackup tests that we're able to properly export either a
// targeted channel's backup, or export backups of all the current open
// channels.
func testExportChannelBackup(ht *lntest.HarnessTest) {
	// First, we'll create our primary test node: Carol. We'll use Carol to
	// open channels and also export backups that we'll examine throughout
	// the test.
	carol := ht.NewNode("carol", nil)

	// With Carol up, we'll now connect her to Alice, and open a channel
	// between them.
	alice := ht.Alice
	ht.ConnectNodes(carol, alice)

	// Next, we'll open two channels between Alice and Carol back to back.
	var chanPoints []*lnrpc.ChannelPoint
	numChans := 2
	chanAmt := btcutil.Amount(1000000)
	for i := 0; i < numChans; i++ {
		chanPoint := ht.OpenChannel(
			alice, carol, lntest.OpenChannelParams{Amt: chanAmt},
		)
		chanPoints = append(chanPoints, chanPoint)
	}

	// Now that the channels are open, we should be able to fetch the
	// backups of each of the channels.
	for _, chanPoint := range chanPoints {
		chanBackup := carol.RPC.ExportChanBackup(chanPoint)

		// The returned backup should be fully populated. Since it's
		// encrypted, we can't assert any more than that atm.
		require.NotEmptyf(ht, chanBackup.ChanBackup,
			"obtained empty backup for channel: %v", chanPoint)

		// The specified chanPoint in the response should match our
		// requested chanPoint.
		require.Equal(ht, chanBackup.ChanPoint.String(),
			chanPoint.String())
	}

	// Before we proceed, we'll make two utility methods we'll use below
	// for our primary assertions.
	assertNumSingleBackups := func(numSingles int) {
		err := wait.NoError(func() error {
			chanSnapshot := carol.RPC.ExportAllChanBackups()

			if chanSnapshot.SingleChanBackups == nil {
				return fmt.Errorf("single chan backups not " +
					"populated")
			}

			backups := chanSnapshot.SingleChanBackups.ChanBackups
			if len(backups) != numSingles {
				return fmt.Errorf("expected %v singles, "+
					"got %v", len(backups), numSingles)
			}

			return nil
		}, defaultTimeout)
		require.NoError(ht, err, "timeout checking num single backup")
	}
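
	// assertMultiBackupFound exports the current multi-channel backup once
	// at call time and returns a closure that asserts whether that
	// snapshot should (or should not) contain exactly the given set of
	// channel points.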
	assertMultiBackupFound := func() func(bool,
		map[wire.OutPoint]struct{}) {

		chanSnapshot := carol.RPC.ExportAllChanBackups()

		return func(found bool, chanPoints map[wire.OutPoint]struct{}) {
			num := len(chanSnapshot.MultiChanBackup.MultiChanBackup)

			switch {
			case found && chanSnapshot.MultiChanBackup == nil:
				require.Fail(ht, "multi-backup not present")

			case !found && chanSnapshot.MultiChanBackup != nil &&
				num != chanbackup.NilMultiSizePacked:

				require.Fail(ht, "found multi-backup when "+
					"none should be found")
			}

			if !found {
				return
			}

			backedUpChans := chanSnapshot.MultiChanBackup.ChanPoints
			require.Len(ht, backedUpChans, len(chanPoints))

			for _, chanPoint := range backedUpChans {
				wp := ht.OutPointFromChannelPoint(chanPoint)
				_, ok := chanPoints[wp]
				require.True(ht, ok, "unexpected "+
					"backup: %v", wp)
			}
		}
	}

	chans := make(map[wire.OutPoint]struct{})
	for _, chanPoint := range chanPoints {
		chans[ht.OutPointFromChannelPoint(chanPoint)] = struct{}{}
	}

	// We should have exactly two single channel backups contained, and we
	// should also have a multi-channel backup.
	assertNumSingleBackups(2)
	assertMultiBackupFound()(true, chans)

	// We'll now close each channel one by one. After we close a channel,
	// we shouldn't be able to find that channel as a backup still. We
	// should also have one less single written to disk.
	for i, chanPoint := range chanPoints {
		ht.CloseChannel(alice, chanPoint)

		assertNumSingleBackups(len(chanPoints) - i - 1)

		delete(chans, ht.OutPointFromChannelPoint(chanPoint))
		assertMultiBackupFound()(true, chans)
	}

	// At this point we shouldn't have any single or multi-chan backups at
	// all.
	assertNumSingleBackups(0)
	assertMultiBackupFound()(false, nil)
}
|
|
|
|
|
2022-07-28 11:48:54 +02:00
|
|
|
// testDataLossProtection tests that if one of the nodes in a channel
|
|
|
|
// relationship lost state, they will detect this during channel sync, and the
|
|
|
|
// up-to-date party will force close the channel, giving the outdated party the
|
|
|
|
// opportunity to sweep its output.
|
2022-08-12 11:03:44 +02:00
|
|
|
func testDataLossProtection(ht *lntest.HarnessTest) {
|
2022-07-28 11:48:54 +02:00
|
|
|
const (
|
|
|
|
chanAmt = funding.MaxBtcFundingAmount
|
|
|
|
paymentAmt = 10000
|
|
|
|
numInvoices = 6
|
|
|
|
)
|
|
|
|
|
|
|
|
// Carol will be the up-to-date party. We set --nolisten to ensure Dave
|
|
|
|
// won't be able to connect to her and trigger the channel data
|
|
|
|
// protection logic automatically. We also can't have Carol
|
|
|
|
// automatically re-connect too early, otherwise DLP would be initiated
|
|
|
|
// at the wrong moment.
|
2022-07-28 12:25:06 +02:00
|
|
|
carol := ht.NewNode("Carol", []string{"--nolisten", "--minbackoff=1h"})
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Dave will be the party losing his state.
|
2022-07-28 12:25:06 +02:00
|
|
|
dave := ht.NewNode("Dave", nil)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Before we make a channel, we'll load up Carol with some coins sent
|
|
|
|
// directly from the miner.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-11-01 01:56:40 +01:00
|
|
|
// timeTravelDave is a method that will make Carol open a channel to
|
|
|
|
// Dave, settle a series of payments, then Dave back to the state
|
|
|
|
// before the payments happened. When this method returns Dave will
|
|
|
|
// be unaware of the new state updates. The returned function can be
|
|
|
|
// used to restart Dave in this state.
|
|
|
|
timeTravelDave := func() (func() error, *lnrpc.ChannelPoint, int64) {
|
2022-07-28 11:48:54 +02:00
|
|
|
// We must let the node communicate with Carol before they are
|
|
|
|
// able to open channel, so we connect them.
|
2022-11-01 01:56:40 +01:00
|
|
|
ht.EnsureConnected(carol, dave)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// We'll first open up a channel between them with a 0.5 BTC
|
|
|
|
// value.
|
2022-07-28 12:25:06 +02:00
|
|
|
chanPoint := ht.OpenChannel(
|
2022-11-01 01:56:40 +01:00
|
|
|
carol, dave, lntest.OpenChannelParams{
|
2022-07-28 11:48:54 +02:00
|
|
|
Amt: chanAmt,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
|
|
|
|
// With the channel open, we'll create a few invoices for the
|
|
|
|
// node that Carol will pay to in order to advance the state of
|
|
|
|
// the channel.
|
|
|
|
// TODO(halseth): have dangling HTLCs on the commitment, able to
|
|
|
|
// retrieve funds?
|
2022-11-01 01:56:40 +01:00
|
|
|
payReqs, _, _ := ht.CreatePayReqs(dave, paymentAmt, numInvoices)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Send payments from Carol using 3 of the payment hashes
|
|
|
|
// generated above.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.CompletePaymentRequests(carol, payReqs[:numInvoices/2])
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-11-01 01:56:40 +01:00
|
|
|
// Next query for Dave's channel state, as we sent 3 payments
|
|
|
|
// of 10k satoshis each, it should now see his balance as being
|
|
|
|
// 30k satoshis.
|
2022-07-28 12:25:06 +02:00
|
|
|
nodeChan := ht.AssertChannelLocalBalance(
|
2022-11-01 01:56:40 +01:00
|
|
|
dave, chanPoint, 30_000,
|
2022-07-28 12:25:06 +02:00
|
|
|
)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Grab the current commitment height (update number), we'll
|
|
|
|
// later revert him to this state after additional updates to
|
|
|
|
// revoke this state.
|
|
|
|
stateNumPreCopy := nodeChan.NumUpdates
|
|
|
|
|
|
|
|
// With the temporary file created, copy the current state into
|
|
|
|
// the temporary file we created above. Later after more
|
|
|
|
// updates, we'll restore this state.
|
2022-11-01 01:56:40 +01:00
|
|
|
ht.BackupDB(dave)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-07-28 12:25:06 +02:00
|
|
|
// Reconnect the peers after the restart that was needed for
|
|
|
|
// the db backup.
|
2022-11-01 01:56:40 +01:00
|
|
|
ht.EnsureConnected(carol, dave)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-11-01 01:56:40 +01:00
|
|
|
// Finally, send more payments from Carol, using the remaining
|
2022-07-28 11:48:54 +02:00
|
|
|
// payment hashes.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.CompletePaymentRequests(carol, payReqs[numInvoices/2:])
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-11-01 01:56:40 +01:00
|
|
|
// TODO(yy): remove the sleep once the following bug is fixed.
|
|
|
|
//
|
|
|
|
// While the payment is reported as settled, the commitment
|
|
|
|
// dance may not be finished, which leaves several HTLCs in the
|
|
|
|
// commitment. Later on, when Carol force closes this channel,
|
|
|
|
// she would have HTLCs there and the test won't pass.
|
|
|
|
time.Sleep(2 * time.Second)
|
|
|
|
|
|
|
|
// Now we shutdown Dave, copying over the its temporary
|
2022-07-28 11:48:54 +02:00
|
|
|
// database state which has the *prior* channel state over his
|
|
|
|
// current most up to date state. With this, we essentially
|
2022-11-01 01:56:40 +01:00
|
|
|
// force Dave to travel back in time within the channel's
|
2022-07-28 11:48:54 +02:00
|
|
|
// history.
|
2022-11-01 01:56:40 +01:00
|
|
|
ht.RestartNodeAndRestoreDB(dave)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-11-01 01:56:40 +01:00
|
|
|
// Make sure the channel is still there from the PoV of Dave.
|
|
|
|
ht.AssertNodeNumChannels(dave, 1)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Now query for the channel state, it should show that it's at
|
|
|
|
// a state number in the past, not the *latest* state.
|
2022-11-01 01:56:40 +01:00
|
|
|
ht.AssertChannelNumUpdates(dave, stateNumPreCopy, chanPoint)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-11-01 01:56:40 +01:00
|
|
|
balResp := dave.RPC.WalletBalance()
|
|
|
|
restart := ht.SuspendNode(dave)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-07-28 12:25:06 +02:00
|
|
|
return restart, chanPoint, balResp.ConfirmedBalance
|
2022-07-28 11:48:54 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Reset Dave to a state where he has an outdated channel state.
|
2022-11-01 01:56:40 +01:00
|
|
|
restartDave, _, daveStartingBalance := timeTravelDave()
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// We make a note of the nodes' current on-chain balances, to make sure
|
|
|
|
// they are able to retrieve the channel funds eventually,
|
2022-07-28 12:25:06 +02:00
|
|
|
carolBalResp := carol.RPC.WalletBalance()
|
2022-07-28 11:48:54 +02:00
|
|
|
carolStartingBalance := carolBalResp.ConfirmedBalance
|
|
|
|
|
|
|
|
// Restart Dave to trigger a channel resync.
|
2022-07-28 12:25:06 +02:00
|
|
|
require.NoError(ht, restartDave(), "unable to restart dave")
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Assert that once Dave comes up, they reconnect, Carol force closes
|
|
|
|
// on chain, and both of them properly carry out the DLP protocol.
|
2022-07-28 12:25:06 +02:00
|
|
|
assertDLPExecuted(
|
|
|
|
ht, carol, carolStartingBalance, dave,
|
|
|
|
daveStartingBalance, lnrpc.CommitmentType_STATIC_REMOTE_KEY,
|
2022-07-28 11:48:54 +02:00
|
|
|
)

	// As a second part of this test, we will test the scenario where a
	// channel is closed while Dave is offline; he then loses his state
	// and comes back online. In this case the node should attempt to
	// resync the channel, and the peer should resend a channel sync
	// message for the closed channel, such that Dave can retrieve his
	// funds.
	//
	// We start by letting Dave time travel back to an outdated state.
	restartDave, chanPoint2, daveStartingBalance := timeTravelDave()

	carolBalResp = carol.RPC.WalletBalance()
	carolStartingBalance = carolBalResp.ConfirmedBalance

	// Now let Carol force close the channel while Dave is offline.
	ht.ForceCloseChannel(carol, chanPoint2)

	// Make sure Carol got her balance back.
	carolBalResp = carol.RPC.WalletBalance()
	carolBalance := carolBalResp.ConfirmedBalance
	require.Greater(ht, carolBalance, carolStartingBalance,
		"expected carol to have balance increased")

	ht.AssertNodeNumChannels(carol, 0)

	// When Dave comes online, he will reconnect to Carol, try to resync
	// the channel, but it will already be closed. Carol should resend the
	// information Dave needs to sweep his funds.
	require.NoError(ht, restartDave(), "unable to restart dave")

	// Dave should sweep his funds.
	ht.Miner.AssertNumTxsInMempool(1)

	// Mine a block to confirm the sweep, and make sure Dave got his
	// balance back.
	ht.MineBlocksAndAssertNumTxes(1, 1)
	ht.AssertNodeNumChannels(dave, 0)

	err := wait.NoError(func() error {
		daveBalResp := dave.RPC.WalletBalance()
		daveBalance := daveBalResp.ConfirmedBalance
		if daveBalance <= daveStartingBalance {
			return fmt.Errorf("expected dave to have balance "+
				"above %d, instead had %v", daveStartingBalance,
				daveBalance)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "timeout while checking dave's balance")
}

// createLegacyRevocationChannel creates a single channel using the legacy
// revocation producer format by using PSBT to signal a special pending channel
// ID.
func createLegacyRevocationChannel(ht *lntest.HarnessTest,
	chanAmt, pushAmt btcutil.Amount, from, to *node.HarnessNode) {

	// We'll signal to the wallet that we also want to create a channel
	// with the legacy revocation producer format that relies on deriving a
	// private key from the key ring. This is only available during itests
	// to make sure we don't hard depend on the DerivePrivKey method of the
	// key ring. We can signal the wallet by setting a custom pending
	// channel ID. To be able to do that, we need to set a funding shim
	// which is easiest by using PSBT funding. The ID is the hex
	// representation of the string "legacy-revocation".
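	// For reference, the 17 bytes below are []byte("legacy-revocation");
	// the remaining 15 bytes of the 32-byte pending channel ID are left
	// as zero.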
	itestLegacyFormatChanID := [32]byte{
		0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x2d, 0x72, 0x65, 0x76,
		0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
	}
	shim := &lnrpc.FundingShim{
		Shim: &lnrpc.FundingShim_PsbtShim{
			PsbtShim: &lnrpc.PsbtShim{
				PendingChanId: itestLegacyFormatChanID[:],
			},
		},
	}
	openChannelReq := lntest.OpenChannelParams{
		Amt:         chanAmt,
		PushAmt:     pushAmt,
		FundingShim: shim,
	}
	chanUpdates, tempPsbt := ht.OpenChannelPsbt(from, to, openChannelReq)

	// Fund the PSBT by using the source node's wallet.
	fundReq := &walletrpc.FundPsbtRequest{
		Template: &walletrpc.FundPsbtRequest_Psbt{
			Psbt: tempPsbt,
		},
		Fees: &walletrpc.FundPsbtRequest_SatPerVbyte{
			SatPerVbyte: 2,
		},
	}
	fundResp := from.RPC.FundPsbt(fundReq)

	// We have a PSBT that has no witness data yet, which is exactly what
	// we need for the next step of verifying the PSBT with the funding
	// intents.
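	//
	// The verify step below hands the funded (but still unsigned) PSBT
	// back to the funding intent, which should check that the expected
	// channel funding output is present before we ask the wallet to sign.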
	msg := &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
			PsbtVerify: &lnrpc.FundingPsbtVerify{
				PendingChanId: itestLegacyFormatChanID[:],
				FundedPsbt:    fundResp.FundedPsbt,
			},
		},
	}
	from.RPC.FundingStateStep(msg)

	// Now we'll ask the source node's wallet to sign the PSBT so we can
	// finish the funding flow.
	finalizeReq := &walletrpc.FinalizePsbtRequest{
		FundedPsbt: fundResp.FundedPsbt,
	}
	finalizeRes := from.RPC.FinalizePsbt(finalizeReq)

	// We've signed our PSBT now, let's pass it to the intent again.
	msg = &lnrpc.FundingTransitionMsg{
		Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
			PsbtFinalize: &lnrpc.FundingPsbtFinalize{
				PendingChanId: itestLegacyFormatChanID[:],
				SignedPsbt:    finalizeRes.SignedPsbt,
			},
		},
	}
	from.RPC.FundingStateStep(msg)

	// Consume the "channel pending" update. This waits until the funding
	// transaction has been fully compiled.
	updateResp := ht.ReceiveOpenChannelUpdate(chanUpdates)
	upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
	require.True(ht, ok)
	chanPoint := &lnrpc.ChannelPoint{
		FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
			FundingTxidBytes: upd.ChanPending.Txid,
		},
		OutputIndex: upd.ChanPending.OutputIndex,
	}

	ht.MineBlocksAndAssertNumTxes(6, 1)
	ht.AssertTopologyChannelOpen(from, chanPoint)
	ht.AssertTopologyChannelOpen(to, chanPoint)
}

// chanRestoreViaRPC is a helper test method that returns a nodeRestorer
// instance which will restore the target node from a password+seed, then
// trigger a SCB restore using the RPC interface.
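//
// The multi argument is the packed multi-channel backup blob that will be
// passed to RestoreChanBackups on the freshly restored node.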
func chanRestoreViaRPC(ht *lntest.HarnessTest, password []byte,
	mnemonic []string, multi []byte,
	oldNode *node.HarnessNode) nodeRestorer {

	backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
		MultiChanBackup: multi,
	}

	return func() *node.HarnessNode {
		newNode := ht.RestoreNodeWithSeed(
			"dave", nil, password, mnemonic, "", revocationWindow,
			nil,
		)
		req := &lnrpc.RestoreChanBackupRequest{Backup: backup}
		newNode.RPC.RestoreChanBackups(req)

		return newNode
	}
}

// assertTimeLockSwept asserts that, once Dave's outputs mature, he claims
// them. This function will advance 2 blocks such that all the pending closing
// transactions would be swept in the end.
//
// Note: this function is only used in this test file and has been made
// specifically for testChanRestoreScenario.
func assertTimeLockSwept(ht *lntest.HarnessTest, carol, dave *node.HarnessNode,
	carolStartingBalance, daveStartingBalance int64) {

	// We expect Carol to sweep her funds and also the anchor tx.
	expectedTxes := 2

	// Carol should sweep her funds immediately, as they are not
	// timelocked.
	ht.Miner.AssertNumTxsInMempool(expectedTxes)

	// Carol should consider the channel pending force close (since she is
	// waiting for her sweep to confirm).
	ht.AssertNumPendingForceClose(carol, 1)

	// Dave is considering it "pending force close", as we must wait before
	// he can sweep his outputs.
	ht.AssertNumPendingForceClose(dave, 1)

	// Mine the sweep (and anchor) tx(ns).
	ht.MineBlocksAndAssertNumTxes(1, expectedTxes)

	// Now Carol should consider the channel fully closed.
	ht.AssertNumPendingForceClose(carol, 0)

	// We query Carol's balance to make sure it increased after the channel
	// closed. This checks that she was able to sweep the funds she had in
	// the channel.
	carolBalResp := carol.RPC.WalletBalance()
	carolBalance := carolBalResp.ConfirmedBalance
	require.Greater(ht, carolBalance, carolStartingBalance,
		"balance not increased")

	// After Dave's output matures, he should reclaim his funds.
	//
	// The commit sweep resolver publishes the sweep tx at defaultCSV-1 and
	// we already mined one block after the commitment was published, so
	// take that into account.
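	//
	// In other words, we mine defaultCSV-2 blocks here: e.g. if defaultCSV
	// were 4, one block has already been mined above, so mining two more
	// brings the total since the close to defaultCSV-1, at which point
	// Dave's sweep should appear in the mempool.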
	ht.MineBlocks(defaultCSV - 1 - 1)
	daveSweep := ht.Miner.AssertNumTxsInMempool(1)[0]
	block := ht.MineBlocksAndAssertNumTxes(1, 1)[0]
	ht.Miner.AssertTxInBlock(block, daveSweep)

	// Now the channel should be fully closed also from Dave's POV.
	ht.AssertNumPendingForceClose(dave, 0)

	// Make sure Dave got his balance back.
	err := wait.NoError(func() error {
		daveBalResp := dave.RPC.WalletBalance()
		daveBalance := daveBalResp.ConfirmedBalance
		if daveBalance <= daveStartingBalance {
			return fmt.Errorf("expected dave to have balance "+
				"above %d, instead had %v", daveStartingBalance,
				daveBalance)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err)

	ht.AssertNodeNumChannels(dave, 0)
	ht.AssertNodeNumChannels(carol, 0)
}

// assertDLPExecuted asserts that Dave is a node that has recovered his state
// from scratch. Carol should then force close on chain, with Dave sweeping his
// funds immediately, and Carol sweeping her funds after her CSV delay is up.
func assertDLPExecuted(ht *lntest.HarnessTest,
	carol *node.HarnessNode, carolStartingBalance int64,
	dave *node.HarnessNode, daveStartingBalance int64,
	commitType lnrpc.CommitmentType) {

	ht.Helper()

	// Increase the fee estimate so that the following force close tx will
	// be cpfp'ed.
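	// Roughly, with anchor commitments a high fee estimate makes the
	// sweeper spend the anchor output with a child transaction at the
	// higher fee rate, bumping the low-fee commitment transaction.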
	ht.SetFeeEstimate(30000)

	// We disabled auto-reconnect for some tests to avoid timing issues.
	// To make sure the nodes are initiating DLP now, we have to manually
	// re-connect them.
	ht.EnsureConnected(carol, dave)

	// Upon reconnection, the nodes should detect that Dave is out of sync.
	// Carol should force close the channel using her latest commitment.
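	//
	// Roughly, this is the data loss protection (DLP) flow: the
	// channel_reestablish exchange reveals that Dave's state is stale, he
	// learns Carol's current per-commitment point from it, and he then
	// waits for her force close so he can sweep his balance from her
	// commitment transaction.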
	expectedTxes := 1
	if lntest.CommitTypeHasAnchors(commitType) {
		expectedTxes = 2
	}
	ht.Miner.AssertNumTxsInMempool(expectedTxes)

	// Channel should be in the state "waiting close" for Carol since she
	// broadcasted the force close tx.
	ht.AssertNumWaitingClose(carol, 1)

	// Dave should also consider the channel "waiting close", as he noticed
	// the channel was out of sync, and is now waiting for a force close to
	// hit the chain.
	ht.AssertNumWaitingClose(dave, 1)

	// Restart Dave to make sure he is able to sweep the funds after
	// shutdown.
	ht.RestartNode(dave)

	// Generate a single block, which should confirm the closing tx.
	ht.MineBlocksAndAssertNumTxes(1, expectedTxes)

	// Dave should consider the channel pending force close (since he is
	// waiting for his sweep to confirm).
	ht.AssertNumPendingForceClose(dave, 1)

	// Carol is considering it "pending force close", as we must wait
	// before she can sweep her outputs.
	ht.AssertNumPendingForceClose(carol, 1)

	if commitType == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
		// Dave should sweep his anchor only, since he still has the
		// lease CLTV constraint on his commitment output.
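		//
		// With script-enforced leases, the commitment outputs carry
		// an additional CLTV that only expires at the end of the
		// lease, so the funds themselves cannot be swept yet.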
		ht.Miner.AssertNumTxsInMempool(1)

		// Mine Dave's anchor sweep tx.
		ht.MineBlocksAndAssertNumTxes(1, 1)

		// After Carol's output matures, she should also reclaim her
		// funds.
		//
		// The commit sweep resolver publishes the sweep tx at
		// defaultCSV-1 and we already mined one block after the
		// commitment was published, so take that into account.
		ht.MineBlocks(defaultCSV - 1 - 1)
		ht.MineBlocksAndAssertNumTxes(1, 1)

		// Now the channel should be fully closed also from Carol's
		// POV.
		ht.AssertNumPendingForceClose(carol, 0)

		// We'll now mine the remaining blocks to prompt Dave to sweep
		// his CLTV-constrained output.
		resp := dave.RPC.PendingChannels()
		blocksTilMaturity :=
			resp.PendingForceClosingChannels[0].BlocksTilMaturity
		require.Positive(ht, blocksTilMaturity)

		ht.MineBlocks(uint32(blocksTilMaturity))
		ht.MineBlocksAndAssertNumTxes(1, 1)

		// Now Dave should consider the channel fully closed.
		ht.AssertNumPendingForceClose(dave, 0)
	} else {
		// Dave should sweep his funds immediately, as they are not
		// timelocked. We also expect Dave to sweep his anchor, if
		// present.
		ht.Miner.AssertNumTxsInMempool(expectedTxes)

		// Mine the sweep tx.
		ht.MineBlocksAndAssertNumTxes(1, expectedTxes)

		// Now Dave should consider the channel fully closed.
		ht.AssertNumPendingForceClose(dave, 0)

		// After Carol's output matures, she should also reclaim her
		// funds.
		//
		// The commit sweep resolver publishes the sweep tx at
		// defaultCSV-1 and we already mined one block after the
		// commitment was published, so take that into account.
		ht.MineBlocks(defaultCSV - 1 - 1)
		ht.MineBlocksAndAssertNumTxes(1, 1)

		// Now the channel should be fully closed also from Carol's
		// POV.
		ht.AssertNumPendingForceClose(carol, 0)
	}

	// We query Dave's balance to make sure it increased after the channel
	// closed. This checks that he was able to sweep the funds he had in
	// the channel.
	daveBalResp := dave.RPC.WalletBalance()
	daveBalance := daveBalResp.ConfirmedBalance
	require.Greater(ht, daveBalance, daveStartingBalance,
		"balance not increased")

	// Make sure Carol got her balance back.
	err := wait.NoError(func() error {
		carolBalResp := carol.RPC.WalletBalance()
		carolBalance := carolBalResp.ConfirmedBalance

		// With Neutrino we don't get a backend error when trying to
		// publish an orphan TX (which is what the sweep for the remote
		// anchor is since the remote commitment TX was not broadcast).
		// That's why the wallet still sees that as unconfirmed and we
		// need to count the total balance instead of the confirmed.
		if ht.IsNeutrinoBackend() {
			carolBalance = carolBalResp.TotalBalance
		}

		if carolBalance <= carolStartingBalance {
			return fmt.Errorf("expected carol to have balance "+
				"above %d, instead had %v",
				carolStartingBalance, carolBalance)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "timeout while checking carol's balance")

	ht.AssertNodeNumChannels(dave, 0)
	ht.AssertNodeNumChannels(carol, 0)
}