2020-08-12 16:06:34 +02:00
|
|
|
package itest
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2020-08-12 16:49:39 +02:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
2020-08-12 16:06:34 +02:00
|
|
|
"sync"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2022-02-23 14:48:00 +01:00
|
|
|
"github.com/btcsuite/btcd/btcutil"
|
2020-08-12 16:06:34 +02:00
|
|
|
"github.com/btcsuite/btcd/wire"
|
|
|
|
"github.com/lightningnetwork/lnd/chanbackup"
|
2022-07-28 11:48:54 +02:00
|
|
|
"github.com/lightningnetwork/lnd/funding"
|
2020-08-12 16:06:34 +02:00
|
|
|
"github.com/lightningnetwork/lnd/lnrpc"
|
2021-01-25 22:27:25 +01:00
|
|
|
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
2022-07-28 11:44:23 +02:00
|
|
|
"github.com/lightningnetwork/lnd/lntemp"
|
|
|
|
"github.com/lightningnetwork/lnd/lntemp/node"
|
2020-08-12 16:06:34 +02:00
|
|
|
"github.com/lightningnetwork/lnd/lntest"
|
|
|
|
"github.com/lightningnetwork/lnd/lntest/wait"
|
2020-08-12 16:49:39 +02:00
|
|
|
"github.com/stretchr/testify/require"
|
2020-08-12 16:06:34 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// testChannelBackupRestore tests that we're able to recover from, and initiate
// the DLP protocol via: the RPC restore command, restoring on unlock, and
// restoring from initial wallet creation. We'll also alternate between
// restoring from the on disk file, and restoring from the exported RPC command
// as well.
func testChannelBackupRestore(ht *lntemp.HarnessTest) {
	// Password used for every node restored from seed in the sub-tests
	// below.
	password := []byte("El Psy Kongroo")

	var testCases = []chanRestoreTestCase{
		// Restore from backups obtained via the RPC interface. Dave
		// was the initiator, of the non-advertised channel.
		{
			name:            "restore from RPC backup",
			channelsUpdated: false,
			initiator:       true,
			private:         false,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// For this restoration method, we'll grab the
				// current multi-channel backup from the old
				// node, and use it to restore a new node
				// within the closure.
				chanBackup := oldNode.RPC.ExportAllChanBackups()

				multi := chanBackup.MultiChanBackup.
					MultiChanBackup

				// In our nodeRestorer function, we'll restore
				// the node from seed, then manually recover
				// the channel backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface.
		{
			name:      "restore from backup file",
			initiator: true,
			private:   false,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that will restore
				// using the on-disk channel.backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore the backup as part of node initialization with the
		// prior mnemonic and new backup seed.
		{
			name:      "restore during creation",
			initiator: true,
			private:   false,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// First, fetch the current backup state as is,
				// to obtain our latest Multi.
				chanBackup := oldNode.RPC.ExportAllChanBackups()
				backupSnapshot := &lnrpc.ChanBackupSnapshot{
					MultiChanBackup: chanBackup.
						MultiChanBackup,
				}

				// Create a new nodeRestorer that will restore
				// the node using the Multi backup we just
				// obtained above.
				return func() *node.HarnessNode {
					return st.RestoreNodeWithSeed(
						"dave", nil, password, mnemonic,
						"", 1000, backupSnapshot,
						copyPorts(oldNode),
					)
				}
			},
		},

		// Restore the backup once the node has already been
		// re-created, using the Unlock call.
		{
			name:      "restore during unlock",
			initiator: true,
			private:   false,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// First, fetch the current backup state as is,
				// to obtain our latest Multi.
				chanBackup := oldNode.RPC.ExportAllChanBackups()
				backupSnapshot := &lnrpc.ChanBackupSnapshot{
					MultiChanBackup: chanBackup.
						MultiChanBackup,
				}

				// Create a new nodeRestorer that will restore
				// the node with its seed, but no channel
				// backup, shutdown this initialized node, then
				// restart it again using Unlock.
				return func() *node.HarnessNode {
					newNode := st.RestoreNodeWithSeed(
						"dave", nil, password, mnemonic,
						"", 1000, nil,
						copyPorts(oldNode),
					)

					// Restarting with the snapshot
					// triggers the restore on unlock.
					st.RestartNode(newNode, backupSnapshot)

					return newNode
				}
			},
		},

		// Restore the backup from the on-disk file a second time to
		// make sure imports can be canceled and later resumed.
		{
			name:      "restore from backup file twice",
			initiator: true,
			private:   false,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that will restore
				// using the on-disk channel.backup.
				backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
					MultiChanBackup: multi,
				}

				return func() *node.HarnessNode {
					newNode := st.RestoreNodeWithSeed(
						"dave", nil, password, mnemonic,
						"", 1000, nil,
						copyPorts(oldNode),
					)

					// Restore the same backup twice in a
					// row to exercise the cancel/resume
					// path of the import.
					req := &lnrpc.RestoreChanBackupRequest{
						Backup: backup,
					}
					newNode.RPC.RestoreChanBackups(req)

					req = &lnrpc.RestoreChanBackupRequest{
						Backup: backup,
					}
					newNode.RPC.RestoreChanBackups(req)

					return newNode
				}
			},
		},

		// Use the channel backup file that contains an unconfirmed
		// channel and make sure recovery works as well.
		{
			name:            "restore unconfirmed channel file",
			channelsUpdated: false,
			initiator:       true,
			private:         false,
			unconfirmed:     true,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Let's assume time passes, the channel
				// confirms in the meantime but for some reason
				// the backup we made while it was still
				// unconfirmed is the only backup we have. We
				// should still be able to restore it. To
				// simulate time passing, we mine some blocks
				// to get the channel confirmed _after_ we saved
				// the backup.
				st.Miner.MineBlocksAndAssertNumTxes(6, 1)

				// In our nodeRestorer function, we'll restore
				// the node from seed, then manually recover
				// the channel backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Create a backup using RPC that contains an unconfirmed
		// channel and make sure recovery works as well.
		{
			name:            "restore unconfirmed channel RPC",
			channelsUpdated: false,
			initiator:       true,
			private:         false,
			unconfirmed:     true,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// For this restoration method, we'll grab the
				// current multi-channel backup from the old
				// node. The channel should be included, even if
				// it is not confirmed yet.
				chanBackup := oldNode.RPC.ExportAllChanBackups()
				chanPoints := chanBackup.MultiChanBackup.ChanPoints
				require.NotEmpty(st, chanPoints,
					"unconfirmed channel not found")

				// Let's assume time passes, the channel
				// confirms in the meantime but for some reason
				// the backup we made while it was still
				// unconfirmed is the only backup we have. We
				// should still be able to restore it. To
				// simulate time passing, we mine some blocks
				// to get the channel confirmed _after_ we saved
				// the backup.
				st.Miner.MineBlocksAndAssertNumTxes(6, 1)

				// In our nodeRestorer function, we'll restore
				// the node from seed, then manually recover
				// the channel backup.
				multi := chanBackup.MultiChanBackup.MultiChanBackup
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface, for anchor commitment channels.
		{
			name:           "restore from backup file anchors",
			initiator:      true,
			private:        false,
			commitmentType: lnrpc.CommitmentType_ANCHORS,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channels.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that will restore
				// using the on-disk channels.backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface, for script-enforced leased channels.
		{
			name: "restore from backup file script " +
				"enforced lease",
			initiator:      true,
			private:        false,
			commitmentType: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that will restore
				// using the on-disk channel.backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore by also creating a channel with the legacy revocation
		// producer format to make sure old SCBs can still be recovered.
		{
			name:             "old revocation producer format",
			initiator:        true,
			legacyRevocation: true,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// For this restoration method, we'll grab the
				// current multi-channel backup from the old
				// node, and use it to restore a new node
				// within the closure.
				chanBackup := oldNode.RPC.ExportAllChanBackups()
				multi := chanBackup.MultiChanBackup.MultiChanBackup

				// In our nodeRestorer function, we'll restore
				// the node from seed, then manually recover the
				// channel backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore a channel that was force closed by dave just before
		// going offline.
		{
			name: "restore force closed from backup file " +
				"anchors",
			initiator:       true,
			private:         false,
			commitmentType:  lnrpc.CommitmentType_ANCHORS,
			localForceClose: true,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that will restore
				// using the on-disk channel.backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface, for zero-conf anchor channels.
		{
			name: "restore from backup file for zero-conf " +
				"anchors channel",
			initiator:      true,
			private:        false,
			commitmentType: lnrpc.CommitmentType_ANCHORS,
			zeroConf:       true,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channels.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that we'll restore
				// using the on-disk channels.backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore the backup from the on-disk file, using the RPC
		// interface for a zero-conf script-enforced leased channel.
		{
			name: "restore from backup file zero-conf " +
				"script-enforced leased channel",
			initiator:      true,
			private:        false,
			commitmentType: lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE,
			zeroConf:       true,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that we'll restore
				// using the on-disk channel backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},

		// Restore a zero-conf anchors channel that was force closed by
		// dave just before going offline.
		{
			name: "restore force closed from backup file " +
				"anchors w/ zero-conf",
			initiator:       true,
			private:         false,
			commitmentType:  lnrpc.CommitmentType_ANCHORS,
			localForceClose: true,
			zeroConf:        true,
			restoreMethod: func(st *lntemp.HarnessTest,
				oldNode *node.HarnessNode,
				backupFilePath string,
				mnemonic []string) nodeRestorer {

				// Read the entire Multi backup stored within
				// this node's channel.backup file.
				multi, err := ioutil.ReadFile(backupFilePath)
				require.NoError(st, err)

				// Now that we have Dave's backup file, we'll
				// create a new nodeRestorer that we'll restore
				// using the on-disk channel backup.
				return chanRestoreViaRPC(
					st, password, mnemonic, multi, oldNode,
				)
			},
		},
	}

	// TODO(roasbeef): online vs offline close?

	// TODO(roasbeef): need to re-trigger the on-disk file once the node
	// ann is updated?

	// Run each scenario as its own subtest, stopping at the first failure
	// so later (dependent) harness state isn't corrupted.
	for _, testCase := range testCases {
		// Capture the range variable for the closure below.
		testCase := testCase
		success := ht.Run(testCase.name, func(t *testing.T) {
			h, cleanup := ht.Subtest(t)
			defer cleanup()

			testChanRestoreScenario(h, &testCase, password)
		})
		if !success {
			break
		}
	}
}
|
|
|
|
|
|
|
|
// testChannelBackupUpdates tests that both the streaming channel update RPC,
// and the on-disk channel.backup are updated each time a channel is
// opened/closed.
func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) {
	ctxb := context.Background()

	// First, we'll make a temp directory that we'll use to store our
	// backup file, so we can check in on it during the test easily.
	backupDir := t.t.TempDir()

	// First, we'll create a new node, Carol. We'll also create a temporary
	// file that Carol will use to store her channel backups.
	backupFilePath := filepath.Join(
		backupDir, chanbackup.DefaultBackupFileName,
	)
	carolArgs := fmt.Sprintf("--backupfilepath=%v", backupFilePath)
	carol := net.NewNode(t.t, "carol", []string{carolArgs})
	defer shutdownAndAssert(net, t, carol)

	// Next, we'll register for streaming notifications for changes to the
	// backup file.
	backupStream, err := carol.SubscribeChannelBackups(
		ctxb, &lnrpc.ChannelBackupSubscription{},
	)
	if err != nil {
		t.Fatalf("unable to create backup stream: %v", err)
	}

	// We'll use this goroutine to proxy any updates to a channel we can
	// easily use below.
	var wg sync.WaitGroup
	backupUpdates := make(chan *lnrpc.ChanBackupSnapshot)
	streamErr := make(chan error)
	streamQuit := make(chan struct{})

	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			snapshot, err := backupStream.Recv()
			if err != nil {
				select {
				case streamErr <- err:
				case <-streamQuit:
					return
				}
			}
			// NOTE(review): after forwarding a stream error above
			// the loop falls through and may send a nil snapshot
			// on backupUpdates — presumably acceptable because the
			// consumer t.Fatalf's on streamErr first; verify.

			select {
			case backupUpdates <- snapshot:
			case <-streamQuit:
				return
			}
		}
	}()
	// Closing streamQuit unblocks the proxy goroutine on shutdown.
	defer close(streamQuit)

	// With Carol up, we'll now connect her to Alice, and open a channel
	// between them.
	net.ConnectNodes(t.t, carol, net.Alice)

	// Next, we'll open two channels between Alice and Carol back to back.
	var chanPoints []*lnrpc.ChannelPoint
	numChans := 2
	chanAmt := btcutil.Amount(1000000)
	for i := 0; i < numChans; i++ {
		chanPoint := openChannelAndAssert(
			t, net, net.Alice, carol,
			lntest.OpenChannelParams{Amt: chanAmt},
		)

		chanPoints = append(chanPoints, chanPoint)
	}

	// Using this helper function, we'll maintain a pointer to the latest
	// channel backup so we can compare it to the on disk state.
	var currentBackup *lnrpc.ChanBackupSnapshot
	assertBackupNtfns := func(numNtfns int) {
		for i := 0; i < numNtfns; i++ {
			select {
			case err := <-streamErr:
				t.Fatalf("error with backup stream: %v", err)

			case currentBackup = <-backupUpdates:

			case <-time.After(time.Second * 5):
				t.Fatalf("didn't receive channel backup "+
					"notification %v", i+1)
			}
		}
	}

	// assertBackupFileState is a helper function that we'll use to compare
	// the on disk back up file to our currentBackup pointer above.
	assertBackupFileState := func() {
		err := wait.NoError(func() error {
			packedBackup, err := ioutil.ReadFile(backupFilePath)
			if err != nil {
				return fmt.Errorf("unable to read backup "+
					"file: %v", err)
			}

			// As each back up file will be encrypted with a fresh
			// nonce, we can't compare them directly, so instead
			// we'll compare the length which is a proxy for the
			// number of channels that the multi-backup contains.
			rawBackup := currentBackup.MultiChanBackup.MultiChanBackup
			if len(rawBackup) != len(packedBackup) {
				return fmt.Errorf("backup files don't match: "+
					"expected %x got %x", rawBackup, packedBackup)
			}

			// Additionally, we'll assert that both backups up
			// returned are valid.
			for i, backup := range [][]byte{rawBackup, packedBackup} {
				snapshot := &lnrpc.ChanBackupSnapshot{
					MultiChanBackup: &lnrpc.MultiChanBackup{
						MultiChanBackup: backup,
					},
				}
				_, err := carol.VerifyChanBackup(ctxb, snapshot)
				if err != nil {
					return fmt.Errorf("unable to verify "+
						"backup #%d: %v", i, err)
				}
			}

			return nil
		}, defaultTimeout)
		if err != nil {
			t.Fatalf("backup state invalid: %v", err)
		}
	}

	// As these two channels were just opened, we should've got two times
	// the pending and open notifications for channel backups.
	assertBackupNtfns(2 * 2)

	// The on disk file should also exactly match the latest backup that we
	// have.
	assertBackupFileState()

	// Next, we'll close the channels one by one. After each channel
	// closure, we should get a notification, and the on-disk state should
	// match this state as well.
	for i := 0; i < numChans; i++ {
		// To ensure force closes also trigger an update, we'll force
		// close half of the channels.
		forceClose := i%2 == 0

		chanPoint := chanPoints[i]

		closeChannelAndAssert(t, net, net.Alice, chanPoint, forceClose)

		// If we force closed the channel, then we'll mine enough
		// blocks to ensure all outputs have been swept.
		if forceClose {
			// A local force closed channel will trigger a
			// notification once the commitment TX confirms on
			// chain. But that won't remove the channel from the
			// backup just yet, that will only happen once the time
			// locked contract was fully resolved on chain.
			assertBackupNtfns(1)

			cleanupForceClose(t, net, net.Alice, chanPoint)

			// Now that the channel's been fully resolved, we expect
			// another notification.
			assertBackupNtfns(1)
			assertBackupFileState()
		} else {
			// We should get a single notification after closing,
			// and the on-disk state should match this latest
			// notifications.
			assertBackupNtfns(1)
			assertBackupFileState()
		}
	}
}
|
|
|
|
|
|
|
|
// testExportChannelBackup tests that we're able to properly export either a
|
|
|
|
// targeted channel's backup, or export backups of all the currents open
|
|
|
|
// channels.
|
|
|
|
func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) {
|
|
|
|
ctxb := context.Background()
|
|
|
|
|
|
|
|
// First, we'll create our primary test node: Carol. We'll use Carol to
|
|
|
|
// open channels and also export backups that we'll examine throughout
|
|
|
|
// the test.
|
2021-06-07 22:05:12 +02:00
|
|
|
carol := net.NewNode(t.t, "carol", nil)
|
2020-08-12 16:06:34 +02:00
|
|
|
defer shutdownAndAssert(net, t, carol)
|
|
|
|
|
|
|
|
// With Carol up, we'll now connect her to Alice, and open a channel
|
|
|
|
// between them.
|
2021-08-19 14:30:33 +02:00
|
|
|
net.ConnectNodes(t.t, carol, net.Alice)
|
2020-08-12 16:06:34 +02:00
|
|
|
|
|
|
|
// Next, we'll open two channels between Alice and Carol back to back.
|
|
|
|
var chanPoints []*lnrpc.ChannelPoint
|
|
|
|
numChans := 2
|
|
|
|
chanAmt := btcutil.Amount(1000000)
|
|
|
|
for i := 0; i < numChans; i++ {
|
|
|
|
chanPoint := openChannelAndAssert(
|
2021-08-19 14:12:50 +02:00
|
|
|
t, net, net.Alice, carol,
|
|
|
|
lntest.OpenChannelParams{Amt: chanAmt},
|
2020-08-12 16:06:34 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
chanPoints = append(chanPoints, chanPoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now that the channels are open, we should be able to fetch the
|
|
|
|
// backups of each of the channels.
|
|
|
|
for _, chanPoint := range chanPoints {
|
|
|
|
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
|
|
|
req := &lnrpc.ExportChannelBackupRequest{
|
|
|
|
ChanPoint: chanPoint,
|
|
|
|
}
|
|
|
|
chanBackup, err := carol.ExportChannelBackup(ctxt, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to fetch backup for channel %v: %v",
|
|
|
|
chanPoint, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// The returned backup should be full populated. Since it's
|
|
|
|
// encrypted, we can't assert any more than that atm.
|
|
|
|
if len(chanBackup.ChanBackup) == 0 {
|
|
|
|
t.Fatalf("obtained empty backup for channel: %v", chanPoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
// The specified chanPoint in the response should match our
|
|
|
|
// requested chanPoint.
|
|
|
|
if chanBackup.ChanPoint.String() != chanPoint.String() {
|
|
|
|
t.Fatalf("chanPoint mismatched: expected %v, got %v",
|
|
|
|
chanPoint.String(),
|
|
|
|
chanBackup.ChanPoint.String())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Before we proceed, we'll make two utility methods we'll use below
|
|
|
|
// for our primary assertions.
|
|
|
|
assertNumSingleBackups := func(numSingles int) {
|
|
|
|
err := wait.NoError(func() error {
|
|
|
|
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
|
|
|
req := &lnrpc.ChanBackupExportRequest{}
|
|
|
|
chanSnapshot, err := carol.ExportAllChannelBackups(
|
|
|
|
ctxt, req,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("unable to export channel "+
|
|
|
|
"backup: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if chanSnapshot.SingleChanBackups == nil {
|
|
|
|
return fmt.Errorf("single chan backups not " +
|
|
|
|
"populated")
|
|
|
|
}
|
|
|
|
|
|
|
|
backups := chanSnapshot.SingleChanBackups.ChanBackups
|
|
|
|
if len(backups) != numSingles {
|
|
|
|
return fmt.Errorf("expected %v singles, "+
|
|
|
|
"got %v", len(backups), numSingles)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}, defaultTimeout)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf(err.Error())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assertMultiBackupFound := func() func(bool, map[wire.OutPoint]struct{}) {
|
|
|
|
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
|
|
|
req := &lnrpc.ChanBackupExportRequest{}
|
|
|
|
chanSnapshot, err := carol.ExportAllChannelBackups(ctxt, req)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to export channel backup: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return func(found bool, chanPoints map[wire.OutPoint]struct{}) {
|
|
|
|
switch {
|
|
|
|
case found && chanSnapshot.MultiChanBackup == nil:
|
|
|
|
t.Fatalf("multi-backup not present")
|
|
|
|
|
|
|
|
case !found && chanSnapshot.MultiChanBackup != nil &&
|
|
|
|
(len(chanSnapshot.MultiChanBackup.MultiChanBackup) !=
|
|
|
|
chanbackup.NilMultiSizePacked):
|
|
|
|
|
|
|
|
t.Fatalf("found multi-backup when non should " +
|
|
|
|
"be found")
|
|
|
|
}
|
|
|
|
|
|
|
|
if !found {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
backedUpChans := chanSnapshot.MultiChanBackup.ChanPoints
|
|
|
|
if len(chanPoints) != len(backedUpChans) {
|
|
|
|
t.Fatalf("expected %v chans got %v", len(chanPoints),
|
|
|
|
len(backedUpChans))
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, chanPoint := range backedUpChans {
|
|
|
|
wirePoint := rpcPointToWirePoint(t, chanPoint)
|
|
|
|
if _, ok := chanPoints[wirePoint]; !ok {
|
|
|
|
t.Fatalf("unexpected backup: %v", wirePoint)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
chans := make(map[wire.OutPoint]struct{})
|
|
|
|
for _, chanPoint := range chanPoints {
|
|
|
|
chans[rpcPointToWirePoint(t, chanPoint)] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// We should have exactly two single channel backups contained, and we
|
|
|
|
// should also have a multi-channel backup.
|
|
|
|
assertNumSingleBackups(2)
|
|
|
|
assertMultiBackupFound()(true, chans)
|
|
|
|
|
|
|
|
// We'll now close each channel on by one. After we close a channel, we
|
|
|
|
// shouldn't be able to find that channel as a backup still. We should
|
|
|
|
// also have one less single written to disk.
|
|
|
|
for i, chanPoint := range chanPoints {
|
2021-08-11 06:29:40 +02:00
|
|
|
closeChannelAndAssert(t, net, net.Alice, chanPoint, false)
|
2020-08-12 16:06:34 +02:00
|
|
|
|
|
|
|
assertNumSingleBackups(len(chanPoints) - i - 1)
|
|
|
|
|
|
|
|
delete(chans, rpcPointToWirePoint(t, chanPoint))
|
|
|
|
assertMultiBackupFound()(true, chans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// At this point we shouldn't have any single or multi-chan backups at
|
|
|
|
// all.
|
|
|
|
assertNumSingleBackups(0)
|
|
|
|
assertMultiBackupFound()(false, nil)
|
|
|
|
}
|
|
|
|
|
|
|
|
// nodeRestorer is a function closure that allows each chanRestoreTestCase to
// control exactly *how* the prior node is restored. This might be using a
// backup obtained over RPC, or the file system, etc. Calling the closure
// starts the restored node and returns its handle.
type nodeRestorer func() *node.HarnessNode
|
2020-08-12 16:06:34 +02:00
|
|
|
|
|
|
|
// chanRestoreTestCase describes a test case for an end to end SCB restoration
// work flow. One node will start from scratch using an existing SCB. At the
// end of the test, both nodes should be made whole via the DLP protocol.
type chanRestoreTestCase struct {
	// name is the name of the target test case.
	name string

	// channelsUpdated, if false, means that no updates
	// have taken place within the channel before restore.
	// Otherwise, HTLCs will be settled between the two parties
	// before restoration modifying the balance beyond the initial
	// allocation.
	channelsUpdated bool

	// initiator signals if Dave should be the one that opens the
	// channel to Alice, or if it should be the other way around.
	initiator bool

	// private signals if the channel from Dave to Carol should be
	// private or not.
	private bool

	// unconfirmed signals if the channel from Dave to Carol should be
	// confirmed or not.
	unconfirmed bool

	// commitmentType specifies the commitment type that should be used for
	// the channel from Dave to Carol.
	commitmentType lnrpc.CommitmentType

	// legacyRevocation signals if a channel with the legacy revocation
	// producer format should also be created before restoring.
	legacyRevocation bool

	// localForceClose signals if the channel should be force closed by the
	// node that is going to recover.
	localForceClose bool

	// restoreMethod takes an old node, then returns a function
	// closure that'll return the same node, but with its state
	// restored via a custom method. We use this to abstract away
	// _how_ a node is restored from our assertions once the node
	// has been fully restored itself.
	restoreMethod func(ht *lntemp.HarnessTest, oldNode *node.HarnessNode,
		backupFilePath string, mnemonic []string) nodeRestorer

	// zeroConf denotes whether the opened channel is a zero-conf channel
	// or not.
	zeroConf bool
}
|
|
|
|
|
|
|
|
// testChanRestoreScenario executes a chanRestoreTestCase from end to end,
// ensuring that after Dave restores his channel state according to the
// testCase, the DLP protocol is executed properly and both nodes are made
// whole. The password is used to create (and later restore) Dave's wallet.
func testChanRestoreScenario(ht *lntemp.HarnessTest,
	testCase *chanRestoreTestCase, password []byte) {

	const (
		chanAmt = btcutil.Amount(10000000)
		pushAmt = btcutil.Amount(5000000)
	)

	// Use short backoffs so the restored node attempts to reconnect to
	// its peer quickly.
	nodeArgs := []string{
		"--minbackoff=50ms",
		"--maxbackoff=1s",
	}
	if testCase.commitmentType != lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE {
		args := nodeArgsForCommitType(testCase.commitmentType)
		nodeArgs = append(nodeArgs, args...)
	}

	// Zero-conf channels require both the scid-alias and zero-conf
	// protocol options to be enabled on both nodes.
	if testCase.zeroConf {
		nodeArgs = append(
			nodeArgs, "--protocol.option-scid-alias",
			"--protocol.zero-conf",
		)
	}

	// First, we'll create a brand new node we'll use within the test. If
	// we have a custom backup file specified, then we'll also create that
	// for use.
	dave, mnemonic, _ := ht.NewNodeWithSeed(
		"dave", nodeArgs, password, false,
	)
	carol := ht.NewNode("carol", nodeArgs)

	// Now that our new nodes are created, we'll give them some coins for
	// channel opening and anchor sweeping.
	ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)

	// For the anchor output case we need two UTXOs for Carol so she can
	// sweep both the local and remote anchor.
	if commitTypeHasAnchors(testCase.commitmentType) {
		ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
	}

	ht.FundCoins(btcutil.SatoshiPerBitcoin, dave)

	// Determine the channel opener/fundee based on the test case.
	var from, to *node.HarnessNode
	if testCase.initiator {
		from, to = dave, carol
	} else {
		from, to = carol, dave
	}

	// Next, we'll connect Dave to Carol, and open a new channel to her
	// with a portion pushed.
	ht.ConnectNodes(dave, carol)

	// We will either open a confirmed or unconfirmed channel, depending on
	// the requirements of the test case.
	var chanPoint *lnrpc.ChannelPoint
	switch {
	case testCase.unconfirmed:
		ht.OpenChannelAssertPending(
			from, to, lntemp.OpenChannelParams{
				Amt:     chanAmt,
				PushAmt: pushAmt,
			},
		)

		// Give the pubsub some time to update the channel backup.
		err := wait.NoError(func() error {
			fi, err := os.Stat(dave.Cfg.ChanBackupPath())
			if err != nil {
				return err
			}
			// A backup file at or below this size contains no
			// channels yet.
			if fi.Size() <= chanbackup.NilMultiSizePacked {
				return fmt.Errorf("backup file empty")
			}
			return nil
		}, defaultTimeout)
		require.NoError(ht, err, "channel backup not updated in time")

	// Also create channels with the legacy revocation producer format if
	// requested.
	case testCase.legacyRevocation:
		createLegacyRevocationChannel(ht, chanAmt, pushAmt, from, to)

	default:
		// If we are testing zero-conf channels, setup a
		// ChannelAcceptor for the fundee.
		var cancelAcceptor context.CancelFunc
		if testCase.zeroConf {
			// Setup a ChannelAcceptor.
			acceptStream, cancel := to.RPC.ChannelAcceptor()
			cancelAcceptor = cancel
			go acceptChannel(ht.T, true, acceptStream)
		}

		// Script-enforced lease channels need a funding shim carrying
		// the thaw height.
		var fundingShim *lnrpc.FundingShim
		if testCase.commitmentType == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
			_, minerHeight := ht.Miner.GetBestBlock()
			thawHeight := uint32(minerHeight + thawHeightDelta)

			fundingShim, _, _ = deriveFundingShim(
				ht, from, to, chanAmt, thawHeight, true,
			)
		}
		chanPoint = ht.OpenChannel(
			from, to, lntemp.OpenChannelParams{
				Amt:            chanAmt,
				PushAmt:        pushAmt,
				Private:        testCase.private,
				FundingShim:    fundingShim,
				CommitmentType: testCase.commitmentType,
				ZeroConf:       testCase.zeroConf,
			},
		)

		// Remove the ChannelAcceptor.
		if testCase.zeroConf {
			cancelAcceptor()
		}

		// Wait for both sides to see the opened channel.
		ht.AssertTopologyChannelOpen(dave, chanPoint)
		ht.AssertTopologyChannelOpen(carol, chanPoint)
	}

	// If both parties should start with existing channel updates, then
	// we'll send+settle an HTLC between 'from' and 'to' now.
	if testCase.channelsUpdated {
		invoice := &lnrpc.Invoice{
			Memo:  "testing",
			Value: 100000,
		}
		invoiceResp := to.RPC.AddInvoice(invoice)

		requests := []string{invoiceResp.PaymentRequest}
		ht.CompletePaymentRequests(from, requests)
	}

	// If we're testing that locally force closed channels can be restored
	// then we issue the force close now. Note chanPoint is nil for the
	// unconfirmed and legacy-revocation cases above.
	if testCase.localForceClose && chanPoint != nil {
		// Calls the rpc to close the channel.
		ht.CloseChannelAssertPending(dave, chanPoint, true)

		// After closing the channel we mine one transaction to make
		// sure the commitment TX was confirmed.
		ht.Miner.MineBlocksAndAssertNumTxes(1, 1)

		// Now we need to make sure that the channel is still in the
		// backup. Otherwise restoring won't work later.
		dave.RPC.ExportChanBackup(chanPoint)
	}

	// Before we start the recovery, we'll record the balances of both
	// Carol and Dave to ensure they both sweep their coins at the end.
	carolBalResp := carol.RPC.WalletBalance()
	carolStartingBalance := carolBalResp.ConfirmedBalance

	daveBalance := dave.RPC.WalletBalance()
	daveStartingBalance := daveBalance.ConfirmedBalance

	// At this point, we'll now execute the restore method to give us the
	// new node we should attempt our assertions against.
	backupFilePath := dave.Cfg.ChanBackupPath()
	restoredNodeFunc := testCase.restoreMethod(
		ht, dave, backupFilePath, mnemonic,
	)

	// Now that we're able to make our restored node, we'll shutdown the
	// old Dave node as we'll be restoring it shortly below.
	ht.Shutdown(dave)

	// To make sure the channel state is advanced correctly if the channel
	// peer is not online at first, we also shutdown Carol.
	restartCarol := ht.SuspendNode(carol)

	// Next, we'll make a new Dave and start the bulk of our recovery
	// workflow.
	dave = restoredNodeFunc()

	// First ensure that the on-chain balance is restored.
	err := wait.NoError(func() error {
		daveBalResp := dave.RPC.WalletBalance()
		daveBal := daveBalResp.ConfirmedBalance
		if daveBal <= 0 {
			return fmt.Errorf("expected positive balance, had %v",
				daveBal)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "On-chain balance not restored")

	// For our force close scenario we don't need the channel to be closed
	// by Carol since it was already force closed before we started the
	// recovery. All we need is for Carol to send us over the commit height
	// so we can sweep the time locked output with the correct commit point.
	if testCase.localForceClose {
		ht.AssertNumPendingForceClose(dave, 1)

		require.NoError(ht, restartCarol(), "restart carol failed")

		// Now that we have our new node up, we expect that it'll
		// re-connect to Carol automatically based on the restored
		// backup.
		ht.EnsureConnected(dave, carol)

		assertTimeLockSwept(
			ht, carol, carolStartingBalance, dave,
			daveStartingBalance,
			commitTypeHasAnchors(testCase.commitmentType),
		)

		return
	}

	// We now check that the restored channel is in the proper state. It
	// should not yet be force closing as no connection with the remote
	// peer was established yet. We should also not be able to close the
	// channel.
	channel := ht.AssertNumWaitingClose(dave, 1)[0]
	chanPointStr := channel.Channel.ChannelPoint

	// We also want to make sure we cannot force close in this state. That
	// would get the state machine in a weird state.
	chanPointParts := strings.Split(chanPointStr, ":")
	chanPointIndex, _ := strconv.ParseUint(chanPointParts[1], 10, 32)

	// We don't get an error directly but only when reading the first
	// message of the stream.
	err = ht.CloseChannelAssertErr(
		dave, &lnrpc.ChannelPoint{
			FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
				FundingTxidStr: chanPointParts[0],
			},
			OutputIndex: uint32(chanPointIndex),
		}, true,
	)
	require.Contains(ht, err.Error(), "cannot close channel with state: ")
	require.Contains(ht, err.Error(), "ChanStatusRestored")

	// Increase the fee estimate so that the following force close tx will
	// be cpfp'ed in case of anchor commitments.
	ht.SetFeeEstimate(30000)

	// Now that we have ensured that the channels restored by the backup are
	// in the correct state even without the remote peer telling us so,
	// let's start up Carol again.
	require.NoError(ht, restartCarol(), "restart carol failed")

	// With anchor commitments, Carol sweeps both the local and remote
	// anchor, so she ends up with two UTXOs instead of one.
	numUTXOs := 1
	if commitTypeHasAnchors(testCase.commitmentType) {
		numUTXOs = 2
	}
	ht.AssertNumUTXOs(carol, numUTXOs)

	// Now that we have our new node up, we expect that it'll re-connect to
	// Carol automatically based on the restored backup.
	ht.EnsureConnected(dave, carol)

	// TODO(roasbeef): move dave restarts?

	// Now we'll assert that both sides properly execute the DLP protocol.
	// We grab their balances now to ensure that they're made whole at the
	// end of the protocol.
	assertDLPExecuted(
		ht, carol, carolStartingBalance, dave,
		daveStartingBalance, testCase.commitmentType,
	)
}
|
|
|
|
|
2022-07-28 11:48:54 +02:00
|
|
|
// testDataLossProtection tests that if one of the nodes in a channel
|
|
|
|
// relationship lost state, they will detect this during channel sync, and the
|
|
|
|
// up-to-date party will force close the channel, giving the outdated party the
|
|
|
|
// opportunity to sweep its output.
|
2022-07-28 12:25:06 +02:00
|
|
|
func testDataLossProtection(ht *lntemp.HarnessTest) {
|
2022-07-28 11:48:54 +02:00
|
|
|
const (
|
|
|
|
chanAmt = funding.MaxBtcFundingAmount
|
|
|
|
paymentAmt = 10000
|
|
|
|
numInvoices = 6
|
|
|
|
)
|
|
|
|
|
|
|
|
// Carol will be the up-to-date party. We set --nolisten to ensure Dave
|
|
|
|
// won't be able to connect to her and trigger the channel data
|
|
|
|
// protection logic automatically. We also can't have Carol
|
|
|
|
// automatically re-connect too early, otherwise DLP would be initiated
|
|
|
|
// at the wrong moment.
|
2022-07-28 12:25:06 +02:00
|
|
|
carol := ht.NewNode("Carol", []string{"--nolisten", "--minbackoff=1h"})
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Dave will be the party losing his state.
|
2022-07-28 12:25:06 +02:00
|
|
|
dave := ht.NewNode("Dave", nil)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Before we make a channel, we'll load up Carol with some coins sent
|
|
|
|
// directly from the miner.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.FundCoins(btcutil.SatoshiPerBitcoin, carol)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// timeTravel is a method that will make Carol open a channel to the
|
|
|
|
// passed node, settle a series of payments, then reset the node back
|
|
|
|
// to the state before the payments happened. When this method returns
|
|
|
|
// the node will be unaware of the new state updates. The returned
|
|
|
|
// function can be used to restart the node in this state.
|
2022-07-28 12:25:06 +02:00
|
|
|
timeTravel := func(node *node.HarnessNode) (func() error,
|
|
|
|
*lnrpc.ChannelPoint, int64) {
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// We must let the node communicate with Carol before they are
|
|
|
|
// able to open channel, so we connect them.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.EnsureConnected(carol, node)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// We'll first open up a channel between them with a 0.5 BTC
|
|
|
|
// value.
|
2022-07-28 12:25:06 +02:00
|
|
|
chanPoint := ht.OpenChannel(
|
|
|
|
carol, node, lntemp.OpenChannelParams{
|
2022-07-28 11:48:54 +02:00
|
|
|
Amt: chanAmt,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
|
|
|
|
// With the channel open, we'll create a few invoices for the
|
|
|
|
// node that Carol will pay to in order to advance the state of
|
|
|
|
// the channel.
|
|
|
|
// TODO(halseth): have dangling HTLCs on the commitment, able to
|
|
|
|
// retrieve funds?
|
2022-07-28 12:25:06 +02:00
|
|
|
payReqs, _, _ := ht.CreatePayReqs(node, paymentAmt, numInvoices)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Send payments from Carol using 3 of the payment hashes
|
|
|
|
// generated above.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.CompletePaymentRequests(carol, payReqs[:numInvoices/2])
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Next query for the node's channel state, as we sent 3
|
|
|
|
// payments of 10k satoshis each, it should now see his balance
|
|
|
|
// as being 30k satoshis.
|
2022-07-28 12:25:06 +02:00
|
|
|
nodeChan := ht.AssertChannelLocalBalance(
|
|
|
|
node, chanPoint, 30_000,
|
|
|
|
)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Grab the current commitment height (update number), we'll
|
|
|
|
// later revert him to this state after additional updates to
|
|
|
|
// revoke this state.
|
|
|
|
stateNumPreCopy := nodeChan.NumUpdates
|
|
|
|
|
|
|
|
// With the temporary file created, copy the current state into
|
|
|
|
// the temporary file we created above. Later after more
|
|
|
|
// updates, we'll restore this state.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.BackupDB(node)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-07-28 12:25:06 +02:00
|
|
|
// Reconnect the peers after the restart that was needed for
|
|
|
|
// the db backup.
|
|
|
|
ht.EnsureConnected(carol, node)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Finally, send more payments from , using the remaining
|
|
|
|
// payment hashes.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.CompletePaymentRequests(carol, payReqs[numInvoices/2:])
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Now we shutdown the node, copying over the its temporary
|
|
|
|
// database state which has the *prior* channel state over his
|
|
|
|
// current most up to date state. With this, we essentially
|
|
|
|
// force the node to travel back in time within the channel's
|
|
|
|
// history.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.RestartNodeAndRestoreDB(node)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Make sure the channel is still there from the PoV of the
|
|
|
|
// node.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.AssertNodeNumChannels(node, 1)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Now query for the channel state, it should show that it's at
|
|
|
|
// a state number in the past, not the *latest* state.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.AssertChannelNumUpdates(node, stateNumPreCopy, chanPoint)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-07-28 12:25:06 +02:00
|
|
|
balResp := node.RPC.WalletBalance()
|
|
|
|
restart := ht.SuspendNode(node)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-07-28 12:25:06 +02:00
|
|
|
return restart, chanPoint, balResp.ConfirmedBalance
|
2022-07-28 11:48:54 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Reset Dave to a state where he has an outdated channel state.
|
2022-07-28 12:25:06 +02:00
|
|
|
restartDave, _, daveStartingBalance := timeTravel(dave)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// We make a note of the nodes' current on-chain balances, to make sure
|
|
|
|
// they are able to retrieve the channel funds eventually,
|
2022-07-28 12:25:06 +02:00
|
|
|
carolBalResp := carol.RPC.WalletBalance()
|
2022-07-28 11:48:54 +02:00
|
|
|
carolStartingBalance := carolBalResp.ConfirmedBalance
|
|
|
|
|
|
|
|
// Restart Dave to trigger a channel resync.
|
2022-07-28 12:25:06 +02:00
|
|
|
require.NoError(ht, restartDave(), "unable to restart dave")
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Assert that once Dave comes up, they reconnect, Carol force closes
|
|
|
|
// on chain, and both of them properly carry out the DLP protocol.
|
2022-07-28 12:25:06 +02:00
|
|
|
assertDLPExecuted(
|
|
|
|
ht, carol, carolStartingBalance, dave,
|
|
|
|
daveStartingBalance, lnrpc.CommitmentType_STATIC_REMOTE_KEY,
|
2022-07-28 11:48:54 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// As a second part of this test, we will test the scenario where a
|
|
|
|
// channel is closed while Dave is offline, loses his state and comes
|
|
|
|
// back online. In this case the node should attempt to resync the
|
|
|
|
// channel, and the peer should resend a channel sync message for the
|
|
|
|
// closed channel, such that Dave can retrieve his funds.
|
|
|
|
//
|
|
|
|
// We start by letting Dave time travel back to an outdated state.
|
2022-07-28 12:25:06 +02:00
|
|
|
restartDave, chanPoint2, daveStartingBalance := timeTravel(dave)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-07-28 12:25:06 +02:00
|
|
|
carolBalResp = carol.RPC.WalletBalance()
|
2022-07-28 11:48:54 +02:00
|
|
|
carolStartingBalance = carolBalResp.ConfirmedBalance
|
|
|
|
|
|
|
|
// Now let Carol force close the channel while Dave is offline.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.ForceCloseChannel(carol, chanPoint2)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Make sure Carol got her balance back.
|
2022-07-28 12:25:06 +02:00
|
|
|
carolBalResp = carol.RPC.WalletBalance()
|
2022-07-28 11:48:54 +02:00
|
|
|
carolBalance := carolBalResp.ConfirmedBalance
|
2022-07-28 12:25:06 +02:00
|
|
|
require.Greater(ht, carolBalance, carolStartingBalance,
|
|
|
|
"expected carol to have balance increased")
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.AssertNodeNumChannels(carol, 0)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// When Dave comes online, he will reconnect to Carol, try to resync
|
|
|
|
// the channel, but it will already be closed. Carol should resend the
|
|
|
|
// information Dave needs to sweep his funds.
|
2022-07-28 12:25:06 +02:00
|
|
|
require.NoError(ht, restartDave(), "unable to restart Eve")
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Dave should sweep his funds.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.Miner.AssertNumTxsInMempool(1)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
|
|
|
// Mine a block to confirm the sweep, and make sure Dave got his
|
|
|
|
// balance back.
|
2022-07-28 12:25:06 +02:00
|
|
|
ht.Miner.MineBlocksAndAssertNumTxes(1, 1)
|
|
|
|
ht.AssertNodeNumChannels(dave, 0)
|
2022-07-28 11:48:54 +02:00
|
|
|
|
2022-07-28 12:25:06 +02:00
|
|
|
err := wait.NoError(func() error {
|
|
|
|
daveBalResp := dave.RPC.WalletBalance()
|
2022-07-28 11:48:54 +02:00
|
|
|
daveBalance := daveBalResp.ConfirmedBalance
|
|
|
|
if daveBalance <= daveStartingBalance {
|
|
|
|
return fmt.Errorf("expected dave to have balance "+
|
2022-07-28 12:25:06 +02:00
|
|
|
"above %d, intead had %v", daveStartingBalance,
|
2022-07-28 11:48:54 +02:00
|
|
|
daveBalance)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}, defaultTimeout)
|
2022-07-28 12:25:06 +02:00
|
|
|
require.NoError(ht, err, "timeout while checking dave's balance")
|
2022-07-28 11:48:54 +02:00
|
|
|
}
|
|
|
|
|
2021-01-25 22:27:25 +01:00
|
|
|
// createLegacyRevocationChannel creates a single channel using the legacy
|
|
|
|
// revocation producer format by using PSBT to signal a special pending channel
|
|
|
|
// ID.
|
2022-07-28 11:44:23 +02:00
|
|
|
func createLegacyRevocationChannel(ht *lntemp.HarnessTest,
|
|
|
|
chanAmt, pushAmt btcutil.Amount, from, to *node.HarnessNode) {
|
2021-01-25 22:27:25 +01:00
|
|
|
|
2022-07-28 11:44:23 +02:00
|
|
|
// We'll signal to the wallet that we also want to create a channel
|
|
|
|
// with the legacy revocation producer format that relies on deriving a
|
2021-01-25 22:27:25 +01:00
|
|
|
// private key from the key ring. This is only available during itests
|
|
|
|
// to make sure we don't hard depend on the DerivePrivKey method of the
|
|
|
|
// key ring. We can signal the wallet by setting a custom pending
|
|
|
|
// channel ID. To be able to do that, we need to set a funding shim
|
|
|
|
// which is easiest by using PSBT funding. The ID is the hex
|
|
|
|
// representation of the string "legacy-revocation".
|
|
|
|
itestLegacyFormatChanID := [32]byte{
|
|
|
|
0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x2d, 0x72, 0x65, 0x76,
|
|
|
|
0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
|
|
|
}
|
2022-07-28 11:44:23 +02:00
|
|
|
shim := &lnrpc.FundingShim{
|
|
|
|
Shim: &lnrpc.FundingShim_PsbtShim{
|
|
|
|
PsbtShim: &lnrpc.PsbtShim{
|
|
|
|
PendingChanId: itestLegacyFormatChanID[:],
|
2021-01-25 22:27:25 +01:00
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2022-07-28 11:44:23 +02:00
|
|
|
openChannelReq := lntemp.OpenChannelParams{
|
|
|
|
Amt: chanAmt,
|
|
|
|
PushAmt: pushAmt,
|
|
|
|
FundingShim: shim,
|
|
|
|
}
|
|
|
|
chanUpdates, tempPsbt := ht.OpenChannelPsbt(from, to, openChannelReq)
|
2021-01-25 22:27:25 +01:00
|
|
|
|
|
|
|
// Fund the PSBT by using the source node's wallet.
|
|
|
|
fundReq := &walletrpc.FundPsbtRequest{
|
|
|
|
Template: &walletrpc.FundPsbtRequest_Psbt{
|
|
|
|
Psbt: tempPsbt,
|
|
|
|
},
|
|
|
|
Fees: &walletrpc.FundPsbtRequest_SatPerVbyte{
|
|
|
|
SatPerVbyte: 2,
|
|
|
|
},
|
|
|
|
}
|
2022-07-28 11:44:23 +02:00
|
|
|
fundResp := from.RPC.FundPsbt(fundReq)
|
2021-01-25 22:27:25 +01:00
|
|
|
|
|
|
|
// We have a PSBT that has no witness data yet, which is exactly what we
|
|
|
|
// need for the next step of verifying the PSBT with the funding intents.
|
2022-07-28 11:44:23 +02:00
|
|
|
msg := &lnrpc.FundingTransitionMsg{
|
2021-01-25 22:27:25 +01:00
|
|
|
Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{
|
|
|
|
PsbtVerify: &lnrpc.FundingPsbtVerify{
|
|
|
|
PendingChanId: itestLegacyFormatChanID[:],
|
|
|
|
FundedPsbt: fundResp.FundedPsbt,
|
|
|
|
},
|
|
|
|
},
|
2022-07-28 11:44:23 +02:00
|
|
|
}
|
|
|
|
from.RPC.FundingStateStep(msg)
|
2021-01-25 22:27:25 +01:00
|
|
|
|
|
|
|
// Now we'll ask the source node's wallet to sign the PSBT so we can
|
|
|
|
// finish the funding flow.
|
|
|
|
finalizeReq := &walletrpc.FinalizePsbtRequest{
|
|
|
|
FundedPsbt: fundResp.FundedPsbt,
|
|
|
|
}
|
2022-07-28 11:44:23 +02:00
|
|
|
finalizeRes := from.RPC.FinalizePsbt(finalizeReq)
|
2021-01-25 22:27:25 +01:00
|
|
|
|
|
|
|
// We've signed our PSBT now, let's pass it to the intent again.
|
2022-07-28 11:44:23 +02:00
|
|
|
msg = &lnrpc.FundingTransitionMsg{
|
2021-01-25 22:27:25 +01:00
|
|
|
Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{
|
|
|
|
PsbtFinalize: &lnrpc.FundingPsbtFinalize{
|
|
|
|
PendingChanId: itestLegacyFormatChanID[:],
|
|
|
|
SignedPsbt: finalizeRes.SignedPsbt,
|
|
|
|
},
|
|
|
|
},
|
2022-07-28 11:44:23 +02:00
|
|
|
}
|
|
|
|
from.RPC.FundingStateStep(msg)
|
2021-01-25 22:27:25 +01:00
|
|
|
|
|
|
|
// Consume the "channel pending" update. This waits until the funding
|
|
|
|
// transaction was fully compiled.
|
2022-07-28 11:44:23 +02:00
|
|
|
updateResp := ht.ReceiveOpenChannelUpdate(chanUpdates)
|
2021-01-25 22:27:25 +01:00
|
|
|
upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
|
2022-07-28 11:44:23 +02:00
|
|
|
require.True(ht, ok)
|
2021-01-25 22:27:25 +01:00
|
|
|
chanPoint := &lnrpc.ChannelPoint{
|
|
|
|
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
|
|
|
|
FundingTxidBytes: upd.ChanPending.Txid,
|
|
|
|
},
|
|
|
|
OutputIndex: upd.ChanPending.OutputIndex,
|
|
|
|
}
|
|
|
|
|
2022-07-28 11:44:23 +02:00
|
|
|
ht.Miner.MineBlocksAndAssertNumTxes(6, 1)
|
|
|
|
ht.AssertTopologyChannelOpen(from, chanPoint)
|
|
|
|
ht.AssertTopologyChannelOpen(to, chanPoint)
|
2021-01-25 22:27:25 +01:00
|
|
|
}
|
|
|
|
|
2020-08-12 16:06:34 +02:00
|
|
|
// chanRestoreViaRPC is a helper test method that returns a nodeRestorer
|
|
|
|
// instance which will restore the target node from a password+seed, then
|
|
|
|
// trigger a SCB restore using the RPC interface.
|
2022-07-28 11:44:23 +02:00
|
|
|
func chanRestoreViaRPC(ht *lntemp.HarnessTest, password []byte,
|
2021-05-14 10:09:05 +02:00
|
|
|
mnemonic []string, multi []byte,
|
2022-07-28 11:44:23 +02:00
|
|
|
oldNode *node.HarnessNode) nodeRestorer {
|
2020-08-12 16:06:34 +02:00
|
|
|
|
|
|
|
backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{
|
|
|
|
MultiChanBackup: multi,
|
|
|
|
}
|
|
|
|
|
2022-07-28 11:44:23 +02:00
|
|
|
return func() *node.HarnessNode {
|
|
|
|
newNode := ht.RestoreNodeWithSeed(
|
2020-10-24 23:18:56 +02:00
|
|
|
"dave", nil, password, mnemonic, "", 1000, nil,
|
2021-05-14 10:09:05 +02:00
|
|
|
copyPorts(oldNode),
|
2020-08-12 16:06:34 +02:00
|
|
|
)
|
2022-07-28 11:44:23 +02:00
|
|
|
req := &lnrpc.RestoreChanBackupRequest{Backup: backup}
|
|
|
|
newNode.RPC.RestoreChanBackups(req)
|
2020-08-12 16:06:34 +02:00
|
|
|
|
2022-07-28 11:44:23 +02:00
|
|
|
return newNode
|
|
|
|
}
|
2020-08-12 16:06:34 +02:00
|
|
|
}
|
2021-05-14 10:09:05 +02:00
|
|
|
|
|
|
|
// copyPorts returns a node option function that copies the ports of an existing
|
|
|
|
// node over to the newly created one.
|
2022-07-28 11:44:23 +02:00
|
|
|
func copyPorts(oldNode *node.HarnessNode) node.Option {
|
|
|
|
return func(cfg *node.BaseNodeConfig) {
|
2021-05-14 10:09:05 +02:00
|
|
|
cfg.P2PPort = oldNode.Cfg.P2PPort
|
|
|
|
cfg.RPCPort = oldNode.Cfg.RPCPort
|
|
|
|
cfg.RESTPort = oldNode.Cfg.RESTPort
|
|
|
|
cfg.ProfilePort = oldNode.Cfg.ProfilePort
|
|
|
|
}
|
|
|
|
}
|
2021-06-28 22:45:37 +02:00
|
|
|
|
2021-08-08 11:19:25 +02:00
|
|
|
func rpcPointToWirePoint(t *harnessTest,
|
|
|
|
chanPoint *lnrpc.ChannelPoint) wire.OutPoint {
|
2021-06-28 22:45:37 +02:00
|
|
|
|
2021-08-08 11:19:25 +02:00
|
|
|
op, err := lntest.MakeOutpoint(chanPoint)
|
|
|
|
require.NoError(t.t, err, "unable to get txid")
|
|
|
|
|
|
|
|
return op
|
2021-06-28 22:45:37 +02:00
|
|
|
}
|
2022-07-28 11:44:23 +02:00
|
|
|
|
|
|
|
// assertTimeLockSwept asserts that when Dave's outputs mature, he claims
// them. This function advances enough blocks such that all the pending
// closing transactions are swept in the end, and verifies both Carol's and
// Dave's wallet balances increased past their starting values.
//
// Note: this function is only used in this test file and has been made
// specifically for testChanRestoreScenario.
func assertTimeLockSwept(ht *lntemp.HarnessTest,
	carol *node.HarnessNode, carolStartingBalance int64,
	dave *node.HarnessNode, daveStartingBalance int64, anchors bool) {

	// Number of transactions we expect to see in the mempool below:
	// anchor channels add one more sweep transaction.
	expectedTxes := 2
	if anchors {
		expectedTxes = 3
	}

	// Carol should sweep her funds immediately, as they are not timelocked.
	// We also expect Carol and Dave to sweep their anchor, if present.
	ht.Miner.AssertNumTxsInMempool(expectedTxes)

	// Carol should consider the channel pending force close (since she is
	// waiting for her sweep to confirm).
	ht.AssertNumPendingForceClose(carol, 1)

	// Dave is considering it "pending force close", as we must wait
	// before he can sweep his outputs.
	ht.AssertNumPendingForceClose(dave, 1)

	// Mine the sweep (and anchor) tx(ns).
	ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)

	// Now Carol should consider the channel fully closed.
	ht.AssertNumPendingForceClose(carol, 0)

	// We query Carol's balance to make sure it increased after the channel
	// closed. This checks that she was able to sweep the funds she had in
	// the channel.
	carolBalResp := carol.RPC.WalletBalance()
	carolBalance := carolBalResp.ConfirmedBalance
	require.Greater(ht, carolBalance, carolStartingBalance,
		"balance not increased")

	// After the Dave's output matures, he should reclaim his funds.
	//
	// The commit sweep resolver publishes the sweep tx at defaultCSV-1 and
	// we already mined one block after the commitment was published, so
	// take that into account.
	ht.MineBlocksAssertNodesSync(defaultCSV - 1 - 1)
	daveSweep := ht.Miner.AssertNumTxsInMempool(1)[0]
	block := ht.Miner.MineBlocksAndAssertNumTxes(1, 1)[0]
	ht.Miner.AssertTxInBlock(block, daveSweep)

	// Now the channel should be fully closed also from Dave's POV.
	ht.AssertNumPendingForceClose(dave, 0)

	// Make sure Dave got his balance back. We poll here since the wallet
	// balance only updates once his sweep confirmation is processed.
	err := wait.NoError(func() error {
		daveBalResp := dave.RPC.WalletBalance()
		daveBalance := daveBalResp.ConfirmedBalance
		if daveBalance <= daveStartingBalance {
			return fmt.Errorf("expected dave to have balance "+
				"above %d, instead had %v", daveStartingBalance,
				daveBalance)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err)

	// Both nodes should end up with no open channels.
	ht.AssertNodeNumChannels(dave, 0)
	ht.AssertNodeNumChannels(carol, 0)
}
|
|
|
|
|
|
|
|
// assertDLPExecuted asserts that Dave is a node that has recovered their state
// from scratch. Carol should then force close on chain, with Dave sweeping his
// funds immediately, and Carol sweeping her fund after her CSV delay is up. If
// the blankSlate value is true, then this means that Dave won't need to sweep
// on chain as he has no funds in the channel.
//
// carolStartingBalance and daveStartingBalance are the confirmed wallet
// balances each node held before the close; both are expected to grow once
// the sweeps confirm. commitType selects the expected sweep pattern (e.g.
// anchors add an extra tx, script-enforced leases delay Dave's sweep).
func assertDLPExecuted(ht *lntemp.HarnessTest,
	carol *node.HarnessNode, carolStartingBalance int64,
	dave *node.HarnessNode, daveStartingBalance int64,
	commitType lnrpc.CommitmentType) {

	// Increase the fee estimate so that the following force close tx will
	// be cpfp'ed.
	ht.SetFeeEstimate(30000)

	// We disabled auto-reconnect for some tests to avoid timing issues.
	// To make sure the nodes are initiating DLP now, we have to manually
	// re-connect them.
	ht.EnsureConnected(carol, dave)

	// Upon reconnection, the nodes should detect that Dave is out of sync.
	// Carol should force close the channel using her latest commitment.
	expectedTxes := 1
	if commitTypeHasAnchors(commitType) {
		expectedTxes = 2
	}
	ht.Miner.AssertNumTxsInMempool(expectedTxes)

	// Channel should be in the state "waiting close" for Carol since she
	// broadcasted the force close tx.
	ht.AssertNumWaitingClose(carol, 1)

	// Dave should also consider the channel "waiting close", as he noticed
	// the channel was out of sync, and is now waiting for a force close to
	// hit the chain.
	ht.AssertNumWaitingClose(dave, 1)

	// Restart Dave to make sure he is able to sweep the funds after
	// shutdown.
	ht.RestartNode(dave)

	// Generate a single block, which should confirm the closing tx.
	ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)

	// Dave should consider the channel pending force close (since he is
	// waiting for his sweep to confirm).
	ht.AssertNumPendingForceClose(dave, 1)

	// Carol is considering it "pending force close", as we must wait
	// before she can sweep her outputs.
	ht.AssertNumPendingForceClose(carol, 1)

	if commitType == lnrpc.CommitmentType_SCRIPT_ENFORCED_LEASE {
		// Dave should sweep his anchor only, since he still has the
		// lease CLTV constraint on his commitment output.
		ht.Miner.AssertNumTxsInMempool(1)

		// Mine Dave's anchor sweep tx.
		ht.Miner.MineBlocksAndAssertNumTxes(1, 1)

		// After Carol's output matures, she should also reclaim her
		// funds.
		//
		// The commit sweep resolver publishes the sweep tx at
		// defaultCSV-1 and we already mined one block after the
		// commitment was published, so take that into account.
		ht.Miner.MineBlocks(defaultCSV - 1 - 1)
		ht.Miner.MineBlocksAndAssertNumTxes(1, 1)

		// Now the channel should be fully closed also from Carol's POV.
		ht.AssertNumPendingForceClose(carol, 0)

		// We'll now mine the remaining blocks to prompt Dave to sweep
		// his CLTV-constrained output.
		resp := dave.RPC.PendingChannels()
		blocksTilMaturity :=
			resp.PendingForceClosingChannels[0].BlocksTilMaturity
		require.Positive(ht, blocksTilMaturity)

		ht.Miner.MineBlocks(uint32(blocksTilMaturity))
		ht.Miner.MineBlocksAndAssertNumTxes(1, 1)

		// Now Dave should consider the channel fully closed.
		ht.AssertNumPendingForceClose(dave, 0)
	} else {
		// Dave should sweep his funds immediately, as they are not
		// timelocked. We also expect Dave to sweep his anchor, if
		// present.
		ht.Miner.AssertNumTxsInMempool(expectedTxes)

		// Mine the sweep tx.
		ht.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)

		// Now Dave should consider the channel fully closed.
		ht.AssertNumPendingForceClose(dave, 0)

		// After Carol's output matures, she should also reclaim her
		// funds.
		//
		// The commit sweep resolver publishes the sweep tx at
		// defaultCSV-1 and we already mined one block after the
		// commitment was published, so take that into account.
		ht.Miner.MineBlocks(defaultCSV - 1 - 1)
		ht.Miner.MineBlocksAndAssertNumTxes(1, 1)

		// Now the channel should be fully closed also from Carol's POV.
		ht.AssertNumPendingForceClose(carol, 0)
	}

	// We query Dave's balance to make sure it increased after the channel
	// closed. This checks that he was able to sweep the funds he had in
	// the channel.
	daveBalResp := dave.RPC.WalletBalance()
	daveBalance := daveBalResp.ConfirmedBalance
	require.Greater(ht, daveBalance, daveStartingBalance,
		"balance not increased")

	// Make sure Carol got her balance back. We poll because her sweep may
	// not have been processed by the wallet yet.
	err := wait.NoError(func() error {
		carolBalResp := carol.RPC.WalletBalance()
		carolBalance := carolBalResp.ConfirmedBalance

		// With Neutrino we don't get a backend error when trying to
		// publish an orphan TX (which is what the sweep for the remote
		// anchor is since the remote commitment TX was not broadcast).
		// That's why the wallet still sees that as unconfirmed and we
		// need to count the total balance instead of the confirmed.
		if ht.IsNeutrinoBackend() {
			carolBalance = carolBalResp.TotalBalance
		}

		if carolBalance <= carolStartingBalance {
			return fmt.Errorf("expected carol to have balance "+
				"above %d, instead had %v",
				carolStartingBalance, carolBalance)
		}

		return nil
	}, defaultTimeout)
	require.NoError(ht, err, "timeout while checking carol's balance")

	// Both nodes should end up with no open channels.
	ht.AssertNodeNumChannels(dave, 0)
	ht.AssertNodeNumChannels(carol, 0)
}
|