mirror of
https://github.com/lightningnetwork/lnd.git
synced 2025-02-23 22:46:40 +01:00
This commit introduces the `CustomRecords` type in the `lnwire` package, designed to hold arbitrary byte slices. Each entry in the map is keyed by a TLV type value greater than or equal to 65536.
198 lines
4.4 KiB
Go
package lnwire
|
|
|
|
import (
|
|
"bytes"
|
|
"testing"
|
|
|
|
"github.com/lightningnetwork/lnd/fn"
|
|
"github.com/lightningnetwork/lnd/tlv"
|
|
"github.com/stretchr/testify/require"
|
|
)
|
|
|
|
// TestCustomRecords tests the custom records serialization and deserialization,
|
|
// as well as copying and producing records.
|
|
func TestCustomRecords(t *testing.T) {
|
|
testCases := []struct {
|
|
name string
|
|
customTypes tlv.TypeMap
|
|
expectedRecords CustomRecords
|
|
expectedErr string
|
|
}{
|
|
{
|
|
name: "empty custom records",
|
|
customTypes: tlv.TypeMap{},
|
|
expectedRecords: nil,
|
|
},
|
|
{
|
|
name: "custom record with invalid type",
|
|
customTypes: tlv.TypeMap{
|
|
123: []byte{1, 2, 3},
|
|
},
|
|
expectedErr: "TLV type below min: 65536",
|
|
},
|
|
{
|
|
name: "valid custom record",
|
|
customTypes: tlv.TypeMap{
|
|
65536: []byte{1, 2, 3},
|
|
},
|
|
expectedRecords: map[uint64][]byte{
|
|
65536: {1, 2, 3},
|
|
},
|
|
},
|
|
{
|
|
name: "valid custom records, wrong order",
|
|
customTypes: tlv.TypeMap{
|
|
65537: []byte{3, 4, 5},
|
|
65536: []byte{1, 2, 3},
|
|
},
|
|
expectedRecords: map[uint64][]byte{
|
|
65536: {1, 2, 3},
|
|
65537: {3, 4, 5},
|
|
},
|
|
},
|
|
}
|
|
|
|
for _, tc := range testCases {
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
records, err := NewCustomRecords(tc.customTypes)
|
|
|
|
if tc.expectedErr != "" {
|
|
require.ErrorContains(t, err, tc.expectedErr)
|
|
return
|
|
}
|
|
|
|
require.NoError(t, err)
|
|
require.Equal(t, tc.expectedRecords, records)
|
|
|
|
// Serialize, then parse the records again.
|
|
blob, err := records.Serialize()
|
|
require.NoError(t, err)
|
|
|
|
parsedRecords, err := ParseCustomRecords(blob)
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, tc.expectedRecords, parsedRecords)
|
|
|
|
// Copy() should also return the same records.
|
|
require.Equal(
|
|
t, tc.expectedRecords, parsedRecords.Copy(),
|
|
)
|
|
|
|
// RecordProducers() should also allow us to serialize
|
|
// the records again.
|
|
serializedProducers := serializeRecordProducers(
|
|
t, parsedRecords.RecordProducers(),
|
|
)
|
|
|
|
require.Equal(t, blob, serializedProducers)
|
|
})
|
|
}
|
|
}
|
|
|
|
// TestCustomRecordsExtendRecordProducers tests that we can extend a slice of
|
|
// record producers with custom records.
|
|
func TestCustomRecordsExtendRecordProducers(t *testing.T) {
|
|
testCases := []struct {
|
|
name string
|
|
existingTypes map[uint64][]byte
|
|
customRecords CustomRecords
|
|
expectedResult tlv.TypeMap
|
|
expectedErr string
|
|
}{
|
|
{
|
|
name: "normal merge",
|
|
existingTypes: map[uint64][]byte{
|
|
123: {3, 4, 5},
|
|
345: {1, 2, 3},
|
|
},
|
|
customRecords: CustomRecords{
|
|
65536: {1, 2, 3},
|
|
},
|
|
expectedResult: tlv.TypeMap{
|
|
123: {3, 4, 5},
|
|
345: {1, 2, 3},
|
|
65536: {1, 2, 3},
|
|
},
|
|
},
|
|
{
|
|
name: "duplicates",
|
|
existingTypes: map[uint64][]byte{
|
|
123: {3, 4, 5},
|
|
345: {1, 2, 3},
|
|
65536: {1, 2, 3},
|
|
},
|
|
customRecords: CustomRecords{
|
|
65536: {1, 2, 3},
|
|
},
|
|
expectedErr: "contains a TLV type that is already " +
|
|
"present in the existing records: 65536",
|
|
},
|
|
{
|
|
name: "non custom type in custom records",
|
|
existingTypes: map[uint64][]byte{
|
|
123: {3, 4, 5},
|
|
345: {1, 2, 3},
|
|
65536: {1, 2, 3},
|
|
},
|
|
customRecords: CustomRecords{
|
|
123: {1, 2, 3},
|
|
},
|
|
expectedErr: "TLV type below min: 65536",
|
|
},
|
|
}
|
|
|
|
for _, tc := range testCases {
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
nonCustomRecords := tlv.MapToRecords(tc.existingTypes)
|
|
nonCustomProducers := fn.Map(
|
|
func(r tlv.Record) tlv.RecordProducer {
|
|
return &recordProducer{r}
|
|
}, nonCustomRecords,
|
|
)
|
|
|
|
combined, err := tc.customRecords.ExtendRecordProducers(
|
|
nonCustomProducers,
|
|
)
|
|
|
|
if tc.expectedErr != "" {
|
|
require.ErrorContains(t, err, tc.expectedErr)
|
|
return
|
|
}
|
|
|
|
require.NoError(t, err)
|
|
|
|
serializedProducers := serializeRecordProducers(
|
|
t, combined,
|
|
)
|
|
|
|
stream, err := tlv.NewStream()
|
|
require.NoError(t, err)
|
|
|
|
parsedMap, err := stream.DecodeWithParsedTypes(
|
|
bytes.NewReader(serializedProducers),
|
|
)
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, tc.expectedResult, parsedMap)
|
|
})
|
|
}
|
|
}
|
|
|
|
// serializeRecordProducers is a helper function that serializes a slice of
|
|
// record producers into a byte slice.
|
|
func serializeRecordProducers(t *testing.T,
|
|
producers []tlv.RecordProducer) []byte {
|
|
|
|
tlvRecords := fn.Map(func(p tlv.RecordProducer) tlv.Record {
|
|
return p.Record()
|
|
}, producers)
|
|
|
|
stream, err := tlv.NewStream(tlvRecords...)
|
|
require.NoError(t, err)
|
|
|
|
var b bytes.Buffer
|
|
err = stream.Encode(&b)
|
|
require.NoError(t, err)
|
|
|
|
return b.Bytes()
|
|
}
|