from collections import Counter
from ephemeral_port_reserve import reserve
from fixtures import *  # noqa: F401,F403
from fixtures import TEST_NETWORK
from pyln.client import RpcError, Millisatoshi
from utils import (
    wait_for, TIMEOUT, only_one, sync_blockheight,
    expected_node_features,
    mine_funding_to_announce, default_ln_port, CHANNEL_SIZE,
    first_scid,
)

import json
import logging
import math
import os
import pytest
import struct
import subprocess
import time
import unittest
import socket


with open('config.vars') as configfile:
    config = dict([(line.rstrip().split('=', 1)) for line in configfile])
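
# A small illustration of the parsing above (assuming config.vars holds plain
# KEY=VALUE lines written at configure time), not executed by any test:
#   dict([line.rstrip().split('=', 1) for line in ['FOO=bar']]) == {'FOO': 'bar'}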


def test_gossip_pruning(node_factory, bitcoind):
    """Create channel and see it being updated in time before pruning
    """
    l1, l2, l3 = node_factory.get_nodes(3, opts={'dev-fast-gossip-prune': None,
                                                 'allow_bad_gossip': True})

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)

    scid1, _ = l1.fundchannel(l2, 10**6)
    scid2, _ = l2.fundchannel(l3, 10**6)

    mine_funding_to_announce(bitcoind, [l1, l2, l3])

    # Channels should be activated locally
    wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True] * 4)
    wait_for(lambda: [c['active'] for c in l2.rpc.listchannels()['channels']] == [True] * 4)
    wait_for(lambda: [c['active'] for c in l3.rpc.listchannels()['channels']] == [True] * 4)

    # All of them should send a keepalive message (after 30 seconds)
    l1.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
    ], timeout=50)
    l2.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid1),
        'Sending keepalive channel_update for {}'.format(scid2),
    ])
    l3.daemon.wait_for_logs([
        'Sending keepalive channel_update for {}'.format(scid2),
    ])

    # Now kill l2, so that l1 and l3 will prune it from their view after 60 seconds
    l2.stop()

    # We check every 60/4 seconds, and it takes 60 seconds since the last update.
    l1.daemon.wait_for_log("Pruning channel {} from network view".format(scid2),
                           timeout=80)
    l3.daemon.wait_for_log("Pruning channel {} from network view".format(scid1))

    assert scid2 not in [c['short_channel_id'] for c in l1.rpc.listchannels()['channels']]
    assert scid1 not in [c['short_channel_id'] for c in l3.rpc.listchannels()['channels']]
    assert l3.info['id'] not in [n['nodeid'] for n in l1.rpc.listnodes()['nodes']]
    assert l1.info['id'] not in [n['nodeid'] for n in l3.rpc.listnodes()['nodes']]
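

# Note on the timing above: with dev-fast-gossip-prune the prune interval is
# compressed to 60 seconds (instead of the usual two weeks without a fresh
# channel_update), keepalives go out at roughly half that, and pruning is
# checked every 60/4 seconds; the 50s/80s timeouts just leave headroom.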


def test_gossip_disable_channels(node_factory, bitcoind):
    """Simple test to check that channels get disabled correctly on disconnect
    and re-enabled upon reconnecting
    """
    opts = {'dev-no-reconnect': None, 'may_reconnect': True}
    l1, l2 = node_factory.get_nodes(2, opts=opts)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid, _ = l1.fundchannel(l2, 10**6)
    bitcoind.generate_block(5)

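    # Note: count_active() below is 0 unless we are connected to the peer:
    # gossip may still mark both channel directions active, but multiplying by
    # the number of connected peer-channels zeroes the count on disconnect.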
    def count_active(node):
        chans = node.rpc.listchannels()['channels']
        active = [c for c in chans if c['active']]
        connected = len([p for p in node.rpc.listpeerchannels()['channels'] if p['peer_connected'] is True])
        return connected * len(active)

    l1.wait_channel_active(scid)
    l2.wait_channel_active(scid)
    assert count_active(l1) == 2
    assert count_active(l2) == 2

    l2.restart()
    wait_for(lambda: count_active(l1) == 0)
    assert count_active(l2) == 0

    # Now reconnect, they should re-enable the channels
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: count_active(l1) == 2)
    wait_for(lambda: count_active(l2) == 2)


def test_announce_address(node_factory, bitcoind):
    """Make sure our announcements are well formed."""
    # We do not allow announcement of duplicates.
    opts = {'announce-addr':
            ['4acth47i6kxnvkewtm6q7ib2s3ufpo5sqbsnzjpbi7utijcltosqemad.onion',
             '1.2.3.4:1234',
             'dns:example.com:1236',
             '::'],
            'log-level': 'io',
            'dev-allow-localhost': None}
    l1, l2 = node_factory.get_nodes(2, opts=[opts, {}])

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid, _ = l1.fundchannel(l2, 10**6)
    bitcoind.generate_block(5)
    l1.wait_channel_active(scid)
    l2.wait_channel_active(scid)

    # We should see it send node announce with all addresses (257 = 0x0101)
    # Note: local ephemeral port is masked out.
    # Note: Since we `disable-dns` it should not announce a resolved IPv4
    # or IPv6 address for example.com
    #
    # Also expect the address descriptor types to be sorted!
    # BOLT #7:
    # - MUST place address descriptors in ascending order.
    l1.daemon.wait_for_log(r"\[OUT\] 0101.*0056"
                           "010102030404d2"  # IPv4 01 1.2.3.4:1234
                           "017f000001...."  # IPv4 01 127.0.0.1:wxyz
                           "0200000000000000000000000000000000...."  # IPv6 02 :::<any_port>
                           "04e00533f3e8f2aedaa8969b3d0fa03a96e857bbb28064dca5e147e934244b9ba5023003...."  # TORv3 04
                           "050b6578616d706c652e636f6d04d4")  # DNS 05 len example.com:1236

    # Check other node can parse these (make sure it has digested msg)
    wait_for(lambda: 'addresses' in l2.rpc.listnodes(l1.info['id'])['nodes'][0])
    addresses = l2.rpc.listnodes(l1.info['id'])['nodes'][0]['addresses']
    addresses_dns = [address for address in addresses if address['type'] == 'dns']
    assert len(addresses) == 5
    assert len(addresses_dns) == 1
    assert addresses_dns[0]['address'] == 'example.com'
    assert addresses_dns[0]['port'] == 1236
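

# A minimal sketch (not used by the tests) of how one of the BOLT #7 address
# descriptors asserted above decodes: type 0x01 is IPv4, followed by a 4-byte
# address and a 2-byte big-endian port.
def _decode_ipv4_descriptor(hexstr):
    data = bytes.fromhex(hexstr)
    assert data[0] == 1  # descriptor type 1 = IPv4
    a, b, c, d, port = struct.unpack('!BBBBH', data[1:7])
    return '{}.{}.{}.{}'.format(a, b, c, d), port


# e.g. _decode_ipv4_descriptor('010102030404d2') == ('1.2.3.4', 1234)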


def test_announce_dns_suppressed(node_factory, bitcoind):
    """By default announce DNS names as IPs"""
    opts = {'announce-addr': 'example.com:1236',
            'start': False}
    l1, l2 = node_factory.get_nodes(2, opts=[opts, {}])

    # Remove unwanted disable-dns option!
    del l1.daemon.opts['disable-dns']
    l1.start()

    # Need a channel so l1 will announce itself.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid, _ = l1.fundchannel(l2, 10**6)
    bitcoind.generate_block(5)

    # Wait for l2 to see l1, with addresses.
    wait_for(lambda: l2.rpc.listnodes(l1.info['id'])['nodes'] != [])
    wait_for(lambda: 'addresses' in only_one(l2.rpc.listnodes(l1.info['id'])['nodes']))

    addresses = only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['addresses']
    assert len(addresses) == 1
    assert addresses[0]['type'] in ['ipv4', 'ipv6']
    assert addresses[0]['address'] != 'example.com'
    assert addresses[0]['port'] == 1236


def test_announce_and_connect_via_dns(node_factory, bitcoind):
    """ Test that DNS announcements propagate and can be used when connecting.

        - First node announces only a FQDN like 'localhost.localdomain'.
        - Second node gets a channel with first node.
        - Third node just connects to second node.
        - Fourth node with DNS disabled also connects to second node.
        - Wait for gossip so third and fourth node sees first node.
        - Third node must be able to 'resolve' 'localhost.localdomain'
          and connect to first node.
        - Fourth node must not be able to connect because it has disabled DNS.

        Notes:
        - --disable-dns is needed so the first node does not announce 127.0.0.1 itself.
        - 'dev-allow-localhost' must not be set, so it does not resolve localhost anyway.
    """
    opts1 = {'disable-dns': None,
             'announce-addr': ['dns:localhost.localdomain:12345'],  # announce dns
             'bind-addr': ['127.0.0.1:12345', '[::1]:12345']}  # and bind local IPs
    opts3 = {'may_reconnect': True}
    opts4 = {'disable-dns': None}
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=[opts1, {}, opts3, opts4])

    # In order to enable DNS on a pyln testnode we need to delete the
    # 'disable-dns' opt (which is added by pyln test utils) and restart it.
    del l3.daemon.opts['disable-dns']
    l3.restart()

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l4.rpc.connect(l2.info['id'], 'localhost', l2.port)
    scid, _ = l1.fundchannel(l2, 10**6)
    mine_funding_to_announce(bitcoind, [l1, l2, l3])

    # wait until l3 and l4 see l1 via gossip with announced addresses
    wait_for(lambda: len(l3.rpc.listnodes(l1.info['id'])['nodes']) == 1)
    wait_for(lambda: len(l4.rpc.listnodes(l1.info['id'])['nodes']) == 1)
    wait_for(lambda: 'addresses' in l3.rpc.listnodes(l1.info['id'])['nodes'][0])
    wait_for(lambda: 'addresses' in l4.rpc.listnodes(l1.info['id'])['nodes'][0])
    addresses = l3.rpc.listnodes(l1.info['id'])['nodes'][0]['addresses']
    assert len(addresses) == 1  # no other addresses must be announced for this
    assert addresses[0]['type'] == 'dns'
    assert addresses[0]['address'] == 'localhost.localdomain'
    assert addresses[0]['port'] == 12345

    # now l3 must be able to use DNS to resolve and connect to l1
    result = l3.rpc.connect(l1.info['id'])
    assert result['id'] == l1.info['id']
    assert result['direction'] == 'out'
    assert result['address']['port'] == 12345
    if result['address']['type'] == 'ipv4':
        assert result['address']['address'] == '127.0.0.1'
    elif result['address']['type'] == 'ipv6':
        assert result['address']['address'] == '::1'
    else:
        assert False

    # l4 however must not be able to connect because it used '--disable-dns'
    # This raises RpcError code 401, currently with an empty error message.
    with pytest.raises(RpcError, match=r"401.*dns disabled"):
        l4.rpc.connect(l1.info['id'])


def test_only_announce_one_dns(node_factory, bitcoind):
    # and test that we can't announce more than one DNS address
    l1 = node_factory.get_node(expect_fail=True, start=False,
                               options={'announce-addr': ['dns:localhost.localdomain:12345', 'dns:example.com:12345']})
    l1.daemon.start(wait_for_initialized=False, stderr_redir=True)
    wait_for(lambda: l1.daemon.is_in_stderr("Only one DNS can be announced"))


def test_announce_dns_without_port(node_factory, bitcoind):
    """Checks that the port of a DNS announcement is set to the corresponding
       network port. In this case regtest 19846.
    """
    opts = {'announce-addr': ['dns:example.com']}
    l1 = node_factory.get_node(options=opts)

    # 'address': [{'type': 'dns', 'address': 'example.com', 'port': 0}]
    info = l1.rpc.getinfo()
    assert info['address'][0]['type'] == 'dns'
    assert info['address'][0]['address'] == 'example.com'
    if TEST_NETWORK == 'regtest':
        default_port = 19846
    else:
        assert TEST_NETWORK == 'liquid-regtest'
        default_port = 20735
    assert info['address'][0]['port'] == default_port


def test_gossip_timestamp_filter(node_factory, bitcoind, chainparams):
    l1, l2, l3, l4 = node_factory.line_graph(4, fundchannel=False, opts={'log-level': 'io'})
    genesis_blockhash = chainparams['chain_hash']

    before_anything = int(time.time())

    # Make a public channel.
    chan12, _ = l1.fundchannel(l2, 10**5)
    mine_funding_to_announce(bitcoind, [l1, l2, l3, l4])

    l3.wait_for_channel_updates([chan12])
    after_12 = int(time.time())

    # Make another one, different timestamp.
    time.sleep(10)
    before_23 = int(time.time())
    chan23, _ = l2.fundchannel(l3, 10**5)
    bitcoind.generate_block(5)

    l1.wait_for_channel_updates([chan23])
    after_23 = int(time.time()) + 1

    # Make sure l4 has received all the gossip.
    wait_for(lambda: ['alias' in node for node in l4.rpc.listnodes()['nodes']] == [True, True, True])

    msgs = l4.query_gossip('gossip_timestamp_filter',
                           genesis_blockhash,
                           '0', '0xFFFFFFFF',
                           filters=['0109', '0107', '0012'])

    # 0x0100 = channel_announcement
    # 0x0102 = channel_update
    # 0x0101 = node_announcement
    # The order of node_announcements relative to others is undefined.
    types = Counter([m[0:4] for m in msgs])
    assert types == Counter(['0100'] * 2 + ['0102'] * 4 + ['0101'] * 3)

    # Now timestamp which doesn't overlap (gives nothing).
    msgs = l4.query_gossip('gossip_timestamp_filter',
                           genesis_blockhash,
                           '0', before_anything,
                           filters=['0109', '0107', '0012'])
    assert msgs == []

    # Now choose range which will only give first update.
    msgs = l4.query_gossip('gossip_timestamp_filter',
                           genesis_blockhash,
                           before_anything,
                           after_12 - before_anything + 1,
                           filters=['0109', '0107', '0012'])

    # 0x0100 = channel_announcement
    # 0x0102 = channel_update
    # (Node announcement may have any timestamp)
    types = Counter([m[0:4] for m in msgs])
    assert types['0100'] == 1
    assert types['0102'] == 2

    # Now choose range which will only give second update.
    msgs = l4.query_gossip('gossip_timestamp_filter',
                           genesis_blockhash,
                           before_23,
                           after_23 - before_23 + 1,
                           filters=['0109', '0107', '0012'])

    # 0x0100 = channel_announcement
    # 0x0102 = channel_update
    # (Node announcement may have any timestamp)
    types = Counter([m[0:4] for m in msgs])
    assert types['0100'] == 1
    assert types['0102'] == 2
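

# A minimal sketch (an illustration, not used above) of the wire form behind
# query_gossip('gossip_timestamp_filter', ...): per BOLT #7, type 265 (0x0109)
# carries the 32-byte chain hash followed by two big-endian u32s,
# first_timestamp and timestamp_range.
def _encode_gossip_timestamp_filter(chain_hash_hex, first_timestamp, timestamp_range):
    return ('0109' + chain_hash_hex
            + format(first_timestamp, '08x')
            + format(timestamp_range, '08x'))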


def test_connect_by_gossip(node_factory, bitcoind):
    """Test connecting to an unknown peer using node gossip
    """
    # l1 announces bogus addresses.
    l1, l2, l3 = node_factory.get_nodes(3,
                                        opts=[{'announce-addr':
                                               ['127.0.0.1:2',
                                                '[::]:2',
                                                'vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd.onion'],
                                               'dev-allow-localhost': None},
                                              {},
                                              {'dev-allow-localhost': None,
                                               'log-level': 'io'}])
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)

    # Nodes are gossiped only if they have channels
    chanid, _ = l2.fundchannel(l3, 10**6)
    mine_funding_to_announce(bitcoind, [l1, l2, l3])

    # Let channel reach announcement depth
    l2.wait_channel_active(chanid)

    # Make sure l3 has given node announcement to l2.
    l2.daemon.wait_for_logs(['Received node_announcement for node {}'.format(l3.info['id'])])

    # Let l1 learn of l3 by node gossip
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.daemon.wait_for_logs(['Received node_announcement for node {}'.format(l3.info['id'])])

    # Have l1 connect to l3 without explicit host and port.
    ret = l1.rpc.connect(l3.info['id'])
    assert ret['address'] == {'type': 'ipv4', 'address': '127.0.0.1', 'port': l3.port}

    # Now give it *wrong* port (after we make sure l2 isn't listening), it should fall back.
    l1.rpc.disconnect(l3.info['id'])
    l2.stop()

    ret = l1.rpc.connect(l3.info['id'], 'localhost', l2.port)
    assert ret['address'] == {'type': 'ipv4', 'address': '127.0.0.1', 'port': l3.port}


def test_gossip_jsonrpc(node_factory):
    l1, l2 = node_factory.line_graph(2, fundchannel=True, wait_for_announce=False)

    # Shouldn't send announce signatures until 6 deep.
    assert not l1.daemon.is_in_log('peer_out WIRE_ANNOUNCEMENT_SIGNATURES')

    # Make sure we can route through the channel, will raise on failure
    l1.rpc.getroute(l2.info['id'], 100, 1)

    # Channels should not be activated locally
    assert l1.rpc.listchannels() == {'channels': []}
    assert l2.rpc.listchannels() == {'channels': []}

    # Outgoing should be public, even if not announced yet.
    channels1 = l1.rpc.listpeerchannels()['channels']
    channels2 = l2.rpc.listpeerchannels()['channels']
    assert [c['private'] for c in channels1] == [False]
    assert [c['private'] for c in channels2] == [False]

    # Now proceed to funding-depth and do a full gossip round
    l1.bitcoin.generate_block(5)

    # Could happen in either order.
    l1.daemon.wait_for_logs(['peer_out WIRE_ANNOUNCEMENT_SIGNATURES',
                             'peer_in WIRE_ANNOUNCEMENT_SIGNATURES'])

    # Just wait for the update to kick off and then check the effect
    needle = "Received node_announcement for node"
    l1.daemon.wait_for_log(needle)
    l2.daemon.wait_for_log(needle)
    l1.wait_channel_active(only_one(channels1)['short_channel_id'])
    l2.wait_channel_active(only_one(channels1)['short_channel_id'])

    # Test listchannels-by-source
    channels1 = l1.rpc.listchannels(source=l1.info['id'])['channels']
    channels2 = l2.rpc.listchannels(source=l1.info['id'])['channels']
    assert only_one(channels1)['source'] == l1.info['id']
    assert only_one(channels1)['destination'] == l2.info['id']
    if l1.info['id'] > l2.info['id']:
        assert only_one(channels1)['direction'] == 1
    else:
        assert only_one(channels1)['direction'] == 0
    assert channels1 == channels2

    # Test listchannels-by-destination
    channels1 = l1.rpc.listchannels(destination=l1.info['id'])['channels']
    channels2 = l2.rpc.listchannels(destination=l1.info['id'])['channels']
    assert only_one(channels1)['destination'] == l1.info['id']
    assert only_one(channels1)['source'] == l2.info['id']
    if l2.info['id'] > l1.info['id']:
        assert only_one(channels1)['direction'] == 1
    else:
        assert only_one(channels1)['direction'] == 0
    assert channels1 == channels2

    # Test only one of short_channel_id, source or destination can be supplied
    with pytest.raises(RpcError, match=r"Can only specify one of.*"):
        l1.rpc.listchannels(source=l1.info['id'], destination=l2.info['id'])
    with pytest.raises(RpcError, match=r"Can only specify one of.*"):
        l1.rpc.listchannels(short_channel_id="1x1x1", source=l2.info['id'])

    nodes = l1.rpc.listnodes()['nodes']
    assert set([n['nodeid'] for n in nodes]) == set([l1.info['id'], l2.info['id']])

    # Test listnodes with an arg, while we're here.
    n1 = l1.rpc.listnodes(l1.info['id'])['nodes'][0]
    n2 = l1.rpc.listnodes(l2.info['id'])['nodes'][0]
    assert n1['nodeid'] == l1.info['id']
    assert n2['nodeid'] == l2.info['id']

    # Might not have seen other node-announce yet.
    assert n1['alias'].startswith('JUNIORBEAM')
    assert n1['color'] == '0266e4'
    if 'alias' not in n2:
        assert 'color' not in n2
        assert 'addresses' not in n2
    else:
        assert n2['alias'].startswith('SILENTARTIST')
        assert n2['color'] == '022d22'

    assert [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True]
    assert [c['public'] for c in l1.rpc.listchannels()['channels']] == [True, True]
    assert [c['active'] for c in l2.rpc.listchannels()['channels']] == [True, True]
    assert [c['public'] for c in l2.rpc.listchannels()['channels']] == [True, True]
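

# An illustrative helper (not used above): per BOLT #7, a channel_update's
# direction bit is 0 when the update comes from the lexicographically lesser
# of the two node_ids and 1 otherwise, which is what the direction asserts
# above are checking.
def _channel_direction(source_id, destination_id):
    return 0 if source_id < destination_id else 1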


def test_gossip_badsig(node_factory, bitcoind):
    """Make sure node announcement signatures are ok.

    This is a smoke test to see if signatures fail. This used to be the case
    occasionally before PR #276 was merged: we'd be waiting for the HSM to reply
    with a signature and would then regenerate the message, which might roll the
    timestamp, invalidating the signature.
    """
    l1, l2, l3 = node_factory.get_nodes(3)

    # l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
    l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.fundchannel(l1, 10**6)
    l2.fundchannel(l3, 10**6)

    # Wait for route propagation.
    mine_funding_to_announce(bitcoind, [l1, l2, l3])
    l1.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l3.info['id']))
    assert not l1.daemon.is_in_log('signature verification failed')
    assert not l2.daemon.is_in_log('signature verification failed')
    assert not l3.daemon.is_in_log('signature verification failed')


def test_gossip_weirdalias(node_factory, bitcoind):
    weird_name = '\t \n \" \n \r \n \\'
    normal_name = 'Normal name'
    opts = [
        {'alias': weird_name},
        {'alias': normal_name}
    ]
    l1, l2 = node_factory.get_nodes(2, opts=opts)
    weird_name_json = json.encoder.JSONEncoder().encode(weird_name)[1:-1]
    aliasline = l1.daemon.is_in_log('Server started with public key .* alias')
    assert weird_name_json in str(aliasline)
    assert l2.daemon.is_in_log('Server started with public key .* alias {}'
                               .format(normal_name))

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.daemon.wait_for_log('Handed peer, entering loop')
    l2.fundchannel(l1, 10**6)
    bitcoind.generate_block(6)

    # They should gossip together.
    l1.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l2.info['id']))
    l2.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l1.info['id']))

    node = l1.rpc.listnodes(l1.info['id'])['nodes'][0]
    assert node['alias'] == weird_name
    node = l2.rpc.listnodes(l1.info['id'])['nodes'][0]
    assert node['alias'] == weird_name


def test_gossip_persistence(node_factory, bitcoind):
    """Gossip for a while, restart and it should remember.

    Also tests for funding outpoint spends, and they should be persisted
    too.
    """
    opts = {'dev-no-reconnect': None, 'may_reconnect': True}
    l1, l2, l3, l4 = node_factory.get_nodes(4, opts=opts)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)

    scid12, _ = l1.fundchannel(l2, 10**6)
    scid23, _ = l2.fundchannel(l3, 10**6)

    # Make channels public, except for l3 -> l4, which is kept local-only
    mine_funding_to_announce(bitcoind, [l1, l2, l3, l4])
    scid34, _ = l3.fundchannel(l4, 10**6, announce_channel=False)
    bitcoind.generate_block(1)

    def active(node):
        chans = node.rpc.listchannels()['channels']
        return sorted([c['short_channel_id'] for c in chans if c['active']])

    def non_public(node):
        # Not just c["private"] == True, but immature ones too.
        public_chans = [c['short_channel_id'] for c in node.rpc.listchannels()['channels']]
        our_chans = [c['short_channel_id'] for c in node.rpc.listpeerchannels()['channels'] if c['state'] in ('CHANNELD_NORMAL', 'CHANNELD_AWAITING_SPLICE')]
        return sorted(list(set(our_chans) - set(public_chans)))

    # Channels should be activated
    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])
    wait_for(lambda: active(l2) == [scid12, scid12, scid23, scid23])
    # This one has private channels, but they don't appear in listchannels.
    wait_for(lambda: active(l3) == [scid12, scid12, scid23, scid23])

    # l1 restarts and public gossip should persist
    l1.restart()
    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])

    # Now reconnect, they should re-enable the two l1 <-> l2 directions
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: active(l1) == [scid12, scid12, scid23, scid23])

    # Now spend the funding tx, generate a block and see others deleting the
    # channel from their network view
    l1.rpc.dev_fail(l2.info['id'])

    # We need to wait for the unilateral close to hit the mempool,
    # and 12 blocks for nodes to actually forget it.
    bitcoind.generate_block(13, wait_for_mempool=1)

    wait_for(lambda: active(l1) == [scid23, scid23])
    wait_for(lambda: active(l2) == [scid23, scid23])
    wait_for(lambda: active(l3) == [scid23, scid23])

    # The channel l3 -> l4 should be known only to them
    assert non_public(l1) == []
    assert non_public(l2) == []
    wait_for(lambda: non_public(l3) == [scid34])
    wait_for(lambda: non_public(l4) == [scid34])

    # Finally, it should also remember the deletion after a restart
    l3.restart()
    l4.restart()
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
    wait_for(lambda: active(l3) == [scid23, scid23])

    # Both l3 and l4 should remember their local-only channel
    wait_for(lambda: non_public(l3) == [scid34])
    wait_for(lambda: non_public(l4) == [scid34])


def test_routing_gossip_reconnect(node_factory, bitcoind):
    # Connect two peers, reconnect and then see if we resume the
    # gossip.
    disconnects = ['-WIRE_CHANNEL_ANNOUNCEMENT']
    l1, l2, l3 = node_factory.get_nodes(3,
                                        opts=[{'disconnect': disconnects,
                                               'may_reconnect': True},
                                              {'may_reconnect': True},
                                              {}])
    # Make sure everyone is up to block height so we don't get bad gossip msgs!
    sync_blockheight(bitcoind, [l1, l2, l3])
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.openchannel(l2, CHANNEL_SIZE)

    # Make sure everyone is up to block height so we don't get bad gossip msgs!
    sync_blockheight(bitcoind, [l1, l2, l3])
    # Now open new channels and everybody should sync
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    l2.openchannel(l3, CHANNEL_SIZE)

    # Settle the gossip
    for n in [l1, l2, l3]:
        wait_for(lambda: len(n.rpc.listchannels()['channels']) == 4)


def test_gossip_no_empty_announcements(node_factory, bitcoind, chainparams):
    # Need full IO logging so we can see gossip
    # l2 sends CHANNEL_ANNOUNCEMENT to l1, but not CHANNEL_UPDATE.
    l1, l2, l3, l4 = node_factory.line_graph(4, opts=[{'log-level': 'io',
                                                       'dev-no-reconnect': None},
                                                      {'log-level': 'io',
                                                       'disconnect': ['+WIRE_CHANNEL_ANNOUNCEMENT'],
                                                       'may_reconnect': True},
                                                      {'may_reconnect': True},
                                                      {'may_reconnect': True}],
                                             fundchannel=False)
    l3.fundchannel(l4, 10**5)
    mine_funding_to_announce(bitcoind, [l1, l2, l3, l4])

    # l2 sends CHANNEL_ANNOUNCEMENT to l1, then disconnects.
    l2.daemon.wait_for_log('dev_disconnect')
    l1.daemon.wait_for_log(r'\[IN\] 0100')
    wait_for(lambda: l1.rpc.listchannels()['channels'] == [])

    # l1 won't relay it (make sure it has time to digest though)
    time.sleep(2)
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00'],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    assert l1.query_gossip('query_channel_range',
                           chainparams['chain_hash'],
                           0, 1000000,
                           filters=['0109', '0107', '0012']) == ['0108'
                                                                 # blockhash
                                                                 + chainparams['chain_hash']
                                                                 # first_blocknum, number_of_blocks, complete
                                                                 + format(0, '08x') + format(1000000, '08x') + '01'
                                                                 # encoded_short_ids
                                                                 + format(len(encoded) // 2, '04x')
                                                                 + encoded]

    # If we reconnect, gossip will now flow.
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)


def test_routing_gossip(node_factory, bitcoind):
    nodes = node_factory.get_nodes(5)

    for i in range(len(nodes) - 1):
        src, dst = nodes[i], nodes[i + 1]
        src.rpc.connect(dst.info['id'], 'localhost', dst.port)
        src.openchannel(dst, CHANNEL_SIZE, confirm=False, wait_for_announce=False)

    # openchannel calls fundwallet which mines a block; so first channel
    # is 4 deep, last is unconfirmed.

    # Allow announce messages, but don't run too fast, otherwise gossip can be in the future for nodes.
    sync_blockheight(bitcoind, nodes)
    bitcoind.generate_block(wait_for_mempool=1)
    mine_funding_to_announce(bitcoind, nodes)

    # Deep check that all channels are in there
    comb = []
    for i in range(len(nodes) - 1):
        comb.append((nodes[i].info['id'], nodes[i + 1].info['id']))
        comb.append((nodes[i + 1].info['id'], nodes[i].info['id']))

    def check_gossip(n):
        seen = []
        channels = n.rpc.listchannels()['channels']
        for c in channels:
            seen.append((c['source'], c['destination']))
        missing = set(comb) - set(seen)
        logging.debug("Node {id} is missing channels {chans}".format(
            id=n.info['id'],
            chans=missing)
        )
        return len(missing) == 0

    for n in nodes:
        wait_for(lambda: check_gossip(n))


def test_gossip_query_channel_range(node_factory, bitcoind, chainparams):
    l1, l2, l3, l4 = node_factory.line_graph(4, fundchannel=False)
    genesis_blockhash = chainparams['chain_hash']

    # Make public channels on consecutive blocks
    l1.fundwallet(10**6)
    l2.fundwallet(10**6)

    num_tx = len(bitcoind.rpc.getrawmempool())
    # We want these one block apart.
    l1.rpc.fundchannel(l2.info['id'], 10**5)['tx']
    bitcoind.generate_block(wait_for_mempool=num_tx + 1)
    sync_blockheight(bitcoind, [l1, l2, l3, l4])
    l2.rpc.fundchannel(l3.info['id'], 10**5)['tx']

    # Get them both to gossip depth.
    mine_funding_to_announce(bitcoind, [l1, l2, l3, l4],
                             num_blocks=6,
                             wait_for_mempool=1)

    # Make sure l4 has received all the gossip.
    l4.daemon.wait_for_logs(['Received node_announcement for node ' + n.info['id'] for n in (l1, l2, l3)])

    scid12 = l1.rpc.listpeerchannels(l2.info['id'])['channels'][0]['short_channel_id']
    scid23 = l3.rpc.listpeerchannels(l2.info['id'])['channels'][0]['short_channel_id']
    block12 = int(scid12.split('x')[0])
    block23 = int(scid23.split('x')[0])

    assert block23 == block12 + 1

    # Asks l4 for all channels, gets both.
    msgs = l4.query_gossip('query_channel_range',
                           chainparams['chain_hash'],
                           0, 1000000,
                           filters=['0109', '0107', '0012'])
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12, scid23],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(0, '08x') + format(1000000, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]

    # Does not include scid12
    msgs = l4.query_gossip('query_channel_range',
                           genesis_blockhash,
                           0, block12,
                           filters=['0109', '0107', '0012'])
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(0, '08x') + format(block12, '08x') + '01'
                    # encoded_short_ids
                    '000100']

    # Does include scid12
    msgs = l4.query_gossip('query_channel_range',
                           genesis_blockhash,
                           0, block12 + 1,
                           filters=['0109', '0107', '0012'])
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(0, '08x') + format(block12 + 1, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]

    # Doesn't include scid23
    msgs = l4.query_gossip('query_channel_range',
                           genesis_blockhash,
                           0, block23,
                           filters=['0109', '0107', '0012'])
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(0, '08x') + format(block23, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]

    # Does include scid23
    msgs = l4.query_gossip('query_channel_range',
                           genesis_blockhash,
                           block12, block23 - block12 + 1,
                           filters=['0109', '0107', '0012'])
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12, scid23],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(block12, '08x') + format(block23 - block12 + 1, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]

    # Only includes scid23
    msgs = l4.query_gossip('query_channel_range',
                           genesis_blockhash,
                           block23, 1,
                           filters=['0109', '0107', '0012'])
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid23],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(block23, '08x') + format(1, '08x') + '01'
                    # encoded_short_ids
                    + format(len(encoded) // 2, '04x')
                    + encoded]

    # Past both
    msgs = l4.query_gossip('query_channel_range',
                           genesis_blockhash,
                           block23 + 1, 1000000,
                           filters=['0109', '0107', '0012'])
    # reply_channel_range == 264
    assert msgs == ['0108'
                    # blockhash
                    + genesis_blockhash
                    # first_blocknum, number_of_blocks, complete
                    + format(block23 + 1, '08x') + format(1000000, '08x') + '01'
                    # encoded_short_ids
                    + '000100']

    # Make l4 split reply into two (technically async)
    l4.rpc.dev_set_max_scids_encode_size(max=9)
    l4.daemon.wait_for_log('Set max_scids_encode_bytes to 9')

    msgs = l4.query_gossip('query_channel_range',
                           genesis_blockhash,
                           0, 1000000,
                           filters=['0109', '0107', '0012'])
    # It should definitely have split
    l4.daemon.wait_for_log('reply_channel_range: splitting 0-1 of 2')

    start = 0
    scids = '00'
    for m in msgs:
        assert m.startswith('0108' + genesis_blockhash)
        this_start = int(m[4 + 64:4 + 64 + 8], base=16)
        num = int(m[4 + 64 + 8:4 + 64 + 8 + 8], base=16)
        # Pull off end of packet, assume it's uncompressed, and no TLVs!
        scids += m[4 + 64 + 8 + 8 + 2 + 4 + 2:]
        assert this_start == start
        start += num

    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12, scid23],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    assert scids == encoded

    # Test overflow case doesn't split forever; should still only get 2 for this
    msgs = l4.query_gossip('query_channel_range',
                           genesis_blockhash,
                           1, 429496000,
                           filters=['0109', '0107', '0012'])
    assert len(msgs) == 2
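

# An illustrative decoder (not used above) for the reply_channel_range header
# fields the loop above slices out by hand: type 0x0108, then the 32-byte
# chain hash, two big-endian u32s (first_blocknum, number_of_blocks) and the
# complete/sync byte, followed by the length-prefixed encoded_short_ids.
def _decode_reply_channel_range_header(msg_hex):
    assert msg_hex.startswith('0108')
    chain_hash = msg_hex[4:4 + 64]
    first_blocknum = int(msg_hex[4 + 64:4 + 64 + 8], base=16)
    number_of_blocks = int(msg_hex[4 + 64 + 8:4 + 64 + 8 + 8], base=16)
    complete = int(msg_hex[4 + 64 + 8 + 8:4 + 64 + 8 + 8 + 2], base=16)
    return chain_hash, first_blocknum, number_of_blocks, complete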


# Long test involving 4 lightningd instances.
def test_report_routing_failure(node_factory, bitcoind):
    """Test routing failure and retrying of routing.
    """
    # The setup is as follows:
    #   l3-->l4
    #   ^   / |
    #   |  /  |
    #   | L   v
    #   l2<--l1
    #
    # l1 wants to pay to l4.
    # The shortest route is l1-l4, but l1 cannot
    # afford to pay l4 because l4 has all the
    # funds.
    # This is a local failure.
    # The next shortest route is l1-l2-l4, but
    # l2 cannot afford to pay l4 for same reason.
    # This is a remote failure.
    # Finally the only possible path is
    # l1-l2-l3-l4.

    # Setup
    # Construct lightningd
    l1, l2, l3, l4 = node_factory.get_nodes(4)

    # Wire them up
    # The ordering below matters!
    # Particularly, l1 is the payer and we will
    # wait for l1 to receive gossip for the
    # channel being made.
    channels = []
    for src, dst in [(l1, l2), (l2, l3), (l3, l4), (l4, l1), (l4, l2)]:
        src.rpc.connect(dst.info['id'], 'localhost', dst.port)
        print("src={}, dst={}".format(src.daemon.lightning_dir,
                                      dst.daemon.lightning_dir))
        c, _ = src.fundchannel(dst, 10**6)
        channels.append(c)
    mine_funding_to_announce(bitcoind, [l1, l2, l3, l4])

    for c in channels:
        l1.wait_channel_active(c)

    # Test
    inv = l4.rpc.invoice(1234567, 'inv', 'for testing')['bolt11']
    l1.rpc.pay(inv)


def test_query_short_channel_id(node_factory, bitcoind, chainparams):
    l1, l2, l3, l4 = node_factory.get_nodes(4)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
    chain_hash = chainparams['chain_hash']

    # Empty result tests.
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', '1x1x1', '2x2x2'],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    msgs = l1.query_gossip('query_short_channel_ids',
                           chain_hash,
                           encoded,
                           filters=['0109', '0107', '0012'])

    # Should just get the WIRE_REPLY_SHORT_CHANNEL_IDS_END = 262
    # (with chainhash and completeflag = 1)
    assert len(msgs) == 1
    assert msgs[0] == '0106{}01'.format(chain_hash)

    # Make channels public.
    scid12, _ = l1.fundchannel(l2, 10**5)
    scid23, _ = l2.fundchannel(l3, 10**5)
    mine_funding_to_announce(bitcoind, [l1, l2, l3])

    # Attach node which won't spam us (since it's not their channel).
    l4.rpc.connect(l1.info['id'], 'localhost', l1.port)
    l4.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l4.rpc.connect(l3.info['id'], 'localhost', l3.port)

    # Make sure it sees all channels, then node announcements.
    wait_for(lambda: len(l4.rpc.listchannels()['channels']) == 4)
    wait_for(lambda: all('alias' in n for n in l4.rpc.listnodes()['nodes']))

    # This query should get channel announcements, channel updates, and node announcements.
    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid23],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    msgs = l4.query_gossip('query_short_channel_ids',
                           chain_hash,
                           encoded,
                           filters=['0109', '0107', '0012'])

    assert len(msgs) == 6
    # 0x0100 = channel_announcement
    assert msgs[0].startswith('0100')
    # 0x0102 = channel_update
    assert msgs[1].startswith('0102')
    assert msgs[2].startswith('0102')
    # 0x0101 = node_announcement
    assert msgs[3].startswith('0101')
    assert msgs[4].startswith('0101')
    assert msgs[5] == '0106{}01'.format(chain_hash)

    encoded = subprocess.run(['devtools/mkencoded', '--scids', '00', scid12, scid23],
                             check=True,
                             timeout=TIMEOUT,
                             stdout=subprocess.PIPE).stdout.strip().decode()
    msgs = l4.query_gossip('query_short_channel_ids',
                           chain_hash,
                           encoded,
                           filters=['0109', '0107', '0012'])

    # Technically, this order could be different, but this matches code.
    assert len(msgs) == 10
    # 0x0100 = channel_announcement
    assert msgs[0].startswith('0100')
    # 0x0102 = channel_update
    assert msgs[1].startswith('0102')
    assert msgs[2].startswith('0102')
    # 0x0100 = channel_announcement
    assert msgs[3].startswith('0100')
    # 0x0102 = channel_update
    assert msgs[4].startswith('0102')
    assert msgs[5].startswith('0102')
    # 0x0101 = node_announcement
    assert msgs[6].startswith('0101')
    assert msgs[7].startswith('0101')
    assert msgs[8].startswith('0101')
    assert msgs[9] == '0106{}01'.format(chain_hash)
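

# A minimal sketch (not used above) of what devtools/mkencoded produces for
# each short_channel_id: 'BLOCKxTXxOUT' packs into 8 bytes as block height
# (24 bits), tx index (24 bits) and output index (16 bits), big-endian; the
# queries above prefix the array with '00' for the uncompressed encoding.
def _encode_scid(scid):
    block, txidx, outnum = (int(x) for x in scid.split('x'))
    return struct.pack('!Q', (block << 40) | (txidx << 16) | outnum).hex()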


def test_gossip_addresses(node_factory, bitcoind):
    l1 = node_factory.get_node(options={
        'announce-addr': [
            '[::]:3',
            '[::]',
            '127.0.0.1:2',
            '127.0.0.1',
            'vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd.onion',
            '4acth47i6kxnvkewtm6q7ib2s3ufpo5sqbsnzjpbi7utijcltosqemad.onion:1234'
        ],
    })
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 100000)
    bitcoind.generate_block(6)
    l2.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l1.info['id']))
    nodes = l2.rpc.listnodes(l1.info['id'])['nodes']

    if TEST_NETWORK == 'regtest':
        default_port = 19846
    else:
        assert TEST_NETWORK == 'liquid-regtest'
        default_port = 20735

    assert len(nodes) == 1 and nodes[0]['addresses'] == [
        {'type': 'ipv4', 'address': '127.0.0.1', 'port': 2},
        {'type': 'ipv4', 'address': '127.0.0.1', 'port': default_port},
        {'type': 'ipv6', 'address': '::', 'port': 3},
        {'type': 'ipv6', 'address': '::', 'port': default_port},
        {'type': 'torv3', 'address': 'vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd.onion', 'port': default_port},
        {'type': 'torv3', 'address': '4acth47i6kxnvkewtm6q7ib2s3ufpo5sqbsnzjpbi7utijcltosqemad.onion', 'port': 1234},
    ]


@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
def test_gossip_lease_rates(node_factory, bitcoind):
    lease_opts = {'lease-fee-basis': 50,
                  'lease-fee-base-sat': '2000msat',
                  'channel-fee-max-base-msat': '500sat',
                  'channel-fee-max-proportional-thousandths': 200}
    l1, l2 = node_factory.get_nodes(2, opts=[lease_opts, {}])

    rates = l1.rpc.call('funderupdate')
    assert rates['channel_fee_max_base_msat'] == Millisatoshi('500000msat')
    assert rates['channel_fee_max_proportional_thousandths'] == 200
    assert rates['funding_weight'] == 666  # Default on regtest
    assert rates['lease_fee_base_msat'] == Millisatoshi('2000msat')
    assert rates['lease_fee_basis'] == 50

    rates = l2.rpc.call('funderupdate')
    assert 'channel_fee_max_base_msat' not in rates
    assert 'channel_fee_max_proportional_thousandths' not in rates
    assert 'funding_weight' not in rates
    assert 'lease_fee_base_msat' not in rates
    assert 'lease_fee_basis' not in rates

    # Open a channel, check that the node_announcements
    # include offer details, as expected
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fundchannel(l2, 10**6)

    # Don't have l2 reject channel_announcement as too far in future.
    sync_blockheight(bitcoind, [l1, l2])
    # Announce depth is ALWAYS 6 blocks
    bitcoind.generate_block(5)

    l2.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l1.info['id']))
    l1.daemon.wait_for_log('Received node_announcement for node {}'
                           .format(l2.info['id']))

    l2_nodeinfo = only_one(l1.rpc.listnodes(l2.info['id'])['nodes'])
    l1_nodeinfo = only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])

    assert 'option_will_fund' not in l2_nodeinfo
    rates = l1_nodeinfo['option_will_fund']
    assert rates['channel_fee_max_base_msat'] == Millisatoshi('500000msat')
    assert rates['channel_fee_max_proportional_thousandths'] == 200
    assert rates['funding_weight'] == 666  # Default on regtest
    assert rates['lease_fee_base_msat'] == Millisatoshi('2000msat')
    assert rates['lease_fee_basis'] == 50

    # Update the node announce (set new on l2, turn off l1)
    # (Turn off by setting everything to zero)
    l1.rpc.call('funderupdate', {'channel_fee_max_base_msat': '0msat',
                                 'channel_fee_max_proportional_thousandths': 0,
                                 'funding_weight': 0,
                                 'lease_fee_base_msat': '0msat',
                                 'lease_fee_basis': 0})
    l2.rpc.call('funderupdate', {'channel_fee_max_base_msat': '30000msat',
                                 'channel_fee_max_proportional_thousandths': 100,
                                 'lease_fee_base_msat': '400000msat',
                                 'lease_fee_basis': 20})

    l1.daemon.wait_for_log('Received node_announcement for node {}'.format(l2.info['id']))
    l2.daemon.wait_for_log('Received node_announcement for node {}'.format(l1.info['id']))

    l2_nodeinfo = only_one(l1.rpc.listnodes(l2.info['id'])['nodes'])
    l1_nodeinfo = only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])

    assert 'option_will_fund' not in l1_nodeinfo
    rates = l2_nodeinfo['option_will_fund']
    assert rates['channel_fee_max_base_msat'] == Millisatoshi('30000msat')
    assert rates['channel_fee_max_proportional_thousandths'] == 100
    assert rates['funding_weight'] == 666  # Default on regtest
    assert rates['lease_fee_base_msat'] == Millisatoshi('400000msat')
    assert rates['lease_fee_basis'] == 20
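

# A rough sketch of how a lease fee quote derives from the rates
# advertised above, assuming the will_fund convention that
# lease_fee_basis is in basis points (1/10,000ths) of the leased amount
# and lease_fee_base_msat is a flat add-on; the helper name is ours and
# the onchain funding_weight fee term is omitted for brevity:
def sketch_lease_fee_msat(amount_msat, lease_fee_base_msat, lease_fee_basis):
    return lease_fee_base_msat + amount_msat * lease_fee_basis // 10_000


# e.g. leasing 1,000,000,000msat at the l2 rates set above
# (400000msat base, 20 basis points) would quote:
assert sketch_lease_fee_msat(10**9, 400_000, 20) == 2_400_000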


def test_gossip_store_load(node_factory):
    """Make sure we can read canned gossip store"""
    l1 = node_factory.get_node(start=False)
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("0c"          # GOSSIP_STORE_VERSION
                                  "000001b0"    # len
                                  "fea676e8"    # csum
                                  "5b8d9b44"    # timestamp
                                  "0100"        # WIRE_CHANNEL_ANNOUNCEMENT
                                  "bb8d7b6998cca3c2b3ce12a6bd73a8872c808bb48de2a30c5ad9cdf835905d1e27505755087e675fb517bbac6beb227629b694ea68f49d357458327138978ebfd7adfde1c69d0d2f497154256f6d5567a5cf2317c589e0046c0cc2b3e986cf9b6d3b44742bd57bce32d72cd1180a7f657795976130b20508b239976d3d4cdc4d0d6e6fbb9ab6471f664a662972e406f519eab8bce87a8c0365646df5acbc04c91540b4c7c518cec680a4a6af14dae1aca0fd5525220f7f0e96fcd2adef3c803ac9427fe71034b55a50536638820ef21903d09ccddd38396675b598587fa886ca711415c813fc6d69f46552b9a0a539c18f265debd0e2e286980a118ba349c216000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b50001021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c67503f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d203801fd8ab98032f11cc9e4916dd940417082727077609d5c7f8cc6e9a3ad25dd102517164b97ab46cee3826160841a36c46a2b7b9c74da37bdc070ed41ba172033a"
                                  "0000000a"    # len
                                  "99dc98b4"    # csum
                                  "00000000"    # timestamp
                                  "1005"        # WIRE_GOSSIP_STORE_CHANNEL_AMOUNT
                                  "0000000001000000"
                                  "0000008a"    # len
                                  "0c6aca0e"    # csum
                                  "5b8d9b44"    # timestamp
                                  "0102"        # WIRE_CHANNEL_UPDATE
                                  "1ea7c2eadf8a29eb8690511a519b5656e29aa0a853771c4e38e65c5abf43d907295a915e69e451f4c7a0c3dc13dd943cfbe3ae88c0b96667cd7d58955dbfedcf43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b500015b8d9b440100009000000000000003e8000003e8000000010000000000FFFFFF"
                                  "00000095"    # len
                                  "f036515e"    # csum
                                  "5aab817c"    # timestamp
                                  "0101"        # WIRE_NODE_ANNOUNCEMENT
                                  "cf5d870bc7ecabcb7cd16898ef66891e5f0c6c5851bd85b670f03d325bc44d7544d367cd852e18ec03f7f4ff369b06860a3b12b07b29f36fb318ca11348bf8ec00005aab817c03f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d23974b250757a7a6c6549544300000000000000000000000000000000000000000000000007010566933e2607"))

    l1.start()

    # May precede the Started msg waited for in 'start'.
    wait_for(lambda: l1.daemon.is_in_log('Read 1/1/1/0 cannounce/cupdate/nannounce/delete from store in 800 bytes, now 778 bytes'))
    assert not l1.daemon.is_in_log('gossip_store.*truncating')
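

# The canned stores in these tests all share one framing: a 1-byte
# version, then records of [4-byte len][4-byte csum][4-byte timestamp]
# [message], where the top bit of len (0x80000000) marks a deleted
# record.  A minimal reader sketch under those assumptions (field
# layout per the hex comments above; this is not a supported API):
def sketch_read_gossip_store(path):
    records = []
    with open(path, 'rb') as f:
        f.read(1)  # skip GOSSIP_STORE_VERSION byte
        while True:
            hdr = f.read(12)
            if len(hdr) < 12:
                break
            length, csum, timestamp = struct.unpack('>III', hdr)
            deleted = bool(length & 0x80000000)
            msg = f.read(length & 0x7FFFFFFF)
            msgtype = struct.unpack('>H', msg[:2])[0]
            records.append((msgtype, deleted, timestamp, csum, msg))
    return records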


def test_gossip_store_v10_upgrade(node_factory):
    """We remove a channel_update without an htlc_maximum_msat"""
    l1 = node_factory.get_node(start=False, allow_broken_log=True)
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("0a"          # GOSSIP_STORE_VERSION
                                  "000001b0"    # len
                                  "fea676e8"    # csum
                                  "5b8d9b44"    # timestamp
                                  "0100"        # WIRE_CHANNEL_ANNOUNCEMENT
                                  "bb8d7b6998cca3c2b3ce12a6bd73a8872c808bb48de2a30c5ad9cdf835905d1e27505755087e675fb517bbac6beb227629b694ea68f49d357458327138978ebfd7adfde1c69d0d2f497154256f6d5567a5cf2317c589e0046c0cc2b3e986cf9b6d3b44742bd57bce32d72cd1180a7f657795976130b20508b239976d3d4cdc4d0d6e6fbb9ab6471f664a662972e406f519eab8bce87a8c0365646df5acbc04c91540b4c7c518cec680a4a6af14dae1aca0fd5525220f7f0e96fcd2adef3c803ac9427fe71034b55a50536638820ef21903d09ccddd38396675b598587fa886ca711415c813fc6d69f46552b9a0a539c18f265debd0e2e286980a118ba349c216000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b50001021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c67503f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d203801fd8ab98032f11cc9e4916dd940417082727077609d5c7f8cc6e9a3ad25dd102517164b97ab46cee3826160841a36c46a2b7b9c74da37bdc070ed41ba172033a"
                                  "0000000a"    # len
                                  "99dc98b4"    # csum
                                  "00000000"    # timestamp
                                  "1005"        # WIRE_GOSSIP_STORE_CHANNEL_AMOUNT
                                  "0000000001000000"
                                  "00000082"    # len
                                  "fd421aeb"    # csum
                                  "5b8d9b44"    # timestamp
                                  "0102"        # WIRE_CHANNEL_UPDATE
                                  "1ea7c2eadf8a29eb8690511a519b5656e29aa0a853771c4e38e65c5abf43d907295a915e69e451f4c7a0c3dc13dd943cfbe3ae88c0b96667cd7d58955dbfedcf43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b500015b8d9b440000009000000000000003e8000003e800000001"))
    l1.start()

    # Channel "exists" but doesn't show in listchannels, as it has no updates.
    assert l1.rpc.listchannels() == {'channels': []}
    assert only_one(l1.rpc.listnodes('021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c675')['nodes'])
    assert only_one(l1.rpc.listnodes('03f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d2')['nodes'])
    assert len(l1.rpc.listnodes()['nodes']) == 2
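

# A channel_update only advertises htlc_maximum_msat when bit 0 of its
# message_flags byte is set; the v10 record above has that bit clear
# (and no trailing 8-byte maximum), which is what the upgrade removes.
# A tiny check we add for illustration, assuming BOLT #7 offsets after
# the 2-byte type: 64-byte signature, 32-byte chain_hash, 8-byte
# short_channel_id, 4-byte timestamp, then message_flags:
def sketch_update_has_htlc_max(update_hex):
    message_flags = int(update_hex[216:218], 16)
    return bool(message_flags & 1)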


def test_gossip_store_load_announce_before_update(node_factory):
    """Make sure we can read canned gossip store with node_announce before update.

    This happens when a channel_update gets replaced, leaving node_announce before it.
    """
    l1 = node_factory.get_node(start=False)
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("0c"          # GOSSIP_STORE_VERSION
                                  "000001b0"    # len
                                  "fea676e8"    # csum
                                  "5b8d9b44"    # timestamp
                                  "0100"        # WIRE_CHANNEL_ANNOUNCEMENT
                                  "bb8d7b6998cca3c2b3ce12a6bd73a8872c808bb48de2a30c5ad9cdf835905d1e27505755087e675fb517bbac6beb227629b694ea68f49d357458327138978ebfd7adfde1c69d0d2f497154256f6d5567a5cf2317c589e0046c0cc2b3e986cf9b6d3b44742bd57bce32d72cd1180a7f657795976130b20508b239976d3d4cdc4d0d6e6fbb9ab6471f664a662972e406f519eab8bce87a8c0365646df5acbc04c91540b4c7c518cec680a4a6af14dae1aca0fd5525220f7f0e96fcd2adef3c803ac9427fe71034b55a50536638820ef21903d09ccddd38396675b598587fa886ca711415c813fc6d69f46552b9a0a539c18f265debd0e2e286980a118ba349c216000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b50001021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c67503f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d203801fd8ab98032f11cc9e4916dd940417082727077609d5c7f8cc6e9a3ad25dd102517164b97ab46cee3826160841a36c46a2b7b9c74da37bdc070ed41ba172033a"
                                  "0000000a"    # len
                                  "99dc98b4"    # csum
                                  "00000000"    # timestamp
                                  "1005"        # WIRE_GOSSIP_STORE_CHANNEL_AMOUNT
                                  "0000000001000000"
                                  "8000008a"    # len (DELETED)
                                  "ca01ed56"    # csum
                                  "5b8d9b44"    # timestamp
                                  "0102"        # WIRE_CHANNEL_UPDATE
                                  # Note - msgflags set and htlc_max added by hand, so signature doesn't match (gossipd ignores)
                                  "1ea7c2eadf8a29eb8690511a519b5656e29aa0a853771c4e38e65c5abf43d907295a915e69e451f4c7a0c3dc13dd943cfbe3ae88c0b96667cd7d58955dbfedcf43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b500015b8d9b440100009000000000000003e8000003e8000000010000000000FFFFFF"
                                  "00000095"    # len
                                  "f036515e"    # csum
                                  "5aab817c"    # timestamp
                                  "0101"        # WIRE_NODE_ANNOUNCEMENT
                                  "cf5d870bc7ecabcb7cd16898ef66891e5f0c6c5851bd85b670f03d325bc44d7544d367cd852e18ec03f7f4ff369b06860a3b12b07b29f36fb318ca11348bf8ec00005aab817c03f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d23974b250757a7a6c6549544300000000000000000000000000000000000000000000000007010566933e2607"
                                  "0000008a"    # len
                                  "0c6aca0e"    # csum
                                  "5b8d9b44"    # timestamp
                                  "0102"        # WIRE_CHANNEL_UPDATE
                                  # Note - msgflags set and htlc_max added by hand, so signature doesn't match (gossipd ignores)
                                  "1ea7c2eadf8a29eb8690511a519b5656e29aa0a853771c4e38e65c5abf43d907295a915e69e451f4c7a0c3dc13dd943cfbe3ae88c0b96667cd7d58955dbfedcf43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b500015b8d9b440100009000000000000003e8000003e8000000010000000000FFFFFF"))

    l1.start()

    # May precede the Started msg waited for in 'start'.
    wait_for(lambda: l1.daemon.is_in_log('Read 1/1/1/1 cannounce/cupdate/nannounce/delete from store in 950 bytes, now 778 bytes'))
    assert not l1.daemon.is_in_log('gossip_store.*truncating')


def test_gossip_store_load_amount_truncated(node_factory):
    """Make sure we can read canned gossip store with truncated amount"""
    l1 = node_factory.get_node(start=False, allow_broken_log=True)
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("0c"          # GOSSIP_STORE_VERSION
                                  "000001b0"    # len
                                  "fea676e8"    # csum
                                  "5b8d9b44"    # timestamp
                                  "0100"        # WIRE_CHANNEL_ANNOUNCEMENT
                                  "bb8d7b6998cca3c2b3ce12a6bd73a8872c808bb48de2a30c5ad9cdf835905d1e27505755087e675fb517bbac6beb227629b694ea68f49d357458327138978ebfd7adfde1c69d0d2f497154256f6d5567a5cf2317c589e0046c0cc2b3e986cf9b6d3b44742bd57bce32d72cd1180a7f657795976130b20508b239976d3d4cdc4d0d6e6fbb9ab6471f664a662972e406f519eab8bce87a8c0365646df5acbc04c91540b4c7c518cec680a4a6af14dae1aca0fd5525220f7f0e96fcd2adef3c803ac9427fe71034b55a50536638820ef21903d09ccddd38396675b598587fa886ca711415c813fc6d69f46552b9a0a539c18f265debd0e2e286980a118ba349c216000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b50001021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c67503f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d203801fd8ab98032f11cc9e4916dd940417082727077609d5c7f8cc6e9a3ad25dd102517164b97ab46cee3826160841a36c46a2b7b9c74da37bdc070ed41ba172033a"))
    l1.start()

    # May precede the Started msg waited for in 'start'.
    wait_for(lambda: l1.daemon.is_in_log(r'\*\*BROKEN\*\* gossipd: gossip_store: channel_announcement without amount \(offset 1\). Moving to gossip_store.corrupt and truncating'))
    wait_for(lambda: l1.daemon.is_in_log(r'gossip_store: Read 0/0/0/0 cannounce/cupdate/nannounce/delete from store in 467 bytes, now 1 bytes \(populated=false\)'))
    assert os.path.exists(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store.corrupt'))


@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_node_reannounce(node_factory, bitcoind, chainparams):
    "Test that we reannounce a node when parameters change"
    l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True,
                                              'log-level': 'io'})
    bitcoind.generate_block(5)
    genesis_blockhash = chainparams['chain_hash']

    # Wait for node_announcement for l1.
    l2.daemon.wait_for_log(r'\[IN\] 0101.*{}'.format(l1.info['id']))
    # Wait for it to process it.
    wait_for(lambda: l2.rpc.listnodes(l1.info['id'])['nodes'] != [])
    wait_for(lambda: 'alias' in only_one(l2.rpc.listnodes(l1.info['id'])['nodes']))
    assert only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['alias'].startswith('JUNIORBEAM')

    # Make sure it gets features correct.
    assert only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['features'] == expected_node_features()

    l1.stop()
    l1.daemon.opts['alias'] = 'SENIORBEAM'
    # It won't update within 5 seconds, so sleep.
    time.sleep(5)
    l1.start()

    wait_for(lambda: only_one(l2.rpc.listnodes(l1.info['id'])['nodes'])['alias'] == 'SENIORBEAM')

    # Get node_announcements.
    msgs = l1.query_gossip('gossip_timestamp_filter',
                           genesis_blockhash,
                           '0', '0xFFFFFFFF',
                           # Filter out gossip_timestamp_filter,
                           # channel_announcement and channel_updates.
                           # And pings.
                           filters=['0109', '0107', '0102', '0100', '0012'])

    # May send its own announcement *twice*, since it always spams us.
    msgs = list(set(msgs))
    assert len(msgs) == 2
    assert (bytes("SENIORBEAM", encoding="utf8").hex() in msgs[0]
            or bytes("SENIORBEAM", encoding="utf8").hex() in msgs[1])

    # Restart should re-xmit exact same update on reconnect!
    l1.restart()

    msgs2 = l1.query_gossip('gossip_timestamp_filter',
                            genesis_blockhash,
                            '0', '0xFFFFFFFF',
                            # Filter out gossip_timestamp_filter,
                            # channel_announcement and channel_updates.
                            # And pings.
                            filters=['0109', '0107', '0102', '0100', '0012'])
    # May send its own announcement *twice*, since it always spams us.
    assert set(msgs) == set(msgs2)
    # Won't have queued up another one, either.
    assert not l1.daemon.is_in_log('node_announcement: delaying')

    # Try updating the lease rates advertisement
    ad = l1.rpc.call('setleaserates',
                     {'lease_fee_base_msat': '1000sat',
                      'lease_fee_basis': 20,
                      'funding_weight': 150,
                      'channel_fee_max_base_msat': '2000msat',
                      'channel_fee_max_proportional_thousandths': 22})

    assert ad['lease_fee_base_msat'] == Millisatoshi('1000000msat')
    assert ad['lease_fee_basis'] == 20
    assert ad['funding_weight'] == 150
    assert ad['channel_fee_max_base_msat'] == Millisatoshi('2000msat')
    assert ad['channel_fee_max_proportional_thousandths'] == 22

    # May send its own announcement *twice*, since it always spams us.
    msgs2 = l1.query_gossip('gossip_timestamp_filter',
                            genesis_blockhash,
                            '0', '0xFFFFFFFF',
                            # Filter out gossip_timestamp_filter,
                            # channel_announcement and channel_updates.
                            # And pings.
                            filters=['0109', '0107', '0102', '0100', '0012'])
    assert set(msgs) != set(msgs2)
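

# For reference (a table we add here, not part of the test helpers):
# the filter strings above are big-endian hex of BOLT #1 message type
# codes, matching the numeric checks used elsewhere in this file.
WIRE_TYPE_NAMES = {
    0x0012: 'ping',                     # 18
    0x0100: 'channel_announcement',     # 256
    0x0101: 'node_announcement',        # 257
    0x0102: 'channel_update',           # 258
    0x0107: 'query_channel_range',      # 263
    0x0109: 'gossip_timestamp_filter',  # 265
}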


def test_gossipwith(node_factory):
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True)

    out = subprocess.run(['devtools/gossipwith',
                          '--initial-sync',
                          '--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
                          '{}@localhost:{}'.format(l1.info['id'], l1.port)],
                         check=True,
                         timeout=TIMEOUT, stdout=subprocess.PIPE).stdout

    msgs = set()
    while len(out):
        l, t = struct.unpack('>HH', out[0:4])
        msg = out[2:2 + l]
        out = out[2 + l:]
        # Ignore pings, timestamp_filter
        if t == 265 or t == 18:
            continue
        # channel_announcement node_announcement or channel_update
        assert t == 256 or t == 257 or t == 258
        msgs.add(msg)

    # one channel announcement, two channel_updates, two node announcements.
    # due to initial blast, we can have duplicates!
    assert len(msgs) == 5


def test_gossip_notices_close(node_factory, bitcoind):
    # We want IO logging so we can replay a channel_announce to l1;
    # we also *really* do feed it bad gossip!
    l1, l2, l3 = node_factory.get_nodes(3, opts=[{'log-level': 'io',
                                                  'allow_bad_gossip': True},
                                                 {},
                                                 {}])
    node_factory.join_nodes([l2, l3])
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

    mine_funding_to_announce(bitcoind, [l1, l2, l3])

    # Make sure l1 learns about channel and nodes.
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: ['alias' in n for n in l1.rpc.listnodes()['nodes']] == [True, True])
    l1.rpc.disconnect(l2.info['id'])

    # Grab channel_announcement from io logs (ends in ')
    channel_announcement = l1.daemon.is_in_log(r'\[IN\] 0100').split(' ')[-1][:-1]
    channel_update = l1.daemon.is_in_log(r'\[IN\] 0102').split(' ')[-1][:-1]
    node_announcement = l1.daemon.is_in_log(r'\[IN\] 0101').split(' ')[-1][:-1]

    txid = l2.rpc.close(l3.info['id'])['txid']
    wait_for(lambda: l2.rpc.listpeerchannels(l3.info['id'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
    bitcoind.generate_block(13, txid)

    wait_for(lambda: l1.rpc.listchannels()['channels'] == [])
    wait_for(lambda: l1.rpc.listnodes()['nodes'] == [])

    subprocess.run(['devtools/gossipwith',
                    '--max-messages=0',
                    '{}@localhost:{}'.format(l1.info['id'], l1.port),
                    channel_announcement,
                    channel_update,
                    node_announcement],
                   timeout=TIMEOUT)

    # l1 should reject it.
    assert(l1.rpc.listchannels()['channels'] == [])
    assert(l1.rpc.listnodes()['nodes'] == [])

    l1.stop()
    l1.start()
    assert(l1.rpc.listchannels()['channels'] == [])
    assert(l1.rpc.listnodes()['nodes'] == [])


def test_getroute_exclude_duplicate(node_factory):
    """Test that accidentally duplicating the same channel or same node
    in the exclude list will not have permanent effects.
    """
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True)

    # Starting route
    route = l1.rpc.getroute(l2.info['id'], 1, 1)['route']
    # l1 id is > l2 id, so 1 means l1->l2
    chan_l1l2 = route[0]['channel'] + '/1'

    # This should fail to find a route as the only viable channel
    # is excluded, and worse, is excluded twice.
    with pytest.raises(RpcError):
        l1.rpc.getroute(l2.info['id'], 1, 1, exclude=[chan_l1l2, chan_l1l2])

    # This should still succeed since nothing is excluded anymore
    # and in particular should return the exact same route as
    # earlier.
    route2 = l1.rpc.getroute(l2.info['id'], 1, 1)['route']
    assert route == route2

    # This should also fail to find a route as the only viable node
    # is excluded, and worse, is excluded twice.
    with pytest.raises(RpcError):
        l1.rpc.getroute(l2.info['id'], 1, 1, exclude=[l2.info['id'], l2.info['id']])

    # This should still succeed since nothing is excluded anymore
    # and in particular should return the exact same route as
    # earlier.
    route3 = l1.rpc.getroute(l2.info['id'], 1, 1)['route']
    assert route == route3
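

# The "scid/direction" strings above follow the channel_update
# direction convention: 0 when the update comes from the node with the
# lexicographically lesser id, 1 from the greater (hence "l1 id is >
# l2 id, so 1 means l1->l2").  A small helper sketch under that
# convention (the helper is ours, for illustration only):
def sketch_exclude_entry(scid, from_id, to_id):
    return '{}/{}'.format(scid, 0 if from_id < to_id else 1)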


def test_getroute_exclude(node_factory, bitcoind):
    """Test getroute's exclude argument"""
    l1, l2, l3, l4, l5 = node_factory.get_nodes(5)
    node_factory.join_nodes([l1, l2, l3, l4], wait_for_announce=True)

    # This should work
    route = l1.rpc.getroute(l4.info['id'], 1, 1)['route']

    # l1 id is > l2 id, so 1 means l1->l2
    chan_l1l2 = route[0]['channel'] + '/1'
    chan_l2l1 = route[0]['channel'] + '/0'

    # This should not
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l1l2])

    # This should also not
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l2.info['id']])

    # Blocking the wrong way should be fine.
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l2l1])

    # Now, create an alternate (better) route.
    l2.rpc.connect(l4.info['id'], 'localhost', l4.port)
    scid, _ = l2.fundchannel(l4, 1000000, wait_for_active=False)
    mine_funding_to_announce(bitcoind, [l1, l2, l3, l4, l5])

    # We don't wait above, because we care about it hitting l1.
    l1.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
                             .format(scid),
                             r'update for channel {}/1 now ACTIVE'
                             .format(scid)])

    # l3 id is > l2 id, so 1 means l3->l2
    # chan_l3l2 = route[1]['channel'] + '/1'
    chan_l2l3 = route[1]['channel'] + '/0'

    # l4 is > l2
    # chan_l4l2 = scid + '/1'
    chan_l2l4 = scid + '/0'

    # This works
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l2l3])

    # This works
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l2l4])

    # This works
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l3.info['id']])

    # This doesn't
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l2l3, chan_l2l4])

    # This doesn't
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l3.info['id'], chan_l2l4])

    l1.rpc.connect(l5.info['id'], 'localhost', l5.port)
    scid15, _ = l1.fundchannel(l5, 1000000, wait_for_active=False)
    l5.rpc.connect(l4.info['id'], 'localhost', l4.port)
    scid54, _ = l5.fundchannel(l4, 1000000, wait_for_active=False)
    mine_funding_to_announce(bitcoind, [l1, l2, l3, l4, l5])

    # We don't wait above, because we care about it hitting l1.
    l1.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
                             .format(scid15),
                             r'update for channel {}/1 now ACTIVE'
                             .format(scid15),
                             r'update for channel {}/0 now ACTIVE'
                             .format(scid54),
                             r'update for channel {}/1 now ACTIVE'
                             .format(scid54)])

    # This works now
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l3.info['id'], chan_l2l4])

    # This works now
    l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l3.info['id'], l5.info['id']])

    # This doesn't work
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[l3.info['id'], l5.info['id'], chan_l2l4])

    # This doesn't work
    with pytest.raises(RpcError):
        l1.rpc.getroute(l4.info['id'], 1, 1, exclude=[chan_l2l3, l5.info['id'], chan_l2l4])


def setup_gossip_store_test(node_factory, bitcoind):
    l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)

    # Now, replace the one channel_update, so it's past the node announcements.
    l2.rpc.setchannel(l3.info['id'], 20, 1000)
    l3.rpc.setchannel(l2.info['id'], 21, 1001)

    # Wait for it to hit l1's gossip store.
    wait_for(lambda: sorted([c['fee_per_millionth'] for c in l1.rpc.listchannels()['channels']]) == [10, 10, 1000, 1001])

    # Records in l2's store now look (something) like:
    #  channel_announcement (scid12)
    #  channel_amount
    #  channel_update (scid12/0)
    #  channel_update (scid12/1)
    #  node_announcement (l1)
    #  node_announcement (l2)
    #  channel_announcement (scid23)
    #  channel_amount
    #  DELETED: channel_update (scid23/0)
    #  DELETED: channel_update (scid23/1)
    #  node_announcement
    #  channel_update (scid23/0)
    #  channel_update (scid23/1)
    return l2


def test_gossip_store_compact_noappend(node_factory, bitcoind):
    l2 = setup_gossip_store_test(node_factory, bitcoind)

    # It should truncate this, not leave junk!
    with open(os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, 'gossip_store.tmp'), 'wb') as f:
        f.write(bytearray.fromhex("07deadbeef"))

    l2.restart()
    wait_for(lambda: l2.daemon.is_in_log('gossip_store: Read '))
    assert not l2.daemon.is_in_log('gossip_store:.*truncate')


def test_gossip_store_load_complex(node_factory, bitcoind):
    l2 = setup_gossip_store_test(node_factory, bitcoind)

    l2.restart()

    wait_for(lambda: l2.daemon.is_in_log('gossip_store: Read '))


def test_gossip_store_load_no_channel_update(node_factory):
    """Make sure we can read truncated gossip store with a channel_announcement and no channel_update"""
    l1 = node_factory.get_node(start=False, allow_broken_log=True)

    # A channel announcement with no channel_update.
    with open(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store'), 'wb') as f:
        f.write(bytearray.fromhex("0d"          # GOSSIP_STORE_VERSION
                                  "000001b0"    # len
                                  "fea676e8"    # csum
                                  "5b8d9b44"    # timestamp
                                  "0100"        # WIRE_CHANNEL_ANNOUNCEMENT
                                  "bb8d7b6998cca3c2b3ce12a6bd73a8872c808bb48de2a30c5ad9cdf835905d1e27505755087e675fb517bbac6beb227629b694ea68f49d357458327138978ebfd7adfde1c69d0d2f497154256f6d5567a5cf2317c589e0046c0cc2b3e986cf9b6d3b44742bd57bce32d72cd1180a7f657795976130b20508b239976d3d4cdc4d0d6e6fbb9ab6471f664a662972e406f519eab8bce87a8c0365646df5acbc04c91540b4c7c518cec680a4a6af14dae1aca0fd5525220f7f0e96fcd2adef3c803ac9427fe71034b55a50536638820ef21903d09ccddd38396675b598587fa886ca711415c813fc6d69f46552b9a0a539c18f265debd0e2e286980a118ba349c216000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea33090000000013a63c0000b50001021bf3de4e84e3d52f9a3e36fbdcd2c4e8dbf203b9ce4fc07c2f03be6c21d0c67503f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d203801fd8ab98032f11cc9e4916dd940417082727077609d5c7f8cc6e9a3ad25dd102517164b97ab46cee3826160841a36c46a2b7b9c74da37bdc070ed41ba172033a"
                                  "0000000a"    # len
                                  "99dc98b4"    # csum
                                  "00000000"    # timestamp
                                  "1005"        # WIRE_GOSSIP_STORE_CHANNEL_AMOUNT
                                  "0000000001000000"
                                  "00000095"    # len
                                  "f036515e"    # csum
                                  "5aab817c"    # timestamp
                                  "0101"        # WIRE_NODE_ANNOUNCEMENT
                                  "cf5d870bc7ecabcb7cd16898ef66891e5f0c6c5851bd85b670f03d325bc44d7544d367cd852e18ec03f7f4ff369b06860a3b12b07b29f36fb318ca11348bf8ec00005aab817c03f113414ebdc6c1fb0f33c99cd5a1d09dd79e7fdf2468cf1fe1af6674361695d23974b250757a7a6c6549544300000000000000000000000000000000000000000000000007010566933e2607"))
    l1.start()

    # May precede the Started msg waited for in 'start'.
    wait_for(lambda: l1.daemon.is_in_log('Read 1/0/1/0 cannounce/cupdate/nannounce/delete from store in 650 bytes, now 628 bytes'))
    assert not os.path.exists(os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store.corrupt'))


def test_gossip_store_compact_on_load(node_factory, bitcoind):
    l2 = setup_gossip_store_test(node_factory, bitcoind)

    gs_path = os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, 'gossip_store')
    gs = subprocess.run(['devtools/dump-gossipstore', '--print-deleted', gs_path],
                        check=True, timeout=TIMEOUT, stdout=subprocess.PIPE)
    print(gs.stdout.decode())

    l2.restart()

    # These appear before we're fully started, so will already be in log:
    assert l2.daemon.is_in_log('gossip_store: Read 2/4/3/2 cannounce/cupdate/nannounce/delete from store')


def test_gossip_announce_invalid_block(node_factory, bitcoind):
    """bitcoind lags and we might get an announcement for a block we don't have.
    """
    # Need to slow down the poll interval so the announcement precedes the
    # blockchain catchup, otherwise we won't call `getfilteredblock`.
    opts = {'dev-bitcoind-poll': TIMEOUT // 2}
    l1 = node_factory.get_node(options=opts)
    bitcoind.generate_block(1)
    assert bitcoind.rpc.getblockchaininfo()['blocks'] == 102

    # Test gossip for an unknown block.
    subprocess.run(['devtools/gossipwith',
                    '--max-messages=0',
                    '{}@localhost:{}'.format(l1.info['id'], l1.port),
                    # short_channel_id=103x1x1
                    '01008d9f3d16dbdd985c099b74a3c9a74ccefd52a6d2bd597a553ce9a4c7fac3bfaa7f93031932617d38384cc79533730c9ce875b02643893cacaf51f503b5745fc3aef7261784ce6b50bff6fc947466508b7357d20a7c2929cc5ec3ae649994308527b2cbe1da66038e3bfa4825b074237708b455a4137bdb541cf2a7e6395a288aba15c23511baaae722fdb515910e2b42581f9c98a1f840a9f71897b4ad6f9e2d59e1ebeaf334cf29617633d35bcf6e0056ca0be60d7c002337bbb089b1ab52397f734bcdb2e418db43d1f192195b56e60eefbf82acf043d6068a682e064db23848b4badb20d05594726ec5b59267f4397b093747c23059b397b0c5620c4ab37a000006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f0000670000010001022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59035d2b1192dfba134e10e540875d366ebc8bc353d5aa766b80c090b39c3a5d885d029053521d6ea7a52cdd55f733d0fb2d077c0373b0053b5b810d927244061b757302d6063d022691b2490ab454dee73a57c6ff5d308352b461ece69f3c284f2c2412'],
                   check=True, timeout=TIMEOUT)

    # Make sure it's OK once it's caught up.
    sync_blockheight(bitcoind, [l1])


def test_gossip_announce_unknown_block(node_factory, bitcoind):
    """Don't backfill the future!

    If we get a channel_announcement that is for a block height that is above
    our sync height we should not store the filteredblock in the blocks table,
    otherwise we end up with a duplicate when we finally catch up with the
    blockchain.
    """
    # Need to slow down the poll interval so the announcement precedes the
    # blockchain catchup, otherwise we won't call `getfilteredblock`.
    opts = {'dev-bitcoind-poll': TIMEOUT // 2}
    l1 = node_factory.get_node(options=opts)
    bitcoind.generate_block(2)
    assert bitcoind.rpc.getblockchaininfo()['blocks'] == 103

    # Test gossip for unknown block.
    subprocess.run(['devtools/gossipwith',
                    '--max-messages=0',
                    '{}@localhost:{}'.format(l1.info['id'], l1.port),
                    # short_channel_id=103x1x1
                    '01008d9f3d16dbdd985c099b74a3c9a74ccefd52a6d2bd597a553ce9a4c7fac3bfaa7f93031932617d38384cc79533730c9ce875b02643893cacaf51f503b5745fc3aef7261784ce6b50bff6fc947466508b7357d20a7c2929cc5ec3ae649994308527b2cbe1da66038e3bfa4825b074237708b455a4137bdb541cf2a7e6395a288aba15c23511baaae722fdb515910e2b42581f9c98a1f840a9f71897b4ad6f9e2d59e1ebeaf334cf29617633d35bcf6e0056ca0be60d7c002337bbb089b1ab52397f734bcdb2e418db43d1f192195b56e60eefbf82acf043d6068a682e064db23848b4badb20d05594726ec5b59267f4397b093747c23059b397b0c5620c4ab37a000006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f0000670000010001022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59035d2b1192dfba134e10e540875d366ebc8bc353d5aa766b80c090b39c3a5d885d029053521d6ea7a52cdd55f733d0fb2d077c0373b0053b5b810d927244061b757302d6063d022691b2490ab454dee73a57c6ff5d308352b461ece69f3c284f2c2412'],
                   check=True, timeout=TIMEOUT)

    # Make sure it's OK once it's caught up.
    sync_blockheight(bitcoind, [l1])


def test_gossip_no_backtalk(node_factory):
    # l3 connects, gets gossip, but should *not* play it back.
    l1, l2, l3 = node_factory.get_nodes(3,
                                        opts=[{}, {}, {'log-level': 'io'}])
    node_factory.join_nodes([l1, l2], wait_for_announce=True)

    l3.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # Will get channel_announce, then two channel_update and two node_announcement
    l3.daemon.wait_for_logs([r'\[IN\] 0100',
                             r'\[IN\] 0102', r'\[IN\] 0102',
                             r'\[IN\] 0101', r'\[IN\] 0101'])

    # With --developer, this is long enough for gossip flush.
    time.sleep(2)
    assert not l3.daemon.is_in_log(r'\[OUT\] 0100')


def check_socket(ip_addr, port):
    result = True
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # let's also check for fatal and try it ;-)
    try:
        result = sock.connect_ex((ip_addr, port))
        sock.close()
    except Exception:
        return False
    return not result
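

# check_socket returns True when something accepts a TCP connection on
# ip_addr:port (connect_ex returns 0 on success), so the Tor tests
# below use it as a cheap "is Tor listening?" probe, e.g.:
#
#     if not check_socket('127.0.0.1', 9051):
#         return   # no Tor service port; skip silently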


def test_static_tor_onions(node_factory):
    """First basic tests ;-)

    Assume that tor is configured and just test
    if we see the right onion address for our blob
    """
    # please define your values
    torip = '127.0.0.1'
    torips = '127.0.0.1:9051'
    torport = 9050
    torserviceport = 9051
    portA, portB = reserve(), reserve()

    if not check_socket(format(torip), torserviceport):
        return
    if not check_socket(format(torip), torport):
        return

    l1 = node_factory.get_node(may_fail=True, options={
        'bind-addr': '127.0.0.1:{}'.format(portA),
        'addr': ['statictor:{}'.format(torips)]
    })
    l2 = node_factory.get_node(may_fail=True, options={
        'bind-addr': '127.0.0.1:{}'.format(portB),
        'addr': ['statictor:{}/torblob=11234567890123456789012345678901/torport={}'.format(torips, 9736)]
    })

    assert l1.daemon.is_in_log('127.0.0.1:{}'.format(l1.port))
    # Did not specify torport, so it's the default.
    assert l1.daemon.is_in_log('.onion:{}'.format(default_ln_port(l1.info["network"])))
    assert l2.daemon.is_in_log('x2y4zvh4fn5q3eouuh7nxnc7zeawrqoutljrup2xjtiyxgx3emgkemad.onion:{},127.0.0.1:{}'.format(9736, l2.port))


def test_tor_port_onions(node_factory):
    """First basic tests for torport ;-)

    Assume that tor is configured and just test
    if we see the right onion address for our blob
    """
    # please define your values
    torip = '127.0.0.1'
    torips = '127.0.0.1:9051'
    torport = 9050
    torserviceport = 9051

    if not check_socket(torip, torserviceport):
        return
    if not check_socket(torip, torport):
        return

    portA, portB = reserve(), reserve()

    l1 = node_factory.get_node(may_fail=True, options={'bind-addr': '127.0.0.1:{}'.format(portA), 'addr': ['statictor:{}/torport=45321'.format(torips)]})
    l2 = node_factory.get_node(may_fail=True, options={'bind-addr': '127.0.0.1:{}'.format(portB), 'addr': ['statictor:{}/torport=45321/torblob=11234567890123456789012345678901'.format(torips)]})

    assert l1.daemon.is_in_log('45321,127.0.0.1:{}'.format(l1.port))
    assert l2.daemon.is_in_log('x2y4zvh4fn5q3eouuh7nxnc7zeawrqoutljrup2xjtiyxgx3emgkemad.onion:45321,127.0.0.1:{}'.format(l2.port))


def test_routetool(node_factory):
    """Test that route tool can see published channels"""
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True)

    subprocess.run(['devtools/route',
                    os.path.join(l1.daemon.lightning_dir,
                                 TEST_NETWORK,
                                 'gossip_store'),
                    l1.info['id'],
                    l2.info['id']],
                   check=True, timeout=TIMEOUT)


def test_addgossip(node_factory):
    l1, l2 = node_factory.line_graph(2, fundchannel=True, wait_for_announce=True,
                                     opts={'log-level': 'io'})

    # We should get two node_announcements, one channel_announcement, and two
    # channel_update.
    l3 = node_factory.get_node()

    # 0x0100 = channel_announcement
    # 0x0102 = channel_update
    # 0x0101 = node_announcement
    l1.daemon.logsearch_start = 0
    ann = l1.daemon.wait_for_log(r"\[(OUT|IN)\] 0100.*")  # Either direction will suppress the other.

    l2.daemon.logsearch_start = 0
    l2.daemon.wait_for_log(r"\[(OUT|IN)\] 0100.*")

    # Be sure not to get the *private* updates!
    upd1 = l1.daemon.is_in_log(r"\[OUT\] 0102.*", start=l1.daemon.logsearch_start)
    upd2 = l2.daemon.is_in_log(r"\[OUT\] 0102.*", start=l2.daemon.logsearch_start)

    nann1 = l1.daemon.is_in_log(r"\[OUT\] 0101.*")
    nann2 = l2.daemon.is_in_log(r"\[OUT\] 0101.*")

    # Feed them to l3 (each one starts with PREFIX TIMESTAMP chanid-xxx: [OUT] ...)
    l3.rpc.addgossip(ann.split()[4])

    l3.rpc.addgossip(upd1.split()[4])
    l3.rpc.addgossip(upd2.split()[4])
    l3.rpc.addgossip(nann1.split()[4])
    l3.rpc.addgossip(nann2.split()[4])

    # In this case, it can actually have to wait, since it does scid lookup.
    wait_for(lambda: len(l3.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: len(l3.rpc.listnodes()['nodes']) == 2)

    # Now corrupt an update
    badupdate = upd1.split()[4]
    if badupdate.endswith('f'):
        badupdate = badupdate[:-1] + 'e'
    else:
        badupdate = badupdate[:-1] + 'f'

    with pytest.raises(RpcError, match='Bad signature'):
        l3.rpc.addgossip(badupdate)


def test_topology_leak(node_factory, bitcoind):
    l1, l2, l3 = node_factory.line_graph(3)

    l1.rpc.listchannels()
    mine_funding_to_announce(bitcoind, [l1, l2, l3])

    # Wait until l1 sees all the channels.
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 4)

    # Close and wait for gossip to catchup.
    txid = l2.rpc.close(l3.info['id'])['txid']
    bitcoind.generate_block(13, txid)

    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)


def test_parms_listforwards(node_factory):
    """
    Simple test to ensure that the ordering of the listforwards
    parameters is correct, as described in the documentation.

    This test was prompted by an issue report in the IRC channel;
    it is simple and not very useful, but it is good to have to
    avoid similar errors in the future.
    """
    l1, l2 = node_factory.line_graph(2)

    l2.stop()
    l2.daemon.opts['allow-deprecated-apis'] = True
    l2.start()

    forwards_new = l1.rpc.listforwards("settled")["forwards"]
    forwards_dep = l2.rpc.call("listforwards", {"in_channel": "0x1x2", "out_channel": "0x2x3", "status": "settled"})["forwards"]

    assert len(forwards_new) == 0
    assert len(forwards_dep) == 0


def test_close_12_block_delay(node_factory, bitcoind):
    l1, l2, l3, l4 = node_factory.line_graph(4, wait_for_announce=True)

    # Close l1-l2
    txid = l1.rpc.close(l2.info['id'])['txid']
    bitcoind.generate_block(1, txid)

    # But l4 doesn't believe it immediately.
    l4.daemon.wait_for_log("channel .* closing soon due to the funding outpoint being spent")

    # Close l2-l3 one block later.
    txid = l2.rpc.close(l3.info['id'])['txid']
    bitcoind.generate_block(1, txid)
    l4.daemon.wait_for_log("channel .* closing soon due to the funding outpoint being spent")

    # BOLT #7:
    #  - once its funding output has been spent OR reorganized out:
    #    - SHOULD forget a channel after a 12-block delay.
    # That implies 12 blocks *after* spending, i.e. 13 blocks deep!

    # 12 blocks deep, l4 still sees it
    bitcoind.generate_block(10)
    sync_blockheight(bitcoind, [l4])
    assert len(l4.rpc.listchannels(source=l1.info['id'])['channels']) == 1

    # 13 blocks deep does it.
    bitcoind.generate_block(1)
    wait_for(lambda: l4.rpc.listchannels(source=l1.info['id'])['channels'] == [])

    # Other channel still visible.
    assert len(l4.rpc.listchannels(source=l2.info['id'])['channels']) == 1

    # Restart: it remembers channel is dying.
    l4.restart()

    # One more block, it's forgotten too.
    bitcoind.generate_block(1)
    wait_for(lambda: l4.rpc.listchannels(source=l2.info['id'])['channels'] == [])
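

# The BOLT #7 bookkeeping above, as a tiny sketch we add for clarity
# (helper name is ours): a spend mined in block h may be forgotten once
# the chain reaches h + 12, at which point the spend is 13 blocks deep.
def sketch_forget_height(spend_height):
    return spend_height + 12


assert sketch_forget_height(100) == 112  # the block-100 spend is then 13 deep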


def test_gossip_not_dying(node_factory, bitcoind):
    l1 = node_factory.get_node()
    l2, l3 = node_factory.line_graph(2, wait_for_announce=True)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    # Wait until it sees all the updates, node announcements.
    wait_for(lambda: len([n for n in l1.rpc.listnodes()['nodes'] if 'alias' in n])
             + len(l1.rpc.listchannels()['channels']) == 4)

    def get_gossip(node):
        out = subprocess.run(['devtools/gossipwith',
                              '--initial-sync',
                              '--timeout-after=2',
                              '{}@localhost:{}'.format(node.info['id'], node.port)],
                             check=True,
                             timeout=TIMEOUT, stdout=subprocess.PIPE).stdout

        msgs = []
        while len(out):
            l, t = struct.unpack('>HH', out[0:4])
            msg = out[2:2 + l]
            out = out[2 + l:]
            # Ignore pings, timestamp_filter
            if t == 265 or t == 18:
                continue
            # channel_announcement node_announcement or channel_update
            assert t == 256 or t == 257 or t == 258
            msgs.append(msg)
        return msgs

    assert len(get_gossip(l1)) == 5

    # Close l2->l3, mine block.
    l2.rpc.close(l3.info['id'])
    bitcoind.generate_block(1, wait_for_mempool=1)

    l1.daemon.wait_for_log("closing soon due to the funding outpoint being spent")

    # We won't gossip the dead channel any more, nor the node_announcements.
    # But connectd is not explicitly synced, so wait for "a bit".
    time.sleep(1)

    assert get_gossip(l1) == []


def test_dump_own_gossip(node_factory):
    """We *should* send all self-related gossip unsolicited, if we have any"""
    l1, l2 = node_factory.line_graph(2, wait_for_announce=True)

    # Make sure l1 has updates in both directions, and node_announcements
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: len(l1.rpc.listnodes()['nodes']) == 2)

    # We should get channel_announcement, channel_update, node_announcement.
    # (Plus random pings, timestamp_filter)
    out = subprocess.run(['devtools/gossipwith',
                          '--timeout-after={}'.format(int(math.sqrt(TIMEOUT) + 1)),
                          '{}@localhost:{}'.format(l1.info['id'], l1.port)],
                         check=True,
                         timeout=TIMEOUT, stdout=subprocess.PIPE).stdout

    # In theory, we could do the node_announcement any time after channel_announcement, but we don't.
    expect = [256,  # channel_announcement
              258,  # channel_update
              258,  # channel_update
              257]  # node_announcement

    while len(out):
        l, t = struct.unpack('>HH', out[0:4])
        out = out[2 + l:]
        # Ignore pings, timestamp_filter
        if t == 265 or t == 18:
            continue
        assert t == expect[0]
        expect = expect[1:]

    # We should get exactly what we expected.
    assert expect == []


def test_listchannels_deprecated_local(node_factory, bitcoind):
    """Test listchannels shows local/private channels only in deprecated mode"""
    l1, l2, l3 = node_factory.get_nodes(3,
                                        opts=[{}, {'allow-deprecated-apis': True}, {}])

    # This will be in block 103
    node_factory.join_nodes([l1, l2], wait_for_announce=False)
    l1l2 = first_scid(l1, l2)
    # This will be in block 104
    node_factory.join_nodes([l2, l3], wait_for_announce=False)
    l2l3 = first_scid(l2, l3)

    # Non-deprecated nodes say no.
    assert l1.rpc.listchannels() == {'channels': []}
    assert l3.rpc.listchannels() == {'channels': []}

    # Deprecated API lists both sides of local channels:
    vals = [(c['active'], c['public'], c['short_channel_id']) for c in l2.rpc.listchannels()['channels']]
    # Either order
    assert vals == [(True, False, l1l2)] * 2 + [(True, False, l2l3)] * 2 or vals == [(True, False, l2l3)] * 2 + [(True, False, l1l2)] * 2

    # Mine l1-l2 channel so it's public.
    bitcoind.generate_block(4)
    sync_blockheight(bitcoind, [l1, l2, l3])
    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 2)
    wait_for(lambda: len(l3.rpc.listchannels()['channels']) == 2)

    # l2 shows public one correctly, and private one correctly
    # Either order
    vals = [(c['active'], c['public'], c['short_channel_id']) for c in l2.rpc.listchannels()['channels']]
    assert vals == [(True, True, l1l2)] * 2 + [(True, False, l2l3)] * 2 or vals == [(True, False, l2l3)] * 2 + [(True, True, l1l2)] * 2