core-lightning/connectd/connect_wire.csv
Commit 5591c0b5d8 (Rusty Russell): gossipd: don't send gossip stream, let per-peer daemons read it themselves.
Keeping the uintmap ordering all the broadcastable messages is expensive:
130MB for the million-channels project.  But now that we delete obsolete
entries from the store, the per-peer daemons can simply read it sequentially
and stream the gossip themselves.

This is the most primitive version, where all gossip is streamed;
successive patches will bring back proper handling of timestamp filtering
and initial_routing_sync.

We add a gossip_state field to track what's happening with our gossip
streaming: it's initialized in gossipd, and currently always set, but
once we handle timestamps the per-peer daemon may initialize it itself
when the first filter is sent.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2019-06-04 01:29:39 +00:00
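The streaming model described in the commit message is easy to picture: each per-peer daemon keeps its own read position in the gossip store and forwards whatever records it finds there. The fragment below is only an illustrative sketch of that idea, assuming a made-up record layout (a big-endian u16 length followed by the raw message bytes); it is not the real gossip_store format nor the actual gossipd/connectd code.

/* Illustrative only: a hypothetical length-prefixed store layout, NOT the
 * real gossip_store format.  Shows the idea of "read the store sequentially
 * and stream the gossip" from the commit message above. */
#include <endian.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

/* Read the next record from the store fd; returns NULL (and sets *len_out
 * to 0) at end of file.  Partial reads are not handled in this sketch. */
static uint8_t *read_next_gossip_record(int store_fd, uint16_t *len_out)
{
        uint16_t belen;
        uint8_t *msg;

        *len_out = 0;
        if (read(store_fd, &belen, sizeof(belen)) != sizeof(belen))
                return NULL;
        *len_out = be16toh(belen);

        msg = malloc(*len_out);
        if (!msg)
                return NULL;
        if (read(store_fd, msg, *len_out) != (ssize_t)*len_out) {
                free(msg);
                return NULL;
        }
        return msg;
}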


#include <common/cryptomsg.h>
#include <common/per_peer_state.h>
#include <common/wireaddr.h>
#include <lightningd/gossip_msg.h>
connectctl_init,2000
connectctl_init,,id,struct node_id
connectctl_init,,num_wireaddrs,u16
connectctl_init,,wireaddrs,num_wireaddrs*struct wireaddr_internal
connectctl_init,,listen_announce,num_wireaddrs*enum addr_listen_announce
connectctl_init,,tor_proxyaddr,?struct wireaddr
connectctl_init,,use_tor_proxy_always,bool
connectctl_init,,dev_allow_localhost,bool
connectctl_init,,use_dns,bool
connectctl_init,,tor_password,wirestring
# Connectd->master, here are the addresses I bound, can announce.
connectctl_init_reply,2100
connectctl_init_reply,,num_bindings,u16
connectctl_init_reply,,bindings,num_bindings*struct wireaddr_internal
connectctl_init_reply,,num_announcable,u16
connectctl_init_reply,,announcable,num_announcable*struct wireaddr
# Activate the connect daemon, so others can connect.
connectctl_activate,2025
# Do we listen?
connectctl_activate,,listen,bool
# Connectd->master, I am ready.
connectctl_activate_reply,2125
# connectd->master: disconnect this peer please (due to reconnect).
connect_reconnected,2112
connect_reconnected,,id,struct node_id
# Master -> connectd: connect to a peer.
connectctl_connect_to_peer,2001
connectctl_connect_to_peer,,id,struct node_id
connectctl_connect_to_peer,,seconds_waited,u32
connectctl_connect_to_peer,,addrhint,?struct wireaddr_internal
# Connectd->master: connect failed.
connectctl_connect_failed,2020
connectctl_connect_failed,,id,struct node_id
connectctl_connect_failed,,failreason,wirestring
connectctl_connect_failed,,seconds_to_delay,u32
connectctl_connect_failed,,addrhint,?struct wireaddr_internal
# Connectd -> master: we got a peer. Three fds: peer, gossip and gossip_store
connect_peer_connected,2002
connect_peer_connected,,id,struct node_id
connect_peer_connected,,addr,struct wireaddr_internal
connect_peer_connected,,pps,struct per_peer_state
connect_peer_connected,,gflen,u16
connect_peer_connected,,globalfeatures,gflen*u8
connect_peer_connected,,lflen,u16
connect_peer_connected,,localfeatures,lflen*u8
# master -> connectd: peer has disconnected.
connectctl_peer_disconnected,2015
connectctl_peer_disconnected,,id,struct node_id
# master -> connectd: do you have a memleak?
connect_dev_memleak,2033
connect_dev_memleak_reply,2133
connect_dev_memleak_reply,,leak,bool
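
For reference, each `name,value` line above assigns a message type number, the `name,,field,type` lines that follow list that message's fields in wire order, a leading `?` marks an optional field, and `count*type` is an array whose length is given by the preceding count field. The wire generator turns each message into towire_/fromwire_ helpers. The snippet below is a hedged sketch of building a connectctl_connect_to_peer (type 2001) request on the master side; the generated header name and the exact helper signature follow the usual towire_<msgname> convention but are assumptions, not copied from the generated code.

/* Hedged sketch: assumes the generator emits
 * towire_connectctl_connect_to_peer() with one argument per CSV field, and
 * that the generated header is connectd/gen_connect_wire.h (both assumptions). */
#include <ccan/short_types/short_types.h>
#include <ccan/tal/tal.h>
#include <common/node_id.h>
#include <common/wireaddr.h>
#include <connectd/gen_connect_wire.h>

static u8 *make_connect_request(const tal_t *ctx,
                                const struct node_id *id,
                                u32 seconds_waited,
                                const struct wireaddr_internal *addrhint)
{
        /* addrhint is "?struct wireaddr_internal" in the CSV, so NULL is a
         * valid "no address hint" value. */
        return towire_connectctl_connect_to_peer(ctx, id, seconds_waited,
                                                 addrhint);
}

On the connectd side, the matching fromwire_connectctl_connect_to_peer() would parse those same fields back out of the incoming message.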