/*~ This contains all the code to shuffle data between the socket to the peer
 * itself, and the subdaemons. */
#include "config.h"
#include <assert.h>
#include <bitcoin/block.h>
#include <bitcoin/chainparams.h>
#include <ccan/io/io.h>
#include <common/cryptomsg.h>
#include <common/daemon_conn.h>
#include <common/dev_disconnect.h>
#include <common/features.h>
#include <common/gossip_constants.h>
#include <common/gossip_store.h>
#include <common/memleak.h>
#include <common/per_peer_state.h>
#include <common/ping.h>
#include <common/status.h>
#include <common/timeout.h>
#include <common/type_to_string.h>
#include <common/utils.h>
#include <common/wire_error.h>
#include <connectd/connectd.h>
#include <connectd/connectd_gossipd_wiregen.h>
#include <connectd/connectd_wiregen.h>
#include <connectd/gossip_rcvd_filter.h>
#include <connectd/multiplex.h>
#include <connectd/onion_message.h>
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <wire/peer_wire.h>
#include <wire/wire.h>
#include <wire/wire_io.h>
#include <wire/wire_sync.h>

struct subd {
	/* Owner: we are in peer->subds[] */
	struct peer *peer;

	/* The temporary or permanent channel_id */
	struct channel_id channel_id;

	/* In passing, we can have a temporary one, too. */
	struct channel_id *temporary_channel_id;

	/* The opening revocation basepoint, for v2 channel_id. */
	struct pubkey *opener_revocation_basepoint;

	/* The actual connection to talk to it (NULL if it's not connected yet) */
	struct io_conn *conn;

	/* Input buffer */
	u8 *in;

	/* Output buffer */
	struct msg_queue *outq;
};

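/* Find the subd that owns this channel_id: the real id wins (and, once
 * seen, retires the temporary id); otherwise match the temporary id. */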
static struct subd *find_subd(struct peer *peer,
			      const struct channel_id *channel_id)
{
	for (size_t i = 0; i < tal_count(peer->subds); i++) {
		struct subd *subd = peer->subds[i];

		/* Once we see a message using the real channel_id, we
		 * clear the temporary_channel_id */
		if (channel_id_eq(&subd->channel_id, channel_id)) {
			subd->temporary_channel_id
				= tal_free(subd->temporary_channel_id);
			return subd;
		}
		if (subd->temporary_channel_id
		    && channel_id_eq(subd->temporary_channel_id, channel_id)) {
			return subd;
		}
	}
	return NULL;
}

/* Except for a reconnection, we finally free a peer when the io_conn
 * is closed and all subds are gone. */
static void maybe_free_peer(struct peer *peer)
{
	if (peer->to_peer)
		return;
	if (tal_count(peer->subds) != 0)
		return;
	status_debug("maybe_free_peer freeing peer!");
	tal_free(peer);
}

/* We try to send the final messages, but if buffer is full and they're
 * not reading, we have to give up. */
static void close_peer_io_timeout(struct peer *peer)
{
	/* BROKEN means we'll trigger CI if we see it, though it's possible */
	status_peer_broken(&peer->id, "Peer did not close, forcing close");
	io_close(peer->to_peer);
}

static void close_subd_timeout(struct subd *subd)
{
	/* BROKEN means we'll trigger CI if we see it, though it's possible */
	status_peer_broken(&subd->peer->id, "Subd did not close, forcing close");
	io_close(subd->conn);
}

static void drain_peer(struct peer *peer)
{
	status_debug("drain_peer");
	assert(!peer->draining);

	/* Since we immediately free any subds we didn't connect yet,
	 * we need peer->to_peer set so it won't free peer! */
	assert(peer->to_peer);

	/* Mark us draining: write_to_peer() and friends check this. */
	peer->draining = true;

	/* Give the subds 5 seconds to close their fds to us. */
	for (size_t i = 0; i < tal_count(peer->subds); i++) {
		if (!peer->subds[i]->conn) {
			/* Deletes itself from array, so be careful! */
			tal_free(peer->subds[i]);
			i--;
			continue;
		}
		status_debug("drain_peer draining subd!");
		notleak(new_reltimer(&peer->daemon->timers,
				     peer->subds[i], time_from_sec(5),
				     close_subd_timeout, peer->subds[i]));
		/* Wake any outgoing queued on subd */
		io_wake(peer->subds[i]->outq);
	}

	/* Wake them to ensure they notice the close! */
	io_wake(&peer->subds);

	if (peer->to_peer) {
		/* You have 5 seconds to drain... */
		notleak(new_reltimer(&peer->daemon->timers,
				     peer->to_peer, time_from_sec(5),
				     close_peer_io_timeout, peer));
	}

	/* Clean peer from hashtable; we no longer exist. */
	destroy_peer(peer);
	tal_del_destructor(peer, destroy_peer);

	/* This is a 5-second leak, worst case! */
	notleak(peer);

	/* Start draining process! */
	io_wake(peer->peer_outq);
}

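/* The drain sequence, in short: the 5-second timers above are only a
 * backstop.  Normally write_to_peer() flushes peer_outq, each subd conn
 * closes, and maybe_free_peer() does the final free once to_peer is NULL
 * and the subd array is empty. */
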
void inject_peer_msg(struct peer *peer, const u8 *msg TAKES)
{
	status_peer_io(LOG_IO_OUT, &peer->id, msg);
	msg_enqueue(peer->peer_outq, msg);
}

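/* Queue this one last message (e.g. a warning), then begin the
 * drain-and-close sequence. */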
void multiplex_final_msg(struct peer *peer, const u8 *final_msg TAKES)
{
	inject_peer_msg(peer, final_msg);
	drain_peer(peer);
}

/* Send warning, close connection to peer */
static void send_warning(struct peer *peer, const char *fmt, ...)
{
	va_list ap;
	u8 *msg;

	va_start(ap, fmt);
	status_vfmt(LOG_UNUSUAL, &peer->id, fmt, ap);
	va_end(ap);

	va_start(ap, fmt);
	msg = towire_warningfmtv(NULL, NULL, fmt, ap);
	va_end(ap);

	multiplex_final_msg(peer, take(msg));
}

/* Kicks off write_to_peer() to look for more gossip to send from store */
static void wake_gossip(struct peer *peer);

static struct oneshot *gossip_stream_timer(struct peer *peer)
{
	u32 next;

	/* BOLT #7:
	 *
	 * A node:
	 *...
	 *  - SHOULD flush outgoing gossip messages once every 60 seconds,
	 *    independently of the arrival times of the messages.
	 *    - Note: this results in staggered announcements that are unique
	 *      (not duplicated).
	 */
	/* We shorten this for dev_fast_gossip! */
	next = GOSSIP_FLUSH_INTERVAL(peer->daemon->dev_fast_gossip);

	return new_reltimer(&peer->daemon->timers,
			    peer, time_from_sec(next),
			    wake_gossip, peer);
}

/* It's so common to ask for "recent" gossip (we ask for 10 minutes
 * ago, LND and Eclair ask for now, LDK asks for 1 hour ago) that it's
 * worth keeping track of where that starts, so we can skip most of
 * the store. */
static void update_recent_timestamp(struct daemon *daemon)
{
	/* 2 hours allows for some clock drift, not too much gossip */
	u32 recent = time_now().ts.tv_sec - 7200;

	/* Only update every minute */
	if (daemon->gossip_recent_time + 60 > recent)
		return;

	daemon->gossip_recent_time = recent;
	daemon->gossip_store_recent_off
		= find_gossip_store_by_timestamp(daemon->gossip_store_fd,
						 daemon->gossip_store_recent_off,
						 daemon->gossip_recent_time);
}

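/* Example: with time_now() at 1657870800, `recent` is 1657863600; we only
 * re-scan the store for a new offset once that target has moved 60 seconds
 * past the cached gossip_recent_time. */
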
/* This is called once we need it: otherwise, the gossip_store may not exist,
 * since we start at the same time as gossipd itself. */
static void setup_gossip_store(struct daemon *daemon)
{
	daemon->gossip_store_fd = open(GOSSIP_STORE_FILENAME, O_RDONLY);
	if (daemon->gossip_store_fd < 0)
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "Opening gossip_store %s: %s",
			      GOSSIP_STORE_FILENAME, strerror(errno));

	daemon->gossip_recent_time = 0;
	daemon->gossip_store_recent_off = 1;
	update_recent_timestamp(daemon);

	/* gossipd will be writing to this, and it's not atomic!  Safest
	 * way to find the "end" is to walk through. */
	daemon->gossip_store_end
		= find_gossip_store_end(daemon->gossip_store_fd,
					daemon->gossip_store_recent_off);
}

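/* Note: offset 1 (rather than 0) is assumed to skip the gossip_store's
 * single version byte, i.e. it points at the first real record. */
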
void setup_peer_gossip_store(struct peer *peer,
			     const struct feature_set *our_features,
			     const u8 *their_features)
{
	/* Lazy setup */
	if (peer->daemon->gossip_store_fd == -1)
		setup_gossip_store(peer->daemon);

	peer->gs.grf = new_gossip_rcvd_filter(peer);

	/* BOLT #7:
	 *
	 * A node:
	 *  - if the `gossip_queries` feature is negotiated:
	 *    - MUST NOT relay any gossip messages it did not generate itself,
	 *      unless explicitly requested.
	 */
	if (feature_negotiated(our_features, their_features, OPT_GOSSIP_QUERIES)) {
		peer->gs.gossip_timer = NULL;
		peer->gs.active = false;
		peer->gs.off = 1;
		return;
	}

	peer->gs.gossip_timer = gossip_stream_timer(peer);
	peer->gs.active = IFDEV(!peer->daemon->dev_suppress_gossip, true);
	peer->gs.timestamp_min = 0;
	peer->gs.timestamp_max = UINT32_MAX;

	/* BOLT #7:
	 *
	 * - upon receiving an `init` message with the
	 *   `initial_routing_sync` flag set to 1:
	 *   - SHOULD send gossip messages for all known channels and
	 *     nodes, as if they were just received.
	 * - if the `initial_routing_sync` flag is set to 0, OR if the
	 *   initial sync was completed:
	 *   - SHOULD resume normal operation, as specified in the
	 *     following [Rebroadcasting](#rebroadcasting) section.
	 */
	if (feature_offered(their_features, OPT_INITIAL_ROUTING_SYNC))
		peer->gs.off = 1;
	else {
		/* During tests, particularly, we find that the gossip_store
		 * moves fast, so make sure it really does start at the end. */
		peer->gs.off
			= find_gossip_store_end(peer->daemon->gossip_store_fd,
						peer->daemon->gossip_store_end);
	}
}

/* We're happy for the kernel to batch update and gossip messages, but a
 * commitment message, for example, should be instantly sent.  There's no
 * great way of doing this, unfortunately.
 *
 * Setting TCP_NODELAY on Linux flushes the socket, which really means
 * we'd want to toggle it on then off *after* sending.  But Linux has
 * TCP_CORK.  On FreeBSD, it seems (looking at source) not to, so
 * there we'd want to set it before the send, and reenable it
 * afterwards.  Even if this is wrong on other non-Linux platforms, it
 * only means one extra packet.
 */
static void set_urgent_flag(struct peer *peer, bool urgent)
{
	int val;
	int opt;
	const char *optname;
	static bool complained = false;

	if (urgent == peer->urgent)
		return;

#ifdef TCP_CORK
	opt = TCP_CORK;
	optname = "TCP_CORK";
#elif defined(TCP_NODELAY)
	opt = TCP_NODELAY;
	optname = "TCP_NODELAY";
#else
#error "Please report platform with neither TCP_CORK nor TCP_NODELAY?"
#endif

	val = urgent;
	if (setsockopt(io_conn_fd(peer->to_peer),
		       IPPROTO_TCP, opt, &val, sizeof(val)) != 0) {
		/* This actually happens in testing, where we blackhole the fd */
		if (!complained) {
			status_unusual("setsockopt %s=1: %s",
				       optname,
				       strerror(errno));
			complained = true;
		}
	}
	peer->urgent = urgent;
}

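/* encrypt_and_send() calls this with is_urgent(type) for every outgoing
 * message: the socket stays corked while we stream bulk gossip, and is
 * flushed as soon as a time-sensitive message (see below) goes out. */
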
static bool is_urgent(enum peer_wire type)
{
	switch (type) {
	case WIRE_INIT:
	case WIRE_ERROR:
	case WIRE_WARNING:
	case WIRE_TX_ADD_INPUT:
	case WIRE_TX_ADD_OUTPUT:
	case WIRE_TX_REMOVE_INPUT:
	case WIRE_TX_REMOVE_OUTPUT:
	case WIRE_TX_COMPLETE:
	case WIRE_TX_SIGNATURES:
	case WIRE_OPEN_CHANNEL:
	case WIRE_ACCEPT_CHANNEL:
	case WIRE_FUNDING_CREATED:
	case WIRE_FUNDING_SIGNED:
	case WIRE_FUNDING_LOCKED:
	case WIRE_OPEN_CHANNEL2:
	case WIRE_ACCEPT_CHANNEL2:
	case WIRE_INIT_RBF:
	case WIRE_ACK_RBF:
	case WIRE_SHUTDOWN:
	case WIRE_CLOSING_SIGNED:
	case WIRE_UPDATE_ADD_HTLC:
	case WIRE_UPDATE_FULFILL_HTLC:
	case WIRE_UPDATE_FAIL_HTLC:
	case WIRE_UPDATE_FAIL_MALFORMED_HTLC:
	case WIRE_UPDATE_FEE:
	case WIRE_UPDATE_BLOCKHEIGHT:
	case WIRE_CHANNEL_REESTABLISH:
	case WIRE_ANNOUNCEMENT_SIGNATURES:
	case WIRE_CHANNEL_ANNOUNCEMENT:
	case WIRE_NODE_ANNOUNCEMENT:
	case WIRE_CHANNEL_UPDATE:
	case WIRE_QUERY_SHORT_CHANNEL_IDS:
	case WIRE_REPLY_SHORT_CHANNEL_IDS_END:
	case WIRE_QUERY_CHANNEL_RANGE:
	case WIRE_REPLY_CHANNEL_RANGE:
	case WIRE_GOSSIP_TIMESTAMP_FILTER:
	case WIRE_OBS2_ONION_MESSAGE:
	case WIRE_ONION_MESSAGE:
#if EXPERIMENTAL_FEATURES
	case WIRE_STFU:
#endif
		return false;

	/* These are time-sensitive, and so send without delay. */
	case WIRE_PING:
	case WIRE_PONG:
	case WIRE_COMMITMENT_SIGNED:
	case WIRE_REVOKE_AND_ACK:
		return true;
	};

	/* plugins can inject other messages; assume not urgent. */
	return false;
}

/* io_sock_shutdown, but in format suitable for an io_plan callback */
static struct io_plan *io_sock_shutdown_cb(struct io_conn *conn, struct peer *unused)
{
	return io_sock_shutdown(conn);
}

static struct io_plan *encrypt_and_send(struct peer *peer,
					const u8 *msg TAKES,
					struct io_plan *(*next)
					(struct io_conn *peer_conn,
					 struct peer *peer))
{
	int type = fromwire_peektype(msg);

#if DEVELOPER
	switch (dev_disconnect(&peer->id, type)) {
	case DEV_DISCONNECT_BEFORE:
		if (taken(msg))
			tal_free(msg);
		return io_close(peer->to_peer);
	case DEV_DISCONNECT_AFTER:
		/* Disallow reads from now on */
		peer->dev_read_enabled = false;
		next = (void *)io_close_cb;
		break;
	case DEV_DISCONNECT_BLACKHOLE:
		/* Disable both reads and writes from now on */
		peer->dev_read_enabled = false;
		peer->dev_writes_enabled = talz(peer, u32);
		break;
	case DEV_DISCONNECT_NORMAL:
		break;
	case DEV_DISCONNECT_DISABLE_AFTER:
		peer->dev_read_enabled = false;
		peer->dev_writes_enabled = tal(peer, u32);
		*peer->dev_writes_enabled = 1;
		break;
	}
#endif
	set_urgent_flag(peer, is_urgent(type));

	/* BOLT #1:
	 *
	 * A sending node:
	 *...
	 *  - MAY close the connection after sending.
	 */
	if (type == WIRE_ERROR || type == WIRE_WARNING) {
		/* Might already be draining... */
		if (!peer->draining)
			drain_peer(peer);

		/* Close as soon as we've sent this. */
		next = io_sock_shutdown_cb;
	}

	/* We free this and the encrypted version in next write_to_peer */
	peer->sent_to_peer = cryptomsg_encrypt_msg(peer, &peer->cs, msg);
	return io_write(peer->to_peer,
			peer->sent_to_peer,
			tal_bytelen(peer->sent_to_peer),
			next, peer);
}

/* Kicks off write_to_peer() to look for more gossip to send from store */
static void wake_gossip(struct peer *peer)
{
	bool flush_gossip_filter = true;

#if DEVELOPER
	/* With dev-fast-gossip, we clean every 2 seconds, which is too
	 * fast for our slow tests!  So we only call this one time in 5
	 * (actually twice that, as it's not per-peer!). */
	static int gossip_age_count;

	if (peer->daemon->dev_fast_gossip && gossip_age_count++ % 5 != 0)
		flush_gossip_filter = false;
#endif

	/* Don't remember sent per-peer gossip forever. */
	if (flush_gossip_filter)
		gossip_rcvd_filter_age(peer->gs.grf);

	peer->gs.active = IFDEV(!peer->daemon->dev_suppress_gossip, true);
	io_wake(peer->peer_outq);

	/* And go again in 60 seconds (from now, not when we finish!) */
	peer->gs.gossip_timer = gossip_stream_timer(peer);
}

/* If we are streaming gossip, get something from gossip store */
static u8 *maybe_from_gossip_store(const tal_t *ctx, struct peer *peer)
{
	u8 *msg;

	/* dev-mode can suppress all gossip */
	if (IFDEV(peer->daemon->dev_suppress_gossip, false))
		return NULL;

	/* BOLT #7:
	 *  - if the `gossip_queries` feature is negotiated:
	 *    - MUST NOT relay any gossip messages it did not generate itself,
	 *      unless explicitly requested.
	 */

	/* So, even if they didn't send us a timestamp_filter message,
	 * we *still* send our own gossip. */
	if (!peer->gs.gossip_timer) {
		return gossip_store_next(ctx, &peer->daemon->gossip_store_fd,
					 0, 0xFFFFFFFF,
					 true,
					 false,
					 &peer->gs.off,
					 &peer->daemon->gossip_store_end);
	}

	/* Not streaming right now? */
	if (!peer->gs.active)
		return NULL;

	/* This should be around to kick us every 60 seconds */
	assert(peer->gs.gossip_timer);

again:
	msg = gossip_store_next(ctx, &peer->daemon->gossip_store_fd,
				peer->gs.timestamp_min,
				peer->gs.timestamp_max,
				false,
				false,
				&peer->gs.off,
				&peer->daemon->gossip_store_end);
	/* Don't send back gossip they sent to us! */
	if (msg) {
		if (gossip_rcvd_filter_del(peer->gs.grf, msg)) {
			msg = tal_free(msg);
			goto again;
		}
		status_peer_io(LOG_IO_OUT, &peer->id, msg);
		return msg;
	}

	peer->gs.active = false;
	return NULL;
}

/* Mutual recursion */
static void send_ping(struct peer *peer);

static void set_ping_timer(struct peer *peer)
{
	if (IFDEV(peer->daemon->dev_no_ping_timer, false)) {
		peer->ping_timer = NULL;
		return;
	}
	peer->ping_timer = new_reltimer(&peer->daemon->timers, peer,
					time_from_sec(15 + pseudorand(30)),
					send_ping, peer);
}

static void send_ping(struct peer *peer)
{
	/* If it's still sending us traffic, maybe ping reply is backed up?
	 * That's OK, ping is just to make sure it's still alive, and clearly
	 * it is. */
	if (time_before(peer->last_recv_time,
			timeabs_sub(time_now(), time_from_sec(60)))) {
		/* Already have a ping in flight? */
		if (peer->expecting_pong != PONG_UNEXPECTED) {
			status_peer_debug(&peer->id, "Last ping unreturned: hanging up");
			if (peer->to_peer)
				io_close(peer->to_peer);
			return;
		}

		inject_peer_msg(peer, take(make_ping(NULL, 1, 0)));
		peer->expecting_pong = PONG_EXPECTED_PROBING;
	}

	set_ping_timer(peer);
}

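/* Note: the randomized 15-45 second interval above keeps pings to many
 * peers from synchronizing into bursts. */
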
void send_custommsg(struct daemon *daemon, const u8 *msg)
{
	struct node_id id;
	u8 *custommsg;
	struct peer *peer;

	if (!fromwire_connectd_custommsg_out(tmpctx, msg, &id, &custommsg))
		master_badmsg(WIRE_CONNECTD_CUSTOMMSG_OUT, msg);

	/* Races can happen: this might be gone by now. */
	peer = peer_htable_get(&daemon->peers, &id);
	if (peer)
		inject_peer_msg(peer, take(custommsg));
}

static void handle_ping_in(struct peer *peer, const u8 *msg)
{
	u8 *pong;

	/* gossipd doesn't log IO, so we log it here. */
	status_peer_io(LOG_IO_IN, &peer->id, msg);

	if (!check_ping_make_pong(NULL, msg, &pong)) {
		send_warning(peer, "Invalid ping %s", tal_hex(msg, msg));
		return;
	}

	if (pong)
		inject_peer_msg(peer, take(pong));
}

static void handle_ping_reply(struct peer *peer, const u8 *msg)
{
	u8 *ignored;
	size_t i;

	/* We print this out because we asked for pong, so they can't spam us... */
	if (!fromwire_pong(msg, msg, &ignored))
		status_peer_unusual(&peer->id, "Got malformed ping reply %s",
				    tal_hex(tmpctx, msg));

	/* We print this because dev versions of Core Lightning embed
	 * version here: see check_ping_make_pong! */
	for (i = 0; i < tal_count(ignored); i++) {
		if (ignored[i] < ' ' || ignored[i] == 127)
			break;
	}
	status_debug("Got pong %zu bytes (%.*s...)",
		     tal_count(ignored), (int)i, (char *)ignored);
	daemon_conn_send(peer->daemon->master,
			 take(towire_connectd_ping_reply(NULL, true,
							 tal_bytelen(msg))));
}

static void handle_pong_in(struct peer *peer, const u8 *msg)
{
	/* gossipd doesn't log IO, so we log it here. */
	status_peer_io(LOG_IO_IN, &peer->id, msg);

	switch (peer->expecting_pong) {
	case PONG_EXPECTED_COMMAND:
		handle_ping_reply(peer, msg);
		/* fall thru */
	case PONG_EXPECTED_PROBING:
		peer->expecting_pong = PONG_UNEXPECTED;
		return;
	case PONG_UNEXPECTED:
		status_debug("Unexpected pong?");
		return;
	}
	abort();
}

/* Forward to gossipd */
static void handle_gossip_in(struct peer *peer, const u8 *msg)
{
	u8 *gmsg = towire_gossipd_recv_gossip(NULL, &peer->id, msg);

	/* gossipd doesn't log IO, so we log it here. */
	status_peer_io(LOG_IO_IN, &peer->id, msg);
	daemon_conn_send(peer->daemon->gossipd, take(gmsg));
}

static void handle_gossip_timestamp_filter_in(struct peer *peer, const u8 *msg)
{
	struct bitcoin_blkid chain_hash;
	u32 first_timestamp, timestamp_range;

	if (!fromwire_gossip_timestamp_filter(msg, &chain_hash,
					      &first_timestamp,
					      &timestamp_range)) {
		send_warning(peer, "gossip_timestamp_filter invalid: %s",
			     tal_hex(tmpctx, msg));
		return;
	}

	/* gossipd doesn't log IO, so we log it here. */
	status_peer_io(LOG_IO_IN, &peer->id, msg);

	if (!bitcoin_blkid_eq(&chainparams->genesis_blockhash, &chain_hash)) {
		send_warning(peer, "gossip_timestamp_filter for bad chain: %s",
			     tal_hex(tmpctx, msg));
		return;
	}

	peer->gs.timestamp_min = first_timestamp;
	peer->gs.timestamp_max = first_timestamp + timestamp_range - 1;
	/* Make sure we never leave it on an impossible value. */
	if (peer->gs.timestamp_max < peer->gs.timestamp_min)
		peer->gs.timestamp_max = UINT32_MAX;

	/* Optimization: they don't want anything.  LND and us (at least)
	 * both set first_timestamp to 0xFFFFFFFF to indicate that. */
	if (peer->gs.timestamp_min == UINT32_MAX)
		peer->gs.off = peer->daemon->gossip_store_end;
	else {
		/* Second optimization: it's common to ask for "recent" gossip,
		 * so we don't have to start at beginning of store. */
		update_recent_timestamp(peer->daemon);
		if (peer->gs.timestamp_min >= peer->daemon->gossip_recent_time)
			peer->gs.off = peer->daemon->gossip_store_recent_off;
		else
			peer->gs.off = 1;
	}

	/* BOLT #7:
	 *  - MAY wait for the next outgoing gossip flush to send these.
	 */
	/* We send immediately the first time, after that we wait. */
	if (!peer->gs.gossip_timer)
		wake_gossip(peer);
}

static bool handle_custommsg(struct daemon *daemon,
			     struct peer *peer,
			     const u8 *msg)
{
	enum peer_wire type = fromwire_peektype(msg);
	if (type % 2 == 1 && !peer_wire_is_defined(type)) {
		/* The message is not part of the messages we know how to
		 * handle.  Assuming this is a custommsg, we just forward it to the
		 * master. */
		status_peer_io(LOG_IO_IN, &peer->id, msg);
		daemon_conn_send(daemon->master,
				 take(towire_connectd_custommsg_in(NULL,
								   &peer->id,
								   msg)));
		return true;
	} else {
		return false;
	}
}

/* We handle pings and gossip messages. */
static bool handle_message_locally(struct peer *peer, const u8 *msg)
{
	enum peer_wire type = fromwire_peektype(msg);

	/* We remember these so we don't rexmit them */
	gossip_rcvd_filter_add(peer->gs.grf, msg);

	if (type == WIRE_GOSSIP_TIMESTAMP_FILTER) {
		handle_gossip_timestamp_filter_in(peer, msg);
		return true;
	} else if (type == WIRE_PING) {
		handle_ping_in(peer, msg);
		return true;
	} else if (type == WIRE_PONG) {
		handle_pong_in(peer, msg);
		return true;
	} else if (type == WIRE_OBS2_ONION_MESSAGE) {
		handle_obs2_onion_message(peer->daemon, peer, msg);
		return true;
	} else if (type == WIRE_ONION_MESSAGE) {
		handle_onion_message(peer->daemon, peer, msg);
		return true;
	} else if (handle_custommsg(peer->daemon, peer, msg)) {
		return true;
	}

	/* Do we want to divert to gossipd? */
	if (is_msg_for_gossipd(msg)) {
		handle_gossip_in(peer, msg);
		return true;
	}

	return false;
}

/* Move "channel_id" to temporary. */
static void move_channel_id_to_temp(struct subd *subd)
{
	tal_free(subd->temporary_channel_id);
	subd->temporary_channel_id
		= tal_dup(subd, struct channel_id, &subd->channel_id);
}

/* Only works for open_channel2 and accept_channel2 */
static struct pubkey *extract_revocation_basepoint(const tal_t *ctx,
						   const u8 *msg)
{
	const u8 *cursor = msg;
	size_t max = tal_bytelen(msg);
	enum peer_wire t;
	struct pubkey pubkey;

	t = fromwire_u16(&cursor, &max);

	switch (t) {
	case WIRE_OPEN_CHANNEL2:
		/* BOLT-dualfund #2:
		 * 1. type: 64 (`open_channel2`)
		 * 2. data:
		 *    * [`chain_hash`:`chain_hash`]
		 *    * [`channel_id`:`zerod_channel_id`]
		 *    * [`u32`:`funding_feerate_perkw`]
		 *    * [`u32`:`commitment_feerate_perkw`]
		 *    * [`u64`:`funding_satoshis`]
		 *    * [`u64`:`dust_limit_satoshis`]
		 *    * [`u64`:`max_htlc_value_in_flight_msat`]
		 *    * [`u64`:`htlc_minimum_msat`]
		 *    * [`u16`:`to_self_delay`]
		 *    * [`u16`:`max_accepted_htlcs`]
		 *    * [`u32`:`locktime`]
		 *    * [`point`:`funding_pubkey`]
		 *    * [`point`:`revocation_basepoint`]
		 */
		fromwire_pad(&cursor, &max,
			     sizeof(struct bitcoin_blkid)
			     + sizeof(struct channel_id)
			     + sizeof(u32)
			     + sizeof(u32)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u16)
			     + sizeof(u16)
			     + sizeof(u32)
			     + PUBKEY_CMPR_LEN);
		break;
	case WIRE_ACCEPT_CHANNEL2:
		/* BOLT-dualfund #2:
		 * 1. type: 65 (`accept_channel2`)
		 * 2. data:
		 *    * [`channel_id`:`zerod_channel_id`]
		 *    * [`u64`:`funding_satoshis`]
		 *    * [`u64`:`dust_limit_satoshis`]
		 *    * [`u64`:`max_htlc_value_in_flight_msat`]
		 *    * [`u64`:`htlc_minimum_msat`]
		 *    * [`u32`:`minimum_depth`]
		 *    * [`u16`:`to_self_delay`]
		 *    * [`u16`:`max_accepted_htlcs`]
		 *    * [`point`:`funding_pubkey`]
		 *    * [`point`:`revocation_basepoint`]
		 */
		fromwire_pad(&cursor, &max,
			     sizeof(struct channel_id)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u32)
			     + sizeof(u16)
			     + sizeof(u16)
			     + PUBKEY_CMPR_LEN);
		break;
	default:
		abort();
	}

	fromwire_pubkey(&cursor, &max, &pubkey);
	if (!cursor)
		return NULL;
	return tal_dup(ctx, struct pubkey, &pubkey);
}

/* Only works for funding_created */
static bool extract_funding_created_funding(const u8 *funding_created,
					    struct bitcoin_outpoint *outp)
{
	const u8 *cursor = funding_created;
	size_t max = tal_bytelen(funding_created);
	enum peer_wire t;

	t = fromwire_u16(&cursor, &max);

	switch (t) {
	case WIRE_FUNDING_CREATED:
		/* BOLT #2:
		 * 1. type: 34 (`funding_created`)
		 * 2. data:
		 *     * [`32*byte`:`temporary_channel_id`]
		 *     * [`sha256`:`funding_txid`]
		 *     * [`u16`:`funding_output_index`]
		 */
		fromwire_pad(&cursor, &max, 32);
		fromwire_bitcoin_txid(&cursor, &max, &outp->txid);
		outp->n = fromwire_u16(&cursor, &max);
		break;
	default:
		abort();
	}

	return cursor != NULL;
}

static void update_v1_channelid(struct subd *subd, const u8 *funding_created)
{
	struct bitcoin_outpoint outp;

	if (!extract_funding_created_funding(funding_created, &outp)) {
		status_peer_unusual(&subd->peer->id, "WARNING: funding_created no tx info?");
		return;
	}
	move_channel_id_to_temp(subd);
	derive_channel_id(&subd->channel_id, &outp);
}

static void update_v2_channelid(struct subd *subd, const u8 *accept_channel2)
{
	struct pubkey *acc_basepoint;

	acc_basepoint = extract_revocation_basepoint(tmpctx, accept_channel2);
	if (!acc_basepoint) {
		status_peer_unusual(&subd->peer->id, "WARNING: accept_channel2 no revocation_basepoint?");
		return;
	}
	if (!subd->opener_revocation_basepoint) {
		status_peer_unusual(&subd->peer->id, "WARNING: accept_channel2 without open_channel2?");
		return;
	}

	move_channel_id_to_temp(subd);
	derive_channel_id_v2(&subd->channel_id,
			     subd->opener_revocation_basepoint, acc_basepoint);
}

/* We maintain channel_id matching for subds by snooping: we set it manually
 * for first packet (open_channel or open_channel2). */
static void maybe_update_channelid(struct subd *subd, const u8 *msg)
{
	switch (fromwire_peektype(msg)) {
	case WIRE_OPEN_CHANNEL:
		extract_channel_id(msg, &subd->channel_id);
		break;
	case WIRE_OPEN_CHANNEL2:
		subd->opener_revocation_basepoint
			= extract_revocation_basepoint(subd, msg);
		break;
	case WIRE_ACCEPT_CHANNEL2:
		update_v2_channelid(subd, msg);
		break;
	case WIRE_FUNDING_CREATED:
		update_v1_channelid(subd, msg);
		break;
	}
}

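/* Net effect: open_channel fixes the id directly; open_channel2 stashes the
 * opener's revocation basepoint, then funding_created (v1) or
 * accept_channel2 (v2) derives the real channel_id, keeping the old one as
 * temporary so find_subd() still matches in-flight messages. */
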
static struct io_plan *write_to_peer(struct io_conn *peer_conn,
				     struct peer *peer)
{
	const u8 *msg;
	assert(peer->to_peer == peer_conn);

	/* Free last sent one (if any) */
	peer->sent_to_peer = tal_free(peer->sent_to_peer);

	/* Pop tail of send queue */
	msg = msg_dequeue(peer->peer_outq);

	/* Still nothing to send? */
	if (!msg) {
		/* Draining?  We're done when subds are done. */
		if (peer->draining && tal_count(peer->subds) == 0)
			return io_sock_shutdown(peer_conn);

		/* If they want us to send gossip, do so now. */
		if (!peer->draining)
			msg = maybe_from_gossip_store(NULL, peer);
		if (!msg) {
			/* Tell them to read again. */
			io_wake(&peer->subds);

			/* Wait for them to wake us */
			return msg_queue_wait(peer_conn, peer->peer_outq,
					      write_to_peer, peer);
		}
	}

	/* dev_disconnect can disable writes */
#if DEVELOPER
	if (peer->dev_writes_enabled) {
		if (*peer->dev_writes_enabled == 0) {
			tal_free(msg);
			/* Continue, to drain queue */
			return write_to_peer(peer_conn, peer);
		}
		(*peer->dev_writes_enabled)--;
	}
#endif

	return encrypt_and_send(peer, take(msg), write_to_peer);
}

static struct io_plan *read_from_subd(struct io_conn *subd_conn,
				      struct subd *subd);
static struct io_plan *read_from_subd_done(struct io_conn *subd_conn,
					   struct subd *subd)
{
	maybe_update_channelid(subd, subd->in);

	/* Tell them to encrypt & write. */
	msg_enqueue(subd->peer->peer_outq, take(subd->in));
	subd->in = NULL;

	/* Wait for them to wake us */
	return io_wait(subd_conn, &subd->peer->subds, read_from_subd, subd);
}

static struct io_plan *read_from_subd(struct io_conn *subd_conn,
				      struct subd *subd)
{
	return io_read_wire(subd_conn, subd, &subd->in,
			    read_from_subd_done, subd);
}

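/* Back-pressure note: read_from_subd_done() parks each subd in io_wait()
 * until write_to_peer() drains peer_outq and calls io_wake(&peer->subds),
 * so a slow peer cannot buffer unbounded data from its subdaemons. */
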
/* These four functions handle peer->subd */
static struct io_plan *write_to_subd(struct io_conn *subd_conn,
				     struct subd *subd)
{
	const u8 *msg;
	assert(subd->conn == subd_conn);

	/* Pop tail of send queue */
	msg = msg_dequeue(subd->outq);

	/* Nothing to send? */
	if (!msg) {
		/* If peer is closed, close this. */
		if (!subd->peer->to_peer)
			return io_close(subd_conn);

		/* Tell them to read again. */
		io_wake(&subd->peer->peer_in);

		/* Wait for them to wake us */
		return msg_queue_wait(subd_conn, subd->outq,
				      write_to_subd, subd);
	}

	return io_write_wire(subd_conn, take(msg), write_to_subd, subd);
}

static void destroy_subd(struct subd *subd)
{
	struct peer *peer = subd->peer;
	size_t pos;

	for (pos = 0; peer->subds[pos] != subd; pos++)
		assert(pos < tal_count(peer->subds));

	tal_arr_remove(&peer->subds, pos);

	/* Make sure we try to keep reading from peer (might
	 * have been waiting for write_to_subd) */
	io_wake(&peer->peer_in);

	/* Maybe we were last subd out? */
	maybe_free_peer(peer);
}

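/* Create the tracking entry for a channel_id; subd->conn stays NULL until
 * lightningd hands us the subdaemon's fd (see peer_connect_subd below). */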
static struct subd *new_subd(struct peer *peer,
			     const struct channel_id *channel_id)
{
	struct subd *subd;

	subd = tal(peer, struct subd);
	subd->peer = peer;
	subd->outq = msg_queue_new(subd, false);
	subd->channel_id = *channel_id;
	subd->temporary_channel_id = NULL;
	subd->opener_revocation_basepoint = NULL;
	subd->conn = NULL;

	/* Connect it to the peer */
	tal_arr_expand(&peer->subds, subd);
	tal_add_destructor(subd, destroy_subd);

	return subd;
}

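/* The peer-read path: decrypt each incoming message, handle it here if we
 * can (pings, gossip, custom messages), otherwise route it to the subd
 * owning its channel_id, creating one (and telling lightningd) if needed. */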
static struct io_plan *read_hdr_from_peer(struct io_conn *peer_conn,
					  struct peer *peer);
static struct io_plan *read_body_from_peer_done(struct io_conn *peer_conn,
						struct peer *peer)
{
	u8 *decrypted;
	struct channel_id channel_id;
	struct subd *subd;

	decrypted = cryptomsg_decrypt_body(tmpctx, &peer->cs,
					   peer->peer_in);
	if (!decrypted) {
		status_peer_debug(&peer->id, "Bad encrypted packet len %zu",
				  tal_bytelen(peer->peer_in));
		return io_close(peer_conn);
	}
	tal_free(peer->peer_in);

	/* dev_disconnect can disable read */
	if (!IFDEV(peer->dev_read_enabled, true))
		return read_hdr_from_peer(peer_conn, peer);

	/* We got something! */
	peer->last_recv_time = time_now();

	/* Don't process packets while we're closing */
	if (peer->draining)
		return read_hdr_from_peer(peer_conn, peer);

	/* If we swallow this, just try again. */
	if (handle_message_locally(peer, decrypted))
		return read_hdr_from_peer(peer_conn, peer);

	/* After this we should be able to match to subd by channel_id */
	if (!extract_channel_id(decrypted, &channel_id)) {
		enum peer_wire type = fromwire_peektype(decrypted);

		/* We won't log this anywhere else, so do it here. */
		status_peer_io(LOG_IO_IN, &peer->id, decrypted);

		/* Could be an all-channel error or warning?  Log it
		 * more verbosely, and hang up. */
		if (type == WIRE_ERROR || type == WIRE_WARNING) {
			char *desc = sanitize_error(tmpctx, decrypted, NULL);
			status_peer_info(&peer->id,
					 "Received %s: %s",
					 peer_wire_name(type), desc);
			return io_close(peer_conn);
		}

		/* This sets final_msg: will close after sending warning */
		send_warning(peer, "Unexpected message %s: %s",
			     peer_wire_name(type),
			     tal_hex(tmpctx, decrypted));
		return read_hdr_from_peer(peer_conn, peer);
	}

	/* If we don't find a subdaemon for this, create a new one. */
	subd = find_subd(peer, &channel_id);
	if (!subd) {
		enum peer_wire t = fromwire_peektype(decrypted);
		status_peer_debug(&peer->id, "Activating for message %s",
				  peer_wire_name(t));
		subd = new_subd(peer, &channel_id);
		/* We tell lightningd to fire up a subdaemon to handle this! */
		daemon_conn_send(peer->daemon->master,
				 take(towire_connectd_peer_spoke(NULL, &peer->id,
								 peer->counter,
								 t,
								 &channel_id)));
	}

	/* Even if we just created it, call this to catch open_channel2 */
	maybe_update_channelid(subd, decrypted);

	/* Tell them to write. */
	msg_enqueue(subd->outq, take(decrypted));

	/* Wait for them to wake us */
	return io_wait(peer_conn, &peer->peer_in, read_hdr_from_peer, peer);
}

static struct io_plan *read_body_from_peer(struct io_conn *peer_conn,
					   struct peer *peer)
{
	u16 len;

	if (!cryptomsg_decrypt_header(&peer->cs, peer->peer_in, &len))
		return io_close(peer_conn);

	tal_resize(&peer->peer_in, (u32)len + CRYPTOMSG_BODY_OVERHEAD);
	return io_read(peer_conn, peer->peer_in, tal_count(peer->peer_in),
		       read_body_from_peer_done, peer);
}

static struct io_plan *read_hdr_from_peer(struct io_conn *peer_conn,
					  struct peer *peer)
{
	assert(peer->to_peer == peer_conn);

	/* BOLT #8:
	 *
	 * ### Receiving and Decrypting Messages
	 *
	 * In order to decrypt the _next_ message in the network
	 * stream, the following steps are completed:
	 *
	 *  1. Read _exactly_ 18 bytes from the network buffer.
	 */
	peer->peer_in = tal_arr(peer, u8, CRYPTOMSG_HDR_SIZE);
	return io_read(peer_conn, peer->peer_in, CRYPTOMSG_HDR_SIZE,
		       read_body_from_peer, peer);
}

static struct io_plan *subd_conn_init(struct io_conn *subd_conn,
				      struct subd *subd)
{
	subd->conn = subd_conn;

	/* subd is a child of the conn: free when it closes! */
	tal_steal(subd->conn, subd);
	return io_duplex(subd_conn,
			 read_from_subd(subd_conn, subd),
			 write_to_subd(subd_conn, subd));
}

static void destroy_peer_conn(struct io_conn *peer_conn, struct peer *peer)
{
	assert(peer->to_peer == peer_conn);

	/* If subds need cleaning, this will do it */
	if (!peer->draining)
		drain_peer(peer);

	peer->to_peer = NULL;

	/* Or if there were no subds, this will free the peer. */
	maybe_free_peer(peer);
}

struct io_plan *multiplex_peer_setup(struct io_conn *peer_conn,
				     struct peer *peer)
{
	/*~ If conn closes, we drain the subd connections and wait for
	 * lightningd to tell us to close with the peer */
	tal_add_destructor2(peer_conn, destroy_peer_conn, peer);

	/* Start keepalives */
	peer->expecting_pong = PONG_UNEXPECTED;
	set_ping_timer(peer);

	/* This used to be in openingd; don't break tests. */
	status_peer_debug(&peer->id, "Handed peer, entering loop");

	return io_duplex(peer_conn,
			 read_hdr_from_peer(peer_conn, peer),
			 write_to_peer(peer_conn, peer));
}

void peer_connect_subd(struct daemon *daemon, const u8 *msg, int fd)
{
	struct node_id id;
	u64 counter;
	struct peer *peer;
	struct channel_id channel_id;
	struct subd *subd;

	if (!fromwire_connectd_peer_connect_subd(msg, &id, &counter, &channel_id))
		master_badmsg(WIRE_CONNECTD_PEER_CONNECT_SUBD, msg);

	/* Races can happen: this might be gone by now (or reconnected!). */
	peer = peer_htable_get(&daemon->peers, &id);
	if (!peer || peer->counter != counter) {
		close(fd);
		return;
	}

	/* Could be disconnecting now */
	if (!peer->to_peer) {
		close(fd);
		return;
	}

	/* If peer said something, we created this and queued msg. */
	subd = find_subd(peer, &channel_id);
	if (!subd)
		subd = new_subd(peer, &channel_id);

	assert(!subd->conn);

	/* This sets subd->conn inside subd_conn_init, and reparents subd! */
	io_new_conn(peer, fd, subd_conn_init, subd);
}

/* Lightningd says to send a ping */
void send_manual_ping(struct daemon *daemon, const u8 *msg)
{
	u8 *ping;
	struct node_id id;
	u16 len, num_pong_bytes;
	struct peer *peer;

	if (!fromwire_connectd_ping(msg, &id, &num_pong_bytes, &len))
		master_badmsg(WIRE_CONNECTD_PING, msg);

	peer = peer_htable_get(&daemon->peers, &id);
	if (!peer) {
		daemon_conn_send(daemon->master,
				 take(towire_connectd_ping_reply(NULL,
								 false, 0)));
		return;
	}

	/* We're not supposed to send another ping until previous replied */
	if (peer->expecting_pong != PONG_UNEXPECTED) {
		daemon_conn_send(daemon->master,
				 take(towire_connectd_ping_reply(NULL,
								 false, 0)));
		return;
	}

	/* It should never ask for an oversize ping. */
	ping = make_ping(NULL, num_pong_bytes, len);
	if (tal_count(ping) > 65535)
		status_failed(STATUS_FAIL_MASTER_IO, "Oversize ping");

	inject_peer_msg(peer, take(ping));

	status_debug("sending ping expecting %sresponse",
		     num_pong_bytes >= 65532 ? "no " : "");

	/* BOLT #1:
	 *
	 * A node receiving a `ping` message:
	 *  - if `num_pong_bytes` is less than 65532:
	 *    - MUST respond by sending a `pong` message, with `byteslen` equal
	 *      to `num_pong_bytes`.
	 *  - otherwise (`num_pong_bytes` is **not** less than 65532):
	 *    - MUST ignore the `ping`.
	 */
	if (num_pong_bytes >= 65532) {
		daemon_conn_send(daemon->master,
				 take(towire_connectd_ping_reply(NULL,
								 true, 0)));
		return;
	}

	/* We'll respond to lightningd once the pong comes in */
	peer->expecting_pong = PONG_EXPECTED_COMMAND;

	/* Since we're doing this manually, kill and restart timer. */
	tal_free(peer->ping_timer);
	set_ping_timer(peer);
}