/*~ This contains all the code to shuffle data between the socket to the peer
 * itself, and the subdaemons. */
#include "config.h"
|
|
|
|
#include <assert.h>
|
2022-01-08 14:28:29 +01:00
|
|
|
#include <bitcoin/block.h>
|
|
|
|
#include <bitcoin/chainparams.h>
|
2022-01-08 14:19:29 +01:00
|
|
|
#include <ccan/io/io.h>
|
2022-01-08 14:22:29 +01:00
|
|
|
#include <common/cryptomsg.h>
|
2022-01-24 21:08:52 +01:00
|
|
|
#include <common/daemon_conn.h>
|
2022-01-08 14:25:29 +01:00
|
|
|
#include <common/dev_disconnect.h>
|
2022-01-08 14:28:29 +01:00
|
|
|
#include <common/features.h>
|
|
|
|
#include <common/gossip_constants.h>
|
2022-01-11 02:16:49 +01:00
|
|
|
#include <common/memleak.h>
|
2022-01-08 14:22:29 +01:00
|
|
|
#include <common/per_peer_state.h>
|
2022-01-29 04:31:32 +01:00
|
|
|
#include <common/ping.h>
|
2022-01-08 14:19:29 +01:00
|
|
|
#include <common/status.h>
|
2022-01-08 14:28:29 +01:00
|
|
|
#include <common/timeout.h>
|
2022-03-22 21:27:30 +01:00
|
|
|
#include <common/type_to_string.h>
|
2022-01-08 14:19:29 +01:00
|
|
|
#include <common/utils.h>
|
2022-01-08 14:28:29 +01:00
|
|
|
#include <common/wire_error.h>
|
|
|
|
#include <connectd/connectd.h>
|
2022-01-24 21:08:52 +01:00
|
|
|
#include <connectd/connectd_gossipd_wiregen.h>
|
2022-01-29 04:31:32 +01:00
|
|
|
#include <connectd/connectd_wiregen.h>
|
2022-06-16 09:32:39 +02:00
|
|
|
#include <connectd/gossip_rcvd_filter.h>
|
2023-01-30 07:24:16 +01:00
|
|
|
#include <connectd/gossip_store.h>
|
2022-01-08 14:19:29 +01:00
|
|
|
#include <connectd/multiplex.h>
|
2022-01-29 04:32:32 +01:00
|
|
|
#include <connectd/onion_message.h>
|
2022-01-08 14:19:29 +01:00
|
|
|
#include <errno.h>
|
2022-01-08 14:28:29 +01:00
|
|
|
#include <fcntl.h>
|
2022-01-08 14:26:29 +01:00
|
|
|
#include <netinet/in.h>
|
|
|
|
#include <netinet/tcp.h>
|
2022-01-08 14:19:29 +01:00
|
|
|
#include <sys/socket.h>
|
2022-01-08 14:28:29 +01:00
|
|
|
#include <sys/stat.h>
|
2022-01-08 14:19:29 +01:00
|
|
|
#include <sys/types.h>
|
2022-01-08 14:26:29 +01:00
|
|
|
#include <wire/peer_wire.h>
|
2022-01-08 14:25:29 +01:00
|
|
|
#include <wire/wire.h>
|
2022-01-08 14:22:29 +01:00
|
|
|
#include <wire/wire_io.h>
|
2022-01-24 21:08:52 +01:00
|
|
|
#include <wire/wire_sync.h>
|
2022-01-08 14:22:29 +01:00
|
|
|
|

struct subd {
	/* Owner: we are in peer->subds[] */
	struct peer *peer;

	/* The temporary or permanent channel_id */
	struct channel_id channel_id;

	/* In passing, we can have a temporary one, too. */
	struct channel_id *temporary_channel_id;

	/* The opening revocation basepoint, for v2 channel_id. */
	struct pubkey *opener_revocation_basepoint;

	/* The actual connection to talk to it (NULL if it's not connected yet) */
	struct io_conn *conn;

	/* Input buffer */
	u8 *in;

	/* Output buffer */
	struct msg_queue *outq;

	/* After we've told it to tx_abort, we don't send anything else. */
	bool rcvd_tx_abort;
};
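
/* Find the subd which is handling messages for this channel_id: it can
 * match either the real channel_id or, while opening, the temporary one. */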
static struct subd *find_subd(struct peer *peer,
			      const struct channel_id *channel_id)
{
	for (size_t i = 0; i < tal_count(peer->subds); i++) {
		struct subd *subd = peer->subds[i];

		/* Once we sent it tx_abort, we pretend it doesn't exist */
		if (subd->rcvd_tx_abort)
			continue;

		/* Once we see a message using the real channel_id, we
		 * clear the temporary_channel_id */
		if (channel_id_eq(&subd->channel_id, channel_id)) {
			subd->temporary_channel_id
				= tal_free(subd->temporary_channel_id);
			return subd;
		}
		if (subd->temporary_channel_id
		    && channel_id_eq(subd->temporary_channel_id, channel_id)) {
			return subd;
		}
	}
	return NULL;
}

/* Except for a reconnection, we finally free a peer when the io_conn
 * is closed and all subds are gone. */
static void maybe_free_peer(struct peer *peer)
{
	if (peer->to_peer)
		return;
	if (tal_count(peer->subds) != 0)
		return;
	status_debug("maybe_free_peer freeing peer!");
	tal_free(peer);
}

/* We try to send the final messages, but if buffer is full and they're
 * not reading, we have to give up. */
static void close_peer_io_timeout(struct peer *peer)
{
	/* BROKEN means we'll trigger CI if we see it, though it's possible */
	status_peer_broken(&peer->id, "Peer did not close, forcing close");
	io_close(peer->to_peer);
}

static void close_subd_timeout(struct subd *subd)
{
	status_peer_debug(&subd->peer->id, "Subd did not close, forcing close");
	io_close(subd->conn);
}
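
/* Start a graceful shutdown of this peer: flush what we can, give the
 * subds (and the peer socket) five seconds to close, then clean up. */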
void drain_peer(struct peer *peer)
{
	status_debug("drain_peer");
	assert(!peer->draining);

	/* Since we immediately free any subds we didn't connect yet,
	 * we need peer->to_peer set so it won't free peer! */
	assert(peer->to_peer);

	/* Give the subds 5 seconds to close their fds to us. */
	for (size_t i = 0; i < tal_count(peer->subds); i++) {
		if (!peer->subds[i]->conn) {
			/* Deletes itself from array, so be careful! */
			tal_free(peer->subds[i]);
			i--;
			continue;
		}
		status_debug("drain_peer draining subd!");
		notleak(new_reltimer(&peer->daemon->timers,
				     peer->subds[i], time_from_sec(5),
				     close_subd_timeout, peer->subds[i]));
		/* Wake any outgoing queued on subd */
		io_wake(peer->subds[i]->outq);
	}

	/* Wake them to ensure they notice the close! */
	io_wake(&peer->subds);

	if (peer->to_peer) {
		/* You have 5 seconds to drain... */
		notleak(new_reltimer(&peer->daemon->timers,
				     peer->to_peer, time_from_sec(5),
				     close_peer_io_timeout, peer));
	}

	/* Clean peer from hashtable; we no longer exist. */
	destroy_peer(peer);
	tal_del_destructor(peer, destroy_peer);

	/* This is a 5-second leak, worst case! */
	notleak(peer);

	/* Start draining process! */
	io_wake(peer->peer_outq);
}
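
/* Queue a message to the peer: write_to_peer() will encrypt and send it. */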
void inject_peer_msg(struct peer *peer, const u8 *msg TAKES)
{
	status_peer_io(LOG_IO_OUT, &peer->id, msg);
	msg_enqueue(peer->peer_outq, msg);
}

void multiplex_final_msg(struct peer *peer, const u8 *final_msg TAKES)
{
	inject_peer_msg(peer, final_msg);
	drain_peer(peer);
}

/* Send warning, close connection to peer */
static void send_warning(struct peer *peer, const char *fmt, ...)
{
	va_list ap;
	u8 *msg;

	va_start(ap, fmt);
	status_vfmt(LOG_UNUSUAL, &peer->id, fmt, ap);
	va_end(ap);

	va_start(ap, fmt);
	msg = towire_warningfmtv(NULL, NULL, fmt, ap);
	va_end(ap);

	multiplex_final_msg(peer, take(msg));
}

/* Kicks off write_to_peer() to look for more gossip to send from store */
static void wake_gossip(struct peer *peer);
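
/* Timer which fires at the next gossip flush interval (shortened under
 * dev-fast-gossip) to wake gossip streaming for this peer. */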
static struct oneshot *gossip_stream_timer(struct peer *peer)
{
	u32 next;

	/* BOLT #7:
	 *
	 * A node:
	 *...
	 *  - SHOULD flush outgoing gossip messages once every 60 seconds,
	 *    independently of the arrival times of the messages.
	 *    - Note: this results in staggered announcements that are unique
	 *      (not duplicated).
	 */
	/* We shorten this for dev_fast_gossip! */
	next = GOSSIP_FLUSH_INTERVAL(peer->daemon->dev_fast_gossip);

	return new_reltimer(&peer->daemon->timers,
			    peer, time_from_sec(next),
			    wake_gossip, peer);
}

/* It's so common to ask for "recent" gossip (we ask for 10 minutes
 * ago, LND and Eclair ask for now, LDK asks for 1 hour ago) that it's
 * worth keeping track of where that starts, so we can skip most of
 * the store. */
static void update_recent_timestamp(struct daemon *daemon)
{
	/* 2 hours allows for some clock drift, not too much gossip */
	u32 recent = time_now().ts.tv_sec - 7200;

	/* Only update every minute */
	if (daemon->gossip_recent_time + 60 > recent)
		return;

	daemon->gossip_recent_time = recent;
	daemon->gossip_store_recent_off
		= find_gossip_store_by_timestamp(daemon->gossip_store_fd,
						 daemon->gossip_store_recent_off,
						 daemon->gossip_recent_time);
}

/* This is called once we need it: otherwise, the gossip_store may not exist,
 * since we start at the same time as gossipd itself. */
static void setup_gossip_store(struct daemon *daemon)
{
	daemon->gossip_store_fd = open(GOSSIP_STORE_FILENAME, O_RDONLY);
	if (daemon->gossip_store_fd < 0)
		status_failed(STATUS_FAIL_INTERNAL_ERROR,
			      "Opening gossip_store %s: %s",
			      GOSSIP_STORE_FILENAME, strerror(errno));

	daemon->gossip_recent_time = 0;
	daemon->gossip_store_recent_off = 1;
	update_recent_timestamp(daemon);

	/* gossipd will be writing to this, and it's not atomic!  Safest
	 * way to find the "end" is to walk through. */
	daemon->gossip_store_end
		= find_gossip_store_end(daemon->gossip_store_fd,
					daemon->gossip_store_recent_off);
}
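
/* Set up this peer's gossip streaming state, based on the features we
 * negotiated with it. */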
void setup_peer_gossip_store(struct peer *peer,
			     const struct feature_set *our_features,
			     const u8 *their_features)
{
	/* Lazy setup */
	if (peer->daemon->gossip_store_fd == -1)
		setup_gossip_store(peer->daemon);

	peer->gs.grf = new_gossip_rcvd_filter(peer);

	/* BOLT #7:
	 *
	 * A node:
	 *   - if the `gossip_queries` feature is negotiated:
	 *     - MUST NOT relay any gossip messages it did not generate itself,
	 *       unless explicitly requested.
	 */
	if (feature_negotiated(our_features, their_features, OPT_GOSSIP_QUERIES)) {
		peer->gs.gossip_timer = NULL;
		peer->gs.active = false;
		peer->gs.off = 1;
		return;
	}

	peer->gs.gossip_timer = gossip_stream_timer(peer);
	peer->gs.active = !peer->daemon->dev_suppress_gossip;
	peer->gs.timestamp_min = 0;
	peer->gs.timestamp_max = UINT32_MAX;

	/* BOLT #7:
	 *
	 * - upon receiving an `init` message with the
	 *   `initial_routing_sync` flag set to 1:
	 *   - SHOULD send gossip messages for all known channels and
	 *     nodes, as if they were just received.
	 * - if the `initial_routing_sync` flag is set to 0, OR if the
	 *   initial sync was completed:
	 *   - SHOULD resume normal operation, as specified in the
	 *     following [Rebroadcasting](#rebroadcasting) section.
	 */
	if (feature_offered(their_features, OPT_INITIAL_ROUTING_SYNC))
		peer->gs.off = 1;
	else {
		/* During tests, particularly, we find that the gossip_store
		 * moves fast, so make sure it really does start at the end. */
		peer->gs.off
			= find_gossip_store_end(peer->daemon->gossip_store_fd,
						peer->daemon->gossip_store_end);
	}
}

/* We're happy for the kernel to batch update and gossip messages, but a
 * commitment message, for example, should be instantly sent.  There's no
 * great way of doing this, unfortunately.
 *
 * Setting TCP_NODELAY on Linux flushes the socket, which really means
 * we'd want to toggle it on then off *after* sending.  But Linux has
 * TCP_CORK.  On FreeBSD, it seems (looking at source) not to, so
 * there we'd want to set it before the send, and reenable it
 * afterwards.  Even if this is wrong on other non-Linux platforms, it
 * only means one extra packet.
 */
static void set_urgent_flag(struct peer *peer, bool urgent)
{
	int val;
	int opt;
	const char *optname;

	if (urgent == peer->urgent)
		return;

	/* FIXME: We can't do this on websockets, but we could signal our
	 * websocket proxy via some magic message to do so! */
	if (peer->is_websocket != NORMAL_SOCKET)
		return;

#ifdef TCP_CORK
	opt = TCP_CORK;
	optname = "TCP_CORK";
#elif defined(TCP_NODELAY)
	opt = TCP_NODELAY;
	optname = "TCP_NODELAY";
#else
#error "Please report platform with neither TCP_CORK nor TCP_NODELAY?"
#endif

	val = urgent;
	if (setsockopt(io_conn_fd(peer->to_peer),
		       IPPROTO_TCP, opt, &val, sizeof(val)) != 0
	    /* This actually happens in testing, where we blackhole the fd */
	    && peer->daemon->dev_disconnect_fd == -1) {
		status_broken("setsockopt %s=1 fd=%u: %s",
			      optname, io_conn_fd(peer->to_peer),
			      strerror(errno));
	}
	peer->urgent = urgent;
}
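
/* Should this message type be pushed out immediately, rather than letting
 * the kernel batch it?  See set_urgent_flag() above. */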
static bool is_urgent(enum peer_wire type)
{
	switch (type) {
	case WIRE_INIT:
	case WIRE_ERROR:
	case WIRE_WARNING:
	case WIRE_TX_ADD_INPUT:
	case WIRE_TX_ADD_OUTPUT:
	case WIRE_TX_REMOVE_INPUT:
	case WIRE_TX_REMOVE_OUTPUT:
	case WIRE_TX_COMPLETE:
	case WIRE_TX_ABORT:
	case WIRE_TX_SIGNATURES:
	case WIRE_OPEN_CHANNEL:
	case WIRE_ACCEPT_CHANNEL:
	case WIRE_FUNDING_CREATED:
	case WIRE_FUNDING_SIGNED:
	case WIRE_CHANNEL_READY:
	case WIRE_OPEN_CHANNEL2:
	case WIRE_ACCEPT_CHANNEL2:
	case WIRE_TX_INIT_RBF:
	case WIRE_TX_ACK_RBF:
	case WIRE_SHUTDOWN:
	case WIRE_CLOSING_SIGNED:
	case WIRE_UPDATE_ADD_HTLC:
	case WIRE_UPDATE_FULFILL_HTLC:
	case WIRE_UPDATE_FAIL_HTLC:
	case WIRE_UPDATE_FAIL_MALFORMED_HTLC:
	case WIRE_UPDATE_FEE:
	case WIRE_UPDATE_BLOCKHEIGHT:
	case WIRE_CHANNEL_REESTABLISH:
	case WIRE_ANNOUNCEMENT_SIGNATURES:
	case WIRE_CHANNEL_ANNOUNCEMENT:
	case WIRE_NODE_ANNOUNCEMENT:
	case WIRE_CHANNEL_UPDATE:
	case WIRE_QUERY_SHORT_CHANNEL_IDS:
	case WIRE_REPLY_SHORT_CHANNEL_IDS_END:
	case WIRE_QUERY_CHANNEL_RANGE:
	case WIRE_REPLY_CHANNEL_RANGE:
	case WIRE_GOSSIP_TIMESTAMP_FILTER:
	case WIRE_ONION_MESSAGE:
	case WIRE_PEER_STORAGE:
	case WIRE_YOUR_PEER_STORAGE:
	case WIRE_STFU:
	case WIRE_SPLICE:
	case WIRE_SPLICE_ACK:
	case WIRE_SPLICE_LOCKED:
		return false;

	/* These are time-sensitive, and so send without delay. */
	case WIRE_PING:
	case WIRE_PONG:
	case WIRE_COMMITMENT_SIGNED:
	case WIRE_REVOKE_AND_ACK:
		return true;
	};

	/* plugins can inject other messages; assume not urgent. */
	return false;
}

/* io_sock_shutdown, but in format suitable for an io_plan callback */
static struct io_plan *io_sock_shutdown_cb(struct io_conn *conn, struct peer *unused)
{
	return io_sock_shutdown(conn);
}
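
/* Apply any dev_disconnect hook, then encrypt msg and write it to the peer,
 * calling next() once the write completes. */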
static struct io_plan *encrypt_and_send(struct peer *peer,
					const u8 *msg TAKES,
					struct io_plan *(*next)
					(struct io_conn *peer_conn,
					 struct peer *peer))
{
	int type = fromwire_peektype(msg);

	switch (dev_disconnect(&peer->id, type)) {
	case DEV_DISCONNECT_BEFORE:
		if (taken(msg))
			tal_free(msg);
		return io_close(peer->to_peer);
	case DEV_DISCONNECT_AFTER:
		/* Disallow reads from now on */
		peer->dev_read_enabled = false;
		/* Using io_close here can lose the data we're about to send! */
		next = io_sock_shutdown_cb;
		break;
	case DEV_DISCONNECT_BLACKHOLE:
		/* Disable both reads and writes from now on */
		peer->dev_read_enabled = false;
		peer->dev_writes_enabled = talz(peer, u32);
		break;
	case DEV_DISCONNECT_NORMAL:
		break;
	case DEV_DISCONNECT_DISABLE_AFTER:
		peer->dev_read_enabled = false;
		peer->dev_writes_enabled = tal(peer, u32);
		*peer->dev_writes_enabled = 1;
		break;
	}

	set_urgent_flag(peer, is_urgent(type));

	/* We free this and the encrypted version in next write_to_peer */
	peer->sent_to_peer = cryptomsg_encrypt_msg(peer, &peer->cs, msg);
	return io_write(peer->to_peer,
			peer->sent_to_peer,
			tal_bytelen(peer->sent_to_peer),
			next, peer);
}

/* Kicks off write_to_peer() to look for more gossip to send from store */
static void wake_gossip(struct peer *peer)
{
	bool flush_gossip_filter = true;

	/* With dev-fast-gossip, we clean every 2 seconds, which is too
	 * fast for our slow tests!  So we only call this one time in 5
	 * (actually twice that, as the counter is not per-peer!) */
	static int gossip_age_count;

	if (peer->daemon->dev_fast_gossip && gossip_age_count++ % 5 != 0)
		flush_gossip_filter = false;

	/* Don't remember sent per-peer gossip forever. */
	if (flush_gossip_filter)
		gossip_rcvd_filter_age(peer->gs.grf);

	peer->gs.active = !peer->daemon->dev_suppress_gossip;
	io_wake(peer->peer_outq);

	/* And go again in 60 seconds (from now, not when we finish!) */
	peer->gs.gossip_timer = gossip_stream_timer(peer);
}

/* If we are streaming gossip, get something from gossip store */
static u8 *maybe_from_gossip_store(const tal_t *ctx, struct peer *peer)
{
	u8 *msg;

	/* dev-mode can suppress all gossip */
	if (peer->daemon->dev_suppress_gossip)
		return NULL;

	/* Not streaming right now? */
	if (!peer->gs.active)
		return NULL;

	/* This should be around to kick us every 60 seconds */
	assert(peer->gs.gossip_timer);

again:
	msg = gossip_store_next(ctx, &peer->daemon->gossip_store_fd,
				peer->gs.timestamp_min,
				peer->gs.timestamp_max,
				false,
				&peer->gs.off,
				&peer->daemon->gossip_store_end);
	/* Don't send back gossip they sent to us! */
	if (msg) {
		if (gossip_rcvd_filter_del(peer->gs.grf, msg)) {
			msg = tal_free(msg);
			goto again;
		}
		status_peer_io(LOG_IO_OUT, &peer->id, msg);
		return msg;
	}

	peer->gs.active = false;
	return NULL;
}

/* Mutual recursion */
static void send_ping(struct peer *peer);
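
/* Schedule the next keepalive ping, 15 to 45 seconds from now (unless
 * peer->daemon->dev_no_ping_timer is set). */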
static void set_ping_timer(struct peer *peer)
{
	if (peer->daemon->dev_no_ping_timer) {
		peer->ping_timer = NULL;
		return;
	}
	peer->ping_timer = new_reltimer(&peer->daemon->timers, peer,
					time_from_sec(15 + pseudorand(30)),
					send_ping, peer);
}

static void send_ping(struct peer *peer)
{
	/* If it's still sending us traffic, maybe ping reply is backed up?
	 * That's OK, ping is just to make sure it's still alive, and clearly
	 * it is. */
	if (time_before(peer->last_recv_time,
			timeabs_sub(time_now(), time_from_sec(60)))) {
		/* Already have a ping in flight? */
		if (peer->expecting_pong != PONG_UNEXPECTED) {
			status_peer_debug(&peer->id, "Last ping unreturned: hanging up");
			if (peer->to_peer)
				io_close(peer->to_peer);
			return;
		}

		inject_peer_msg(peer, take(make_ping(NULL, 1, 0)));
		peer->expecting_pong = PONG_EXPECTED_PROBING;
	}

	set_ping_timer(peer);
}
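
/* lightningd asked us to send a custom message to this peer. */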
void send_custommsg(struct daemon *daemon, const u8 *msg)
{
	struct node_id id;
	u8 *custommsg;
	struct peer *peer;

	if (!fromwire_connectd_custommsg_out(tmpctx, msg, &id, &custommsg))
		master_badmsg(WIRE_CONNECTD_CUSTOMMSG_OUT, msg);

	/* Races can happen: this might be gone by now. */
	peer = peer_htable_get(daemon->peers, &id);
	if (peer)
		inject_peer_msg(peer, take(custommsg));
}
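
/* Peer sent us a ping: reply with a pong if it asked for one. */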
static void handle_ping_in(struct peer *peer, const u8 *msg)
{
	u8 *pong;

	/* gossipd doesn't log IO, so we log it here. */
	status_peer_io(LOG_IO_IN, &peer->id, msg);

	if (!check_ping_make_pong(NULL, msg, &pong)) {
		send_warning(peer, "Invalid ping %s", tal_hex(msg, msg));
		return;
	}

	if (pong)
		inject_peer_msg(peer, take(pong));
}
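
/* A pong we requested on lightningd's behalf: report the result back. */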
static void handle_ping_reply(struct peer *peer, const u8 *msg)
{
	u8 *ignored;
	size_t i;

	/* We print this out because we asked for pong, so can't spam us... */
	if (!fromwire_pong(msg, msg, &ignored))
		status_peer_unusual(&peer->id, "Got malformed ping reply %s",
				    tal_hex(tmpctx, msg));

	/* We print this because dev versions of Core Lightning embed
	 * version here: see check_ping_make_pong! */
	for (i = 0; i < tal_count(ignored); i++) {
		if (ignored[i] < ' ' || ignored[i] == 127)
			break;
	}
	status_debug("Got pong %zu bytes (%.*s...)",
		     tal_count(ignored), (int)i, (char *)ignored);
	daemon_conn_send(peer->daemon->master,
			 take(towire_connectd_ping_reply(NULL, true,
							 tal_bytelen(msg))));
}
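
/* Peer sent us a pong: check whether we were expecting one. */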
static void handle_pong_in(struct peer *peer, const u8 *msg)
{
	/* gossipd doesn't log IO, so we log it here. */
	status_peer_io(LOG_IO_IN, &peer->id, msg);

	switch (peer->expecting_pong) {
	case PONG_EXPECTED_COMMAND:
		handle_ping_reply(peer, msg);
		/* fall thru */
	case PONG_EXPECTED_PROBING:
		peer->expecting_pong = PONG_UNEXPECTED;
		return;
	case PONG_UNEXPECTED:
		status_debug("Unexpected pong?");
		return;
	}
	abort();
}

/* Forward to gossipd */
static void handle_gossip_in(struct peer *peer, const u8 *msg)
{
	u8 *gmsg = towire_gossipd_recv_gossip(NULL, &peer->id, msg);

	/* gossipd doesn't log IO, so we log it here. */
	status_peer_io(LOG_IO_IN, &peer->id, msg);
	daemon_conn_send(peer->daemon->gossipd, take(gmsg));
}
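
/* Peer told us which gossip timestamps it wants: adjust what we stream
 * from the gossip_store. */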
static void handle_gossip_timestamp_filter_in(struct peer *peer, const u8 *msg)
{
	struct bitcoin_blkid chain_hash;
	u32 first_timestamp, timestamp_range;

	if (!fromwire_gossip_timestamp_filter(msg, &chain_hash,
					      &first_timestamp,
					      &timestamp_range)) {
		send_warning(peer, "gossip_timestamp_filter invalid: %s",
			     tal_hex(tmpctx, msg));
		return;
	}

	/* gossipd doesn't log IO, so we log it here. */
	status_peer_io(LOG_IO_IN, &peer->id, msg);

	if (!bitcoin_blkid_eq(&chainparams->genesis_blockhash, &chain_hash)) {
		send_warning(peer, "gossip_timestamp_filter for bad chain: %s",
			     tal_hex(tmpctx, msg));
		return;
	}

	peer->gs.timestamp_min = first_timestamp;
	peer->gs.timestamp_max = first_timestamp + timestamp_range - 1;
	/* Make sure we never leave it on an impossible value. */
	if (peer->gs.timestamp_max < peer->gs.timestamp_min)
		peer->gs.timestamp_max = UINT32_MAX;

	/* BOLT-gossip-filter-simplify #7:
	 * The receiver:
	 *...
	 *   - if `first_timestamp` is 0:
	 *     - SHOULD send all known gossip messages.
	 *   - otherwise, if `first_timestamp` is 0xFFFFFFFF:
	 *     - SHOULD NOT send any gossip messages (except its own).
	 *   - otherwise:
	 *     - SHOULD send gossip messages it receives from now on.
	 */
	/* For us, this means we only sweep the gossip store for messages
	 * if the first_timestamp is 0 */
	if (first_timestamp == 0)
		peer->gs.off = 1;
	else if (first_timestamp == 0xFFFFFFFF)
		peer->gs.off = peer->daemon->gossip_store_end;
	else {
		/* We are actually a bit nicer than the spec, and we include
		 * "recent" gossip here. */
		update_recent_timestamp(peer->daemon);
		peer->gs.off = peer->daemon->gossip_store_recent_off;
	}

	/* BOLT #7:
	 *    - MAY wait for the next outgoing gossip flush to send these.
	 */
	/* We send immediately the first time, after that we wait. */
	if (!peer->gs.gossip_timer)
		wake_gossip(peer);
}
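
/* If this is an odd-typed message we don't handle ourselves, hand it to
 * lightningd as a custom message and return true. */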
static bool handle_custommsg(struct daemon *daemon,
			     struct peer *peer,
			     const u8 *msg)
{
	enum peer_wire type = fromwire_peektype(msg);

	if (type % 2 == 1 && !peer_wire_is_internal(type)) {
		/* The message is not part of the messages we know how to
		 * handle. Assuming this is a custommsg, we just forward it to the
		 * master. */
		status_peer_io(LOG_IO_IN, &peer->id, msg);
		daemon_conn_send(daemon->master,
				 take(towire_connectd_custommsg_in(NULL,
								   &peer->id,
								   msg)));
		return true;
	} else {
		return false;
	}
}

/* We handle pings and gossip messages. */
static bool handle_message_locally(struct peer *peer, const u8 *msg)
{
	enum peer_wire type = fromwire_peektype(msg);

	/* We remember these so we don't rexmit them */
	gossip_rcvd_filter_add(peer->gs.grf, msg);

	if (type == WIRE_GOSSIP_TIMESTAMP_FILTER) {
		handle_gossip_timestamp_filter_in(peer, msg);
		return true;
	} else if (type == WIRE_PING) {
		handle_ping_in(peer, msg);
		return true;
	} else if (type == WIRE_PONG) {
		handle_pong_in(peer, msg);
		return true;
	} else if (type == WIRE_ONION_MESSAGE) {
		handle_onion_message(peer->daemon, peer, msg);
		return true;
	} else if (handle_custommsg(peer->daemon, peer, msg)) {
		return true;
	}

	/* Do we want to divert to gossipd? */
	if (is_msg_for_gossipd(msg)) {
		handle_gossip_in(peer, msg);
		return true;
	}

	return false;
}

/* Move "channel_id" to temporary. */
static void move_channel_id_to_temp(struct subd *subd)
{
	tal_free(subd->temporary_channel_id);
	subd->temporary_channel_id
		= tal_dup(subd, struct channel_id, &subd->channel_id);
}

/* Only works for open_channel2 and accept_channel2 */
static struct pubkey *extract_revocation_basepoint(const tal_t *ctx,
						   const u8 *msg)
{
	const u8 *cursor = msg;
	size_t max = tal_bytelen(msg);
	enum peer_wire t;
	struct pubkey pubkey;

	t = fromwire_u16(&cursor, &max);

	switch (t) {
	case WIRE_OPEN_CHANNEL2:
		/* BOLT-dualfund #2:
		 * 1. type: 64 (`open_channel2`)
		 * 2. data:
		 *    * [`chain_hash`:`chain_hash`]
		 *    * [`channel_id`:`zerod_channel_id`]
		 *    * [`u32`:`funding_feerate_perkw`]
		 *    * [`u32`:`commitment_feerate_perkw`]
		 *    * [`u64`:`funding_satoshis`]
		 *    * [`u64`:`dust_limit_satoshis`]
		 *    * [`u64`:`max_htlc_value_in_flight_msat`]
		 *    * [`u64`:`htlc_minimum_msat`]
		 *    * [`u16`:`to_self_delay`]
		 *    * [`u16`:`max_accepted_htlcs`]
		 *    * [`u32`:`locktime`]
		 *    * [`point`:`funding_pubkey`]
		 *    * [`point`:`revocation_basepoint`]
		 */
		fromwire_pad(&cursor, &max,
			     sizeof(struct bitcoin_blkid)
			     + sizeof(struct channel_id)
			     + sizeof(u32)
			     + sizeof(u32)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u16)
			     + sizeof(u16)
			     + sizeof(u32)
			     + PUBKEY_CMPR_LEN);
		break;
	case WIRE_ACCEPT_CHANNEL2:
		/* BOLT-dualfund #2:
		 * 1. type: 65 (`accept_channel2`)
		 * 2. data:
		 *    * [`channel_id`:`zerod_channel_id`]
		 *    * [`u64`:`funding_satoshis`]
		 *    * [`u64`:`dust_limit_satoshis`]
		 *    * [`u64`:`max_htlc_value_in_flight_msat`]
		 *    * [`u64`:`htlc_minimum_msat`]
		 *    * [`u32`:`minimum_depth`]
		 *    * [`u16`:`to_self_delay`]
		 *    * [`u16`:`max_accepted_htlcs`]
		 *    * [`point`:`funding_pubkey`]
		 *    * [`point`:`revocation_basepoint`]
		 */
		fromwire_pad(&cursor, &max,
			     sizeof(struct channel_id)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u64)
			     + sizeof(u32)
			     + sizeof(u16)
			     + sizeof(u16)
			     + PUBKEY_CMPR_LEN);
		break;
	default:
		abort();
	}

	fromwire_pubkey(&cursor, &max, &pubkey);
	if (!cursor)
		return NULL;
	return tal_dup(ctx, struct pubkey, &pubkey);
}

/* Only works for funding_created */
static bool extract_funding_created_funding(const u8 *funding_created,
					    struct bitcoin_outpoint *outp)
{
	const u8 *cursor = funding_created;
	size_t max = tal_bytelen(funding_created);
	enum peer_wire t;

	t = fromwire_u16(&cursor, &max);

	switch (t) {
	case WIRE_FUNDING_CREATED:
		/* BOLT #2:
		 * 1. type: 34 (`funding_created`)
		 * 2. data:
		 *     * [`32*byte`:`temporary_channel_id`]
		 *     * [`sha256`:`funding_txid`]
		 *     * [`u16`:`funding_output_index`]
		 */
		fromwire_pad(&cursor, &max, 32);
		fromwire_bitcoin_txid(&cursor, &max, &outp->txid);
		outp->n = fromwire_u16(&cursor, &max);
		break;
	default:
		abort();
	}

	return cursor != NULL;
}

static void update_v1_channelid(struct subd *subd, const u8 *funding_created)
{
	struct bitcoin_outpoint outp;

	if (!extract_funding_created_funding(funding_created, &outp)) {
		status_peer_unusual(&subd->peer->id, "WARNING: funding_created no tx info?");
		return;
	}
	move_channel_id_to_temp(subd);
	derive_channel_id(&subd->channel_id, &outp);
}

static void update_v2_channelid(struct subd *subd, const u8 *accept_channel2)
{
	struct pubkey *acc_basepoint;

	acc_basepoint = extract_revocation_basepoint(tmpctx, accept_channel2);
	if (!acc_basepoint) {
		status_peer_unusual(&subd->peer->id, "WARNING: accept_channel2 no revocation_basepoint?");
		return;
	}
	if (!subd->opener_revocation_basepoint) {
		status_peer_unusual(&subd->peer->id, "WARNING: accept_channel2 without open_channel2?");
		return;
	}

	move_channel_id_to_temp(subd);
	derive_channel_id_v2(&subd->channel_id,
			     subd->opener_revocation_basepoint, acc_basepoint);
}

/* We maintain channel_id matching for subds by snooping: we set it manually
 * for first packet (open_channel or open_channel2). */
static void maybe_update_channelid(struct subd *subd, const u8 *msg)
{
	switch (fromwire_peektype(msg)) {
	case WIRE_OPEN_CHANNEL:
		extract_channel_id(msg, &subd->channel_id);
		break;
	case WIRE_OPEN_CHANNEL2:
		subd->opener_revocation_basepoint
			= extract_revocation_basepoint(subd, msg);
		break;
	case WIRE_ACCEPT_CHANNEL2:
		update_v2_channelid(subd, msg);
		break;
	case WIRE_FUNDING_CREATED:
		update_v1_channelid(subd, msg);
		break;
	}
}
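
/* The write loop for the peer socket: send queued messages first, then
 * fall back to streaming from the gossip_store. */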
static struct io_plan *write_to_peer(struct io_conn *peer_conn,
				     struct peer *peer)
{
	const u8 *msg;
	assert(peer->to_peer == peer_conn);

	/* Free last sent one (if any) */
	peer->sent_to_peer = tal_free(peer->sent_to_peer);

	/* Pop tail of send queue */
	msg = msg_dequeue(peer->peer_outq);

	/* Still nothing to send? */
	if (!msg) {
		/* Draining?  We're done when subds are done. */
		if (peer->draining && tal_count(peer->subds) == 0)
			return io_sock_shutdown(peer_conn);

		/* If they want us to send gossip, do so now. */
		if (!peer->draining)
			msg = maybe_from_gossip_store(NULL, peer);
		if (!msg) {
			/* Tell them to read again, */
			io_wake(&peer->subds);

			/* Wait for them to wake us */
			return msg_queue_wait(peer_conn, peer->peer_outq,
					      write_to_peer, peer);
		}
	}

	/* dev_disconnect can disable writes */
	if (peer->dev_writes_enabled) {
		if (*peer->dev_writes_enabled == 0) {
			tal_free(msg);
			/* Continue, to drain queue */
			return write_to_peer(peer_conn, peer);
		}
		(*peer->dev_writes_enabled)--;
	}

	return encrypt_and_send(peer, take(msg), write_to_peer);
}

static struct io_plan *read_from_subd(struct io_conn *subd_conn,
				      struct subd *subd);
static struct io_plan *read_from_subd_done(struct io_conn *subd_conn,
					   struct subd *subd)
{
	maybe_update_channelid(subd, subd->in);

	/* Tell them to encrypt & write. */
	msg_enqueue(subd->peer->peer_outq, take(subd->in));
	subd->in = NULL;

	/* Wait for them to wake us */
	return io_wait(subd_conn, &subd->peer->subds, read_from_subd, subd);
}

static struct io_plan *read_from_subd(struct io_conn *subd_conn,
				      struct subd *subd)
{
	return io_read_wire(subd_conn, subd, &subd->in,
			    read_from_subd_done, subd);
}

/* These four functions handle peer->subd */
static struct io_plan *write_to_subd(struct io_conn *subd_conn,
				     struct subd *subd)
{
	const u8 *msg;
	assert(subd->conn == subd_conn);

	/* Pop tail of send queue */
	msg = msg_dequeue(subd->outq);

	/* Nothing to send? */
	if (!msg) {
		/* If peer is closed, close this. */
		if (!subd->peer->to_peer)
			return io_close(subd_conn);

		/* Tell them to read again. */
		io_wake(&subd->peer->peer_in);

		/* Wait for them to wake us */
		return msg_queue_wait(subd_conn, subd->outq,
				      write_to_subd, subd);
	}

	return io_write_wire(subd_conn, take(msg), write_to_subd, subd);
}
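
/* Destructor: remove this subd from peer->subds, and wake the peer reader
 * in case it was waiting on us. */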
static void destroy_subd(struct subd *subd)
{
	struct peer *peer = subd->peer;
	size_t pos;

	for (pos = 0; peer->subds[pos] != subd; pos++)
		assert(pos < tal_count(peer->subds));

	tal_arr_remove(&peer->subds, pos);

	/* Make sure we try to keep reading from peer (might
	 * have been waiting for write_to_subd) */
	io_wake(&peer->peer_in);

	/* Maybe we were last subd out? */
	maybe_free_peer(peer);
}
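
/* Create the entry for a subd handling this channel_id: the io_conn is
 * attached later, once it is actually connected. */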
static struct subd *new_subd(struct peer *peer,
			     const struct channel_id *channel_id)
{
	struct subd *subd;

	subd = tal(peer, struct subd);
	subd->peer = peer;
	subd->outq = msg_queue_new(subd, false);
	subd->channel_id = *channel_id;
	subd->temporary_channel_id = NULL;
	subd->opener_revocation_basepoint = NULL;
	subd->conn = NULL;
subd->rcvd_tx_abort = false;
|
2022-07-18 14:12:18 +02:00
|
|
|
|
|
|
|
/* Connect it to the peer */
|
|
|
|
tal_arr_expand(&peer->subds, subd);
|
|
|
|
tal_add_destructor(subd, destroy_subd);
|
|
|
|
|
|
|
|
return subd;
|
|
|
|
}
|
|
|
|
|
2022-01-08 14:22:29 +01:00
|
|
|
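/* Peer-side read path: read_hdr_from_peer reads the 18-byte header,
 * read_body_from_peer reads the encrypted body, and
 * read_body_from_peer_done decrypts it and either handles it locally or
 * forwards it to the matching subd. */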
static struct io_plan *read_hdr_from_peer(struct io_conn *peer_conn,
|
|
|
|
struct peer *peer);
|
|
|
|
static struct io_plan *read_body_from_peer_done(struct io_conn *peer_conn,
|
|
|
|
struct peer *peer)
|
|
|
|
{
|
|
|
|
u8 *decrypted;
|
2022-03-22 09:52:13 +01:00
|
|
|
struct channel_id channel_id;
|
|
|
|
struct subd *subd;
|
2023-10-23 08:04:35 +02:00
|
|
|
enum peer_wire type;
|
|
|
|
|
2022-01-08 14:22:29 +01:00
|
|
|
|
2022-03-22 09:51:13 +01:00
|
|
|
decrypted = cryptomsg_decrypt_body(tmpctx, &peer->cs,
|
2022-01-08 14:22:29 +01:00
|
|
|
peer->peer_in);
|
2022-01-11 02:15:58 +01:00
|
|
|
if (!decrypted) {
|
|
|
|
status_peer_debug(&peer->id, "Bad encrypted packet len %zu",
|
|
|
|
tal_bytelen(peer->peer_in));
|
2022-01-08 14:22:29 +01:00
|
|
|
return io_close(peer_conn);
|
2022-01-11 02:15:58 +01:00
|
|
|
}
|
2022-01-08 14:22:29 +01:00
|
|
|
tal_free(peer->peer_in);
|
|
|
|
|
2023-10-23 08:04:35 +02:00
|
|
|
type = fromwire_peektype(decrypted);
|
|
|
|
|
2022-01-11 02:16:10 +01:00
|
|
|
/* dev_disconnect can disable read */
|
2023-09-21 07:36:26 +02:00
|
|
|
if (!peer->dev_read_enabled)
|
2022-01-11 02:16:10 +01:00
|
|
|
return read_hdr_from_peer(peer_conn, peer);
|
|
|
|
|
2022-06-26 08:44:12 +02:00
|
|
|
/* We got something! */
|
|
|
|
peer->last_recv_time = time_now();
|
|
|
|
|
2022-01-11 02:16:49 +01:00
|
|
|
/* Don't process packets while we're closing */
|
2022-07-16 06:49:30 +02:00
|
|
|
if (peer->draining)
|
2022-01-11 02:16:49 +01:00
|
|
|
return read_hdr_from_peer(peer_conn, peer);
|
|
|
|
|
2022-01-08 14:28:29 +01:00
|
|
|
/* If we swallow this, just try again. */
|
2022-03-22 09:51:13 +01:00
|
|
|
if (handle_message_locally(peer, decrypted))
|
2022-01-08 14:28:29 +01:00
|
|
|
return read_hdr_from_peer(peer_conn, peer);
|
|
|
|
|
2022-03-22 09:52:13 +01:00
|
|
|
/* After this we should be able to match to subd by channel_id */
|
|
|
|
if (!extract_channel_id(decrypted, &channel_id)) {
|
|
|
|
/* We won't log this anywhere else, so do it here. */
|
|
|
|
status_peer_io(LOG_IO_IN, &peer->id, decrypted);
|
|
|
|
|
|
|
|
/* Could be an all-channel error or warning? Log it
|
2023-09-12 06:23:19 +02:00
|
|
|
* more verbosely: hang up on error. */
|
2022-03-22 09:52:13 +01:00
|
|
|
if (type == WIRE_ERROR || type == WIRE_WARNING) {
|
|
|
|
char *desc = sanitize_error(tmpctx, decrypted, NULL);
|
|
|
|
status_peer_info(&peer->id,
|
|
|
|
"Received %s: %s",
|
|
|
|
peer_wire_name(type), desc);
|
2023-09-12 06:23:19 +02:00
|
|
|
if (type == WIRE_WARNING)
|
|
|
|
return read_hdr_from_peer(peer_conn, peer);
|
2022-03-22 09:52:13 +01:00
|
|
|
return io_close(peer_conn);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This sets final_msg: will close after sending warning */
|
|
|
|
send_warning(peer, "Unexpected message %s: %s",
|
|
|
|
peer_wire_name(type),
|
|
|
|
tal_hex(tmpctx, decrypted));
|
|
|
|
return read_hdr_from_peer(peer_conn, peer);
|
|
|
|
}
|
|
|
|
|
2022-07-18 14:12:28 +02:00
|
|
|
/* If we don't find a subdaemon for this, create a new one. */
|
2022-03-22 09:52:13 +01:00
|
|
|
subd = find_subd(peer, &channel_id);
|
2022-03-22 21:27:29 +01:00
|
|
|
if (!subd) {
|
|
|
|
enum peer_wire t = fromwire_peektype(decrypted);
|
2023-02-03 04:38:39 +01:00
|
|
|
|
|
|
|
/* Simplest to close on them at this point. */
|
|
|
|
if (peer->daemon->shutting_down) {
|
|
|
|
status_peer_debug(&peer->id,
|
|
|
|
"Shutting down: hanging up for %s",
|
|
|
|
peer_wire_name(t));
|
|
|
|
return io_close(peer_conn);
|
|
|
|
}
|
2022-03-22 21:27:29 +01:00
|
|
|
status_peer_debug(&peer->id, "Activating for message %s",
|
|
|
|
peer_wire_name(t));
|
2022-07-18 14:12:18 +02:00
|
|
|
subd = new_subd(peer, &channel_id);
|
|
|
|
/* We tell lightningd to fire up a subdaemon to handle this! */
|
|
|
|
daemon_conn_send(peer->daemon->master,
|
|
|
|
take(towire_connectd_peer_spoke(NULL, &peer->id,
|
2022-07-18 14:12:27 +02:00
|
|
|
peer->counter,
|
2022-07-18 14:12:18 +02:00
|
|
|
t,
|
2023-10-22 06:07:32 +02:00
|
|
|
&channel_id,
|
|
|
|
is_peer_error(tmpctx, decrypted))));
|
2022-03-22 21:27:29 +01:00
|
|
|
}
|
2022-01-11 02:17:01 +01:00
|
|
|
|
2022-03-22 21:27:30 +01:00
|
|
|
/* Even if we just created it, call this to catch open_channel2 */
|
|
|
|
maybe_update_channelid(subd, decrypted);
|
|
|
|
|
2022-01-08 14:22:29 +01:00
|
|
|
/* Tell them to write. */
|
2022-03-22 09:52:13 +01:00
|
|
|
msg_enqueue(subd->outq, take(decrypted));
|
2022-01-08 14:22:29 +01:00
|
|
|
|
2023-10-23 08:04:35 +02:00
|
|
|
/* Is this a tx_abort? Ignore from now on, and close after sending! */
|
|
|
|
if (type == WIRE_TX_ABORT) {
|
|
|
|
subd->rcvd_tx_abort = true;
|
|
|
|
/* In case it doesn't close by itself */
|
|
|
|
notleak(new_reltimer(&peer->daemon->timers, subd,
|
|
|
|
time_from_sec(5),
|
|
|
|
close_subd_timeout, subd));
|
|
|
|
}
|
|
|
|
|
2022-01-08 14:22:29 +01:00
|
|
|
/* Wait for them to wake us */
|
|
|
|
return io_wait(peer_conn, &peer->peer_in, read_hdr_from_peer, peer);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct io_plan *read_body_from_peer(struct io_conn *peer_conn,
|
2022-01-08 14:19:29 +01:00
|
|
|
struct peer *peer)
|
|
|
|
{
|
2022-01-08 14:22:29 +01:00
|
|
|
u16 len;
|
2022-01-08 14:19:29 +01:00
|
|
|
|
2022-01-08 14:24:29 +01:00
|
|
|
if (!cryptomsg_decrypt_header(&peer->cs, peer->peer_in, &len))
|
2022-01-08 14:22:29 +01:00
|
|
|
return io_close(peer_conn);
|
2022-01-08 14:19:29 +01:00
|
|
|
|
2022-01-08 14:22:29 +01:00
|
|
|
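/* The body is the encrypted payload plus its 16-byte MAC
 * (CRYPTOMSG_BODY_OVERHEAD, per BOLT #8). */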
tal_resize(&peer->peer_in, (u32)len + CRYPTOMSG_BODY_OVERHEAD);
|
|
|
|
return io_read(peer_conn, peer->peer_in, tal_count(peer->peer_in),
|
|
|
|
read_body_from_peer_done, peer);
|
2022-01-08 14:19:29 +01:00
|
|
|
}
|
|
|
|
|
2022-01-08 14:22:29 +01:00
|
|
|
static struct io_plan *read_hdr_from_peer(struct io_conn *peer_conn,
|
|
|
|
struct peer *peer)
|
2022-01-08 14:19:29 +01:00
|
|
|
{
|
|
|
|
assert(peer->to_peer == peer_conn);
|
|
|
|
|
2022-01-08 14:22:29 +01:00
|
|
|
/* BOLT #8:
|
|
|
|
*
|
|
|
|
* ### Receiving and Decrypting Messages
|
|
|
|
*
|
|
|
|
* In order to decrypt the _next_ message in the network
|
|
|
|
* stream, the following steps are completed:
|
|
|
|
*
|
|
|
|
* 1. Read _exactly_ 18 bytes from the network buffer.
|
|
|
|
*/
|
|
|
|
peer->peer_in = tal_arr(peer, u8, CRYPTOMSG_HDR_SIZE);
|
|
|
|
return io_read(peer_conn, peer->peer_in, CRYPTOMSG_HDR_SIZE,
|
|
|
|
read_body_from_peer, peer);
|
2022-01-08 14:19:29 +01:00
|
|
|
}
|
|
|
|
|
2022-03-22 09:52:13 +01:00
|
|
|
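/* Init for the subdaemon fd: record the conn, hand ownership of subd to
 * it, and start the duplex read/write loops above. */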
static struct io_plan *subd_conn_init(struct io_conn *subd_conn,
|
|
|
|
struct subd *subd)
|
2022-01-08 14:19:29 +01:00
|
|
|
{
|
2022-03-22 09:52:13 +01:00
|
|
|
subd->conn = subd_conn;
|
2022-07-18 14:12:27 +02:00
|
|
|
|
|
|
|
/* subd is a child of the conn: free when it closes! */
|
|
|
|
tal_steal(subd->conn, subd);
|
2022-01-08 14:19:29 +01:00
|
|
|
return io_duplex(subd_conn,
|
2022-03-22 09:52:13 +01:00
|
|
|
read_from_subd(subd_conn, subd),
|
|
|
|
write_to_subd(subd_conn, subd));
|
2022-01-08 14:19:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
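/* The peer socket has closed: drain any remaining subds, clear to_peer,
 * and free the peer if no subds are left. */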
static void destroy_peer_conn(struct io_conn *peer_conn, struct peer *peer)
|
|
|
|
{
|
|
|
|
assert(peer->to_peer == peer_conn);
|
|
|
|
|
2022-07-18 14:12:27 +02:00
|
|
|
/* If subds need cleaning, this will do it */
|
|
|
|
if (!peer->draining)
|
|
|
|
drain_peer(peer);
|
2022-01-11 02:16:49 +01:00
|
|
|
|
2022-07-18 14:12:27 +02:00
|
|
|
peer->to_peer = NULL;
|
|
|
|
|
|
|
|
/* Or if there were no subds, this will free the peer. */
|
|
|
|
maybe_free_peer(peer);
|
2022-01-08 14:19:29 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
struct io_plan *multiplex_peer_setup(struct io_conn *peer_conn,
|
|
|
|
struct peer *peer)
|
|
|
|
{
|
2022-07-16 06:49:30 +02:00
|
|
|
/*~ If conn closes, we drain the subd connections and wait for
|
2022-01-08 14:19:29 +01:00
|
|
|
* lightningd to tell us to close with the peer */
|
|
|
|
tal_add_destructor2(peer_conn, destroy_peer_conn, peer);
|
|
|
|
|
2022-01-29 04:31:32 +01:00
|
|
|
/* Start keepalives */
|
|
|
|
peer->expecting_pong = PONG_UNEXPECTED;
|
|
|
|
set_ping_timer(peer);
|
|
|
|
|
2022-03-22 21:27:29 +01:00
|
|
|
/* This used to be in openingd; don't break tests. */
|
|
|
|
status_peer_debug(&peer->id, "Handed peer, entering loop");
|
|
|
|
|
2022-01-08 14:19:29 +01:00
|
|
|
return io_duplex(peer_conn,
|
2022-01-08 14:22:29 +01:00
|
|
|
read_hdr_from_peer(peer_conn, peer),
|
2022-01-08 14:19:29 +01:00
|
|
|
write_to_peer(peer_conn, peer));
|
|
|
|
}
|
|
|
|
|
2022-07-18 14:12:18 +02:00
|
|
|
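/* lightningd gives us the fd for the subdaemon that will own this
 * channel: attach it to the existing subd (created when the peer spoke)
 * or to a fresh one. */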
void peer_connect_subd(struct daemon *daemon, const u8 *msg, int fd)
|
|
|
|
{
|
|
|
|
struct node_id id;
|
2022-07-18 14:12:27 +02:00
|
|
|
u64 counter;
|
2022-07-18 14:12:18 +02:00
|
|
|
struct peer *peer;
|
|
|
|
struct channel_id channel_id;
|
|
|
|
struct subd *subd;
|
|
|
|
|
2022-07-18 14:12:27 +02:00
|
|
|
if (!fromwire_connectd_peer_connect_subd(msg, &id, &counter, &channel_id))
|
2022-07-18 14:12:18 +02:00
|
|
|
master_badmsg(WIRE_CONNECTD_PEER_CONNECT_SUBD, msg);
|
|
|
|
|
2022-07-18 14:12:27 +02:00
|
|
|
/* Races can happen: this might be gone by now (or reconnected!). */
|
2023-01-03 05:46:52 +01:00
|
|
|
peer = peer_htable_get(daemon->peers, &id);
|
2022-07-18 14:12:27 +02:00
|
|
|
if (!peer || peer->counter != counter) {
|
2022-07-18 14:12:18 +02:00
|
|
|
close(fd);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Could be disconnecting now */
|
|
|
|
if (!peer->to_peer) {
|
|
|
|
close(fd);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If peer said something, we created this and queued msg. */
|
|
|
|
subd = find_subd(peer, &channel_id);
|
|
|
|
if (!subd)
|
|
|
|
subd = new_subd(peer, &channel_id);
|
|
|
|
|
|
|
|
assert(!subd->conn);
|
2022-07-18 14:12:27 +02:00
|
|
|
|
|
|
|
/* This sets subd->conn inside subd_conn_init, and reparents subd! */
|
|
|
|
io_new_conn(peer, fd, subd_conn_init, subd);
|
2022-07-18 14:12:18 +02:00
|
|
|
}
|
2022-01-29 04:31:32 +01:00
|
|
|
|
|
|
|
/* Lightningd says to send a ping */
|
|
|
|
void send_manual_ping(struct daemon *daemon, const u8 *msg)
|
|
|
|
{
|
|
|
|
u8 *ping;
|
|
|
|
struct node_id id;
|
|
|
|
u16 len, num_pong_bytes;
|
|
|
|
struct peer *peer;
|
|
|
|
|
|
|
|
if (!fromwire_connectd_ping(msg, &id, &num_pong_bytes, &len))
|
|
|
|
master_badmsg(WIRE_CONNECTD_PING, msg);
|
|
|
|
|
2023-01-03 05:46:52 +01:00
|
|
|
peer = peer_htable_get(daemon->peers, &id);
|
2022-01-29 04:31:32 +01:00
|
|
|
if (!peer) {
|
|
|
|
daemon_conn_send(daemon->master,
|
|
|
|
take(towire_connectd_ping_reply(NULL,
|
|
|
|
false, 0)));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We're not supposed to send another ping until the previous one has been answered */
|
|
|
|
if (peer->expecting_pong != PONG_UNEXPECTED) {
|
|
|
|
daemon_conn_send(daemon->master,
|
|
|
|
take(towire_connectd_ping_reply(NULL,
|
|
|
|
false, 0)));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* It should never ask for an oversize ping. */
|
|
|
|
ping = make_ping(NULL, num_pong_bytes, len);
|
|
|
|
if (tal_count(ping) > 65535)
|
|
|
|
status_failed(STATUS_FAIL_MASTER_IO, "Oversize ping");
|
|
|
|
|
2022-01-29 04:33:05 +01:00
|
|
|
inject_peer_msg(peer, take(ping));
|
2022-01-29 04:31:32 +01:00
|
|
|
|
|
|
|
status_debug("sending ping expecting %sresponse",
|
|
|
|
num_pong_bytes >= 65532 ? "no " : "");
|
|
|
|
|
|
|
|
/* BOLT #1:
|
|
|
|
*
|
|
|
|
* A node receiving a `ping` message:
|
|
|
|
* - if `num_pong_bytes` is less than 65532:
|
|
|
|
* - MUST respond by sending a `pong` message, with `byteslen` equal
|
|
|
|
* to `num_pong_bytes`.
|
|
|
|
* - otherwise (`num_pong_bytes` is **not** less than 65532):
|
|
|
|
* - MUST ignore the `ping`.
|
|
|
|
*/
|
|
|
|
if (num_pong_bytes >= 65532) {
|
|
|
|
daemon_conn_send(daemon->master,
|
|
|
|
take(towire_connectd_ping_reply(NULL,
|
|
|
|
true, 0)));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We'll respond to lightningd once the pong comes in */
|
|
|
|
peer->expecting_pong = PONG_EXPECTED_COMMAND;
|
|
|
|
|
|
|
|
/* Since we're doing this manually, kill and restart timer. */
|
|
|
|
tal_free(peer->ping_timer);
|
|
|
|
set_ping_timer(peer);
|
|
|
|
}
|