lightningd: remove some "single active channel" assumptions.

Generally this means converting a lazy "peer_active_channel(peer)" call
into an explicit iteration over peer->channels (see the sketch below).

1. notify_feerate_change: call try_update_feerates on every channel (it
   ignores non-active ones anyway).
2. peer_get_owning_subd: remove unused function.
3. peer_connected hook: don't save the channel in the payload; iterate the
   peer's channels instead.
4. In json_setchannelfee "all", remove the useless call to peer_active_channel
   (we check the state anyway) and iterate all channels.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
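
Editor's note: the following is a minimal, self-contained sketch of the conversion pattern described above, for readers skimming the diffs below. It is illustrative only: lightningd actually uses ccan's list_for_each() over peer->channels and helpers such as channel_active(), while the structs, the peer_active_channel() body and the act_on_* functions here are simplified stand-ins invented for this sketch.

	/* Illustrative sketch only (not lightningd code): shows the shape of the
	 * change from "find the single active channel" to "iterate every channel". */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct channel {
		const char *state;	/* stand-in for enum channel_state */
		bool active;		/* stand-in for channel_active(channel) */
		struct channel *next;	/* stand-in for the ccan list linkage */
	};

	struct peer {
		struct channel *channels;	/* stand-in for struct list_head channels */
	};

	/* Old assumption: a peer has at most one interesting ("active") channel. */
	static struct channel *peer_active_channel(struct peer *peer)
	{
		for (struct channel *c = peer->channels; c; c = c->next)
			if (c->active)
				return c;
		return NULL;
	}

	static void handle_channel(const struct channel *c)
	{
		printf("handling channel in state %s\n", c->state);
	}

	/* Before: act on the single active channel, or do nothing. */
	static void act_on_peer_before(struct peer *peer)
	{
		struct channel *channel = peer_active_channel(peer);
		if (!channel)
			return;
		handle_channel(channel);
	}

	/* After: iterate all channels; each call site filters by state itself. */
	static void act_on_peer_after(struct peer *peer)
	{
		for (struct channel *c = peer->channels; c; c = c->next) {
			if (!c->active)	/* e.g. skip closed/onchain states */
				continue;
			handle_channel(c);
		}
	}

	int main(void)
	{
		struct channel closing = { "CLOSINGD_SIGEXCHANGE", true, NULL };
		struct channel normal = { "CHANNELD_NORMAL", true, &closing };
		struct peer peer = { &normal };

		act_on_peer_before(&peer);	/* touches only the first active channel */
		act_on_peer_after(&peer);	/* touches both channels */
		return 0;
	}

In the real code the "after" shape appears as list_for_each(&peer->channels, channel, list) with per-call-site state checks, as in the peer_control.c hunks below.
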
Rusty Russell 2022-03-23 07:00:54 +10:30
parent 7de7b7be61
commit 90be2cc104
4 changed files with 47 additions and 81 deletions

View file

@@ -117,15 +117,14 @@ void notify_feerate_change(struct lightningd *ld)
 	/* FIXME: We should notify onchaind about NORMAL fee change in case
 	 * it's going to generate more txs. */
 	list_for_each(&ld->peers, peer, list) {
-		struct channel *channel = peer_active_channel(peer);
+		struct channel *channel;
 
-		if (!channel)
-			continue;
-
-		/* FIXME: We choose not to drop to chain if we can't contact
-		 * peer. We *could* do so, however. */
-		try_update_feerates(ld, channel);
+		list_for_each(&peer->channels, channel, list)
+			try_update_feerates(ld, channel);
 	}
+
+	/* FIXME: We choose not to drop to chain if we can't contact
+	 * peer. We *could* do so, however. */
 }
 
 void channel_record_open(struct channel *channel)

View file

@@ -1252,16 +1252,3 @@ static const struct json_command fundchannel_complete_command = {
 	"with {psbt}. Returns true on success, false otherwise."
 };
 AUTODATA(json_command, &fundchannel_complete_command);
-
-struct subd *peer_get_owning_subd(struct peer *peer)
-{
-	struct channel *channel;
-
-	channel = peer_active_channel(peer);
-	if (channel != NULL) {
-		return channel->owner;
-	} else if (peer->uncommitted_channel != NULL) {
-		return peer->uncommitted_channel->open_daemon;
-	}
-	return NULL;
-}

View file

@@ -939,7 +939,6 @@ static void json_add_channel(struct lightningd *ld,
 
 struct peer_connected_hook_payload {
 	struct lightningd *ld;
-	struct channel *channel;
 	struct wireaddr_internal addr;
 	struct wireaddr *remote_addr;
 	bool incoming;
@@ -969,7 +968,7 @@ peer_connected_serialize(struct peer_connected_hook_payload *payload,
 static void peer_connected_hook_final(struct peer_connected_hook_payload *payload STEALS)
 {
 	struct lightningd *ld = payload->ld;
-	struct channel *channel = payload->channel;
+	struct channel *channel;
 	struct wireaddr_internal addr = payload->addr;
 	struct peer *peer = payload->peer;
 	u8 *error;
@@ -988,16 +987,7 @@ static void peer_connected_hook_final(struct peer_connected_hook_payload *payloa
 		goto send_error;
 	}
 
-	if (channel) {
-		log_debug(channel->log, "Peer has reconnected, state %s",
-			  channel_state_name(channel));
-
-		/* If we have a canned error, deliver it now. */
-		if (channel->error) {
-			error = channel->error;
-			goto send_error;
-		}
-
+	list_for_each(&peer->channels, channel, list) {
 #if DEVELOPER
 		if (dev_disconnect_permanent(ld)) {
 			channel_fail_permanent(channel, REASON_LOCAL,
@@ -1008,39 +998,38 @@ static void peer_connected_hook_final(struct peer_connected_hook_payload *payloa
 #endif
 		switch (channel->state) {
-		case ONCHAIN:
-		case FUNDING_SPEND_SEEN:
-		case CLOSINGD_COMPLETE:
-			/* Channel is supposed to be active!*/
-			abort();
 		case CLOSED:
 			/* Channel should not have been loaded */
 			abort();
 
-		/* We consider this "active" but we only send an error */
-		case AWAITING_UNILATERAL: {
-			/* channel->error is not saved in db, so this can
-			 * happen if we restart. */
-			error = towire_errorfmt(tmpctx, &channel->cid,
-						"Awaiting unilateral close");
-			goto send_error;
-		}
+		case ONCHAIN:
+		case FUNDING_SPEND_SEEN:
+		case CLOSINGD_COMPLETE:
+		case AWAITING_UNILATERAL:
+			/* We don't send anything here; if they talk about
+			 * this channel they'll get an error. */
+			continue;
 
 		case DUALOPEND_OPEN_INIT:
 		case DUALOPEND_AWAITING_LOCKIN:
 		case CHANNELD_AWAITING_LOCKIN:
 		case CHANNELD_NORMAL:
 		case CHANNELD_SHUTTING_DOWN:
 		case CLOSINGD_SIGEXCHANGE:
+			log_debug(channel->log, "Peer has reconnected, state %s: telling connectd to make active",
+				  channel_state_name(channel));
 			assert(!channel->owner);
 			channel->peer->addr = addr;
 			channel->peer->connected_incoming = payload->incoming;
-			goto make_active;
+			subd_send_msg(ld->connectd,
+				      take(towire_connectd_peer_make_active(NULL, &peer->id,
+									    &channel->cid)));
+			continue;
 		}
+
+		/* Oops, channel->state is corrupted? */
 		abort();
 	}
 
-	/* If we get here, it means we have no channel */
-	assert(!channel);
 	return;
 
 send_error:
@@ -1050,15 +1039,6 @@ send_error:
 	subd_send_msg(ld->connectd,
 		      take(towire_connectd_peer_final_msg(NULL, &peer->id,
 							  error)));
-	return;
-
-make_active:
-	log_peer_debug(ld->log, &peer->id,
-		       "Telling connectd to make active, state %s",
-		       channel_state_name(channel));
-	subd_send_msg(ld->connectd,
-		      take(towire_connectd_peer_make_active(NULL, &peer->id,
-							    &channel->cid)));
 }
 
 static bool
@@ -1210,11 +1190,6 @@ void peer_connected(struct lightningd *ld, const u8 *msg)
 
 	/* Can't be opening, since we wouldn't have sent peer_disconnected. */
 	assert(!peer->uncommitted_channel);
-
-	hook_payload->channel = peer_active_channel(peer);
-	/* It might be v2 opening, though, since we hang onto these */
-	if (!hook_payload->channel)
-		hook_payload->channel = peer_unsaved_channel(peer);
 
 	/* Log and update remote_addr for Nat/IP discovery. */
 	if (hook_payload->remote_addr) {
@@ -1222,7 +1197,7 @@ void peer_connected(struct lightningd *ld, const u8 *msg)
 			   fmt_wireaddr(tmpctx, hook_payload->remote_addr));
 		/* Currently only from peers we have a channel with, until we
 		 * do stuff like probing for remote_addr to a random node. */
-		if (hook_payload->channel)
+		if (!list_empty(&peer->channels))
 			update_remote_addr(ld, hook_payload->remote_addr, id);
 	}
 
@@ -1259,6 +1234,12 @@ void peer_active(struct lightningd *ld, const u8 *msg, int fd)
 	/* Do we know what channel they're talking about? */
 	channel = find_channel_by_id(peer, &channel_id);
 	if (channel) {
+		/* If we have a canned error for this channel, send it now */
+		if (channel->error) {
+			error = channel->error;
+			goto send_error;
+		}
+
 		switch (channel->state) {
 		case ONCHAIN:
 		case FUNDING_SPEND_SEEN:
@@ -1777,11 +1758,12 @@ command_find_channel(struct command *cmd,
 
 	if (json_tok_channel_id(buffer, tok, &cid)) {
 		list_for_each(&ld->peers, peer, list) {
-			*channel = peer_active_channel(peer);
-			if (!*channel)
-				continue;
-			if (channel_id_eq(&(*channel)->cid, &cid))
-				return NULL;
+			list_for_each(&peer->channels, (*channel), list) {
+				if (!channel_active(*channel))
+					continue;
+				if (channel_id_eq(&(*channel)->cid, &cid))
+					return NULL;
+			}
 		}
 		return command_fail(cmd, JSONRPC2_INVALID_PARAMS,
 				    "Channel ID not found: '%.*s'",
@@ -2350,17 +2332,15 @@ static struct command_result *json_setchannelfee(struct command *cmd,
 	/* If the users requested 'all' channels we need to iterate */
 	if (channel == NULL) {
 		list_for_each(&cmd->ld->peers, peer, list) {
-			channel = peer_active_channel(peer);
-			if (!channel)
-				continue;
-
-			if (channel->state != CHANNELD_NORMAL &&
-			    channel->state != CHANNELD_AWAITING_LOCKIN &&
-			    channel->state != DUALOPEND_AWAITING_LOCKIN)
-				continue;
-			set_channel_config(cmd, channel, base, ppm, NULL, NULL,
-					   *delaysecs, response, false);
+			list_for_each(&peer->channels, channel, list) {
+				if (channel->state != CHANNELD_NORMAL &&
+				    channel->state != CHANNELD_AWAITING_LOCKIN &&
+				    channel->state != DUALOPEND_AWAITING_LOCKIN)
+					continue;
+				set_channel_config(cmd, channel, base, ppm, NULL, NULL,
+						   *delaysecs, response, false);
+			}
 		}
 	/* single channel should be updated */
 	} else {
 		set_channel_config(cmd, channel, base, ppm, NULL, NULL,

View file

@@ -935,7 +935,7 @@ def test_channel_state_changed_unilateral(node_factory, bitcoind):
     assert(l2.rpc.listpeers()['peers'][0]['channels'][0]['closer'] == 'local')
     if EXPERIMENTAL_DUAL_FUND:
         l1.daemon.wait_for_log(r'Peer has reconnected, state')
-        l2.daemon.wait_for_log(r'Peer has reconnected, state')
+        l2.daemon.wait_for_log(r'Telling connectd to send error')
 
     # l1 will receive error, and go into AWAITING_UNILATERAL
     # FIXME: l2 should re-xmit shutdown, but it doesn't until it's mined :(