mirror of
https://github.com/ElementsProject/lightning.git
synced 2025-01-19 05:44:12 +01:00
gossipd: don't validate UTXOs on our own channels.
It's an unnecessary round-trip, and it can cause us to complain in CI in the case where the channel has already been closed by the time we ask. Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This commit is contained in:
parent
f7ff3613c6
commit
8f7f76f8c6
@ -812,8 +812,9 @@ static void inject_gossip(struct daemon *daemon, const u8 *msg)
|
||||
u8 *goss;
|
||||
const u8 *errmsg;
|
||||
const char *err;
|
||||
struct amount_sat *known_amount;
|
||||
|
||||
if (!fromwire_gossipd_addgossip(msg, msg, &goss))
|
||||
if (!fromwire_gossipd_addgossip(msg, msg, &goss, &known_amount))
|
||||
master_badmsg(WIRE_GOSSIPD_ADDGOSSIP, msg);
|
||||
|
||||
switch (fromwire_peektype(goss)) {
|
||||
|
@ -68,6 +68,7 @@ msgtype,gossipd_new_blockheight_reply,3126
|
||||
msgtype,gossipd_addgossip,3044
|
||||
msgdata,gossipd_addgossip,len,u16,
|
||||
msgdata,gossipd_addgossip,msg,u8,len
|
||||
msgdata,gossipd_addgossip,known_channel,?amount_sat,
|
||||
|
||||
# Empty string means no problem.
|
||||
msgtype,gossipd_addgossip_reply,3144
|
||||
|
|
@ -371,7 +371,8 @@ static void peer_warning(struct gossmap_manage *gm,
|
||||
const char *gossmap_manage_channel_announcement(const tal_t *ctx,
|
||||
struct gossmap_manage *gm,
|
||||
const u8 *announce TAKES,
|
||||
const struct node_id *source_peer TAKES)
|
||||
const struct node_id *source_peer TAKES,
|
||||
const struct amount_sat *known_amount)
|
||||
{
|
||||
secp256k1_ecdsa_signature node_signature_1, node_signature_2;
|
||||
secp256k1_ecdsa_signature bitcoin_signature_1, bitcoin_signature_2;
|
||||
@ -427,6 +428,19 @@ const char *gossmap_manage_channel_announcement(const tal_t *ctx,
|
||||
pca->channel_announcement = tal_dup_talarr(pca, u8, announce);
|
||||
pca->source_peer = tal_dup_or_null(pca, struct node_id, source_peer);
|
||||
|
||||
/* Are we supposed to add immediately without checking with lightningd?
|
||||
* Unless we already got it from a peer and we're processing now!
|
||||
*/
|
||||
if (known_amount
|
||||
&& !uintmap_get(&gm->pending_ann_map, scid.u64)
|
||||
&& !uintmap_get(&gm->early_ann_map, scid.u64)) {
|
||||
/* Set with timestamp 0 (we will update once we have a channel_update) */
|
||||
gossip_store_add(gm->daemon->gs, announce, 0, false,
|
||||
towire_gossip_store_channel_amount(tmpctx, *known_amount));
|
||||
tal_free(pca);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* FIXME: Flood protection! */
|
||||
/* Don't know blockheight yet, or not yet deep enough? Don't even ask */
|
||||
if (!is_scid_depth_announceable(&scid, blockheight)) {
|
||||
|
@ -14,13 +14,18 @@ struct gossmap_manage *gossmap_manage_new(const tal_t *ctx,
|
||||
* @gm: the gossmap_manage context
|
||||
* @announce: the channel_announcement message
|
||||
* @source_peer: peer who sent this (NULL if it's from lightningd)
|
||||
* @known_amount: if non-NULL, do not ask lightningd to look up UTXO.
|
||||
*
|
||||
* Returns an error string if it wasn't redundant or included.
|
||||
* Returns an error string if it wasn't redundant or included. Lightningd
|
||||
* suppresses lookups if it generated the announcement, partially because it's
|
||||
* redundant, but also because in our tests the UTXO is often spent by the time
|
||||
* it processes the lookup!
|
||||
*/
|
||||
const char *gossmap_manage_channel_announcement(const tal_t *ctx,
|
||||
struct gossmap_manage *gm,
|
||||
const u8 *announce TAKES,
|
||||
const struct node_id *source_peer TAKES);
|
||||
const struct node_id *source_peer TAKES,
|
||||
const struct amount_sat *known_amount);
|
||||
|
||||
|
||||
/**
|
||||
|
@ -324,7 +324,7 @@ static void broadcast_public_cupdate(struct channel *channel,
|
||||
take(sign_update(NULL, channel->peer->ld, cupdate)));
|
||||
|
||||
subd_req(ld->gossip, ld->gossip,
|
||||
take(towire_gossipd_addgossip(NULL, cg->cupdate)),
|
||||
take(towire_gossipd_addgossip(NULL, cg->cupdate, NULL)),
|
||||
-1, 0, broadcast_public_cupdate_addgossip_reply, channel);
|
||||
}
|
||||
|
||||
@ -496,7 +496,7 @@ static void send_channel_announcement(struct channel *channel)
|
||||
&cg->remote_sigs->bitcoin_sig);
|
||||
|
||||
subd_req(ld->gossip, ld->gossip,
|
||||
take(towire_gossipd_addgossip(NULL, ca)),
|
||||
take(towire_gossipd_addgossip(NULL, ca, &channel->funding_sats)),
|
||||
-1, 0, send_channel_announce_addgossip_reply, channel);
|
||||
/* We can also send our first public channel_update now */
|
||||
broadcast_public_cupdate(channel, true);
|
||||
@ -1088,6 +1088,6 @@ void channel_gossip_node_announce(struct lightningd *ld)
|
||||
|
||||
/* Tell gossipd. */
|
||||
subd_req(ld->gossip, ld->gossip,
|
||||
take(towire_gossipd_addgossip(NULL, nannounce)),
|
||||
take(towire_gossipd_addgossip(NULL, nannounce, NULL)),
|
||||
-1, 0, node_announce_addgossip_reply, NULL);
|
||||
}
|
||||
|
@ -410,7 +410,7 @@ static struct command_result *json_addgossip(struct command *cmd,
|
||||
NULL))
|
||||
return command_param_failed();
|
||||
|
||||
req = towire_gossipd_addgossip(cmd, gossip_msg);
|
||||
req = towire_gossipd_addgossip(cmd, gossip_msg, NULL);
|
||||
subd_req(cmd->ld->gossip, cmd->ld->gossip,
|
||||
req, -1, 0, json_addgossip_reply, cmd);
|
||||
|
||||
|
@ -1063,7 +1063,7 @@ u8 *towire_final_incorrect_cltv_expiry(const tal_t *ctx UNNEEDED, u32 cltv_expir
|
||||
u8 *towire_final_incorrect_htlc_amount(const tal_t *ctx UNNEEDED, struct amount_msat incoming_htlc_amt UNNEEDED)
|
||||
{ fprintf(stderr, "towire_final_incorrect_htlc_amount called!\n"); abort(); }
|
||||
/* Generated stub for towire_gossipd_addgossip */
|
||||
u8 *towire_gossipd_addgossip(const tal_t *ctx UNNEEDED, const u8 *msg UNNEEDED)
|
||||
u8 *towire_gossipd_addgossip(const tal_t *ctx UNNEEDED, const u8 *msg UNNEEDED, struct amount_sat *known_channel UNNEEDED)
|
||||
{ fprintf(stderr, "towire_gossipd_addgossip called!\n"); abort(); }
|
||||
/* Generated stub for towire_hsmd_check_pubkey */
|
||||
u8 *towire_hsmd_check_pubkey(const tal_t *ctx UNNEEDED, u32 index UNNEEDED, const struct pubkey *pubkey UNNEEDED)
|
||||
|
Loading…
Reference in New Issue
Block a user