gossipd: neaten code now that we don't have to prepend the prefix afterwards.

We can simply start the array with a '0' (ARR_UNCOMPRESSED) byte where
the encoding is prefixed, which simplifies the internal interface.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Rusty Russell 2022-05-18 10:21:16 +09:30
parent 5735f71e3c
commit a4c365f8d0
2 changed files with 26 additions and 47 deletions
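
The change below collapses the old encoding_end_no_compress() / encoding_end_prepend_type() / encoding_end_external_type() trio into a single encoding_end(): encoding_start() now takes a flag saying whether the encoding byte lives inside the array, writes ARR_UNCOMPRESSED (0) up front when it does, and encoding_end() is reduced to a size check. A rough caller-side sketch of the two patterns (illustrative only, trimmed from the real callers in the diff; loops, TLV setup and error handling are elided):

/* Arrays that carry their own encoding byte (e.g. encoded_short_ids): */
encoded = encoding_start(tmpctx, true);         /* byte 0 = ARR_UNCOMPRESSED */
encoding_add_short_channel_id(&encoded, &scid);
if (!encoding_end(encoded, max_encoded_bytes))  /* now just a length check */
        return false;

/* Arrays whose encoding type is a separate TLV field: the caller sets it. */
tlvq->encoding_type = ARR_UNCOMPRESSED;
tlvq->encoded_query_flags = encoding_start(tlvq, false);
encoding_add_query_flag(&tlvq->encoded_query_flags, flag);
if (!encoding_end(tlvq->encoded_query_flags, max_encoded_bytes))
        return false;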

@@ -27,9 +27,15 @@ static u32 dev_max_encoding_bytes = -1U;
  * simple compression scheme: the first byte indicates the encoding, the
  * rest contains the data.
  */
-static u8 *encoding_start(const tal_t *ctx)
+static u8 *encoding_start(const tal_t *ctx, bool prepend_encoding)
 {
-        return tal_arr(ctx, u8, 0);
+        u8 *ret;
+        if (prepend_encoding) {
+                ret = tal_arr(ctx, u8, 1);
+                ret[0] = ARR_UNCOMPRESSED;
+        } else
+                ret = tal_arr(ctx, u8, 0);
+        return ret;
 }
 
 /* Marshal a single short_channel_id */
@@ -52,35 +58,13 @@ static void encoding_add_query_flag(u8 **encoded, bigsize_t flag)
         towire_bigsize(encoded, flag);
 }
 
-static void encoding_end_no_compress(u8 **encoded, size_t off)
+static bool encoding_end(const u8 *encoded, size_t max_bytes)
 {
-        size_t len = tal_count(*encoded);
-        tal_resize(encoded, off + len);
-        memmove(*encoded + off, *encoded, len);
-}
-
-/* Once we've assembled it, try compressing.
- * Prepends encoding type to @encoding. */
-static bool encoding_end_prepend_type(u8 **encoded, size_t max_bytes)
-{
-        encoding_end_no_compress(encoded, 1);
-        **encoded = ARR_UNCOMPRESSED;
-
 #if DEVELOPER
-        if (tal_count(*encoded) > dev_max_encoding_bytes)
+        if (tal_count(encoded) > dev_max_encoding_bytes)
                 return false;
 #endif
-        return tal_count(*encoded) <= max_bytes;
-}
-
-/* Try compressing, leaving type external */
-static bool encoding_end_external_type(u8 **encoded, u8 *type, size_t max_bytes)
-{
-        encoding_end_no_compress(encoded, 0);
-        *type = ARR_UNCOMPRESSED;
-        return tal_count(*encoded) <= max_bytes;
+        return tal_count(encoded) <= max_bytes;
 }
 
 /* Query this peer for these short-channel-ids. */
@@ -126,7 +110,7 @@ bool query_short_channel_ids(struct daemon *daemon,
         if (peer->scid_query_outstanding)
                 return false;
 
-        encoded = encoding_start(tmpctx);
+        encoded = encoding_start(tmpctx, true);
         for (size_t i = 0; i < tal_count(scids); i++) {
                 /* BOLT #7:
                  *
@@ -139,7 +123,7 @@ bool query_short_channel_ids(struct daemon *daemon,
                 encoding_add_short_channel_id(&encoded, &scids[i]);
         }
 
-        if (!encoding_end_prepend_type(&encoded, max_encoded_bytes)) {
+        if (!encoding_end(encoded, max_encoded_bytes)) {
                 status_broken("query_short_channel_ids: %zu is too many",
                               tal_count(scids));
                 return false;
@@ -150,15 +134,15 @@ bool query_short_channel_ids(struct daemon *daemon,
                 tlvs = tlv_query_short_channel_ids_tlvs_new(tmpctx);
                 tlvq = tlvs->query_flags = tal(tlvs,
                            struct tlv_query_short_channel_ids_tlvs_query_flags);
-                tlvq->encoded_query_flags = encoding_start(tlvq);
+                tlvq->encoding_type = ARR_UNCOMPRESSED;
+                tlvq->encoded_query_flags = encoding_start(tlvq, false);
                 for (size_t i = 0; i < tal_count(query_flags); i++)
                         encoding_add_query_flag(&tlvq->encoded_query_flags,
                                                 query_flags[i]);
                 max_encoded_bytes -= tal_bytelen(encoded);
-                if (!encoding_end_external_type(&tlvq->encoded_query_flags,
-                                                &tlvq->encoding_type,
-                                                max_encoded_bytes)) {
+                if (!encoding_end(tlvq->encoded_query_flags,
+                                  max_encoded_bytes)) {
                         status_broken("query_short_channel_ids:"
                                       " %zu query_flags is too many",
                                       tal_count(query_flags));
@@ -305,24 +289,24 @@ static void send_reply_channel_range(struct peer *peer,
          * - MUST limit `number_of_blocks` to the maximum number of blocks
          *   whose results could fit in `encoded_short_ids`
          */
-        u8 *encoded_scids = encoding_start(tmpctx);
-        u8 *encoded_timestamps = encoding_start(tmpctx);
+        u8 *encoded_scids = encoding_start(tmpctx, true);
+        u8 *encoded_timestamps = encoding_start(tmpctx, false);
         struct tlv_reply_channel_range_tlvs *tlvs
                 = tlv_reply_channel_range_tlvs_new(tmpctx);
 
         /* Encode them all */
         for (size_t i = 0; i < num_scids; i++)
                 encoding_add_short_channel_id(&encoded_scids, &scids[i]);
-        encoding_end_prepend_type(&encoded_scids, tal_bytelen(encoded_scids));
+        encoding_end(encoded_scids, tal_bytelen(encoded_scids));
 
         if (tstamps) {
                 for (size_t i = 0; i < num_scids; i++)
                         encoding_add_timestamps(&encoded_timestamps, &tstamps[i]);
 
                 tlvs->timestamps_tlv = tal(tlvs, struct tlv_reply_channel_range_tlvs_timestamps_tlv);
-                encoding_end_external_type(&encoded_timestamps,
-                                           &tlvs->timestamps_tlv->encoding_type,
-                                           tal_bytelen(encoded_timestamps));
+                tlvs->timestamps_tlv->encoding_type = ARR_UNCOMPRESSED;
+                encoding_end(encoded_timestamps,
+                             tal_bytelen(encoded_timestamps));
                 tlvs->timestamps_tlv->encoded_timestamps
                         = tal_steal(tlvs, encoded_timestamps);
         }

@@ -206,7 +206,7 @@ static u8 *get_scid_array(const tal_t *ctx,
         scids = json_get_member(test_vector, obj, "shortChannelIds");
         arr = json_get_member(test_vector, scids, "array");
 
-        encoded = encoding_start(ctx);
+        encoded = encoding_start(ctx, true);
         encoding = json_get_member(test_vector, scids, "encoding");
         json_for_each_arr(i, t, arr) {
                 struct short_channel_id scid;
@@ -214,8 +214,6 @@ static u8 *get_scid_array(const tal_t *ctx,
                 encoding_add_short_channel_id(&encoded, &scid);
         }
         assert(json_tok_streq(test_vector, encoding, "UNCOMPRESSED"));
-        encoding_end_no_compress(&encoded, 1);
-        encoded[0] = ARR_UNCOMPRESSED;
 
         return encoded;
 }
@@ -274,7 +272,7 @@ static u8 *test_reply_channel_range(const char *test_vector, const jsmntok_t *obj
                         = tal(tlvs, struct tlv_reply_channel_range_tlvs_timestamps_tlv);
                 tlvs->timestamps_tlv->encoded_timestamps
-                        = encoding_start(tlvs->timestamps_tlv);
+                        = encoding_start(tlvs->timestamps_tlv, false);
                 encodingtok = json_get_member(test_vector, opt, "encoding");
                 tstok = json_get_member(test_vector, opt, "timestamps");
                 json_for_each_arr(i, t, tstok) {
@@ -290,8 +288,6 @@ static u8 *test_reply_channel_range(const char *test_vector, const jsmntok_t *obj
                         encoding_add_timestamps(&tlvs->timestamps_tlv->encoded_timestamps, &ts);
                 }
                 assert(json_tok_streq(test_vector, encodingtok, "UNCOMPRESSED"));
-                encoding_end_no_compress(&tlvs->timestamps_tlv->encoded_timestamps,
-                                         0);
                 tlvs->timestamps_tlv->encoding_type = ARR_UNCOMPRESSED;
         }
@@ -335,7 +331,7 @@ get_query_flags_array(const tal_t *ctx,
         arr = json_get_member(test_vector, opt, "array");
 
-        tlv->encoded_query_flags = encoding_start(tlv);
+        tlv->encoded_query_flags = encoding_start(tlv, false);
         encoding = json_get_member(test_vector, opt, "encoding");
         json_for_each_arr(i, t, arr) {
                 bigsize_t f;
@@ -343,7 +339,6 @@ get_query_flags_array(const tal_t *ctx,
                 encoding_add_query_flag(&tlv->encoded_query_flags, f);
         }
         assert(json_tok_streq(test_vector, encoding, "UNCOMPRESSED"));
-        encoding_end_no_compress(&tlv->encoded_query_flags, 0);
         tlv->encoding_type = ARR_UNCOMPRESSED;
 
         return tlv;
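
For reference, the '0' the commit message starts the array with is the BOLT #7 array-encoding byte: 0 (ARR_UNCOMPRESSED) means the raw elements follow directly. A hypothetical uncompressed encoded_short_ids payload with two made-up short_channel_ids (8 bytes each: 3-byte block height, 3-byte tx index, 2-byte output index) would look like this:

/* Illustrative only; the scid values are invented. */
static const unsigned char example_encoded_scids[] = {
        0x00,                                           /* ARR_UNCOMPRESSED */
        0x00, 0x00, 0x64, 0x00, 0x00, 0x01, 0x00, 0x00, /* scid 100x1x0 */
        0x00, 0x00, 0xc8, 0x00, 0x00, 0x02, 0x00, 0x01, /* scid 200x2x1 */
};

Since encoding_start(ctx, true) now emits that leading byte immediately, the old memmove()-based encoding_end_no_compress() shuffle that prepended it afterwards is no longer needed.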