/* Hello friends!
 *
 * You found me! This is the inner, deep magic. Right here.
 *
 * To help development, we have a complete set of routines to scan for
 * tal-memory leaks (valgrind will detect non-tal memory leaks at exit,
 * but tal hierarchies tend to get freed at exit, so we need something
 * more sophisticated).
 *
 * Memleak detection is only active if DEVELOPER is set. It does several
 * things:
 * 1. attaches a backtrace list to every allocation, so we can tell
 *    where it came from.
 * 2. when memleak_find_allocations() is called, walks the entire tal
 *    tree and saves a pointer to all the objects it finds, with
 *    a few internal exceptions (including everything under 'tmpctx').
 *    It then calls registered helpers, which can remove opaque things,
 *    and handles notleak() objects.
 * 3. provides a routine to access any remaining pointers in the
 *    table: these are the leaks.
 */
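
/* Rough usage sketch (not lifted from any particular daemon): a DEVELOPER
 * build calls memleak_init() once at startup, then later builds the table
 * of live allocations and dumps whatever survives the helpers.  `report'
 * here stands in for a printf-style logger with a matching signature; it
 * is not defined in this file:
 *
 *	memleak_init();
 *	...
 *	struct htable *memtable = memleak_find_allocations(tmpctx, NULL, NULL);
 *	if (dump_memleak(memtable, report))
 *		abort();
 */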

#include "config.h"
#include <backtrace.h>
#include <ccan/cast/cast.h>
#include <ccan/crypto/siphash24/siphash24.h>
#include <ccan/htable/htable.h>
#include <ccan/intmap/intmap.h>
#include <ccan/tal/str/str.h>
#include <common/memleak.h>
#include <common/utils.h>

struct backtrace_state *backtrace_state;

#if DEVELOPER
static bool memleak_track;

struct memleak_helper {
	void (*cb)(struct htable *memtable, const tal_t *);
};

void *notleak_(void *ptr, bool plus_children)
{
	const char *name;

	/* If we're not tracking, don't do anything. */
	if (!memleak_track)
		return cast_const(void *, ptr);

	/* We use special tal names to mark notleak */
	name = tal_name(ptr);
	if (!name)
		name = "";
	if (plus_children)
		name = tal_fmt(tmpctx, "%s **NOTLEAK_IGNORE_CHILDREN**",
			       name);
	else
		name = tal_fmt(tmpctx, "%s **NOTLEAK**", name);
	tal_set_name(ptr, name);

	return cast_const(void *, ptr);
}
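
/* Callers normally reach this via the notleak() macro in common/memleak.h
 * rather than calling notleak_() directly.  A sketch, with a hypothetical
 * `struct timer_list' that is deliberately kept for the process lifetime:
 *
 *	timers = notleak(tal(NULL, struct timer_list));
 */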

static size_t hash_ptr(const void *elem, void *unused UNNEEDED)
{
	static struct siphash_seed seed;
	return siphash24(&seed, &elem, sizeof(elem));
}

static bool pointer_referenced(struct htable *memtable, const void *p)
{
	return htable_del(memtable, hash_ptr(p, NULL), p);
}

static void children_into_htable(const void *exclude1, const void *exclude2,
				 struct htable *memtable, const tal_t *p)
{
	const tal_t *i;

	for (i = tal_first(p); i; i = tal_next(i)) {
		const char *name = tal_name(i);

		if (i == exclude1 || i == exclude2)
			continue;

		if (name) {
			/* Don't add backtrace objects. */
			if (streq(name, "backtrace"))
				continue;

			/* Don't add our own memleak_helpers */
			if (strends(name, "struct memleak_helper"))
				continue;

			/* Don't add tal_link objects */
			if (strends(name, "struct link")
			    || strends(name, "struct linkable"))
				continue;

			/* ccan/io allocates its pollfd and io_plan arrays
			 * at the top level (no tal parent). */
			if (strends(name, "struct pollfd[]") && !tal_parent(i))
				continue;

			if (strends(name, "struct io_plan *[]") && !tal_parent(i))
				continue;

			/* Don't add tmpctx. */
			if (streq(name, "tmpctx"))
				continue;
		}
		htable_add(memtable, hash_ptr(i, NULL), i);
		children_into_htable(exclude1, exclude2, memtable, i);
	}
}

static void scan_for_pointers(struct htable *memtable,
			      const tal_t *p, size_t bytelen)
{
	size_t i, n;

	/* Search for (aligned) pointers. */
	n = bytelen / sizeof(void *);
	for (i = 0; i < n; i++) {
		void *ptr;

		memcpy(&ptr, (char *)p + i * sizeof(void *), sizeof(ptr));
		if (pointer_referenced(memtable, ptr))
			scan_for_pointers(memtable, ptr, tal_bytelen(ptr));
	}
}

void memleak_remove_region(struct htable *memtable,
			   const void *ptr, size_t bytelen)
{
	pointer_referenced(memtable, ptr);
	scan_for_pointers(memtable, ptr, bytelen);
}

static void remove_with_children(struct htable *memtable, const tal_t *p)
{
	const tal_t *i;

	pointer_referenced(memtable, p);
	for (i = tal_first(p); i; i = tal_next(i))
		remove_with_children(memtable, i);
}

/* memleak can't see inside hash tables, so do them manually */
void memleak_remove_htable(struct htable *memtable, const struct htable *ht)
{
	struct htable_iter i;
	const void *p;

	for (p = htable_first(ht, &i); p; p = htable_next(ht, &i))
		memleak_remove_region(memtable, p, tal_bytelen(p));
}

/* FIXME: If uintmap used tal, this wouldn't be necessary! */
void memleak_remove_intmap_(struct htable *memtable, const struct intmap *m)
{
	void *p;
	intmap_index_t i;

	for (p = intmap_first_(m, &i); p; p = intmap_after_(m, &i))
		memleak_remove_region(memtable, p, tal_bytelen(p));
}

static bool handle_strmap(const char *member, void *p, void *memtable_)
{
	struct htable *memtable = memtable_;

	/* The member name may *not* be a tal pointer, but it can be! */
	pointer_referenced(memtable, member);
	memleak_remove_region(memtable, p, tal_bytelen(p));

	/* Keep going */
	return true;
}

/* FIXME: If strmap used tal, this wouldn't be necessary! */
void memleak_remove_strmap_(struct htable *memtable, const struct strmap *m)
{
	strmap_iterate_(m, handle_strmap, memtable);
}
2017-12-15 11:17:54 +01:00
|
|
|
static bool ptr_match(const void *candidate, void *ptr)
|
|
|
|
{
|
|
|
|
return candidate == ptr;
|
|
|
|
}
|
|
|
|
|
2017-12-15 11:18:54 +01:00
|
|
|
const void *memleak_get(struct htable *memtable, const uintptr_t **backtrace)
|
2017-12-15 11:17:54 +01:00
|
|
|
{
|
|
|
|
struct htable_iter it;
|
|
|
|
const tal_t *i, *p;
|
|
|
|
|
2020-09-23 03:37:04 +02:00
|
|
|
/* Remove memtable itself */
|
|
|
|
pointer_referenced(memtable, memtable);
|
|
|
|
|
2017-12-15 11:17:54 +01:00
|
|
|
i = htable_first(memtable, &it);
|
|
|
|
if (!i)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* Delete from table (avoids parenting loops) */
|
|
|
|
htable_delval(memtable, &it);
|
|
|
|
|
|
|
|
/* Find ancestor, which is probably source of leak. */
|
|
|
|
for (p = tal_parent(i);
|
|
|
|
htable_get(memtable, hash_ptr(p, NULL), ptr_match, p);
|
|
|
|
i = p, p = tal_parent(i));
|
|
|
|
|
|
|
|
/* Delete all children */
|
|
|
|
remove_with_children(memtable, i);
|
|
|
|
|
2017-12-15 11:18:54 +01:00
|
|
|
/* Does it have a child called "backtrace"? */
|
|
|
|
for (*backtrace = tal_first(i);
|
|
|
|
*backtrace;
|
|
|
|
*backtrace = tal_next(*backtrace)) {
|
|
|
|
if (tal_name(*backtrace)
|
|
|
|
&& streq(tal_name(*backtrace), "backtrace"))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2017-12-15 11:17:54 +01:00
|
|
|
return i;
|
|
|
|
}
|
|
|
|
|
2017-12-15 11:18:54 +01:00
|
|
|
static int append_bt(void *data, uintptr_t pc)
|
2017-12-15 11:17:54 +01:00
|
|
|
{
|
2017-12-15 11:18:54 +01:00
|
|
|
uintptr_t *bt = data;
|
|
|
|
|
|
|
|
if (bt[0] == 32)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
bt[bt[0]++] = pc;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-02-21 16:06:07 +01:00
|
|
|
static void add_backtrace(tal_t *parent UNUSED, enum tal_notify_type type UNNEEDED,
|
2017-12-15 11:18:54 +01:00
|
|
|
void *child)
|
|
|
|
{
|
2018-07-28 08:00:20 +02:00
|
|
|
uintptr_t *bt = tal_arrz_label(child, uintptr_t, 32, "backtrace");
|
2017-12-15 11:18:54 +01:00
|
|
|
|
|
|
|
/* First serves as counter. */
|
|
|
|
bt[0] = 1;
|
|
|
|
backtrace_simple(backtrace_state, 2, append_bt, NULL, bt);
|
|
|
|
tal_add_notifier(child, TAL_NOTIFY_ADD_CHILD, add_backtrace);
|
|
|
|
}
|
|
|
|
|
2018-08-24 07:20:06 +02:00
|
|
|
static void add_backtrace_notifiers(const tal_t *root)
|
|
|
|
{
|
|
|
|
tal_add_notifier(root, TAL_NOTIFY_ADD_CHILD, add_backtrace);
|
|
|
|
|
|
|
|
for (tal_t *i = tal_first(root); i; i = tal_next(i))
|
|
|
|
add_backtrace_notifiers(i);
|
|
|
|
}
|
|
|
|
|
2019-09-06 06:42:05 +02:00
|
|
|
void memleak_add_helper_(const tal_t *p,
|
|
|
|
void (*cb)(struct htable *memtable, const tal_t *))
|
|
|
|
{
|
|
|
|
struct memleak_helper *mh = tal(p, struct memleak_helper);
|
|
|
|
mh->cb = cb;
|
|
|
|
}
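
/* Sketch of how a helper gets used (the struct and field names here are
 * hypothetical): an object owning an htable that plain pointer-scanning
 * cannot see into registers a callback via the memleak_add_helper() macro
 * from common/memleak.h, and the callback marks the table contents as
 * referenced:
 *
 *	static void memleak_help_cache(struct htable *memtable,
 *				       const struct cache *cache)
 *	{
 *		memleak_remove_htable(memtable, &cache->entries);
 *	}
 *	...
 *	memleak_add_helper(cache, memleak_help_cache);
 */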

/* Handle allocations marked with helpers or notleak() */
static void call_memleak_helpers(struct htable *memtable, const tal_t *p)
{
	const tal_t *i;

	for (i = tal_first(p); i; i = tal_next(i)) {
		const char *name = tal_name(i);

		if (name) {
			if (strends(name, "struct memleak_helper")) {
				const struct memleak_helper *mh = i;
				mh->cb(memtable, p);
			} else if (strends(name, " **NOTLEAK**")
				   || strends(name, "_notleak")) {
				pointer_referenced(memtable, i);
				memleak_remove_region(memtable, i,
						      tal_bytelen(i));
			} else if (strends(name,
					   " **NOTLEAK_IGNORE_CHILDREN**")) {
				remove_with_children(memtable, i);
				memleak_remove_region(memtable, i,
						      tal_bytelen(i));
			}
		}

		/* Recurse down looking for "notleak" children */
		call_memleak_helpers(memtable, i);
	}
}

struct htable *memleak_find_allocations(const tal_t *ctx,
					const void *exclude1,
					const void *exclude2)
{
	struct htable *memtable = tal(ctx, struct htable);
	htable_init(memtable, hash_ptr, NULL);

	if (memleak_track) {
		/* First, add all pointers off NULL to table. */
		children_into_htable(exclude1, exclude2, memtable, NULL);

		/* Iterate and call helpers to eliminate hard-to-get references. */
		call_memleak_helpers(memtable, NULL);
	}

	tal_add_destructor(memtable, htable_clear);
	return memtable;
}

void memleak_init(void)
{
	memleak_track = true;
	if (backtrace_state)
		add_backtrace_notifiers(NULL);
}

static int dump_syminfo(void *data, uintptr_t pc UNUSED,
			const char *filename, int lineno,
			const char *function)
{
	void PRINTF_FMT(1,2) (*print)(const char *fmt, ...) = data;

	/* This can happen in backtraces. */
	if (!filename || !function)
		return 0;

	print("    %s:%u (%s)", filename, lineno, function);
	return 0;
}

static void dump_leak_backtrace(const uintptr_t *bt,
				void PRINTF_FMT(1,2)
				(*print)(const char *fmt, ...))
{
	if (!bt)
		return;

	/* First one serves as counter. */
	print("  backtrace:");
	for (size_t i = 1; i < bt[0]; i++) {
		backtrace_pcinfo(backtrace_state,
				 bt[i], dump_syminfo,
				 NULL, print);
	}
}

bool dump_memleak(struct htable *memtable,
		  void PRINTF_FMT(1,2) (*print)(const char *fmt, ...))
{
	const tal_t *i;
	const uintptr_t *backtrace;
	bool found_leak = false;

	while ((i = memleak_get(memtable, &backtrace)) != NULL) {
		print("MEMLEAK: %p", i);
		if (tal_name(i))
			print("  label=%s", tal_name(i));

		dump_leak_backtrace(backtrace, print);

		/* Walk every ancestor, one step at a time. */
		print("  parents:");
		for (tal_t *p = tal_parent(i); p; p = tal_parent(p))
			print("    %s", tal_name(p));
		found_leak = true;
	}

	return found_leak;
}

#else /* !DEVELOPER */
void *notleak_(void *ptr, bool plus_children UNNEEDED)
{
	return ptr;
}
#endif /* !DEVELOPER */