2016-01-21 21:11:47 +01:00
|
|
|
#include "log.h"
|
2018-03-22 08:10:23 +01:00
|
|
|
#include <backtrace-supported.h>
|
2018-03-23 08:52:33 +01:00
|
|
|
#include <backtrace.h>
|
2016-01-21 21:11:47 +01:00
|
|
|
#include <ccan/array_size/array_size.h>
|
2018-08-22 12:06:40 +02:00
|
|
|
#include <ccan/err/err.h>
|
2019-11-18 01:27:17 +01:00
|
|
|
#include <ccan/htable/htable_type.h>
|
2018-08-22 12:06:40 +02:00
|
|
|
#include <ccan/io/io.h>
|
2016-01-21 21:11:47 +01:00
|
|
|
#include <ccan/opt/opt.h>
|
2016-01-21 21:11:47 +01:00
|
|
|
#include <ccan/read_write_all/read_write_all.h>
|
|
|
|
#include <ccan/str/hex/hex.h>
|
2018-02-18 13:56:46 +01:00
|
|
|
#include <ccan/tal/link/link.h>
|
2016-01-21 21:11:47 +01:00
|
|
|
#include <ccan/tal/str/str.h>
|
2018-12-08 01:39:28 +01:00
|
|
|
#include <common/json_command.h>
|
|
|
|
#include <common/jsonrpc_errors.h>
|
2017-12-15 11:29:03 +01:00
|
|
|
#include <common/memleak.h>
|
2018-12-08 01:39:28 +01:00
|
|
|
#include <common/param.h>
|
2017-08-28 18:04:01 +02:00
|
|
|
#include <common/pseudorand.h>
|
2017-08-28 18:02:01 +02:00
|
|
|
#include <common/utils.h>
|
2016-01-21 21:11:47 +01:00
|
|
|
#include <errno.h>
|
2016-01-21 21:11:47 +01:00
|
|
|
#include <fcntl.h>
|
2016-07-01 03:49:28 +02:00
|
|
|
#include <inttypes.h>
|
plugin: Add new notification type: warning
This notification bases on `LOG_BROKEN` and `LOG_UNUSUAL` level log.
--Introduction
A notification for topic `warning` is sent every time a new `BROKEN`/
`UNUSUAL` level(in plugins, we use `error`/`warn`) log generated, which
means an unusual/borken thing happens, such as channel failed,
message resolving failed...
```json
{
"warning": {
"level": "warn",
"time": "1559743608.565342521",
"source": "lightningd(17652): 0821f80652fb840239df8dc99205792bba2e559a05469915804c08420230e23c7c chan #7854:",
"log": "Peer permanent failure in CHANNELD_NORMAL: lightning_channeld: sent ERROR bad reestablish dataloss msg"
}
}
```
1. `level` is `warn` or `error`:
`warn` means something seems bad happened and it's under control, but
we'd better check it;
`error` means something extremely bad is out of control, and it may lead
to crash;
2. `time` is the second since epoch;
3. `source`, in fact, is the `prefix` of the log_entry. It means where
the event happened, it may have the following forms:
`<node_id> chan #<db_id_of_channel>:`, `lightningd(<lightningd_pid>):`,
`plugin-<plugin_name>:`, `<daemon_name>(<daemon_pid>):`, `jsonrpc:`,
`jcon fd <error_fd_to_jsonrpc>:`, `plugin-manager`;
4. `log` is the context of the original log entry.
--Note:
1. The main code uses `UNUSUAL`/`BROKEN`, and plugin module uses `warn`
/`error`, considering the consistency with plugin, warning choose `warn`
/`error`. But users who use c-lightning with plugins may want to
`getlog` with specified level when receive warning. It's the duty for
plugin dev to turn `warn`/`error` into `UNUSUAL`/`BROKEN` and present it
to the users, or pass it directly to `getlog`;
2. About time, `json_log()` in `log` module uses the Relative Time, from
the time when `log_book` inited to the time when this event happend.
But I consider the `UNUSUAL`/`BROKEN` event is rare, and it is very
likely to happen after running for a long time, so for users, they will
pay more attention to Absolute Time.
-- Related Change
1. Remove the definitions of `log`, `log_book`, `log_entry` from `log.c`
to `log.h`, then they can be used in warning declaration and definition.
2. Remove `void json_add_time(struct json_stream *result, const char
*fieldname, struct timespec ts)` from `log.c` to `json.c`, and add
related declaration in `json.h`. Now the notification function in
`notification.c` can call it.
2. Add a pointer to `struct lightningd` in `struct log_book`. This may
affect the independence of the `log` module, but storing a pointer to
`ld` is more direct;
2019-06-03 20:26:58 +02:00
|
|
|
#include <lightningd/json.h>
|
2018-02-05 05:09:28 +01:00
|
|
|
#include <lightningd/jsonrpc.h>
|
2017-12-15 11:18:54 +01:00
|
|
|
#include <lightningd/lightningd.h>
|
2019-06-06 10:26:42 +02:00
|
|
|
#include <lightningd/notification.h>
|
2018-02-05 05:09:28 +01:00
|
|
|
#include <lightningd/options.h>
|
2016-01-21 21:11:47 +01:00
|
|
|
#include <signal.h>
|
2016-01-21 21:11:47 +01:00
|
|
|
#include <stdio.h>
|
2016-01-21 21:11:47 +01:00
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <sys/types.h>
|
2016-01-21 21:11:48 +01:00
|
|
|
#include <unistd.h>
|
2016-01-21 21:11:47 +01:00
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
/* What logging level to use if they didn't specify */
|
|
|
|
#define DEFAULT_LOGLEVEL LOG_INFORM
|
|
|
|
|
2018-03-29 04:06:45 +02:00
|
|
|
/* Once we're up and running, this is set up. */
|
|
|
|
struct log *crashlog;
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
/* One per-prefix log-level override, from "--log-level=<level>:<prefix>".
 * Linked into log_book->print_filters; consulted by filter_level(). */
struct print_filter {
	/* Membership in log_book->print_filters. */
	struct list_node list;
	/* Substring matched (via strstr) against a log's prefix. */
	const char *prefix;
	/* Print level applied to logs whose prefix matches. */
	enum log_level level;
};
|
|
|
|
|
2019-11-18 01:26:27 +01:00
|
|
|
/* The shared in-memory log record: a bounded array of entries plus the
 * output stream and print-level configuration. */
struct log_book {
	/* Memory currently accounted to entries (see mem_used()). */
	size_t mem_used;
	/* When mem_used exceeds this, prune_log() discards entries. */
	size_t max_mem;
	/* Number of valid entries in the log array. */
	size_t num_entries;
	/* Per-prefix level overrides (struct print_filter). */
	struct list_head print_filters;

	/* Non-null once it's been initialized */
	enum log_level *default_print_level;
	/* When this book was created; entries report time relative to it. */
	struct timeabs init_time;
	/* Where printed entries go: stdout, or the --log-file stream. */
	FILE *outf;

	/* The entries themselves (tal array, doubled on demand). */
	struct log_entry *log;

	/* Although log_book will copy log entries to the parent log_book
	 * (the log_book belongs to lightningd), we also keep a direct
	 * pointer to lightningd because the notification code needs
	 * ld->plugins.
	 */
	struct lightningd *ld;
	/* Cache of all node_ids, to avoid multiple copies. */
	struct node_id_map *cache;
};
|
|
|
|
|
|
|
|
/* One logging source: shares a log_book, adds a prefix and (optionally)
 * a default node_id for its entries. */
struct log {
	/* The shared (tal_link'd) record all entries land in. */
	struct log_book *lr;
	/* Node this log concerns, or NULL (copy owned by this struct). */
	const struct node_id *default_node_id;
	/* Prepended to every entry; allocated off lr (entries share it). */
	const char *prefix;

	/* Non-NULL once it's been initialized */
	enum log_level *print_level;
};
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
/* Avoids duplicate node_id entries. */
|
|
|
|
struct node_id_cache {
|
|
|
|
size_t count;
|
|
|
|
struct node_id node_id;
|
|
|
|
};
|
|
|
|
|
|
|
|
/* htable callbacks for the node_id cache: key accessor, hash and
 * equality.  Together they define the node_id_map type below. */
static const struct node_id *node_cache_id(const struct node_id_cache *nc)
{
	return &nc->node_id;
}

static size_t node_id_hash(const struct node_id *id)
{
	return siphash24(siphash_seed(), id->k, sizeof(id->k));
}

static bool node_id_cache_eq(const struct node_id_cache *nc,
			     const struct node_id *node_id)
{
	return node_id_eq(&nc->node_id, node_id);
}

HTABLE_DEFINE_TYPE(struct node_id_cache,
		   node_cache_id, node_id_hash, node_id_cache_eq,
		   node_id_map);
|
|
|
|
|
2019-06-30 02:42:43 +02:00
|
|
|
/* Human-readable tag printed before each message in the log file.
 * Deliberately no default case: the compiler then warns if a new
 * log_level value is left unhandled. */
static const char *level_prefix(enum log_level level)
{
	switch (level) {
	case LOG_IO_OUT:
	case LOG_IO_IN:
		return "IO";
	case LOG_DBG:
		return "DEBUG";
	case LOG_INFORM:
		return "INFO";
	case LOG_UNUSUAL:
		return "UNUSUAL";
	case LOG_BROKEN:
		return "**BROKEN**";
	}
	abort();
}
|
|
|
|
|
2018-02-02 01:07:19 +01:00
|
|
|
static void log_to_file(const char *prefix,
|
|
|
|
enum log_level level,
|
2019-11-17 12:41:33 +01:00
|
|
|
const struct node_id *node_id,
|
2018-02-02 01:07:19 +01:00
|
|
|
const struct timeabs *time,
|
|
|
|
const char *str,
|
2018-02-05 05:09:28 +01:00
|
|
|
const u8 *io,
|
2019-04-08 01:52:19 +02:00
|
|
|
size_t io_len,
|
2018-02-02 01:07:19 +01:00
|
|
|
FILE *logf)
|
2016-01-21 21:11:47 +01:00
|
|
|
{
|
2018-02-02 01:05:41 +01:00
|
|
|
char iso8601_msec_fmt[sizeof("YYYY-mm-ddTHH:MM:SS.%03dZ")];
|
2018-02-02 01:05:41 +01:00
|
|
|
strftime(iso8601_msec_fmt, sizeof(iso8601_msec_fmt), "%FT%T.%%03dZ", gmtime(&time->ts.tv_sec));
|
2018-02-02 01:05:41 +01:00
|
|
|
char iso8601_s[sizeof("YYYY-mm-ddTHH:MM:SS.nnnZ")];
|
2018-02-02 01:05:41 +01:00
|
|
|
snprintf(iso8601_s, sizeof(iso8601_s), iso8601_msec_fmt, (int) time->ts.tv_nsec / 1000000);
|
2018-02-05 05:09:28 +01:00
|
|
|
|
|
|
|
if (level == LOG_IO_IN || level == LOG_IO_OUT) {
|
|
|
|
const char *dir = level == LOG_IO_IN ? "[IN]" : "[OUT]";
|
2019-04-08 01:52:19 +02:00
|
|
|
char *hex = tal_hexstr(NULL, io, io_len);
|
2019-11-17 12:41:33 +01:00
|
|
|
if (!node_id)
|
2019-11-18 01:27:17 +01:00
|
|
|
fprintf(logf, "%s %s: %s%s %s\n",
|
2019-11-17 12:41:33 +01:00
|
|
|
iso8601_s, prefix, str, dir, hex);
|
|
|
|
else
|
2019-11-18 01:27:17 +01:00
|
|
|
fprintf(logf, "%s %s-%s: %s%s %s\n",
|
2019-11-17 12:41:33 +01:00
|
|
|
iso8601_s,
|
|
|
|
node_id_to_hexstr(tmpctx, node_id),
|
|
|
|
prefix, str, dir, hex);
|
2018-02-05 05:09:28 +01:00
|
|
|
tal_free(hex);
|
2019-11-18 01:27:17 +01:00
|
|
|
} else {
|
2019-11-17 12:41:33 +01:00
|
|
|
if (!node_id)
|
2019-11-18 01:27:17 +01:00
|
|
|
fprintf(logf, "%s %s %s: %s\n",
|
2019-11-17 12:41:33 +01:00
|
|
|
iso8601_s, level_prefix(level), prefix, str);
|
|
|
|
else
|
2019-11-18 01:27:17 +01:00
|
|
|
fprintf(logf, "%s %s %s-%s: %s\n",
|
2019-11-17 12:41:33 +01:00
|
|
|
iso8601_s, level_prefix(level),
|
|
|
|
node_id_to_hexstr(tmpctx, node_id),
|
|
|
|
prefix, str);
|
2016-01-21 21:11:47 +01:00
|
|
|
}
|
2018-02-02 01:07:19 +01:00
|
|
|
fflush(logf);
|
|
|
|
}
|
|
|
|
|
2018-02-05 05:09:28 +01:00
|
|
|
/* How much memory an entry counts against log_book->max_mem: the struct
 * itself, the NUL-terminated message, and any raw IO bytes. */
static size_t mem_used(const struct log_entry *e)
{
	return sizeof(*e) + strlen(e->log) + 1 + tal_count(e->io);
}
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
/* Threshold (of 1000) to delete */
/* prune_log() deletes an entry when pseudorand(1000) <= this value, so
 * noisier levels are pruned more aggressively.  No default case: the
 * compiler warns if a level is left unhandled. */
static u32 delete_threshold(enum log_level level)
{
	switch (level) {
	/* Delete 90% of log_io */
	case LOG_IO_OUT:
	case LOG_IO_IN:
		return 900;
	/* 50% of LOG_DBG */
	case LOG_DBG:
		return 500;
	/* 25% of LOG_INFORM */
	case LOG_INFORM:
		return 250;
	/* 5% of LOG_UNUSUAL / LOG_BROKEN */
	case LOG_UNUSUAL:
	case LOG_BROKEN:
		return 50;
	}
	abort();
}
|
|
|
|
|
2019-11-18 01:27:18 +01:00
|
|
|
/* Delete a log entry: returns how many now deleted */
static size_t delete_entry(struct log_book *log, struct log_entry *i)
{
	log->mem_used -= mem_used(i);
	log->num_entries--;
	/* Drop the shared node_id cache entry once nothing references it. */
	if (i->nc && --i->nc->count == 0)
		tal_free(i->nc);
	/* i->log comes from vasprintf()/strdup() (raw malloc, not tal),
	 * so it must be free()d, while i->io is a tal allocation. */
	free(i->log);
	tal_free(i->io);

	/* This entry also stood in for i->skipped previously-pruned ones. */
	return 1 + i->skipped;
}
|
|
|
|
|
2017-01-10 05:48:26 +01:00
|
|
|
/* Randomly discard entries (biased by level via delete_threshold()) to
 * reclaim memory, compacting the survivors in place.  Deleted entries'
 * counts are accumulated onto the next surviving entry's ->skipped so
 * readers know how much was dropped.  Returns the number deleted. */
static size_t prune_log(struct log_book *log)
{
	size_t skipped = 0, deleted = 0, count = 0, dst = 0, max, tail;

	/* Never delete the last 10% (and definitely not last one!). */
	tail = log->num_entries / 10 + 1;
	max = log->num_entries - tail;

	for (count = 0; count < max; count++) {
		struct log_entry *i = &log->log[count];

		if (pseudorand(1000) > delete_threshold(i->level)) {
			/* Keeper: absorb counts of entries deleted before it. */
			i->skipped += skipped;
			skipped = 0;
			/* Move down if necessary. */
			log->log[dst++] = *i;
			continue;
		}

		skipped += delete_entry(log, i);
		deleted++;
	}

	/* Any skipped at tail go on the next entry */
	log->log[count].skipped += skipped;

	/* Move down the last 10% */
	memmove(log->log + dst, log->log + count, tail * sizeof(*log->log));
	return deleted;
}
|
|
|
|
|
2019-11-18 01:27:18 +01:00
|
|
|
static void destroy_log_book(struct log_book *log)
|
|
|
|
{
|
|
|
|
size_t num = log->num_entries;
|
|
|
|
|
|
|
|
for (size_t i = 0; i < num; i++)
|
|
|
|
delete_entry(log, &log->log[i]);
|
|
|
|
|
|
|
|
assert(log->num_entries == 0);
|
|
|
|
assert(log->mem_used == 0);
|
|
|
|
}
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
/* Create the shared log record, bounded to max_mem bytes of entries.
 * Output defaults to stdout until --log-file redirects it.  The book is
 * tal_linkable so multiple struct logs can share (and co-own) it. */
struct log_book *new_log_book(struct lightningd *ld, size_t max_mem)
{
	struct log_book *lr = tal_linkable(tal(NULL, struct log_book));

	/* Give a reasonable size for memory limit! */
	assert(max_mem > sizeof(struct log) * 2);
	lr->mem_used = 0;
	lr->num_entries = 0;
	lr->max_mem = max_mem;
	lr->outf = stdout;
	lr->default_print_level = NULL;
	list_head_init(&lr->print_filters);
	lr->init_time = time_now();
	lr->ld = ld;
	lr->cache = tal(lr, struct node_id_map);
	node_id_map_init(lr->cache);
	/* Entry array starts small; new_log_entry() doubles it as needed. */
	lr->log = tal_arr(lr, struct log_entry, 128);
	tal_add_destructor(lr, destroy_log_book);

	return lr;
}
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
/* Effective print level for a log with this prefix: the first
 * --log-level filter whose prefix substring matches wins, otherwise the
 * global default.  Must not be called before options are parsed. */
static enum log_level filter_level(struct log_book *lr, const char *prefix)
{
	struct print_filter *i;

	assert(lr->default_print_level != NULL);
	list_for_each(&lr->print_filters, i, list) {
		if (strstr(prefix, i->prefix))
			return i->level;
	}
	return *lr->default_print_level;
}
|
|
|
|
|
2016-01-21 21:11:47 +01:00
|
|
|
/* With different entry points */
|
2019-11-01 00:38:00 +01:00
|
|
|
/* With different entry points */
/* Create a logging source attached to `record`, whose prefix is built
 * printf-style from fmt.  default_node_id, if non-NULL, is copied and
 * attached to every entry that doesn't supply its own. */
struct log *
new_log(const tal_t *ctx, struct log_book *record,
	const struct node_id *default_node_id,
	const char *fmt, ...)
{
	struct log *log = tal(ctx, struct log);
	va_list ap;

	log->lr = tal_link(log, record);
	va_start(ap, fmt);
	/* log->lr owns this, since its entries keep a pointer to it. */
	/* FIXME: Refcount this! */
	log->prefix = notleak(tal_vfmt(log->lr, fmt, ap));
	va_end(ap);
	if (default_node_id)
		log->default_node_id = tal_dup(log, struct node_id,
					       default_node_id);
	else
		log->default_node_id = NULL;

	/* Initialized on first use */
	log->print_level = NULL;
	return log;
}
|
|
|
|
|
|
|
|
/* Accessor: the prefix string this log stamps on each entry. */
const char *log_prefix(const struct log *log)
{
	return log->prefix;
}
|
2019-11-18 01:27:17 +01:00
|
|
|
|
|
|
|
/* Minimum level at which this log's entries are printed.  Lazily
 * computed from the filters once options are parsed, then cached. */
enum log_level log_print_level(struct log *log)
{
	if (!log->print_level) {
		/* Not set globally yet? Print UNUSUAL / BROKEN messages only */
		if (!log->lr->default_print_level)
			return LOG_UNUSUAL;
		log->print_level = tal(log, enum log_level);
		*log->print_level = filter_level(log->lr, log->prefix);
	}
	return *log->print_level;
}
|
|
|
|
|
2019-11-18 01:27:18 +01:00
|
|
|
|
|
|
|
/* This may move entry! */
/* Commit a freshly-filled entry into the book's accounting; if we're
 * now over budget, prune.  Pruning compacts the array, so *l is
 * re-pointed at the (still-last) entry for the caller. */
static void add_entry(struct log *log, struct log_entry **l)
{
	log->lr->mem_used += mem_used(*l);
	log->lr->num_entries++;

	if (log->lr->mem_used > log->lr->max_mem) {
		size_t old_mem = log->lr->mem_used, deleted;
		deleted = prune_log(log->lr);
		/* Will have moved, but will be last entry. */
		*l = &log->lr->log[log->lr->num_entries-1];
		log_debug(log, "Log pruned %zu entries (mem %zu -> %zu)",
			  deleted, old_mem, log->lr->mem_used);
	}
}
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
/* tal destructor: remove a node_id cache entry from the book's map. */
static void destroy_node_id_cache(struct node_id_cache *nc, struct log_book *lr)
{
	node_id_map_del(lr->cache, nc);
}
|
|
|
|
|
2019-11-17 12:41:33 +01:00
|
|
|
/* Claim the next slot in the book's entry array (growing it if full)
 * and fill in the common fields.  The node_id (explicit, or the log's
 * default) is deduplicated through the refcounted cache.  The caller
 * must set ->log and call add_entry(). */
static struct log_entry *new_log_entry(struct log *log, enum log_level level,
				       const struct node_id *node_id)
{
	struct log_entry *l;

	if (log->lr->num_entries == tal_count(log->lr->log))
		tal_resize(&log->lr->log, tal_count(log->lr->log) * 2);

	l = &log->lr->log[log->lr->num_entries];
	l->time = time_now();
	l->level = level;
	l->skipped = 0;
	/* Shared with the struct log; not copied per-entry. */
	l->prefix = log->prefix;
	l->io = NULL;
	if (!node_id)
		node_id = log->default_node_id;
	if (node_id) {
		/* Reuse a cached id if we've logged about this node before. */
		l->nc = node_id_map_get(log->lr->cache, node_id);
		if (!l->nc) {
			l->nc = tal(log->lr->cache, struct node_id_cache);
			l->nc->count = 0;
			l->nc->node_id = *node_id;
			node_id_map_add(log->lr->cache, l->nc);
			tal_add_destructor2(l->nc, destroy_node_id_cache,
					    log->lr);
		}
		l->nc->count++;
	} else
		l->nc = NULL;

	return l;
}
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
static void maybe_print(struct log *log, const struct log_entry *l)
|
2018-02-05 05:09:28 +01:00
|
|
|
{
|
2019-11-18 01:27:17 +01:00
|
|
|
if (l->level >= log_print_level(log))
|
2019-11-18 01:27:17 +01:00
|
|
|
log_to_file(log->prefix, l->level,
|
2019-11-18 01:27:17 +01:00
|
|
|
l->nc ? &l->nc->node_id : NULL,
|
|
|
|
&l->time, l->log,
|
2019-11-18 01:27:15 +01:00
|
|
|
l->io, tal_bytelen(l->io), log->lr->outf);
|
2018-02-05 05:09:28 +01:00
|
|
|
}
|
|
|
|
|
2019-11-17 12:41:33 +01:00
|
|
|
/* Record one formatted message: print it (if it meets the level),
 * store it in the book, and optionally fire the `warning` notification.
 * Preserves errno so logging can be sprinkled into error paths. */
void logv(struct log *log, enum log_level level,
	  const struct node_id *node_id,
	  bool call_notifier,
	  const char *fmt, va_list ap)
{
	int save_errno = errno;
	struct log_entry *l = new_log_entry(log, level, node_id);

	/* This is WARN_UNUSED_RESULT, because everyone should somehow deal
	 * with OOM, even though nobody does. */
	if (vasprintf(&l->log, fmt, ap) == -1)
		abort();

	size_t log_len = strlen(l->log);

	/* Sanitize any non-printable characters, and replace with '?' */
	for (size_t i=0; i<log_len; i++)
		if (l->log[i] < ' ' || l->log[i] >= 0x7f)
			l->log[i] = '?';

	maybe_print(log, l);

	/* May prune and move l; add_entry re-points it. */
	add_entry(log, &l);

	if (call_notifier)
		notify_warning(log->lr->ld, l);

	errno = save_errno;
}
|
|
|
|
|
2018-02-05 05:09:28 +01:00
|
|
|
/* Record a raw IO event (dir is LOG_IO_IN or LOG_IO_OUT).  The full
 * payload is printed first; what we *store* is capped so one giant
 * message can't evict the whole log.  Preserves errno. */
void log_io(struct log *log, enum log_level dir,
	    const struct node_id *node_id,
	    const char *str TAKES,
	    const void *data TAKES, size_t len)
{
	int save_errno = errno;
	struct log_entry *l = new_log_entry(log, dir, node_id);

	assert(dir == LOG_IO_IN || dir == LOG_IO_OUT);

	/* Print first, in case we need to truncate. */
	if (l->level >= log_print_level(log))
		log_to_file(log->prefix, l->level,
			    l->nc ? &l->nc->node_id : NULL,
			    &l->time, str,
			    data, len, log->lr->outf);

	/* Save a tal header, by using raw malloc. */
	l->log = strdup(str);
	if (taken(str))
		tal_free(str);

	/* Don't immediately fill buffer with giant IOs */
	if (len > log->lr->max_mem / 64) {
		/* Mark the entry so readers know data was dropped. */
		l->skipped++;
		len = log->lr->max_mem / 64;
	}

	/* FIXME: We could save 4 pointers by using a raw allow, but saving
	 * the length. */
	l->io = tal_dup_arr(log->lr, u8, data, len, 0);

	add_entry(log, &l);
	errno = save_errno;
}
|
|
|
|
|
2019-11-17 12:41:33 +01:00
|
|
|
/* Varargs entry point (the log_debug/log_info/... macros funnel here):
 * formats and records one message via logv(). */
void log_(struct log *log, enum log_level level,
	  const struct node_id *node_id,
	  bool call_notifier,
	  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	logv(log, level, node_id, call_notifier, fmt, ap);
	va_end(ap);
}
|
|
|
|
|
2019-11-18 01:27:15 +01:00
|
|
|
/* Typesafe iteration over every entry in a log book: func is called as
 * func(skipped, time, level, node_id, prefix, log, io, arg). */
#define log_each_line(lr, func, arg)					\
	log_each_line_((lr),						\
		       typesafe_cb_preargs(void, void *, (func), (arg),	\
					   unsigned int,		\
					   struct timerel,		\
					   enum log_level,		\
					   const struct node_id *,	\
					   const char *,		\
					   const char *,		\
					   const u8 *), (arg))
|
|
|
|
|
|
|
|
/* Untyped worker behind log_each_line(): walk the entries in order,
 * reporting each entry's time relative to the book's init_time. */
static void log_each_line_(const struct log_book *lr,
			   void (*func)(unsigned int skipped,
					struct timerel time,
					enum log_level level,
					const struct node_id *node_id,
					const char *prefix,
					const char *log,
					const u8 *io,
					void *arg),
			   void *arg)
{
	for (size_t i = 0; i < lr->num_entries; i++) {
		const struct log_entry *l = &lr->log[i];

		func(l->skipped, time_between(l->time, lr->init_time),
		     l->level, l->nc ? &l->nc->node_id : NULL,
		     l->prefix, l->log, l->io, arg);
	}
}
|
|
|
|
|
|
|
|
/* State threaded through log_one_line() when dumping the log to an fd
 * (e.g. the crash dump). */
struct log_data {
	/* Raw fd written via write_all() (safe in a signal handler). */
	int fd;
	/* Separator emitted before each line: "" at first, then "\n". */
	const char *prefix;
};
|
|
|
|
|
|
|
|
/* log_each_line() callback: dump one entry to data->fd.  Uses only a
 * fixed stack buffer and raw writes, since it may run from a signal
 * handler where allocation is unsafe.  node_id is unused here. */
static void log_one_line(unsigned int skipped,
			 struct timerel diff,
			 enum log_level level,
			 const struct node_id *node_id,
			 const char *prefix,
			 const char *log,
			 const u8 *io,
			 struct log_data *data)
{
	char buf[101];

	if (skipped) {
		/* Record how many pruned entries this one stands in for. */
		snprintf(buf, sizeof(buf), "%s... %u skipped...", data->prefix, skipped);
		write_all(data->fd, buf, strlen(buf));
		data->prefix = "\n";
	}

	/* "+sec.nsec PREFIX LEVEL: " header; diff is time since book init. */
	snprintf(buf, sizeof(buf), "%s+%lu.%09u %s%s: ",
		 data->prefix,
		 (unsigned long)diff.ts.tv_sec,
		 (unsigned)diff.ts.tv_nsec,
		 prefix,
		 level == LOG_IO_IN ? "IO_IN"
		 : level == LOG_IO_OUT ? "IO_OUT"
		 : level == LOG_DBG ? "DEBUG"
		 : level == LOG_INFORM ? "INFO"
		 : level == LOG_UNUSUAL ? "UNUSUAL"
		 : level == LOG_BROKEN ? "BROKEN"
		 : "**INVALID**");

	write_all(data->fd, buf, strlen(buf));
	write_all(data->fd, log, strlen(log));
	if (level == LOG_IO_IN || level == LOG_IO_OUT) {
		size_t off, used, len = tal_count(io);

		/* No allocations, may be in signal handler. */
		for (off = 0; off < len; off += used) {
			/* Hex-encode in buf-sized chunks. */
			used = len - off;
			if (hex_str_size(used) > sizeof(buf))
				used = hex_data_size(sizeof(buf));
			hex_encode(io + off, used, buf, hex_str_size(used));
			write_all(data->fd, buf, strlen(buf));
		}
	}

	data->prefix = "\n";
}
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
/* Names accepted by --log-level (matched case-insensitively) and shown
 * back by show_log_level().  Note "IO" maps to LOG_IO_OUT; it stands
 * for both IO directions. */
static const struct level {
	const char *name;
	enum log_level level;
} log_levels[] = {
	{ "IO", LOG_IO_OUT },
	{ "DEBUG", LOG_DBG },
	{ "INFO", LOG_INFORM },
	{ "UNUSUAL", LOG_UNUSUAL },
	{ "BROKEN", LOG_BROKEN }
};
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
static const struct level *str_to_level(const char *str, size_t len)
|
2016-01-21 21:11:47 +01:00
|
|
|
{
|
2019-11-18 01:27:17 +01:00
|
|
|
for (size_t i = 0; i < ARRAY_SIZE(log_levels); i++) {
|
|
|
|
if (strlen(log_levels[i].name) != len)
|
|
|
|
continue;
|
|
|
|
if (strncasecmp(str, log_levels[i].name, len) != 0)
|
|
|
|
continue;
|
|
|
|
return &log_levels[i];
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
2016-01-21 21:11:47 +01:00
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
static const char *level_to_str(enum log_level level)
|
|
|
|
{
|
|
|
|
for (size_t i = 0; i < ARRAY_SIZE(log_levels); i++) {
|
|
|
|
if (level == log_levels[i].level)
|
|
|
|
return log_levels[i].name;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
char *opt_log_level(const char *arg, struct log *log)
|
|
|
|
{
|
|
|
|
const struct level *level;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
len = strcspn(arg, ":");
|
|
|
|
level = str_to_level(arg, len);
|
|
|
|
if (!level)
|
|
|
|
return tal_fmt(NULL, "unknown log level %.*s", len, arg);
|
|
|
|
|
|
|
|
if (arg[len]) {
|
|
|
|
struct print_filter *f = tal(log->lr, struct print_filter);
|
|
|
|
f->prefix = arg + len + 1;
|
|
|
|
f->level = level->level;
|
|
|
|
list_add_tail(&log->lr->print_filters, &f->list);
|
|
|
|
} else {
|
|
|
|
tal_free(log->lr->default_print_level);
|
|
|
|
log->lr->default_print_level = tal(log->lr, enum log_level);
|
|
|
|
*log->lr->default_print_level = level->level;
|
2016-01-21 21:11:47 +01:00
|
|
|
}
|
2019-11-18 01:27:17 +01:00
|
|
|
return NULL;
|
2016-01-21 21:11:47 +01:00
|
|
|
}
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
/* Emit each configured per-prefix filter as a "log-level":"LEVEL:prefix"
 * member of the JSON response (used when listing configuration). */
void json_add_opt_log_levels(struct json_stream *response, struct log *log)
{
	struct print_filter *i;

	list_for_each(&log->lr->print_filters, i, list) {
		json_add_member(response, "log-level", true, "%s:%s",
				level_to_str(i->level), i->prefix);
	}
}
|
|
|
|
|
|
|
|
static void show_log_level(char buf[OPT_SHOW_LEN], const struct log *log)
|
|
|
|
{
|
|
|
|
enum log_level l;
|
|
|
|
|
|
|
|
if (log->lr->default_print_level)
|
|
|
|
l = *log->lr->default_print_level;
|
|
|
|
else
|
|
|
|
l = DEFAULT_LOGLEVEL;
|
|
|
|
strncpy(buf, level_to_str(l), OPT_SHOW_LEN-1);
|
2018-01-29 01:30:15 +01:00
|
|
|
}
|
|
|
|
|
2016-01-21 21:11:47 +01:00
|
|
|
/* Handle --log-prefix=<prefix>: replace this log's prefix string. */
static char *arg_log_prefix(const char *arg, struct log *log)
{
	/* log->lr owns this, since it keeps a pointer to it. */
	log->prefix = tal_strdup(log->lr, arg);
	return NULL;
}
|
|
|
|
|
2018-01-29 01:30:15 +01:00
|
|
|
static void show_log_prefix(char buf[OPT_SHOW_LEN], const struct log *log)
|
|
|
|
{
|
|
|
|
strncpy(buf, log->prefix, OPT_SHOW_LEN);
|
|
|
|
}
|
|
|
|
|
2018-08-22 12:06:40 +02:00
|
|
|
static int signalfds[2];
|
|
|
|
|
|
|
|
/* SIGHUP handler: wake the io loop by poking the self-pipe; the actual
 * log rotation happens in rotate_log(), outside signal context. */
static void handle_sighup(int sig)
{
	/* Writes a single 0x00 byte to the signalfds pipe. This may fail if
	 * we're hammered with SIGHUP. We don't care. */
	if (write(signalfds[1], "", 1))
		;
}
|
|
|
|
|
|
|
|
/* Mutual recursion */
|
|
|
|
static struct io_plan *setup_read(struct io_conn *conn, struct lightningd *ld);
|
|
|
|
|
|
|
|
/* Runs after SIGHUP was signalled via the pipe: close and reopen the
 * log file (classic logrotate handshake), then resume reading. */
static struct io_plan *rotate_log(struct io_conn *conn, struct lightningd *ld)
{
	log_info(ld->log, "Ending log due to SIGHUP");
	fclose(ld->log->lr->outf);

	ld->log->lr->outf = fopen(ld->logfile, "a");
	if (!ld->log->lr->outf)
		err(1, "failed to reopen log file %s", ld->logfile);

	log_info(ld->log, "Started log due to SIGHUP");
	return setup_read(conn, ld);
}
|
|
|
|
|
|
|
|
/* Wait for the next byte from the SIGHUP self-pipe; its content is
 * irrelevant, only its arrival matters. */
static struct io_plan *setup_read(struct io_conn *conn, struct lightningd *ld)
{
	/* We read and discard. */
	static char discard;
	return io_read(conn, &discard, 1, rotate_log, ld);
}
|
|
|
|
|
|
|
|
/* Install the SIGHUP-driven log rotation: a self-pipe whose read end is
 * watched by the io loop, and a signal handler that writes to it. */
static void setup_log_rotation(struct lightningd *ld)
{
	struct sigaction act;
	if (pipe(signalfds) != 0)
		errx(1, "Pipe for signalfds");

	notleak(io_new_conn(ld, signalfds[0], setup_read, ld));

	/* Non-blocking write end: losing a byte under SIGHUP floods is fine. */
	io_fd_block(signalfds[1], false);
	memset(&act, 0, sizeof(act));
	act.sa_handler = handle_sighup;
	/* SA_RESETHAND: handler is re-armed only once rotation completes. */
	act.sa_flags = SA_RESETHAND;

	if (sigaction(SIGHUP, &act, NULL) != 0)
		err(1, "Setting up SIGHUP handler");
}
|
|
|
|
|
2018-01-29 01:30:15 +01:00
|
|
|
char *arg_log_to_file(const char *arg, struct lightningd *ld)
|
2016-01-21 21:11:47 +01:00
|
|
|
{
|
2019-02-21 22:54:39 +01:00
|
|
|
int size;
|
2018-01-29 01:30:15 +01:00
|
|
|
|
|
|
|
if (ld->logfile) {
|
2019-11-18 01:27:15 +01:00
|
|
|
fclose(ld->log->lr->outf);
|
2018-01-29 01:30:15 +01:00
|
|
|
ld->logfile = tal_free(ld->logfile);
|
2018-08-22 12:06:40 +02:00
|
|
|
} else
|
|
|
|
setup_log_rotation(ld);
|
|
|
|
|
2018-01-29 01:30:15 +01:00
|
|
|
ld->logfile = tal_strdup(ld, arg);
|
2019-11-18 01:27:15 +01:00
|
|
|
ld->log->lr->outf = fopen(arg, "a");
|
|
|
|
if (!ld->log->lr->outf)
|
2016-01-21 21:11:47 +01:00
|
|
|
return tal_fmt(NULL, "Failed to open: %s", strerror(errno));
|
2018-12-16 06:25:45 +01:00
|
|
|
|
2019-02-21 22:54:39 +01:00
|
|
|
/* For convenience make a block of empty lines just like Bitcoin Core */
|
2019-11-18 01:27:15 +01:00
|
|
|
size = ftell(ld->log->lr->outf);
|
2019-02-21 22:54:39 +01:00
|
|
|
if (size > 0)
|
2019-11-18 01:27:15 +01:00
|
|
|
fprintf(ld->log->lr->outf, "\n\n\n\n");
|
2019-02-21 22:54:39 +01:00
|
|
|
|
2018-12-16 06:25:45 +01:00
|
|
|
log_debug(ld->log, "Opened log file %s", arg);
|
2016-01-21 21:11:47 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-01-29 01:30:15 +01:00
|
|
|
/* Register the logging-related command-line options.  These are all
 * "early" options: they must take effect before the rest of option
 * parsing so that startup messages land in the right place. */
void opt_register_logging(struct lightningd *ld)
{
	opt_register_early_arg("--log-level",
			       opt_log_level, show_log_level, ld->log,
			       "log level (io, debug, info, unusual, broken) [:prefix]");
	opt_register_early_arg("--log-prefix", arg_log_prefix, show_log_prefix,
			       ld->log,
			       "log prefix");
	/* No show_ callback: the current file name is not reprinted. */
	opt_register_early_arg("--log-file=<file>", arg_log_to_file, NULL, ld,
			       "log to file instead of stdout");
}
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
void logging_options_parsed(struct log_book *lr)
|
|
|
|
{
|
|
|
|
/* If they didn't set an explicit level, set to info */
|
|
|
|
if (!lr->default_print_level) {
|
|
|
|
lr->default_print_level = tal(lr, enum log_level);
|
|
|
|
*lr->default_print_level = DEFAULT_LOGLEVEL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Catch up, since before we were only printing BROKEN msgs */
|
2019-11-18 01:27:18 +01:00
|
|
|
for (size_t i = 0; i < lr->num_entries; i++) {
|
|
|
|
const struct log_entry *l = &lr->log[i];
|
|
|
|
|
2019-11-18 01:27:17 +01:00
|
|
|
if (l->level >= filter_level(lr, l->prefix))
|
2019-11-18 01:27:17 +01:00
|
|
|
log_to_file(l->prefix, l->level,
|
2019-11-18 01:27:17 +01:00
|
|
|
l->nc ? &l->nc->node_id : NULL,
|
2019-11-18 01:27:17 +01:00
|
|
|
&l->time, l->log,
|
|
|
|
l->io, tal_bytelen(l->io), lr->outf);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-29 04:06:45 +02:00
|
|
|
/* printf-style callback used while printing a backtrace: routes each
 * formatted line into the crash log at BROKEN level. */
void log_backtrace_print(const char *fmt, ...)
{
	va_list ap;

	/* No crash log configured yet: nowhere to send it. */
	if (!crashlog)
		return;

	va_start(ap, fmt);
	logv(crashlog, LOG_BROKEN, NULL, false, fmt, ap);
	va_end(ap);
}
|
|
|
|
|
2018-03-29 04:06:45 +02:00
|
|
|
static void log_dump_to_file(int fd, const struct log_book *lr)
|
2016-01-21 21:11:47 +01:00
|
|
|
{
|
|
|
|
char buf[100];
|
2018-03-10 20:02:33 +01:00
|
|
|
int len;
|
2016-01-21 21:11:47 +01:00
|
|
|
struct log_data data;
|
|
|
|
time_t start;
|
|
|
|
|
2019-11-18 01:27:18 +01:00
|
|
|
if (lr->num_entries == 0) {
|
2016-01-21 21:11:47 +01:00
|
|
|
write_all(fd, "0 bytes:\n\n", strlen("0 bytes:\n\n"));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
start = lr->init_time.ts.tv_sec;
|
2018-07-31 14:56:04 +02:00
|
|
|
len = snprintf(buf, sizeof(buf), "%zu bytes, %s", lr->mem_used, ctime(&start));
|
2018-03-10 20:02:33 +01:00
|
|
|
write_all(fd, buf, len);
|
2016-01-21 21:11:47 +01:00
|
|
|
|
|
|
|
/* ctime includes \n... WTF? */
|
|
|
|
data.prefix = "";
|
|
|
|
data.fd = fd;
|
|
|
|
log_each_line(lr, log_one_line, &data);
|
|
|
|
write_all(fd, "\n\n", strlen("\n\n"));
|
|
|
|
}
|
2016-01-21 21:11:48 +01:00
|
|
|
|
2018-03-29 04:06:45 +02:00
|
|
|
void log_backtrace_exit(void)
|
|
|
|
{
|
2018-08-22 12:06:42 +02:00
|
|
|
int fd;
|
2018-08-22 13:17:20 +02:00
|
|
|
char timebuf[sizeof("YYYYmmddHHMMSS")];
|
|
|
|
char logfile[sizeof("/tmp/lightning-crash.log.") + sizeof(timebuf)];
|
|
|
|
struct timeabs time = time_now();
|
|
|
|
|
|
|
|
strftime(timebuf, sizeof(timebuf), "%Y%m%d%H%M%S", gmtime(&time.ts.tv_sec));
|
2018-08-22 12:06:42 +02:00
|
|
|
|
2018-03-29 04:06:45 +02:00
|
|
|
if (!crashlog)
|
|
|
|
return;
|
|
|
|
|
2018-08-22 12:06:42 +02:00
|
|
|
/* We expect to be in config dir. */
|
2018-08-22 13:17:20 +02:00
|
|
|
snprintf(logfile, sizeof(logfile), "crash.log.%s", timebuf);
|
2018-03-29 04:06:45 +02:00
|
|
|
|
2018-08-22 12:06:42 +02:00
|
|
|
fd = open(logfile, O_WRONLY|O_CREAT|O_TRUNC, 0600);
|
|
|
|
if (fd < 0) {
|
|
|
|
snprintf(logfile, sizeof(logfile),
|
2018-08-22 13:17:20 +02:00
|
|
|
"/tmp/lightning-crash.log.%s", timebuf);
|
2018-08-22 12:06:42 +02:00
|
|
|
fd = open(logfile, O_WRONLY|O_CREAT|O_TRUNC, 0600);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Dump entire log. */
|
|
|
|
if (fd >= 0) {
|
|
|
|
log_dump_to_file(fd, crashlog->lr);
|
|
|
|
close(fd);
|
|
|
|
fprintf(stderr, "Log dumped in %s\n", logfile);
|
2018-03-29 04:06:45 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-21 21:11:48 +01:00
|
|
|
void fatal(const char *fmt, ...)
|
|
|
|
{
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
vfprintf(stderr, fmt, ap);
|
|
|
|
fprintf(stderr, "\n");
|
|
|
|
va_end(ap);
|
|
|
|
|
2018-03-29 04:06:45 +02:00
|
|
|
if (!crashlog)
|
|
|
|
exit(1);
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
2019-11-17 12:41:33 +01:00
|
|
|
logv(crashlog, LOG_BROKEN, NULL, true, fmt, ap);
|
2018-03-29 04:06:45 +02:00
|
|
|
va_end(ap);
|
2017-10-29 12:18:13 +01:00
|
|
|
abort();
|
2016-01-21 21:11:48 +01:00
|
|
|
}
|
2018-02-05 05:09:28 +01:00
|
|
|
|
|
|
|
/* Accumulator threaded through log_each_line() when rendering the log
 * book as JSON for the `getlog` RPC. */
struct log_info {
	/* Minimum level to include; lower-level entries are counted
	 * as skipped instead of emitted. */
	enum log_level level;
	/* JSON output stream the entries are appended to. */
	struct json_stream *response;
	/* Entries filtered out so far; flushed as a SKIPPED record. */
	unsigned int num_skipped;
	/* If non-null, only show messages about this peer */
	const struct node_id *node_id;
};
|
|
|
|
|
|
|
|
static void add_skipped(struct log_info *info)
|
|
|
|
{
|
|
|
|
if (info->num_skipped) {
|
|
|
|
json_object_start(info->response, NULL);
|
|
|
|
json_add_string(info->response, "type", "SKIPPED");
|
|
|
|
json_add_num(info->response, "num_skipped", info->num_skipped);
|
|
|
|
json_object_end(info->response);
|
|
|
|
info->num_skipped = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void log_to_json(unsigned int skipped,
|
|
|
|
struct timerel diff,
|
|
|
|
enum log_level level,
|
2019-11-17 12:41:33 +01:00
|
|
|
const struct node_id *node_id,
|
2018-02-05 05:09:28 +01:00
|
|
|
const char *prefix,
|
|
|
|
const char *log,
|
2018-02-05 05:09:28 +01:00
|
|
|
const u8 *io,
|
2018-02-05 05:09:28 +01:00
|
|
|
struct log_info *info)
|
|
|
|
{
|
|
|
|
info->num_skipped += skipped;
|
|
|
|
|
2019-11-18 01:27:15 +01:00
|
|
|
if (info->node_id) {
|
|
|
|
if (!node_id || !node_id_eq(node_id, info->node_id))
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-02-05 05:09:28 +01:00
|
|
|
if (level < info->level) {
|
|
|
|
info->num_skipped++;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
add_skipped(info);
|
|
|
|
|
|
|
|
json_object_start(info->response, NULL);
|
|
|
|
json_add_string(info->response, "type",
|
|
|
|
level == LOG_BROKEN ? "BROKEN"
|
|
|
|
: level == LOG_UNUSUAL ? "UNUSUAL"
|
|
|
|
: level == LOG_INFORM ? "INFO"
|
|
|
|
: level == LOG_DBG ? "DEBUG"
|
2018-02-05 05:09:28 +01:00
|
|
|
: level == LOG_IO_IN ? "IO_IN"
|
|
|
|
: level == LOG_IO_OUT ? "IO_OUT"
|
2018-02-05 05:09:28 +01:00
|
|
|
: "UNKNOWN");
|
|
|
|
json_add_time(info->response, "time", diff.ts);
|
2019-11-17 12:41:33 +01:00
|
|
|
if (node_id)
|
|
|
|
json_add_node_id(info->response, "node_id", node_id);
|
2018-02-05 05:09:28 +01:00
|
|
|
json_add_string(info->response, "source", prefix);
|
2018-02-05 05:09:28 +01:00
|
|
|
json_add_string(info->response, "log", log);
|
|
|
|
if (io)
|
2018-07-28 07:53:33 +02:00
|
|
|
json_add_hex_talarr(info->response, "data", io);
|
2018-02-05 05:09:28 +01:00
|
|
|
|
|
|
|
json_object_end(info->response);
|
|
|
|
}
|
|
|
|
|
2018-10-19 03:17:49 +02:00
|
|
|
void json_add_log(struct json_stream *response,
|
2019-11-18 01:27:15 +01:00
|
|
|
const struct log_book *lr,
|
|
|
|
const struct node_id *node_id,
|
|
|
|
enum log_level minlevel)
|
2018-02-05 05:09:28 +01:00
|
|
|
{
|
|
|
|
struct log_info info;
|
|
|
|
|
|
|
|
info.level = minlevel;
|
|
|
|
info.response = response;
|
|
|
|
info.num_skipped = 0;
|
2019-11-18 01:27:15 +01:00
|
|
|
info.node_id = node_id;
|
2018-02-05 05:09:28 +01:00
|
|
|
|
|
|
|
json_array_start(info.response, "log");
|
|
|
|
log_each_line(lr, log_to_json, &info);
|
|
|
|
add_skipped(&info);
|
|
|
|
json_array_end(info.response);
|
|
|
|
}
|
|
|
|
|
2018-12-16 05:50:06 +01:00
|
|
|
struct command_result *param_loglevel(struct command *cmd,
|
|
|
|
const char *name,
|
|
|
|
const char *buffer,
|
|
|
|
const jsmntok_t *tok,
|
|
|
|
enum log_level **level)
|
2018-02-05 05:09:28 +01:00
|
|
|
{
|
2018-08-15 16:40:37 +02:00
|
|
|
*level = tal(cmd, enum log_level);
|
2018-02-05 05:09:28 +01:00
|
|
|
if (json_tok_streq(buffer, tok, "io"))
|
2018-08-15 16:40:37 +02:00
|
|
|
**level = LOG_IO_OUT;
|
2018-02-05 05:09:28 +01:00
|
|
|
else if (json_tok_streq(buffer, tok, "debug"))
|
2018-08-15 16:40:37 +02:00
|
|
|
**level = LOG_DBG;
|
2018-02-05 05:09:28 +01:00
|
|
|
else if (json_tok_streq(buffer, tok, "info"))
|
2018-08-15 16:40:37 +02:00
|
|
|
**level = LOG_INFORM;
|
2018-02-05 05:09:28 +01:00
|
|
|
else if (json_tok_streq(buffer, tok, "unusual"))
|
2018-08-15 16:40:37 +02:00
|
|
|
**level = LOG_UNUSUAL;
|
|
|
|
else {
|
2018-12-16 05:50:06 +01:00
|
|
|
return command_fail(cmd, JSONRPC2_INVALID_PARAMS,
|
|
|
|
"'%s' should be 'io', 'debug', 'info', or "
|
|
|
|
"'unusual', not '%.*s'",
|
|
|
|
name,
|
|
|
|
json_tok_full_len(tok),
|
|
|
|
json_tok_full(buffer, tok));
|
2018-08-15 16:40:37 +02:00
|
|
|
}
|
2018-12-16 05:50:06 +01:00
|
|
|
return NULL;
|
2018-02-05 05:09:28 +01:00
|
|
|
}
|
|
|
|
|
2018-12-16 05:52:06 +01:00
|
|
|
/* Handler for the `getlog` RPC: return metadata about the in-memory
 * log book plus all entries at or above the optional {level}
 * parameter (default: info). */
static struct command_result *json_getlog(struct command *cmd,
					  const char *buffer,
					  const jsmntok_t *obj UNNEEDED,
					  const jsmntok_t * params)
{
	struct json_stream *response;
	enum log_level *minlevel;
	struct log_book *lr = cmd->ld->log_book;

	if (!param(cmd, buffer, params,
		   p_opt_def("level", param_loglevel, &minlevel, LOG_INFORM),
		   NULL))
		return command_param_failed();

	response = json_stream_success(cmd);
	/* Suppress logging for this stream, to not bloat io logs */
	json_stream_log_suppress_for_cmd(response, cmd);
	json_add_time(response, "created_at", lr->init_time.ts);
	json_add_num(response, "bytes_used", (unsigned int)lr->mem_used);
	json_add_num(response, "bytes_max", (unsigned int)lr->max_mem);
	/* NULL node_id: getlog has no per-peer filter. */
	json_add_log(response, lr, NULL, *minlevel);
	return command_success(cmd, response);
}
|
|
|
|
|
|
|
|
/* Registration record for the `getlog` RPC (picked up via AUTODATA). */
static const struct json_command getlog_command = {
	"getlog",
	"utility",
	json_getlog,
	"Show logs, with optional log {level} (info|unusual|debug|io)"
};
AUTODATA(json_command, &getlog_command);
|