diff --git a/lightningd/plugin_control.c b/lightningd/plugin_control.c
index a5b83c8e9..0ed7f951e 100644
--- a/lightningd/plugin_control.c
+++ b/lightningd/plugin_control.c
@@ -32,6 +32,9 @@ static struct command_result *plugin_dynamic_list_plugins(struct command *cmd)
 	return command_success(cmd, response);
 }
 
+/* Mutual recursion. */
+static void plugin_dynamic_crash(struct plugin *plugin, struct dynamic_plugin *dp);
+
 /**
  * Returned by all subcommands on error.
  */
@@ -42,6 +45,8 @@ plugin_dynamic_error(struct dynamic_plugin *dp, const char *error)
 		plugin_kill(dp->plugin, "%s", error);
 	else
 		log_info(dp->cmd->ld->log, "%s", error);
+
+	tal_del_destructor2(dp->plugin, plugin_dynamic_crash, dp);
 	return command_fail(dp->cmd, JSONRPC2_INVALID_PARAMS,
 			    "%s: %s", dp->plugin ? dp->plugin->cmd : "unknown plugin",
 			    error);
@@ -52,6 +57,11 @@ static void plugin_dynamic_timeout(struct dynamic_plugin *dp)
 	plugin_dynamic_error(dp, "Timed out while waiting for plugin response");
 }
 
+static void plugin_dynamic_crash(struct plugin *p, struct dynamic_plugin *dp)
+{
+	plugin_dynamic_error(dp, "Plugin exited before completing handshake.");
+}
+
 static void plugin_dynamic_config_callback(const char *buffer,
 					   const jsmntok_t *toks,
 					   const jsmntok_t *idtok,
@@ -63,6 +73,7 @@ static void plugin_dynamic_config_callback(const char *buffer,
 	/* Reset the timer only now so that we are either configured, or
 	 * killed. */
 	tal_free(dp->plugin->timeout_timer);
+	tal_del_destructor2(dp->plugin, plugin_dynamic_crash, dp);
 
 	list_for_each(&dp->plugin->plugins->plugins, p, list) {
 		if (p->plugin_state != CONFIGURED)
@@ -133,9 +144,21 @@ static struct command_result *plugin_start(struct dynamic_plugin *dp)
 	/* Give the plugin 20 seconds to respond to `getmanifest`, so we don't hang
 	 * too long on the RPC caller. */
 	p->timeout_timer = new_reltimer(dp->cmd->ld->timers, dp,
-					time_from_sec((10)),
+					time_from_sec((20)),
 					plugin_dynamic_timeout, dp);
 
+	/* Besides the timeout we could also have the plugin crash before
+	 * completing the handshake. In that case we'll get notified and we
+	 * can clean up the `struct dynamic_plugin` and return an appropriate
+	 * error.
+	 *
+	 * The destructor is deregistered in the following places:
+	 *
+	 *  - plugin_dynamic_error in case of a timeout or a crash
+	 *  - plugin_dynamic_config_callback if the handshake completes
+	 */
+	tal_add_destructor2(p, plugin_dynamic_crash, dp);
+
 	/* Create two connections, one read-only on top of the plugin's stdin, and one
 	 * write-only on its stdout. */
 	io_new_conn(p, stdout, plugin_stdout_conn_init, p);
diff --git a/tests/plugins/slow_init.py b/tests/plugins/slow_init.py
index 48c279047..8587531be 100755
--- a/tests/plugins/slow_init.py
+++ b/tests/plugins/slow_init.py
@@ -8,7 +8,7 @@ plugin = Plugin()
 @plugin.init()
 def init(options, configuration, plugin):
     plugin.log("slow_init.py initializing {}".format(configuration))
-    time.sleep(11)
+    time.sleep(21)
 
 
 plugin.run()
diff --git a/tests/test_plugin.py b/tests/test_plugin.py
index 7a3920e95..806f2abfd 100644
--- a/tests/test_plugin.py
+++ b/tests/test_plugin.py
@@ -161,7 +161,7 @@ def test_plugin_command(node_factory):
     n2.rpc.plugin_stop(plugin="static.py")
 
     # Test that we don't crash when starting a broken plugin
-    with pytest.raises(RpcError, match=r"Timed out while waiting for plugin response"):
+    with pytest.raises(RpcError, match=r"Plugin exited before completing handshake."):
         n2.rpc.plugin_start(plugin=os.path.join(os.getcwd(), "tests/plugins/broken.py"))