mirror of
https://github.com/ElementsProject/lightning.git
synced 2025-01-18 21:35:11 +01:00
openingd: Implement sendcustommsg handling in openingd
Most of the work is done in `lightningd`, here we just need to queue the message itself.
This commit is contained in:
parent
5325ff6352
commit
3c88d5c8c4
@ -1355,6 +1355,14 @@ static void handle_dev_memleak(struct state *state, const u8 *msg)
|
||||
take(towire_opening_dev_memleak_reply(NULL,
|
||||
found_leak)));
|
||||
}
|
||||
|
||||
/* We were told to send a custommsg to the peer by `lightningd`. All the
 * verification is done on the side of `lightningd` so we should be good to
 * just forward it here. */
static void openingd_send_custommsg(struct state *state, const u8 *msg)
{
	/* take() marks msg as owned by the callee: sync_crypto_write
	 * consumes (and frees) the buffer instead of copying it
	 * (ccan/take convention — TODO confirm callee honors take here). */
	sync_crypto_write(state->pps, take(msg));
}
|
||||
#endif /* DEVELOPER */
|
||||
|
||||
/* Standard lightningd-fd-is-ready-to-read demux code. Again, we could hang
|
||||
@ -1418,7 +1426,7 @@ static u8 *handle_master_in(struct state *state)
|
||||
switch ((enum common_wire_type)t) {
|
||||
#if DEVELOPER
|
||||
case WIRE_CUSTOMMSG_OUT:
|
||||
/* `lightningd` has already verified this custommsg; just forward it. */
|
||||
openingd_send_custommsg(state, msg);
|
||||
return NULL;
|
||||
#else
|
||||
case WIRE_CUSTOMMSG_OUT:
|
||||
|
@ -2060,3 +2060,47 @@ def test_waitblockheight(node_factory, executor, bitcoind):
|
||||
bitcoind.generate_block(1)
|
||||
sync_blockheight(bitcoind, [node])
|
||||
fut2.result(5)
|
||||
|
||||
|
||||
@unittest.skipIf(not DEVELOPER, "Needs dev-sendcustommsg")
def test_sendcustommsg(node_factory):
    """Exercise dev-sendcustommsg against peers in every relevant state.

    `l2` is the node under test. `l1` shares a channel with `l2`, so its
    connection lives in `channeld`. `l4` is merely connected (no channel),
    so it sits in `openingd`. `l3` has a channel but is stopped, hence
    disconnected, and sending to it must fail.

    """
    l1, l2, l3 = node_factory.line_graph(3, opts={'log-level': 'io'})
    l3.stop()
    l4 = node_factory.get_node(options={'log-level': 'io'})
    l2.connect(l4)

    payload = 'ff' * 32
    wire_hex = '04070020' + payload

    # Sending to a node_id nobody owns must be rejected outright.
    unknown_node_id = '02df5ffe895c778e10f7742a6c5b8a0cefbe9465df58b92fadeb883752c8107c8f'
    with pytest.raises(RpcError, match=r'No such peer'):
        l1.rpc.dev_sendcustommsg(unknown_node_id, payload)

    # `l3` is down: confirm it shows as disconnected, then expect an error.
    assert not l2.rpc.listpeers(l3.info['id'])['peers'][0]['connected']
    with pytest.raises(RpcError, match=r'Peer is not connected'):
        l2.rpc.dev_sendcustommsg(l3.info['id'], payload)

    # Type 18 is WIRE_PING, a spec-defined message; injecting one could
    # corrupt our internal state, so it must be refused.
    with pytest.raises(RpcError, match=r'Cannot send messages of type 18 .WIRE_PING.'):
        l2.rpc.dev_sendcustommsg(l2.info['id'], r'0012')

    # Happy paths: one peer in channeld (l1), one in openingd (l4).
    l2.rpc.dev_sendcustommsg(l1.info['id'], payload)
    l2.rpc.dev_sendcustommsg(l4.info['id'], payload)

    # openingd on l2 should log the outgoing raw message for l4...
    l2.daemon.wait_for_log(
        r'%s-openingd-chan#[0-9]: \[OUT\] %s' % (l4.info['id'], wire_hex)
    )
    # ...and l4 should log it arriving.
    l4.daemon.wait_for_log(r'\[IN\] ' + wire_hex)
|
||||
|
@ -640,6 +640,12 @@ struct txowatch *watch_txo(const tal_t *ctx UNNEEDED,
|
||||
size_t input_num UNNEEDED,
|
||||
const struct block *block))
|
||||
{ fprintf(stderr, "watch_txo called!\n"); abort(); }
|
||||
/* NOTE(review): these mocks appear to be emitted by the mockup generator
 * (see AUTOGENERATED MOCKS markers) — do not hand-edit; regenerate instead. */
/* Generated stub for wire_type_is_defined */
bool wire_type_is_defined(u16 type UNNEEDED)
{ fprintf(stderr, "wire_type_is_defined called!\n"); abort(); }
/* Generated stub for wire_type_name */
const char *wire_type_name(int e UNNEEDED)
{ fprintf(stderr, "wire_type_name called!\n"); abort(); }
|
||||
/* AUTOGENERATED MOCKS END */
|
||||
|
||||
#if DEVELOPER
|
||||
|
Loading…
Reference in New Issue
Block a user