Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Changelog-Fixed: JSON-RPC: `xpay` will refuse to make a 0msat payment (0msat invoice, partial payment, or manually-set on amountless invoice).
Fixes: https://github.com/ElementsProject/lightning/issues/8016
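A minimal regression sketch of the behaviour described above, assuming the usual pyln test fixtures and that `xpay` reports the refusal as an `RpcError` (labels and amounts below are illustrative, not from the original test):
```python
import pytest
from pyln.client import RpcError


def test_xpay_refuses_zero_msat(node_factory):
    """xpay should refuse to attempt a 0msat payment (sketch)."""
    l1, l2 = node_factory.line_graph(2)

    # Amountless invoice: explicitly asking to pay 0msat must be rejected.
    inv = l2.rpc.invoice('any', 'zero-amount', 'zero-amount test')['bolt11']
    with pytest.raises(RpcError):
        l1.rpc.xpay(inv, '0msat')
```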
We have CI runs which time out (after 2 hours). It's not clear why,
but we can at least rule out CLN lockups as the cause.
Since pytest disables the --timeout option on test shutdown, we could be
seeing an issue where stopping takes a long time?
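To rule lockups in or out, one option is to put a hard bound around node shutdown in the harness, so a stuck stop fails loudly instead of eating the whole 2-hour CI budget. A minimal sketch, purely illustrative (not the actual harness change):
```python
from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeout


def stop_with_deadline(node, deadline=60):
    """Stop a node, raising loudly if shutdown exceeds `deadline` seconds."""
    ex = ThreadPoolExecutor(max_workers=1)
    fut = ex.submit(node.stop)
    try:
        return fut.result(timeout=deadline)
    except FutureTimeout:
        raise RuntimeError("node.stop() took more than {}s: possible shutdown lockup".format(deadline))
    finally:
        # Don't wait for a stuck stop() thread; just abandon it.
        ex.shutdown(wait=False)
```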
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
If we get "lucky", the commit tx will have a short signature, which is one less weight unit (a 1 in 256 chance):
```
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd anchors not supportd')
def test_onchain_slow_anchor(node_factory, bitcoind):
"""We still use anchors for non-critical closes"""
l1, l2 = node_factory.line_graph(2)
# Don't let l1 succeed in sending commit tx
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l1.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
close_start_depth = bitcoind.rpc.getblockchaininfo()['blocks']
# Make l1 close unilaterally.
l1.rpc.disconnect(l2.info['id'], force=True)
l1.rpc.close(l2.info['id'], unilateraltimeout=1)
# We will have a super-low-prio anchor spend.
l1.daemon.wait_for_log(r"Low-priority anchorspend aiming for block {} \(feerate 253\)".format(close_start_depth + 2016))
# Restart with reduced block time.
l1.stop()
l1.daemon.opts['dev-low-prio-anchor-blocks'] = 20
l1.start()
l1.daemon.wait_for_log("Low-priority anchorspend aiming for block {}".format(close_start_depth + 20))
l1.daemon.wait_for_log("Anchorspend for local commit tx")
# Won't go under 12 blocks though.
# Make sure it sees all these blocks at once, to avoid test flakes!
l1.stop()
bitcoind.generate_block(7)
l1.start()
height = bitcoind.rpc.getblockchaininfo()['blocks']
l1.daemon.wait_for_log(r"Low-priority anchorspend aiming for block {} \(feerate 7458\)".format(height + 13))
> l1.daemon.wait_for_log(r"Anchorspend for local commit tx fee 12335sat \(w=714\), commit_tx fee 4545sat \(w=768\): package feerate 11390 perkw")
```
Here's the log we *did* get:
```
2025-01-25T08:46:40.9399213Z lightningd-1 2025-01-25T08:40:06.312Z DEBUG 022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59-chan#1: Anchorspend for local commit tx fee 12328sat (w=714), commit_tx fee 4545sat (w=767): package feerate 11392 perkw
```
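One way to make the test robust is to accept both outcomes in the regex; a minimal sketch, with the alternative numbers taken from the expected and observed lines above:
```python
# Accept either signature length: the short sig drops one weight unit from the
# commit tx (w=768 -> 767), which also shifts the anchorspend fee and the
# package feerate slightly.
l1.daemon.wait_for_log(r"Anchorspend for local commit tx fee 123(35|28)sat \(w=714\), "
                       r"commit_tx fee 4545sat \(w=76[78]\): package feerate 1139\d perkw")
```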
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
It doesn't always die messily, it seems?
```
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
def test_rbf_reconnect_tx_construct(node_factory, bitcoind, chainparams):
disconnects = ['=WIRE_TX_ADD_INPUT', # Initial funding succeeds
'-WIRE_TX_ADD_INPUT',
'+WIRE_TX_ADD_INPUT',
'-WIRE_TX_ADD_OUTPUT',
'+WIRE_TX_ADD_OUTPUT',
'-WIRE_TX_COMPLETE',
'+WIRE_TX_COMPLETE',
'-WIRE_COMMITMENT_SIGNED',
'+WIRE_COMMITMENT_SIGNED']
l1, l2 = node_factory.get_nodes(2,
opts=[{'disconnect': disconnects,
'may_reconnect': True,
'dev-no-reconnect': None},
{'may_reconnect': True,
'dev-no-reconnect': None,
'broken_log': 'dualopend daemon died before signed PSBT returned'}])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
amount = 2**24
chan_amount = 100000
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8 + 0.01)
bitcoind.generate_block(1)
# Wait for it to arrive.
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > 0)
res = l1.rpc.fundchannel(l2.info['id'], chan_amount)
chan_id = res['channel_id']
vins = bitcoind.rpc.decoderawtransaction(res['tx'])['vin']
assert(only_one(vins))
prev_utxos = ["{}:{}".format(vins[0]['txid'], vins[0]['vout'])]
# Check that we're waiting for lockin
l1.daemon.wait_for_log(' to DUALOPEND_AWAITING_LOCKIN')
# rbf the lease with a higher amount
rate = int(find_next_feerate(l1, l2)[:-5])
# We 4x the feerate to beat the min-relay fee
next_feerate = '{}perkw'.format(rate * 4)
# Initiate an RBF
startweight = 42 + 172 # base weight, funding output
initpsbt = l1.rpc.utxopsbt(chan_amount, next_feerate, startweight,
prev_utxos, reservedok=True,
excess_as_change=True)
# Run through TX_ADD wires
for d in disconnects[1:-4]:
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError):
l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt'])
wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'] is False)
# The first TX_COMPLETE breaks
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
bump = l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt'])
with pytest.raises(RpcError):
update = l1.rpc.openchannel_update(chan_id, bump['psbt'])
wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'] is False)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 should remember, l2 has forgotten
# l2 should send tx-abort, to reset
l2.daemon.wait_for_log(r'tx-abort: Sent next_funding_txid .* doesn\'t match ours .*')
l1.daemon.wait_for_log(r'Cleaned up incomplete inflight')
# abort doesn't cause a disconnect
assert l1.rpc.getpeer(l2.info['id'])['connected']
# The next TX_COMPLETE break (both remember) + they break on the
# COMMITMENT_SIGNED during the reconnect
bump = l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt'])
with pytest.raises(RpcError):
update = l1.rpc.openchannel_update(chan_id, bump['psbt'])
wait_for(lambda: l1.rpc.getpeer(l2.info['id'])['connected'] is False)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_logs([r'Got dualopend reestablish',
r'No commitment, not sending our sigs'])
l1.daemon.wait_for_logs([r'Got dualopend reestablish',
r'No commitment, not sending our sigs',
r'dev_disconnect: -WIRE_COMMITMENT_SIGNED',
'peer_disconnect_done'])
assert not l1.rpc.getpeer(l2.info['id'])['connected']
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# COMMITMENT_SIGNED disconnects *during* the reconnect
# We can't bump because the last negotiation is in the wrong state
with pytest.raises(RpcError, match=r'Funding sigs for this channel not secured'):
l1.rpc.openchannel_bump(chan_id, chan_amount, initpsbt['psbt'])
# l2 reconnects, but doesn't have l1's commitment
> l2.daemon.wait_for_logs([r'Got dualopend reestablish',
r'No commitment, not sending our sigs',
# This is a BROKEN log, it's expected!
r'dualopend daemon died before signed PSBT returned'])
tests/test_opening.py:944:
...
> raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
E TimeoutError: Unable to find "[re.compile('dualopend daemon died before signed PSBT returned')]" in logs.
```
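Given the test quoted above, a sketch of a more tolerant ending, assuming the two reestablish lines are what we actually need to verify and the BROKEN line is optional:
```python
# Always present: the reestablish exchange without commitment sigs.
l2.daemon.wait_for_logs([r'Got dualopend reestablish',
                         r'No commitment, not sending our sigs'])
# The messy death is not guaranteed, so note whether it happened rather than
# requiring the BROKEN log line to appear.
died_messily = l2.daemon.is_in_log(r'dualopend daemon died before signed PSBT returned')
```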
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
By having gossipwith filter out messages we don't want, we can get the counts of
expected messages correct, and not hit errors like this:
```
def test_gossip_throttle(node_factory, bitcoind, chainparams):
"""Make some gossip, test it gets throttled"""
l1, l2, l3, l4 = node_factory.line_graph(4, wait_for_announce=True,
opts=[{}, {}, {}, {'dev-throttle-gossip': None}])
# We expect: self-advertizement (3 messages for l1 and l4) plus
# 4 node announcements, 3 channel announcements and 6 channel updates.
# We also expect it to send a timestamp filter message.
# (We won't take long enough to get a ping!)
expected = 4 + 4 + 3 + 6 + 1
# l1 is unlimited
start_fast = time.time()
out1 = subprocess.run(['devtools/gossipwith',
'--all-gossip',
'--hex',
'--network={}'.format(TEST_NETWORK),
'--max-messages={}'.format(expected),
'{}@localhost:{}'.format(l1.info['id'], l1.port)],
check=True,
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout.split()
time_fast = time.time() - start_fast
assert time_fast < 2
# Remove timestamp filter, since timestamp will change!
out1 = [m for m in out1 if not m.startswith(b'0109')]
# l4 is throttled
start_slow = time.time()
out2 = subprocess.run(['devtools/gossipwith',
'--all-gossip',
'--hex',
'--network={}'.format(TEST_NETWORK),
'--max-messages={}'.format(expected),
'{}@localhost:{}'.format(l4.info['id'], l4.port)],
check=True,
timeout=TIMEOUT, stdout=subprocess.PIPE).stdout.split()
time_slow = time.time() - start_slow
assert time_slow > 3
# Remove timestamp filter, since timestamp will change!
out2 = [m for m in out2 if not m.startswith(b'0109')]
# Contents should be identical (once uniquified, since each
# doubles-up on its own gossip)
> assert set(out1) == set(out2)
E AssertionError: assert {b'010054b1907bdf639c9060e0fa4bca02419c46f75a99f0908b87a2e09711d5d031ba76b8fd07acc8be1b2fac9e31efb808e5d362c32ef4665...
E Extra items in the left set:
E b'01010ad5be8b9ba029245c2ae2d667af7ead7c0129c479c7fd7145a9b65931e90222082e6e4ab37ef60ebd10f1493d73e8bf7a40c4ae5f7d87cc...8488830b60f7e744ed9235eb0b1ba93283b315c035180266e44a554e494f524245414d2d333930353033622d6d6f64646564000000000000000000'
E Extra items in the right set:
E b'01079f87eb580b9e5f11dc211e9fb66abb3699999044f8fe146801162393364286c6000000010000006c010101'
```
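The actual change filters inside the gossipwith devtool itself; the same idea, sketched on the Python side of the test (message-type prefixes per BOLT 7: 0100 channel_announcement, 0101 node_announcement, 0102 channel_update):
```python
# Keep only the gossip types we actually want to compare, dropping queries,
# pings and timestamp filters that can differ between the two runs.
WANTED_PREFIXES = (b'0100', b'0101', b'0102')


def wanted_gossip(msgs):
    """Filter gossipwith hex output down to announcements and updates."""
    return [m for m in msgs if m.startswith(WANTED_PREFIXES)]


assert set(wanted_gossip(out1)) == set(wanted_gossip(out2))
```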
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
So not every message type counts toward the limit: this is useful when we want to
receive a specific number of messages of a specific type.
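For example, since filtered-out messages no longer count toward --max-messages, a test can ask for exactly the gossip it expects and then sanity-check the breakdown; a small illustrative helper (counts taken from the comment in the test quoted earlier):
```python
from collections import Counter


def count_by_type(msgs):
    """Count gossipwith hex output by its 2-byte message type prefix."""
    return Counter(m[:4].decode() for m in msgs)


# For the 4-node line graph above: 3 channel_announcements (0100),
# 4 node_announcements (0101) and 6 channel_updates (0102), once uniquified.
counts = count_by_type(set(out2))
assert counts['0100'] == 3 and counts['0101'] == 4 and counts['0102'] == 6
```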
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This caused a "flake" in testing, because it's wrong:
```
_____________________________ test_gossip_pruning ______________________________
[gw2] linux -- Python 3.10.16 /home/runner/.cache/pypoetry/virtualenvs/cln-meta-project-AqJ9wMix-py3.10/bin/python
node_factory = <pyln.testing.utils.NodeFactory object at 0x7f0267530490>
bitcoind = <pyln.testing.utils.BitcoinD object at 0x7f0267532b30>
def test_gossip_pruning(node_factory, bitcoind):
""" Create channel and see it being updated in time before pruning
"""
l1, l2, l3 = node_factory.get_nodes(3, opts={'dev-fast-gossip-prune': None,
'allow_bad_gossip': True,
'autoconnect-seeker-peers': 0})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
scid1, _ = l1.fundchannel(l2, 10**6)
scid2, _ = l2.fundchannel(l3, 10**6)
mine_funding_to_announce(bitcoind, [l1, l2, l3])
wait_for(lambda: l1.rpc.listchannels(source=l1.info['id'])['channels'] != [])
l1_initial_cupdate_timestamp = only_one(l1.rpc.listchannels(source=l1.info['id'])['channels'])['last_update']
# Get timestamps of initial updates, so we can ensure they change.
# Channels should be activated locally
> wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True] * 4)
```
Here you can see it has pruned:
```
lightningd-1 2025-01-24T07:39:40.873Z DEBUG gossipd: Pruning channel 105x1x0 from network view (ages 1737704380 and 0)
...
lightningd-1 2025-01-24T07:39:50.941Z UNUSUAL lightningd: Bad gossip order: could not find channel 105x1x0 for peer's channel update
```
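The overzealous version treated the side it hadn't heard from as having a timestamp of 0 (hence "ages 1737704380 and 0" above) and pruned a live channel. A sketch of the intended rule, in Python for clarity (names are illustrative; the real logic lives in gossipd):
```python
def should_prune(ts_side_a, ts_side_b, now, prune_after):
    """A channel is prune-eligible only once we have seen a channel_update
    from *both* sides and both of them are stale (None = not seen yet)."""
    if ts_side_a is None or ts_side_b is None:
        # Don't prune just because one side's update hasn't arrived yet.
        return False
    return (now - ts_side_a) > prune_after and (now - ts_side_b) > prune_after
```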
Changelog-Fixed: Protocol: we were overzealous in pruning channels if we hadn't seen one side's gossip update yet.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
We can be a bit early in our assertion:
```
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "sqlite3-specific DB manip")
def test_reconnect_remote_sends_no_sigs(node_factory):
"""We re-announce, even when remote node doesn't send its announcement_signatures on reconnect.
"""
l1, l2 = node_factory.line_graph(2, wait_for_announce=True, opts={'may_reconnect': True,
'dev-no-reconnect': None})
# Wipe l2's gossip_store
l2.stop()
gs_path = os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, 'gossip_store')
os.unlink(gs_path)
l2.start()
# l2 will now uses (REMOTE's) announcement_signatures it has stored
wait_for(lambda: l2.rpc.listchannels()['channels'] != [])
# Remove remote signatures from l1 so it asks for them (and delete gossip store)
l1.db_manip("UPDATE channels SET remote_ann_node_sig=NULL, remote_ann_bitcoin_sig=NULL")
gs_path = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, 'gossip_store')
os.unlink(gs_path)
l1.restart()
l1.connect(l2)
l1needle = l1.daemon.logsearch_start
l2needle = l2.daemon.logsearch_start
# l1 asks once, l2 replies once.
# Make sure we get all the msgs!
time.sleep(5)
l1.daemon.wait_for_log('peer_out WIRE_ANNOUNCEMENT_SIGNATURES')
l2.daemon.wait_for_log('peer_out WIRE_ANNOUNCEMENT_SIGNATURES')
l1msgs = [l.split()[4] for l in l1.daemon.logs[l1needle:] if 'WIRE_ANNOUNCEMENT_SIGNATURES' in l]
> assert l1msgs == ['peer_out', 'peer_in']
E AssertionError: assert ['peer_out'] == ['peer_out', 'peer_in']
E Right contains one more item: 'peer_in'
E Full diff:
E - ['peer_out', 'peer_in']
E + ['peer_out']
```
```
lightningd-2 2025-01-24T05:53:22.862Z DEBUG 0266e4598d1d3c415f572a8488830b60f7e744ed9235eb0b1ba93283b315c03518-connectd: peer_out WIRE_ANNOUNCEMENT_SIGNATURES
lightningd-1 2025-01-24T05:53:22.864Z DEBUG 022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59-channeld-chan#1: peer_in WIRE_ANNOUNCEMENT_SIGNATURES
lightningd-1 2025-01-24T05:53:22.885Z DEBUG 022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59-chan#1: channel_gossip: received announcement sigs for 103x1x0 (we have 103x1x0)
{'github_repository': 'ElementsProject/lightning', 'github_sha': 'e9d36f2b8ecd45882753cbe062c355e40bc7109c', 'github_ref': 'refs/pull/8027/merge', 'github_ref_name': 'HEAD', 'github_run_id': 12943530601, 'github_head_ref':
```
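A sketch of a less racy ending for the test: instead of a fixed 5-second sleep and a single snapshot, poll until both directions of the ANNOUNCEMENT_SIGNATURES exchange appear in l1's log (same helpers and names as the test above):
```python
def l1_sig_msgs():
    """Directions ('peer_in'/'peer_out') of ANNOUNCEMENT_SIGNATURES seen by l1."""
    return [l.split()[4] for l in l1.daemon.logs[l1needle:]
            if 'WIRE_ANNOUNCEMENT_SIGNATURES' in l]


# Wait until l1 has both sent and received the signatures, rather than
# assuming a fixed sleep is always long enough.
wait_for(lambda: sorted(set(l1_sig_msgs())) == ['peer_in', 'peer_out'])
```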
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Sometimes they connect too fast, so we don't get a chance to ratelimit all of them:
```
def test_connect_ratelimit(node_factory, bitcoind):
"""l1 has 5 peers, restarts, make sure we limit"""
nodes = node_factory.get_nodes(6,
opts=[{'dev-limit-connections-inflight': None, 'may_reconnect': True}] + [{'may_reconnect': True}] * 5)
l1 = nodes[0]
nodes = nodes[1:]
addr = l1.rpc.newaddr()['bech32']
for n in nodes:
bitcoind.rpc.sendtoaddress(addr, (FUNDAMOUNT + 1000000) / 10**8)
bitcoind.generate_block(1, wait_for_mempool=len(nodes))
sync_blockheight(bitcoind, [l1])
for n in nodes:
l1.rpc.connect(n.info['id'], 'localhost', n.port)
l1.rpc.fundchannel(n.info['id'], FUNDAMOUNT)
# Make sure all channels are established and announced.
bitcoind.generate_block(6, wait_for_mempool=len(nodes))
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == len(nodes) * 2)
assert not l1.daemon.is_in_log('Unblocking for')
l1.restart()
# The first will be ok, but others should block and be unblocked.
> l1.daemon.wait_for_logs((['Unblocking for ']
+ ['Too many connections, waiting'])
* (len(nodes) - 1))
tests/test_connection.py:4721:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <pyln.testing.utils.LightningD object at 0x7f6e288a3a60>
regexs = ['Unblocking for ', 'Too many connections, waiting', 'Unblocking for ', 'Too many connections, waiting', 'Unblocking for ', 'Too many connections, waiting', ...]
timeout = 180
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
The logs contain tailed stdout of the process. We look for each regex
in `regexs`, starting from `logsearch_start` which normally is the
position of the last found entry of a previous wait-for logs call.
The ordering inside `regexs` doesn't matter.
We fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
while True:
if self.logsearch_start >= len(self.logs):
if not self.logs_catchup():
time.sleep(0.25)
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
> raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
E TimeoutError: Unable to find "[re.compile('Unblocking for '), re.compile('Too many connections, waiting')]" in logs.
```
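A sketch of a more tolerant ending, given that some peers can connect before the in-flight limit engages (same names as the test above; the exact bound is illustrative):
```python
# At least one peer must hit the limiter and then be unblocked; the rest may
# have connected before the in-flight limit was reached.
l1.daemon.wait_for_logs(['Too many connections, waiting',
                         'Unblocking for '])
blocked = len([l for l in l1.daemon.logs if 'Too many connections, waiting' in l])
assert 1 <= blocked <= len(nodes) - 1
```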
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
```
____________________ ERROR at teardown of test_xpay_maxfee _____________________
...
# Format a nice list of everything that went wrong and raise an exception
request.node.has_errors = True
> raise ValueError(str(errors))
E ValueError:
E Node errors:
E - lightningd-1: Node exited with return code 1
E Global errors:
```
And:
```
@unittest.skipIf(TEST_NETWORK != 'regtest', 'too dusty on elements')
def test_xpay_maxfee(node_factory, bitcoind, chainparams):
"""Test which shows that we don't excees maxfee"""
outfile = tempfile.NamedTemporaryFile(prefix='gossip-store-')
subprocess.check_output(['devtools/gossmap-compress',
'decompress',
'--node-map=3301=022d223620a359a47ff7f7ac447c85c46c923da53389221a0054c11c1e3ca31d59',
'tests/data/gossip-store-2024-09-22.compressed',
outfile.name]).decode('utf-8').splitlines()
AMOUNT = 100_000_000
# l2 will warn l1 about its invalid gossip: ignore.
# We throttle l1's gossip to avoid massive log spam.
> l1, l2 = node_factory.line_graph(2,
# This is in sats, so 1000x amount we send.
fundamount=AMOUNT,
opts=[{'gossip_store_file': outfile.name,
'subdaemon': 'channeld:../tests/plugins/channeld_fakenet',
'allow_warning': True,
'dev-throttle-gossip': None},
{'allow_bad_gossip': True}])
tests/test_xpay.py:509:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
contrib/pyln-testing/pyln/testing/utils.py:1720: in line_graph
nodes = self.get_nodes(num_nodes, opts=opts)
contrib/pyln-testing/pyln/testing/utils.py:1602: in get_nodes
return [j.result() for j in jobs]
contrib/pyln-testing/pyln/testing/utils.py:1602: in <listcomp>
return [j.result() for j in jobs]
/opt/hostedtoolcache/Python/3.10.16/x64/lib/python3.10/concurrent/futures/_base.py:458: in result
return self.__get_result()
/opt/hostedtoolcache/Python/3.10.16/x64/lib/python3.10/concurrent/futures/_base.py:403: in __get_result
raise self._exception
/opt/hostedtoolcache/Python/3.10.16/x64/lib/python3.10/concurrent/futures/thread.py:58: in run
result = self.fn(*self.args, **self.kwargs)
contrib/pyln-testing/pyln/testing/utils.py:1653: in get_node
node.start(wait_for_bitcoind_sync)
contrib/pyln-testing/pyln/testing/utils.py:1015: in start
self.daemon.start(stderr_redir=stderr_redir)
contrib/pyln-testing/pyln/testing/utils.py:671: in start
self.wait_for_log("Server started with public key")
contrib/pyln-testing/pyln/testing/utils.py:355: in wait_for_log
return self.wait_for_logs([regex], timeout)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <pyln.testing.utils.LightningD object at 0x7f27ab586c20>
regexs = ['Server started with public key'], timeout = 180
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
The logs contain tailed stdout of the process. We look for each regex
in `regexs`, starting from `logsearch_start` which normally is the
position of the last found entry of a previous wait-for logs call.
The ordering inside `regexs` doesn't matter.
We fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
while True:
if self.logsearch_start >= len(self.logs):
if not self.logs_catchup():
time.sleep(0.25)
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
> raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
E TimeoutError: Unable to find "[re.compile('Server started with public key')]" in logs.
```
gossipd (and some plugins) simply take too long to digest the gossmap under valgrind.
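A minimal sketch of handling that in the test, assuming the VALGRIND flag that pyln's test utilities expose (skipping outright; raising the startup timeout would be the other option):
```python
import unittest

from pyln.testing.utils import VALGRIND  # True when the suite runs under valgrind


@unittest.skipIf(VALGRIND, "gossipd takes too long to digest the gossmap under valgrind")
def test_xpay_maxfee(node_factory, bitcoind, chainparams):
    ...  # body as quoted above
```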
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
I'd been resisting this, but some tests really do want "hang up after
*receiving* this", and it's more reliable than getting the peer to
"hang up after *sending* this", which doesn't always seem to work.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Pyln logger's configuration was unexpectedly being overwritten by the root logger during the autogenerated examples test, complicating error debugging.
We resolved this by introducing a pytest fixture to reapply the logger configuration before the tests execute.
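A minimal sketch of such a fixture, assuming the goal is just to reapply a known configuration to the pyln loggers before each example test runs (handler and format details are illustrative):
```python
import logging

import pytest


@pytest.fixture(autouse=True)
def restore_pyln_logging():
    """Reapply the pyln logger config in case the root logger clobbered it."""
    logger = logging.getLogger("pyln")
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("%(name)s %(levelname)s %(message)s"))
        logger.addHandler(handler)
    yield
```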
Changelog-None
Fixes: https://github.com/ElementsProject/lightning/issues/8023
Currently on macOS I see link failures like the following:
```bash
./configure
<snip>
checking for libsodium with IETF chacha20 variants... yes
<snip>
ar libccan.a
ld ccan/ccan/cdump/tools/cdump-enumstr
ld: library 'sodium' not found
clang: error: linker command failed with exit code 1 (use -v to see invocation)
```
Note that the configure check passes under Clang here, even though the final link fails.
Changelog-Fixed: build: fix linking against libsodium on macOS.
The current check fails when building with newer GCC, e.g.:
```bash
error: ‘crypto_secretstream_xchacha20poly1305_init_push’ reading 32 bytes from a region of size 3 [-Werror=stringop-overread]
12 | crypto_secretstream_xchacha20poly1305_init_push(&crypto_state, header,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13 | data);
```
This inadvertently results in the release binaries for Ubuntu 22.04 &
24.04 not having a runtime libsodium dependency, instead using the
bundled lib.
It's not clear to me that this is actually enough to fix (all) the release builds,
as the build containers need to have `libsodium-dev`, not just `libsodium`,
in them, and it's not clear to me which packages are actually present
from looking at the repro build scripts.
Changelog-Fixed: build: libsodium configure check fixed to work with newer GCC.
The updated API requires typed htables to explicitly state whether they
allow duplicates: for most cases we don't, but we've had issues in the
past.
This is a big patch, but mainly mechanical.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
I wanted to make sure we didn't have a bug in our htable routine,
so I wrote this test. We don't, but leave it in.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
- Install `poetry-plugin-export` as a separate step.
- Remove `--no-update` option from `poetry lock` as it's now default behavior.
- Add `poetry lock` to the command chain after removing cln-rest and wss-proxy.
https://github.com/python-poetry/poetry/releases/tag/2.0.0
Undertaken to upgrade QEMU to 7.2. Also upgrades Python to 3.11
implicitly and migrates Python dependency management to virtual environments.
Changelog-Changed: Released Docker images are now based on Debian Bookworm
This doesn't do anything, because it's trying to create a symlink for a
version that doesn't exist. The version installed via brew is 0.22.5.
In any case, on any recent macOS system, this should not be necessary.
Changelog-None.
Poetry no longer includes the `poetry-plugin-export` plugin by default, but it is essential for exporting dependencies, so we now need to install it explicitly.