pytest: Remove 3 stress-tests

These tests have proven to be:

 a) very expensive, as they spin up many nodes and perform long setup
 b) not testing anything specific: they just fuzz functionality that
    is already tested otherwise
 c) unable to help pinpoint any issue in living memory
 d) very flaky, making for really bad signal-to-noise, so much so
    that devs usually just restart them without even looking at the logs
 e) irreproducible: even if we were to look at the logs, the inherent
    randomness in these tests means we could not replay a failure
 f) really noisy neighbors, causing other tests to flake as well,
    further muddying the water

All in all, these tests are a waste of time and a source of
frustration.

[ Cleaned up python unused imports --RR ]
Changelog-None
Christian Decker 2022-02-25 17:38:54 +01:00 committed by Rusty Russell
parent ec6d91fd54
commit 86b83e473b
2 changed files with 0 additions and 200 deletions

--- a/tests/test_closing.py
+++ b/tests/test_closing.py
@@ -187,68 +187,6 @@ def test_closing_id(node_factory):
     wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
 
 
-@pytest.mark.slow_test
-def test_closing_torture(node_factory, executor, bitcoind):
-    # We set up a fully-connected mesh of N nodes, then try
-    # closing them all at once.
-    amount = 10**6
-
-    num_nodes = 10  # => 45 channels (36 seconds on my laptop)
-    if node_factory.valgrind:
-        num_nodes -= 4  # => 15 (135 seconds)
-
-    nodes = node_factory.get_nodes(num_nodes)
-
-    # Make sure bitcoind has plenty of utxos
-    bitcoind.generate_block(num_nodes)
-
-    # Give them all plenty of UTXOs, make sure they see them
-    for i in range(len(nodes)):
-        for j in range(i + 1, len(nodes)):
-            addr = nodes[i].rpc.newaddr()['bech32']
-            bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
-    bitcoind.generate_block(1)
-    sync_blockheight(bitcoind, nodes)
-
-    txs = []
-    for i in range(len(nodes)):
-        for j in range(i + 1, len(nodes)):
-            nodes[i].rpc.connect(nodes[j].info['id'], 'localhost', nodes[j].port)
-            txs.append(nodes[i].rpc.fundchannel(nodes[j].info['id'], amount)['txid'])
-
-    # Make sure they're all in, then lock them in.
-    bitcoind.generate_block(1, wait_for_mempool=txs)
-
-    # Wait for them all to be CHANNELD_NORMAL
-    for n in nodes:
-        wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in n.rpc.listpeers()['peers']))
-
-    # Start closers: can take a long time under valgrind!
-    futures = []
-    for i in range(len(nodes)):
-        for j in range(i + 1, len(nodes)):
-            futures.append(executor.submit(nodes[i].rpc.close, nodes[j].info['id']))
-            futures.append(executor.submit(nodes[j].rpc.close, nodes[i].info['id']))
-
-    # Wait for close to finish
-    close_txs = set()
-    for f in futures:
-        # If one side completes closing, we'll get an error here 'Peer has no active channel'
-        try:
-            close_txs.add(f.result(TIMEOUT)['txid'])
-        except RpcError as err:
-            assert err.error['message'] == 'Peer has no active channel'
-
-    # Should have one close for each open.
-    assert len(close_txs) == len(txs)
-
-    # Get closes confirmed
-    bitcoind.generate_block(100, wait_for_mempool=list(close_txs))
-
-    # And make sure they hangup.
-    for n in nodes:
-        wait_for(lambda: n.rpc.listpeers()['peers'] == [])
-
-
 @unittest.skipIf(TEST_NETWORK != 'regtest', 'FIXME: broken under elements')
 @pytest.mark.slow_test
 def test_closing_different_fees(node_factory, bitcoind, executor):
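
A side note on the sizing comments in the removed test: a fully connected mesh of num_nodes nodes has num_nodes * (num_nodes - 1) / 2 channels, which is where the "45 channels" for 10 nodes and the "15" for the 6 nodes used under valgrind come from. A minimal sketch of that arithmetic, in plain Python:

def mesh_channel_count(num_nodes):
    # One channel per unordered pair of nodes in a full mesh.
    return num_nodes * (num_nodes - 1) // 2

assert mesh_channel_count(10) == 45      # normal run
assert mesh_channel_count(10 - 4) == 15  # valgrind run (num_nodes -= 4)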

--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -1,4 +1,3 @@
-from collections import namedtuple
 from fixtures import *  # noqa: F401,F403
 from fixtures import TEST_NETWORK
 from flaky import flaky  # noqa: F401
@@ -19,7 +18,6 @@ import os
 import pytest
 import random
 import re
-import shutil
 import time
 import unittest
 import websocket
@@ -585,47 +583,6 @@ def test_reconnect_no_update(node_factory, executor, bitcoind):
     l1.daemon.wait_for_log(r"CLOSINGD_COMPLETE")
 
 
-def test_connect_stresstest(node_factory, executor):
-    # This test is unreliable, but it's better than nothing.
-    l1, l2, l3 = node_factory.get_nodes(3, opts={'may_reconnect': True})
-
-    # Hack l3 into a clone of l2, to stress reconnect code.
-    l3.stop()
-    shutil.copyfile(os.path.join(l2.daemon.lightning_dir, TEST_NETWORK, 'hsm_secret'),
-                    os.path.join(l3.daemon.lightning_dir, TEST_NETWORK, 'hsm_secret'))
-    l3.start()
-    l3.info = l3.rpc.getinfo()
-
-    assert l3.info['id'] == l2.info['id']
-
-    # We fire off random connect/disconnect commands.
-    actions = [
-        (l2.rpc.connect, l1.info['id'], 'localhost', l1.port),
-        (l3.rpc.connect, l1.info['id'], 'localhost', l1.port),
-        (l1.rpc.connect, l2.info['id'], 'localhost', l2.port),
-        (l1.rpc.connect, l3.info['id'], 'localhost', l3.port),
-        (l1.rpc.disconnect, l2.info['id'])
-    ]
-    args = [random.choice(actions) for _ in range(1000)]
-
-    # We get them all to connect to each other.
-    futs = []
-    for a in args:
-        futs.append(executor.submit(*a))
-
-    # We don't actually care if they fail, since some will.
-    successes = 0
-    failures = 0
-    for f in futs:
-        if f.exception():
-            failures += 1
-        else:
-            f.result()
-            successes += 1
-
-    assert successes > failures
-
-
 @pytest.mark.developer
 @pytest.mark.openchannel('v1')
 @pytest.mark.openchannel('v2')
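
The heart of the removed stress test is a fire-and-tally pattern: submit a batch of actions to an executor, let them race, and assert only that successes outnumber failures. A self-contained sketch of that pattern, with a hypothetical flaky_action standing in for the connect/disconnect RPC calls:

import random
from concurrent.futures import ThreadPoolExecutor

def flaky_action(i):
    # Hypothetical stand-in for an RPC call that sometimes fails.
    if random.random() < 0.3:
        raise RuntimeError('action %d failed' % i)

with ThreadPoolExecutor(max_workers=8) as executor:
    futs = [executor.submit(flaky_action, i) for i in range(1000)]
    # Future.exception() blocks until that future has completed.
    successes = sum(1 for f in futs if f.exception() is None)
    failures = len(futs) - successes

assert successes > failures

Note how weak the oracle is: the assertion exercises a lot of code but checks no specific behaviour, which is exactly point (b) of the commit message.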
@@ -2914,101 +2871,6 @@ def test_fulfill_incoming_first(node_factory, bitcoind):
     l3.daemon.wait_for_log('onchaind complete, forgetting peer')
 
 
-@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
-@pytest.mark.slow_test
-def test_restart_many_payments(node_factory, bitcoind):
-    l1 = node_factory.get_node(may_reconnect=True)
-
-    # On my laptop, these take 89 seconds and 12 seconds
-    if node_factory.valgrind:
-        num = 2
-    else:
-        num = 5
-
-    nodes = node_factory.get_nodes(num * 2, opts={'may_reconnect': True})
-    innodes = nodes[:num]
-    outnodes = nodes[num:]
-
-    # Fund up-front to save some time.
-    dests = {l1.rpc.newaddr()['bech32']: (10**6 + 1000000) / 10**8 * num}
-    for n in innodes:
-        dests[n.rpc.newaddr()['bech32']] = (10**6 + 1000000) / 10**8
-    bitcoind.rpc.sendmany("", dests)
-    bitcoind.generate_block(1)
-    sync_blockheight(bitcoind, [l1] + innodes)
-
-    # Nodes with channels into the main node
-    for n in innodes:
-        n.rpc.connect(l1.info['id'], 'localhost', l1.port)
-        n.rpc.fundchannel(l1.info['id'], 10**6)
-
-    # Nodes with channels out of the main node
-    for n in outnodes:
-        l1.rpc.connect(n.info['id'], 'localhost', n.port)
-        # OK to use change from previous fundings
-        l1.rpc.fundchannel(n.info['id'], 10**6, minconf=0)
-
-    # Now mine them, get scids
-    mine_funding_to_announce(bitcoind, [l1] + nodes,
-                             num_blocks=6, wait_for_mempool=num * 2)
-
-    wait_for(lambda: [only_one(n.rpc.listpeers()['peers'])['channels'][0]['state'] for n in nodes] == ['CHANNELD_NORMAL'] * len(nodes))
-
-    inchans = []
-    for n in innodes:
-        inchans.append(only_one(n.rpc.listpeers()['peers'])['channels'][0]['short_channel_id'])
-
-    outchans = []
-    for n in outnodes:
-        outchans.append(only_one(n.rpc.listpeers()['peers'])['channels'][0]['short_channel_id'])
-
-    # Now make sure every node sees every channel.
-    for n in nodes + [l1]:
-        wait_for(lambda: [c['public'] for c in n.rpc.listchannels()['channels']] == [True] * len(nodes) * 2)
-
-    # Manually create routes, get invoices
-    Payment = namedtuple('Payment', ['innode', 'route', 'payment_hash', 'payment_secret'])
-
-    to_pay = []
-    for i in range(len(innodes)):
-        # This one will cause WIRE_INCORRECT_CLTV_EXPIRY from l1.
-        route = [{'msatoshi': 100001001,
-                  'id': l1.info['id'],
-                  'delay': 10,
-                  'channel': inchans[i]},
-                 {'msatoshi': 100000000,
-                  'id': outnodes[i].info['id'],
-                  'delay': 5,
-                  'channel': outchans[i]}]
-        inv = outnodes[i].rpc.invoice(100000000, "invoice", "invoice")
-        payment_hash = inv['payment_hash']
-        to_pay.append(Payment(innodes[i], route, payment_hash, inv['payment_secret']))
-
-        # This one should be routed through to the outnode.
-        route = [{'msatoshi': 100001001,
-                  'id': l1.info['id'],
-                  'delay': 11,
-                  'channel': inchans[i]},
-                 {'msatoshi': 100000000,
-                  'id': outnodes[i].info['id'],
-                  'delay': 5,
-                  'channel': outchans[i]}]
-        inv = outnodes[i].rpc.invoice(100000000, "invoice2", "invoice2")
-        payment_hash = inv['payment_hash']
-        to_pay.append(Payment(innodes[i], route, payment_hash, inv['payment_secret']))
-
-    # sendpay is async.
-    for p in to_pay:
-        p.innode.rpc.sendpay(p.route, p.payment_hash, p.payment_secret)
-
-    # Now restart l1 while traffic is flowing...
-    l1.restart()
-
-    # Wait for them to finish.
-    for n in innodes:
-        wait_for(lambda: 'pending' not in [p['status'] for p in n.rpc.listsendpays()['payments']])
-
-
 @pytest.mark.skip('needs blackhold support')
 @pytest.mark.developer("need dev-disconnect")
 @pytest.mark.openchannel('v1')
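
One detail worth noting from the removed test_restart_many_payments: the two routes differ only in the first hop's delay (10 vs 11), and the comments say 10 triggers WIRE_INCORRECT_CLTV_EXPIRY while 11 is forwarded. That is consistent with l1 requiring a CLTV delta of 6 over the final hop's delay of 5; the delta value of 6 is inferred from the test, not stated in it. A hypothetical check mirroring that arithmetic:

# Assumed values, reconstructed from the routes in the removed test.
FINAL_HOP_DELAY = 5  # 'delay' of the last hop in both routes
L1_CLTV_DELTA = 6    # inferred: delay 11 works, delay 10 does not

def l1_would_forward(first_hop_delay):
    # A forwarding node needs at least its cltv_delta of headroom
    # between its own delay and the next hop's delay.
    return first_hop_delay - FINAL_HOP_DELAY >= L1_CLTV_DELTA

assert not l1_would_forward(10)  # -> WIRE_INCORRECT_CLTV_EXPIRY
assert l1_would_forward(11)      # routed through to the outnode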