2016-03-19 20:58:06 +01:00
|
|
|
#!/usr/bin/env python3
|
2021-07-28 13:57:16 +02:00
|
|
|
# Copyright (c) 2015-2021 The Bitcoin Core developers
|
2016-03-19 20:58:06 +01:00
|
|
|
# Distributed under the MIT software license, see the accompanying
|
2015-05-04 16:50:24 +02:00
|
|
|
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
2017-01-18 00:34:40 +01:00
|
|
|
"""Test processing of unrequested blocks.
|
2015-05-04 16:50:24 +02:00
|
|
|
|
2020-07-09 17:59:54 +02:00
|
|
|
Setup: two nodes, node0 + node1, not connected to each other. Node1 will have
|
2017-10-11 21:38:56 +02:00
|
|
|
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
|
2015-05-04 16:50:24 +02:00
|
|
|
|
2017-10-17 22:16:39 +02:00
|
|
|
We have one P2PInterface connection to node0 called test_node, and one to node1
|
2017-10-11 21:38:56 +02:00
|
|
|
called min_work_node.
|
2015-05-04 16:50:24 +02:00
|
|
|
|
|
|
|
The test:
|
|
|
|
1. Generate one block on each node, to leave IBD.
|
|
|
|
|
|
|
|
2. Mine a new block on each tip, and deliver to each node from node's peer.
|
2017-10-11 21:38:56 +02:00
|
|
|
The tip should advance for node0, but node1 should skip processing due to
|
|
|
|
nMinimumChainWork.
|
2017-10-06 20:32:07 +02:00
|
|
|
|
2017-10-11 21:38:56 +02:00
|
|
|
Node1 is unused in tests 3-7:
|
2015-05-04 16:50:24 +02:00
|
|
|
|
2017-10-11 21:38:56 +02:00
|
|
|
3. Mine a block that forks from the genesis block, and deliver to test_node.
|
|
|
|
Node0 should not process this block (just accept the header), because it
|
|
|
|
is unrequested and doesn't have more or equal work to the tip.
|
2015-05-04 16:50:24 +02:00
|
|
|
|
2017-10-11 21:38:56 +02:00
|
|
|
4a,b. Send another two blocks that build on the forking block.
|
|
|
|
Node0 should process the second block but be stuck on the shorter chain,
|
|
|
|
because it's missing an intermediate block.
|
2015-05-04 16:50:24 +02:00
|
|
|
|
2017-10-11 21:38:56 +02:00
|
|
|
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
|
|
|
|
we currently store).
|
2015-06-02 21:17:36 +02:00
|
|
|
Node0 should process all but the last block (too far ahead in height).
|
|
|
|
|
2015-05-04 16:50:24 +02:00
|
|
|
5. Send a duplicate of the block in #3 to Node0.
|
|
|
|
Node0 should not process the block because it is unrequested, and stay on
|
|
|
|
the shorter chain.
|
|
|
|
|
|
|
|
6. Send Node0 an inv for the height 3 block produced in #4 above.
|
|
|
|
Node0 should figure out that the peer has the missing height 2 block and send a
|
|
|
|
getdata.
|
|
|
|
|
|
|
|
7. Send Node0 the missing block again.
|
|
|
|
Node0 should process and the tip should advance.
|
2017-10-06 20:32:07 +02:00
|
|
|
|
2017-10-11 22:57:43 +02:00
|
|
|
8. Create a fork which is invalid at a height longer than the current chain
|
|
|
|
(ie to which the node will try to reorg) but which has headers built on top
|
|
|
|
of the invalid block. Check that we get disconnected if we send more headers
|
|
|
|
on the chain the node now knows to be invalid.
|
2017-10-06 20:32:07 +02:00
|
|
|
|
2017-10-11 22:57:43 +02:00
|
|
|
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
|
|
|
|
work on its chain).
|
2017-01-18 00:34:40 +01:00
|
|
|
"""
|
|
|
|
|
|
|
|
import time
|
2018-07-07 00:10:35 +02:00
|
|
|
|
2018-07-30 10:16:40 +02:00
|
|
|
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
|
2020-04-25 13:44:44 +02:00
|
|
|
from test_framework.messages import CBlockHeader, CInv, MSG_BLOCK, msg_block, msg_headers, msg_inv
|
2020-07-19 09:47:05 +02:00
|
|
|
from test_framework.p2p import p2p_lock, P2PInterface
|
2018-07-07 00:10:35 +02:00
|
|
|
from test_framework.test_framework import BitcoinTestFramework
|
2019-04-07 00:38:51 +02:00
|
|
|
from test_framework.util import (
|
|
|
|
assert_equal,
|
|
|
|
assert_raises_rpc_error,
|
|
|
|
)
|
2015-05-04 16:50:24 +02:00
|
|
|
|
|
|
|
|
2018-04-19 14:38:59 +02:00
|
|
|
class AcceptBlockTest(BitcoinTestFramework):
|
2017-06-10 00:21:21 +02:00
|
|
|
def set_test_params(self):
|
2016-05-14 13:01:31 +02:00
|
|
|
self.setup_clean_chain = True
|
2017-10-11 21:38:56 +02:00
|
|
|
self.num_nodes = 2
|
|
|
|
self.extra_args = [[], ["-minimumchainwork=0x10"]]
|
2015-05-04 16:50:24 +02:00
|
|
|
|
|
|
|
def setup_network(self):
|
2017-04-03 15:34:04 +02:00
|
|
|
self.setup_nodes()
|
2015-05-04 16:50:24 +02:00
|
|
|
|
2022-08-02 22:48:57 +02:00
|
|
|
def check_hash_in_chaintips(self, node, blockhash):
|
|
|
|
tips = node.getchaintips()
|
|
|
|
for x in tips:
|
|
|
|
if x["hash"] == blockhash:
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
2015-05-04 16:50:24 +02:00
|
|
|
    def run_test(self):
        """Drive the unrequested-block scenario described in the module docstring.

        Uses one P2P peer per node (test_node -> node0, min_work_node -> node1)
        and checks each step's effect via RPC (getblockcount, getchaintips,
        getblock, getbestblockhash).
        """
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())

        # 1. Have nodes mine a block (leave IBD)
        [self.generate(n, 1, sync_fun=self.no_op) for n in self.nodes]
        # Each node's best block hash, parsed as an int for use as a prev-hash.
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]

        # 2. Send one block that builds on each tip.
        # This should be accepted by node0
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_and_ping(msg_block(blocks_h2[0]))

        # node1 should reject the header outright: the block has too little
        # chain work to clear its -minimumchainwork=0x10 anti-DoS threshold.
        with self.nodes[1].assert_debug_log(expected_msgs=[f"AcceptBlockHeader: not adding new block header {blocks_h2[1].hash}, missing anti-dos proof-of-work validation"]):
            min_work_node.send_and_ping(msg_block(blocks_h2[1]))

        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)

        # Ensure that the header of the second block was also not accepted by node1
        assert_equal(self.check_hash_in_chaintips(self.nodes[1], blocks_h2[1].hash), False)
        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")

        # 3. Send another block that builds on genesis.
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_and_ping(msg_block(block_h1f))

        # node0 should keep only the header (status "headers-only"): the block
        # is unrequested and has no more work than the current tip.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)

        # 4. Send another two block that build on the fork.
        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_and_ping(msg_block(block_h2f))

        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found

        # But this block should be accepted by node since it has equal work.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")

        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
        block_h3.solve()
        test_node.send_and_ping(msg_block(block_h3))

        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        # NOTE(review): this getblock call is repeated just below; harmless
        # (it only asserts the block is retrievable) but redundant.
        self.nodes[0].getblock(block_h3.hash)

        # But this block should be accepted by node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")

        # 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as it is not missing any headers)
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block

        # Now send the block at height 5 and check that it wasn't accepted (missing header)
        test_node.send_and_ping(msg_block(all_blocks[1]))
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)

        # The block at height 5 should be accepted if we provide the missing header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_and_ping(msg_block(all_blocks[1]))
        self.nodes[0].getblock(all_blocks[1].hash)

        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        # Single ping after the batch: ensures node0 has processed everything sent.
        test_node.sync_with_ping()

        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).

        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first

        self.nodes[0].disconnect_p2ps()
        self.nodes[1].disconnect_p2ps()

        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

        test_node.send_and_ping(msg_block(block_h1f))
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with p2p_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
            test_node.send_message(msg_inv([CInv(MSG_BLOCK, block_h3.sha256)]))

        test_node.sync_with_ping()
        # Re-take the lock to read the message state updated by the net thread.
        with p2p_lock:
            getdata = test_node.last_message["getdata"]

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_and_ping(msg_block(block_h1f))
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info("Successfully reorged to longer chain")

        # 8. Create a chain which is invalid at a height longer than the
        # current chain, but which has more blocks on top of that
        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
        block_289f.solve()
        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
        block_290f.solve()
        # block_291 spends a coinbase below maturity!
        tx_to_add = create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1)
        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1, txlist=[tx_to_add])
        block_291.solve()
        block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
        block_292.solve()

        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_and_ping(headers_message)

        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)

        test_node.send_message(msg_block(block_289f))
        test_node.send_and_ping(msg_block(block_290f))

        # Both fork blocks should now be fully stored and retrievable.
        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)

        test_node.send_message(msg_block(block_291))

        # At this point we've sent an obviously-bogus block, wait for full processing
        # and assume disconnection
        test_node.wait_for_disconnect()

        self.nodes[0].disconnect_p2ps()
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())

        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        # confirmations == -1 marks a block on a chain the node considers invalid.
        assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)

        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()

        # 9. Connect node1 to node0 and ensure it is able to sync
        self.connect_nodes(0, 1)
        self.sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
|
2017-10-06 20:32:07 +02:00
|
|
|
|
2015-05-04 16:50:24 +02:00
|
|
|
# Run the test when invoked as a script (the framework's main() handles
# argument parsing, node setup/teardown, and exit status).
if __name__ == '__main__':
    AcceptBlockTest().main()
|