#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test transaction download behavior
"""

from test_framework.messages import (
    CInv,
    CTransaction,
    FromHex,
    MSG_TX,
    MSG_TYPE_MASK,
    MSG_WTX,
    msg_inv,
    msg_notfound,
)
from test_framework.p2p import (
    P2PInterface,
    p2p_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
)
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE

import time


class TestP2PConn(P2PInterface):
    def __init__(self):
        super().__init__()
        self.tx_getdata_count = 0

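    # Count every tx/wtx item in incoming getdata messages, so tests can
    # assert on how many transactions the node has requested from this peer.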
    def on_getdata(self, message):
        for i in message.inv:
            if i.type & MSG_TYPE_MASK == MSG_TX or i.type & MSG_TYPE_MASK == MSG_WTX:
                self.tx_getdata_count += 1


# Constants from net_processing
GETDATA_TX_INTERVAL = 60  # seconds
INBOUND_PEER_TX_DELAY = 2  # seconds
TXID_RELAY_DELAY = 2  # seconds
OVERLOADED_PEER_DELAY = 2  # seconds
MAX_GETDATA_IN_FLIGHT = 100
MAX_PEER_TX_ANNOUNCEMENTS = 5000

# Python test constants
NUM_INBOUND = 10
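# Worst-case wait before an inbound peer that announced a transaction is sent
# a getdata: one retry interval plus the inbound-peer and txid-relay delays.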
MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + INBOUND_PEER_TX_DELAY + TXID_RELAY_DELAY


class TxDownloadTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = False
        self.num_nodes = 2

    def test_tx_requests(self):
        self.log.info("Test that we request transactions from all our peers, eventually")

        txid = 0xdeadbeef
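        # Arbitrary fake txid: the transaction is announced but never sent,
        # so the node keeps falling back to other announcing peers.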
self . log . info ( " Announce the txid from each incoming peer to node 0 " )
        msg = msg_inv([CInv(t=MSG_WTX, h=txid)])
        for p in self.nodes[0].p2ps:
            p.send_and_ping(msg)

        outstanding_peer_index = [i for i in range(len(self.nodes[0].p2ps))]

        def getdata_found(peer_index):
            p = self.nodes[0].p2ps[peer_index]
            with p2p_lock:
                return p.last_message.get("getdata") and p.last_message["getdata"].inv[-1].hash == txid

        node_0_mocktime = int(time.time())
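        # Each pass jumps mocktime forward by the worst-case inbound wait,
        # which is enough for at least one more outstanding peer to be sent
        # a getdata.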
        while outstanding_peer_index:
            node_0_mocktime += MAX_GETDATA_INBOUND_WAIT
            self.nodes[0].setmocktime(node_0_mocktime)
            self.wait_until(lambda: any(getdata_found(i) for i in outstanding_peer_index))
            # Rebuild the list instead of removing entries while iterating
            # over it, which would skip elements.
            outstanding_peer_index = [i for i in outstanding_peer_index if not getdata_found(i)]

        self.nodes[0].setmocktime(0)
        self.log.info("All outstanding peers received a getdata")

    def test_inv_block(self):
        self.log.info("Generate a transaction on node 0")
        tx = self.nodes[0].createrawtransaction(
            inputs=[{  # coinbase
                "txid": self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0],
                "vout": 0
            }],
            outputs={ADDRESS_BCRT1_UNSPENDABLE: 50 - 0.00025},
        )
        tx = self.nodes[0].signrawtransactionwithkey(
            hexstring=tx,
            privkeys=[self.nodes[0].get_deterministic_priv_key().key],
        )['hex']
        ctx = FromHex(CTransaction(), tx)
        txid = int(ctx.rehash(), 16)

        self.log.info(
            "Announce the transaction to all nodes from all {} incoming peers, but never send it".format(NUM_INBOUND))
        msg = msg_inv([CInv(t=MSG_TX, h=txid)])
        for p in self.peers:
            p.send_and_ping(msg)
self . log . info ( " Put the tx in node 0 ' s mempool " )
self . nodes [ 0 ] . sendrawtransaction ( tx )
# Since node 1 is connected outbound to an honest peer (node 0), it
# should get the tx within a timeout. (Assuming that node 0
# announced the tx within the timeout)
# The timeout is the sum of
# * the worst case until the tx is first requested from an inbound
# peer, plus
# * the first time it is re-requested from the outbound peer, plus
# * 2 seconds to avoid races
        assert not self.nodes[1].getpeerinfo()[0]['inbound']
        timeout = 2 + INBOUND_PEER_TX_DELAY + GETDATA_TX_INTERVAL
self . log . info ( " Tx should be received at node 1 after {} seconds " . format ( timeout ) )
self . sync_mempools ( timeout = timeout )
def test_in_flight_max ( self ) :
self . log . info ( " Test that we don ' t load peers with more than {} transaction requests immediately " . format ( MAX_GETDATA_IN_FLIGHT ) )
        txids = [i for i in range(MAX_GETDATA_IN_FLIGHT + 2)]

        p = self.nodes[0].p2ps[0]

        with p2p_lock:
            p.tx_getdata_count = 0
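
        # Pin mocktime so the delay arithmetic below is deterministic.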
        mock_time = int(time.time() + 1)
        self.nodes[0].setmocktime(mock_time)
        for i in range(MAX_GETDATA_IN_FLIGHT):
            p.send_message(msg_inv([CInv(t=MSG_WTX, h=txids[i])]))
        p.sync_with_ping()
        mock_time += INBOUND_PEER_TX_DELAY
        self.nodes[0].setmocktime(mock_time)
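        # Advancing past INBOUND_PEER_TX_DELAY makes the first batch of
        # announcements requestable, so all of them should be asked for now.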
        p.wait_until(lambda: p.tx_getdata_count >= MAX_GETDATA_IN_FLIGHT)
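
        # Announce two more transactions, exceeding the in-flight limit;
        # their requests should be held back by OVERLOADED_PEER_DELAY.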
        for i in range(MAX_GETDATA_IN_FLIGHT, len(txids)):
            p.send_message(msg_inv([CInv(t=MSG_WTX, h=txids[i])]))
        p.sync_with_ping()

        self.log.info("No more than {} requests should be seen within {} seconds after announcement".format(MAX_GETDATA_IN_FLIGHT, INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY - 1))
        self.nodes[0].setmocktime(mock_time + INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY - 1)
        p.sync_with_ping()
        with p2p_lock:
            assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT)
self . log . info ( " If we wait {} seconds after announcement, we should eventually get more requests " . format ( INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY ) )
self . nodes [ 0 ] . setmocktime ( mock_time + INBOUND_PEER_TX_DELAY + OVERLOADED_PEER_DELAY )
p . wait_until ( lambda : p . tx_getdata_count == len ( txids ) )
def test_expiry_fallback ( self ) :
self . log . info ( ' Check that expiry will select another peer for download ' )
WTXID = 0xffaa
peer1 = self . nodes [ 0 ] . add_p2p_connection ( TestP2PConn ( ) )
peer2 = self . nodes [ 0 ] . add_p2p_connection ( TestP2PConn ( ) )
for p in [ peer1 , peer2 ] :
p . send_message ( msg_inv ( [ CInv ( t = MSG_WTX , h = WTXID ) ] ) )
# One of the peers is asked for the tx
peer2 . wait_until ( lambda : sum ( p . tx_getdata_count for p in [ peer1 , peer2 ] ) == 1 )
        with p2p_lock:
            peer_expiry, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
            assert_equal(peer_fallback.tx_getdata_count, 0)
        self.nodes[0].setmocktime(int(time.time()) + GETDATA_TX_INTERVAL + 1)  # Wait for request to peer_expiry to expire
        peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
        with p2p_lock:
            assert_equal(peer_fallback.tx_getdata_count, 1)
        self.restart_node(0)  # reset mocktime

    def test_disconnect_fallback(self):
        self.log.info('Check that disconnect will select another peer for download')
        WTXID = 0xffbb
        peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
        peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
        for p in [peer1, peer2]:
            p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
        # One of the peers is asked for the tx
        peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
        with p2p_lock:
            peer_disconnect, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
            assert_equal(peer_fallback.tx_getdata_count, 0)
        peer_disconnect.peer_disconnect()
        peer_disconnect.wait_for_disconnect()
        peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
        with p2p_lock:
            assert_equal(peer_fallback.tx_getdata_count, 1)

    def test_notfound_fallback(self):
        self.log.info('Check that notfounds will select another peer for download immediately')
        WTXID = 0xffdd
        peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
        peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
        for p in [peer1, peer2]:
            p.send_message(msg_inv([CInv(t=MSG_WTX, h=WTXID)]))
        # One of the peers is asked for the tx
        peer2.wait_until(lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
        with p2p_lock:
            peer_notfound, peer_fallback = (peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
            assert_equal(peer_fallback.tx_getdata_count, 0)
        peer_notfound.send_and_ping(msg_notfound(vec=[CInv(MSG_WTX, WTXID)]))  # Send notfound, so that fallback peer is selected
        peer_fallback.wait_until(lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
        with p2p_lock:
            assert_equal(peer_fallback.tx_getdata_count, 1)

    def test_preferred_inv(self):
        self.log.info('Check that invs from preferred peers are downloaded immediately')
        self.restart_node(0, extra_args=['-whitelist=noban@127.0.0.1'])
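        # A noban-whitelisted peer counts as "preferred", so its announcement
        # should be requested without the usual inbound-peer delay.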
        peer = self.nodes[0].add_p2p_connection(TestP2PConn())
        peer.send_message(msg_inv([CInv(t=MSG_WTX, h=0xff00ff00)]))
        peer.wait_until(lambda: peer.tx_getdata_count >= 1, timeout=1)
        with p2p_lock:
            assert_equal(peer.tx_getdata_count, 1)

    def test_large_inv_batch(self):
        self.log.info('Test how large inv batches are handled with relay permission')
        self.restart_node(0, extra_args=['-whitelist=relay@127.0.0.1'])
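        # With relay permission the MAX_PEER_TX_ANNOUNCEMENTS cap is not
        # enforced, so every announcement should eventually be requested.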
        peer = self.nodes[0].add_p2p_connection(TestP2PConn())
        peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
        peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS + 1)

        self.log.info('Test how large inv batches are handled without relay permission')
        self.restart_node(0)
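        # Without relay permission, announcements beyond the cap are dropped:
        # exactly MAX_PEER_TX_ANNOUNCEMENTS getdatas should arrive.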
        peer = self.nodes[0].add_p2p_connection(TestP2PConn())
        peer.send_message(msg_inv([CInv(t=MSG_WTX, h=wtxid) for wtxid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
        peer.wait_until(lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS)
        peer.sync_with_ping()
        with p2p_lock:
            assert_equal(peer.tx_getdata_count, MAX_PEER_TX_ANNOUNCEMENTS)

    def test_spurious_notfound(self):
        self.log.info('Check that spurious notfound is ignored')
        self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)]))

    def run_test(self):
        # First run the tests that don't rely on mocktime and need only one
        # peer connection, to avoid restarting the nodes.
        self.test_expiry_fallback()
        self.test_disconnect_fallback()
        self.test_notfound_fallback()
        self.test_preferred_inv()
        self.test_large_inv_batch()
        self.test_spurious_notfound()

        # Run each test against new bitcoind instances, as setting mocktime
        # has long-term effects on when the next trickle relay event happens.
        for test in [self.test_in_flight_max, self.test_inv_block, self.test_tx_requests]:
            self.stop_nodes()
            self.start_nodes()
            self.connect_nodes(1, 0)

            # Set up the p2p connections
            self.peers = []
            for node in self.nodes:
                for _ in range(NUM_INBOUND):
                    self.peers.append(node.add_p2p_connection(TestP2PConn()))

            self.log.info("Nodes are set up with {} incoming connections each".format(NUM_INBOUND))
            test()


if __name__ == '__main__':
    TxDownloadTest().main()