#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
""" Test objects for interacting with a bitcoind node over the p2p protocol.
2017-01-18 00:34:40 +01:00
2020-08-17 11:10:44 +02:00
The P2PInterface objects interact with the bitcoind nodes under test using the
node ' s p2p interface. They can be used to send messages to the node, and
callbacks can be registered that execute when messages are received from the
node . Messages are sent to / received from the node on an asyncio event loop .
State held inside the objects must be guarded by the p2p_lock to avoid data
races between the main testing thread and the event loop .
2017-01-18 00:34:40 +01:00
2017-10-17 22:16:39 +02:00
P2PConnection : A low - level connection object to a node ' s P2P interface
2017-11-22 17:45:14 +01:00
P2PInterface : A high - level interface object for communicating to a node over P2P
P2PDataStore : A p2p interface class that keeps a store of transactions and blocks
2020-01-31 03:52:25 +01:00
and can respond correctly to getdata and getheaders messages
P2PTxInvStore : A p2p interface class that inherits from P2PDataStore , and keeps
a count of how many times each txid has been announced . """
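
# Example usage (illustrative sketch; TestNode.add_p2p_connection lives in
# test_node.py, not in this file): a typical test attaches a P2PInterface to a
# node, completes the version/verack handshake, and then exchanges messages:
#
#   peer = node.add_p2p_connection(P2PInterface())
#   peer.sync_with_ping()
#   with p2p_lock:
#       assert "pong" in peer.last_message
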
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading

from test_framework.messages import (
    CBlockHeader,
    MAX_HEADERS_RESULTS,
    MIN_VERSION_SUPPORTED,
    msg_addr,
    msg_addrv2,
    msg_block,
    MSG_BLOCK,
    msg_blocktxn,
    msg_cfcheckpt,
    msg_cfheaders,
    msg_cfilter,
    msg_cmpctblock,
    msg_feefilter,
    msg_filteradd,
    msg_filterclear,
    msg_filterload,
    msg_getaddr,
    msg_getblocks,
    msg_getblocktxn,
    msg_getdata,
    msg_getheaders,
    msg_headers,
    msg_inv,
    msg_mempool,
    msg_merkleblock,
    msg_notfound,
    msg_ping,
    msg_pong,
    msg_sendaddrv2,
    msg_sendcmpct,
    msg_sendheaders,
    msg_tx,
    MSG_TX,
    MSG_TYPE_MASK,
    msg_verack,
    msg_version,
    MSG_WTX,
    msg_wtxidrelay,
    NODE_NETWORK,
    NODE_WITNESS,
    sha256,
)
from test_framework.util import (
    MAX_NODES,
    p2p_port,
    wait_until_helper,
)

logger = logging.getLogger("TestFramework.p2p")

MESSAGEMAP = {
    b"addr": msg_addr,
    b"addrv2": msg_addrv2,
    b"block": msg_block,
    b"blocktxn": msg_blocktxn,
    b"cfcheckpt": msg_cfcheckpt,
    b"cfheaders": msg_cfheaders,
    b"cfilter": msg_cfilter,
    b"cmpctblock": msg_cmpctblock,
    b"feefilter": msg_feefilter,
    b"filteradd": msg_filteradd,
    b"filterclear": msg_filterclear,
    b"filterload": msg_filterload,
    b"getaddr": msg_getaddr,
    b"getblocks": msg_getblocks,
    b"getblocktxn": msg_getblocktxn,
    b"getdata": msg_getdata,
    b"getheaders": msg_getheaders,
    b"headers": msg_headers,
    b"inv": msg_inv,
    b"mempool": msg_mempool,
    b"merkleblock": msg_merkleblock,
    b"notfound": msg_notfound,
    b"ping": msg_ping,
    b"pong": msg_pong,
    b"sendaddrv2": msg_sendaddrv2,
    b"sendcmpct": msg_sendcmpct,
    b"sendheaders": msg_sendheaders,
    b"tx": msg_tx,
    b"verack": msg_verack,
    b"version": msg_version,
    b"wtxidrelay": msg_wtxidrelay,
}

MAGIC_BYTES = {
    "mainnet": b"\xf9\xbe\xb4\xd9",   # mainnet
    "testnet3": b"\x0b\x11\x09\x07",  # testnet3
    "regtest": b"\xfa\xbf\xb5\xda",   # regtest
    "signet": b"\x0a\x03\xcf\x40",    # signet
}


class P2PConnection(asyncio.Protocol):
    """A low-level connection object to a node's P2P interface.

    This class is responsible for:

    - opening and closing the TCP connection to the node
    - reading bytes from and writing bytes to the socket
    - deserializing and serializing the P2P message header
    - logging messages as they are sent and received

    This class contains no logic for handling the P2P message payloads. It must
    be sub-classed and the on_message() callback overridden."""

    def __init__(self):
        # The underlying transport of the connection.
        # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
        self._transport = None

    @property
    def is_connected(self):
        return self._transport is not None

    def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor):
        assert not self.is_connected
        self.timeout_factor = timeout_factor
        self.dstaddr = dstaddr
        self.dstport = dstport
        # The initial message to send after the connection was made:
        self.on_connection_send_msg = None
        self.recvbuf = b""
        self.magic_bytes = MAGIC_BYTES[net]

    def peer_connect(self, dstaddr, dstport, *, net, timeout_factor):
        self.peer_connect_helper(dstaddr, dstport, net, timeout_factor)

        loop = NetworkThread.network_event_loop
        logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
        coroutine = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
        return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine)

    def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor):
        self.peer_connect_helper('0', 0, net, timeout_factor)

        logger.debug('Listening for Bitcoin Node with id: {}'.format(connect_id))
        return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id)

    def peer_disconnect(self):
        # Connection could have already been closed by other end.
        NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())

    # Connection and disconnection methods

    def connection_made(self, transport):
        """asyncio callback when a connection is opened."""
        assert not self._transport
        logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = transport
        if self.on_connection_send_msg:
            self.send_message(self.on_connection_send_msg)
            self.on_connection_send_msg = None  # Never used again
        self.on_open()

    def connection_lost(self, exc):
        """asyncio callback when a connection is closed."""
        if exc:
            logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
        else:
            logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
        self._transport = None
        self.recvbuf = b""
        self.on_close()

    # Socket read methods

    def data_received(self, t):
        """asyncio callback when data is read from the socket."""
        if len(t) > 0:
            self.recvbuf += t
            self._on_data()

    def _on_data(self):
        """Try to read P2P messages from the recv buffer.

        This method reads data from the buffer in a loop. It deserializes,
        parses and verifies the P2P header, then passes the P2P payload to
        the on_message callback for processing."""
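        # The wire format parsed below (see build_message() for the sending side):
        #   4-byte network magic, 12-byte NUL-padded message type, 4-byte
        #   little-endian payload length, 4-byte checksum (first four bytes of
        #   sha256(sha256(payload))), followed by the payload itself.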
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != self.magic_bytes:
                    raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf)))
                if len(self.recvbuf) < 4 + 12 + 4 + 4:
                    return
                msgtype = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                checksum = self.recvbuf[4+12+4:4+12+4+4]
                if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                    return
                msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                th = sha256(msg)
                h = sha256(th)
                if checksum != h[:4]:
                    raise ValueError("got bad checksum " + repr(self.recvbuf))
                self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if msgtype not in MESSAGEMAP:
                    raise ValueError("Received unknown msgtype from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, msgtype, repr(msg)))
                f = BytesIO(msg)
                t = MESSAGEMAP[msgtype]()
                t.deserialize(f)
                self._log_message("receive", t)
                self.on_message(t)
        except Exception as e:
            logger.exception('Error reading message: {}'.format(repr(e)))
            raise

    def on_message(self, message):
        """Callback for processing a P2P payload. Must be overridden by derived class."""
        raise NotImplementedError

    # Socket write methods

    def send_message(self, message):
        """Send a P2P message over the socket.

        This method takes a P2P payload, builds the P2P header and adds
        the message to the send buffer to be sent over the socket."""
        tmsg = self.build_message(message)
        self._log_message("send", message)
        return self.send_raw_message(tmsg)

    def send_raw_message(self, raw_message_bytes):
        if not self.is_connected:
            raise IOError('Not connected')

        def maybe_write():
            if not self._transport:
                return
            if self._transport.is_closing():
                return
            self._transport.write(raw_message_bytes)
        NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)

    # Class utility methods

    def build_message(self, message):
        """Build a serialized P2P message"""
        msgtype = message.msgtype
        data = message.serialize()
        tmsg = self.magic_bytes
        tmsg += msgtype
        tmsg += b"\x00" * (12 - len(msgtype))
        tmsg += struct.pack("<I", len(data))
        th = sha256(data)
        h = sha256(th)
        tmsg += h[:4]
        tmsg += data
        return tmsg

    def _log_message(self, direction, msg):
        """Logs a message being sent or received over the connection."""
        if direction == "send":
            log_message = "Send message to "
        elif direction == "receive":
            log_message = "Received message from "
        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
        if len(log_message) > 500:
            log_message += "... (msg truncated)"
        logger.debug(log_message)


class P2PInterface(P2PConnection):
    """A high-level P2P interface class for communicating with a Bitcoin node.

    This class provides high-level callbacks for processing P2P message
    payloads, as well as convenience methods for interacting with the
    node over P2P.

    Individual testcases should subclass this and override the on_* methods
    if they want to alter message handling behaviour."""
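
    # Example (illustrative sketch): a test-specific subclass that records the
    # hashes announced in inv messages might look like:
    #
    #   class InvRecorder(P2PInterface):
    #       def __init__(self):
    #           super().__init__()
    #           self.announced = []
    #       def on_inv(self, message):
    #           super().on_inv(message)  # still send getdata for announced objects
    #           self.announced.extend(i.hash for i in message.inv)
    #
    # State written from these callbacks is updated on the event loop thread, so
    # the test thread must read it under p2p_lock.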
    def __init__(self, support_addrv2=False, wtxidrelay=True):
        super().__init__()

        # Track number of messages of each type received.
        # Should be read-only in a test.
        self.message_count = defaultdict(int)

        # Track the most recent message of each type.
        # To wait for a message to be received, pop that message from
        # this and use self.wait_until.
        self.last_message = {}

        # A count of the number of ping messages we've sent to the node
        self.ping_counter = 1

        # The network services received from the peer
        self.nServices = 0

        self.support_addrv2 = support_addrv2

        # If the peer supports wtxid-relay
        self.wtxidrelay = wtxidrelay

    def peer_connect_send_version(self, services):
        # Send a version msg
        vt = msg_version()
        vt.nServices = services
        vt.addrTo.ip = self.dstaddr
        vt.addrTo.port = self.dstport
        vt.addrFrom.ip = "0.0.0.0"
        vt.addrFrom.port = 0
        self.on_connection_send_msg = vt  # Will be sent in connection_made callback

    def peer_connect(self, *args, services=NODE_NETWORK | NODE_WITNESS, send_version=True, **kwargs):
        create_conn = super().peer_connect(*args, **kwargs)

        if send_version:
            self.peer_connect_send_version(services)

        return create_conn

    def peer_accept_connection(self, *args, services=NODE_NETWORK | NODE_WITNESS, **kwargs):
        create_conn = super().peer_accept_connection(*args, **kwargs)
        self.peer_connect_send_version(services)

        return create_conn

    # Message receiving methods

    def on_message(self, message):
        """Receive message and dispatch message to appropriate callback.

        We keep a count of how many of each message type has been received
        and the most recent message of each type."""
        with p2p_lock:
            try:
                msgtype = message.msgtype.decode('ascii')
                self.message_count[msgtype] += 1
                self.last_message[msgtype] = message
                getattr(self, 'on_' + msgtype)(message)
            except:
                print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
                raise

    # Callback methods. Can be overridden by subclasses in individual test
    # cases to provide custom message handling behaviour.

    def on_open(self):
        pass

    def on_close(self):
        pass

    def on_addr(self, message): pass
    def on_addrv2(self, message): pass
    def on_block(self, message): pass
    def on_blocktxn(self, message): pass
    def on_cfcheckpt(self, message): pass
    def on_cfheaders(self, message): pass
    def on_cfilter(self, message): pass
    def on_cmpctblock(self, message): pass
    def on_feefilter(self, message): pass
    def on_filteradd(self, message): pass
    def on_filterclear(self, message): pass
    def on_filterload(self, message): pass
    def on_getaddr(self, message): pass
    def on_getblocks(self, message): pass
    def on_getblocktxn(self, message): pass
    def on_getdata(self, message): pass
    def on_getheaders(self, message): pass
    def on_headers(self, message): pass
    def on_mempool(self, message): pass
    def on_merkleblock(self, message): pass
    def on_notfound(self, message): pass
    def on_pong(self, message): pass
    def on_sendaddrv2(self, message): pass
    def on_sendcmpct(self, message): pass
    def on_sendheaders(self, message): pass
    def on_tx(self, message): pass
    def on_wtxidrelay(self, message): pass

    def on_inv(self, message):
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            self.send_message(want)

    def on_ping(self, message):
        self.send_message(msg_pong(message.nonce))

    def on_verack(self, message):
        pass

    def on_version(self, message):
        assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than or equal to {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
        if message.nVersion >= 70016 and self.wtxidrelay:
            self.send_message(msg_wtxidrelay())
        if self.support_addrv2:
            self.send_message(msg_sendaddrv2())
        self.send_message(msg_verack())
        self.nServices = message.nServices

    # Connection helper methods

    def wait_until(self, test_function_in, *, timeout=60, check_connected=True):
        def test_function():
            if check_connected:
                assert self.is_connected
            return test_function_in()

        wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
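
    # Example (illustrative sketch): wait_until accepts any zero-argument
    # predicate, which is evaluated under p2p_lock, e.g.:
    #
    #   peer.wait_until(lambda: peer.message_count["headers"] >= 2)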

    def wait_for_connect(self, timeout=60):
        test_function = lambda: self.is_connected
        wait_until_helper(test_function, timeout=timeout, lock=p2p_lock)

    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: not self.is_connected
        self.wait_until(test_function, timeout=timeout, check_connected=False)

    # Message receiving helper methods

    def wait_for_tx(self, txid, timeout=60):
        def test_function():
            if not self.last_message.get('tx'):
                return False
            return self.last_message['tx'].tx.rehash() == txid

        self.wait_until(test_function, timeout=timeout)

    def wait_for_block(self, blockhash, timeout=60):
        def test_function():
            return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash

        self.wait_until(test_function, timeout=timeout)

    def wait_for_header(self, blockhash, timeout=60):
        def test_function():
            last_headers = self.last_message.get('headers')
            if not last_headers:
                return False
            return last_headers.headers[0].rehash() == int(blockhash, 16)

        self.wait_until(test_function, timeout=timeout)

    def wait_for_merkleblock(self, blockhash, timeout=60):
        def test_function():
            last_filtered_block = self.last_message.get('merkleblock')
            if not last_filtered_block:
                return False
            return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16)

        self.wait_until(test_function, timeout=timeout)

    def wait_for_getdata(self, hash_list, timeout=60):
        """Waits for a getdata message.

        The object hashes in the inventory vector must match the provided hash_list."""
        def test_function():
            last_data = self.last_message.get("getdata")
            if not last_data:
                return False
            return [x.hash for x in last_data.inv] == hash_list

        self.wait_until(test_function, timeout=timeout)

    def wait_for_getheaders(self, timeout=60):
        """Waits for a getheaders message.

        Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
        value must be explicitly cleared before calling this method, or this will return
        immediately with success. TODO: change this method to take a hash value and only
        return true if the correct block header has been requested."""
        def test_function():
            return self.last_message.get("getheaders")

        self.wait_until(test_function, timeout=timeout)
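
    # Example (illustrative sketch): a test that wants to detect a *new* getheaders
    # request usually clears the previous one first, under the lock:
    #
    #   with p2p_lock:
    #       peer.last_message.pop("getheaders", None)
    #   # ... trigger the node, then:
    #   peer.wait_for_getheaders()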

    def wait_for_inv(self, expected_inv, timeout=60):
        """Waits for an INV message and checks that the first inv object in the message was as expected."""
        if len(expected_inv) > 1:
            raise NotImplementedError("wait_for_inv() will only verify the first inv object")

        def test_function():
            return self.last_message.get("inv") and \
                self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                self.last_message["inv"].inv[0].hash == expected_inv[0].hash

        self.wait_until(test_function, timeout=timeout)

    def wait_for_verack(self, timeout=60):
        def test_function():
            return "verack" in self.last_message

        self.wait_until(test_function, timeout=timeout)

    # Message sending helper functions

    def send_and_ping(self, message, timeout=60):
        self.send_message(message)
        self.sync_with_ping(timeout=timeout)

    # Sync up with the node
    def sync_with_ping(self, timeout=60):
        self.send_message(msg_ping(nonce=self.ping_counter))

        def test_function():
            return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter

        self.wait_until(test_function, timeout=timeout)
        self.ping_counter += 1


# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
p2p_lock = threading.Lock()
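
# Example (illustrative sketch): test code that reads P2PInterface state directly
# should do so under this lock:
#
#   with p2p_lock:
#       ping_count = peer.message_count["ping"]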


class NetworkThread(threading.Thread):
    network_event_loop = None

    def __init__(self):
        super().__init__(name="NetworkThread")
        # There is only one event loop and no more than one thread must be created
        assert not self.network_event_loop
        NetworkThread.listeners = {}
        NetworkThread.protos = {}
        NetworkThread.network_event_loop = asyncio.new_event_loop()

    def run(self):
        """Start the network thread."""
        self.network_event_loop.run_forever()

    def close(self, timeout=10):
        """Close the connections and network event loop."""
        self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
        wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout)
        self.network_event_loop.close()
        self.join(timeout)
        # Safe to remove event loop.
        NetworkThread.network_event_loop = None

    @classmethod
    def listen(cls, p2p, callback, port=None, addr=None, idx=1):
        """Ensure a listening server is running on the given port, and run the
        protocol specified by `p2p` on the next connection to it. Once ready for
        connections, call `callback`."""

        if port is None:
            assert 0 < idx <= MAX_NODES
            port = p2p_port(MAX_NODES - idx)
        if addr is None:
            addr = '127.0.0.1'

        coroutine = cls.create_listen_server(addr, port, callback, p2p)
        cls.network_event_loop.call_soon_threadsafe(cls.network_event_loop.create_task, coroutine)

    @classmethod
    async def create_listen_server(cls, addr, port, callback, proto):
        def peer_protocol():
            """Returns a function that does the protocol handling for a new
            connection. To allow different connections to have different
            behaviors, the protocol function is first put in the cls.protos
            dict. When the connection is made, the function removes the
            protocol function from that dict, and returns it so the event loop
            can start executing it."""
            response = cls.protos.get((addr, port))
            cls.protos[(addr, port)] = None
            return response

        if (addr, port) not in cls.listeners:
            # When creating a listener on a given (addr, port) we only need to
            # do it once. If we want different behaviors for different
            # connections, we can accomplish this by providing different
            # `proto` functions
            listener = await cls.network_event_loop.create_server(peer_protocol, addr, port)
            logger.debug("Listening server on %s:%d should be started" % (addr, port))
            cls.listeners[(addr, port)] = listener

        cls.protos[(addr, port)] = proto
        callback(addr, port)


class P2PDataStore(P2PInterface):
    """A P2P data store class.

    Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
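
    # Example (illustrative sketch; add_p2p_connection is provided by TestNode in
    # test_node.py): a test can feed blocks to the node and assert that the tip
    # advances:
    #
    #   peer = node.add_p2p_connection(P2PDataStore())
    #   peer.send_blocks_and_test(blocks, node, success=True)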

    def __init__(self):
        super().__init__()
        # store of blocks. key is block hash, value is a CBlock object
        self.block_store = {}
        self.last_block_hash = ''
        # store of txs. key is txid, value is a CTransaction object
        self.tx_store = {}
        self.getdata_requests = []

    def on_getdata(self, message):
        """Check for the tx/block in our stores and if found, reply with the corresponding tx/block message."""
        for inv in message.inv:
            self.getdata_requests.append(inv.hash)
            if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
                self.send_message(msg_tx(self.tx_store[inv.hash]))
            elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
                self.send_message(msg_block(self.block_store[inv.hash]))
            else:
                logger.debug('getdata message type {} received.'.format(hex(inv.type)))

    def on_getheaders(self, message):
        """Search back through our block store for the locator, and reply with a headers message if found."""
        locator, hash_stop = message.locator, message.hashstop

        # Assume that the most recent block added is the tip
        if not self.block_store:
            return

        headers_list = [self.block_store[self.last_block_hash]]
        while headers_list[-1].sha256 not in locator.vHave:
            # Walk back through the block store, adding headers to headers_list
            # as we go.
            prev_block_hash = headers_list[-1].hashPrevBlock
            if prev_block_hash in self.block_store:
                prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
                headers_list.append(prev_block_header)
                if prev_block_header.sha256 == hash_stop:
                    # if this is the hashstop header, stop here
                    break
            else:
                logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
                break

        # Truncate the list if there are too many headers
        headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1]
        response = msg_headers(headers_list)

        if response is not None:
            self.send_message(response)

    def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
        """Send blocks to test node and test whether the tip advances.

         - add all blocks to our block_store
         - send a headers message for the final block
         - the on_getheaders handler will ensure that any getheaders are responded to
         - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
           ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
         - if success is True: assert that the node's tip advances to the most recent block
         - if success is False: assert that the node's tip doesn't advance
         - if reject_reason is set: assert that the correct reject message is logged"""

        with p2p_lock:
            for block in blocks:
                self.block_store[block.sha256] = block
                self.last_block_hash = block.sha256

        reject_reason = [reject_reason] if reject_reason else []
        with node.assert_debug_log(expected_msgs=reject_reason):
            if force_send:
                for b in blocks:
                    self.send_message(msg_block(block=b))
            else:
                self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
                self.wait_until(
                    lambda: blocks[-1].sha256 in self.getdata_requests,
                    timeout=timeout,
                    check_connected=success,
                )

            if expect_disconnect:
                self.wait_for_disconnect(timeout=timeout)
            else:
                self.sync_with_ping(timeout=timeout)

            if success:
                self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
            else:
                assert node.getbestblockhash() != blocks[-1].hash

    def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
        """Send txs to test node and test whether they're accepted to the mempool.

         - add all txs to our tx_store
         - send tx messages for all txs
         - if success is True/False: assert that the txs are/are not accepted to the mempool
         - if expect_disconnect is True: Skip the sync with ping
         - if reject_reason is set: assert that the correct reject message is logged."""

        with p2p_lock:
            for tx in txs:
                self.tx_store[tx.sha256] = tx

        reject_reason = [reject_reason] if reject_reason else []
        with node.assert_debug_log(expected_msgs=reject_reason):
            for tx in txs:
                self.send_message(msg_tx(tx))

            if expect_disconnect:
                self.wait_for_disconnect()
            else:
                self.sync_with_ping()

            raw_mempool = node.getrawmempool()
            if success:
                # Check that all txs are now in the mempool
                for tx in txs:
                    assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
            else:
                # Check that none of the txs are now in the mempool
                for tx in txs:
                    assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)


class P2PTxInvStore(P2PInterface):
    """A P2PInterface which stores a count of how many times each txid has been announced."""

    def __init__(self):
        super().__init__()
        self.tx_invs_received = defaultdict(int)

    def on_inv(self, message):
        super().on_inv(message)  # Send getdata in response.
        # Store how many times invs have been received for each tx.
        for i in message.inv:
            if (i.type == MSG_TX) or (i.type == MSG_WTX):
                # save txid
                self.tx_invs_received[i.hash] += 1

    def get_invs(self):
        with p2p_lock:
            return list(self.tx_invs_received.keys())

    def wait_for_broadcast(self, txns, timeout=60):
        """Waits for the txns (list of txids) to complete initial broadcast.
        The mempool should mark unbroadcast=False for these transactions.
        """
        # Wait until invs have been received (and getdatas sent) for each txid.
        self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout)
        # Flush messages and wait for the getdatas to be processed
        self.sync_with_ping()