Merge bitcoin/bitcoin#30413: p2p: Lazy init some bloom filters; fuzz version handshake
afd237bb5d [fuzz] Harness for version handshake (dergoegge)
a90ab4aec9 scripted-diff: Rename lazily initialized bloom filters (dergoegge)
82de1bc478 [net processing] Lazily initialize m_recent_confirmed_transactions (dergoegge)
fa0c87f19c [net processing] Lazily initialize m_recent_rejects_reconsiderable (dergoegge)
662e8db2d3 [net processing] Lazily initialize m_recent_rejects (dergoegge)

Pull request description:

  This adds a fuzzing harness dedicated to the version handshake. To avoid determinism issues, the harness creates the necessary components (addrman, peerman, etc.) on each iteration. A harness like this would have easily caught https://bitcoincore.org/en/2024/07/03/disclose-timestamp-overflow/.

  As a performance optimization, this PR includes a change to `PeerManager` to lazily initialize various filters (to avoid large unnecessary memory allocations on each iteration).

ACKs for top commit:
  brunoerg: ACK afd237bb5d
  marcofleon: Tested ACK afd237bb5d. I compared the coverage of `net_processing` from this harness to the `process_message` and `process_messages` harnesses to see the differences. This target hits more specific parts of the version handshake. The stability looks good as well, at about 94%.
  glozow: utACK afd237bb5d, lazy blooms look ok
  mzumsande: Code Review ACK afd237bb5d

Tree-SHA512: 62bba20aec0cd220e62368354891f9790b81ad75e8adf7b22a76a6d4663bd26aedc4cae8083658a75ea9043d60aad3f0e58ad36bd7bbbf93ff1d16e317bf15cc
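The lazy-initialization change boils down to holding each rolling bloom filter behind a std::unique_ptr and allocating it on first access, so a short-lived PeerManager (such as one constructed per fuzz iteration) never pays for filters it does not touch. Below is a minimal, self-contained sketch of that pattern; DummyRollingFilter and TxDownloadState are hypothetical stand-ins for CRollingBloomFilter and PeerManagerImpl, and the real accessors additionally carry thread-safety annotations.

#include <cstdint>
#include <memory>
#include <unordered_set>

// Stand-in for CRollingBloomFilter. The real class allocates roughly 1.3 MB up
// front, which is what makes eager construction costly for short-lived
// PeerManager instances (e.g. one per fuzz iteration).
class DummyRollingFilter
{
public:
    DummyRollingFilter(unsigned int /*num_elements*/, double /*fp_rate*/) {}
    void insert(uint64_t hash) { m_entries.insert(hash); }
    bool contains(uint64_t hash) const { return m_entries.count(hash) > 0; }
    void reset() { m_entries.clear(); }

private:
    std::unordered_set<uint64_t> m_entries;
};

class TxDownloadState
{
public:
    // Allocate the filter on first use instead of in the constructor, mirroring
    // the RecentRejectsFilter() accessor added by this PR (locking omitted).
    DummyRollingFilter& RecentRejectsFilter()
    {
        if (!m_lazy_recent_rejects) {
            m_lazy_recent_rejects = std::make_unique<DummyRollingFilter>(120'000, 0.000'001);
        }
        return *m_lazy_recent_rejects;
    }

private:
    std::unique_ptr<DummyRollingFilter> m_lazy_recent_rejects;
};

int main()
{
    TxDownloadState state;                          // cheap: nothing allocated yet
    state.RecentRejectsFilter().insert(0xdeadbeef); // first access allocates the filter
    return state.RecentRejectsFilter().contains(0xdeadbeef) ? 0 : 1;
}

In the actual change the accessors also assert that m_tx_download_mutex is held, and call sites are updated from the m_recent_* members to RecentRejectsFilter(), RecentRejectsReconsiderableFilter(), and RecentConfirmedTransactionsFilter(), as shown in the diff below.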
@@ -350,6 +350,7 @@ test_fuzz_fuzz_SOURCES = \
   test/fuzz/netaddress.cpp \
   test/fuzz/netbase_dns_lookup.cpp \
   test/fuzz/node_eviction.cpp \
+  test/fuzz/p2p_handshake.cpp \
   test/fuzz/p2p_transport_serialization.cpp \
   test/fuzz/package_eval.cpp \
   test/fuzz/parse_hd_keypath.cpp \
@@ -584,7 +584,7 @@ private:
      * @param[in] maybe_add_extra_compact_tx Whether this tx should be added to vExtraTxnForCompact.
      *    Set to false if the tx has already been rejected before,
      *    e.g. is an orphan, to avoid adding duplicate entries.
-     * Updates m_txrequest, m_recent_rejects, m_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. */
+     * Updates m_txrequest, m_lazy_recent_rejects, m_lazy_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. */
     void ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result,
                           bool maybe_add_extra_compact_tx)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex);
@@ -776,9 +776,9 @@ private:
     /** Synchronizes tx download including TxRequestTracker, rejection filters, and TxOrphanage.
      * Lock invariants:
      * - A txhash (txid or wtxid) in m_txrequest is not also in m_orphanage.
-     * - A txhash (txid or wtxid) in m_txrequest is not also in m_recent_rejects.
-     * - A txhash (txid or wtxid) in m_txrequest is not also in m_recent_rejects_reconsiderable.
-     * - A txhash (txid or wtxid) in m_txrequest is not also in m_recent_confirmed_transactions.
+     * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects.
+     * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects_reconsiderable.
+     * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_confirmed_transactions.
      * - Each data structure's limits hold (m_orphanage max size, m_txrequest per-peer limits, etc).
      */
     Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs);
@@ -856,9 +856,9 @@ private:
     /** Check whether we already have this gtxid in:
      * - mempool
      * - orphanage
-     * - m_recent_rejects
-     * - m_recent_rejects_reconsiderable (if include_reconsiderable = true)
-     * - m_recent_confirmed_transactions
+     * - m_lazy_recent_rejects
+     * - m_lazy_recent_rejects_reconsiderable (if include_reconsiderable = true)
+     * - m_lazy_recent_confirmed_transactions
      * */
     bool AlreadyHaveTx(const GenTxid& gtxid, bool include_reconsiderable)
         EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex);
@@ -897,7 +897,18 @@ private:
      *
      * Memory used: 1.3 MB
      */
-    CRollingBloomFilter m_recent_rejects GUARDED_BY(m_tx_download_mutex){120'000, 0.000'001};
+    std::unique_ptr<CRollingBloomFilter> m_lazy_recent_rejects GUARDED_BY(m_tx_download_mutex){nullptr};
+
+    CRollingBloomFilter& RecentRejectsFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex)
+    {
+        AssertLockHeld(m_tx_download_mutex);
+
+        if (!m_lazy_recent_rejects) {
+            m_lazy_recent_rejects = std::make_unique<CRollingBloomFilter>(120'000, 0.000'001);
+        }
+
+        return *m_lazy_recent_rejects;
+    }
 
     /**
      * Filter for:
@@ -905,7 +916,7 @@ private:
      * eligible for reconsideration if submitted with other transactions.
      * (2) packages (see GetPackageHash) we have already rejected before and should not retry.
      *
-     * Similar to m_recent_rejects, this filter is used to save bandwidth when e.g. all of our peers
+     * Similar to m_lazy_recent_rejects, this filter is used to save bandwidth when e.g. all of our peers
      * have larger mempools and thus lower minimum feerates than us.
      *
      * When a transaction's error is TxValidationResult::TX_RECONSIDERABLE (in a package or by
@@ -917,9 +928,20 @@ private:
      *
      * Reset this filter when the chain tip changes.
      *
-     * Parameters are picked to be the same as m_recent_rejects, with the same rationale.
+     * Parameters are picked to be the same as m_lazy_recent_rejects, with the same rationale.
      */
-    CRollingBloomFilter m_recent_rejects_reconsiderable GUARDED_BY(m_tx_download_mutex){120'000, 0.000'001};
+    std::unique_ptr<CRollingBloomFilter> m_lazy_recent_rejects_reconsiderable GUARDED_BY(m_tx_download_mutex){nullptr};
+
+    CRollingBloomFilter& RecentRejectsReconsiderableFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex)
+    {
+        AssertLockHeld(m_tx_download_mutex);
+
+        if (!m_lazy_recent_rejects_reconsiderable) {
+            m_lazy_recent_rejects_reconsiderable = std::make_unique<CRollingBloomFilter>(120'000, 0.000'001);
+        }
+
+        return *m_lazy_recent_rejects_reconsiderable;
+    }
 
     /*
      * Filter for transactions that have been recently confirmed.
@@ -936,7 +958,18 @@ private:
      * transaction per day that would be inadvertently ignored (which is the
      * same probability that we have in the reject filter).
      */
-    CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_tx_download_mutex){48'000, 0.000'001};
+    std::unique_ptr<CRollingBloomFilter> m_lazy_recent_confirmed_transactions GUARDED_BY(m_tx_download_mutex){nullptr};
+
+    CRollingBloomFilter& RecentConfirmedTransactionsFilter() EXCLUSIVE_LOCKS_REQUIRED(m_tx_download_mutex)
+    {
+        AssertLockHeld(m_tx_download_mutex);
+
+        if (!m_lazy_recent_confirmed_transactions) {
+            m_lazy_recent_confirmed_transactions = std::make_unique<CRollingBloomFilter>(48'000, 0.000'001);
+        }
+
+        return *m_lazy_recent_confirmed_transactions;
+    }
 
     /**
      * For sending `inv`s to inbound peers, we use a single (exponentially
@@ -2080,8 +2113,8 @@ void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd)
         // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due
         // to a timelock. Reset the rejection filters to give those transactions another chance if we
         // see them again.
-        m_recent_rejects.reset();
-        m_recent_rejects_reconsiderable.reset();
+        RecentRejectsFilter().reset();
+        RecentRejectsReconsiderableFilter().reset();
     }
 }
 
@@ -2119,9 +2152,9 @@ void PeerManagerImpl::BlockConnected(
     m_orphanage.EraseForBlock(*pblock);
 
     for (const auto& ptx : pblock->vtx) {
-        m_recent_confirmed_transactions.insert(ptx->GetHash().ToUint256());
+        RecentConfirmedTransactionsFilter().insert(ptx->GetHash().ToUint256());
         if (ptx->HasWitness()) {
-            m_recent_confirmed_transactions.insert(ptx->GetWitnessHash().ToUint256());
+            RecentConfirmedTransactionsFilter().insert(ptx->GetWitnessHash().ToUint256());
         }
         m_txrequest.ForgetTxHash(ptx->GetHash());
         m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
@@ -2139,7 +2172,7 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &blo
     // presumably the most common case of relaying a confirmed transaction
     // should be just after a new block containing it is found.
     LOCK(m_tx_download_mutex);
-    m_recent_confirmed_transactions.reset();
+    RecentConfirmedTransactionsFilter().reset();
 }
 
 /**
@@ -2300,11 +2333,11 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid, bool include_reconside
         if (m_orphanage.HaveTx(Wtxid::FromUint256(hash))) return true;
     }
 
-    if (include_reconsiderable && m_recent_rejects_reconsiderable.contains(hash)) return true;
+    if (include_reconsiderable && RecentRejectsReconsiderableFilter().contains(hash)) return true;
 
-    if (m_recent_confirmed_transactions.contains(hash)) return true;
+    if (RecentConfirmedTransactionsFilter().contains(hash)) return true;
 
-    return m_recent_rejects.contains(hash) || m_mempool.exists(gtxid);
+    return RecentRejectsFilter().contains(hash) || m_mempool.exists(gtxid);
 }
 
 bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash)
@@ -3192,12 +3225,12 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx
         // for concerns around weakening security of unupgraded nodes
         // if we start doing this too early.
         if (state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) {
-            // If the result is TX_RECONSIDERABLE, add it to m_recent_rejects_reconsiderable
+            // If the result is TX_RECONSIDERABLE, add it to m_lazy_recent_rejects_reconsiderable
             // because we should not download or submit this transaction by itself again, but may
            // submit it as part of a package later.
-            m_recent_rejects_reconsiderable.insert(ptx->GetWitnessHash().ToUint256());
+            RecentRejectsReconsiderableFilter().insert(ptx->GetWitnessHash().ToUint256());
         } else {
-            m_recent_rejects.insert(ptx->GetWitnessHash().ToUint256());
+            RecentRejectsFilter().insert(ptx->GetWitnessHash().ToUint256());
         }
         m_txrequest.ForgetTxHash(ptx->GetWitnessHash());
         // If the transaction failed for TX_INPUTS_NOT_STANDARD,
@@ -3211,7 +3244,7 @@ void PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx
         // We only add the txid if it differs from the wtxid, to avoid wasting entries in the
         // rolling bloom filter.
         if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && ptx->HasWitness()) {
-            m_recent_rejects.insert(ptx->GetHash().ToUint256());
+            RecentRejectsFilter().insert(ptx->GetHash().ToUint256());
             m_txrequest.ForgetTxHash(ptx->GetHash());
         }
         if (maybe_add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) {
@@ -3266,7 +3299,7 @@ void PeerManagerImpl::ProcessPackageResult(const PackageToValidate& package_to_v
     const auto& senders = package_to_validate.m_senders;
 
     if (package_result.m_state.IsInvalid()) {
-        m_recent_rejects_reconsiderable.insert(GetPackageHash(package));
+        RecentRejectsReconsiderableFilter().insert(GetPackageHash(package));
     }
     // We currently only expect to process 1-parent-1-child packages. Remove if this changes.
     if (!Assume(package.size() == 2)) return;
@@ -3320,7 +3353,7 @@ std::optional<PeerManagerImpl::PackageToValidate> PeerManagerImpl::Find1P1CPacka
 
     const auto& parent_wtxid{ptx->GetWitnessHash()};
 
-    Assume(m_recent_rejects_reconsiderable.contains(parent_wtxid.ToUint256()));
+    Assume(RecentRejectsReconsiderableFilter().contains(parent_wtxid.ToUint256()));
 
     // Prefer children from this peer. This helps prevent censorship attempts in which an attacker
     // sends lots of fake children for the parent, and we (unluckily) keep selecting the fake
@@ -3332,7 +3365,7 @@ std::optional<PeerManagerImpl::PackageToValidate> PeerManagerImpl::Find1P1CPacka
     // most recent) one efficiently.
     for (const auto& child : cpfp_candidates_same_peer) {
         Package maybe_cpfp_package{ptx, child};
-        if (!m_recent_rejects_reconsiderable.contains(GetPackageHash(maybe_cpfp_package))) {
+        if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package))) {
             return PeerManagerImpl::PackageToValidate{ptx, child, nodeid, nodeid};
         }
     }
@@ -3356,10 +3389,10 @@ std::optional<PeerManagerImpl::PackageToValidate> PeerManagerImpl::Find1P1CPacka
 
     for (const auto index : tx_indices) {
         // If we already tried a package and failed for any reason, the combined hash was
-        // cached in m_recent_rejects_reconsiderable.
+        // cached in m_lazy_recent_rejects_reconsiderable.
         const auto [child_tx, child_sender] = cpfp_candidates_different_peer.at(index);
         Package maybe_cpfp_package{ptx, child_tx};
-        if (!m_recent_rejects_reconsiderable.contains(GetPackageHash(maybe_cpfp_package))) {
+        if (!RecentRejectsReconsiderableFilter().contains(GetPackageHash(maybe_cpfp_package))) {
             return PeerManagerImpl::PackageToValidate{ptx, child_tx, nodeid, child_sender};
         }
     }
@@ -4551,8 +4584,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
             }
         }
 
-        if (m_recent_rejects_reconsiderable.contains(wtxid)) {
-            // When a transaction is already in m_recent_rejects_reconsiderable, we shouldn't submit
+        if (RecentRejectsReconsiderableFilter().contains(wtxid)) {
+            // When a transaction is already in m_lazy_recent_rejects_reconsiderable, we shouldn't submit
             // it by itself again. However, look for a matching child in the orphanage, as it is
             // possible that they succeed as a package.
             LogPrint(BCLog::TXPACKAGES, "found tx %s (wtxid=%s) in reconsiderable rejects, looking for child in orphanage\n",
@@ -4564,20 +4597,20 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
                 ProcessPackageResult(package_to_validate.value(), package_result);
             }
         }
-        // If a tx is detected by m_recent_rejects it is ignored. Because we haven't
+        // If a tx is detected by m_lazy_recent_rejects it is ignored. Because we haven't
         // submitted the tx to our mempool, we won't have computed a DoS
         // score for it or determined exactly why we consider it invalid.
         //
         // This means we won't penalize any peer subsequently relaying a DoSy
         // tx (even if we penalized the first peer who gave it to us) because
-        // we have to account for m_recent_rejects showing false positives. In
+        // we have to account for m_lazy_recent_rejects showing false positives. In
         // other words, we shouldn't penalize a peer if we aren't *sure* they
         // submitted a DoSy tx.
         //
-        // Note that m_recent_rejects doesn't just record DoSy or invalid
+        // Note that m_lazy_recent_rejects doesn't just record DoSy or invalid
         // transactions, but any tx not accepted by the mempool, which may be
         // due to node policy (vs. consensus). So we can't blanket penalize a
-        // peer simply for relaying a tx that our m_recent_rejects has caught,
+        // peer simply for relaying a tx that our m_lazy_recent_rejects has caught,
         // regardless of false positives.
         return;
     }
@@ -4604,16 +4637,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
         std::sort(unique_parents.begin(), unique_parents.end());
         unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
 
-        // Distinguish between parents in m_recent_rejects and m_recent_rejects_reconsiderable.
-        // We can tolerate having up to 1 parent in m_recent_rejects_reconsiderable since we
-        // submit 1p1c packages. However, fail immediately if any are in m_recent_rejects.
+        // Distinguish between parents in m_lazy_recent_rejects and m_lazy_recent_rejects_reconsiderable.
+        // We can tolerate having up to 1 parent in m_lazy_recent_rejects_reconsiderable since we
+        // submit 1p1c packages. However, fail immediately if any are in m_lazy_recent_rejects.
         std::optional<uint256> rejected_parent_reconsiderable;
         for (const uint256& parent_txid : unique_parents) {
-            if (m_recent_rejects.contains(parent_txid)) {
+            if (RecentRejectsFilter().contains(parent_txid)) {
                 fRejectedParents = true;
                 break;
-            } else if (m_recent_rejects_reconsiderable.contains(parent_txid) && !m_mempool.exists(GenTxid::Txid(parent_txid))) {
-                // More than 1 parent in m_recent_rejects_reconsiderable: 1p1c will not be
+            } else if (RecentRejectsReconsiderableFilter().contains(parent_txid) && !m_mempool.exists(GenTxid::Txid(parent_txid))) {
+                // More than 1 parent in m_lazy_recent_rejects_reconsiderable: 1p1c will not be
                 // sufficient to accept this package, so just give up here.
                 if (rejected_parent_reconsiderable.has_value()) {
                     fRejectedParents = true;
@@ -4633,7 +4666,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
                 // protocol for getting all unconfirmed parents.
                 const auto gtxid{GenTxid::Txid(parent_txid)};
                 AddKnownTx(*peer, parent_txid);
-                // Exclude m_recent_rejects_reconsiderable: the missing parent may have been
+                // Exclude m_lazy_recent_rejects_reconsiderable: the missing parent may have been
                 // previously rejected for being too low feerate. This orphan might CPFP it.
                 if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) AddTxAnnouncement(pfrom, gtxid, current_time);
             }
@@ -4658,8 +4691,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
             // regardless of what witness is provided, we will not accept
             // this, so we don't need to allow for redownload of this txid
             // from any of our non-wtxidrelay peers.
-            m_recent_rejects.insert(tx.GetHash().ToUint256());
-            m_recent_rejects.insert(tx.GetWitnessHash().ToUint256());
+            RecentRejectsFilter().insert(tx.GetHash().ToUint256());
+            RecentRejectsFilter().insert(tx.GetWitnessHash().ToUint256());
             m_txrequest.ForgetTxHash(tx.GetHash());
             m_txrequest.ForgetTxHash(tx.GetWitnessHash());
         }
@@ -6306,7 +6339,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
                          entry.second.GetHash().ToString(), entry.first);
             }
             for (const GenTxid& gtxid : requestable) {
-                // Exclude m_recent_rejects_reconsiderable: we may be requesting a missing parent
+                // Exclude m_lazy_recent_rejects_reconsiderable: we may be requesting a missing parent
                 // that was previously rejected for being too low feerate.
                 if (!AlreadyHaveTx(gtxid, /*include_reconsiderable=*/false)) {
                     LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx",
src/test/fuzz/p2p_handshake.cpp (new file)
@@ -0,0 +1,107 @@
// Copyright (c) 2020-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <addrman.h>
#include <consensus/consensus.h>
#include <net.h>
#include <net_processing.h>
#include <node/warnings.h>
#include <protocol.h>
#include <script/script.h>
#include <sync.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
#include <test/fuzz/util/net.h>
#include <test/util/mining.h>
#include <test/util/net.h>
#include <test/util/setup_common.h>
#include <test/util/validation.h>
#include <util/time.h>
#include <validationinterface.h>

#include <ios>
#include <string>
#include <utility>
#include <vector>

namespace {
const TestingSetup* g_setup;

void initialize()
{
    static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(
        /*chain_type=*/ChainType::REGTEST);
    g_setup = testing_setup.get();
}
} // namespace

FUZZ_TARGET(p2p_handshake, .init = ::initialize)
{
    FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());

    ConnmanTestMsg& connman = static_cast<ConnmanTestMsg&>(*g_setup->m_node.connman);
    auto& chainman = static_cast<TestChainstateManager&>(*g_setup->m_node.chainman);
    SetMockTime(1610000000); // any time to successfully reset ibd
    chainman.ResetIbd();

    node::Warnings warnings{};
    NetGroupManager netgroupman{{}};
    AddrMan addrman{netgroupman, /*deterministic=*/true, 0};
    auto peerman = PeerManager::make(connman, addrman,
                                     /*banman=*/nullptr, chainman,
                                     *g_setup->m_node.mempool, warnings,
                                     PeerManager::Options{
                                         .reconcile_txs = true,
                                         .deterministic_rng = true,
                                     });
    connman.SetMsgProc(peerman.get());

    LOCK(NetEventsInterface::g_msgproc_mutex);

    std::vector<CNode*> peers;
    const auto num_peers_to_add = fuzzed_data_provider.ConsumeIntegralInRange(1, 3);
    for (int i = 0; i < num_peers_to_add; ++i) {
        peers.push_back(ConsumeNodeAsUniquePtr(fuzzed_data_provider, i).release());
        connman.AddTestNode(*peers.back());
        peerman->InitializeNode(
            *peers.back(),
            static_cast<ServiceFlags>(fuzzed_data_provider.ConsumeIntegral<uint64_t>()));
    }

    LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 100)
    {
        CNode& connection = *PickValue(fuzzed_data_provider, peers);
        if (connection.fDisconnect || connection.fSuccessfullyConnected) {
            // Skip if the connection was disconnected or if the version
            // handshake was already completed.
            continue;
        }

        SetMockTime(GetTime() +
                    fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(
                        -std::chrono::seconds{10min}.count(), // Allow mocktime to go backwards slightly
                        std::chrono::seconds{TIMEOUT_INTERVAL}.count()));

        CSerializedNetMsg net_msg;
        net_msg.m_type = PickValue(fuzzed_data_provider, ALL_NET_MESSAGE_TYPES);
        net_msg.data = ConsumeRandomLengthByteVector(fuzzed_data_provider, MAX_PROTOCOL_MESSAGE_LENGTH);

        connman.FlushSendBuffer(connection);
        (void)connman.ReceiveMsgFrom(connection, std::move(net_msg));

        bool more_work{true};
        while (more_work) {
            connection.fPauseSend = false;

            try {
                more_work = connman.ProcessMessagesOnce(connection);
            } catch (const std::ios_base::failure&) {
            }
            peerman->SendMessages(&connection);
        }
    }

    g_setup->m_node.connman->StopNodes();
}
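Usage note (an assumption about the build workflow, not part of the diff): with the autotools build used at the time, fuzz targets like this are typically built by configuring with `--enable-fuzz` and a fuzzer sanitizer and then invoked through the libFuzzer wrapper binary, e.g. `FUZZ=p2p_handshake src/test/fuzz/fuzz`, as described in doc/fuzzing.md; the exact flags depend on the local setup.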
@@ -34,6 +34,11 @@ class Span;
 struct ConnmanTestMsg : public CConnman {
     using CConnman::CConnman;
 
+    void SetMsgProc(NetEventsInterface* msgproc)
+    {
+        m_msgproc = msgproc;
+    }
+
     void SetPeerConnectTimeout(std::chrono::seconds timeout)
     {
         m_peer_connect_timeout = timeout;
@@ -162,7 +162,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
         self.log.info("Generate a block")
         last_block = self.generate(self.nodes[0], 1)
         # generate() implicitly syncs blocks, so that peer 1 gets the block before timelock_tx
-        # Otherwise, peer 1 would put the timelock_tx in m_recent_rejects
+        # Otherwise, peer 1 would put the timelock_tx in m_lazy_recent_rejects
 
         self.log.info("The time-locked transaction can now be spent")
         timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
@@ -121,7 +121,7 @@ class P2PPermissionsTests(BitcoinTestFramework):
         tx.vout[0].nValue += 1
         txid = tx.rehash()
         # Send the transaction twice. The first time, it'll be rejected by ATMP because it conflicts
-        # with a mempool transaction. The second time, it'll be in the m_recent_rejects filter.
+        # with a mempool transaction. The second time, it'll be in the m_lazy_recent_rejects filter.
         p2p_rebroadcast_wallet.send_txs_and_test(
             [tx],
             self.nodes[1],