Merge #18017: txmempool: split epoch logic into class
fd6580e405 [refactor] txmempool: split epoch logic into class (Anthony Towns)

Pull request description:

  Splits the epoch logic introduced in #17925 into a separate class. Uses clang's thread safety annotations and encapsulates the data more strongly to reduce chances of bugs from API misuse.

ACKs for top commit:
  jonatack:
    ACK fd6580e405, using clang thread safety annotations looks like a very good idea, and the encapsulation this change adds should improve robustness (and possible unit test-ability) of the code. Verified that changing some of the locking duly provoked build-time warnings with Clang 9 on Debian and that small changes in the new `Epoch` class were covered by failing functional test assertions in `mempool_updatefromblock.py`, `mempool_resurrect.py`, and `mempool_reorg.py`.
  hebasto:
    re-ACK fd6580e405, since my [previous](https://github.com/bitcoin/bitcoin/pull/18017#pullrequestreview-569619362) review.

Tree-SHA512: 7004623faa02b56639aa05ab7a078320a6d8d54ec62d8022876221e33f350f47df51ddff056c0de5be798f8eb39b5c03c2d3f035698555d70abc218e950f2f8c
commit b59f2787e5
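The description above is brief about what the clang annotations actually buy. Below is a minimal sketch (not from this PR) of the pattern: a LOCKABLE class whose method requires a SCOPED_LOCKABLE guard, so calling the method without the guard is flagged at compile time by `clang -Wthread-safety`. The `Counter` class and the macro definitions are illustrative stand-ins for the new `Epoch` class and Bitcoin Core's `threadsafety.h`.

```cpp
// Simplified stand-ins for the macros in Bitcoin Core's threadsafety.h.
#if defined(__clang__)
#define LOCKABLE __attribute__((lockable))
#define SCOPED_LOCKABLE __attribute__((scoped_lockable))
#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
#define UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))
#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
#else
#define LOCKABLE
#define SCOPED_LOCKABLE
#define EXCLUSIVE_LOCK_FUNCTION(...)
#define UNLOCK_FUNCTION(...)
#define EXCLUSIVE_LOCKS_REQUIRED(...)
#endif

#include <cassert>
#include <cstdint>

// A hypothetical counter that, like Epoch, may only be bumped while a guard is live.
class LOCKABLE Counter
{
    uint64_t m_value{0};
    bool m_guarded{false};

public:
    class SCOPED_LOCKABLE Guard
    {
        Counter& m_counter;

    public:
        explicit Guard(Counter& c) EXCLUSIVE_LOCK_FUNCTION(c) : m_counter(c) { m_counter.m_guarded = true; }
        ~Guard() UNLOCK_FUNCTION() { m_counter.m_guarded = false; }
    };

    // The annotation moves part of the old runtime assert to compile time:
    // clang warns at any call site that does not hold a Guard on this Counter.
    uint64_t bump() EXCLUSIVE_LOCKS_REQUIRED(*this)
    {
        assert(m_guarded);
        return ++m_value;
    }
};

int main()
{
    Counter c;
    {
        Counter::Guard guard(c); // acquires the capability for this scope
        c.bump();                // ok: guard is held
    }
    // c.bump();                 // clang -Wthread-safety would warn: bump() requires holding 'c'
}
```

This is the same shape as `Epoch`/`Epoch::Guard` in the new header added by this commit, with the "capability" being the traversal epoch rather than a mutex.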
@@ -231,6 +231,7 @@ BITCOIN_CORE_H = \
   util/bip32.h \
   util/bytevectorhash.h \
   util/check.h \
+  util/epochguard.h \
   util/error.h \
   util/fees.h \
   util/getuniquepath.h \
@@ -23,7 +23,7 @@ CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& _tx, const CAmount& _nFe
                                  int64_t _nTime, unsigned int _entryHeight,
                                  bool _spendsCoinbase, int64_t _sigOpsCost, LockPoints lp)
     : tx(_tx), nFee(_nFee), nTxWeight(GetTransactionWeight(*tx)), nUsageSize(RecursiveDynamicUsage(tx)), nTime(_nTime), entryHeight(_entryHeight),
-      spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp), m_epoch(0)
+      spendsCoinbase(_spendsCoinbase), sigOpCost(_sigOpsCost), lockPoints(lp)
 {
     nCountWithDescendants = 1;
     nSizeWithDescendants = GetTxSize();
@@ -132,7 +132,7 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
         // include them, and update their CTxMemPoolEntry::m_parents to include this tx.
         // we cache the in-mempool children to avoid duplicate updates
         {
-            const auto epoch = GetFreshEpoch();
+            WITH_FRESH_EPOCH(m_epoch);
             for (; iter != mapNextTx.end() && iter->first->hash == hash; ++iter) {
                 const uint256 &childHash = iter->second->GetHash();
                 txiter childIter = mapTx.find(childHash);
@@ -1119,22 +1119,3 @@ void CTxMemPool::SetIsLoaded(bool loaded)
     LOCK(cs);
     m_is_loaded = loaded;
 }
-
-
-CTxMemPool::EpochGuard CTxMemPool::GetFreshEpoch() const
-{
-    return EpochGuard(*this);
-}
-CTxMemPool::EpochGuard::EpochGuard(const CTxMemPool& in) : pool(in)
-{
-    assert(!pool.m_has_epoch_guard);
-    ++pool.m_epoch;
-    pool.m_has_epoch_guard = true;
-}
-
-CTxMemPool::EpochGuard::~EpochGuard()
-{
-    // prevents stale results being used
-    ++pool.m_epoch;
-    pool.m_has_epoch_guard = false;
-}
@@ -21,6 +21,7 @@
 #include <primitives/transaction.h>
 #include <random.h>
 #include <sync.h>
+#include <util/epochguard.h>
 #include <util/hasher.h>

 #include <boost/multi_index_container.hpp>
@@ -64,6 +65,7 @@ struct CompareIteratorByHash {
         return a->GetTx().GetHash() < b->GetTx().GetHash();
     }
 };

 /** \class CTxMemPoolEntry
  *
  * CTxMemPoolEntry stores data about the corresponding transaction, as well
@@ -156,7 +158,7 @@ public:
     Children& GetMemPoolChildren() const { return m_children; }

     mutable size_t vTxHashesIdx; //!< Index in mempool's vTxHashes
-    mutable uint64_t m_epoch; //!< epoch when last touched, useful for graph algorithms
+    mutable Epoch::Marker m_epoch_marker; //!< epoch when last touched, useful for graph algorithms
 };

 // Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index.
@@ -486,8 +488,7 @@ private:
    mutable int64_t lastRollingFeeUpdate;
    mutable bool blockSinceLastRollingFeeBump;
    mutable double rollingMinimumFeeRate; //!< minimum fee to get into the pool, decreases exponentially
-    mutable uint64_t m_epoch{0};
-    mutable bool m_has_epoch_guard{false};
+    mutable Epoch m_epoch GUARDED_BY(cs);

    // In-memory counter for external mempool tracking purposes.
    // This number is incremented once every time a transaction
@@ -666,7 +667,7 @@
     * for). Note: vHashesToUpdate should be the set of transactions from the
     * disconnected block that have been accepted back into the mempool.
     */
-    void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main);
+    void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch);

    /** Try to calculate all in-mempool ancestors of entry.
     * (these are all calculated including the tx itself)
@@ -827,52 +828,22 @@ private:
     */
    void removeUnchecked(txiter entry, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
public:
-    /** EpochGuard: RAII-style guard for using epoch-based graph traversal algorithms.
-     * When walking ancestors or descendants, we generally want to avoid
-     * visiting the same transactions twice. Some traversal algorithms use
-     * std::set (or setEntries) to deduplicate the transaction we visit.
-     * However, use of std::set is algorithmically undesirable because it both
-     * adds an asymptotic factor of O(log n) to traverals cost and triggers O(n)
-     * more dynamic memory allocations.
-     * In many algorithms we can replace std::set with an internal mempool
-     * counter to track the time (or, "epoch") that we began a traversal, and
-     * check + update a per-transaction epoch for each transaction we look at to
-     * determine if that transaction has not yet been visited during the current
-     * traversal's epoch.
-     * Algorithms using std::set can be replaced on a one by one basis.
-     * Both techniques are not fundamentally incompatible across the codebase.
-     * Generally speaking, however, the remaining use of std::set for mempool
-     * traversal should be viewed as a TODO for replacement with an epoch based
-     * traversal, rather than a preference for std::set over epochs in that
-     * algorithm.
-     */
-    class EpochGuard {
-        const CTxMemPool& pool;
-    public:
-        explicit EpochGuard(const CTxMemPool& in);
-        ~EpochGuard();
-    };
-    // N.B. GetFreshEpoch modifies mutable state via the EpochGuard construction
-    // (and later destruction)
-    EpochGuard GetFreshEpoch() const EXCLUSIVE_LOCKS_REQUIRED(cs);
-
     /** visited marks a CTxMemPoolEntry as having been traversed
-     * during the lifetime of the most recently created EpochGuard
+     * during the lifetime of the most recently created Epoch::Guard
      * and returns false if we are the first visitor, true otherwise.
      *
-     * An EpochGuard must be held when visited is called or an assert will be
+     * An Epoch::Guard must be held when visited is called or an assert will be
      * triggered.
      *
      */
-    bool visited(txiter it) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
-        assert(m_has_epoch_guard);
-        bool ret = it->m_epoch >= m_epoch;
-        it->m_epoch = std::max(it->m_epoch, m_epoch);
-        return ret;
+    bool visited(const txiter it) const EXCLUSIVE_LOCKS_REQUIRED(cs, m_epoch)
+    {
+        return m_epoch.visited(it->m_epoch_marker);
     }

-    bool visited(Optional<txiter> it) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
-        assert(m_has_epoch_guard);
+    bool visited(Optional<txiter> it) const EXCLUSIVE_LOCKS_REQUIRED(cs, m_epoch)
+    {
+        assert(m_epoch.guarded()); // verify guard even when it==nullopt
         return !it || visited(*it);
     }
 };
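The comment block removed above (and carried over into the new util/epochguard.h) argues that a per-entry "last visited" counter avoids the O(log n) lookups and extra allocations of a std::set visited-set. The following self-contained sketch illustrates only that underlying idea; the `Node`, `Traversal`, and `CountReachable` names are hypothetical stand-ins for CTxMemPoolEntry and a mempool graph walk, not mempool code.

```cpp
// Illustration of epoch-based deduplication: instead of inserting every visited
// node into a std::set, each node carries a counter compared against a single
// traversal-wide epoch value.
#include <cstdint>
#include <iostream>
#include <vector>

struct Node {
    std::vector<Node*> children;
    uint64_t last_visited_epoch = 0; // analogous to Epoch::Marker
};

class Traversal {
    uint64_t m_epoch = 0;

public:
    uint64_t begin() { return ++m_epoch; } // analogous to creating an Epoch::Guard

    // Returns true if the node was already seen in the current epoch, and marks
    // it visited as a side effect (same contract as Epoch::visited()).
    bool visited(Node& n, uint64_t epoch) const
    {
        if (n.last_visited_epoch < epoch) {
            n.last_visited_epoch = epoch;
            return false;
        }
        return true;
    }
};

// Count reachable nodes without a std::set: each node is expanded at most once
// per epoch, even when the graph contains diamonds (shared descendants).
int CountReachable(Traversal& t, uint64_t epoch, Node& start)
{
    if (t.visited(start, epoch)) return 0;
    int count = 1;
    for (Node* child : start.children) count += CountReachable(t, epoch, *child);
    return count;
}

int main()
{
    Node a, b, c, d;
    a.children = {&b, &c};
    b.children = {&d};
    c.children = {&d}; // d is reachable via two paths but counted once
    Traversal t;
    std::cout << CountReachable(t, t.begin(), a) << '\n'; // prints 4
}
```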
src/util/epochguard.h (new file, 91 lines)
@@ -0,0 +1,91 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2020 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_UTIL_EPOCHGUARD_H
+#define BITCOIN_UTIL_EPOCHGUARD_H
+
+#include <threadsafety.h>
+
+#include <cassert>
+
+/** Epoch: RAII-style guard for using epoch-based graph traversal algorithms.
+ * When walking ancestors or descendants, we generally want to avoid
+ * visiting the same transactions twice. Some traversal algorithms use
+ * std::set (or setEntries) to deduplicate the transaction we visit.
+ * However, use of std::set is algorithmically undesirable because it both
+ * adds an asymptotic factor of O(log n) to traversals cost and triggers O(n)
+ * more dynamic memory allocations.
+ * In many algorithms we can replace std::set with an internal mempool
+ * counter to track the time (or, "epoch") that we began a traversal, and
+ * check + update a per-transaction epoch for each transaction we look at to
+ * determine if that transaction has not yet been visited during the current
+ * traversal's epoch.
+ * Algorithms using std::set can be replaced on a one by one basis.
+ * Both techniques are not fundamentally incompatible across the codebase.
+ * Generally speaking, however, the remaining use of std::set for mempool
+ * traversal should be viewed as a TODO for replacement with an epoch based
+ * traversal, rather than a preference for std::set over epochs in that
+ * algorithm.
+ */
+
+class LOCKABLE Epoch
+{
+private:
+    uint64_t m_raw_epoch = 0;
+    bool m_guarded = false;
+
+public:
+    Epoch() = default;
+    Epoch(const Epoch&) = delete;
+    Epoch& operator=(const Epoch&) = delete;
+
+    bool guarded() const { return m_guarded; }
+
+    class Marker
+    {
+    private:
+        uint64_t m_marker = 0;
+
+        // only allow modification via Epoch member functions
+        friend class Epoch;
+        Marker& operator=(const Marker&) = delete;
+    };
+
+    class SCOPED_LOCKABLE Guard
+    {
+    private:
+        Epoch& m_epoch;
+
+    public:
+        explicit Guard(Epoch& epoch) EXCLUSIVE_LOCK_FUNCTION(epoch) : m_epoch(epoch)
+        {
+            assert(!m_epoch.m_guarded);
+            ++m_epoch.m_raw_epoch;
+            m_epoch.m_guarded = true;
+        }
+        ~Guard() UNLOCK_FUNCTION()
+        {
+            assert(m_epoch.m_guarded);
+            ++m_epoch.m_raw_epoch; // ensure clear separation between epochs
+            m_epoch.m_guarded = false;
+        }
+    };
+
+    bool visited(Marker& marker) const EXCLUSIVE_LOCKS_REQUIRED(*this)
+    {
+        assert(m_guarded);
+        if (marker.m_marker < m_raw_epoch) {
+            // marker is from a previous epoch, so this is its first visit
+            marker.m_marker = m_raw_epoch;
+            return false;
+        } else {
+            return true;
+        }
+    }
+};
+
+#define WITH_FRESH_EPOCH(epoch) const Epoch::Guard PASTE2(epoch_guard_, __COUNTER__)(epoch)
+
+#endif // BITCOIN_UTIL_EPOCHGUARD_H
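For reference, here is a hedged usage sketch of the class above, mirroring the shape of the new CTxMemPool call sites. It assumes compilation inside the Bitcoin Core source tree so that `<util/epochguard.h>` and `<threadsafety.h>` resolve; the `Entry`, `Pool`, and `CountFirstVisits` names are hypothetical stand-ins, not real mempool code. The guard is constructed directly rather than via `WITH_FRESH_EPOCH`, which expands to the same construction.

```cpp
#include <util/epochguard.h>

#include <vector>

// Hypothetical stand-in for CTxMemPoolEntry: each entry carries a per-entry marker.
struct Entry {
    Epoch::Marker m_epoch_marker;
};

// Hypothetical stand-in for CTxMemPool.
class Pool
{
    mutable Epoch m_epoch; // in txmempool.h this member is additionally GUARDED_BY(cs)

public:
    // Visit every entry at most once. The Guard bumps the epoch on entry and again
    // on exit, so markers written during older traversals can never match the
    // current epoch. LOCKS_EXCLUDED mirrors the annotation this commit adds to
    // UpdateTransactionsFromBlock: it warns if a guard is already held here.
    int CountFirstVisits(std::vector<Entry>& entries) const LOCKS_EXCLUDED(m_epoch)
    {
        const Epoch::Guard guard(m_epoch);
        int first_visits = 0;
        for (Entry& e : entries) {
            // visited() returns false on the first visit in this epoch, true on repeats
            if (!m_epoch.visited(e.m_epoch_marker)) ++first_visits;
        }
        return first_visits;
    }
};
```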