Add pool based memory resource & allocator

A memory resource similar to std::pmr::unsynchronized_pool_resource, but
optimized for node-based containers.

Co-Authored-By: Pieter Wuille <pieter@wuille.net>
Martin Leitner-Ankerl 2022-06-11 09:23:51 +02:00
parent 2305643646
commit b8401c3281
8 changed files with 688 additions and 0 deletions
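
As a rough usage sketch (not part of the diff below; the exact container node size is implementation-defined), the new PoolResource/PoolAllocator pair is meant to be wired into a node-based container much like the benchmark added in src/bench/pool.cpp does:

#include <support/allocators/pool.h>

#include <cstdint>
#include <unordered_map>
#include <utility>

int main()
{
    using ValueType = std::pair<const uint64_t, uint64_t>;
    // The pool's block size must cover the whole container node, not just the pair,
    // so a few pointers' worth of headroom is added for the node bookkeeping.
    using Map = std::unordered_map<uint64_t, uint64_t, std::hash<uint64_t>, std::equal_to<uint64_t>,
                                   PoolAllocator<ValueType, sizeof(ValueType) + 4 * sizeof(void*), alignof(void*)>>;

    Map::allocator_type::ResourceType resource{}; // owns every chunk; frees them all on destruction
    Map map{0, std::hash<uint64_t>{}, std::equal_to<uint64_t>{}, &resource};
    map[1] = 2; // the node's memory now comes from the pool
}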

src/Makefile.am

@@ -261,6 +261,7 @@ BITCOIN_CORE_H = \
shutdown.h \
signet.h \
streams.h \
support/allocators/pool.h \
support/allocators/secure.h \
support/allocators/zeroafterfree.h \
support/cleanse.h \

src/Makefile.bench.include

@@ -42,6 +42,7 @@ bench_bench_bitcoin_SOURCES = \
bench/nanobench.h \
bench/peer_eviction.cpp \
bench/poly1305.cpp \
bench/pool.cpp \
bench/prevector.cpp \
bench/rollingbloom.cpp \
bench/rpc_blockchain.cpp \

src/Makefile.test.include

@@ -116,6 +116,7 @@ BITCOIN_TESTS =\
test/pmt_tests.cpp \
test/policy_fee_tests.cpp \
test/policyestimator_tests.cpp \
test/pool_tests.cpp \
test/pow_tests.cpp \
test/prevector_tests.cpp \
test/raii_event_tests.cpp \

src/Makefile.test_util.include

@@ -15,6 +15,7 @@ TEST_UTIL_H = \
test/util/logging.h \
test/util/mining.h \
test/util/net.h \
test/util/poolresourcetester.h \
test/util/random.h \
test/util/script.h \
test/util/setup_common.h \

src/bench/pool.cpp (new file, 50 lines)

@@ -0,0 +1,50 @@
// Copyright (c) 2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <bench/bench.h>
#include <support/allocators/pool.h>
#include <unordered_map>
template <typename Map>
void BenchFillClearMap(benchmark::Bench& bench, Map& map)
{
size_t batch_size = 5000;
// make sure each iteration of the benchmark contains exactly 5000 inserts and one clear.
// do this at least 10 times so we get reasonably accurate results
bench.batch(batch_size).minEpochIterations(10).run([&] {
auto rng = ankerl::nanobench::Rng(1234);
for (size_t i = 0; i < batch_size; ++i) {
map[rng()];
}
map.clear();
});
}
static void PoolAllocator_StdUnorderedMap(benchmark::Bench& bench)
{
auto map = std::unordered_map<uint64_t, uint64_t>();
BenchFillClearMap(bench, map);
}
static void PoolAllocator_StdUnorderedMapWithPoolResource(benchmark::Bench& bench)
{
using Map = std::unordered_map<uint64_t,
uint64_t,
std::hash<uint64_t>,
std::equal_to<uint64_t>,
PoolAllocator<std::pair<const uint64_t, uint64_t>,
sizeof(std::pair<const uint64_t, uint64_t>) + 4 * sizeof(void*),
alignof(void*)>>;
// make sure the resource's pools are large enough to hold the container's node; we do this by adding the size of a few pointers to the element size.
auto pool_resource = Map::allocator_type::ResourceType();
auto map = Map{0, std::hash<uint64_t>{}, std::equal_to<uint64_t>{}, &pool_resource};
BenchFillClearMap(bench, map);
}
BENCHMARK(PoolAllocator_StdUnorderedMap, benchmark::PriorityLevel::HIGH);
BENCHMARK(PoolAllocator_StdUnorderedMapWithPoolResource, benchmark::PriorityLevel::HIGH);
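
For concreteness, the block-size arithmetic behind the benchmark's PoolAllocator parameters works out as follows on a typical 64-bit platform (an assumption; node layout and sizes are implementation-defined):

// Worked example for the benchmark's MAX_BLOCK_SIZE_BYTES choice (64-bit assumption).
#include <cstdint>
#include <utility>

using Node = std::pair<const uint64_t, uint64_t>;
static_assert(sizeof(Node) == 16, "two 64-bit values on common 64-bit ABIs");
static_assert(sizeof(void*) == 8, "64-bit pointers assumed");
// MAX_BLOCK_SIZE_BYTES = sizeof(Node) + 4 * sizeof(void*) = 16 + 32 = 48 bytes:
// enough headroom for the pair plus whatever per-node bookkeeping (next pointer,
// optionally a cached hash) the standard library's unordered_map adds on top.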

src/support/allocators/pool.h (new file, 349 lines)

@@ -0,0 +1,349 @@
// Copyright (c) 2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_SUPPORT_ALLOCATORS_POOL_H
#define BITCOIN_SUPPORT_ALLOCATORS_POOL_H
#include <array>
#include <cassert>
#include <cstddef>
#include <list>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
/**
* A memory resource similar to std::pmr::unsynchronized_pool_resource, but
* optimized for node-based containers. It has the following properties:
*
* * Owns the allocated memory and frees it on destruction, even when deallocate
* has not been called on the allocated blocks.
*
* * Consists of a number of pools, each one for a different block size.
* Each pool holds blocks of uniform size in a freelist.
*
* * Exhausting memory in a freelist causes a new allocation of a fixed size chunk.
* This chunk is used to carve out blocks.
*
* * Block sizes or alignments that can not be served by the pools are allocated
* and deallocated by operator new().
*
* PoolResource is not thread-safe. It is intended to be used by PoolAllocator.
*
* @tparam MAX_BLOCK_SIZE_BYTES Maximum size to allocate with the pool. If larger
* sizes are requested, allocation falls back to new().
*
* @tparam ALIGN_BYTES Required alignment for the allocations.
*
* An example: If you create a PoolResource<128, 8>(262144) and perform a bunch of
* allocations and deallocate 2 blocks with size 8 bytes, and 3 blocks with size 16,
* the members will look like this:
*
*    m_free_lists                       m_allocated_chunks
*
*    [ 1] -> 8 B -> 8 B                 [ 262144 B ] -> ... -> [ 262144 B           ]
*    [ 2] -> 16 B -> 16 B -> 16 B                                       ^           ^
*     ...                                             m_available_memory_it         |
*    [16]                                             m_available_memory_end -------+
*
* Here m_free_lists[1] holds the 2 blocks of size 8 bytes, and m_free_lists[2]
* holds the 3 blocks of size 16. The blocks came from the data stored in the
* m_allocated_chunks list. Each chunk is 262144 bytes. The last chunk still has
* some memory available for carving out blocks; once m_available_memory_it reaches
* m_available_memory_end, a new chunk is allocated and appended to the list.
*/
template <std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
class PoolResource final
{
static_assert(ALIGN_BYTES > 0, "ALIGN_BYTES must be nonzero");
static_assert((ALIGN_BYTES & (ALIGN_BYTES - 1)) == 0, "ALIGN_BYTES must be a power of two");
/**
* In-place linked list of the allocations, used for the freelist.
*/
struct ListNode {
ListNode* m_next;
explicit ListNode(ListNode* next) : m_next(next) {}
};
static_assert(std::is_trivially_destructible_v<ListNode>, "Make sure we don't need to manually call a destructor");
/**
* Internal alignment value. The larger of the requested ALIGN_BYTES and alignof(ListNode).
*/
static constexpr std::size_t ELEM_ALIGN_BYTES = std::max(alignof(ListNode), ALIGN_BYTES);
static_assert((ELEM_ALIGN_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "ELEM_ALIGN_BYTES must be a power of two");
static_assert(sizeof(ListNode) <= ELEM_ALIGN_BYTES, "Units of size ELEM_ALIGN_BYTES need to be able to store a ListNode");
static_assert((MAX_BLOCK_SIZE_BYTES & (ELEM_ALIGN_BYTES - 1)) == 0, "MAX_BLOCK_SIZE_BYTES needs to be a multiple of the alignment.");
/**
* Size in bytes to allocate per chunk
*/
const size_t m_chunk_size_bytes;
/**
* Contains all allocated pools of memory, used to free the data in the destructor.
*/
std::list<std::byte*> m_allocated_chunks{};
/**
* Single linked lists of all data that came from deallocating.
* m_free_lists[n] will serve blocks of size n*ELEM_ALIGN_BYTES.
*/
std::array<ListNode*, MAX_BLOCK_SIZE_BYTES / ELEM_ALIGN_BYTES + 1> m_free_lists{};
/**
* Points to the beginning of available memory for carving out allocations.
*/
std::byte* m_available_memory_it = nullptr;
/**
* Points to the end of available memory for carving out allocations.
*
* That member variable is redundant, and is always equal to `m_allocated_chunks.back() + m_chunk_size_bytes`
* whenever it is accessed, but `m_available_memory_end` caches this for clarity and efficiency.
*/
std::byte* m_available_memory_end = nullptr;
/**
* How many multiples of ELEM_ALIGN_BYTES are necessary to fit `bytes`. The result is used directly as an
* index into m_free_lists; the `+ (bytes == 0)` term rounds zero-byte requests up to index 1.
*/
[[nodiscard]] static constexpr std::size_t NumElemAlignBytes(std::size_t bytes)
{
return (bytes + ELEM_ALIGN_BYTES - 1) / ELEM_ALIGN_BYTES + (bytes == 0);
}
/**
* True when it is possible to make use of the freelist
*/
[[nodiscard]] static constexpr bool IsFreeListUsable(std::size_t bytes, std::size_t alignment)
{
return alignment <= ELEM_ALIGN_BYTES && bytes <= MAX_BLOCK_SIZE_BYTES;
}
/**
* Replaces node with placement constructed ListNode that points to the previous node
*/
void PlacementAddToList(void* p, ListNode*& node)
{
node = new (p) ListNode{node};
}
/**
* Allocate one full memory chunk which will be used to carve out allocations.
* Also puts any leftover bytes into the freelist.
*
* Precondition: the leftover bytes are either 0 or small enough to fit into one of the freelists.
*/
void AllocateChunk()
{
// if there is still any available memory left, put it into the freelist.
size_t remaining_available_bytes = std::distance(m_available_memory_it, m_available_memory_end);
if (0 != remaining_available_bytes) {
PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]);
}
void* storage = ::operator new (m_chunk_size_bytes, std::align_val_t{ELEM_ALIGN_BYTES});
m_available_memory_it = new (storage) std::byte[m_chunk_size_bytes];
m_available_memory_end = m_available_memory_it + m_chunk_size_bytes;
m_allocated_chunks.emplace_back(m_available_memory_it);
}
/**
* Access to internals for testing purpose only
*/
friend class PoolResourceTester;
public:
/**
* Construct a new PoolResource object which allocates the first chunk.
* chunk_size_bytes will be rounded up to the next multiple of ELEM_ALIGN_BYTES.
*/
explicit PoolResource(std::size_t chunk_size_bytes)
: m_chunk_size_bytes(NumElemAlignBytes(chunk_size_bytes) * ELEM_ALIGN_BYTES)
{
assert(m_chunk_size_bytes >= MAX_BLOCK_SIZE_BYTES);
AllocateChunk();
}
/**
* Construct a new PoolResource object; the chunk size defaults to 2^18 = 262144 bytes.
*/
PoolResource() : PoolResource(262144) {}
/**
* Disable copy & move semantics, these are not supported for the resource.
*/
PoolResource(const PoolResource&) = delete;
PoolResource& operator=(const PoolResource&) = delete;
PoolResource(PoolResource&&) = delete;
PoolResource& operator=(PoolResource&&) = delete;
/**
* Deallocates all memory associated with this memory resource.
*/
~PoolResource()
{
for (std::byte* chunk : m_allocated_chunks) {
std::destroy(chunk, chunk + m_chunk_size_bytes);
::operator delete ((void*)chunk, std::align_val_t{ELEM_ALIGN_BYTES});
}
}
/**
* Allocates a block of bytes. If possible the freelist is used, otherwise allocation
* is forwarded to ::operator new().
*/
void* Allocate(std::size_t bytes, std::size_t alignment)
{
if (IsFreeListUsable(bytes, alignment)) {
const std::size_t num_alignments = NumElemAlignBytes(bytes);
if (nullptr != m_free_lists[num_alignments]) {
// we've already got data in the pool's freelist, unlink one element and return the pointer
// to the unlinked memory. Since FreeList is trivially destructible we can just treat it as
// uninitialized memory.
return std::exchange(m_free_lists[num_alignments], m_free_lists[num_alignments]->m_next);
}
// freelist is empty: get one allocation from allocated chunk memory.
const std::ptrdiff_t round_bytes = static_cast<std::ptrdiff_t>(num_alignments * ELEM_ALIGN_BYTES);
if (round_bytes > m_available_memory_end - m_available_memory_it) {
// slow path, only happens when a new chunk needs to be allocated
AllocateChunk();
}
// Make sure we use the right number of bytes for that freelist (the request may have been rounded up).
return std::exchange(m_available_memory_it, m_available_memory_it + round_bytes);
}
// Can't use the pool => use operator new()
return ::operator new (bytes, std::align_val_t{alignment});
}
/**
* Returns a block to the freelists, or deletes the block when it did not come from the chunks.
*/
void Deallocate(void* p, std::size_t bytes, std::size_t alignment) noexcept
{
if (IsFreeListUsable(bytes, alignment)) {
const std::size_t num_alignments = NumElemAlignBytes(bytes);
// put the memory block into the linked list. We can placement construct the FreeList
// into the memory since we can be sure the alignment is correct.
PlacementAddToList(p, m_free_lists[num_alignments]);
} else {
// Can't use the pool => forward deallocation to ::operator delete().
::operator delete (p, std::align_val_t{alignment});
}
}
/**
* Number of allocated chunks
*/
[[nodiscard]] std::size_t NumAllocatedChunks() const
{
return m_allocated_chunks.size();
}
/**
* Size in bytes allocated per chunk, fixed at construction time.
*/
[[nodiscard]] size_t ChunkSizeBytes() const
{
return m_chunk_size_bytes;
}
};
/**
* Forwards all allocations/deallocations to the PoolResource.
*/
template <class T, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
class PoolAllocator
{
PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>* m_resource;
template <typename U, std::size_t M, std::size_t A>
friend class PoolAllocator;
public:
using value_type = T;
using ResourceType = PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>;
/**
* Not explicit so we can easily construct it with the correct resource
*/
PoolAllocator(ResourceType* resource) noexcept
: m_resource(resource)
{
}
PoolAllocator(const PoolAllocator& other) noexcept = default;
PoolAllocator& operator=(const PoolAllocator& other) noexcept = default;
template <class U>
PoolAllocator(const PoolAllocator<U, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& other) noexcept
: m_resource(other.resource())
{
}
/**
* The rebind struct here is mandatory because we use non-type template arguments for
* PoolAllocator. See https://en.cppreference.com/w/cpp/named_req/Allocator#cite_note-2
*/
template <typename U>
struct rebind {
using other = PoolAllocator<U, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>;
};
/**
* Forwards each call to the resource.
*/
T* allocate(size_t n)
{
return static_cast<T*>(m_resource->Allocate(n * sizeof(T), alignof(T)));
}
/**
* Forwards each call to the resource.
*/
void deallocate(T* p, size_t n) noexcept
{
m_resource->Deallocate(p, n * sizeof(T), alignof(T));
}
ResourceType* resource() const noexcept
{
return m_resource;
}
};
template <class T1, class T2, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
bool operator==(const PoolAllocator<T1, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& a,
const PoolAllocator<T2, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& b) noexcept
{
return a.resource() == b.resource();
}
template <class T1, class T2, std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
bool operator!=(const PoolAllocator<T1, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& a,
const PoolAllocator<T2, MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& b) noexcept
{
return !(a == b);
}
#endif // BITCOIN_SUPPORT_ALLOCATORS_POOL_H
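
To make the Allocate()/Deallocate() contract above concrete, here is a small sketch (not part of the commit) of how requests either hit a freelist or fall through to operator new:

#include <support/allocators/pool.h>

int main()
{
    PoolResource<128, 8> resource(65536); // pools for blocks up to 128 bytes, 64 KiB chunks

    void* a = resource.Allocate(10, 8);   // 10 bytes round up to 16 (2 * ELEM_ALIGN_BYTES), carved from the chunk
    resource.Deallocate(a, 10, 8);        // the 16-byte block is pushed onto m_free_lists[2]
    void* b = resource.Allocate(16, 8);   // same size class, so it is served from that freelist again

    void* big = resource.Allocate(1024, 8); // larger than MAX_BLOCK_SIZE_BYTES: forwarded to ::operator new
    resource.Deallocate(big, 1024, 8);      // and returned via ::operator delete
    resource.Deallocate(b, 16, 8);
}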

src/test/pool_tests.cpp (new file, 156 lines)

@@ -0,0 +1,156 @@
// Copyright (c) 2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <support/allocators/pool.h>
#include <test/util/poolresourcetester.h>
#include <test/util/random.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>
BOOST_FIXTURE_TEST_SUITE(pool_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(basic_allocating)
{
auto resource = PoolResource<8, 8>();
PoolResourceTester::CheckAllDataAccountedFor(resource);
// first chunk is already allocated
size_t expected_bytes_available = resource.ChunkSizeBytes();
BOOST_TEST(expected_bytes_available == PoolResourceTester::AvailableMemoryFromChunk(resource));
// chunk is used, no more allocation
void* block = resource.Allocate(8, 8);
expected_bytes_available -= 8;
BOOST_TEST(expected_bytes_available == PoolResourceTester::AvailableMemoryFromChunk(resource));
BOOST_TEST(0 == PoolResourceTester::FreeListSizes(resource)[1]);
resource.Deallocate(block, 8, 8);
PoolResourceTester::CheckAllDataAccountedFor(resource);
BOOST_TEST(1 == PoolResourceTester::FreeListSizes(resource)[1]);
// alignment is too small, but the best fitting freelist is used. Nothing is allocated.
void* b = resource.Allocate(8, 1);
BOOST_TEST(b == block); // we got the same block of memory as before
BOOST_TEST(0 == PoolResourceTester::FreeListSizes(resource)[1]);
BOOST_TEST(expected_bytes_available == PoolResourceTester::AvailableMemoryFromChunk(resource));
resource.Deallocate(block, 8, 1);
PoolResourceTester::CheckAllDataAccountedFor(resource);
BOOST_TEST(1 == PoolResourceTester::FreeListSizes(resource)[1]);
BOOST_TEST(expected_bytes_available == PoolResourceTester::AvailableMemoryFromChunk(resource));
// can't use resource because alignment is too big, allocate system memory
b = resource.Allocate(8, 16);
BOOST_TEST(b != block);
block = b;
PoolResourceTester::CheckAllDataAccountedFor(resource);
BOOST_TEST(1 == PoolResourceTester::FreeListSizes(resource)[1]);
BOOST_TEST(expected_bytes_available == PoolResourceTester::AvailableMemoryFromChunk(resource));
resource.Deallocate(block, 8, 16);
PoolResourceTester::CheckAllDataAccountedFor(resource);
BOOST_TEST(1 == PoolResourceTester::FreeListSizes(resource)[1]);
BOOST_TEST(expected_bytes_available == PoolResourceTester::AvailableMemoryFromChunk(resource));
// can't use chunk because size is too big
block = resource.Allocate(16, 8);
PoolResourceTester::CheckAllDataAccountedFor(resource);
BOOST_TEST(1 == PoolResourceTester::FreeListSizes(resource)[1]);
BOOST_TEST(expected_bytes_available == PoolResourceTester::AvailableMemoryFromChunk(resource));
resource.Deallocate(block, 16, 8);
PoolResourceTester::CheckAllDataAccountedFor(resource);
BOOST_TEST(1 == PoolResourceTester::FreeListSizes(resource)[1]);
BOOST_TEST(expected_bytes_available == PoolResourceTester::AvailableMemoryFromChunk(resource));
// it's possible that 0 bytes are allocated; make sure this works. The request is not forwarded to
// operator new: a 0-byte allocation still takes one entry from the first usable freelist (index 1)
void* p = resource.Allocate(0, 1);
BOOST_TEST(0 == PoolResourceTester::FreeListSizes(resource)[1]);
BOOST_TEST(expected_bytes_available == PoolResourceTester::AvailableMemoryFromChunk(resource));
resource.Deallocate(p, 0, 1);
PoolResourceTester::CheckAllDataAccountedFor(resource);
BOOST_TEST(1 == PoolResourceTester::FreeListSizes(resource)[1]);
BOOST_TEST(expected_bytes_available == PoolResourceTester::AvailableMemoryFromChunk(resource));
}
// Allocates from 0 to n bytes, where n exceeds what the PoolResource's pools can hold, and each allocation should work
BOOST_AUTO_TEST_CASE(allocate_any_byte)
{
auto resource = PoolResource<128, 8>(1024);
uint8_t num_allocs = 200;
auto data = std::vector<Span<uint8_t>>();
// allocate an increasing number of bytes
for (uint8_t num_bytes = 0; num_bytes < num_allocs; ++num_bytes) {
uint8_t* bytes = new (resource.Allocate(num_bytes, 1)) uint8_t[num_bytes];
BOOST_TEST(bytes != nullptr);
data.emplace_back(bytes, num_bytes);
// set each byte to num_bytes
std::fill(bytes, bytes + num_bytes, num_bytes);
}
// now that everything is allocated, check that each block still holds the correct values, then give everything back to the resource
uint8_t val = 0;
for (auto const& span : data) {
for (auto x : span) {
BOOST_TEST(val == x);
}
std::destroy(span.data(), span.data() + span.size());
resource.Deallocate(span.data(), span.size(), 1);
++val;
}
PoolResourceTester::CheckAllDataAccountedFor(resource);
}
BOOST_AUTO_TEST_CASE(random_allocations)
{
struct PtrSizeAlignment {
void* ptr;
size_t bytes;
size_t alignment;
};
// makes a bunch of random allocations and gives all of them back in random order.
auto resource = PoolResource<128, 8>(65536);
std::vector<PtrSizeAlignment> ptr_size_alignment{};
for (size_t i = 0; i < 1000; ++i) {
// make it a bit more likely to allocate than deallocate
if (ptr_size_alignment.empty() || 0 != InsecureRandRange(4)) {
// allocate a random item
std::size_t alignment = std::size_t{1} << InsecureRandRange(8); // 1, 2, ..., 128
std::size_t size = (InsecureRandRange(200) / alignment + 1) * alignment; // multiple of alignment
void* ptr = resource.Allocate(size, alignment);
BOOST_TEST(ptr != nullptr);
BOOST_TEST((reinterpret_cast<uintptr_t>(ptr) & (alignment - 1)) == 0);
ptr_size_alignment.push_back({ptr, size, alignment});
} else {
// deallocate a random item
auto& x = ptr_size_alignment[InsecureRandRange(ptr_size_alignment.size())];
resource.Deallocate(x.ptr, x.bytes, x.alignment);
x = ptr_size_alignment.back();
ptr_size_alignment.pop_back();
}
}
// deallocate all the rest
for (auto const& x : ptr_size_alignment) {
resource.Deallocate(x.ptr, x.bytes, x.alignment);
}
PoolResourceTester::CheckAllDataAccountedFor(resource);
}
BOOST_AUTO_TEST_SUITE_END()

src/test/util/poolresourcetester.h (new file, 129 lines)

@@ -0,0 +1,129 @@
// Copyright (c) 2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_TEST_UTIL_POOLRESOURCETESTER_H
#define BITCOIN_TEST_UTIL_POOLRESOURCETESTER_H
#include <support/allocators/pool.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>
/**
* Helper to get access to private parts of PoolResource. Used in unit tests and in the fuzzer.
*/
class PoolResourceTester
{
struct PtrAndBytes {
uintptr_t ptr;
std::size_t size;
PtrAndBytes(const void* p, std::size_t s)
: ptr(reinterpret_cast<uintptr_t>(p)), size(s)
{
}
/**
* defines a sort ordering by the pointer value
*/
friend bool operator<(PtrAndBytes const& a, PtrAndBytes const& b)
{
return a.ptr < b.ptr;
}
};
public:
/**
* Extracts the number of elements per freelist
*/
template <std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
static std::vector<std::size_t> FreeListSizes(const PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& resource)
{
auto sizes = std::vector<std::size_t>();
for (const auto* ptr : resource.m_free_lists) {
size_t size = 0;
while (ptr != nullptr) {
++size;
ptr = ptr->m_next;
}
sizes.push_back(size);
}
return sizes;
}
/**
* How many bytes are still available from the last allocated chunk
*/
template <std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
static std::size_t AvailableMemoryFromChunk(const PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& resource)
{
return resource.m_available_memory_end - resource.m_available_memory_it;
}
/**
* Once all blocks are given back to the resource, tests that the freelists are consistent:
*
* * All data in the freelists must come from the chunks
* * Memory doesn't overlap
* * Each byte in the chunks can be accounted for in either the freelist or as available bytes.
*/
template <std::size_t MAX_BLOCK_SIZE_BYTES, std::size_t ALIGN_BYTES>
static void CheckAllDataAccountedFor(const PoolResource<MAX_BLOCK_SIZE_BYTES, ALIGN_BYTES>& resource)
{
// collect all free blocks by iterating all freelists
std::vector<PtrAndBytes> free_blocks;
for (std::size_t freelist_idx = 0; freelist_idx < resource.m_free_lists.size(); ++freelist_idx) {
std::size_t bytes = freelist_idx * resource.ELEM_ALIGN_BYTES;
auto* ptr = resource.m_free_lists[freelist_idx];
while (ptr != nullptr) {
free_blocks.emplace_back(ptr, bytes);
ptr = ptr->m_next;
}
}
// also add whatever has not yet been used for blocks
auto num_available_bytes = resource.m_available_memory_end - resource.m_available_memory_it;
if (num_available_bytes > 0) {
free_blocks.emplace_back(resource.m_available_memory_it, num_available_bytes);
}
// collect all chunks
std::vector<PtrAndBytes> chunks;
for (const std::byte* ptr : resource.m_allocated_chunks) {
chunks.emplace_back(ptr, resource.ChunkSizeBytes());
}
// now we have all the data from all freelists on one side, and all chunks on the other side.
// To check if all of them match, sort by address and iterate.
std::sort(free_blocks.begin(), free_blocks.end());
std::sort(chunks.begin(), chunks.end());
auto chunk_it = chunks.begin();
auto chunk_ptr_remaining = chunk_it->ptr;
auto chunk_size_remaining = chunk_it->size;
for (const auto& free_block : free_blocks) {
if (chunk_size_remaining == 0) {
assert(chunk_it != chunks.end());
++chunk_it;
assert(chunk_it != chunks.end());
chunk_ptr_remaining = chunk_it->ptr;
chunk_size_remaining = chunk_it->size;
}
assert(free_block.ptr == chunk_ptr_remaining); // ensure addresses match
assert(free_block.size <= chunk_size_remaining); // ensure no overflow
assert((free_block.ptr & (resource.ELEM_ALIGN_BYTES - 1)) == 0); // ensure correct alignment
chunk_ptr_remaining += free_block.size;
chunk_size_remaining -= free_block.size;
}
// ensure we are at the end of the chunks
assert(chunk_ptr_remaining == chunk_it->ptr + chunk_it->size);
++chunk_it;
assert(chunk_it == chunks.end());
assert(chunk_size_remaining == 0);
}
};
#endif // BITCOIN_TEST_UTIL_POOLRESOURCETESTER_H
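
Finally, a short sketch (mirroring test/pool_tests.cpp, not part of the commit) of how the tester's invariant check and freelist introspection are meant to be used:

#include <support/allocators/pool.h>
#include <test/util/poolresourcetester.h>

#include <cassert>

int main()
{
    PoolResource<8, 8> resource;          // a single pool: m_free_lists has 8 / 8 + 1 = 2 entries
    void* p = resource.Allocate(8, 8);
    resource.Deallocate(p, 8, 8);

    // With every block handed back, the chunks must be fully covered by the
    // freelists plus the unused tail of the last chunk.
    PoolResourceTester::CheckAllDataAccountedFor(resource);
    assert(PoolResourceTester::FreeListSizes(resource)[1] == 1); // the one returned 8-byte block
}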