bitcoin/src/support/lockedpool.cpp
TheCharlatan 9d1dbbd4ce scripted-diff: Fix bitcoin_config_h includes
-BEGIN VERIFY SCRIPT-

regex_string='^(?!//).*(AC_APPLE_UNIVERSAL_BUILD|BOOST_PROCESS_USE_STD_FS|CHAR_EQUALS_INT8|CLIENT_VERSION_BUILD|CLIENT_VERSION_IS_RELEASE|CLIENT_VERSION_MAJOR|CLIENT_VERSION_MINOR|COPYRIGHT_HOLDERS|COPYRIGHT_HOLDERS_FINAL|COPYRIGHT_HOLDERS_SUBSTITUTION|COPYRIGHT_YEAR|ENABLE_ARM_SHANI|ENABLE_AVX2|ENABLE_EXTERNAL_SIGNER|ENABLE_SSE41|ENABLE_TRACING|ENABLE_WALLET|ENABLE_X86_SHANI|ENABLE_ZMQ|HAVE_BOOST|HAVE_BUILTIN_CLZL|HAVE_BUILTIN_CLZLL|HAVE_BYTESWAP_H|HAVE_CLMUL|HAVE_CONSENSUS_LIB|HAVE_CXX20|HAVE_DECL_BE16TOH|HAVE_DECL_BE32TOH|HAVE_DECL_BE64TOH|HAVE_DECL_BSWAP_16|HAVE_DECL_BSWAP_32|HAVE_DECL_BSWAP_64|HAVE_DECL_FORK|HAVE_DECL_FREEIFADDRS|HAVE_DECL_GETIFADDRS|HAVE_DECL_HTOBE16|HAVE_DECL_HTOBE32|HAVE_DECL_HTOBE64|HAVE_DECL_HTOLE16|HAVE_DECL_HTOLE32|HAVE_DECL_HTOLE64|HAVE_DECL_LE16TOH|HAVE_DECL_LE32TOH|HAVE_DECL_LE64TOH|HAVE_DECL_PIPE2|HAVE_DECL_SETSID|HAVE_DECL_STRERROR_R|HAVE_DEFAULT_VISIBILITY_ATTRIBUTE|HAVE_DLFCN_H|HAVE_DLLEXPORT_ATTRIBUTE|HAVE_ENDIAN_H|HAVE_EVHTTP_CONNECTION_GET_PEER_CONST_CHAR|HAVE_FDATASYNC|HAVE_GETENTROPY_RAND|HAVE_GETRANDOM|HAVE_GMTIME_R|HAVE_INTTYPES_H|HAVE_LIBADVAPI32|HAVE_LIBCOMCTL32|HAVE_LIBCOMDLG32|HAVE_LIBGDI32|HAVE_LIBIPHLPAPI|HAVE_LIBKERNEL32|HAVE_LIBOLE32|HAVE_LIBOLEAUT32|HAVE_LIBSHELL32|HAVE_LIBSHLWAPI|HAVE_LIBUSER32|HAVE_LIBUUID|HAVE_LIBWINMM|HAVE_LIBWS2_32|HAVE_MALLOC_INFO|HAVE_MALLOPT_ARENA_MAX|HAVE_MINIUPNPC_MINIUPNPC_H|HAVE_MINIUPNPC_UPNPCOMMANDS_H|HAVE_MINIUPNPC_UPNPERRORS_H|HAVE_NATPMP_H|HAVE_O_CLOEXEC|HAVE_POSIX_FALLOCATE|HAVE_PTHREAD|HAVE_PTHREAD_PRIO_INHERIT|HAVE_STDINT_H|HAVE_STDIO_H|HAVE_STDLIB_H|HAVE_STRERROR_R|HAVE_STRINGS_H|HAVE_STRING_H|HAVE_STRONG_GETAUXVAL|HAVE_SYSCTL|HAVE_SYSCTL_ARND|HAVE_SYSTEM|HAVE_SYS_ENDIAN_H|HAVE_SYS_PRCTL_H|HAVE_SYS_RESOURCES_H|HAVE_SYS_SELECT_H|HAVE_SYS_STAT_H|HAVE_SYS_SYSCTL_H|HAVE_SYS_TYPES_H|HAVE_SYS_VMMETER_H|HAVE_THREAD_LOCAL|HAVE_TIMINGSAFE_BCMP|HAVE_UNISTD_H|HAVE_VM_VM_PARAM_H|LT_OBJDIR|PACKAGE_BUGREPORT|PACKAGE_NAME|PACKAGE_STRING|PACKAGE_TARNAME|PACKAGE_URL|PACKAGE_VERSION|PTHREAD_CREATE_JOINABLE|QT_QPA_PLATFORM_ANDROID|QT_QPA_PLATFORM_COCOA|QT_QPA_PLATFORM_MINIMAL|QT_QPA_PLATFORM_WINDOWS|QT_QPA_PLATFORM_XCB|QT_STATICPLUGIN|STDC_HEADERS|STRERROR_R_CHAR_P|USE_ASM|USE_BDB|USE_DBUS|USE_NATPMP|USE_QRCODE|USE_SQLITE|USE_UPNP|_FILE_OFFSET_BITS|_LARGE_FILES)'

exclusion_files=":(exclude)src/minisketch :(exclude)src/crc32c :(exclude)src/secp256k1 :(exclude)src/crypto/sha256_arm_shani.cpp :(exclude)src/crypto/sha256_avx2.cpp :(exclude)src/crypto/sha256_sse41.cpp :(exclude)src/crypto/sha256_x86_shani.cpp"

git grep --perl-regexp --files-with-matches "$regex_string" -- '*.cpp' $exclusion_files | xargs git grep -L "bitcoin-config.h" | while read -r file; do line_number=$(awk -v my_file="$file" '/\/\/ file COPYING or https?:\/\/www.opensource.org\/licenses\/mit-license.php\./ {line = NR} /^\/\// && NR == line + 1 {while(getline && /^\/\//) line = NR} END {print line+1}' "$file"); sed -i "${line_number}i\\\\n\#if defined(HAVE_CONFIG_H)\\n#include <config/bitcoin-config.h>\\n\#endif" "$file"; done;

git grep --perl-regexp --files-with-matches "$regex_string" -- '*.h' $exclusion_files | xargs git grep -L "bitcoin-config.h" | while read -r file; do sed -i "/#define.*_H/a \\\\n\#if defined(HAVE_CONFIG_H)\\n#include <config/bitcoin-config.h>\\n\#endif" "$file"; done;

for file in $(git grep --files-with-matches 'bitcoin-config.h' -- '*.cpp' '*.h' $exclusion_files); do if ! grep -q --perl-regexp "$regex_string" $file; then sed -i '/HAVE_CONFIG_H/{N;N;N;d;}' $file; fi; done;

-END VERIFY SCRIPT-

The first command creates a regular expression for matching all bitcoin-config.h symbols in the following form: ^(?!//).*(AC_APPLE_UNIVERSAL_BUILD|BOOST_PROCESS_USE_STD_FS|...|_LARGE_FILES). It was generated with:
./autogen.sh && printf '^(?!//).*(%s)' $(awk '/^#undef/ {print $2}' src/config/bitcoin-config.h.in | paste -sd "|" -)

The second command holds a list of files and directories that should not be processed. These include subtree directories as well as some crypto files that already get their symbols through the makefile.

The third command checks for missing bitcoin-config headers in .cpp files and adds the header if it is missing.

The fourth command checks for missing bitcoin-config headers in .h files and adds the header if it is missing.

The fifth command checks for unneeded bitcoin-config headers in source files and removes the header if it is unneeded.
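For illustration (this paragraph and the snippet below are not part of the commit message): after the script runs, a .cpp file that uses one of the config symbols but lacked the include gains the following block directly after its license header, and a .h file gains the same block directly after its include guard's #define. A hypothetical foo.cpp would then start like this:

// Copyright (c) ... The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif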
2024-02-13 20:10:44 +00:00


// Copyright (c) 2016-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <support/lockedpool.h>
#include <support/cleanse.h>
#ifdef WIN32
#include <windows.h>
#else
#include <sys/mman.h> // for mmap
#include <sys/resource.h> // for getrlimit
#include <limits.h> // for PAGESIZE
#include <unistd.h> // for sysconf
#endif
#include <algorithm>
#include <limits>
#include <stdexcept>
#include <utility>
#ifdef ARENA_DEBUG
#include <iomanip>
#include <iostream>
#endif
LockedPoolManager* LockedPoolManager::_instance = nullptr;
/*******************************************************************************/
// Utilities
//
/** Align up to power of 2 */
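// For example, align_up(13, 16) == 16 and align_up(32, 16) == 32. The bitmask
// trick below requires `align` to be a power of two, which holds for the chunk
// alignments and page sizes used in this file.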
static inline size_t align_up(size_t x, size_t align)
{
return (x + align - 1) & ~(align - 1);
}
/*******************************************************************************/
// Implementation: Arena
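// Bookkeeping (members declared in support/lockedpool.h): chunks_used maps each
// allocation's start address to its size; size_to_free_chunk is a multimap ordered
// by free-chunk size; chunks_free and chunks_free_end map a free chunk's start and
// end addresses to its entry in size_to_free_chunk, so best-fit allocation and
// coalescing on free both take logarithmic time.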
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
base(base_in), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
// Start with one free chunk that covers the entire arena
auto it = size_to_free_chunk.emplace(size_in, base);
chunks_free.emplace(base, it);
chunks_free_end.emplace(static_cast<char*>(base) + size_in, it);
}
Arena::~Arena()
{
}
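// Example: in a fresh 4096-byte arena with 16-byte alignment, alloc(1000) rounds
// the request up to 1008 bytes and carves it from the end of the single 4096-byte
// free chunk, leaving a 3088-byte free chunk at the arena base.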
void* Arena::alloc(size_t size)
{
// Round to next multiple of alignment
size = align_up(size, alignment);
// Don't handle zero-sized chunks
if (size == 0)
return nullptr;
// Pick a large enough free-chunk. Returns an iterator pointing to the first element that is not less than key.
// This allocation strategy is best-fit. According to "Dynamic Storage Allocation: A Survey and Critical Review",
// Wilson et al. 1995, https://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf, best-fit and first-fit
// policies seem to work well in practice.
auto size_ptr_it = size_to_free_chunk.lower_bound(size);
if (size_ptr_it == size_to_free_chunk.end())
return nullptr;
// Create the used-chunk, taking its space from the end of the free-chunk
const size_t size_remaining = size_ptr_it->first - size;
char* const free_chunk = static_cast<char*>(size_ptr_it->second);
auto allocated = chunks_used.emplace(free_chunk + size_remaining, size).first;
chunks_free_end.erase(free_chunk + size_ptr_it->first);
if (size_ptr_it->first == size) {
// whole chunk is used up
chunks_free.erase(size_ptr_it->second);
} else {
// still some memory left in the chunk
auto it_remaining = size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
chunks_free[size_ptr_it->second] = it_remaining;
chunks_free_end.emplace(free_chunk + size_remaining, it_remaining);
}
size_to_free_chunk.erase(size_ptr_it);
return allocated->first;
}
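// Example of coalescing in free(): if [A,B) and [C,D) are free and the used chunk
// [B,C) is freed, the chunks_free_end lookup finds [A,B) (it ends where the freed
// chunk starts), the chunks_free lookup finds [C,D) (it starts where the freed
// chunk ends), and all three become one free chunk [A,D).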
void Arena::free(void *ptr)
{
// Freeing a null pointer is OK.
if (ptr == nullptr) {
return;
}
// Remove chunk from used map
auto i = chunks_used.find(ptr);
if (i == chunks_used.end()) {
throw std::runtime_error("Arena: invalid or double free");
}
auto freed = std::make_pair(static_cast<char*>(i->first), i->second);
chunks_used.erase(i);
// coalesce freed with previous chunk
auto prev = chunks_free_end.find(freed.first);
if (prev != chunks_free_end.end()) {
freed.first -= prev->second->first;
freed.second += prev->second->first;
size_to_free_chunk.erase(prev->second);
chunks_free_end.erase(prev);
}
// coalesce freed with chunk after freed
auto next = chunks_free.find(freed.first + freed.second);
if (next != chunks_free.end()) {
freed.second += next->second->first;
size_to_free_chunk.erase(next->second);
chunks_free.erase(next);
}
// Add/set space with coalesced free chunk
auto it = size_to_free_chunk.emplace(freed.second, freed.first);
chunks_free[freed.first] = it;
chunks_free_end[freed.first + freed.second] = it;
}
Arena::Stats Arena::stats() const
{
Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
for (const auto& chunk: chunks_used)
r.used += chunk.second;
for (const auto& chunk: chunks_free)
r.free += chunk.second->first;
r.total = r.used + r.free;
return r;
}
#ifdef ARENA_DEBUG
static void printchunk(void* base, size_t sz, bool used) {
std::cout <<
"0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
" 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
" 0x" << used << std::endl;
}
void Arena::walk() const
{
for (const auto& chunk: chunks_used)
printchunk(chunk.first, chunk.second, true);
std::cout << std::endl;
for (const auto& chunk: chunks_free)
printchunk(chunk.first, chunk.second->first, false);
std::cout << std::endl;
}
#endif
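// Illustrative sketch (not part of the original file): an Arena can manage any
// caller-provided region, for example:
//
//   std::vector<char> region(4096);
//   Arena arena(region.data(), region.size(), /*alignment_in=*/16);
//   void* p = arena.alloc(100); // rounded up to 112 bytes
//   arena.free(p);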
/*******************************************************************************/
// Implementation: Win32LockedPageAllocator
#ifdef WIN32
/** LockedPageAllocator specialized for Windows.
*/
class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
Win32LockedPageAllocator();
void* AllocateLocked(size_t len, bool *lockingSuccess) override;
void FreeLocked(void* addr, size_t len) override;
size_t GetLimit() override;
private:
size_t page_size;
};
Win32LockedPageAllocator::Win32LockedPageAllocator()
{
// Determine system page size in bytes
SYSTEM_INFO sSysInfo;
GetSystemInfo(&sSysInfo);
page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
len = align_up(len, page_size);
void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
if (addr) {
// VirtualLock is used to attempt to keep keying material out of swap. Note
// that it does not provide this as a guarantee, but, in practice, memory
// that has been VirtualLock'd almost never gets written to the pagefile
// except in rare circumstances where memory is extremely low.
*lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
}
return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
len = align_up(len, page_size);
memory_cleanse(addr, len);
VirtualUnlock(const_cast<void*>(addr), len);
}
size_t Win32LockedPageAllocator::GetLimit()
{
size_t min, max;
if(GetProcessWorkingSetSize(GetCurrentProcess(), &min, &max) != 0) {
return min;
}
return std::numeric_limits<size_t>::max();
}
#endif
/*******************************************************************************/
// Implementation: PosixLockedPageAllocator
#ifndef WIN32
/** LockedPageAllocator specialized for OSes that don't try to be
* special snowflakes.
*/
class PosixLockedPageAllocator: public LockedPageAllocator
{
public:
PosixLockedPageAllocator();
void* AllocateLocked(size_t len, bool *lockingSuccess) override;
void FreeLocked(void* addr, size_t len) override;
size_t GetLimit() override;
private:
size_t page_size;
};
PosixLockedPageAllocator::PosixLockedPageAllocator()
{
// Determine system page size in bytes
#if defined(PAGESIZE) // defined in limits.h
page_size = PAGESIZE;
#else // assume some POSIX OS
page_size = sysconf(_SC_PAGESIZE);
#endif
}
void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
void *addr;
len = align_up(len, page_size);
addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
return nullptr;
}
if (addr) {
*lockingSuccess = mlock(addr, len) == 0;
#if defined(MADV_DONTDUMP) // Linux
madvise(addr, len, MADV_DONTDUMP);
#elif defined(MADV_NOCORE) // FreeBSD
madvise(addr, len, MADV_NOCORE);
#endif
}
return addr;
}
void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
{
len = align_up(len, page_size);
memory_cleanse(addr, len);
munlock(addr, len);
munmap(addr, len);
}
size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
struct rlimit rlim;
if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
if (rlim.rlim_cur != RLIM_INFINITY) {
return rlim.rlim_cur;
}
}
#endif
return std::numeric_limits<size_t>::max();
}
#endif
/*******************************************************************************/
// Implementation: LockedPool
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in)
: allocator(std::move(allocator_in)), lf_cb(lf_cb_in)
{
}
LockedPool::~LockedPool() = default;
void* LockedPool::alloc(size_t size)
{
std::lock_guard<std::mutex> lock(mutex);
// Don't handle impossible sizes
if (size == 0 || size > ARENA_SIZE)
return nullptr;
// Try allocating from each current arena
for (auto &arena: arenas) {
void *addr = arena.alloc(size);
if (addr) {
return addr;
}
}
// If that fails, create a new one
if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
return arenas.back().alloc(size);
}
return nullptr;
}
void LockedPool::free(void *ptr)
{
std::lock_guard<std::mutex> lock(mutex);
// TODO we can do better than this linear search by keeping a map of arena
// extents to arena, and looking up the address.
for (auto &arena: arenas) {
if (arena.addressInArena(ptr)) {
arena.free(ptr);
return;
}
}
throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}
LockedPool::Stats LockedPool::stats() const
{
std::lock_guard<std::mutex> lock(mutex);
LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
for (const auto &arena: arenas) {
Arena::Stats i = arena.stats();
r.used += i.used;
r.free += i.free;
r.total += i.total;
r.chunks_used += i.chunks_used;
r.chunks_free += i.chunks_free;
}
return r;
}
bool LockedPool::new_arena(size_t size, size_t align)
{
bool locked;
// If this is the first arena, handle this specially: Cap the upper size
// by the process limit. This makes sure that the first arena will at least
// be locked. An exception to this is if the process limit is 0:
// in this case no memory can be locked at all so we'll skip past this logic.
if (arenas.empty()) {
size_t limit = allocator->GetLimit();
if (limit > 0) {
size = std::min(size, limit);
}
}
void *addr = allocator->AllocateLocked(size, &locked);
if (!addr) {
return false;
}
if (locked) {
cumulative_bytes_locked += size;
} else if (lf_cb) { // Call the locking-failed callback if locking failed
if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed.
allocator->FreeLocked(addr, size);
return false;
}
}
arenas.emplace_back(allocator.get(), addr, size, align);
return true;
}
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
{
}
LockedPool::LockedPageArena::~LockedPageArena()
{
allocator->FreeLocked(base, size);
}
/*******************************************************************************/
// Implementation: LockedPoolManager
//
LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator_in):
LockedPool(std::move(allocator_in), &LockedPoolManager::LockingFailed)
{
}
bool LockedPoolManager::LockingFailed()
{
// TODO: log something but how? without including util.h
return true;
}
void LockedPoolManager::CreateInstance()
{
// Using a local static instance guarantees that the object is initialized
// when it's first needed and also deinitialized after all objects that use
// it are done with it. I can think of one unlikely scenario where we may
// have a static deinitialization order problem, but the check in
// LockedPoolManagerBase's destructor helps us detect if that ever happens.
#ifdef WIN32
std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
static LockedPoolManager instance(std::move(allocator));
LockedPoolManager::_instance = &instance;
}
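For context, a minimal usage sketch (not part of the file; the Instance() accessor and the caller below are assumptions based on the declarations in support/lockedpool.h): callers obtain the singleton pool and allocate locked memory for secrets from it.

#include <support/lockedpool.h>

// Hypothetical caller: grab some locked memory for key material and hand it back.
void demo()
{
    LockedPoolManager& pool = LockedPoolManager::Instance(); // lazily constructs the static instance
    if (void* p = pool.alloc(64)) { // served from an mlock()'d / VirtualLock()'d arena when locking succeeds
        // ... store secrets in p ...
        pool.free(p);
    }
}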