#if defined(HAVE_CONFIG_H)
// (build configuration header included here)
#endif

#ifdef WIN32
#define _WIN32_WINNT 0x0501
#define WIN32_LEAN_AND_MEAN 1
#include <windows.h>
#else
#include <sys/mman.h>     // for mmap
#include <sys/resource.h> // for getrlimit
#endif

#include <algorithm>

/** Align x up to a multiple of align, which must be a power of 2. */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
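// Illustrative check, not part of the original file (align must be a power of two):
static_assert(((100 + 16 - 1) & ~(16 - 1)) == 112, "align_up(100, 16) rounds up to 112");
static_assert(((128 + 16 - 1) & ~(16 - 1)) == 128, "already-aligned values are unchanged");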
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
    // Start with one free chunk covering the entire arena
    chunks_free.emplace(base, size_in);
}
void* Arena::alloc(size_t size)
{
    // Round the request up to the arena's minimum chunk alignment
    size = align_up(size, alignment);
    if (size == 0)
        return nullptr;

    // Pick the first free chunk that is large enough
    auto it = std::find_if(chunks_free.begin(), chunks_free.end(),
        [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; });
    if (it == chunks_free.end())
        return nullptr;

    // Create the used chunk, taking its space from the end of the free chunk
    auto alloced = chunks_used.emplace(it->first + it->second - size, size).first;
    if (!(it->second -= size))
        chunks_free.erase(it); // the free chunk was consumed completely
    return reinterpret_cast<void*>(alloced->first);
}
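// Illustration, not in the original: with a 1024-byte arena and 16-byte alignment,
// alloc(100) rounds the request up to 112 and carves it from the END of the free chunk:
//   before: chunks_free = { {base, 1024} }   chunks_used = { }
//   after : chunks_free = { {base, 912} }    chunks_used = { {base + 912, 112} }
// Taking from the end keeps the remaining free chunk's key (its start address) stable.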
/* Extend the chunk referenced by 'it' in place when 'other' starts exactly at its end. */
template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) {
    if (it->first + it->second == other.first) {
        it->second += other.second;
        return true;
    }
    return false;
}
void Arena::free(void *ptr)
{
    // Remove the chunk from the used map; an unknown address indicates a caller bug
    auto i = chunks_used.find(static_cast<char*>(ptr));
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    auto freed = *i;
    chunks_used.erase(i);
    // Return the space to chunks_free, using extend() to coalesce with adjacent free chunks
    auto next = chunks_free.upper_bound(freed.first);
    auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
    if (prev == chunks_free.end() || !extend(prev, freed))
        prev = chunks_free.emplace_hint(next, freed);
    if (next != chunks_free.end() && extend(prev, *next))
        chunks_free.erase(next);
}
Arena::Stats Arena::stats() const
{
    Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
    for (const auto& chunk: chunks_used)
        r.used += chunk.second;
    for (const auto& chunk: chunks_free)
        r.free += chunk.second;
    r.total = r.used + r.free;
    return r;
}
#ifdef ARENA_DEBUG
void printchunk(char* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second, false);
    std::cout << std::endl;
}
#endif // ARENA_DEBUG
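// Usage sketch, illustrative only and not part of the original file: an Arena can
// manage any caller-provided buffer; here an ordinary heap buffer stands in for
// locked pages. Only the public Arena interface documented below is used.
#include <cassert>
#include <vector>

static void arena_demo()
{
    std::vector<char> buf(4096);
    Arena arena(buf.data(), buf.size(), 16); // 16-byte minimum chunk alignment
    void* a = arena.alloc(100);              // rounded up to 112 bytes internally
    void* b = arena.alloc(200);              // rounded up to 208 bytes
    assert(a && b);
    arena.free(a);
    arena.free(b);                           // adjacent free chunks coalesce again
    assert(arena.stats().used == 0);
}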
#ifdef WIN32
/** LockedPageAllocator specialized for Windows. */
class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};
Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine the system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock keeps pages out of the pagefile in practice, but gives no hard guarantee
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
}
size_t Win32LockedPageAllocator::GetLimit()
{
    // TODO: is there a locking limit on Windows, and how can it be queried?
    return std::numeric_limits<size_t>::max();
}
#endif // WIN32
#ifndef WIN32
/** LockedPageAllocator specialized for OSes that don't try to be special snowflakes. */
class PosixLockedPageAllocator: public LockedPageAllocator
{
public:
    PosixLockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};
PosixLockedPageAllocator::PosixLockedPageAllocator()
{
    // Determine the page size
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}
// Some systems do not define MAP_ANONYMOUS yet and only provide the deprecated MAP_ANON
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    void *addr;
    len = align_up(len, page_size);
    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr) {
        *lockingSuccess = mlock(addr, len) == 0;
    }
    return addr;
}
void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    munmap(addr, len);
}
size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
#endif // !WIN32
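// Sketch, not from this file: any object implementing the three LockedPageAllocator
// virtuals can back a LockedPool. A plain-malloc stand-in like this (the class name is
// hypothetical) is handy in tests where real page locking is unavailable.
#include <limits>
#include <new>

class MallocLockedPageAllocator : public LockedPageAllocator
{
public:
    void* AllocateLocked(size_t len, bool* lockingSuccess) override
    {
        *lockingSuccess = true;                    // pretend locking always succeeds
        return ::operator new(len, std::nothrow);  // ordinary heap memory, never actually locked
    }
    void FreeLocked(void* addr, size_t len) override
    {
        ::operator delete(addr);
    }
    size_t GetLimit() override
    {
        return std::numeric_limits<size_t>::max(); // no locking limit for the fake allocator
    }
};
// Example: LockedPool test_pool(std::unique_ptr<LockedPageAllocator>(new MallocLockedPageAllocator()));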
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
    allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
{
}
void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);

    // Don't handle impossible sizes
    if (size == 0 || size > ARENA_SIZE)
        return nullptr;

    // Try allocating from each existing arena first
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new arena and allocate from it
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}
void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // Linear search over the arenas to find the one owning ptr
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}
LockedPool::Stats LockedPool::stats() const
{
    std::lock_guard<std::mutex> lock(mutex);
    LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
    for (const auto &arena: arenas) {
        // Accumulate per-arena usage into the pool-wide totals
        Arena::Stats i = arena.stats();
        r.used += i.used; r.free += i.free; r.total += i.total;
        r.chunks_used += i.chunks_used; r.chunks_free += i.chunks_free;
    }
    return r;
}
bool LockedPool::new_arena(size_t size, size_t align)
{
    bool locked;
    void *addr = allocator->AllocateLocked(size, &locked);
    if (!addr) {
        return false;
    }
    if (locked) {
        cumulative_bytes_locked += size;
    } else if (lf_cb && !lf_cb()) {
        // Locking failed and the callback told us to give up: release the pages
        allocator->FreeLocked(addr, size);
        return false;
    }
    arenas.emplace_back(allocator.get(), addr, size, align);
    return true;
}
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
{
}
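// Usage sketch, not part of this file: callers normally reach the pool through the
// LockedPoolManager singleton (see CreateInstance() below); only the documented
// LockedPool interface is used here.
#include <cstring>

static void locked_pool_demo()
{
    LockedPool& pool = LockedPoolManager::Instance();
    void* secret = pool.alloc(32);       // nullptr if the size is 0, too large, or memory is exhausted
    if (!secret) return;
    std::memset(secret, 0xAB, 32);       // ...use the key material...
    LockedPool::Stats s = pool.stats();  // s.locked reports how many bytes were actually locked
    (void)s;
    pool.free(secret);                   // chunks must be returned to the same pool
}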
void LockedPoolManager::CreateInstance()
{
    // A function-local static guarantees initialization on first use and orderly teardown
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
    LockedPoolManager::_instance = &instance;
}
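// Sketch, an assumption based on the init_flag and _instance members documented below:
// Instance() is typically defined in the header and funnels through std::call_once so
// that CreateInstance() runs exactly once even with concurrent first callers.
//
// LockedPoolManager& LockedPoolManager::Instance()
// {
//     std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
//     return *LockedPoolManager::_instance;
// }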
Member reference (declarations and brief descriptions):

Arena
    Arena(void *base, size_t size, size_t alignment)
    void* alloc(size_t size)
        Allocate size bytes from this arena.
    void free(void *ptr)
        Free a previously allocated chunk of memory.
    Stats stats() const
        Get arena usage statistics.
    char* base
        Base address of arena.
    size_t alignment
        Minimum chunk alignment.
    std::map<char*, size_t> chunks_free
        Map of chunk address to chunk information (free chunks).
    std::map<char*, size_t> chunks_used
        Map of chunk address to chunk information (used chunks).

LockedPageAllocator
    OS-dependent allocation and deallocation of locked/pinned memory pages.
    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0
        Allocate and lock memory pages.
    virtual void FreeLocked(void *addr, size_t len) = 0
        Unlock and free memory pages.
    virtual size_t GetLimit() = 0
        Get the total limit on the amount of memory that may be locked by this process, in bytes.

PosixLockedPageAllocator
    LockedPageAllocator specialized for OSes that don't try to be special snowflakes.
    PosixLockedPageAllocator()
    void* AllocateLocked(size_t len, bool *lockingSuccess) override
    void FreeLocked(void *addr, size_t len) override
    size_t GetLimit() override

LockedPool
    Pool for locked memory chunks.
    LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = 0)
        Create a new LockedPool.
    void* alloc(size_t size)
        Allocate size bytes from this pool.
    void free(void *ptr)
        Free a previously allocated chunk of memory.
    Stats stats() const
        Get pool usage statistics.
    bool new_arena(size_t size, size_t align)
    static const size_t ARENA_SIZE
        Size of one arena of locked memory.
    static const size_t ARENA_ALIGN
        Chunk alignment.
    std::unique_ptr<LockedPageAllocator> allocator
    std::list<LockedPageArena> arenas
    std::mutex mutex
        Mutex protects access to this pool's data structures, including arenas.
    LockingFailed_Callback lf_cb
    size_t cumulative_bytes_locked
    LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align)

LockedPoolManager
    Singleton class to keep track of locked (i.e. non-swappable) memory, for use in std::allocator templates.
    LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator)
    static void CreateInstance()
        Create a new LockedPoolManager specialized to the OS.
    static bool LockingFailed()
        Called when locking fails, warn the user here.
    static LockedPoolManager* _instance
    static std::once_flag init_flag

Related
    bool extend(Iterator it, const Pair &other)
    void memory_cleanse(void *ptr, size_t len)