#ifndef _UTIL_POOL_ALLOCATOR_H_
#define _UTIL_POOL_ALLOCATOR_H_

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <deque>
#include <iostream>
#include <limits>
#include <memory>
#include <new>
#include <unordered_map>
#include <vector>

#if defined(_MSC_VER) || defined(__APPLE__)
#define NOEXCEPT noexcept
#else // _GLIBCXX_USE_NOEXCEPT is libstdc++-specific, so it belongs on this branch
#define NOEXCEPT _GLIBCXX_USE_NOEXCEPT
#endif
namespace util {
// a generic pool allocator singleton, not MT-safe
template <size_t ELEM_SIZE, unsigned CHUNK_SIZE = 4096> class pool_allocator {
public:
    void* allocate(uint64_t id = 0);
    void free(void* p);
    void resize();
    static pool_allocator& get() { // Meyers singleton; getter body reconstructed from the member list below
        static pool_allocator inst;
        return inst;
    }
    size_t get_capacity();
    size_t get_free_entries_count();
    pool_allocator(const pool_allocator&) = delete;
    pool_allocator(pool_allocator&&) = delete;
    pool_allocator& operator=(const pool_allocator&) = delete;
    pool_allocator& operator=(pool_allocator&&) = delete;
    ~pool_allocator();

private:
    pool_allocator() = default; // needed for the singleton; the deleted constructors suppress the implicit one
    using chunk_type = uint8_t[ELEM_SIZE];
    std::vector<std::array<chunk_type, CHUNK_SIZE>*> chunks{};
    std::deque<void*> free_list{};
    std::unordered_map<void*, uint64_t> used_blocks{};
#ifdef HAVE_GETENV // the listing showed two alternative initializers; this guard is assumed
    const bool debug_memory{getenv("TLM_MM_CHECK") != nullptr};
#else
    const bool debug_memory{false};
#endif
};
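For reference, a minimal direct-use sketch (not part of the header; the include path is assumed): one block is taken from and returned to the 64-byte pool singleton.

#include "util/pool_allocator.h" // path assumed for this sketch

int main() {
    auto& pool = util::pool_allocator<64>::get(); // one singleton per (element size, chunk size) pair
    void* blk = pool.allocate(42);                // 42 is only a debug id used in leak reports
    // ... use the zeroed 64-byte block ...
    pool.free(blk);                               // puts the block back on the free list
    return 0;
}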
template <typename T> class stl_pool_allocator {
public:
    typedef T value_type;
    typedef value_type* pointer;
    typedef const value_type* const_pointer;
    typedef value_type& reference;
    typedef const value_type& const_reference;
    typedef std::size_t size_type;
    typedef std::ptrdiff_t difference_type;

    stl_pool_allocator() = default;
    // converting constructor required by the allocator requirements (the listing elided it)
    template <typename U> stl_pool_allocator(stl_pool_allocator<U> const&) NOEXCEPT {}

    ~stl_pool_allocator() NOEXCEPT {}

    pointer address(reference r) { return std::addressof(r); }
    const_pointer address(const_reference r) { return std::addressof(r); }
    pointer allocate(size_type n, const void* = 0) {
        if(n > std::numeric_limits<std::size_t>::max() / sizeof(value_type))
            throw std::bad_array_new_length();
        // The bucket-selection lines were dropped from this listing; dispatching on the
        // request size rounded up to the next power of two is assumed here.
        if(n <= 4096) {
            switch(n <= 1 ? 0 : ilog2(static_cast<uint32_t>(n - 1)) + 1) {
            case 0:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type)>::get().allocate());
            case 1:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 2>::get().allocate());
            case 2:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 4>::get().allocate());
            case 3:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 8>::get().allocate());
            case 4:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 16>::get().allocate());
            case 5:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 32>::get().allocate());
            case 6:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 64>::get().allocate());
            case 7:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 128>::get().allocate());
            case 8:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 256>::get().allocate());
            // larger buckets shrink CHUNK_SIZE to bound the per-chunk footprint
            case 9:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 512, 2048>::get().allocate());
            case 10:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 1024, 1024>::get().allocate());
            case 11:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 2048, 512>::get().allocate());
            case 12:
                return static_cast<value_type*>(util::pool_allocator<sizeof(value_type) * 4096, 256>::get().allocate());
            }
        }
        // requests beyond the largest bucket fall back to the heap
        if(auto p = static_cast<value_type*>(std::malloc(n * sizeof(value_type))))
            return p;
        throw std::bad_alloc();
    }
    void deallocate(T* p, size_type n) noexcept {
        // The body was dropped from this listing. It mirrors allocate(): the same
        // bucket selection hands p back via util::pool_allocator<...>::get().free(p),
        // and oversized blocks obtained from std::malloc() are released with std::free(p).
    }
    size_type max_size() const noexcept { return std::numeric_limits<size_type>::max() / sizeof(T); }

    bool operator==(stl_pool_allocator const&) { return true; }
    bool operator!=(stl_pool_allocator const& oAllocator) { return !operator==(oAllocator); }
};
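Because stl_pool_allocator models the standard allocator requirements, it drops into any container. A short usage sketch, assuming the header above is on the include path:

#include <vector>

int main() {
    std::vector<int, util::stl_pool_allocator<int>> v; // element storage comes from the pools
    v.reserve(100); // 100 ints are served from the 128-element bucket (next power of two)
    for(int i = 0; i < 100; ++i)
        v.push_back(i);
    return 0;
}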
template <size_t ELEM_SIZE, unsigned CHUNK_SIZE> pool_allocator<ELEM_SIZE, CHUNK_SIZE>::~pool_allocator() {
    auto* check = getenv("TLM_MM_CHECK");
    auto diff = get_capacity() - get_free_entries_count();
    if(diff) {
        std::cerr << __FUNCTION__ << ": detected memory leak upon destruction, " << diff << " of " << get_capacity()
                  << " entries are not free'd" << std::endl;
#ifdef _MSC_VER // _stricmp is the MSVC spelling, strcasecmp the POSIX one
        if(check && _stricmp(check, "DEBUG") == 0) {
#else
        if(check && strcasecmp(check, "DEBUG") == 0) {
#endif
            std::vector<std::pair<void*, uint64_t>> elems(used_blocks.begin(), used_blocks.end());
            std::sort(elems.begin(), elems.end(),
                      [](std::pair<void*, uint64_t> const& a, std::pair<void*, uint64_t> const& b) -> bool {
                          return a.second == b.second ? a.first < b.first : a.second < b.second;
                      });
            std::cerr << "The 10 blocks with smallest id are:\n";
            for(size_t i = 0; i < std::min<decltype(i)>(10UL, elems.size()); ++i) {
                std::cerr << "\taddr=" << elems[i].first << ", id=" << elems[i].second << "\n";
            }
        }
    }
}
template <size_t ELEM_SIZE, unsigned CHUNK_SIZE> inline void* pool_allocator<ELEM_SIZE, CHUNK_SIZE>::allocate(uint64_t id) {
    if(!free_list.size())
        resize();
    auto ret = free_list.back();
    free_list.pop_back();
    memset(ret, 0, ELEM_SIZE); // blocks are handed out zero-initialized
    if(debug_memory)
        used_blocks.insert({ret, id});
    return ret;
}
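The id recorded here is what the destructor's leak report prints. A sketch of triggering that report, assuming the build defines HAVE_GETENV so debug_memory is active (the setenv call and the deliberate leak are illustrative only; normally TLM_MM_CHECK is set in the shell):

#include <cstdlib>

int main() {
    setenv("TLM_MM_CHECK", "DEBUG", 1);          // POSIX; must happen before the first get()
    util::pool_allocator<32>::get().allocate(7); // never free'd, on purpose
    return 0; // at exit the pool destructor reports 1 of 4096 entries not free'd, then dumps id=7
}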
template <size_t ELEM_SIZE, unsigned CHUNK_SIZE> inline void pool_allocator<ELEM_SIZE, CHUNK_SIZE>::free(void* p) {
    if(p) {
        free_list.push_back(p);
        if(debug_memory)
            used_blocks.erase(p);
    }
}

template <size_t ELEM_SIZE, unsigned CHUNK_SIZE> inline void pool_allocator<ELEM_SIZE, CHUNK_SIZE>::resize() {
    auto* chunk = new std::array<chunk_type, CHUNK_SIZE>();
    chunks.push_back(chunk);
    for(auto& p : *chunk)
        free_list.push_back(&p[0]); // each element of the new chunk becomes a free block
}

template <size_t ELEM_SIZE, unsigned CHUNK_SIZE> inline size_t pool_allocator<ELEM_SIZE, CHUNK_SIZE>::get_capacity() {
    return chunks.size() * CHUNK_SIZE;
}

template <size_t ELEM_SIZE, unsigned CHUNK_SIZE> inline size_t pool_allocator<ELEM_SIZE, CHUNK_SIZE>::get_free_entries_count() {
    return free_list.size();
}
} // namespace util

#endif /* _UTIL_POOL_ALLOCATOR_H_ */
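Together the two accessors give pool utilization. A short sketch, again assuming the header above:

#include <iostream>

int main() {
    auto& pool = util::pool_allocator<128>::get();
    void* a = pool.allocate();
    void* b = pool.allocate(); // the first allocate() triggered resize(), so capacity is one chunk
    std::cout << "in use: " << pool.get_capacity() - pool.get_free_entries_count()
              << " of " << pool.get_capacity() << " entries\n"; // prints: in use: 2 of 4096 entries
    pool.free(a);
    pool.free(b);
    return 0;
}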
a generic pool allocator singleton, not MT-safe

static pool_allocator& get()
    pool allocator getter
void* allocate(uint64_t id = 0)
    take a block from the pool, tagging it with an optional debug id
void free(void* p)
    put the memory back into the pool
void resize()
    add CHUNK_SIZE elements to the pool
size_t get_capacity()
    get the number of allocated elements
size_t get_free_entries_count()
    get the number of free elements
~pool_allocator()
    destructor; reports entries that were never returned to the pool
pool_allocator(const pool_allocator&) = delete
    deleted copy constructor
pool_allocator(pool_allocator&&) = delete
    deleted move constructor
pool_allocator& operator=(const pool_allocator&) = delete
    deleted copy assignment operator
pool_allocator& operator=(pool_allocator&&) = delete
    deleted move assignment operator
CONSTEXPR unsigned ilog2(uint32_t val)
    truncated integer log2 of val, used to pick the power-of-two bucket
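ilog2 appears in this listing only as a signature; in the header itself it is defined before allocate() uses it. A minimal sketch consistent with truncated integer log2 semantics (the shipped implementation may differ, and CONSTEXPR is assumed to expand to constexpr where the language level allows it):

// truncated integer log2: ilog2(1) == 0, ilog2(2) == 1, ilog2(5) == 2
CONSTEXPR unsigned ilog2(uint32_t val) {
    unsigned ret = 0;
    while(val > 1) {
        val >>= 1; // drop one bit per step and count the steps
        ++ret;
    }
    return ret;
}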