// memorypool.cpp
#include "pocketpy/common/memorypool.hpp"
#include "pocketpy/common/config.h"

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <stdexcept>
  7. namespace pkpy{
  8. struct LinkedListNode{
  9. LinkedListNode* prev;
  10. LinkedListNode* next;
  11. };
  12. template<typename T>
  13. struct DoubleLinkedList{
  14. static_assert(std::is_base_of_v<LinkedListNode, T>);
  15. int _size;
  16. LinkedListNode head;
  17. LinkedListNode tail;
  18. DoubleLinkedList(): _size(0){
  19. head.prev = nullptr;
  20. head.next = &tail;
  21. tail.prev = &head;
  22. tail.next = nullptr;
  23. }
  24. void push_back(T* node){
  25. node->prev = tail.prev;
  26. node->next = &tail;
  27. tail.prev->next = node;
  28. tail.prev = node;
  29. _size++;
  30. }
  31. void push_front(T* node){
  32. node->prev = &head;
  33. node->next = head.next;
  34. head.next->prev = node;
  35. head.next = node;
  36. _size++;
  37. }
  38. void pop_back(){
  39. assert(!empty());
  40. tail.prev->prev->next = &tail;
  41. tail.prev = tail.prev->prev;
  42. _size--;
  43. }
  44. void pop_front(){
  45. assert(!empty());
  46. head.next->next->prev = &head;
  47. head.next = head.next->next;
  48. _size--;
  49. }
  50. T* back() const {
  51. assert(!empty());
  52. return static_cast<T*>(tail.prev);
  53. }
  54. T* front() const {
  55. assert(!empty());
  56. return static_cast<T*>(head.next);
  57. }
  58. void erase(T* node){
  59. node->prev->next = node->next;
  60. node->next->prev = node->prev;
  61. _size--;
  62. }
  63. bool empty() const {
  64. return _size == 0;
  65. }
  66. int size() const { return _size; }
  67. template<typename Func>
  68. void apply(Func func){
  69. LinkedListNode* p = head.next;
  70. while(p != &tail){
  71. LinkedListNode* next = p->next;
  72. func(static_cast<T*>(p));
  73. p = next;
  74. }
  75. }
  76. };
  77. template<int __BlockSize>
  78. struct MemoryPool{
  79. static const int __MaxBlocks = 256*1024 / __BlockSize;
  80. static const int __MinArenaCount = PK_GC_MIN_THRESHOLD*100 / (256*1024);
  81. struct Block{
  82. void* arena;
  83. char data[__BlockSize];
  84. };
  85. struct Arena: LinkedListNode{
  86. Block _blocks[__MaxBlocks];
  87. Block* _free_list[__MaxBlocks];
  88. int _free_list_size;
  89. Arena(): _free_list_size(__MaxBlocks) {
  90. for(int i=0; i<__MaxBlocks; i++){
  91. _blocks[i].arena = this;
  92. _free_list[i] = &_blocks[i];
  93. }
  94. }
  95. bool empty() const { return _free_list_size == 0; }
  96. bool full() const { return _free_list_size == __MaxBlocks; }
  97. size_t allocated_size() const{
  98. return __BlockSize * (__MaxBlocks - _free_list_size);
  99. }
  100. Block* alloc(){
  101. assert(!empty());
  102. _free_list_size--;
  103. return _free_list[_free_list_size];
  104. }
  105. void dealloc(Block* block){
  106. assert(!full());
  107. _free_list[_free_list_size] = block;
  108. _free_list_size++;
  109. }
  110. };
  111. MemoryPool() = default;
  112. MemoryPool(const MemoryPool&) = delete;
  113. MemoryPool& operator=(const MemoryPool&) = delete;
  114. MemoryPool(MemoryPool&&) = delete;
  115. MemoryPool& operator=(MemoryPool&&) = delete;
  116. DoubleLinkedList<Arena> _arenas;
  117. DoubleLinkedList<Arena> _empty_arenas;
  118. void* alloc(size_t size){
  119. PK_GLOBAL_SCOPE_LOCK();
  120. if(size > __BlockSize){
  121. void* p = std::malloc(sizeof(void*) + size);
  122. std::memset(p, 0, sizeof(void*));
  123. return (char*)p + sizeof(void*);
  124. }
  125. if(_arenas.empty()){
  126. _arenas.push_back(new Arena());
  127. }
  128. Arena* arena = _arenas.back();
  129. void* p = arena->alloc()->data;
  130. if(arena->empty()){
  131. _arenas.pop_back();
  132. _empty_arenas.push_back(arena);
  133. }
  134. return p;
  135. }
  136. void dealloc(void* p){
  137. PK_GLOBAL_SCOPE_LOCK();
  138. assert(p != nullptr);
  139. Block* block = (Block*)((char*)p - sizeof(void*));
  140. if(block->arena == nullptr){
  141. std::free(block);
  142. }else{
  143. Arena* arena = (Arena*)block->arena;
  144. if(arena->empty()){
  145. _empty_arenas.erase(arena);
  146. _arenas.push_front(arena);
  147. arena->dealloc(block);
  148. }else{
  149. arena->dealloc(block);
  150. }
  151. }
  152. }
  153. void shrink_to_fit(){
  154. PK_GLOBAL_SCOPE_LOCK();
  155. if(_arenas.size() < __MinArenaCount) return;
  156. _arenas.apply([this](Arena* arena){
  157. if(arena->full()){
  158. _arenas.erase(arena);
  159. delete arena;
  160. }
  161. });
  162. }
  163. ~MemoryPool(){
  164. _arenas.apply([](Arena* arena){ delete arena; });
  165. _empty_arenas.apply([](Arena* arena){ delete arena; });
  166. }
  167. };
// Process-wide 128-byte block pool.
static MemoryPool<128> pool128;

// C-style wrappers around `pool128` exposed to the rest of the codebase.
// NOTE(review): MemoryPool::alloc can throw std::bad_alloc via `new Arena()`;
// under these noexcept wrappers that would call std::terminate — confirm
// this is the intended OOM policy.
void* pool128_alloc(size_t size) noexcept { return pool128.alloc(size); }
void pool128_dealloc(void* p) noexcept { pool128.dealloc(p); }

// Releases fully-unused arenas held by the pool.
void pools_shrink_to_fit() noexcept {
    pool128.shrink_to_fit();
}
  174. template<int BlockSize, int BlockCount>
  175. struct FixedMemoryPool{
  176. struct Block{
  177. char data[BlockSize];
  178. };
  179. static_assert(BlockSize % 4 == 0);
  180. static_assert(sizeof(Block) == BlockSize);
  181. Block _blocks[BlockCount];
  182. Block* _free_list[BlockCount];
  183. Block** _free_list_end;
  184. FixedMemoryPool() {
  185. _free_list_end = _free_list + BlockCount;
  186. for(int i = 0; i < BlockCount; ++i){
  187. _free_list[i] = _blocks + i;
  188. }
  189. }
  190. bool is_valid(void* p){
  191. return p >= _blocks && p < _blocks + BlockCount;
  192. }
  193. void* alloc(){
  194. PK_GLOBAL_SCOPE_LOCK()
  195. if(_free_list_end != _free_list){
  196. --_free_list_end;
  197. return *_free_list_end;
  198. }else{
  199. return std::malloc(BlockSize);
  200. }
  201. }
  202. void dealloc(void* p){
  203. PK_GLOBAL_SCOPE_LOCK()
  204. if(is_valid(p)){
  205. *_free_list_end = static_cast<Block*>(p);
  206. ++_free_list_end;
  207. }else{
  208. std::free(p);
  209. }
  210. }
  211. };
// Fixed pools for compiler expressions and VM frames; block sizes come
// from project config (kPoolExprBlockSize / kPoolFrameBlockSize).
static FixedMemoryPool<kPoolExprBlockSize, 32> PoolExpr;
static FixedMemoryPool<kPoolFrameBlockSize, 128> PoolFrame;

// C-style wrappers exposed to the rest of the codebase.
void* PoolExpr_alloc() noexcept { return PoolExpr.alloc(); }
void PoolExpr_dealloc(void* p) noexcept { PoolExpr.dealloc(p); }
void* PoolFrame_alloc() noexcept { return PoolFrame.alloc(); }
void PoolFrame_dealloc(void* p) noexcept { PoolFrame.dealloc(p); }
  218. } // namespace pkpy