Tools made to assist the MetaCall Project
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

287 lines
6.4 KiB

#pragma once
#include <array>
#include <atomic>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <new>
#include <optional>
#include <thread>
#include <utility>
using namespace std::chrono_literals;
  13. /**
  14. Pensé en un mundo sin memoria, sin tiempo; consideré la posibilidad de un lenguaje
  15. que ignorara los sustantivos, un lenguaje de verbos impersonales y de indeclinables
  16. epítetos. Así fueron muriendo los días y con los días los años, pero algo parecido
  17. a la felicidad ocurrió una mañana. Llovió, con lentitud poderosa.
  18. **/
  19. namespace mct20 {
  20. template<typename T>
  21. class accessor {
  22. public:
  23. accessor(const T* ptr, std::atomic<unsigned int>& incremented_ref)
  24. : pointer(ptr)
  25. , reference_cnt(incremented_ref)
  26. {
  27. assert(reference_cnt.load() != 0);
  28. }
  29. accessor(const accessor& a)
  30. : pointer(a.pointer)
  31. , reference_cnt(a.reference_cnt)
  32. {
  33. reference_cnt.fetch_add(1);
  34. }
  35. accessor(const accessor&& a)
  36. : pointer(a.pointer)
  37. , reference_cnt(a.reference_cnt)
  38. {
  39. reference_cnt.fetch_add(1);
  40. }
  41. operator const T&() {
  42. return *pointer;
  43. }
  44. ~accessor() {
  45. reference_cnt.fetch_sub(1);
  46. }
  47. private:
  48. const T* pointer;
  49. std::atomic<unsigned int>& reference_cnt;
  50. };
  51. namespace _details_ {
  52. #ifdef __cpp_lib_hardware_interference_size
  53. constexpr size_t predictable_padding = std::hardware_constructive_interference_size;
  54. #else
  55. // Wild guess, may be suboptimal or plain wrong
  56. constexpr size_t predictable_padding = 128;
  57. #endif
  58. size_t rotl(size_t a, uint8_t b) {
  59. b%=sizeof(size_t)*8;
  60. return a << b | a >> (sizeof(size_t)*8 - b);
  61. }
  62. template<size_t against>
  63. constexpr size_t alignment =
  64. (against%predictable_padding!=0)*predictable_padding
  65. + (against/predictable_padding)*predictable_padding;
  66. template<typename K, typename V>
  67. class bucket {
  68. constexpr static uint32_t delete_mode = 65536;
  69. std::atomic<uint32_t> delete_lock;
  70. void reader_lock() {
  71. while(delete_lock.fetch_add(1) >= delete_mode) {
  72. delete_lock.fetch_sub(1);
  73. std::this_thread::yield();
  74. }
  75. return;
  76. }
  77. void writer_lock() {
  78. while(delete_lock.fetch_add(delete_mode) >= delete_mode) {
  79. delete_lock.fetch_sub(delete_mode);
  80. std::this_thread::yield();
  81. }
  82. while(delete_lock.load() != delete_mode) {
  83. }
  84. return;
  85. }
  86. void reader_unlock() {
  87. delete_lock.fetch_sub(1);
  88. }
  89. void writer_unlock() {
  90. delete_lock.fetch_sub(delete_mode);
  91. }
  92. struct RGuard {
  93. bucket& master;
  94. RGuard(bucket& m)
  95. : master(m)
  96. {master.reader_lock();}
  97. ~RGuard()
  98. {master.reader_unlock();}
  99. };
  100. struct WGuard {
  101. bucket& master;
  102. WGuard(bucket& m)
  103. : master(m)
  104. {master.writer_lock();}
  105. ~WGuard()
  106. {master.writer_unlock();}
  107. };
  108. public:
  109. bucket()
  110. : start{nullptr}
  111. {}
  112. void remove(size_t hash, const K& key) {
  113. WGuard _g(*this);
  114. auto it = start.load();
  115. auto prev = &start;
  116. do{
  117. if(it == nullptr) return;
  118. while(it->contents.hash != hash)
  119. {
  120. prev = reinterpret_cast<node_ptr*>(&(it->contents.next));
  121. it = (node*)it->contents.next.load();
  122. if(it == nullptr) return;
  123. }
  124. if(it->contents.key == key) {
  125. prev->store(reinterpret_cast<node*>(it->contents.next.load()));
  126. it->contents.references.fetch_sub(1);
  127. while(it->contents.references.load()!=0) {
  128. std::this_thread::yield();
  129. }
  130. delete it->contents.ptr;
  131. delete it;
  132. return;
  133. }
  134. prev = reinterpret_cast<node_ptr*>(&(it->contents.next));
  135. it = (node*)it->contents.next.load();
  136. } while(true);
  137. }
  138. void push(size_t hash, const K& key, const V& value) {
  139. RGuard _g(*this);
  140. auto t = new node{
  141. .contents = node_contents{
  142. .key{key},
  143. .ptr{new V{value}},
  144. .hash{hash},
  145. .references{1}
  146. }
  147. };
  148. t->contents.next.store(t);
  149. node* expect;
  150. do {
  151. expect = start.load();
  152. t->contents.next.store(expect);
  153. } while(
  154. !std::atomic_compare_exchange_strong(
  155. &start,
  156. &expect,
  157. t
  158. )
  159. );
  160. }
  161. std::optional<accessor<V>> get(const size_t hash, const K& key) {
  162. RGuard _g(*this);
  163. auto v = start.load();
  164. while(v) {
  165. if(v->contents.references.fetch_add(1)!=0)
  166. {
  167. if(v->contents.hash == hash) {
  168. if(v->contents.key == key) {
  169. return accessor<V>(
  170. v->contents.ptr,
  171. v->contents.references
  172. );
  173. } else {
  174. auto n = reinterpret_cast<node*>(v->contents.next.load());
  175. v->contents.references.fetch_sub(1);
  176. v = n;
  177. }
  178. } else {
  179. auto n = reinterpret_cast<node*>(v->contents.next.load());
  180. v->contents.references.fetch_sub(1);
  181. v = n;
  182. }
  183. }
  184. else
  185. {
  186. auto n = reinterpret_cast<node*>(v->contents.next.load());
  187. v->contents.references.fetch_sub(1);
  188. v = n;
  189. }
  190. }
  191. return std::nullopt;
  192. }
  193. struct node_contents{
  194. std::atomic<void*> next;
  195. const K key;
  196. const V* ptr;
  197. size_t hash;
  198. std::atomic<unsigned int> references;
  199. };
  200. using node = union {
  201. alignas(alignment<sizeof(node_contents)>) node_contents contents;
  202. };
  203. using node_ptr = std::atomic<node*>;
  204. node_ptr start;
  205. };
  206. }
  207. template<typename K, typename V, size_t bucket_count, typename hash = std::hash<K>>
  208. class lfhmap {
  209. using bucket = _details_::bucket<K, V>;
  210. public:
  211. std::optional<accessor<V>> get(const K& key) {
  212. auto l = hash{}(key);
  213. auto ret = buckets[l%bucket_count].get(l, key);
  214. if(ret) return ret;
  215. l = _details_::rotl(l, sizeof(size_t)*4);
  216. return buckets[l%bucket_count].get(l, key);
  217. }
  218. void set(const K& key, const V& value) {
  219. const auto l = hash{}(key);
  220. auto& ref = buckets[l%bucket_count];
  221. if(ref.start.load() == nullptr)
  222. {
  223. ref.push(l, key, value);
  224. return;
  225. }
  226. const auto l2 = _details_::rotl(l, sizeof(size_t)*4);
  227. auto& ref2 = buckets[l2%bucket_count];
  228. if(ref2.start.load() == nullptr)
  229. {
  230. ref2.push(l2, key, value);
  231. return;
  232. }
  233. if((l^l2)&1) {
  234. ref.push(l, key, value);
  235. } else {
  236. ref2.push(l2, key, value);
  237. }
  238. return;
  239. }
  240. void remove(const K& key) {
  241. const auto l = hash{}(key);
  242. auto& ref = buckets[l%bucket_count];
  243. ref.remove(l, key);
  244. const auto l2 = _details_::rotl(l, sizeof(size_t)*4);
  245. auto& ref2 = buckets[l2%bucket_count];
  246. ref2.remove(l2, key);
  247. return;
  248. }
  249. lfhmap() {
  250. for(auto& a : buckets) {
  251. a.start = nullptr;
  252. }
  253. }
  254. private:
  255. std::array<bucket, bucket_count> buckets;
  256. };
  257. }