A C++ library for very fast, allocation-free logging.
Vous ne pouvez pas sélectionner plus de 25 sujets Les noms de sujets doivent commencer par une lettre ou un nombre, peuvent contenir des tirets ('-') et peuvent comporter jusqu'à 35 caractères.

200 lignes
5.9 KiB

#pragma once
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <new>
#include <optional>
#include <stdexcept>
#include <type_traits>
#ifdef __cpp_concepts
#include <concepts>
#endif
#ifdef __cpp_lib_hardware_interference_size
// Pad to the larger of the two interference sizes so that a padded object
// can neither destructively share a cache line nor be split across one.
static constexpr size_t max_interference_size = std::max(std::hardware_constructive_interference_size, std::hardware_destructive_interference_size);
static constexpr size_t line_length = std::hardware_constructive_interference_size;
#else
// Fallback for toolchains that do not provide the interference-size
// constants; 128/64 bytes match common x86-64 implementations.
static constexpr size_t max_interference_size = 128;
static constexpr size_t line_length = 64;
#endif
// An atomic padded out to a full interference-size slot so that adjacent
// instances never false-share a cache line.
template<typename T>
struct alignas(max_interference_size) padded_atomic final {
    std::atomic<T> value;
};
// Byte offset into the disruptor's data region.
using offset_t = size_t;
// Defined in exactly one translation unit elsewhere; presumably the OS page
// size -- TODO(review): confirm where this is initialized.
extern const size_t page_size;
// How an overflow strategy reacts when a cursor would cross its fence.
enum class overflow_response_t {
    must_wait,
    must_overflow
};
  24. #ifdef __cpp_concepts
  25. template<typename T>
  26. concept OverflowStrategyType = requires (T strategy) {
  27. {T::on_overflow} -> std::same_as<overflow_response_t>;
  28. {strategy.wait()};
  29. };
  30. #endif
// Tag type: request a contiguous (non-wrapping) reservation.
struct force_contiguous_mode {};
// Half-open offset range [start, end) handed out by reserve/conclude calls.
struct token_t {offset_t start; offset_t end;};
  33. struct disruptor_exception : public std::runtime_error {
  34. disruptor_exception() : std::runtime_error("Unknown error disruptor") {}
  35. explicit disruptor_exception(const char* str) : std::runtime_error(str) {}
  36. };
  37. template<typename OverflowStrategy>
  38. #ifdef __cpp_concepts
  39. requires OverflowStrategyType<OverflowStrategy>
  40. #endif
  41. class disruptor
  42. {
  43. char* buffer;
  44. size_t buffer_size;
  45. std::atomic<offset_t>& read_trailer;
  46. std::atomic<offset_t>& read_lead;
  47. std::atomic<offset_t>& write_trailer;
  48. std::atomic<offset_t>& write_lead;
  49. offset_t& max_offset;
  50. char* data_buffer;
  51. public:
  52. disruptor(char* _buffer, const size_t _buffer_size)
  53. : buffer(_buffer)
  54. , buffer_size(_buffer_size)
  55. , read_trailer (*reinterpret_cast<std::atomic<offset_t>*>(_buffer+max_interference_size*0))
  56. , read_lead (*reinterpret_cast<std::atomic<offset_t>*>(_buffer+max_interference_size*1))
  57. , write_trailer(*reinterpret_cast<std::atomic<offset_t>*>(_buffer+max_interference_size*2))
  58. , write_lead (*reinterpret_cast<std::atomic<offset_t>*>(_buffer+max_interference_size*3))
  59. , max_offset (*reinterpret_cast<offset_t*> (_buffer+max_interference_size*4))
  60. {
  61. if(buffer_size <= page_size) throw disruptor_exception("buffer size too small to build a disruptor");
  62. data_buffer = buffer+max_interference_size*5;
  63. if(data_buffer <= buffer+page_size) {
  64. data_buffer = buffer+page_size;
  65. max_offset = buffer_size - page_size;
  66. } else if(data_buffer > buffer+page_size) {
  67. size_t ctr = 0;
  68. do {
  69. ctr++;
  70. } while(data_buffer > buffer+page_size*ctr);
  71. data_buffer = buffer+page_size*ctr;
  72. max_offset = buffer_size - page_size*ctr;
  73. }
  74. }
  75. template<bool must_be_contiguous = false>
  76. std::optional<token_t> try_advance(
  77. std::atomic<offset_t>& lead,
  78. std::atomic<offset_t> fence,
  79. offset_t amount
  80. ) {
  81. offset_t old_offset = lead.load(std::memory_order_relaxed);
  82. offset_t new_offset = old_offset + amount;
  83. offset_t fence_v = fence.load(std::memory_order_relaxed);
  84. // Check if we jumped the fence
  85. if(fence_v <= new_offset && fence_v > old_offset) {
  86. goto handle_fence;
  87. }
  88. // Check if we jumped the fence while overflowing
  89. if(new_offset >= max_offset) {
  90. if constexpr(must_be_contiguous) {
  91. new_offset = amount;
  92. } else {
  93. new_offset %= max_offset;
  94. }
  95. if(fence_v <= new_offset) {
  96. goto handle_fence;
  97. }
  98. }
  99. goto handle_fence_end;
  100. handle_fence:
  101. if constexpr (OverflowStrategy::on_overflow == overflow_response_t::must_wait) {
  102. return std::nullopt;
  103. } else if constexpr (OverflowStrategy::on_overflow == overflow_response_t::must_overflow) {
  104. if(!fence.compare_exchange_weak(fence_v, new_offset, std::memory_order_release, std::memory_order_relaxed)) {
  105. return std::nullopt;
  106. } else {
  107. offset_t v;
  108. do {
  109. v = read_lead.load(std::memory_order_acquire);
  110. if(v >= new_offset) break;
  111. } while(read_lead.compare_exchange_weak(v, new_offset, std::memory_order_release, std::memory_order_relaxed));
  112. }
  113. } else {
  114. static_assert(
  115. OverflowStrategy::on_overflow == overflow_response_t::must_wait
  116. || OverflowStrategy::on_overflow == overflow_response_t::must_overflow
  117. );
  118. }
  119. handle_fence_end:
  120. if(!lead.compare_exchange_weak(old_offset, new_offset, std::memory_order_acquire, std::memory_order_relaxed)) {
  121. return std::nullopt;
  122. }
  123. return token_t{old_offset, new_offset};
  124. }
  125. token_t reserve_write(size_t sz) {
  126. std::optional<token_t> tok;
  127. OverflowStrategy waiter;
  128. while(true) {
  129. tok = try_advance(write_lead, read_trailer, sz);
  130. if(tok) break;
  131. waiter.wait();
  132. }
  133. return tok.value();
  134. }
  135. token_t reserve_write(size_t sz, force_contiguous_mode) {
  136. std::optional<token_t> tok;
  137. OverflowStrategy waiter;
  138. while(true) {
  139. tok = try_advance<true>(write_lead, read_trailer, sz);
  140. if(tok) break;
  141. waiter.wait();
  142. }
  143. return tok.value();
  144. }
  145. void conclude_write(token_t tok) noexcept(std::is_nothrow_invocable_v<typename OverflowStrategy::wait>) {
  146. OverflowStrategy waiter;
  147. while(!write_trailer.compare_exchange_weak(tok.start, tok.end, std::memory_order_release, std::memory_order_relaxed)) {
  148. waiter.wait();
  149. }
  150. }
  151. token_t reserve_read() {
  152. offset_t old_offset = read_lead.load(std::memory_order_relaxed);
  153. offset_t new_offset = write_trailer.load(std::memory_order_relaxed);
  154. if(old_offset > new_offset) new_offset = 0;
  155. OverflowStrategy waiter;
  156. while(!read_lead.compare_exchange_weak(old_offset, new_offset, std::memory_order_acquire, std::memory_order_relaxed)) {
  157. waiter.wait();
  158. }
  159. return token_t{old_offset, new_offset};
  160. }
  161. void conclude_read(token_t tok) noexcept(std::is_nothrow_invocable_v<typename OverflowStrategy::wait>) {
  162. OverflowStrategy waiter;
  163. while(!read_trailer.compare_exchange_weak(tok.start, tok.end, std::memory_order_release, std::memory_order_relaxed)) {
  164. waiter.wait();
  165. }
  166. }
  167. };
  168. struct OverflowWait {
  169. static constexpr overflow_response_t on_overflow = overflow_response_t::must_wait;
  170. void wait() {
  171. #ifdef __clang__ or __GNUC__
  172. __asm__("nop\n\t");
  173. #elif _MSC_VER
  174. __nop;
  175. #endif
  176. }
  177. };