#include "registry.h"
|
|
#include "disruptor.h"
|
|
#include "sl/register.h"
|
|
|
|
#include <concepts>
|
|
#include <atomic>
|
|
#include <unordered_map>
|
|
#include <functional>
|
|
#include <any>
|
|
|
|
|
|
/**
 * @brief A fast semaphore exclusion handler WITHOUT deadlock detection or yielding
 */
template<std::integral T, T default_increment, T maximum_increment>
class fast_semaphore {
    std::atomic<T> flag{}; ///< This is our counter

public:
    fast_semaphore() = default;
    fast_semaphore(fast_semaphore&) = delete;
    fast_semaphore(fast_semaphore&&) = delete;

    /// Three things may happen when trying to unlock
    enum class tristate {
        success = 1,  ///< The unlocking was successful
        timing  = 0,  ///< Someone interfered with the unlocking
        error   = -1  ///< The unlocking would over-unlock the semaphore
    };

    /// We try locking until we succeed
    template<T increment = default_increment>
    void lock() {
        while(not try_lock<increment>());
    }

    /// For locking, we try to atomically increment the counter while keeping it below the set limit
    template<T increment = default_increment>
    [[nodiscard]] bool try_lock() {
        T expect = flag.load(std::memory_order::acquire);
        T target = expect + increment;
        if(target > maximum_increment) return false;
        return flag.compare_exchange_strong(expect, target, std::memory_order::release);
    }

    /// Similarly to locking, we try unlocking until we succeed (or reach an invalid state)
    template<T increment = default_increment>
    void unlock() {
        tristate v;
        do { v = try_unlock<increment>(); }
        while(v == tristate::timing);
        if(v != tristate::success) {
            throw std::runtime_error("Over-unlock detected: potential double unlock");
        }
    }

    /// Unlocking is the reverse of locking; we return an error if the counter would drop below zero
    template<T increment = default_increment>
    [[nodiscard]] tristate try_unlock() {
        T expect = flag.load(std::memory_order::relaxed);
        if(expect < increment) return tristate::error;
        T target = expect - increment;
        return flag.compare_exchange_strong(expect, target, std::memory_order::release) ? tristate::success : tristate::timing;
    }
};
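
// Illustrative usage sketch (comment only, not compiled): making the default
// increment equal to the maximum yields a binary semaphore, so a second lock()
// attempt spins until the first holder unlocks.
//
//     fast_semaphore<int32_t, 1, 1> gate;
//     gate.lock();                   // counter goes 0 -> 1
//     bool again = gate.try_lock();  // false: 1 + 1 would exceed the maximum of 1
//     gate.unlock();                 // counter returns to 0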

using rw_lock_type = fast_semaphore<int32_t, 256, 256>;
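
// Reader/writer convention built on the counting semaphore: readers (lock_handler_read)
// acquire with an increment of 1, writers (lock_handler_write) with the default increment,
// which equals the maximum of 256. Up to 256 readers may hold the lock concurrently,
// while a writer can only acquire it when the count is zero, excluding readers and
// other writers alike.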

class lock_handler_read {
    rw_lock_type& ref;

public:
    explicit lock_handler_read(rw_lock_type& _ref) : ref(_ref) {
        while(not ref.try_lock<1>());
    }

    ~lock_handler_read() {
        while(ref.try_unlock<1>() != rw_lock_type::tristate::success);
    }
};

class lock_handler_write {
    rw_lock_type& ref;

public:
    explicit lock_handler_write(rw_lock_type& _ref) : ref(_ref) {
        while(not ref.try_lock());
    }

    ~lock_handler_write() {
        while(ref.try_unlock() != rw_lock_type::tristate::success);
    }
};

static rw_lock_type registry_rw_lock;

/// One registry entry: type-erased handles into a single disruptor instance.
struct registry_slab {
    int id;
    std::string name;
    std::function<token_t(size_t)> reserve_write;
    std::function<token_t(size_t)> reserve_write_c_align;
    std::function<void(token_t)> conclude_write;
    std::any disruptor;
};
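
// Illustrative write cycle through a slab's type-erased adapters (comment only, not
// compiled; the underlying disruptor's API is not assumed here, and 'payload_size' is
// a placeholder). Given a registry_slab& slab:
//
//     token_t tok = slab.reserve_write(payload_size); // reserve a slot for the payload
//     // ... fill the region identified by 'tok' ...
//     slab.conclude_write(tok);                       // publish the reserved slot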

/**
 * @internal used because we need pointer stability of the stored slabs
 * @see sl_transaction
 */
static std::unordered_map<int, registry_slab> registry_map;
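
// Illustrative locking discipline for the registry (comment only, not compiled;
// 'some_id' is a placeholder). Shared lookups take the read guard, mutations the
// write guard, both via the RAII handlers defined above:
//
//     {
//         lock_handler_read guard{registry_rw_lock};
//         auto it = registry_map.find(some_id);
//         // ... read-only use of it->second ...
//     }
//     {
//         lock_handler_write guard{registry_rw_lock};
//         registry_map.erase(some_id);
//     }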

BufferStrategyInternal::buffer_type BufferStrategyInternal::build_buffer(size_t) {
    return {};
}

BufferStrategyShared::buffer_type BufferStrategyShared::build_buffer(size_t) {
    return {};
}

BufferStrategyExternal::buffer_type BufferStrategyExternal::build_buffer(size_t) {
    return {};
}

void SinkStrategyDirect::write(int fd, std::string_view data) {
}

void SinkStrategyFastest::write(int fd, std::string_view data) {
}

void SinkStrategyMmaped::write(int fd, std::string_view data) {
}

void SinkStrategyExternal::write(int fd, std::string_view data) {
}

void OverflowStrategyWait::wait() {
}

void OverflowStrategyContinue::wait() {
}

std::pair<std::string_view, std::string_view> OutputStrategyTimed::chunk(std::string_view) {
    return {};
}

int OutputStrategyTimed::on_write_completed_event(std::string_view, int) {
    return 0;
}

std::pair<std::string_view, std::string_view> OutputStrategySized::chunk(std::string_view) {
    return {};
}

int OutputStrategySized::on_write_completed_event(std::string_view, int) {
    return 0;
}

std::pair<std::string_view, std::string_view> OutputStrategySimple::chunk(std::string_view) {
    return {};
}

int OutputStrategySimple::on_write_completed_event(std::string_view, int) {
    return 0;
}