
Refactoring after code review

Branch: channel
Ludovic 'Archivist' Lagouardette, 3 years ago
Commit: ac9e33ad13
21 changed files with 500 additions and 501 deletions
  1. +5 -2    Makefile
  2. +2 -0    include/gp/algorithms/cvref.hpp
  3. +6 -16   include/gp/algorithms/min_max.hpp
  4. +2 -0    include/gp/algorithms/min_of.hpp
  5. +1 -14   include/gp/algorithms/modifiers.hpp
  6. +2 -0    include/gp/containers/probabilistic/quotient_filter.hpp
  7. +0 -0    include/gp/ipc/envelope/cbor.hpp
  8. +6 -3    include/gp/ipc/file_description.hpp
  9. +2 -0    include/gp/math/rendering/renderer.hpp
  10. +58 -56  include/gp/system/platforms/gcc-x86_64.hpp
  11. +49 -48  include/gp/system/process_data.hpp
  12. +0 -104  include/gp/system/runqueue.hpp
  13. +38 -37  include/gp/system/scheduler.hpp
  14. +80 -77  include/gp/system/scheduling/simple_scheduling.hpp
  15. +127 -126 include/gp/system/system.hpp
  16. +111 -0  include/gp/system/task_queue.hpp
  17. +0 -1    include/gp/utils/iterator.hpp
  18. +5 -11   tests.cpp
  19. +1 -1    tests/cbor_test.cpp
  20. +4 -4    tests/channel_test.cpp
  21. +1 -1    tests/test_scaffold.h

Makefile (+5 -2)

@@ -1,5 +1,5 @@
CXX= clang++
CXXFLAGS= --std=c++2a -O0 -g -pthread -DGP_TESTS -DFUZZ_STRENGTH=100000 -DNO_BENCH=0 -pedantic \
CXXFLAGS= --std=c++20 -O2 -g -pthread -DGP_TESTS -DFUZZ_STRENGTH=100000 -DNO_BENCH=0 -pedantic \
-fprofile-instr-generate -fcoverage-mapping -Wno-unknown-attributes -fno-omit-frame-pointer \
EVERY_USEFUL_FILE= $(shell find include/ -name "*.hpp" -type "f")
@@ -7,6 +7,9 @@ EVERY_TEST_FILE= $(shell find tests/ -name "*.cpp" -type "f")
# -fsanitize=address -fsanitize-blacklist=blacklist.txt
all: tests
docs: $(EVERY_USEFUL_FILE)
doxygen doxy.config
tests: bin/tests
LLVM_PROFILE_FILE="./bin/tests.profraw" ASAN_SYMBOLIZER_PATH=/usr/bin/llvm-symbolizer ./bin/tests
@llvm-profdata merge -sparse ./bin/tests.profraw -o ./bin/tests.profdata
@@ -15,7 +18,7 @@ tests: bin/tests
bin/tests: tests.cpp $(EVERY_TEST_FILE) $(EVERY_USEFUL_FILE) ./tests/test_scaffold.h
@mkdir -p $(@D)
$(CXX) $(CXXFLAGS) -Itests -Iinclude tests.cpp -o $@
$(CXX) $(CXXFLAGS) -Itests -Iinclude tests.cpp $(EVERY_TEST_FILE) -o $@
clean: ./bin
@rm -rf $<

include/gp/algorithms/cvref.hpp (+2 -0)

@@ -1,5 +1,7 @@
#pragma once
#include <type_traits>
namespace gp{
template< class T >
struct remove_cvref {

include/gp/algorithms/min_max.hpp (+6 -16)

@@ -2,33 +2,31 @@
namespace gp{
template<typename T, typename U, typename ...rest>
constexpr T max(T first, U second, rest... args)
constexpr T min(T first, U second, rest... args)
{
if constexpr (sizeof...(args) == 0)
{
return first > second ? first : second;
return first < second ? first : second;
}
else
{
return max(first > second ? first : second, args...);
return min(first < second ? first : second, args...);
}
}
template<typename T, typename U, typename ...rest>
constexpr T min(T first, U second, rest... args)
constexpr T max(T first, U second, rest... args)
{
if constexpr (sizeof...(args) == 0)
{
return first < second ? first : second;
return first > second ? first : second;
}
else
{
return min(first < second ? first : second, args...);
return max(first > second ? first : second, args...);
}
}
template<typename T>
constexpr T clamp(T first, T value, T last)
{
@@ -37,11 +35,3 @@ namespace gp{
return value;
}
}
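
The hunk above reorders the two definitions so that min comes before max; both keep their meaning, min folding its arguments with < and max with >. A compile-time spot check, assuming the repository's -Iinclude path (illustrative, not part of the commit):

#include <gp/algorithms/min_max.hpp>

// The variadic overloads fold pairwise, so these hold for any arity >= 2.
static_assert(gp::min(3, 1, 2) == 1, "min folds with <");
static_assert(gp::max(3, 1, 2) == 3, "max folds with >");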

include/gp/algorithms/min_of.hpp (+2 -0)

@@ -6,8 +6,10 @@
namespace gp{
// TODO: this goes in functional
template<typename T>
T identity(T v){return v;}
template<typename T>
T& identity(T& v){return v;}

include/gp/algorithms/modifiers.hpp (+1 -14)

@@ -7,9 +7,7 @@
#include <type_traits>
namespace gp {
// TODO: this goes into functional
template<typename F, typename ... Args>
auto bind_front(F&& func, Args&&... arg_parent)
{
@@ -17,15 +15,4 @@ namespace gp {
return func(arg_parent..., argv...);
};
}
}
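
The surviving bind_front fixes the leading arguments of a callable and forwards the rest at call time (the TODO earmarks it for a functional module). A self-contained sketch of that pattern with simplified by-value captures; only the bind_front name and call shape come from the diff:

#include <cstdio>

template<typename F, typename... Args>
auto bind_front(F func, Args... arg_parent)
{
    // Capture the callable and the leading arguments by value,
    // then append the call-site arguments behind them.
    return [func, arg_parent...](auto&&... argv) {
        return func(arg_parent..., argv...);
    };
}

int main()
{
    auto add = [](int a, int b) { return a + b; };
    auto add_ten = bind_front(add, 10); // leading argument fixed to 10
    std::printf("%d\n", add_ten(5));    // prints 15
}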

include/gp/containers/probabilistic/quotient_filter.hpp (+2 -0)

@@ -100,6 +100,8 @@ namespace gp {
rem_t r : remainder;
};
static_assert(!(sizeof(size_t) <= 4 && magnitude > 31), "Incompatible size for quotient_filter: the filter would fill the address space");
gp::array<
node,
1 << magnitude

include/gp/ipc/enveloppe/cbor.hpp → include/gp/ipc/envelope/cbor.hpp (renamed)


include/gp/ipc/file_description.hpp (+6 -3)

@@ -6,6 +6,9 @@
namespace gp {
namespace system {
class system;
}
template<typename> class buffer;
@@ -18,9 +21,9 @@ public:
virtual void get_attr() = 0;
virtual void seek() = 0;
virtual void close() = 0;
static file_description* open(class system&, gp::buffer<std::byte>);
static file_description* create(class system&, gp::buffer<std::byte>);
static file_description* remove(class system&, gp::buffer<std::byte>);
static file_description* open(system::system&, gp::buffer<std::byte>);
static file_description* create(system::system&, gp::buffer<std::byte>);
static file_description* remove(system::system&, gp::buffer<std::byte>);
};
}
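
With the system class now nested in the gp::system namespace, the old `class system&` parameters would have declared a fresh gp::system type rather than naming the real one, so the hunk forward-declares the class in its owning namespace and spells the parameters out. The shape of the fix in isolation (hypothetical names, for illustration only):

namespace gp {
    namespace system {
        class system; // forward declaration inside the owning namespace
    }

    // A parameter written `class system&` here would introduce a new,
    // unrelated gp::system class; the qualified spelling names the real one.
    void* open_example(system::system&); // hypothetical free function
}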

include/gp/math/rendering/renderer.hpp (+2 -0)

@@ -9,6 +9,8 @@
#include "gp/math.hpp"
#include "gp/math/rendering_math.hpp"
// TODO: Namespace this correctly
using vec2 = gp::math::vec2_g<>;
using vec3 = gp::math::vec3_g<>;
using vec4 = gp::math::vec4_g<>;

include/gp/system/platforms/gcc-x86_64.hpp (+58 -56)

@@ -7,65 +7,67 @@
#define no_inline_decl(a) a __attribute__((noinline))
namespace gp{
namespace specifics {
struct platform_data {
platform_data() = default;
platform_data(gp::buffer<char> stack_str)
: stack_ptr((stack_str.end()-16).data)
, base_ptr(stack_ptr)
{}
namespace system {
namespace specifics {
struct platform_data {
platform_data() = default;
platform_data(gp::buffer<char> stack_str)
: stack_ptr((stack_str.end()-16).data)
, base_ptr(stack_ptr)
{}
uint64_t rbx, r12, r13, r14, r15;
uint64_t rbx, r12, r13, r14, r15;
void* stack_ptr;
void* base_ptr;
void* stack_ptr;
void* base_ptr;
void pull() __attribute__((always_inline))
{
__asm__ __volatile__(
"movq %%rsp, %0\n"
"movq %%rbp, %1\n"
"movq %%rbx, %2\n"
"movq %%r12, %3\n"
"movq %%r13, %4\n"
"movq %%r14, %5\n"
"movq %%r15, %6\n"
: "=m"(stack_ptr)
, "=m"(base_ptr)
, "=m"(rbx)
, "=m"(r12)
, "=m"(r13)
, "=m"(r14)
, "=m"(r15)
);
}
void pull() __attribute__((always_inline))
{
__asm__ __volatile__(
"movq %%rsp, %0\n"
"movq %%rbp, %1\n"
"movq %%rbx, %2\n"
"movq %%r12, %3\n"
"movq %%r13, %4\n"
"movq %%r14, %5\n"
"movq %%r15, %6\n"
: "=m"(stack_ptr)
, "=m"(base_ptr)
, "=m"(rbx)
, "=m"(r12)
, "=m"(r13)
, "=m"(r14)
, "=m"(r15)
);
}
void* push(void* location) __attribute__((always_inline))
{
volatile void* volatile tmp = static_cast<char*>(stack_ptr) - sizeof(void*);
*static_cast<volatile void* volatile * volatile>(tmp) = location;
__asm__ __volatile__(
"movq %1, %%rsp\n"
"movq %2, %%rbp\n"
"movq %3, %%rbx\n"
"movq %4, %%r12\n"
"movq %5, %%r13\n"
"movq %6, %%r14\n"
"movq %7, %%r15\n"
"popq %0\n"
: "+r"(location)
: "m"(tmp)
, "m"(base_ptr)
, "m"(rbx)
, "m"(r12)
, "m"(r13)
, "m"(r14)
, "m"(r15)
: "memory"
);
return location;
}
};
void* push(void* location) __attribute__((always_inline))
{
volatile void* volatile tmp = static_cast<char*>(stack_ptr) - sizeof(void*);
*static_cast<volatile void* volatile * volatile>(tmp) = location;
__asm__ __volatile__(
"movq %1, %%rsp\n"
"movq %2, %%rbp\n"
"movq %3, %%rbx\n"
"movq %4, %%r12\n"
"movq %5, %%r13\n"
"movq %6, %%r14\n"
"movq %7, %%r15\n"
"popq %0\n"
: "+r"(location)
: "m"(tmp)
, "m"(base_ptr)
, "m"(rbx)
, "m"(r12)
, "m"(r13)
, "m"(r14)
, "m"(r15)
: "memory"
);
return location;
}
};
}
}
}

include/gp/system/process_data.hpp (+49 -48)

@@ -10,60 +10,61 @@
#include <atomic>
namespace gp {
namespace system {
enum class process_status {
inactive = 0,
running = 1,
waiting = 2,
finished = 3,
zombie = 4
};
enum class process_status {
inactive = 0,
running = 1,
waiting = 2,
finished = 3,
zombie = 4
};
using pid_t = size_t;
using pid_t = size_t;
struct base_process_info {
virtual void initialize() {}
virtual void checkpoint() {}
virtual void restore() {}
virtual void switch_in() {}
virtual void switch_out() {}
virtual void cleanup() {}
virtual ~base_process_info() {}
};
struct base_process_info {
virtual void initialize() {}
virtual void checkpoint() {}
virtual void restore() {}
virtual void switch_in() {}
virtual void switch_out() {}
virtual void cleanup() {}
virtual ~base_process_info() {}
};
struct process_data{
pid_t pid;
gp::function<void()> fn;
void* stack;
size_t stack_sz;
gp::process_status state;
std::atomic_bool is_running;
[[no_unique_address]] gp::specifics::platform_data specifics;
gp::unique_ptr<base_process_info> info;
struct process_data{
pid_t pid;
gp::function<void()> fn;
void* stack;
size_t stack_sz;
gp::system::process_status state;
std::atomic_bool is_running;
[[no_unique_address]] gp::system::specifics::platform_data specifics;
gp::unique_ptr<base_process_info> info;
process_data(gp::function<void()> _fn, void* _stack, size_t _stack_sz, gp::unique_ptr<base_process_info>&& _info)
: fn(_fn)
, stack(_stack)
, stack_sz(_stack_sz)
, state(gp::process_status::inactive)
, specifics(gp::buffer<char>{(char*)stack, stack_sz})
, info(gp::move(_info))
{}
process_data(gp::function<void()> _fn, void* _stack, size_t _stack_sz, gp::unique_ptr<base_process_info>&& _info)
: fn(_fn)
, stack(_stack)
, stack_sz(_stack_sz)
, state(gp::system::process_status::inactive)
, specifics(gp::buffer<char>{(char*)stack, stack_sz})
, info(gp::move(_info))
{}
process_data(process_data&& v)
: fn(v.fn)
, stack(v.stack)
, stack_sz(v.stack_sz)
, state(v.state)
, specifics(v.specifics)
, info(gp::move(v.info))
{}
process_data(process_data&& v)
: fn(v.fn)
, stack(v.stack)
, stack_sz(v.stack_sz)
, state(v.state)
, specifics(v.specifics)
, info(gp::move(v.info))
{}
~process_data() {
if(info) {
info->cleanup();
~process_data() {
if(info) {
info->cleanup();
}
}
}
};
};
}
}

include/gp/system/runqueue.hpp (+0 -104)

@@ -1,104 +0,0 @@
#pragma once
#include "gp/system/process_data.hpp"
#include <atomic>
#include <new>
namespace gp {
struct topic_list{
struct node{
alignas(gp_config::limits::hardware_constructive_interference_size) std::atomic_bool is_locked;
gp::process_data* value;
alignas(gp_config::limits::hardware_constructive_interference_size) std::atomic<struct node*> next;
node()
{
is_locked = false;
value = nullptr;
next = nullptr;
}
node(node&& v)
{
v.try_acquire();
is_locked = false;
value = gp::move(v.value);
next = v.next.load();
}
bool try_acquire() noexcept {
bool expected = false;
return !(is_locked.compare_exchange_strong(expected, true));
}
void release() noexcept {
is_locked.store(false);
}
};
using node_ptr = struct node*;
using node_ptr_rep = std::atomic<struct node*>;
topic_list()
: start{nullptr}
, end{nullptr}
{}
node_ptr_rep start;
node_ptr_rep end;
// NODES ARE ACQUIRED ON POP
node_ptr try_pop() {
auto ptr = start.load();
if(!ptr) return nullptr;
if(ptr->try_acquire()) {
auto replace = ptr->next.load();
auto expected = ptr;
if(end.load() == ptr) {
replace = nullptr;
}
if(start.compare_exchange_strong(expected, replace)) {
end.store(nullptr);
return ptr;
} else {
return nullptr;
}
} else {
return nullptr;
}
}
// ONLY PUSH ACQUIRED NODES,
// RELEASE WHEN NO LONGER IN USE
bool try_push(node_ptr node) {
node->next.store(nullptr);
auto ed = end.load();
if(ed) {
if(ed->try_acquire()) {
auto old_ed = ed;
node->next.store(ed);
if(end.compare_exchange_strong(ed, node)) {
node->release();
old_ed->release();
return true;
} else {
node->release();
old_ed->release();
return false;
}
} else return false;
} else {
if(end.compare_exchange_strong(ed, node)) {
start.store(node);
node->release();
return true;
} else {
return false;
}
}
}
};
}

include/gp/system/scheduler.hpp (+38 -37)

@@ -2,42 +2,43 @@
#include "gp/containers/indexed_array.hpp"
#include "gp/system/process_data.hpp"
#include "gp/system/runqueue.hpp"
namespace gp{
class system;
struct scheduler {
topic_list::node_ptr previous;
topic_list::node_ptr current;
size_t id;
system& sys;
process_data main_context_data;
topic_list::node main_context;
no_inline_decl(
void yield_to(topic_list::node_ptr target)
);
scheduler(class system&, size_t token);
scheduler(scheduler&& v)
: previous(v.previous)
, current(v.current)
, id(v.id)
, sys(v.sys)
, main_context_data(gp::move(v.main_context_data))
, main_context(gp::move(v.main_context))
{}
void run();
void yield();
~scheduler() {
}
};
#include "gp/system/task_queue.hpp"
namespace gp {
namespace system {
class system;
struct scheduler {
task_queue::node_ptr previous;
task_queue::node_ptr current;
size_t id;
system& sys;
process_data main_context_data;
task_queue::node main_context;
no_inline_decl(
void yield_to(task_queue::node_ptr target)
);
scheduler(system&, size_t token);
scheduler(scheduler&& v)
: previous(v.previous)
, current(v.current)
, id(v.id)
, sys(v.sys)
, main_context_data(gp::move(v.main_context_data))
, main_context(gp::move(v.main_context))
{}
void run();
void yield();
~scheduler() {
}
};
}
}

include/gp/system/scheduling/simple_scheduling.hpp (+80 -77)

@@ -1,97 +1,100 @@
#pragma once
#include "gp/system/system.hpp"
namespace gp{
// TODO: implement unsafe_bottleneck to use in the locked list
class simple_scheduling : public gp::scheduling_scheme {
struct locked_circular_buffer {
std::atomic_bool lock;
gp::array<gp::topic_list::node_ptr, gp_config::limits::max_processes> processes;
size_t read_idx = 0;
size_t write_idx = 0;
namespace gp{
namespace system {
class simple_scheduling : public gp::system::scheduling_scheme {
struct locked_circular_buffer {
std::atomic_bool lock;
gp::array<gp::system::task_queue::node_ptr, gp_config::limits::max_processes> processes;
size_t read_idx = 0;
size_t write_idx = 0;
void push(gp::topic_list::node_ptr node) {
bool t = true;
bool f = false;
while(not lock.compare_exchange_strong(f,t)){}
{
gp_config::assertion((write_idx + 1)%processes.size() != read_idx, "bad push to circular buffer");
processes[write_idx] = node;
write_idx=(write_idx+1)%processes.size();
void push(gp::system::task_queue::node_ptr node) {
bool t = true;
bool f = false;
while(not lock.compare_exchange_strong(f,t)){}
{
gp_config::assertion((write_idx + 1)%processes.size() != read_idx, "bad push to circular buffer");
processes[write_idx] = node;
write_idx=(write_idx+1)%processes.size();
}
while(not lock.compare_exchange_strong(t,f)){}
}
while(not lock.compare_exchange_strong(t,f)){}
}
gp::topic_list::node_ptr pop() {
bool t = true;
bool f = false;
gp::topic_list::node_ptr ret;
while(not lock.compare_exchange_strong(f,t)){}
{
if(read_idx == write_idx) {
ret = nullptr;
} else {
ret = processes[read_idx];
read_idx=(read_idx+1)%processes.size();
gp::system::task_queue::node_ptr pop() {
bool t = true;
bool f = false;
gp::system::task_queue::node_ptr ret;
while(not lock.compare_exchange_strong(f,t)){}
{
if(read_idx == write_idx) {
ret = nullptr;
} else {
ret = processes[read_idx];
read_idx=(read_idx+1)%processes.size();
}
}
while(not lock.compare_exchange_strong(t,f)){}
return ret;
}
while(not lock.compare_exchange_strong(t,f)){}
return ret;
}
};
};
locked_circular_buffer running;
locked_circular_buffer waiting;
locked_circular_buffer to_clean;
locked_circular_buffer naughty;
system* sys = nullptr;
locked_circular_buffer running;
locked_circular_buffer waiting;
locked_circular_buffer to_clean;
locked_circular_buffer naughty;
system* sys = nullptr;
public:
simple_scheduling()
{}
public:
simple_scheduling()
{}
virtual void link(system& value) {
gp_config::assertion(!(sys), "Double linkage detected");
sys = &value;
virtual void link(system& value) {
gp_config::assertion(!(sys), "Double linkage detected");
sys = &value;
sys->process_managers.emplace_back(*sys,0);
}
sys->process_managers.emplace_back(*sys,0);
}
virtual gp::topic_list::node_ptr one(size_t) {
auto v = running.pop();
do{
if(v) return v;
v = running.pop();
}while(true);
}
virtual gp::system::task_queue::node_ptr one(size_t) {
auto v = running.pop();
do{
if(v) return v;
v = running.pop();
}while(true);
}
virtual void push(gp::topic_list::node_ptr node) {
running.push(node);
}
virtual void push(gp::system::task_queue::node_ptr node) {
running.push(node);
}
virtual gp::topic_list::node_ptr next(size_t, gp::topic_list::node_ptr current) {
switch(current->value->state) {
case process_status::inactive:
case process_status::running:
running.push(current);
break;
case process_status::finished:
to_clean.push(current);
break;
case process_status::zombie:
naughty.push(current);
break;
case process_status::waiting:
waiting.push(current);
break;
virtual gp::system::task_queue::node_ptr next(size_t, gp::system::task_queue::node_ptr current) {
switch(current->value->state) {
case process_status::inactive:
case process_status::running:
running.push(current);
break;
case process_status::finished:
to_clean.push(current);
break;
case process_status::zombie:
naughty.push(current);
break;
case process_status::waiting:
waiting.push(current);
break;
}
return one(0);
}
return one(0);
}
virtual gp::scheduler& current_scheduler() {
return sys->process_managers[0];
}
};
virtual gp::system::scheduler& current_scheduler() {
return sys->process_managers[0];
}
};
}
}
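
locked_circular_buffer serializes access to its ring by spinning on compare_exchange_strong over a single atomic flag. One subtlety of that idiom: compare_exchange_strong writes the observed value back into the expected argument on failure, so a standalone spinlock has to reset the flag each iteration. A minimal sketch of the pattern (illustrative, not repository code):

#include <atomic>

struct spin_guard {
    std::atomic_bool& lock;
    explicit spin_guard(std::atomic_bool& l) : lock(l) {
        bool expected = false;
        // On failure, compare_exchange_strong overwrites `expected` with
        // the value it observed, so reset it before retrying.
        while (!lock.compare_exchange_strong(expected, true)) {
            expected = false;
        }
    }
    ~spin_guard() { lock.store(false); } // release on scope exit
};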

include/gp/system/system.hpp (+127 -126)

@@ -6,141 +6,142 @@
#include "gp/containers/vector.hpp"
#include "gp/ipc/file_description.hpp"
#include "gp/ipc/filesystem.hpp"
#include "gp/system/runqueue.hpp"
#include "gp/system/task_queue.hpp"
#include "gp/system/scheduler.hpp"
// THIS FILE IS A MESS
// Reworking will be suffering
namespace gp{
// TODO: thread safety
namespace system {
class scheduling_scheme {
public:
virtual task_queue::node_ptr one(size_t token) = 0;
virtual task_queue::node_ptr next(size_t token, task_queue::node_ptr current) = 0;
virtual void push(task_queue::node_ptr current) = 0;
virtual void link(system&) = 0;
virtual scheduler& current_scheduler() = 0;
};
class system {
friend struct scheduler;
public:
gp::reference_wrapper<gp::allocator> system_allocator;
gp::vector<gp::filesystem*> filesystems{system_allocator};
gp::vector<gp::system::scheduler> process_managers;
scheduling_scheme& scheme;
task_queue::node main_context;
system(allocator& v, scheduling_scheme& scheme_, gp::buffer<char> stack_estimate = gp::buffer<char>{nullptr, nullptr})
: system_allocator{v}
, process_managers{system_allocator}
, scheme{scheme_}
{
[[maybe_unused]] volatile char a;
if(stack_estimate.size() == 0) {
auto seed = (char*)&a;
auto jump = (uintptr_t)seed % gp_config::limits::process_stack;
seed -= jump + (jump == 0)*gp_config::limits::process_stack;
auto page_cnt = 1;
if(jump == 0) page_cnt++;
stack_estimate = gp::buffer<char>{seed, (size_t)(gp_config::limits::process_stack*page_cnt)};
}
main_context.value = (process_data*)system_allocator.get().allocate(sizeof(process_data));
new(main_context.value) process_data(gp::function<void()>([]() -> void{}, nullopt), stack_estimate.begin().data, stack_estimate.size(), gp::unique_ptr<base_process_info>::make(system_allocator));
gp_config::assertion(main_context.value != nullptr, "failed to allocate return to main switch");
scheme.link(*this);
}
size_t spawn(gp::function<void()> fn) {
constexpr size_t stack_sz = gp_config::limits::process_stack;
void* stack = system_allocator.get().allocate(stack_sz);
gp_config::assertion(stack != nullptr, "failed to allocate a stack");
process_data* created_process = (process_data*)system_allocator.get().allocate(sizeof(process_data));
gp_config::assertion(stack != nullptr, "failed to allocate a process data");
new(created_process) process_data(fn, stack, stack_sz, gp::unique_ptr<base_process_info>::make(system_allocator));
task_queue::node_ptr pp = (task_queue::node_ptr)system_allocator.get().allocate(
sizeof(task_queue::node)
);
new(pp) task_queue::node();
pp->value = created_process;
auto pid = pp->value->pid;
scheme.push(pp);
return pid;
}
template<typename threading_function>
void run(threading_function thread_starter) {
for(auto& i : process_managers) {
gp::function<void(void)> runner{
[&](){
i.run();
},
system_allocator
};
thread_starter(
runner
);
}
}
void yield() {
scheme.current_scheduler().yield();
}
};
scheduler::scheduler(class system& v, size_t token)
: id(token)
, sys(v)
, main_context_data{gp::function<void()>{[](){}, v.system_allocator}, nullptr, size_t(0), gp::unique_ptr<base_process_info>::make(v.system_allocator)}
, main_context()
{
main_context.value = &main_context_data;
gp_config::assertion(!main_context.try_acquire(), "node should be not aquired on creation");
}
class scheduling_scheme {
public:
virtual topic_list::node_ptr one(size_t token) = 0;
virtual topic_list::node_ptr next(size_t token, topic_list::node_ptr current) = 0;
virtual void push(topic_list::node_ptr current) = 0;
virtual void link(class system&) = 0;
virtual scheduler& current_scheduler() = 0;
};
class system {
friend struct scheduler;
public:
gp::reference_wrapper<gp::allocator> system_allocator;
gp::vector<gp::filesystem*> filesystems{system_allocator};
gp::vector<gp::scheduler> process_managers;
scheduling_scheme& scheme;
topic_list::node main_context;
system(allocator& v, scheduling_scheme& scheme_, gp::buffer<char> stack_estimate = gp::buffer<char>{nullptr, nullptr})
: system_allocator{v}
, process_managers{system_allocator}
, scheme{scheme_}
{
[[maybe_unused]] volatile char a;
if(stack_estimate.size() == 0) {
auto seed = (char*)&a;
auto jump = (uintptr_t)seed % gp_config::limits::process_stack;
seed -= jump + (jump == 0)*gp_config::limits::process_stack;
auto page_cnt = 1;
if(jump == 0) page_cnt++;
stack_estimate = gp::buffer<char>{seed, (size_t)(gp_config::limits::process_stack*page_cnt)};
}
main_context.value = (process_data*)system_allocator.get().allocate(sizeof(process_data));
new(main_context.value) process_data(gp::function<void()>([]() -> void{}, nullopt), stack_estimate.begin().data, stack_estimate.size(), gp::unique_ptr<base_process_info>::make(system_allocator));
gp_config::assertion(main_context.value != nullptr, "failed to allocate return to main switch");
scheme.link(*this);
}
size_t spawn(gp::function<void()> fn) {
constexpr size_t stack_sz = gp_config::limits::process_stack;
void* stack = system_allocator.get().allocate(stack_sz);
gp_config::assertion(stack != nullptr, "failed to allocate a stack");
process_data* created_process = (process_data*)system_allocator.get().allocate(sizeof(process_data));
gp_config::assertion(stack != nullptr, "failed to allocate a process data");
new(created_process) process_data(fn, stack, stack_sz, gp::unique_ptr<base_process_info>::make(system_allocator));
topic_list::node_ptr pp = (topic_list::node_ptr)system_allocator.get().allocate(
sizeof(topic_list::node)
);
new(pp) topic_list::node();
pp->value = created_process;
auto pid = pp->value->pid;
scheme.push(pp);
return pid;
}
no_inline_decl(inline scheduler* spawner (scheduler* new_p)) {
auto& proc = *new_p->current->value;
if(proc.state == gp::system::process_status::inactive) {
proc.state = gp::system::process_status::running;
proc.fn();
proc.state = gp::system::process_status::finished;
}
return new_p;
}
template<typename threading_function>
void run(threading_function thread_starter) {
for(auto& i : process_managers) {
gp::function<void(void)> runner{
[&](){
i.run();
},
system_allocator
};
thread_starter(
runner
);
void scheduler::yield_to(task_queue::node_ptr target)
{
previous = current;
current->value->specifics.pull();
current = target;
auto new_p = this;
do{
new_p = spawner(static_cast<scheduler*>(target->value->specifics.push(new_p)));
target = new_p->sys.scheme.next(new_p->id, new_p->current);
new_p->previous = new_p->current;
new_p->current->value->specifics.pull();
new_p->current = target;
} while(true);
}
}
void yield() {
scheme.current_scheduler().yield();
}
};
scheduler::scheduler(class system& v, size_t token)
: id(token)
, sys(v)
, main_context_data{gp::function<void()>{[](){}, v.system_allocator}, nullptr, size_t(0), gp::unique_ptr<base_process_info>::make(v.system_allocator)}
, main_context()
{
main_context.value = &main_context_data;
gp_config::assertion(!main_context.try_acquire(), "node should be not aquired on creation");
}
no_inline_decl(inline scheduler* spawner (scheduler* new_p)) {
auto& proc = *new_p->current->value;
if(proc.state == gp::process_status::inactive) {
proc.state = gp::process_status::running;
proc.fn();
proc.state = gp::process_status::finished;
}
return new_p;
}
void scheduler::yield_to(topic_list::node_ptr target)
{
previous = current;
current->value->specifics.pull();
current = target;
auto new_p = this;
do{
new_p = spawner(static_cast<scheduler*>(target->value->specifics.push(new_p)));
target = new_p->sys.scheme.next(new_p->id, new_p->current);
new_p->previous = new_p->current;
new_p->current->value->specifics.pull();
new_p->current = target;
} while(true);
}
void scheduler::yield(){
current = sys.scheme.next(id, current);
yield_to(current);
}
void scheduler::run()
{
main_context_data.pid = 0;
main_context_data.state = process_status::running;
main_context_data.specifics.pull();
current = &main_context;
sys.scheme.push(&main_context);
auto new_p = spawner(static_cast<scheduler*>(current->value->specifics.push(this)));
new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
}
void scheduler::yield(){
current = sys.scheme.next(id, current);
yield_to(current);
}
void scheduler::run()
{
main_context_data.pid = 0;
main_context_data.state = process_status::running;
main_context_data.specifics.pull();
current = &main_context;
sys.scheme.push(&main_context);
auto new_p = spawner(static_cast<scheduler*>(current->value->specifics.push(this)));
new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
}
}
}

include/gp/system/task_queue.hpp (+111 -0)

@@ -0,0 +1,111 @@
#pragma once
#include "gp/system/process_data.hpp"
#include <atomic>
#include <new>
// TODO: rename to task_queue and renamespace to gp::system (filename included)
// TODO: noexcept everything
namespace gp {
namespace system {
struct task_queue{
struct node{
alignas(gp_config::limits::hardware_constructive_interference_size)
std::atomic_bool is_locked;
gp::system::process_data* value;
alignas(gp_config::limits::hardware_constructive_interference_size)
std::atomic<struct node*> next;
node()
{
is_locked = false;
value = nullptr;
next = nullptr;
}
node(node&& v)
{
v.try_acquire();
is_locked = false;
value = gp::move(v.value);
next = v.next.load();
}
bool try_acquire() noexcept {
bool expected = false;
return !(is_locked.compare_exchange_strong(expected, true));
}
void release() noexcept {
is_locked.store(false);
}
};
using node_ptr = struct node*;
using node_ptr_rep = std::atomic<struct node*>;
task_queue()
: start{nullptr}
, end{nullptr}
{}
node_ptr_rep start;
node_ptr_rep end;
// NODES ARE ACQUIRED ON POP
node_ptr try_pop() {
auto ptr = start.load();
if(!ptr) return nullptr;
if(ptr->try_acquire()) {
auto replace = ptr->next.load();
auto expected = ptr;
if(end.load() == ptr) {
replace = nullptr;
}
if(start.compare_exchange_strong(expected, replace)) {
end.store(nullptr);
return ptr;
} else {
return nullptr;
}
} else {
return nullptr;
}
}
// ONLY PUSH ACQUIRED NODES,
// RELEASE WHEN NO LONGER IN USE
bool try_push(node_ptr node) {
node->next.store(nullptr);
auto ed = end.load();
if(ed) {
if(ed->try_acquire()) {
auto old_ed = ed;
node->next.store(ed);
if(end.compare_exchange_strong(ed, node)) {
node->release();
old_ed->release();
return true;
} else {
node->release();
old_ed->release();
return false;
}
} else return false;
} else {
if(end.compare_exchange_strong(ed, node)) {
start.store(node);
node->release();
return true;
} else {
return false;
}
}
}
};
}
}
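
The queue hands ownership through the per-node lock: try_pop returns nodes in the acquired state, and only acquired nodes may be pushed, per the comments above. A usage sketch compiled against this header (the drain loop itself is illustrative):

#include "gp/system/task_queue.hpp"

// Drain everything currently queued; nodes come back acquired from
// try_pop, so the caller releases each one once its work item is handled.
void drain(gp::system::task_queue& queue)
{
    while (auto* node = queue.try_pop()) {
        // ... dispatch node->value (a gp::system::process_data*) here ...
        node->release(); // per the header: release when no longer in use
    }
}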

include/gp/utils/iterator.hpp (+0 -1)

@@ -3,7 +3,6 @@
#include <cstddef>
#include <cstdint>
// BUG: none of this is in a namespace
// TODO: Specify the concept of an iterator
namespace gp {

tests.cpp (+5 -11)

@@ -1,14 +1,6 @@
#include "gp_config.hpp"
#include "allocator.hpp"
#include "bloomfilter.cpp"
#include "cbor_test.cpp"
#include "channel_test.cpp"
#include "gp_test.cpp"
#include "math.cpp"
#include "meta_test.cpp"
#include "pair_test.cpp"
#include "quotient_filter.cpp"
#include "test_scaffold.h"
#include <iostream>
@@ -16,6 +8,8 @@
alignas(2048) gp::array<char, 4096> static_mapper::store;
gp::buddy<> static_mapper::impl = gp::buddy<>{store.begin().data, store.size()};
std::vector<std::unique_ptr<test_scaffold>> tests;
int main()
{
uint failed = 0;
@@ -24,13 +18,13 @@ int main()
{
++runned;
int value;
// try{
try{
value = test->run();
if(value)
{
std::cout << std::dec << test->name << " failed with "<< value << std::endl;
}
/* } catch (gp::runtime_error err) {
} catch (gp::runtime_error err) {
std::cout << test->name << " failed with an exception: " << err.what() << std::endl;
value = -1;
} catch (gp_config::assert_failure err) {
@@ -39,7 +33,7 @@
} catch (...) {
std::cout << test->name << " failed with an exception" << std::endl;
value = -1;
}*/
}
failed += (value != 0);
}
std::cout << std::dec << "Runned "<<runned<<" tests with "<<failed<<" failures" << std::endl;

tests/cbor_test.cpp (+1 -1)

@@ -2,7 +2,7 @@
#include <gp/algorithms/foreach.hpp>
#include <gp/utils/allocators/arena.hpp>
#include <gp/containers/array.hpp>
#include <gp/ipc/enveloppe/cbor.hpp>
#include <gp/ipc/envelope/cbor.hpp>
#include "test_scaffold.h"
struct cbor_test : public test_scaffold {

tests/channel_test.cpp (+4 -4)

@@ -2,7 +2,7 @@
#include <gp/algorithms/foreach.hpp>
#include <gp/utils/allocators/buddy.hpp>
#include <gp/containers/array.hpp>
#include <gp/ipc/enveloppe/cbor.hpp>
#include <gp/ipc/envelope/cbor.hpp>
#include <gp/system/system.hpp>
#include <gp/system/scheduling/simple_scheduling.hpp>
#include "test_scaffold.h"
@@ -35,7 +35,7 @@ struct point {
std::thread* leaver_4989487;
std::atomic_int quit_signal_4989487 = 0;
gp::system* sys_ptr_4989487;
gp::system::system* sys_ptr_4989487;
struct channel_test : public test_scaffold {
channel_test() {
@@ -47,8 +47,8 @@ struct channel_test : public test_scaffold {
std::default_delete<gp::array<char, 4096*512>>
> store = std::make_unique<gp::array<char, 4096*512>>();
gp::buddy<> alloc{&*store->begin(), store->size()};
gp::simple_scheduling sched{};
gp::system& sys = *(new(alloc.allocate(sizeof(gp::system))) gp::system{alloc, sched});
gp::system::simple_scheduling sched{};
gp::system::system& sys = *(new(alloc.allocate(sizeof(gp::system::system))) gp::system::system{alloc, sched});
struct terminator{};

tests/test_scaffold.h (+1 -1)

@@ -25,7 +25,7 @@ struct test_scaffold{
virtual ~test_scaffold() = default;
};
std::vector<std::unique_ptr<test_scaffold>> tests;
extern std::vector<std::unique_ptr<test_scaffold>> tests;
struct append_test {
append_test(test_scaffold* ptr) {
