|
#pragma once

#include "gp/algorithms/foreach.hpp"
#include "gp/algorithms/reference.hpp"
#include "gp/utils/allocators/allocator.hpp"
#include "gp/containers/vector.hpp"
#include "gp/ipc/file_description.hpp"
#include "gp/ipc/filesystem.hpp"
#include "gp/system/runqueue.hpp"
#include "gp/system/scheduler.hpp"

namespace gp{

// TODO: thread safety

// Interface of a scheduling policy. `one` hands out an initial node for the
// scheduler identified by `token`, `next` picks the successor of `current`,
// `push` enqueues a freshly spawned node, `link` lets the scheme create and
// attach its schedulers to a system, and `current_scheduler` resolves the
// scheduler driving the calling thread.
class scheduling_scheme {
public:
	virtual topic_list::node_ptr one(size_t token) = 0;
	virtual topic_list::node_ptr next(size_t token, topic_list::node_ptr current) = 0;
	virtual void push(topic_list::node_ptr current) = 0;
	virtual void link(class system&) = 0;
	virtual scheduler& current_scheduler() = 0;
};
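
// A minimal sketch of what a concrete scheme could look like; this is an
// illustration, not part of the library. It assumes topic_list::node_ptr
// compares like a raw pointer and that gp::vector offers emplace_back and
// operator[]; it is single-scheduler, not thread safe, and never retires
// finished nodes.
//
//	class ring_scheme final : public scheduling_scheme {
//		topic_list::node_ptr ring[64] = {};
//		size_t count = 0;
//		scheduler* sched = nullptr;
//	public:
//		topic_list::node_ptr one(size_t) override { return ring[0]; }
//		topic_list::node_ptr next(size_t, topic_list::node_ptr current) override {
//			for(size_t idx = 0; idx < count; ++idx)
//				if(ring[idx] == current) return ring[(idx + 1) % count];
//			return current;
//		}
//		void push(topic_list::node_ptr node) override { ring[count++] = node; }
//		void link(class system& sys) override {
//			sys.process_managers.emplace_back(sys, 0);
//			sched = &sys.process_managers[0];
//		}
//		scheduler& current_scheduler() override { return *sched; }
//	};
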
class system {
	friend struct scheduler;
public:
	gp::reference_wrapper<gp::allocator> system_allocator;
	gp::vector<gp::filesystem*> filesystems{system_allocator};
	gp::vector<gp::scheduler> process_managers;
	scheduling_scheme& scheme;
	topic_list::node main_context;

	system(allocator& v, scheduling_scheme& scheme_, gp::buffer<char> stack_estimate = gp::buffer<char>{nullptr, nullptr})
	: system_allocator{v}
	, process_managers{system_allocator}
	, scheme{scheme_}
	{
		// `a` only serves as a probe for the current stack address. When no
		// stack estimate was provided, align the probe address down to a
		// process_stack boundary and size the buffer so the probe stays
		// inside it.
		[[maybe_unused]] volatile char a;
		if(stack_estimate.size() == 0) {
			auto seed = (char*)&a;
			auto jump = (uintptr_t)seed % gp_config::limits::process_stack;
			seed -= jump + (jump == 0)*gp_config::limits::process_stack;
			auto page_cnt = 1;
			if(jump == 0) page_cnt++;
			stack_estimate = gp::buffer<char>{seed, (size_t)(gp_config::limits::process_stack*page_cnt)};
		}
		main_context.value = (process_data*)system_allocator.get().allocate(sizeof(process_data));
		gp_config::assertion(main_context.value != nullptr, "failed to allocate return to main switch");
		new(main_context.value) process_data(gp::function<void()>([]() -> void{}, nullopt), stack_estimate.begin().data, stack_estimate.size(), gp::unique_ptr<base_process_info>::make(system_allocator));
		scheme.link(*this);
	}

	// Allocates a stack and a process record for `fn`, wraps them in a new
	// runqueue node, hands the node to the scheduling scheme and returns the
	// new process's pid.
	size_t spawn(gp::function<void()> fn) {
		constexpr size_t stack_sz = gp_config::limits::process_stack;
		void* stack = system_allocator.get().allocate(stack_sz);
		gp_config::assertion(stack != nullptr, "failed to allocate a stack");
		process_data* created_process = (process_data*)system_allocator.get().allocate(sizeof(process_data));
		gp_config::assertion(created_process != nullptr, "failed to allocate a process data");
		new(created_process) process_data(fn, stack, stack_sz, gp::unique_ptr<base_process_info>::make(system_allocator));

		topic_list::node_ptr pp = (topic_list::node_ptr)system_allocator.get().allocate(sizeof(topic_list::node));
		gp_config::assertion(pp != nullptr, "failed to allocate a process node");
		new(pp) topic_list::node();
		pp->value = created_process;
		auto pid = pp->value->pid;
		scheme.push(pp);
		return pid;
	}

	// Hands one runner per scheduler to `thread_starter`, which decides
	// whether to invoke it inline or on a dedicated thread.
	template<typename threading_function>
	void run(threading_function thread_starter) {
		for(auto& i : process_managers) {
			gp::function<void(void)> runner{
				[&](){
					i.run();
				},
				system_allocator
			};
			thread_starter(runner);
		}
	}

	// Cooperatively gives up the calling process's time slice.
	void yield() {
		scheme.current_scheduler().yield();
	}
};
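
// Typical usage (sketch only; `my_allocator` and `my_scheme` stand in for an
// application-provided gp::allocator implementation and scheduling_scheme):
//
//	my_allocator alloc;
//	my_scheme scheme;
//	gp::system sys{alloc, scheme};
//	sys.spawn(gp::function<void()>{[](){ /* process body */ }, alloc});
//	sys.run([](gp::function<void(void)>& runner){ runner(); });
//
// With the inline starter above, run() never returns: the scheduling loop in
// yield_to does not terminate.
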
scheduler::scheduler(class system& v, size_t token)
: id(token)
, sys(v)
, main_context_data{gp::function<void()>{[](){}, v.system_allocator}, nullptr, size_t(0), gp::unique_ptr<base_process_info>::make(v.system_allocator)}
, main_context()
{
	main_context.value = &main_context_data;
	gp_config::assertion(!main_context.try_acquire(), "node should not be acquired on creation");
}

// Trampoline executed on the destination context: if the process arriving
// there has not started yet, run its body and mark it finished before
// handing control back to the scheduler.
no_inline_decl(inline scheduler* spawner (scheduler* new_p)) {
	auto& proc = *new_p->current->value;
	if(proc.state == gp::process_status::inactive) {
		proc.state = gp::process_status::running;
		proc.fn();
		proc.state = gp::process_status::finished;
	}
	return new_p;
}

void scheduler::yield_to(topic_list::node_ptr target)
{
	previous = current;
	current->value->specifics.pull();
	current = target;
	auto new_p = this;
	do{
		// switch to the target context, carrying the scheduler pointer
		// across; when this context is later resumed, push returns the
		// scheduler that resumed it
		new_p = spawner(static_cast<scheduler*>(target->value->specifics.push(new_p)));
		target = new_p->sys.scheme.next(new_p->id, new_p->current);
		new_p->previous = new_p->current;
		new_p->current->value->specifics.pull();
		new_p->current = target;
	} while(true);
}

void scheduler::yield(){
	// yield_to saves `current` as previous and pulls its context itself, so
	// hand it the successor without overwriting `current` first
	yield_to(sys.scheme.next(id, current));
}

void scheduler::run()
{
	// Adopt the calling thread as process 0, publish its node to the
	// scheme, then enter the scheduling loop.
	main_context_data.pid = 0;
	main_context_data.state = process_status::running;
	main_context_data.specifics.pull();
	current = &main_context;
	sys.scheme.push(&main_context);
	auto new_p = spawner(static_cast<scheduler*>(current->value->specifics.push(this)));
	new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
}

}
|