#pragma once
|
|
|
|
#include "gp/algorithm/foreach.hpp"
|
|
#include "gp/algorithm/reference.hpp"
|
|
#include "gp/allocator/allocator.hpp"
|
|
#include "gp/vector.hpp"
|
|
#include "gp/vfs/file_description.hpp"
|
|
#include "gp/vfs/filesystem.hpp"
|
|
#include "gp/vfs/runqueue.hpp"
|
|
#include "gp/vfs/scheduler.hpp"
|
|
|
|
|
|
namespace gp{
|
|
// TODO: thread safety
|
|
|
|
class scheduling_scheme {
|
|
public:
|
|
virtual topic_list::node_ptr one(size_t token) = 0;
|
|
virtual topic_list::node_ptr next(size_t token, topic_list::node_ptr current) = 0;
|
|
virtual scheduler& current_scheduler() = 0;
|
|
};
|
|
|
|
class system {
|
|
gp::reference_wrapper<gp::allocator> system_allocator;
|
|
gp::vector<gp::filesystem*> filesystems{system_allocator};
|
|
gp::vector<gp::scheduler> process_managers;
|
|
scheduling_scheme& scheme;
|
|
|
|
|
|
friend class scheduler;
|
|
public:
|
|
system(allocator& v, scheduling_scheme& scheme_)
|
|
: system_allocator{v}
|
|
, process_managers{system_allocator}
|
|
, scheme{scheme_}
|
|
{}
|
|
|
|
size_t spawn(gp::function<void()> fn) {
|
|
constexpr size_t stack_sz = gp_config::limits::process_stack;
|
|
void* stack = system_allocator.get().allocate(stack_sz);
|
|
gp_config::assertion(stack != nullptr, "failed to allocate a stack");
|
|
process_data* created_process = (process_data*)system_allocator.get().allocate(sizeof(process_data));
|
|
gp_config::assertion(stack != nullptr, "failed to allocate a process data");
|
|
new(created_process) process_data(fn, stack, stack_sz);
|
|
|
|
topic_list::node_ptr pp = (topic_list::node_ptr)system_allocator.get().allocate(
|
|
sizeof(topic_list::node)
|
|
);
|
|
new(pp) topic_list::node();
|
|
pp->value = created_process;
|
|
|
|
auto& sched = scheme.current_scheduler();
|
|
sched.yield_to(scheme.next(sched.id, pp));
|
|
return pp->value->pid;
|
|
}
|
|
};
|
|
|
|
// Binds a scheduler to its owning system and records the token (`id`)
// that the scheduling scheme uses to identify this scheduler.
scheduler::scheduler(class system& v, size_t token)
: id(token)
, sys(v)
{}
|
|
|
|
// Switches execution from the currently running process to `target`.
//
// The statement order here is load-bearing: the current context is saved
// (`pull`), bookkeeping pointers are updated, and the actual stack switch
// happens inside `specifics.push(this)` in the trailing call expression.
// Everything after the lambda's opening brace runs on the TARGET
// process's stack.
void scheduler::yield_to(topic_list::node_ptr target)
{
	previous = current;
	// Save the CPU context of the process we are leaving.
	current->value->specifics.pull();
	current = target;
	// Trampoline executed on the target's stack after the switch; it must
	// not be inlined so that it gets a genuine frame on the new stack.
	no_inline_decl([&](scheduler* new_p)){
		// Release the node of the process we just left.
		// NOTE(review): release()'s result is dereferenced and discarded —
		// presumably only the releasing side effect matters; confirm.
		*new_p->previous->release();
		auto& proc = *new_p->current->value;
		if(proc.state == gp::process_status::inactive) {
			// First entry into this process: run its function to completion.
			proc.state = gp::process_status::running;
			proc.fn();
			proc.state = gp::process_status::finished;
			// The process function returned; ask the scheme what runs next.
			new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
		}
	}(static_cast<scheduler*>(target->value->specifics.push(this)));
}
|
|
|
|
|
|
// Cooperatively hands the CPU to whatever process the scheme picks next.
// NOTE(review): `current` is reassigned BEFORE yield_to runs, so inside
// yield_to `previous = current` records the target, not the process being
// left — verify this is what the scheme's release protocol expects.
void scheduler::yield(){
	auto next_node = sys.scheme.next(id, current);
	current = next_node;
	yield_to(next_node);
}
|
|
|
|
|
|
void scheduler::startup()
|
|
{
|
|
current = sys.scheme.one(id);
|
|
no_inline_decl([&](scheduler* new_p)){
|
|
auto& proc = *new_p->current->value;
|
|
if(proc.state == gp::process_status::inactive) {
|
|
proc.state = gp::process_status::running;
|
|
proc.fn();
|
|
proc.state = gp::process_status::finished;
|
|
new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
|
|
}
|
|
}(static_cast<scheduler*>(target->value->specifics.push(this)));
|
|
}
|
|
|
|
|
|
}
|