@@ -17,23 +17,39 @@ class scheduling_scheme {
public:
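    // Scheduling-policy hooks, as exercised by system and scheduler below:
    // one() hands a scheduler (identified by its token) a first node to run,
    // next() picks the node that should follow `current`, push() publishes a
    // freshly spawned node, link() attaches the scheme to its owning system,
    // and current_scheduler() resolves the scheduler driving the calling code.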
    virtual topic_list::node_ptr one(size_t token) = 0;
    virtual topic_list::node_ptr next(size_t token, topic_list::node_ptr current) = 0;
    virtual void push(topic_list::node_ptr current) = 0;
    virtual void link(class system&) = 0;
    virtual scheduler& current_scheduler() = 0;
};

class system {
    friend struct scheduler;
public:
    gp::reference_wrapper<gp::allocator> system_allocator;
    gp::vector<gp::filesystem*> filesystems{system_allocator};
    gp::vector<gp::scheduler> process_managers;
    scheduling_scheme& scheme;
    topic_list::node main_context;

    friend class scheduler;
public:
    system(allocator& v, scheduling_scheme& scheme_)
    system(allocator& v, scheduling_scheme& scheme_, gp::buffer<char> stack_estimate = gp::buffer<char>{nullptr, nullptr})
    : system_allocator{v}
    , process_managers{system_allocator}
    , scheme{scheme_}
    {}
    {
        [[maybe_unused]] volatile char a;
        if(stack_estimate.size() == 0) {
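            // No caller-provided stack buffer: estimate the current (main)
            // stack region from the address of the local `a`, rounded down to
            // a process_stack boundary, covering one extra process_stack-sized
            // block when the address sits exactly on a boundary.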
            auto seed = (char*)&a;
            auto jump = (uintptr_t)seed % gp_config::limits::process_stack;
            seed -= jump + (jump == 0)*gp_config::limits::process_stack;
            auto page_cnt = 1;
            if(jump == 0) page_cnt++;
            stack_estimate = gp::buffer<char>{seed, (size_t)(gp_config::limits::process_stack*page_cnt)};
        }
        main_context.value = (process_data*)system_allocator.get().allocate(sizeof(process_data));
        new(main_context.value) process_data(gp::function<void()>([]() -> void{}, nullopt), stack_estimate.begin().data, stack_estimate.size());
        gp_config::assertion(main_context.value != nullptr, "failed to allocate return to main switch");
        scheme.link(*this);
    }

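    // Allocates a stack and a process_data for `fn`, wraps them in a
    // topic_list::node and hands the node to the scheduling scheme; the new
    // process' pid is returned to the caller.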
    size_t spawn(gp::function<void()> fn) {
        constexpr size_t stack_sz = gp_config::limits::process_stack;
@@ -48,33 +64,64 @@ public:
        );
        new(pp) topic_list::node();
        pp->value = created_process;
        auto pid = pp->value->pid;
        scheme.push(pp);
        return pid;
    }

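    // Hands every scheduler in process_managers to the user-supplied
    // thread_starter as a ready-to-run callable, so the caller decides how
    // (and on which threads) the schedulers actually execute.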
    template<typename threading_function>
    void run(threading_function thread_starter) {
        for(auto& i : process_managers) {
            gp::function<void(void)> runner{
                [&](){
                    i.run();
                },
                system_allocator
            };
            thread_starter(
                runner
            );
        }
    }

        auto& sched = scheme.current_scheduler();
        sched.yield_to(scheme.next(sched.id, pp));
        return pp->value->pid;
    void yield() {
        scheme.current_scheduler().yield();
    }
};

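// The added main_context_data / main_context members give each scheduler an
// idle process_data of its own (no-op body, no stack), presumably so the
// thread entering the scheduler can itself be parked and resumed like any
// other node.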
scheduler::scheduler(class system& v, size_t token)
: id(token)
, sys(v)
{}
, main_context_data{gp::function<void()>{[](){}, v.system_allocator}, nullptr, size_t(0)}
, main_context()
{
    main_context.value = &main_context_data;
    gp_config::assertion(!main_context.try_acquire(), "node should not be acquired on creation");
}

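// Runs a node's process body the first time it is scheduled: an inactive
// process is flipped to running, its fn() is invoked once, and it is marked
// finished; the scheduler pointer is passed straight through.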
no_inline_decl(inline scheduler* spawner (scheduler* new_p)) {
    auto& proc = *new_p->current->value;
    if(proc.state == gp::process_status::inactive) {
        proc.state = gp::process_status::running;
        proc.fn();
        proc.state = gp::process_status::finished;
    }
    return new_p;
}

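// Switches execution from the node currently owned by this scheduler to
// `target`; specifics.pull()/push() appear to save and resume the stored
// execution context, with push() carrying a scheduler* across the switch.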
void scheduler::yield_to(topic_list::node_ptr target)
{
    previous = current;
    current->value->specifics.pull();
    current = target;
    no_inline_decl([&](scheduler* new_p)){
        *new_p->previous->release();
        auto& proc = *new_p->current->value;
        if(proc.state == gp::process_status::inactive) {
            proc.state = gp::process_status::running;
            proc.fn();
            proc.state = gp::process_status::finished;
            new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
        }
    }(static_cast<scheduler*>(target->value->specifics.push(this)));
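    // New flat switching loop (apparently replacing the recursive lambda
    // above): each time control returns to this context, run the node handed
    // over via spawner(), ask the scheme for the next node, and switch again.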
    auto new_p = this;
    do{
        new_p = spawner(static_cast<scheduler*>(target->value->specifics.push(new_p)));
        target = new_p->sys.scheme.next(new_p->id, new_p->current);
        new_p->previous = new_p->current;
        new_p->current->value->specifics.pull();
        new_p->current = target;
    } while(true);
}

@@ -84,18 +131,15 @@ void scheduler::yield(){
}

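// startup() becomes run(): instead of pulling a first node from the scheme
// directly, the scheduler now registers its own main context as a running
// process, pushes it into the scheme, and only then starts driving nodes
// through spawner()/yield_to().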
void scheduler::startup()
void scheduler::run()
{
    current = sys.scheme.one(id);
    no_inline_decl([&](scheduler* new_p)){
        auto& proc = *new_p->current->value;
        if(proc.state == gp::process_status::inactive) {
            proc.state = gp::process_status::running;
            proc.fn();
            proc.state = gp::process_status::finished;
            new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
        }
    }(static_cast<scheduler*>(target->value->specifics.push(this)));
    main_context_data.pid = 0;
    main_context_data.state = process_status::running;
    main_context_data.specifics.pull();
    current = &main_context;
    sys.scheme.push(&main_context);
    auto new_p = spawner(static_cast<scheduler*>(current->value->specifics.push(this)));
    new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
}