
advanced scheduling introduced

channel
Ludovic 'Archivist' Lagouardette 4 years ago
parent
commit
a295f8642c
5 changed files with 133 additions and 107 deletions
1. +3  -0   include/gp/vfs/process_data.hpp
2. +19 -25  include/gp/vfs/runqueue.hpp
3. +9  -21  include/gp/vfs/scheduler.hpp
4. +44 -0   include/gp/vfs/scheduling/simple_lockfree_scheduling.hpp
5. +58 -61  include/gp/vfs/system.hpp

+3 -0  include/gp/vfs/process_data.hpp

@@ -19,7 +19,10 @@ namespace gp {
zombie = 4
};
using pid_t = size_t;
struct process_data{
pid_t pid;
gp::function<void()> fn;
void* stack;
size_t stack_sz;
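
The hunk above adds a pid_t alias and a pid field to process_data. As a rough illustration of what that field carries, here is a minimal standalone sketch (not gp's code); the counter handing out pids is a hypothetical detail for the sketch only, since this hunk does not show how gp actually assigns them.

#include <atomic>
#include <cstddef>
#include <functional>
#include <utility>

namespace sketch {

using pid_t = std::size_t;                  // mirrors the new `using pid_t = size_t;`

struct process_data {
    pid_t pid;                              // the field this hunk introduces
    std::function<void()> fn;               // stands in for gp::function<void()>
    void* stack;
    std::size_t stack_sz;
};

// Hypothetical pid source for the sketch only.
pid_t next_pid() {
    static std::atomic<pid_t> counter{1};
    return counter.fetch_add(1);
}

process_data make_process(std::function<void()> fn, void* stack, std::size_t sz) {
    return {next_pid(), std::move(fn), stack, sz};
}

} // namespace sketch

int main() {
    auto p = sketch::make_process([] {}, nullptr, 0);
    return p.pid == 1 ? 0 : 1;              // first process gets pid 1 in this sketch
}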

+19 -25  include/gp/vfs/runqueue.hpp

@@ -4,27 +4,26 @@
#include <atomic>
class runqueue{
struct node{
std::atomic_bool is_locked;
gp::process_data* value;
std::atomic<struct node*> next;
namespace gp {
struct topic_list{
struct node{
std::atomic_bool is_locked;
gp::process_data* value;
std::atomic<struct node*> next;
bool try_acquire() noexcept {
bool expected = false;
return !(is_locked.compare_exchange_strong(expected, true));
}
bool try_acquire() noexcept {
bool expected = false;
return !(is_locked.compare_exchange_strong(expected, true));
}
void release() noexcept {
is_locked.store(false);
}
};
void release() noexcept {
is_locked.store(false);
}
};
using node_ptr = struct node*;
using node_ptr_rep = std::atomic<struct node*>;
using node_ptr = struct node*;
using node_ptr_rep = std::atomic<struct node*>;
struct topic_list{
node_ptr_rep start;
node_ptr_rep end;
@@ -49,7 +48,8 @@ class runqueue{
}
}
// ONLY PUSH ACQUIRED NODES
// ONLY PUSH ACQUIRED NODES,
// RELEASE WHEN NO LONGER IN USE
bool try_push(node_ptr node) {
auto ed = end.load();
if(ed) {
@@ -57,18 +57,15 @@ class runqueue{
node->next.store(ed);
if(end.compare_exchange_strong(ed, node)) {
ed->release();
node->release();
return true;
} else {
ed->release();
node->release();
return false;
}
} else return false;
} else {
if(end.compare_exchange_strong(ed, node)) {
start.store(node);
node->release();
return true;
} else {
return false;
@@ -76,7 +73,4 @@ class runqueue{
}
}
};
topic_list running;
topic_list waiting;
};
}
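
To make the intent of topic_list easier to follow, here is a compact standalone sketch of the same node-locking idea: nodes carry their own lock, only acquired nodes may be pushed, and a node is released once it is linked. Everything below (sketch_list, the int payload, the tail-locking step that the diff elides between hunks) is simplified and illustrative, not the library's exact code; note in particular that this try_acquire returns true on success, whereas the version above returns the negated compare_exchange result.

#include <atomic>
#include <cassert>

struct sketch_list {
    struct node {
        std::atomic_bool is_locked{false};
        int value{};                        // placeholder payload instead of gp::process_data*
        std::atomic<node*> next{nullptr};

        // Returns true when this call took the lock (convention may differ from gp's).
        bool try_acquire() noexcept {
            bool expected = false;
            return is_locked.compare_exchange_strong(expected, true);
        }
        void release() noexcept { is_locked.store(false); }
    };

    std::atomic<node*> start{nullptr};
    std::atomic<node*> end{nullptr};

    // Mirrors the shape of topic_list::try_push: only already-acquired nodes
    // go in, and the node is released once it is linked.
    bool try_push(node* n) {
        node* ed = end.load();
        if (ed) {
            if (!ed->try_acquire()) return false;   // assumed step: lock the current tail
            n->next.store(ed);
            bool ok = end.compare_exchange_strong(ed, n);
            ed->release();
            n->release();
            return ok;
        }
        if (end.compare_exchange_strong(ed, n)) {   // empty list: set both ends
            start.store(n);
            n->release();
            return true;
        }
        return false;
    }
};

int main() {
    sketch_list list;
    sketch_list::node a;
    assert(a.try_acquire());                // acquire before pushing, as the comment demands
    assert(list.try_push(&a));
}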

+9 -21  include/gp/vfs/scheduler.hpp

@@ -2,39 +2,27 @@
#include "gp/indexed_array.hpp"
#include "gp/vfs/process_data.hpp"
#include "gp/vfs/runqueue.hpp"
namespace gp{
class system;
class scheduler {
gp::specifics::platform_data root;
size_t current = 0;
struct scheduler {
topic_list::node_ptr previous;
topic_list::node_ptr current;
size_t id;
system& sys;
no_inline_decl(
void yield_to(size_t target_pid)
void yield_to(topic_list::node_ptr target)
);
scheduler(class system&, size_t token);
public:
scheduler(class system&);
[[noreturn]] void run(allocator& alloc) {
again:
run_once();
cleanup(alloc);
goto again;
}
void cleanup(allocator& alloc);
void run_once();
[[noreturn]] void startup();
void yield(){
yield_to(0);
}
void yield();
};
}
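
The scheduler now tracks list nodes (previous/current) instead of pids and delegates the "what runs next" decision to a scheme. Below is a toy, single-threaded sketch of that control flow: plain function calls stand in for the real context switch (specifics.push/pull), and a trivial round-robin object stands in for the scheme. All names here are placeholders, not gp's API.

#include <cstddef>
#include <cstdio>
#include <functional>
#include <vector>

enum class status { inactive, running, finished };

struct task {
    std::function<void()> fn;
    status state = status::inactive;
};

// Trivial stand-in for a scheduling scheme: one() and next() both just walk
// a vector round-robin. Real schemes decide this from per-state queues.
struct round_robin_scheme {
    std::vector<task*> queue;
    std::size_t idx = 0;
    task* one(std::size_t /*token*/) { return queue[idx++ % queue.size()]; }
    task* next(std::size_t token, task* /*current*/) { return one(token); }
};

struct toy_scheduler {
    round_robin_scheme& scheme;
    std::size_t id = 0;

    // Roughly the shape of startup()/yield(): ask the scheme for a task,
    // run it to completion if it has not started yet, then ask again.
    void run_n(std::size_t steps) {
        for (std::size_t i = 0; i < steps; ++i) {
            task* cur = (i == 0) ? scheme.one(id) : scheme.next(id, nullptr);
            if (cur->state == status::inactive) {
                cur->state = status::running;
                cur->fn();
                cur->state = status::finished;
            }
        }
    }
};

int main() {
    task a{[] { std::puts("task a"); }};
    task b{[] { std::puts("task b"); }};
    round_robin_scheme scheme{{&a, &b}};
    toy_scheduler sched{scheme};
    sched.run_n(2);                          // prints "task a" then "task b"
}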

+44 -0  include/gp/vfs/scheduling/simple_lockfree_scheduling.hpp

@@ -0,0 +1,44 @@
#pragma once
#include "gp/vfs/system.hpp"
namespace gp{
class simple_lockfree_scheduling : gp::scheduling_scheme {
gp::topic_list running;
gp::topic_list waiting;
gp::topic_list to_clean;
gp::topic_list naughty;
scheduler me;
public:
virtual gp::topic_list::node_ptr one(size_t) {
auto v = running.try_pop();
do{
if(v) return v;
v = running.try_pop();
}while(true);
}
virtual gp::topic_list::node_ptr next(size_t, gp::topic_list::node_ptr current) {
switch(current->value->state) {
case process_status::inactive:
case process_status::running:
do{}while(!running.try_push(current));
break;
case process_status::finished:
do{}while(!to_clean.try_push(current));
break;
case process_status::zombie:
do{}while(!naughty.try_push(current));
break;
case process_status::waiting:
do{}while(!waiting.try_push(current));
break;
}
return one(0);
}
virtual gp::scheduler& current_scheduler() {
return me;
}
};
}
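
The scheme's next() sorts a yielded process into one of four queues based on its state and then spins on one() until something is runnable. Here is a standalone sketch of just the routing step; std::deque replaces gp::topic_list purely for illustration (the diff spins with do{}while(!list.try_push(...)) because a lock-free push can fail under contention, while push_back cannot), and the enum values are placeholders.

#include <deque>

enum class process_status { inactive, running, waiting, finished, zombie };

struct task { process_status state; };

// Routing only: where a yielded task goes, depending on its state.
struct routing_scheme_sketch {
    std::deque<task*> running, waiting, to_clean, naughty;

    void route(task* current) {
        switch (current->state) {
            case process_status::inactive:
            case process_status::running:  running.push_back(current);  break;
            case process_status::finished: to_clean.push_back(current); break;
            case process_status::zombie:   naughty.push_back(current);  break;
            case process_status::waiting:  waiting.push_back(current);  break;
        }
    }
};

int main() {
    task t{process_status::finished};
    routing_scheme_sketch scheme;
    scheme.route(&t);
    return scheme.to_clean.size() == 1 ? 0 : 1;   // finished work is queued for cleanup
}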

+58 -61  include/gp/vfs/system.hpp

@@ -6,28 +6,34 @@
#include "gp/vector.hpp"
#include "gp/vfs/file_description.hpp"
#include "gp/vfs/filesystem.hpp"
#include "gp/vfs/runqueue.hpp"
#include "gp/vfs/scheduler.hpp"
namespace gp{
// TODO: thread safety
class scheduling_scheme {
public:
virtual topic_list::node_ptr one(size_t token) = 0;
virtual topic_list::node_ptr next(size_t token, topic_list::node_ptr current) = 0;
virtual scheduler& current_scheduler() = 0;
};
class system {
gp::reference_wrapper<gp::allocator> system_allocator;
gp::vector<gp::filesystem*> filesystems{system_allocator};
gp::vector<gp::scheduler> process_managers;
gp::indexed_array<gp::process_data*, gp_config::limits::max_processes> processes;
scheduling_scheme& scheme;
friend class scheduler;
public:
system(allocator& v, size_t scheduler_count)
system(allocator& v, scheduling_scheme& scheme_)
: system_allocator{v}
, process_managers{system_allocator}
{
gp_config::assertion(scheduler_count >= 1, "no scheduling in the system");
process_managers.reserve(scheduler_count);
gp::repeat(scheduler_count,[&](){
process_managers.emplace_back(*this);
});
}
, scheme{scheme_}
{}
size_t spawn(gp::function<void()> fn) {
constexpr size_t stack_sz = gp_config::limits::process_stack;
@@ -36,70 +42,61 @@ public:
process_data* created_process = (process_data*)system_allocator.get().allocate(sizeof(process_data));
gp_config::assertion(stack != nullptr, "failed to allocate a process data");
new(created_process) process_data(fn, stack, stack_sz);
return processes.push(created_process);
}
void run_once() {
(*process_managers.begin()).run_once();
(*process_managers.begin()).cleanup(system_allocator);
topic_list::node_ptr pp = (topic_list::node_ptr)system_allocator.get().allocate(
sizeof(topic_list::node)
);
new(pp) topic_list::node();
pp->value = created_process;
auto& sched = scheme.current_scheduler();
sched.yield_to(scheme.next(sched.id, pp));
return pp->value->pid;
}
};
scheduler::scheduler(class system& v)
: sys(v){}
scheduler::scheduler(class system& v, size_t token)
: id(token)
, sys(v)
{}
void scheduler::yield_to(size_t target_pid)
void scheduler::yield_to(topic_list::node_ptr target)
{
auto& cur = current ? sys.processes[current-1]->specifics : root;
auto& target = target_pid ? sys.processes[target_pid-1]->specifics : root;
current = target_pid;
cur.pull();
if(target_pid)
{
no_inline_decl([&](scheduler* new_p)){
auto& proc = *new_p->sys.processes[new_p->current-1];
if(proc.state == gp::process_status::inactive) {
proc.state = gp::process_status::running;
proc.fn();
proc.state = gp::process_status::finished;
new_p->yield_to(0);
}
}(static_cast<scheduler*>(target.push(this)));
} else {
[[maybe_unused]] volatile scheduler* new_p = static_cast<scheduler*>(target.push(this));
}
previous = current;
current->value->specifics.pull();
current = target;
no_inline_decl([&](scheduler* new_p)){
*new_p->previous->release();
auto& proc = *new_p->current->value;
if(proc.state == gp::process_status::inactive) {
proc.state = gp::process_status::running;
proc.fn();
proc.state = gp::process_status::finished;
new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
}
}(static_cast<scheduler*>(target->value->specifics.push(this)));
}
void scheduler::cleanup(allocator& alloc) {
sys.processes.foreach(
[&] (size_t pid, process_data* process) {
if(
process->state == process_status::finished
) {
process->fds.foreach([](size_t, file_description* fdes){
fdes->close();
});
gp_config::assertion(alloc.deallocate(process->stack), "can't deallocate the stack");
sys.processes.mark_for_removal(pid);
gp_config::assertion(alloc.deallocate(process), "can't deallocate the process data");
}
}
);
sys.processes.sweep_removed();
void scheduler::yield(){
current = sys.scheme.next(id, current);
yield_to(current);
}
void scheduler::run_once() {
sys.processes.foreach(
[&] (size_t pid, process_data* process) {
if(
process->state == process_status::inactive
|| process->state == process_status::running
) {
yield_to(pid+1);
}
void scheduler::startup()
{
current = sys.scheme.one(id);
no_inline_decl([&](scheduler* new_p)){
auto& proc = *new_p->current->value;
if(proc.state == gp::process_status::inactive) {
proc.state = gp::process_status::running;
proc.fn();
proc.state = gp::process_status::finished;
new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
}
);
}(static_cast<scheduler*>(target->value->specifics.push(this)));
}
}
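
With this change, system::spawn() wraps the new process in a topic_list node, hands that node to the scheduling scheme, yields to whatever the scheme returns, and reports the pid stored in the node's process. Below is a simplified standalone sketch of that flow; allocation, stacks and the actual yield_to are omitted, ownership is deliberately ignored, and every name is a placeholder rather than gp's API.

#include <cstddef>
#include <deque>
#include <functional>

struct process {
    std::size_t pid;
    std::function<void()> fn;
};

struct node { process* value = nullptr; };

// Stand-in for the scheduling_scheme hook used by spawn(): it takes the node
// wrapping the freshly created process and answers with whatever should run next.
struct scheme_sketch {
    std::deque<node*> running;
    node* next(std::size_t /*token*/, node* fresh) {
        running.push_back(fresh);            // enqueue the newly spawned process
        node* n = running.front();           // and pick whatever runs next
        running.pop_front();
        return n;
    }
};

struct system_sketch {
    scheme_sketch& scheme;
    std::size_t next_pid = 1;

    // Shape of the new spawn(): build the process, wrap it in a node, hand the
    // node to the scheme, and return the pid stored in the node's process.
    std::size_t spawn(std::function<void()> fn) {
        auto* created = new process{next_pid++, std::move(fn)};   // leak ignored in the sketch
        auto* pp = new node{created};
        node* target = scheme.next(0, pp);
        (void)target;                        // the real code yield_to()s this node here
        return pp->value->pid;
    }
};

int main() {
    scheme_sketch scheme;
    system_sketch sys{scheme};
    return sys.spawn([] {}) == 1 ? 0 : 1;    // first process gets pid 1 in this sketch
}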
