General Purpose library for Freestanding C++ and POSIX systems


#pragma once

#include "gp/algorithms/foreach.hpp"
#include "gp/algorithms/reference.hpp"
#include "gp/utils/allocators/allocator.hpp"
#include "gp/containers/vector.hpp"
#include "gp/ipc/file_description.hpp"
#include "gp/ipc/filesystem.hpp"
#include "gp/system/task_queue.hpp"
#include "gp/system/scheduler.hpp"

// THIS FILE IS A MESS
// Reworking will be suffering

namespace gp{
// TODO: thread safety
namespace system {

class system; // forward declaration: scheduling_scheme refers to system before its definition

class scheduling_scheme {
public:
    virtual task_queue::node_ptr one(size_t token) = 0;
    virtual task_queue::node_ptr next(size_t token, task_queue::node_ptr current) = 0;
    virtual void push(task_queue::node_ptr current) = 0;
    virtual void link(system&) = 0;
    virtual scheduler& current_scheduler() = 0;
};

class system {
    friend struct scheduler;
public:
    gp::reference_wrapper<gp::allocator> system_allocator;
    gp::vector<gp::filesystem*> filesystems{system_allocator};
    gp::vector<gp::system::scheduler> process_managers;
    scheduling_scheme& scheme;
    task_queue::node main_context;

    system(allocator& v, scheduling_scheme& scheme_, gp::buffer<char> stack_estimate = gp::buffer<char>{nullptr, nullptr})
    : system_allocator{v}
    , process_managers{system_allocator}
    , scheme{scheme_}
    {
        [[maybe_unused]] volatile char a;
        if(stack_estimate.size() == 0) {
            // No estimate supplied: probe the current stack with a local and round
            // its address down to a process_stack boundary to approximate the
            // region used by the calling (main) context.
            auto seed = (char*)&a;
            auto jump = (uintptr_t)seed % gp_config::limits::process_stack;
            seed -= jump + (jump == 0)*gp_config::limits::process_stack;
            auto page_cnt = 1;
            if(jump == 0) page_cnt++;
            stack_estimate = gp::buffer<char>{seed, (size_t)(gp_config::limits::process_stack*page_cnt)};
        }
        main_context.value = (process_data*)system_allocator.get().allocate(sizeof(process_data));
        gp_config::assertion(main_context.value != nullptr, "failed to allocate return to main switch");
        new(main_context.value) process_data(gp::function<void()>([]() -> void{}, nullopt), stack_estimate.begin().data, stack_estimate.size(), gp::unique_ptr<base_process_info>::make(system_allocator));
        scheme.link(*this);
    }

    // Creates a new cooperative process around fn and hands it to the
    // scheduling scheme; returns the pid of the spawned process.
    size_t spawn(gp::function<void()> fn) {
        constexpr size_t stack_sz = gp_config::limits::process_stack;
        void* stack = system_allocator.get().allocate(stack_sz);
        gp_config::assertion(stack != nullptr, "failed to allocate a stack");
        process_data* created_process = (process_data*)system_allocator.get().allocate(sizeof(process_data));
        gp_config::assertion(created_process != nullptr, "failed to allocate a process data");
        new(created_process) process_data(fn, stack, stack_sz, gp::unique_ptr<base_process_info>::make(system_allocator));
        task_queue::node_ptr pp = (task_queue::node_ptr)system_allocator.get().allocate(
            sizeof(task_queue::node)
        );
        new(pp) task_queue::node();
        pp->value = created_process;
        auto pid = pp->value->pid;
        scheme.push(pp);
        return pid;
    }

    // Starts every scheduler through the user-provided thread_starter, which
    // receives a runner it may execute on a thread of its choosing.
    template<typename threading_function>
    void run(threading_function thread_starter) {
        for(auto& i : process_managers) {
            gp::function<void(void)> runner{
                [&](){
                    i.run();
                },
                system_allocator
            };
            thread_starter(runner);
        }
    }

    void yield() {
        scheme.current_scheduler().yield();
    }
};

scheduler::scheduler(class system& v, size_t token)
: id(token)
, sys(v)
, main_context_data{gp::function<void()>{[](){}, v.system_allocator}, nullptr, size_t(0), gp::unique_ptr<base_process_info>::make(v.system_allocator)}
, main_context()
{
    main_context.value = &main_context_data;
    gp_config::assertion(!main_context.try_acquire(), "node should not be acquired on creation");
}

// Runs the process referenced by new_p->current if it has not started yet,
// updating its status around the call; returns the scheduler it was given.
no_inline_decl(inline scheduler* spawner (scheduler* new_p)) {
    auto& proc = *new_p->current->value;
    if(proc.state == gp::system::process_status::inactive) {
        proc.state = gp::system::process_status::running;
        proc.fn();
        proc.state = gp::system::process_status::finished;
    }
    return new_p;
}

void scheduler::yield_to(task_queue::node_ptr target)
{
    // Save the outgoing context, switch to the target one, then keep asking the
    // scheduling scheme for the next node every time control comes back here.
    previous = current;
    current->value->specifics.pull();
    current = target;
    auto new_p = this;
    do{
        new_p = spawner(static_cast<scheduler*>(target->value->specifics.push(new_p)));
        target = new_p->sys.scheme.next(new_p->id, new_p->current);
        new_p->previous = new_p->current;
        new_p->current->value->specifics.pull();
        new_p->current = target;
    } while(true);
}

void scheduler::yield(){
    current = sys.scheme.next(id, current);
    yield_to(current);
}

void scheduler::run()
{
    // Registers the calling context as process 0, queues it, and enters the
    // scheduling loop.
    main_context_data.pid = 0;
    main_context_data.state = process_status::running;
    main_context_data.specifics.pull();
    current = &main_context;
    sys.scheme.push(&main_context);
    auto new_p = spawner(static_cast<scheduler*>(current->value->specifics.push(this)));
    new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
}

} // namespace system
} // namespace gp
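
For orientation, below is a minimal usage sketch of the API defined above. It is illustrative only: it assumes this header is included and that the project supplies a concrete gp::allocator and a concrete scheduling_scheme; page_allocator and round_robin_scheme are hypothetical placeholder names, not types defined by this library.

// Usage sketch (not part of this header). page_allocator and round_robin_scheme
// stand in for whatever gp::allocator and gp::system::scheduling_scheme
// implementations the project provides.
#include "gp/system/system.hpp" // illustrative path for this header

int main() {
    page_allocator alloc;        // hypothetical gp::allocator implementation
    round_robin_scheme scheme;   // hypothetical scheduling_scheme implementation
    gp::system::system sys{alloc, scheme};

    // Queue a cooperative task; spawn() returns its pid.
    sys.spawn(gp::function<void()>{[&sys](){
        // ...do some work, then hand control back to the scheduler
        sys.yield();
    }, sys.system_allocator});

    // Drive one runner per scheduler; here each runs on the calling thread.
    sys.run([](auto& runner){ runner(); });
    return 0;
}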