General Purpose library for Freestanding C++ and POSIX systems

#pragma once

#include "gp/algorithm/foreach.hpp"
#include "gp/algorithm/reference.hpp"
#include "gp/allocator/allocator.hpp"
#include "gp/vector.hpp"
#include "gp/vfs/file_description.hpp"
#include "gp/vfs/filesystem.hpp"
#include "gp/vfs/runqueue.hpp"
#include "gp/vfs/scheduler.hpp"

namespace gp{
// TODO: thread safety

// Scheduling policy interface: decides which task a scheduler starts with,
// which task runs next, and which scheduler is currently in charge.
class scheduling_scheme {
public:
    virtual topic_list::node_ptr one(size_t token) = 0;
    virtual topic_list::node_ptr next(size_t token, topic_list::node_ptr current) = 0;
    virtual scheduler& current_scheduler() = 0;
};
// Top-level container: owns the system allocator, the filesystems and the
// schedulers, and spawns new processes through the scheduling scheme.
class system {
    gp::reference_wrapper<gp::allocator> system_allocator;
    gp::vector<gp::filesystem*> filesystems{system_allocator};
    gp::vector<gp::scheduler> process_managers;
    scheduling_scheme& scheme;

    friend class scheduler;
public:
    system(allocator& v, scheduling_scheme& scheme_)
    : system_allocator{v}
    , process_managers{system_allocator}
    , scheme{scheme_}
    {}
    // Creates a new process running fn: allocates its stack and control
    // block, wraps it in a run-queue node and yields to whatever the scheme
    // decides should run next. Returns the new process id.
    size_t spawn(gp::function<void()> fn) {
        constexpr size_t stack_sz = gp_config::limits::process_stack;
        void* stack = system_allocator.get().allocate(stack_sz);
        gp_config::assertion(stack != nullptr, "failed to allocate a stack");

        process_data* created_process = (process_data*)system_allocator.get().allocate(sizeof(process_data));
        gp_config::assertion(created_process != nullptr, "failed to allocate process data");
        new(created_process) process_data(fn, stack, stack_sz);

        topic_list::node_ptr pp = (topic_list::node_ptr)system_allocator.get().allocate(
            sizeof(topic_list::node)
        );
        new(pp) topic_list::node();
        pp->value = created_process;

        auto& sched = scheme.current_scheduler();
        sched.yield_to(scheme.next(sched.id, pp));
        return pp->value->pid;
    }
};
scheduler::scheduler(class system& v, size_t token)
: id(token)
, sys(v)
{}
// Switches execution from the current task to target. The trailing lambda
// runs on the resumed side of the switch: it releases the node that was
// left behind and, if the task being entered has never run, executes its
// entry function and then asks the scheme for the next task.
void scheduler::yield_to(topic_list::node_ptr target)
{
    previous = current;
    current->value->specifics.pull();
    current = target;
    no_inline_decl([&](scheduler* new_p)){
        *new_p->previous->release();
        auto& proc = *new_p->current->value;
        if(proc.state == gp::process_status::inactive) {
            proc.state = gp::process_status::running;
            proc.fn();
            proc.state = gp::process_status::finished;
            new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
        }
    }(static_cast<scheduler*>(target->value->specifics.push(this)));
}
// Asks the scheme for the next task and switches to it; current is left
// untouched here so that yield_to sees the task we are leaving.
void scheduler::yield(){
    auto next = sys.scheme.next(id, current);
    yield_to(next);
}
// Entry point of a scheduler: fetches the first task from the scheme and
// starts executing it. The lambda mirrors the one in yield_to, minus the
// release of a previous node, since there is none yet.
void scheduler::startup()
{
    current = sys.scheme.one(id);
    no_inline_decl([&](scheduler* new_p)){
        auto& proc = *new_p->current->value;
        if(proc.state == gp::process_status::inactive) {
            proc.state = gp::process_status::running;
            proc.fn();
            proc.state = gp::process_status::finished;
            new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
        }
    }(static_cast<scheduler*>(current->value->specifics.push(this)));
}

}
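
Below is a minimal sketch of how the pieces above could be wired together, assuming a trivial single-scheduler, first-in-first-out scheme. The names fifo_scheme, my_allocator, the run-queue calls try_push/try_pop and the exact gp::function construction are illustrative assumptions, not part of this header.

// --- example_usage.cpp (illustrative only, not part of the library) ---
#include "gp/vfs/system.hpp"

// Naive scheme: one scheduler, tasks served in FIFO order. The run-queue
// operations are assumed names for whatever gp/vfs/runqueue.hpp provides.
class fifo_scheme : public gp::scheduling_scheme {
    gp::topic_list run_list{};       // assumed default-constructible run queue
    gp::scheduler* sched = nullptr;  // set once the scheduler exists
public:
    void attach(gp::scheduler& s) { sched = &s; }

    gp::topic_list::node_ptr one(size_t) override {
        return run_list.try_pop();               // first task to run
    }

    gp::topic_list::node_ptr next(size_t, gp::topic_list::node_ptr current) override {
        run_list.try_push(current);              // re-queue the task we were handed
        return run_list.try_pop();               // and pick the next one
    }

    gp::scheduler& current_scheduler() override {
        return *sched;
    }
};

int main() {
    static my_allocator alloc;                   // any gp::allocator implementation
    static fifo_scheme scheme;
    static gp::system sys{alloc, scheme};
    static gp::scheduler sched{sys, 0};          // token 0 identifies this scheduler
    scheme.attach(sched);

    // spawn() switches to the scheme's choice right away, as the code above
    // shows; the gp::function construction here is an assumed signature.
    sys.spawn(gp::function<void()>{[](){
        // process body runs once the scheduler picks this task up
    }, alloc});

    sched.startup();                             // hand control to the first task
}

The scheme owns the run queue in this sketch because system and scheduler only ever ask it for nodes; keeping the ordering policy in one replaceable class is the point of the scheduling_scheme interface.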