General Purpose library for Freestanding C++ and POSIX systems

#pragma once

#include "gp/algorithm/foreach.hpp"
#include "gp/algorithm/reference.hpp"
#include "gp/allocator/allocator.hpp"
#include "gp/vector.hpp"
#include "gp/vfs/file_description.hpp"
#include "gp/vfs/filesystem.hpp"
#include "gp/vfs/runqueue.hpp"
#include "gp/vfs/scheduler.hpp"

namespace gp{

// TODO: thread safety
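// Scheduling policy interface. Implementations decide which node a scheduler
// picks up first (one), which node follows the current one (next) and where
// newly spawned nodes are queued (push); link() ties the policy to a system
// and current_scheduler() returns the scheduler that is currently active from
// the policy's point of view.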
class scheduling_scheme {
public:
    virtual topic_list::node_ptr one(size_t token) = 0;
    virtual topic_list::node_ptr next(size_t token, topic_list::node_ptr current) = 0;
    virtual void push(topic_list::node_ptr current) = 0;
    virtual void link(class system&) = 0;
    virtual scheduler& current_scheduler() = 0;
};
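
// Central runtime object: holds the allocator used by the runtime, the
// registered filesystems, the scheduler instances (process_managers), the
// scheduling policy and the node representing the context that created the
// system.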
class system {
    friend struct scheduler;
public:
    gp::reference_wrapper<gp::allocator> system_allocator;
    gp::vector<gp::filesystem*> filesystems{system_allocator};
    gp::vector<gp::scheduler> process_managers;
    scheduling_scheme& scheme;
    topic_list::node main_context;
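
    // Sets up the system around an allocator and a scheduling policy. If no
    // stack estimate is given, the address of a local variable is used to
    // locate the current stack, and the estimate is aligned down to a
    // process_stack-sized slot so the calling context can be tracked like a task.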
    system(allocator& v, scheduling_scheme& scheme_, gp::buffer<char> stack_estimate = gp::buffer<char>{nullptr, nullptr})
    : system_allocator{v}
    , process_managers{system_allocator}
    , scheme{scheme_}
    {
        [[maybe_unused]] volatile char a;
        if(stack_estimate.size() == 0) {
            auto seed = (char*)&a;
            auto jump = (uintptr_t)seed % gp_config::limits::process_stack;
            seed -= jump + (jump == 0)*gp_config::limits::process_stack;
            auto page_cnt = 1;
            if(jump == 0) page_cnt++;
            stack_estimate = gp::buffer<char>{seed, (size_t)(gp_config::limits::process_stack*page_cnt)};
        }
        main_context.value = (process_data*)system_allocator.get().allocate(sizeof(process_data));
        gp_config::assertion(main_context.value != nullptr, "failed to allocate return to main switch");
        new(main_context.value) process_data(gp::function<void()>([]() -> void{}, nullopt), stack_estimate.begin().data, stack_estimate.size(), gp::unique_ptr<base_process_info>::make(system_allocator));
        scheme.link(*this);
    }
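
    // Spawns a cooperative task: allocates a dedicated stack and a process_data
    // record, wraps them in a fresh topic_list node and hands that node to the
    // scheduling scheme. Returns the pid assigned to the new task.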
    size_t spawn(gp::function<void()> fn) {
        constexpr size_t stack_sz = gp_config::limits::process_stack;
        void* stack = system_allocator.get().allocate(stack_sz);
        gp_config::assertion(stack != nullptr, "failed to allocate a stack");
        process_data* created_process = (process_data*)system_allocator.get().allocate(sizeof(process_data));
        gp_config::assertion(created_process != nullptr, "failed to allocate a process data");
        new(created_process) process_data(fn, stack, stack_sz, gp::unique_ptr<base_process_info>::make(system_allocator));

        topic_list::node_ptr pp = (topic_list::node_ptr)system_allocator.get().allocate(
            sizeof(topic_list::node)
        );
        new(pp) topic_list::node();
        pp->value = created_process;
        auto pid = pp->value->pid;
        scheme.push(pp);
        return pid;
    }
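
    // Launches every scheduler through the caller-supplied thread_starter,
    // which receives a gp::function that drives one scheduler's run loop;
    // a typical thread_starter hands each runner to its own thread.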
    template<typename threading_function>
    void run(threading_function thread_starter) {
        for(auto& i : process_managers) {
            gp::function<void(void)> runner{
                [&](){
                    i.run();
                },
                system_allocator
            };
            thread_starter(
                runner
            );
        }
    }

    void yield() {
        scheme.current_scheduler().yield();
    }
};
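
// Ties a scheduler to its owning system and prepares the node that will stand
// for this scheduler's original (native) context in the run queue.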
scheduler::scheduler(class system& v, size_t token)
: id(token)
, sys(v)
, main_context_data{gp::function<void()>{[](){}, v.system_allocator}, nullptr, size_t(0), gp::unique_ptr<base_process_info>::make(v.system_allocator)}
, main_context()
{
    main_context.value = &main_context_data;
    gp_config::assertion(!main_context.try_acquire(), "node should not be acquired on creation");
}
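
// Trampoline run right after a context switch: if the node just switched to is
// still inactive, its task body is executed and its state moved to finished.
// Kept out-of-line (no_inline_decl), presumably so the switch point is not
// disturbed by inlining.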
no_inline_decl(inline scheduler* spawner (scheduler* new_p)) {
    auto& proc = *new_p->current->value;
    if(proc.state == gp::process_status::inactive) {
        proc.state = gp::process_status::running;
        proc.fn();
        proc.state = gp::process_status::finished;
    }
    return new_p;
}
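
// Hands control to `target` and keeps the scheduling loop going: each round
// appears to save the outgoing context (specifics.pull()), resume the target
// context (specifics.push()), run it through spawner, then ask the scheme for
// the next node. The do/while(true) never exits on its own; execution leaves
// it only by switching into another saved context.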
void scheduler::yield_to(topic_list::node_ptr target)
{
    previous = current;
    current->value->specifics.pull();
    current = target;
    auto new_p = this;
    do{
        new_p = spawner(static_cast<scheduler*>(target->value->specifics.push(new_p)));
        target = new_p->sys.scheme.next(new_p->id, new_p->current);
        new_p->previous = new_p->current;
        new_p->current->value->specifics.pull();
        new_p->current = target;
    } while(true);
}

void scheduler::yield(){
    current = sys.scheme.next(id, current);
    yield_to(current);
}
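
// Worker entry point: registers the thread's own context as a runnable node,
// then switches to whatever the scheme selects next; given the loop in
// yield_to, this call is not expected to return.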
void scheduler::run()
{
    main_context_data.pid = 0;
    main_context_data.state = process_status::running;
    main_context_data.specifics.pull();
    current = &main_context;
    sys.scheme.push(&main_context);
    auto new_p = spawner(static_cast<scheduler*>(current->value->specifics.push(this)));
    new_p->yield_to(new_p->sys.scheme.next(new_p->id, new_p->current));
}

}
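
// Usage sketch (illustration only): `my_allocator` and `my_scheme` below are
// hypothetical stand-ins for a user-provided gp::allocator implementation and
// a concrete scheduling_scheme; neither is defined by this header.
//
//     my_allocator alloc;
//     my_scheme scheme;
//     gp::system sys{alloc, scheme};
//
//     // queue a cooperative task
//     sys.spawn(gp::function<void()>{[](){ /* task body */ }, alloc});
//
//     // start the schedulers; a real program would typically hand each
//     // runner to a newly created thread
//     sys.run([](auto& runner){ /* start a thread that calls runner() */ });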