19 #include <x86gprintrin.h>
// Forward declarations: the cleanup task's body and the core scheduler entry.
26 static void _cleaner_task_impl(
void);
27 static void _schedule(
void);
// Declares a task list named `tasks_<name>` plus static inline
// enqueue/dequeue wrappers that forward to _enqueue_task/_dequeue_task.
33 #define NAMED_TASKLIST(name) \
34 struct tasklist tasks_##name = { }; \
35 static inline void _enqueue_##name(struct task *task) { \
36 _enqueue_task(&tasks_##name, task); } \
37 static inline struct task *_dequeue_##name() { \
38 return _dequeue_task(&tasks_##name); }
// Scheduler-private module state.
// NOTE(review): several declarations between these lines are elided from this view.
41 static struct task _cleaner_task;
42 static struct task _first_task;
// Accumulated nanoseconds spent idle, and the timestamp when idling began.
68 static uint64_t _idle_time = 0;
69 static uint64_t _idle_start = 0;
// Timestamp of the last accounting update, remaining budget of the running
// task's time slice, and timestamp of the last timer tick.
70 static uint64_t _last_time = 0;
71 static uint64_t _time_slice_remaining = 0;
72 static uint64_t _last_timer_time = 0;
// While _scheduler_postpone_count is non-zero, a requested reschedule is
// recorded in _scheduler_postponed instead of switching immediately
// (see _release_scheduler_lock); _scheduler_lock nests similarly.
73 static size_t _scheduler_lock = 0;
74 static size_t _scheduler_postpone_count = 0;
75 static bool _scheduler_postponed =
false;
// TSC ticks per nanosecond, calibrated by _discover_cpu_speed — despite the
// name this is a tick/ns conversion factor, not instructions. TODO confirm.
76 static uint64_t _instr_per_ns;
// Enters a scheduler-locked region; visibly increments the postpone counter
// so reschedules are deferred until release. Body partially elided here.
// (sic: "aquire" — misspelling kept; call sites throughout use this name.)
78 static void _aquire_scheduler_lock()
81 _scheduler_postpone_count++;
// Leaves a scheduler-locked region. When the outermost postpone scope exits
// and a reschedule was requested meanwhile, the flag is cleared (and,
// presumably, _schedule() is then invoked — that call is elided from view).
85 static void _release_scheduler_lock()
87 _scheduler_postpone_count--;
88 if (_scheduler_postpone_count == 0) {
89 if (_scheduler_postponed) {
90 _scheduler_postponed =
false;
95 if (_scheduler_lock == 0) {
// Calibrates _instr_per_ns: measures the TSC delta across a fixed wait
// (the wait itself is elided from this view) and divides by 1,000,000.
// Clamped to at least 1 to avoid a divide-by-zero in _get_cpu_time_ns.
100 static void _discover_cpu_speed()
103 uint64_t curr_rtsc = __rdtsc();
105 curr_rtsc = __rdtsc() - curr_rtsc;
106 _instr_per_ns = curr_rtsc / 1000000;
108 if (_instr_per_ns == 0) _instr_per_ns = 1;
// Converts the raw TSC reading to nanoseconds via the calibrated factor.
111 static inline uint64_t _get_cpu_time_ns()
113 return (__rdtsc()) / _instr_per_ns;
// Debug helper: logs one task with a tag prefix (body elided from this view).
116 static void _print_task(
const char* tag,
const struct task *
task)
// Traces task state transitions. The second, empty definition is presumably
// the non-debug variant — the surrounding #if/#else/#endif is elided here;
// as shown, the redefinition would be a compile error. TODO confirm guards.
122 #define TASK_ACTION(action, task) do { _print_task(action, task); } while(0)
124 #define TASK_ACTION(action, task)
// Debug helper: walks a task list and prints each entry via _print_task.
// Loop advance and closing braces are elided from this view.
127 static void _print_tasklist(
const char *name,
const struct tasklist *list)
131 while (
task != NULL) {
132 _print_task(
"\t",
task);
// Overload (C++): prints the task list corresponding to a task's current
// state, or warns when that state has no backing list. The lookup that
// produces `list` is elided from this view.
137 static void _print_tasklist(
const struct task *
task)
140 const char *state_name = _state_names[
task->
state];
142 Logger::Warning(__func__,
"no tasklist available for %s tasks.", state_name);
146 _print_tasklist(state_name, list);
149 static void _on_timer();
// Scheduler bootstrap fragment (enclosing function's name elided): adopts
// the current execution context as the first task, calibrates the TSC,
// and seeds the accounting timestamps.
154 struct task *this_task = &_first_task;
156 _discover_cpu_speed();
180 _last_time = _get_cpu_time_ns();
181 _last_timer_time = _last_time;
// Runs on a task's first entry (its address is pushed onto the initial
// stack in the creation code below); appears to release the scheduler lock
// taken before the switch — body largely elided from this view.
189 static void _task_starting()
197 if (_scheduler_lock == 0) {
// Runs when a task's entry function returns; reaching this panic means a
// stopped task was scheduled again, which must never happen.
202 static void _task_stopping()
208 panic(
"Attempted to schedule a stopped task");
// Pushes one machine word onto a downward-growing stack: decrements the
// stack pointer by one word, then stores `value` at the new top.
213 static inline void _stack_push_word(
void **stack_pointer,
size_t value)
216 *(uintptr_t*)stack_pointer -=
sizeof(
size_t);
218 **(
size_t**)stack_pointer = value;
// Fragment of _enqueue_task (signature elided): empty-list and tail-append
// cases of a singly linked list insert.
223 if (list->
head == NULL) {
226 if (list->
tail != NULL) {
// Pops the head task from a list. The first NULL check guards the
// empty-list case; the second presumably handles the list becoming empty
// after unlinking (tail reset) — those lines are elided from this view.
235 static struct task *_dequeue_task(
struct tasklist *list)
238 if (list->
head == NULL) {
247 if (list->
head == NULL) {
// Fragment of _remove_task: validates that `previous` actually links to
// `task`, then unlinks it, fixing up the list tail and the predecessor's
// next pointer (the unlink statements themselves are elided from view).
260 if (previous != NULL && previous->
next !=
task) {
261 panic(
"Bogus arguments to _remove_task.");
269 list->
tail = previous;
272 if (previous != NULL) {
// Pops the next runnable task (body elided from this view).
284 static struct task *_tasks_dequeue_ready()
// Task-creation fragment (enclosing signature elided): allocates the task
// struct when no caller-provided `storage` is given, allocates a stack,
// then builds the initial frame so the first context switch "returns" into
// _task_starting -> entry -> _task_stopping. The four trailing zero pushes
// presumably seed the callee-saved registers popped by the context-switch
// code — TODO confirm against the switch assembly.
291 struct task *new_task = storage;
292 if (storage == NULL) {
296 if (new_task == NULL) {
297 panic(
"Unable to allocate memory for new task struct.");
304 panic(
"Unable to allocate memory for new task stack.");
309 _stack_push_word(&stack_pointer, 0);
311 _stack_push_word(&stack_pointer, (
size_t)_task_stopping);
313 _stack_push_word(&stack_pointer, (
size_t)entry);
316 _stack_push_word(&stack_pointer, (
size_t)_task_starting);
318 _stack_push_word(&stack_pointer, 0);
319 _stack_push_word(&stack_pointer, 0);
320 _stack_push_word(&stack_pointer, 0);
321 _stack_push_word(&stack_pointer, 0);
322 new_task->
stack_top = (uintptr_t)stack_pointer;
324 new_task->
next = NULL;
// Time-accounting fragment (enclosing function elided): computes elapsed
// nanoseconds since _last_time — the line charging `delta` to the current
// task is elided — then advances _last_time.
338 uint64_t current_time = _get_cpu_time_ns();
339 uint64_t delta = current_time - _last_time;
345 _last_time = current_time;
// Core scheduler. If reschedules are currently postponed it only records
// the request. Otherwise it dequeues the next ready task; when none is
// runnable it idles (time slice cleared, idle start stamped) until a task
// appears, charging the wait to _idle_time. The actual context-switch call
// is elided from this view.
348 static void _schedule()
350 if (_scheduler_postpone_count != 0) {
352 _scheduler_postponed =
true;
360 struct task *
task = _tasks_dequeue_ready();
370 _time_slice_remaining = 0;
378 _idle_start = _get_cpu_time_ns();
387 }
while (
task = _tasks_dequeue_ready(),
task == NULL);
// NOTE(review): start-minus-now yields a huge wrapped value under uint64_t;
// this looks reversed (expected now - _idle_start), but elided lines may
// reassign _idle_start in between — TODO confirm before changing.
392 _idle_start = _idle_start - _get_cpu_time_ns();
393 _idle_time += _idle_start;
401 _last_timer_time = _get_cpu_time_ns();
// Fragments of three public entry points (names and bodies elided): each
// brackets its work with _aquire_scheduler_lock/_release_scheduler_lock.
409 _aquire_scheduler_lock();
413 _release_scheduler_lock();
424 _aquire_scheduler_lock();
428 _release_scheduler_lock();
433 _aquire_scheduler_lock();
437 _release_scheduler_lock();
// Timer tick handler. Under the scheduler lock it walks the sleeping list,
// waking tasks whose deadline has passed (the deadline comparison is elided)
// and flagging a reschedule; then it charges the elapsed time against the
// running task's slice and flags a reschedule when the slice expires.
// The final need_schedule dispatch is elided from this view.
448 static void _on_timer()
450 _aquire_scheduler_lock();
452 struct task *pre = NULL;
453 struct task *
task = tasks_sleeping.head;
455 bool need_schedule =
false;
456 uint64_t time = _get_cpu_time_ns();
459 while (
task != NULL) {
463 _remove_task(&tasks_sleeping,
task, pre);
466 need_schedule =
true;
473 if (_time_slice_remaining != 0) {
474 time_delta = time - _last_timer_time;
475 _last_timer_time = time;
476 if (time_delta >= _time_slice_remaining) {
480 need_schedule =
true;
483 _time_slice_remaining -= time_delta;
491 _release_scheduler_lock();
// Fragments of two more lock-bracketed entry points (names/bodies elided).
497 _aquire_scheduler_lock();
503 _release_scheduler_lock();
516 _aquire_scheduler_lock();
526 _release_scheduler_lock();
// Releases a stopped task's resources (body elided from this view).
529 static void _clean_stopped_task(
struct task *
task)
// Body of the dedicated cleaner task: under the scheduler lock, drains the
// stopped list and releases each task via _clean_stopped_task.
540 static void _cleaner_task_impl()
544 _aquire_scheduler_lock();
546 while (tasks_stopped.head != NULL) {
547 task = _dequeue_stopped();
549 _clean_stopped_task(
task);
556 _release_scheduler_lock();
// Fragments of two final lock-bracketed entry points; the second iterates
// over a task list (loop body elided from this view).
562 _aquire_scheduler_lock();
572 _release_scheduler_lock();
577 _aquire_scheduler_lock();
595 }
while (
task != NULL);
601 _release_scheduler_lock();