/**
 * @file tasks.cpp
 * @author Micah Switzer ([email protected])
 * @brief Kernel task and scheduling implementation
 * @version 0.3
 * @date 2020-08-29
 *
 * @copyright Copyright the Xyris Contributors (c) 2020
 *
 */
#include <Arch/Arch.hpp>
#include <Arch/Memory.hpp>
#include <Scheduler/tasks.hpp>
#include <Panic.hpp>
#include <Memory/heap.hpp>
#include <Library/stdio.hpp>
#include <Devices/Serial/rs232.hpp>
#include <stdint.h>
#include <x86gprintrin.h> // needed for __rdtsc
#include <Arch/i686/timer.hpp> // TODO: Remove ASAP
#include <Logger.hpp>

/* forward declarations */
static void _enqueue_task(struct tasklist *, task *);
static struct task *_dequeue_task(struct tasklist *);
static void _cleaner_task_impl(void);
static void _schedule(void);
extern "C" void _tasks_enqueue_ready(struct task *task);
void tasks_update_time();
void _wakeup(struct task *task);

/* macro to create a new named tasklist and associated helper functions */
#define NAMED_TASKLIST(name) \
    struct tasklist tasks_##name = { /* Zero */ }; \
    static inline void _enqueue_##name(struct task *task) { \
        _enqueue_task(&tasks_##name, task); } \
    static inline struct task *_dequeue_##name() { \
        return _dequeue_task(&tasks_##name); }

struct task *current_task = NULL;
static struct task _cleaner_task;
static struct task _first_task;

struct tasklist tasks_ready = { /* Zero */ };
NAMED_TASKLIST(sleeping);
NAMED_TASKLIST(stopped);
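
// NAMED_TASKLIST(sleeping) expands to the list `tasks_sleeping` plus the
// helpers `_enqueue_sleeping()` and `_dequeue_sleeping()`; likewise for
// `stopped`.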

// map between task state and the list it is in
static struct tasklist *_state_lists[TASK_STATE_COUNT] = {
    [TASK_RUNNING] = NULL, // not in a list
    [TASK_READY] = &tasks_ready,
    [TASK_SLEEPING] = &tasks_sleeping,
    [TASK_BLOCKED] = NULL, // in a list specific to the blocking primitive
    [TASK_STOPPED] = &tasks_stopped,
    [TASK_PAUSED] = NULL, // not in a list
};

// map between task state and its name
static const char *_state_names[TASK_STATE_COUNT] = {
    [TASK_RUNNING] = "RUNNING",
    [TASK_READY] = "READY",
    [TASK_SLEEPING] = "SLEEPING",
    [TASK_BLOCKED] = "BLOCKED",
    [TASK_STOPPED] = "STOPPED",
    [TASK_PAUSED] = "PAUSED",
};

static uint64_t _idle_time = 0;
static uint64_t _idle_start = 0;
static uint64_t _last_time = 0;
static uint64_t _time_slice_remaining = 0;
static uint64_t _last_timer_time = 0;
static size_t _scheduler_lock = 0;
static size_t _scheduler_postpone_count = 0;
static bool _scheduler_postponed = false;
static uint64_t _instr_per_ns;
static void _aquire_scheduler_lock()
{
    asm volatile("cli");
    _scheduler_postpone_count++;
    _scheduler_lock++;
}

static void _release_scheduler_lock()
{
    _scheduler_postpone_count--;
    if (_scheduler_postpone_count == 0) {
        if (_scheduler_postponed) {
            _scheduler_postponed = false;
            _schedule();
        }
    }
    _scheduler_lock--;
    if (_scheduler_lock == 0) {
        asm volatile("sti");
    }
}

static void _discover_cpu_speed()
{
    // wait out the remainder of the current timer tick, then measure the
    // TSC delta across exactly one full tick
    uint32_t curr_tick = timer_tick;
    uint64_t curr_rtsc = __rdtsc();
    while (timer_tick != curr_tick + 1) { }
    curr_rtsc = __rdtsc() - curr_rtsc;
    // one timer tick is assumed to be 1ms (1,000,000ns), giving TSC ticks per ns
    _instr_per_ns = curr_rtsc / 1000000;
    // will be inaccurate, but it's the best we can do in these circumstances
    if (_instr_per_ns == 0) _instr_per_ns = 1;
}

static inline uint64_t _get_cpu_time_ns()
{
    return (__rdtsc()) / _instr_per_ns;
}

static void _print_task(const char* tag, const struct task *task)
{
    Logger::Debug(tag, "%s is %s", task->name, _state_names[task->state]);
}

#ifdef DEBUG
#define TASK_ACTION(action, task) do { _print_task(action, task); } while (0)
#else
#define TASK_ACTION(action, task)
#endif

static void _print_tasklist(const char *name, const struct tasklist *list)
{
    struct task *task = list->head;
    Logger::Verbose(__func__, "%s:", name);
    while (task != NULL) {
        _print_task("\t", task);
        task = task->next;
    }
}

static void _print_tasklist(const struct task *task)
{
    const struct tasklist *list = _state_lists[task->state];
    const char *state_name = _state_names[task->state];
    if (list == NULL) {
        Logger::Warning(__func__, "no tasklist available for %s tasks.", state_name);
        return;
    }

    _print_tasklist(state_name, list);
}

static void _on_timer();

void tasks_init()
{
    // get a pointer to the first task's tcb
    struct task *this_task = &_first_task;
    // discover the CPU speed for accurate scheduling
    _discover_cpu_speed();
    *this_task = {
        // this will be filled in when we switch to another task for the first time
        .stack_top = 0,
        // this will be the same for kernel tasks
        .page_dir = Memory::getPageDirPhysAddr(),
        // this is a linked list with only this task
        .next = NULL,
        // this task is currently running
        .state = TASK_RUNNING,
        // just say that this task hasn't spent any time running yet
        .time_used = 0,
        // set wakeup time to 0 to make the compiler happy
        .wakeup_time = 0,
        // name
        .name = "[main]",
        // this is not backed by dynamic memory
        .alloc = ALLOC_STATIC,
    };
    TASK_ACTION(__func__, this_task);
    // create a task for the cleaner and set its state to "paused"
    (void) tasks_new(_cleaner_task_impl, &_cleaner_task, TASK_PAUSED, "[cleaner]");
    _cleaner_task.state = TASK_PAUSED;
    // update the timer variables
    _last_time = _get_cpu_time_ns();
    _last_timer_time = _last_time;
    // enable time slices
    _time_slice_remaining = TIME_SLICE_SIZE;
    // this is the current task
    current_task = this_task;
    timer_register_callback(_on_timer);
}
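
// A minimal sketch of how the boot path might bring the scheduler up
// (hypothetical call site and `worker` function; the real boot code lives
// elsewhere in the kernel):
//
//   tasks_init();                                    // adopt the boot flow as "[main]"
//   tasks_new(worker, NULL, TASK_READY, "[worker]"); // queue a dynamically allocated task
//   tasks_schedule();                                // hand the CPU to the next ready task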

static void _task_starting()
{
    // this is called whenever a new task is about to start
    // it is run in the context of the new task

    // the task before this caused the scheduler to lock
    // so we must unlock here
    _scheduler_lock--;
    if (_scheduler_lock == 0) {
        asm volatile("sti");
    }
}

static void _task_stopping()
{
    // this is called whenever a task is about to stop (i.e. it returned)
    // it is run in the context of the stopping task
    tasks_exit();
    // prevent undefined behavior from returning to a random address
    panic("Attempted to schedule a stopped task");
}

// emulate a stack push
// this makes it easier and more intuitive to set up stacks
static inline void _stack_push_word(void **stack_pointer, size_t value)
{
    // decrement the stack pointer
    *(uintptr_t*)stack_pointer -= sizeof(size_t);
    // place the new value at that new address
    **(size_t**)stack_pointer = value;
}
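
// e.g. on i686 (sizeof(size_t) == 4), with *stack_pointer == (void*)0x1000,
// _stack_push_word(&stack_pointer, 0xBEEF) leaves *stack_pointer == (void*)0xFFC
// and stores 0xBEEF at that address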

static void _enqueue_task(struct tasklist *list, task *task)
{
    if (list->head == NULL) {
        list->head = task;
    }
    if (list->tail != NULL) {
        // the current last task's next pointer will be this task
        list->tail->next = task;
    }
    // and now this task becomes the last task
    task->next = NULL;
    list->tail = task;
}

static struct task *_dequeue_task(struct tasklist *list)
{
    struct task *task;
    if (list->head == NULL) {
        // can't dequeue if there's nothing there
        return NULL;
    }
    // the head of the list is the next item
    task = list->head;
    // the new head is the next task
    list->head = task->next;
    if (list->head == NULL) {
        // if there are no more items in the list, then
        // the last item in the list will also be null
        list->tail = NULL;
    }
    // it doesn't make sense to have a next when it's not in a list
    task->next = NULL;
    return task;
}

static void _remove_task(struct tasklist *list, struct task *task, struct task *previous)
{
    // if this is true, something's not right...
    if (previous != NULL && previous->next != task) {
        panic("Bogus arguments to _remove_task.");
    }
    // update the head if necessary
    if (list->head == task) {
        list->head = task->next;
    }
    // update the tail if necessary
    if (list->tail == task) {
        list->tail = previous;
    }
    // update the previous task if necessary
    if (previous != NULL) {
        previous->next = task->next;
    }
    // it's not in any list anymore, so clear its next pointer
    task->next = NULL;
}
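
// Invariants these list helpers maintain: head and tail are both NULL for an
// empty list, tail->next is always NULL, and a task's next pointer is cleared
// whenever it leaves a list. Removal is O(1) because the caller supplies the
// previous node while walking the (singly linked) list.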

extern "C" void _tasks_enqueue_ready(struct task *task)
{
    _enqueue_task(&tasks_ready, task);
}

static struct task *_tasks_dequeue_ready()
{
    return _dequeue_task(&tasks_ready);
}

struct task *tasks_new(void (*entry)(void), struct task *storage, task_state state, const char *name)
{
    struct task *new_task = storage;
    if (storage == NULL) {
        // allocate memory for our task structure
        new_task = (struct task*)malloc(sizeof(struct task));
        // panic if the alloc fails (we have no fallback)
        if (new_task == NULL) {
            panic("Unable to allocate memory for new task struct.");
        }
    }
    // allocate a page for this stack (we might change this later)
    // TODO: Should more than one page be allocated / freed?
    uint8_t *stack = (uint8_t *)Memory::newPage(1);
    if (stack == NULL) {
        panic("Unable to allocate memory for new task stack.");
    }
    // remember, the stack grows downward, so start at the top of the page
    void *stack_pointer = stack + ARCH_PAGE_SIZE;
    // a null stack frame to make the panic screen happy
    _stack_push_word(&stack_pointer, 0);
    // the last thing to happen is the task stopping function
    _stack_push_word(&stack_pointer, (size_t)_task_stopping);
    // next entry is the main function to call (the start of the task)
    _stack_push_word(&stack_pointer, (size_t)entry);
    // when this task is started, the CPU will pop off this value, which will become the new EIP
    // we push this function to allow some setup code to be run from within the context of the new task
    _stack_push_word(&stack_pointer, (size_t)_task_starting);
    // our task switching code is going to pop four values off of the stack before returning
    _stack_push_word(&stack_pointer, 0);
    _stack_push_word(&stack_pointer, 0);
    _stack_push_word(&stack_pointer, 0);
    _stack_push_word(&stack_pointer, 0);
    new_task->stack_top = (uintptr_t)stack_pointer;
    new_task->page_dir = Memory::getPageDirPhysAddr();
    new_task->next = NULL;
    new_task->state = state;
    new_task->time_used = 0;
    new_task->name = name;
    new_task->alloc = storage == NULL ? ALLOC_DYNAMIC : ALLOC_STATIC;
    if (state == TASK_READY) {
        _tasks_enqueue_ready(new_task);
    }
    TASK_ACTION(__func__, new_task);
    return new_task;
}
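
// Resulting initial stack layout built above, from high to low addresses:
//
//   [ 0               ] <- fake return address, keeps the panic screen happy
//   [ &_task_stopping ] <- where `entry` "returns" when the task function ends
//   [ &entry          ] <- where `_task_starting` returns to
//   [ &_task_starting ] <- popped into EIP by the first switch to this task
//   [ 0 ][ 0 ][ 0 ][ 0 ] <- four registers restored by the task-switch routine
//   ^ stack_top            (presumably the callee-saved EBX/ESI/EDI/EBP)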

void tasks_update_time()
{
    uint64_t current_time = _get_cpu_time_ns();
    uint64_t delta = current_time - _last_time;
    if (current_task == NULL) {
        _idle_time += delta;
    } else {
        current_task->time_used += delta;
    }
    _last_time = current_time;
}

static void _schedule()
{
    if (_scheduler_postpone_count != 0) {
        // don't schedule if there's more work to be done
        _scheduler_postponed = true;
        return;
    }
    if (current_task == NULL) {
        // we are currently idling and will schedule at a later time
        return;
    }
    // get the next task
    struct task *task = _tasks_dequeue_ready();
    // don't need to do anything if there's nothing ready to run
    if (task == NULL) {
        if (current_task->state == TASK_RUNNING) {
            // still running the same task
            // but also reset the time slice counter
            _time_slice_remaining = TIME_SLICE_SIZE;
            return;
        }
        // disable time slices because there are no tasks available to run
        _time_slice_remaining = 0;
        // count the time that this task ran for
        tasks_update_time();
        /*** idle ***/
        // borrow this task to return to once we're not idle anymore
        struct task *borrowed = current_task;
        // set the current task to null to indicate an idle state
        current_task = NULL;
        _idle_start = _get_cpu_time_ns();
        do {
            // enable interrupts to process timer and other events
            // (on x86, sti takes effect after the following instruction,
            // so no interrupt can slip in between sti and hlt)
            asm ("sti");
            // immediately halt the CPU
            asm ("hlt");
            // disable interrupts to restore our lock
            asm ("cli");
            // check if there's a task ready to be run
        } while (task = _tasks_dequeue_ready(), task == NULL);
        // reset the current task
        current_task = borrowed;
        // count the time we spent idling
        _idle_start = _get_cpu_time_ns() - _idle_start;
        _idle_time += _idle_start;
    } else {
        // just do time accounting once
        tasks_update_time();
    }
    // reset the time slice because a new task is being scheduled
    _time_slice_remaining = TIME_SLICE_SIZE;
    // reset the last "timer time" since the time slice was reset
    _last_timer_time = _get_cpu_time_ns();
    // switch to the task
    tasks_switch_to(task);
}
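
// Scheduling policy, in short: tasks_ready is a FIFO queue, so this is a
// round-robin scheduler; a task runs until it blocks, sleeps, exits, yields
// via tasks_schedule(), or its TIME_SLICE_SIZE quantum expires in _on_timer.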

void tasks_schedule()
{
    // we must lock on all scheduling operations
    _aquire_scheduler_lock();
    // run the scheduler
    _schedule();
    // this will run when we switch back to the calling task
    _release_scheduler_lock();
}

uint64_t tasks_get_self_time()
{
    // bring the accounting up to date before reading it
    tasks_update_time();
    return current_task->time_used;
}

void tasks_block_current(task_state reason)
{
    _aquire_scheduler_lock();
    current_task->state = reason;
    TASK_ACTION(__func__, current_task);
    _schedule();
    _release_scheduler_lock();
}

void tasks_unblock(struct task *task)
{
    _aquire_scheduler_lock();
    task->state = TASK_READY;
    TASK_ACTION(__func__, task);
    _tasks_enqueue_ready(task);
    _release_scheduler_lock();
}

void _wakeup(struct task *task)
{
    task->state = TASK_READY;
    // park the wakeup time at the maximum value so the task isn't woken again
    task->wakeup_time = (0ULL - 1);
    _tasks_enqueue_ready(task);
    TASK_ACTION(__func__, task);
}

static void _on_timer()
{
    _aquire_scheduler_lock();

    struct task *pre = NULL;
    struct task *task = tasks_sleeping.head;
    struct task *next;
    bool need_schedule = false;
    uint64_t time = _get_cpu_time_ns();
    uint64_t time_delta;

    while (task != NULL) {
        next = task->next;
        if (time >= task->wakeup_time) {
            Logger::Verbose(__func__, "timer: waking sleeping task");
            _remove_task(&tasks_sleeping, task, pre);
            _wakeup(task);
            task->next = NULL;
            need_schedule = true;
        } else {
            pre = task;
        }
        task = next;
    }

    if (_time_slice_remaining != 0) {
        time_delta = time - _last_timer_time;
        _last_timer_time = time;
        if (time_delta >= _time_slice_remaining) {
            // schedule (and maybe preempt)
            // the schedule function will reset the time slice
            Logger::Trace(__func__, "timer: time slice expired");
            need_schedule = true;
        } else {
            // decrement the time slice counter
            _time_slice_remaining -= time_delta;
        }
    }

    if (need_schedule) {
        _schedule();
    }

    _release_scheduler_lock();
}

void tasks_nano_sleep_until(uint64_t time)
{
    // TODO: maybe validate that this time is in the future?
    _aquire_scheduler_lock();
    current_task->state = TASK_SLEEPING;
    current_task->wakeup_time = time;
    _enqueue_sleeping(current_task);
    TASK_ACTION(__func__, current_task);
    _schedule();
    _release_scheduler_lock();
}

void tasks_nano_sleep(uint64_t time)
{
    tasks_nano_sleep_until(_get_cpu_time_ns() + time);
}
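
// e.g. to put the calling task to sleep for roughly 10 milliseconds:
//   tasks_nano_sleep(10ULL * 1000 * 1000); // 10,000,000 ns
// wakeups are driven by _on_timer, so the actual resolution is bounded by
// the timer tick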

void tasks_exit()
{
    // userspace cleanup can happen here
    Logger::Debug(__func__, "task \"%s\" (0x%08lx) exiting", current_task->name, (uint32_t)current_task);

    _aquire_scheduler_lock();
    // all scheduling-specific operations must happen here
    _enqueue_stopped(current_task);

    // the ordering of these two should really be reversed
    // but the scheduler currently isn't very smart
    tasks_block_current(TASK_STOPPED);

    tasks_unblock(&_cleaner_task);

    _release_scheduler_lock();
}

static void _clean_stopped_task(struct task *task)
{
    // free the stack page
    uintptr_t page = Arch::Memory::pageAlign(task->stack_top);
    // TODO: Should more than one page be allocated / freed?
    Memory::freePage((void *)page, 1);
    // only free the task struct itself if it was dynamically allocated;
    // statically allocated task structs are the caller's responsibility
    if (task->alloc == ALLOC_DYNAMIC) free(task);
}

static void _cleaner_task_impl()
{
    for (;;) {
        struct task *task;
        _aquire_scheduler_lock();

        while (tasks_stopped.head != NULL) {
            task = _dequeue_stopped();
            Logger::Debug(__func__, "cleaning up task %s (0x%08lx)", task->name ? task->name : "N/A", (uint32_t)task);
            _clean_stopped_task(task);
        }

        // a schedule occurring at this point would be okay
        // it just needs to occur before the loop repeats
        tasks_block_current(TASK_PAUSED);

        _release_scheduler_lock();
    }
}

void tasks_sync_block(struct task_sync *ts)
{
    _aquire_scheduler_lock();
#ifdef DEBUG
    if (ts->dbg_name != NULL) {
        Logger::Debug(__func__, "blocking %s", ts->dbg_name);
    }
#endif
    // push the current task to the waiting queue
    _enqueue_task(&ts->waiting, current_task);
    // now block until the mutex is freed
    tasks_block_current(TASK_BLOCKED);
    _release_scheduler_lock();
}

void tasks_sync_unblock(struct task_sync *ts)
{
    _aquire_scheduler_lock();
#ifdef DEBUG
    if (ts->dbg_name != NULL) {
        Logger::Debug(__func__, "unblocking %s", ts->dbg_name);
    }
#endif
    // iterate all tasks that were blocked and unblock them
    struct task *task = ts->waiting.head;
    struct task *next = NULL;
    if (task == NULL) {
        // no other tasks were blocked
        goto exit;
    }
    do {
        next = task->next;
        _wakeup(task);
        task->next = NULL;
        task = next;
    } while (task != NULL);
    ts->waiting.head = NULL;
    ts->waiting.tail = NULL;
    // we woke up some tasks
    _schedule();
exit:
    _release_scheduler_lock();
}
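
// A minimal sketch of how a lock might sit on top of task_sync (hypothetical
// `example_mutex` type, not part of this file; the real primitives live
// elsewhere in the kernel):
//
//   struct example_mutex {
//       bool locked;
//       struct task_sync sync;
//   };
//
//   void example_mutex_lock(struct example_mutex *m) {
//       while (m->locked) {
//           tasks_sync_block(&m->sync); // sleep until an unlock wakes us
//       }
//       m->locked = true; // re-checked on every wakeup before claiming
//   }
//
//   void example_mutex_unlock(struct example_mutex *m) {
//       m->locked = false;
//       tasks_sync_unblock(&m->sync); // wake all waiters; each re-checks
//   }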