-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtask.c
More file actions
176 lines (157 loc) · 4.5 KB
/
task.c
File metadata and controls
176 lines (157 loc) · 4.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
#include "hptdisplay.h"
#include "rpi3b.h"
// Per-task control block.
struct task {
    enum {
        FREE,     // slot unused; claimable by task_create
        SLEEPING, // waiting until wakeat (set by task_delay)
        BLOCKED,  // parked on a sleeplock waiter list
        RUNNING,  // currently executing on some CPU
        READY,    // runnable; eligible for task_sched to pick
    } status;
    struct context ctx;       // saved registers/stack pointers for _switch
    void *stackh;             // page backing ctx.sp
    void *stackt;             // page backing ctx.sp_el0
    u64 wakeat;               // absolute timer value to wake at (SLEEPING only)
    u64 lastat;               // when this task last ran; scheduler picks the smallest
    struct task *next_waiter; // intrusive link for sleeplock waiter lists
};
#define MAX_TASKS 32
// SAFETY: interrupts must be disabled when modifying tasks.
// this is because a timer interrupt could fire while modifying tasks,
// switching contexts and leaving tasks in an inconsistent state
static struct task tasks[MAX_TASKS] = {0};
// Per-CPU scheduler state: the task now running on that CPU (NULL while
// the scheduler loop itself runs) and the scheduler's own saved context.
static struct sched {
    struct task *current;
    struct context ctx;
} schedulers[CPUS] = {0};
// First code every new task runs: task_create points the saved ctx.lr
// here and places the task's entry function in saved x19, so the first
// _switch into the task "returns" to this function.
static void task_trampoline(void) {
    // release locks held in context switch
    intr_popoff();
    // load entry function pointer from context switch (register x19)
    // NOTE(review): relies on the compiler not touching x19 (callee-saved)
    // between function entry and this asm statement — fragile; confirm it
    // survives at the optimization level this kernel builds with
    void (*entry)(void);
    asm("mov %0, x19" : "=r"(entry));
    entry();
    // just in case entry() exits
    task_exit();
}
// Allocate a task slot and set it up so the scheduler can start running
// `entry` via task_trampoline. Panics if all MAX_TASKS slots are in use.
void task_create(void (*entry)(void)) {
    intr_pushoff();
    // find the first FREE task slot (stop as soon as one is found)
    struct task *t = NULL;
    for (usize i = 0; i < MAX_TASKS; i++) {
        if (tasks[i].status == FREE) {
            t = &tasks[i];
            break;
        }
    }
    if (t == NULL)
        panic("task_create: no free tasks");
    // clear state left over from a previous occupant of this slot:
    // a stale lastat would skew the scheduler's least-recently-run pick,
    // and a stale next_waiter could corrupt a sleeplock waiter list
    t->wakeat = 0;
    t->lastat = 0;
    t->next_waiter = NULL;
    // build the initial context and mark the task READY: _switch restores
    // lr (task_trampoline), x19 (entry), and both stack pointers
    t->status = READY;
    t->stackh = alloc_page();
    t->stackt = alloc_page();
    t->ctx.lr = (usize)task_trampoline;
    t->ctx.x19_x29[0] = (usize)entry;
    t->ctx.sp = (usize)t->stackh + PAGESZ;     // stacks grow downward from page end
    t->ctx.sp_el0 = (usize)t->stackt + PAGESZ;
    intr_popoff();
}
// Terminate the currently running task on this CPU. Never returns to
// the caller; control goes back to the scheduler loop.
void task_exit(void) {
    struct sched *s = &schedulers[cpu_id()];
    // free task in tasks list
    intr_pushoff();
    s->current->status = FREE;
    // NOTE(review): we are still executing on these stacks until _switch
    // below completes (ctx.sp points into stackh), and the slot is already
    // FREE — another CPU's task_create could reclaim both before the switch
    // finishes. Use-after-free window; consider deferring the free to the
    // scheduler after it regains control. Confirm against the CPU model.
    alloc_freepage(s->current->stackh);
    alloc_freepage(s->current->stackt);
    _switch(NULL, &s->ctx); // go back to scheduler
    // NOTE: this task will never return, so anything past the ctx switch won't
    // matter
}
void task_yield(void) {
struct sched *s = &schedulers[cpu_id()];
if (s->current == NULL)
return;
// set task as runnable
intr_pushoff();
s->current->status = READY;
_switch(&s->current->ctx, &s->ctx); // go back to scheduler
intr_popoff();
}
// Put the calling task to sleep for at least `millis` milliseconds and
// hand the CPU back to the scheduler. No-op when no task is running on
// this core (mirrors the guard in task_yield; the original dereferenced
// s->current unconditionally and would fault if called from scheduler or
// pre-init context).
void task_delay(u64 millis) {
    struct sched *s = &schedulers[cpu_id()];
    if (s->current == NULL)
        return;
    // set task as sleeping, record the absolute wake-up time, and
    // clear it from the scheduler by switching away
    intr_pushoff();
    s->current->status = SLEEPING;
    s->current->wakeat = timer_in(millis);
    _switch(&s->current->ctx, &s->ctx); // go back to scheduler
    intr_popoff();
}
// Per-CPU scheduler loop: runs forever in its own context. Each pass
// wakes due sleepers, runs the least-recently-run READY task with a 25ms
// preemption slice, and idles the core (wfi) when nothing is runnable.
void task_sched(void) {
    struct sched *s = &schedulers[cpu_id()];
    // scheduler runs forever (in its own context)
    intr_pushoff();
    for (;;) {
        u64 time = timer_current();
        // find a ready task or next wakeat
        struct task *t = NULL;
        u64 min_wakeat = ~0ULL; // earliest future wake time among sleepers
        u64 min_lastat = ~0ULL; // oldest run timestamp among READY tasks
        for (usize i = 0; i < MAX_TASKS; i++) {
            // wake up sleeping tasks
            if (tasks[i].status == SLEEPING) {
                if (tasks[i].wakeat <= time)
                    tasks[i].status = READY;
                else if (tasks[i].wakeat < min_wakeat)
                    min_wakeat = tasks[i].wakeat;
            }
            // find next runnable task: smallest lastat, i.e. least
            // recently run, which approximates round-robin fairness
            if (tasks[i].status == READY && tasks[i].lastat < min_lastat) {
                min_lastat = tasks[i].lastat;
                t = &tasks[i];
            }
        }
        if (t != NULL) {
            // run available task
            t->status = RUNNING;
            s->current = t;
            timer_setalarm(timer_in(25)); // preempt task in 25ms
            _switch(&s->ctx, &t->ctx);
            // task yielded/slept/blocked/exited; record when it last ran
            s->current->lastat = timer_current();
            s->current = NULL; // returning from switch, so no task
        } else if (min_wakeat > time && (min_wakeat - time) > 1000) {
            // nothing runnable and the next wake-up is comfortably far
            // away: arm the alarm slightly early (500 units) and halt
            // the core until an interrupt arrives.
            // NOTE(review): the 1000/500 thresholds are in raw timer
            // units while timer_in takes milliseconds — confirm that
            // timer_current and timer_in use the same time base
            timer_setalarm(min_wakeat - 500);
            intr_popoff();
            wfi();
            intr_pushoff();
        }
    }
}
// Initialize this CPU's scheduler state: no task is running yet.
void task_init(void) {
    schedulers[cpu_id()].current = NULL;
}
// Acquire lk, blocking (descheduling) the calling task while the lock is
// held. Must be called from task context — dereferences s->current.
// NOTE(review): intr_pushoff only masks interrupts on this CPU; with
// CPUS > 1 the locked/waiter fields race across cores unless intr_pushoff
// also provides cross-CPU mutual exclusion — confirm its implementation.
void sleeplock_acquire(struct sleeplock *lk) {
    struct sched *s = &schedulers[cpu_id()];
    intr_pushoff();
    while (lk->locked) {
        // push ourselves onto the lock's waiter list and block
        s->current->next_waiter = lk->waiter;
        lk->waiter = s->current;
        s->current->status = BLOCKED;
        _switch(&s->current->ctx, &s->ctx); // go back to scheduler
        // if we're here, then sleeplock_release was called and the
        // entire waiter linked-list has been cleared; all woken waiters
        // re-check the loop condition and only one wins the lock
    }
    lk->locked = 1;
    intr_popoff();
}
// Release lk and wake every task parked on its waiter list, clearing
// the list (and each waiter's link) so no stale pointers remain.
void sleeplock_release(struct sleeplock *lk) {
    intr_pushoff();
    lk->locked = 0;
    // detach the whole chain up front, then walk it
    struct task *waiter = (struct task *)lk->waiter;
    lk->waiter = NULL;
    while (waiter != NULL) {
        struct task *following = waiter->next_waiter;
        waiter->status = READY; // no longer blocked
        waiter->next_waiter = NULL;
        waiter = following;
    }
    intr_popoff();
}