Diffstat (limited to 'litmus/sched_color.c')

 -rw-r--r--  litmus/sched_color.c | 811
 1 file changed, 811 insertions, 0 deletions
diff --git a/litmus/sched_color.c b/litmus/sched_color.c
new file mode 100644
index 000000000000..98a46bb1b06f
--- /dev/null
+++ b/litmus/sched_color.c
@@ -0,0 +1,811 @@
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/preempt.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>
#include <litmus/color.h>
#include <litmus/fifo_common.h>
#include <litmus/budget.h>
#include <litmus/rt_server.h>
#include <litmus/dgl.h>

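/*
 * Plugin overview: each CPU runs a top-level EDF server (struct cpu_entry)
 * that schedules the CPU's partitioned real-time tasks plus one FIFO
 * server.  The FIFO server in turn schedules best-effort (BE) tasks from
 * a single FIFO domain shared by all CPUs.  Before a task may be
 * dispatched, it must hold its requested cache colors, arbitrated by a
 * dynamic group lock (DGL).
 */
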
/**
 * @server     Common server functionality.
 * @task       Task used to schedule server.
 * @timer      Budget enforcement for @task.
 * @start_time If set, time at which server began running.
 */
struct fifo_server {
	struct rt_server server;
	struct task_struct* task;
	struct enforcement_timer timer;
	lt_t start_time;
};

/**
 * @server      Common server functionality.
 * @edf_domain  PEDF domain.
 * @scheduled   Task physically running on CPU.
 * @fifo_server Server partitioned to this CPU.
 */
struct cpu_entry {
	struct rt_server server;
	rt_domain_t edf_domain;
	struct task_struct* scheduled;
	struct fifo_server fifo_server;
};

DEFINE_PER_CPU(struct cpu_entry, color_cpus);

static rt_domain_t fifo_domain;
static raw_spinlock_t fifo_lock;

static struct dgl group_lock;
static raw_spinlock_t dgl_lock;
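
/*
 * Lock order as used in this file: entry_lock(entry) is taken before
 * fifo_lock and before dgl_lock; fifo_lock and dgl_lock are never held
 * at the same time.
 */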

#define local_entry (&__get_cpu_var(color_cpus))
#define remote_entry(cpu) (&per_cpu(color_cpus, cpu))
#define task_entry(task) remote_entry(get_partition(task))
#define task_fserver(task) (&task_entry(task)->fifo_server.server)
#define entry_lock(entry) (&entry->edf_domain.ready_lock)
#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c])
#define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->edf_domain)
#define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry))

/*
 * Requeue onto domain's release or ready queue based on task state.
 */
static void requeue(rt_domain_t *dom, struct task_struct* t)
{
	if (is_server(t) && !tsk_rt(t)->present)
		/* Remove stopped server from the system */
		return;

	TRACE_TASK(t, "Requeueing\n");
	if (is_queued(t)) {
		TRACE_TASK(t, "Already queued!\n");
		return;
	}

	set_rt_flags(t, RT_F_RUNNING);
	if (is_released(t, litmus_clock()))
		__add_ready(dom, t);
	else
		add_release(dom, t);
}

/*
 * Relinquish resources held by @t (or its children).
 */
static void release_resources(struct task_struct *t)
{
	struct task_struct *sched;

	TRACE_TASK(t, "Releasing resources\n");

	if (is_server(t)) {
		sched = task_fserver(t)->linked;
		if (sched)
			release_resources(sched);
	} else if (is_kernel_np(t)) {
		remove_group_req(&group_lock, tsk_rt(t)->req);
	}
	tsk_rt(t)->kernel_np = 0;
}

/*
 * Put in requests for resources needed by @t. If @t is a server, this will
 * set @t's np flag to reflect resources held by @t's children.
 */
static void acquire_resources(struct task_struct *t)
{
	int cpu;
	struct rt_server *server;
	struct task_struct *sched;

	/* Can't acquire resources if t is not running */
	BUG_ON(!get_task_server(t));

	if (is_kernel_np(t)) {
		TRACE_TASK(t, "Already contending for resources\n");
		return;
	}
	cpu = get_task_server(t)->cpu;

	if (is_server(t)) {
		server = task_fserver(t);
		sched = server->linked;

		/* Happens when server is booted off on completion or
		 * has just completed executing a task.
		 */
		if (sched && !is_kernel_np(sched))
			acquire_resources(sched);

		/* Become np if there is a running task */
		if (sched && has_resources(sched, cpu)) {
			TRACE_TASK(t, "Running task with resource\n");
			tsk_rt(t)->kernel_np = 1;
		} else {
			TRACE_TASK(t, "Running no resources\n");
			tsk_rt(t)->kernel_np = 0;
		}
	} else {
		TRACE_TASK(t, "Acquiring resources\n");
		if (!has_resources(t, cpu))
			add_group_req(&group_lock, tsk_rt(t)->req, cpu);
		tsk_rt(t)->kernel_np = 1;
	}
}

/*
 * Stop logically running the currently linked task.
 */
static void unlink(struct rt_server *server)
{
	BUG_ON(!server->linked);

	if (is_server(server->linked))
		task_fserver(server->linked)->running = 0;

	sched_trace_server_switch_away(server->sid, 0,
				       server->linked->pid,
				       get_rt_job(server->linked));
	TRACE_TASK(server->linked, "No longer run by server %d\n", server->sid);

	raw_spin_lock(&dgl_lock);
	release_resources(server->linked);
	raw_spin_unlock(&dgl_lock);

	get_task_server(server->linked) = NULL;
	server->linked = NULL;
}

static struct task_struct* schedule_server(struct rt_server *server);

/*
 * Logically run @task.
 */
static void link(struct rt_server *server, struct task_struct *task)
{
	struct rt_server *tserv;

	BUG_ON(server->linked);
	BUG_ON(!server->running);
	BUG_ON(is_kernel_np(task));

	TRACE_TASK(task, "Run by server %d\n", server->sid);

	if (is_server(task)) {
		tserv = task_fserver(task);
		tserv->running = 1;
		schedule_server(tserv);
	}

	server->linked = task;
	get_task_server(task) = server;

	sched_trace_server_switch_to(server->sid, 0,
				     task->pid, get_rt_job(task));
}

/*
 * Complete job for task linked to @server.
 */
static void job_completion(struct rt_server *server)
{
	struct task_struct *t = server->linked;

	TRACE_TASK(t, "Job completed\n");
	if (is_server(t))
		sched_trace_server_completion(t->pid, get_rt_job(t));
	else
		sched_trace_task_completion(t, 0);

	unlink(server);
	set_rt_flags(t, RT_F_SLEEP);
	prepare_for_next_period(t);

	if (is_server(t))
		sched_trace_server_release(t->pid, get_rt_job(t),
					   get_release(t), get_deadline(t));
	else
		sched_trace_task_release(t);

	if (is_running(t))
		server->requeue(server, t);
}

/*
 * Update @server state to reflect task's state.
 */
static void update_task(struct rt_server *server)
{
	int oot, sleep, block, np;
	struct task_struct *t = server->linked;

	block = !is_running(t);
	oot = budget_enforced(t) && budget_exhausted(t);
	np = is_kernel_np(t);
	sleep = get_rt_flags(t) == RT_F_SLEEP;

	TRACE_TASK(t, "Updating task, block: %d, oot: %d, np: %d, sleep: %d\n",
		   block, oot, np, sleep);

	if (block)
		unlink(server);
	else if (oot || sleep)
		job_completion(server);
}

/*
 * Link next task for @server.
 */
static struct task_struct* schedule_server(struct rt_server *server)
{
	struct task_struct *next;
	struct rt_server *lserver;

	TRACE("Scheduling server %d\n", server->sid);

	if (server->linked) {
		if (is_server(server->linked)) {
			lserver = task_fserver(server->linked);
			lserver->update(lserver);
		}
		update_task(server);
	}

	next = server->linked;
	if ((!next || !is_np(next)) &&
	    server->need_preempt(server->domain, next)) {
		if (next) {
			TRACE_TASK(next, "Preempted\n");
			unlink(server);
			server->requeue(server, next);
		}
		/* Use the server's locking take callback when one is
		 * provided, so that takes from the shared FIFO domain
		 * hold fifo_lock.
		 */
		next = server->take ? server->take(server) :
			__take_ready(server->domain);
		link(server, next);
	}

	return next;
}

/*
 * Dumb requeue for PEDF (CPU) servers.
 */
static void edf_requeue(struct rt_server *server, struct task_struct *t)
{
	BUG_ON(is_be(t));
	requeue(server->domain, t);
}

/*
 * Locking requeue for FIFO servers.
 */
static void fifo_requeue(struct rt_server *server, struct task_struct *t)
{
	BUG_ON(!is_be(t));
	raw_spin_lock(&fifo_lock);
	requeue(server->domain, t);
	raw_spin_unlock(&fifo_lock);
}

/*
 * Locking take for FIFO servers.
 */
static struct task_struct* fifo_take(struct rt_server *server)
{
	struct task_struct *ret;

	raw_spin_lock(&fifo_lock);
	ret = __take_ready(server->domain);
	raw_spin_unlock(&fifo_lock);

	return ret;
}

/*
 * Update server state, including picking next running task and incrementing
 * server execution time.
 */
static void fifo_update(struct rt_server *server)
{
	lt_t delta;
	struct fifo_server *fserver;

	fserver = container_of(server, struct fifo_server, server);
	TRACE_TASK(fserver->task, "Updating FIFO server\n");

	if (!server->linked || has_resources(server->linked, server->cpu)) {
		/* Running here means linked to a parent server */
		BUG_ON(!server->running);

		/* Stop executing */
		if (fserver->start_time) {
			delta = litmus_clock() - fserver->start_time;
			tsk_rt(fserver->task)->job_params.exec_time += delta;
			fserver->start_time = 0;
			cancel_enforcement_timer(&fserver->timer);
		} else {
			/* Server is linked, but not executing */
			BUG_ON(fserver->timer.armed);
		}

		/* Calculate next task */
		schedule_server(&fserver->server);

		/* Reserve needed resources */
		raw_spin_lock(&dgl_lock);
		acquire_resources(fserver->task);
		raw_spin_unlock(&dgl_lock);
	}
}

/*
 * Release handler for per-CPU EDF domains. Triggers a preemption only if
 * the EDF-scheduled "linked" task should be preempted and is not
 * kernel-np.
 */
static void color_edf_release(rt_domain_t *edf, struct bheap *tasks)
{
	unsigned long flags;
	struct cpu_entry *entry;

	TRACE_TASK(bheap2task(bheap_peek(edf->order, tasks)),
		   "Released set of EDF tasks\n");

	entry = container_of(edf, struct cpu_entry, edf_domain);
	raw_spin_lock_irqsave(entry_lock(entry), flags);

	__merge_ready(edf, tasks);

	if (edf_preemption_needed(edf, entry->server.linked) &&
	    (!entry->server.linked || !is_kernel_np(entry->server.linked))) {
		litmus_reschedule(entry->server.cpu);
	}

	raw_spin_unlock_irqrestore(entry_lock(entry), flags);
}

/*
 * Triggers a preemption on the first CPU whose FIFO server is linked but
 * has no BE task of its own, i.e. is running NULL.
 */
static void check_for_fifo_preempt(void)
{
	int ret = 0, cpu;
	struct cpu_entry *entry;
	struct rt_server *cpu_server, *fifo_server;

	TRACE("Checking for FIFO preempt\n");

	for_each_online_cpu(cpu) {
		entry = remote_entry(cpu);
		cpu_server = &entry->server;
		fifo_server = &entry->fifo_server.server;

		raw_spin_lock(entry_lock(entry));
		raw_spin_lock(&fifo_lock);

		if (cpu_server->linked && is_server(cpu_server->linked) &&
		    !fifo_server->linked) {
			litmus_reschedule(cpu);
			ret = 1;
		}

		raw_spin_unlock(&fifo_lock);
		raw_spin_unlock(entry_lock(entry));

		if (ret)
			break;
	}
}

static void color_fifo_release(rt_domain_t *dom, struct bheap *tasks)
{
	unsigned long flags;

	TRACE_TASK(bheap2task(bheap_peek(dom->order, tasks)),
		   "Released set of FIFO tasks\n");
	local_irq_save(flags);

	raw_spin_lock(&fifo_lock);
	__merge_ready(dom, tasks);
	raw_spin_unlock(&fifo_lock);

	check_for_fifo_preempt();

	local_irq_restore(flags);
}

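/*
 * A CPU is logically idle when nothing is linked, or when its linked
 * FIFO server has no BE task of its own to run.
 */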
#define cpu_empty(entry, run) \
	(!(run) || (is_server(run) && !(entry)->fifo_server.server.linked))

static struct task_struct* color_schedule(struct task_struct *prev)
{
	unsigned long flags;
	int server_running;
	struct cpu_entry *entry = local_entry;
	struct task_struct *next, *plink = entry->server.linked;

	TRACE("Reschedule on %d at %llu\n", entry->server.cpu, litmus_clock());
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));

	raw_spin_lock_irqsave(entry_lock(entry), flags);

	if (entry->scheduled && cpu_empty(entry, plink) && is_running(prev)) {
		TRACE_TASK(prev, "Snuck in on new!\n");
		requeue(task_dom(entry, prev), prev);
	}

	/* Pick next top-level task */
	next = schedule_server(&entry->server);
	/* Schedule hierarchically */
	server_running = next && is_server(next);
	if (server_running)
		next = task_fserver(next)->linked;

	/* Selected tasks must contend for group lock */
	if (next) {
		raw_spin_lock(&dgl_lock);
		acquire_resources(next);
		if (has_resources(next, entry->server.cpu)) {
			TRACE_TASK(next, "Has group lock\n");
			sched_trace_task_resume(next, 1);
		} else {
			TRACE_TASK(next, "Does not have lock, 0x%p does\n",
				   group_lock.acquired[entry->server.cpu]);
			if (next != prev)
				sched_trace_task_block(next, 1);
			next = NULL;
			server_running = 0;
		}
		raw_spin_unlock(&dgl_lock);
	}

	/* Server is blocked if its running task is blocked. Note that if the
	 * server has no running task, the server will now execute NULL.
	 */
	if (server_running) {
		TRACE_TASK(entry->server.linked, "Server running\n");
		arm_enforcement_timer(&entry->fifo_server.timer,
				      entry->fifo_server.task);
		entry->fifo_server.start_time = litmus_clock();
	}

	if (prev)
		tsk_rt(prev)->scheduled_on = NO_CPU;
	if (next)
		tsk_rt(next)->scheduled_on = entry->server.cpu;

	entry->scheduled = next;
	sched_state_task_picked();

	raw_spin_unlock_irqrestore(entry_lock(entry), flags);

	return entry->scheduled;
}

static void color_task_new(struct task_struct *t, int on_rq, int running)
{
	unsigned long flags;
	int i, replicas;
	raw_spinlock_t *lock;
	struct cpu_entry *entry;
	struct dgl_group_req *req;

	TRACE_TASK(t, "New colored task\n");
	local_irq_save(flags);

	entry = (is_be(t)) ? local_entry : task_entry(t);
	lock = task_lock(entry, t);

	release_at(t, litmus_clock());

	/* Create request for dynamic group locks; the per-color replica
	 * counts come from the task's control page. Freed in
	 * color_task_exit().
	 */
	req = kmalloc(sizeof(*req), GFP_ATOMIC);
	dgl_group_req_init(req);
	for (i = 0; i < NUM_RESOURCES; i++) {
		replicas = get_control_page(t)->requests[i];
		if (replicas)
			set_req(req, i, replicas);
	}
	tsk_rt(t)->req = req;

	/* Join system */
	raw_spin_lock(lock);
	if (running) {
		TRACE_TASK(t, "Already scheduled on %d\n", entry->server.cpu);
		BUG_ON(entry->scheduled);
		entry->scheduled = t;
		tsk_rt(t)->scheduled_on = entry->server.cpu;
	} else
		requeue(task_dom(entry, t), t);
	raw_spin_unlock(lock);

	/* Trigger preemptions */
	if (is_be(t))
		check_for_fifo_preempt();
	else
		litmus_reschedule(entry->server.cpu);

	local_irq_restore(flags);
}

static void color_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	struct cpu_entry* entry = task_entry(task);
	raw_spinlock_t *lock = task_lock(entry, task);
	lt_t now = litmus_clock();

	TRACE_TASK(task, "Wake up at %llu\n", now);

	local_irq_save(flags);

	/* Abuse sporadic model */
	if (is_tardy(task, now)) {
		release_at(task, now);
		sched_trace_task_release(task);
	}

	/* Re-enter system */
	if (entry->scheduled != task) {
		raw_spin_lock(lock);
		requeue(task_dom(entry, task), task);
		raw_spin_unlock(lock);
	} else {
		TRACE_TASK(task, "Is already scheduled on %d!\n",
			   entry->server.cpu);
	}

	/* Trigger preemptions */
	if (is_be(task))
		check_for_fifo_preempt();
	else
		litmus_reschedule(entry->server.cpu);

	local_irq_restore(flags);
}

static void color_task_block(struct task_struct *t)
{
	TRACE_TASK(t, "Block at %llu, state=%ld\n", litmus_clock(), t->state);
	BUG_ON(!is_realtime(t));
	BUG_ON(is_queued(t));
}

static void color_task_exit(struct task_struct * t)
{
	unsigned long flags;
	struct cpu_entry *entry = task_entry(t);
	raw_spinlock_t *lock = task_lock(entry, t);

	TRACE_TASK(t, "RIP, now reschedule\n");

	local_irq_save(flags);

	/* Remove from scheduler consideration */
	if (is_queued(t)) {
		raw_spin_lock(lock);
		remove(task_dom(entry, t), t);
		raw_spin_unlock(lock);
	}

	/* Stop parent server */
	if (get_task_server(t))
		unlink(get_task_server(t));

	/* Unschedule running task */
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		entry = remote_entry(tsk_rt(t)->scheduled_on);

		raw_spin_lock(entry_lock(entry));

		tsk_rt(t)->scheduled_on = NO_CPU;
		entry->scheduled = NULL;
		litmus_reschedule(entry->server.cpu);

		raw_spin_unlock(entry_lock(entry));
	}

	/* Remove dgl request from system */
	raw_spin_lock(&dgl_lock);
	release_resources(t);
	raw_spin_unlock(&dgl_lock);
	kfree(tsk_rt(t)->req);

	local_irq_restore(flags);
}

/*
 * Non-BE tasks must have already migrated to their assigned CPU.
 */
static long color_admit_task(struct task_struct* t)
{
	int ret = (is_be(t) || task_cpu(t) == get_partition(t)) ? 0 : -EINVAL;
	if (ret) {
		printk(KERN_WARNING "Task failed to migrate to CPU %d\n",
		       get_partition(t));
	}
	return ret;
}

/*
 * Load server parameters.
 */
static long color_activate_plugin(void)
{
	int cpu, ret = 0;
	struct rt_task tp;
	struct task_struct *server_task;
	struct cpu_entry *entry;
	lt_t now = litmus_clock();

	for_each_online_cpu(cpu) {
		entry = remote_entry(cpu);
		server_task = entry->fifo_server.task;

		raw_spin_lock(entry_lock(entry));

		ret = color_server_params(cpu, &tp.exec_cost,
					  &tp.period);
		if (ret) {
			printk(KERN_WARNING "Uninitialized server for CPU %d\n",
			       entry->server.cpu);
			goto loop_end;
		}

		/* Fill rt parameters */
		tp.phase = 0;
		tp.cpu = cpu;
		tp.cls = RT_CLASS_SOFT;
		tp.budget_policy = PRECISE_ENFORCEMENT;
		tsk_rt(server_task)->task_params = tp;
		tsk_rt(server_task)->present = 1;

		/* Make runnable */
		release_at(server_task, now);
		entry->fifo_server.start_time = 0;
		entry->scheduled = NULL;

		if (!is_queued(server_task))
			requeue(&entry->edf_domain, server_task);

		TRACE_TASK(server_task, "Created server with wcet: %llu, "
			   "period: %llu\n", tp.exec_cost, tp.period);

	loop_end:
		raw_spin_unlock(entry_lock(entry));

		/* Abort on the first unconfigured server so the error is
		 * not masked by later CPUs succeeding.
		 */
		if (ret)
			break;
	}

	return ret;
}

/*
 * Mark servers as unused, making future calls to requeue fail.
 */
static long color_deactivate_plugin(void)
{
	int cpu;
	struct cpu_entry *entry;

	for_each_online_cpu(cpu) {
		entry = remote_entry(cpu);
		if (entry->fifo_server.task) {
			tsk_rt(entry->fifo_server.task)->present = 0;
		}
	}
	return 0;
}

/*
 * Dump container and server parameters for tracing.
 */
static void color_release_ts(lt_t time)
{
	int cpu, fifo_cid;
	char fifo_name[TASK_COMM_LEN], cpu_name[TASK_COMM_LEN];
	struct cpu_entry *entry;
	struct task_struct *stask;

	strcpy(cpu_name, "CPU");
	strcpy(fifo_name, "BE");

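	/* Container ids: one per CPU, plus a single BE container. */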
	fifo_cid = num_online_cpus();
	trace_litmus_container_param(fifo_cid, fifo_name);

	for_each_online_cpu(cpu) {
		entry = remote_entry(cpu);
		trace_litmus_container_param(cpu, cpu_name);
		trace_litmus_server_param(entry->server.sid, cpu, 0, 0);
		stask = entry->fifo_server.task;
		trace_litmus_server_param(stask->pid, fifo_cid,
					  get_exec_cost(stask),
					  get_rt_period(stask));
	}
}

static struct sched_plugin color_plugin __cacheline_aligned_in_smp = {
	.plugin_name = "COLOR",
	.task_new = color_task_new,
	.complete_job = complete_job,
	.task_exit = color_task_exit,
	.schedule = color_schedule,
	.task_wake_up = color_task_wake_up,
	.task_block = color_task_block,
	.admit_task = color_admit_task,

	.release_ts = color_release_ts,

	.activate_plugin = color_activate_plugin,
	.deactivate_plugin = color_deactivate_plugin,
};

static int __init init_color(void)
{
	int cpu;
	struct cpu_entry *entry;
	struct task_struct *server_task;
	struct fifo_server *fifo_server;
	struct rt_server *cpu_server;

	for_each_online_cpu(cpu) {
		entry = remote_entry(cpu);
		edf_domain_init(&entry->edf_domain, NULL, color_edf_release);

		entry->scheduled = NULL;

		/* Create FIFO server */
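		/* FIFO servers take ids nr_cpus+1..2*nr_cpus; the CPU
		 * servers created below take ids 1..nr_cpus.
		 */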
		fifo_server = &entry->fifo_server;
		init_rt_server(&fifo_server->server,
			       cpu + num_online_cpus() + 1,
			       cpu,
			       &fifo_domain,
			       fifo_preemption_needed,
			       fifo_requeue, fifo_update, fifo_take);

		/* Create task struct for FIFO server */
		server_task = kzalloc(sizeof(struct task_struct), GFP_ATOMIC);
		server_task->policy = SCHED_LITMUS;
		strcpy(server_task->comm, "server");
		server_task->pid = fifo_server->server.sid;
		fifo_server->task = server_task;

		/* Create rt_params for FIFO server */
		tsk_rt(server_task)->heap_node = bheap_node_alloc(GFP_ATOMIC);
		tsk_rt(server_task)->rel_heap = release_heap_alloc(GFP_ATOMIC);
		bheap_node_init(&tsk_rt(server_task)->heap_node, server_task);
		tsk_rt(server_task)->is_server = 1;

		/* Create CPU server */
		cpu_server = &entry->server;
		init_rt_server(cpu_server, cpu + 1, cpu,
			       &entry->edf_domain, edf_preemption_needed,
			       edf_requeue, NULL, NULL);
		cpu_server->running = 1;

		init_enforcement_timer(&fifo_server->timer);
	}

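	/* A single FIFO domain and lock are shared by all BE servers. */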
	fifo_domain_init(&fifo_domain, NULL, color_fifo_release);
	raw_spin_lock_init(&fifo_lock);

	dgl_init(&group_lock);
	raw_spin_lock_init(&dgl_lock);

	return register_sched_plugin(&color_plugin);
}

module_init(init_color);