author    Bjoern B. Brandenburg <bbb@cs.unc.edu>    2009-09-10 07:00:01 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>    2009-09-10 07:00:01 -0400
commit    474407b2855672396e4e778d23274c347012a460 (patch)
tree      d7e22f0f45329e1765e27ef6deb08d4654a37625
parent    2aba2d37926a1a69ef22ccfbdc298324ab21af4f (diff)
Remove non-mainline G-EDF plugins
This removes the G-EDF plugin variants created for RTSS'09 (sched_gedf, sched_ghq_edf, and sched_gq_edf) that we don't want to track in mainline Linux.
-rw-r--r--  litmus/Makefile           5
-rw-r--r--  litmus/sched_gedf.c     621
-rw-r--r--  litmus/sched_ghq_edf.c  720
-rw-r--r--  litmus/sched_gq_edf.c   606
4 files changed, 1 insertion(+), 1951 deletions(-)
diff --git a/litmus/Makefile b/litmus/Makefile
index d9e8dc042e..59fd6f7328 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -10,10 +10,7 @@ obj-y = sched_plugin.o litmus.o \
 	  sched_gsn_edf.o \
 	  sched_psn_edf.o \
 	  sched_cedf.o \
-	  sched_pfair.o \
-	  sched_gq_edf.o \
-	  sched_gedf.o \
-	  sched_ghq_edf.o
+	  sched_pfair.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/sched_gedf.c b/litmus/sched_gedf.c
deleted file mode 100644
index 9d07b1b73c..0000000000
--- a/litmus/sched_gedf.c
+++ /dev/null
@@ -1,621 +0,0 @@
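/* sched_gedf.c -- non-mainline G-EDF plugin created for RTSS'09.
 *
 * The ready queue is kept in a separate 'cheap' structure with its own
 * internal locking, so ready-queue updates do not require the CPU-state
 * lock: gedf_cpu_lock only synchronizes the CPU heap and the per-CPU
 * linked state, and the rt_domain below is used solely for the release
 * queue.
 */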
1
2#include <linux/spinlock.h>
3#include <linux/percpu.h>
4#include <linux/sched.h>
5
6#include <litmus/litmus.h>
7#include <litmus/jobs.h>
8#include <litmus/sched_plugin.h>
9#include <litmus/edf_common.h>
10#include <litmus/sched_trace.h>
11
12#include <litmus/heap.h>
13#include <litmus/cheap.h>
14
15#include <linux/module.h>
16
17#define GEDF_MAX_TASKS 1000
18
19/* cpu_entry_t - maintain the linked and scheduled state
20 */
21typedef struct {
22 int cpu;
23 struct task_struct* linked; /* only RT tasks */
24 int picked; /* linked was seen */
25 struct task_struct* scheduled; /* only RT tasks */
26 struct heap_node* hn;
27} cpu_entry_t;
28DEFINE_PER_CPU(cpu_entry_t, gedf_cpu_entries);
29
30cpu_entry_t* gedf_cpus[NR_CPUS];
31
32/* the cpus queue themselves according to priority in here */
33static struct heap_node gedf_heap_node[NR_CPUS];
34static struct heap gedf_cpu_heap;
35
36DEFINE_SPINLOCK(gedf_cpu_lock); /* synchronize access to cpu heap */
37
38static struct cheap_node gedf_cheap_nodes[GEDF_MAX_TASKS];
39static struct cheap gedf_ready_queue;
40
41static rt_domain_t gedf; /* used only for the release queue */
42
43static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
44{
45 cpu_entry_t *a, *b;
46 a = _a->value;
47 b = _b->value;
48 /* Note that a and b are inverted: we want the lowest-priority CPU at
49 * the top of the heap.
50 */
51 return edf_higher_prio(b->linked, a->linked);
52}
53
54static void remove_from_cpu_heap(cpu_entry_t* entry)
55{
56 if (likely(heap_node_in_heap(entry->hn)))
57 heap_delete(cpu_lower_prio, &gedf_cpu_heap, entry->hn);
58}
59
60/* update_cpu_position - Move the cpu entry to the correct place to maintain
61 * order in the cpu queue. Caller must hold gedf lock.
62 */
63static void update_cpu_position(cpu_entry_t *entry)
64{
65 remove_from_cpu_heap(entry);
66 heap_insert(cpu_lower_prio, &gedf_cpu_heap, entry->hn);
67}
68
69/* caller must hold gedf lock */
70static cpu_entry_t* lowest_prio_cpu(int take)
71{
72 struct heap_node* hn;
73 if (take)
74 hn = heap_take(cpu_lower_prio, &gedf_cpu_heap);
75 else
76 hn = heap_peek(cpu_lower_prio, &gedf_cpu_heap);
77 return hn ? hn->value : NULL;
78}
79
80
81/* link_task_to_cpu - Update the link of a CPU.
82 * Handles the case where the to-be-linked task is already
83 * scheduled on a different CPU.
84 */
85static noinline void link_task_to_cpu(struct task_struct* linked,
86 cpu_entry_t *entry)
87{
88 cpu_entry_t *sched = NULL;
89 struct task_struct* tmp;
90 int on_cpu;
91
92 BUG_ON(linked && !is_realtime(linked));
93
94 /* Currently linked task is set to be unlinked. */
95 if (entry->linked) {
96 entry->linked->rt_param.linked_on = NO_CPU;
97 }
98
99 /* Link new task to CPU. */
100 if (linked) {
101 set_rt_flags(linked, RT_F_RUNNING);
102 /* handle the case where the task is already scheduled somewhere! */
103 on_cpu = linked->rt_param.scheduled_on;
104 if (on_cpu != NO_CPU) {
105 sched = &per_cpu(gedf_cpu_entries, on_cpu);
106 /* this should only happen if not linked already */
107 BUG_ON(sched->linked == linked);
108
109 /* If we are already scheduled on the CPU to which we
110 * wanted to link, we don't need to do the swap --
111 * we just link ourselves to the CPU and depend on
112 * the caller to get things right.
113 *
114 * But only swap if the other node is in the queue.
115 * If it is not, then it is being updated
116 * concurrently and some other task was already
117 * picked for it.
118 */
119 if (entry != sched && heap_node_in_heap(sched->hn)) {
120 TRACE_TASK(linked,
121 "already scheduled on %d, "
122 "updating link.\n",
123 sched->cpu);
124 tmp = sched->linked;
125 linked->rt_param.linked_on = sched->cpu;
126 sched->linked = linked;
127 sched->picked = 1;
128 update_cpu_position(sched);
129 linked = tmp;
130 }
131 }
132 if (linked) /* might be NULL due to swap */
133 linked->rt_param.linked_on = entry->cpu;
134 }
135 entry->linked = linked;
136 entry->picked = entry == sched; /* set to one if we linked to the
137 * CPU that the task is
138 * executing on
139 */
140 if (linked)
141 TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
142 else
143 TRACE("NULL linked to %d.\n", entry->cpu);
144 update_cpu_position(entry);
145}
146
147/* unlink - Make sure a task is not linked any longer to an entry
148 * where it was linked before. Must hold gedf_lock.
149 */
150static noinline void unlink(struct task_struct* t)
151{
152 cpu_entry_t *entry;
153
154 if (t->rt_param.linked_on != NO_CPU) {
155 /* unlink */
156 entry = &per_cpu(gedf_cpu_entries, t->rt_param.linked_on);
157 t->rt_param.linked_on = NO_CPU;
158 link_task_to_cpu(NULL, entry);
159 }
160}
161
162
163/* preempt - force a CPU to reschedule
164 */
165static noinline void preempt(cpu_entry_t *entry)
166{
167 if (smp_processor_id() == entry->cpu)
168 set_tsk_need_resched(current);
169 else
170 smp_send_reschedule(entry->cpu);
171}
172
173
174static void add_to_ready_queue(struct task_struct* task)
175{
176 TRACE_TASK(task, "adding to ready queue\n");
177 cheap_insert((cheap_prio_t) edf_higher_prio,
178 &gedf_ready_queue,
179 task,
180 smp_processor_id());
181}
182
183/* requeue - Put an unlinked task into gsn-edf domain.
184 * Caller must hold gedf_lock.
185 *
186 * call unlocked, but with preemptions disabled!
187 */
188static noinline void requeue(struct task_struct* task)
189{
190 if (is_released(task, litmus_clock()))
191 add_to_ready_queue(task);
192 else
193 /* it has got to wait */
194 add_release(&gedf, task);
195}
196
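/* Predicate used with cheap_take_if() in check_for_preemptions():
 * decides whether 'task' should preempt the lowest-priority CPU
 * ('last').  If so, it also removes that CPU from the CPU heap and
 * releases gedf_cpu_lock (briefly re-enabling lockdep so the unlock is
 * recorded), which is why the caller must reacquire the lock after a
 * successful take.
 */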
197static int preemption_required(cpu_entry_t* last,
198 struct task_struct* task)
199{
200 if (edf_higher_prio(task, last->linked)) {
201 /* yes, drop lock before dequeuing task
202 * and dequeue cpu state
203 */
204 last = lowest_prio_cpu(1);
205 lockdep_on(); /* let lockdep see we actually released it */
206 spin_unlock(&gedf_cpu_lock);
207 lockdep_off();
208 return 1;
209 } else
210 return 0;
211}
212
213/* check for any necessary preemptions */
214static void check_for_preemptions(void)
215{
216 int done = 0;
217 unsigned long flags;
218 struct task_struct *task, *unlinked;
219 cpu_entry_t* last;
220
221
222 local_irq_save(flags);
223 while (!done) {
224 unlinked = NULL;
225 spin_lock(&gedf_cpu_lock);
226 last = lowest_prio_cpu(0);
227 if (likely(last)) {
228 task = cheap_take_if(
229 (cheap_take_predicate_t) preemption_required,
230 last,
231 (cheap_prio_t) edf_higher_prio,
232 &gedf_ready_queue);
233 if (task) {
234 TRACE_TASK(task, "removed from ready Q\n");
235 /* cpu lock was dropped, reacquire */
236 spin_lock(&gedf_cpu_lock);
237 if (last->linked && !last->picked)
238 /* can be requeued by us */
239 unlinked = last->linked;
240 TRACE("check_for_preemptions: "
241 "attempting to link task %d to %d\n",
242 task->pid, last->cpu);
243 link_task_to_cpu(task, last);
244 update_cpu_position(last);
245 } else
246 /* no preemption required */
247 done = 1;
248 } else
249 /* all gone, being checked elsewhere? */
250 done = 1;
251 spin_unlock(&gedf_cpu_lock);
252 if (unlinked)
253 /* stick it back into the queue */
254 requeue(unlinked);
255 if (last && !done)
256 /* we have a preemption, send IPI */
257 preempt(last);
258 }
259 local_irq_restore(flags);
260}
261
262/* gedf_job_arrival: task is either resumed or released
263 * call only unlocked!
264 */
265static noinline void gedf_job_arrival(struct task_struct* task)
266{
267 requeue(task);
268 check_for_preemptions();
269}
270
271static void gedf_release_jobs(rt_domain_t* rt, struct heap* tasks)
272{
273 struct heap_node* hn;
274 struct task_struct* t;
275 unsigned long flags;
276
277
278 local_irq_save(flags);
279 /* insert unlocked */
280 while ((hn = heap_take(edf_ready_order, tasks))) {
281 t = (struct task_struct*) hn->value;
282 TRACE_TASK(t, "to be merged into ready queue "
283 "(is_released:%d, is_running:%d)\n",
284 is_released(t, litmus_clock()),
285 is_running(t));
286 add_to_ready_queue(t);
287 }
288
289 local_irq_restore(flags);
290 check_for_preemptions();
291}
292
293/* caller holds gedf_lock */
294static noinline int job_completion(cpu_entry_t* entry, int forced)
295{
296
297 struct task_struct *t = entry->scheduled;
298
299 sched_trace_task_completion(t, forced);
300
301 TRACE_TASK(t, "job_completion().\n");
302
303 /* set flags */
304 set_rt_flags(t, RT_F_SLEEP);
305 /* prepare for next period */
306 prepare_for_next_period(t);
307 if (is_released(t, litmus_clock()))
308 sched_trace_task_release(t);
309
310
311 if (is_released(t, litmus_clock())){
312 /* we changed the priority, see if we need to preempt */
313 set_rt_flags(t, RT_F_RUNNING);
314 update_cpu_position(entry);
315 return 1;
316 }
317 else {
318 /* it has got to wait */
319 unlink(t);
320 add_release(&gedf, t);
321 return 0;
322 }
323}
324
325/* gedf_tick - this function is called for every local timer
326 * interrupt.
327 *
328 * Forces a reschedule if the currently scheduled real-time task has
329 * exhausted its budget.
330 */
331static void gedf_tick(struct task_struct* t)
332{
333 if (is_realtime(t) && budget_exhausted(t))
334 set_tsk_need_resched(t);
335}
336
337static struct task_struct* gedf_schedule(struct task_struct * prev)
338{
339 cpu_entry_t* entry = &__get_cpu_var(gedf_cpu_entries);
340 int out_of_time, sleep, preempt, exists, blocks;
341 struct task_struct* next = NULL;
342
343 /* Bail out early if we are the release master.
344 * The release master never schedules any real-time tasks.
345 */
346 if (gedf.release_master == entry->cpu)
347 return NULL;
348
349 TRACE_TASK(prev, "invoked gedf_schedule.\n");
350
351 /* sanity checking */
352 BUG_ON(entry->scheduled && entry->scheduled != prev);
353 BUG_ON(entry->scheduled && !is_realtime(prev));
354 BUG_ON(is_realtime(prev) && !entry->scheduled);
355
356 /* (0) Determine state */
357 exists = entry->scheduled != NULL;
358 blocks = exists && !is_running(entry->scheduled);
359 out_of_time = exists && budget_exhausted(entry->scheduled);
360 sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
361
362 spin_lock(&gedf_cpu_lock);
363
364 preempt = entry->scheduled != entry->linked;
365
366 if (exists)
367 TRACE_TASK(prev,
368 "blocks:%d out_of_time:%d sleep:%d preempt:%d "
369 "state:%d sig:%d\n",
370 blocks, out_of_time, sleep, preempt,
371 prev->state, signal_pending(prev));
372 if (preempt && entry->linked)
373 TRACE_TASK(prev, "will be preempted by %s/%d\n",
374 entry->linked->comm, entry->linked->pid);
375
376 /* If a task blocks we have no choice but to reschedule.
377 */
378 if (blocks)
379 unlink(entry->scheduled);
380
381
382 /* Any task that is preemptable and either exhausts its execution
383 * budget or wants to sleep completes. We may have to reschedule after
384 * this. Don't do a job completion if we block (can't have timers
385 * running for blocked jobs). Preemptions go first for the same reason.
386 */
387 if ((out_of_time || sleep) && !blocks && !preempt) {
388 if (job_completion(entry, !sleep)) {
389 /* Task might stay with us.
390 * Drop locks and check for preemptions.
391 */
392 spin_unlock(&gedf_cpu_lock);
393 /* anything to update ? */
394 check_for_preemptions();
395 spin_lock(&gedf_cpu_lock);
396 /* if something higher priority got linked,
397 * then we need to add the task into the
398 * ready queue (since it wasn't added by
399 * check_for_preemptions b/c picked==1).
400 */
401 if (entry->linked != prev)
402 add_to_ready_queue(prev);
403 }
404 }
405
406 /* Link pending task if we became unlinked.
407 * NOTE: Do not hold locks while performing ready queue updates
408 * since we want concurrent access to the queue.
409 */
410 if (!entry->linked) {
411 if (exists)
412 /* We are committed to descheduling; erase marker
413 * before we drop the lock.
414 */
415 tsk_rt(prev)->scheduled_on = NO_CPU;
416 spin_unlock(&gedf_cpu_lock);
417 check_for_preemptions(); /* update links */
418 spin_lock(&gedf_cpu_lock);
419 }
420
421 /* The final scheduling decision. Do we need to switch for some reason?
422 * If linked is different from scheduled, then select linked as next.
423 */
424 if (entry->linked != entry->scheduled) {
425 /* Schedule a linked job? */
426 if (entry->linked) {
427 entry->linked->rt_param.scheduled_on = entry->cpu;
428 next = entry->linked;
429 }
430 if (entry->scheduled)
431 entry->scheduled->rt_param.scheduled_on = NO_CPU;
432 } else
433 /* Only override Linux scheduler if we have a real-time task
434 * scheduled that needs to continue.
435 */
436 if (exists)
437 next = prev;
438
439 /* Mark entry->linked as being ours. Do this unconditionally since
440 * entry->linked might have become reassigned to us while we dropped
441 * the lock even though we never descheduled it. In this case,
442 * entry->picked became reset.
443 */
444 entry->picked = 1;
445 if (next)
446 tsk_rt(next)->scheduled_on = entry->cpu;
447 spin_unlock(&gedf_cpu_lock);
448 if (exists && preempt && !blocks)
449 /* stick preempted task back into the ready queue */
450 gedf_job_arrival(prev);
451
452 if (next)
453 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
454 else if (exists && !next)
455 TRACE("becomes idle at %llu.\n", litmus_clock());
456
457 return next;
458}
459
460
461/* _finish_switch - we just finished the switch away from prev
462 */
463static void gedf_finish_switch(struct task_struct *prev)
464{
465 cpu_entry_t* entry = &__get_cpu_var(gedf_cpu_entries);
466
467 entry->scheduled = is_realtime(current) ? current : NULL;
468 TRACE_TASK(prev, "switched away from\n");
469}
470
471
472/* Prepare a task for running in RT mode
473 */
474static void gedf_task_new(struct task_struct * t, int on_rq, int running)
475{
476 unsigned long flags;
477 cpu_entry_t* entry;
478
479 TRACE("gedf: task new %d\n", t->pid);
480
481 spin_lock_irqsave(&gedf_cpu_lock, flags);
482
483 /* setup job params */
484 release_at(t, litmus_clock());
485
486 if (running) {
487 entry = &per_cpu(gedf_cpu_entries, task_cpu(t));
488 BUG_ON(entry->scheduled);
489 if (entry->cpu != gedf.release_master) {
490 entry->scheduled = t;
491 t->rt_param.scheduled_on = task_cpu(t);
492 } else {
493 preempt(entry);
494 tsk_rt(t)->scheduled_on = NO_CPU;
495 }
496 } else {
497 tsk_rt(t)->scheduled_on = NO_CPU;
498 }
499 tsk_rt(t)->linked_on = NO_CPU;
500
501 spin_unlock_irqrestore(&gedf_cpu_lock, flags);
502
503 if (!running || entry->cpu == gedf.release_master)
504 /* schedule() will not insert task into ready_queue */
505 gedf_job_arrival(t);
506}
507
508static void gedf_task_wake_up(struct task_struct *task)
509{
510 unsigned long flags;
511 lt_t now;
512
513 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
514
515 spin_lock_irqsave(&gedf_cpu_lock, flags);
516 now = litmus_clock();
517 if (is_tardy(task, now)) {
518 /* new sporadic release */
519 release_at(task, now);
520 sched_trace_task_release(task);
521 }
522 spin_unlock_irqrestore(&gedf_cpu_lock, flags);
523 gedf_job_arrival(task);
524}
525
526static void gedf_task_block(struct task_struct *t)
527{
528 TRACE_TASK(t, "block at %llu\n", litmus_clock());
529}
530
531static void gedf_task_exit(struct task_struct * t)
532{
533 unsigned long flags;
534
535 /* unlink if necessary */
536 spin_lock_irqsave(&gedf_cpu_lock, flags);
537 /* remove from CPU state, if necessary */
538 unlink(t);
539 if (tsk_rt(t)->scheduled_on != NO_CPU) {
540 gedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
541 tsk_rt(t)->scheduled_on = NO_CPU;
542 } else {
543 /* FIXME: If t is currently queued, then we need to
544 * dequeue it now; otherwise it will probably
545 * cause a crash once it is dequeued.
546 */
547 TRACE_TASK(t, "called gedf_task_exit(), "
548 "but is not scheduled!\n");
549 }
550 spin_unlock_irqrestore(&gedf_cpu_lock, flags);
551
552 TRACE_TASK(t, "RIP\n");
553}
554
555static long gedf_admit_task(struct task_struct* tsk)
556{
557 return 0;
558}
559
560
561static long gedf_activate_plugin(void)
562{
563 int cpu;
564 cpu_entry_t *entry;
565
566 heap_init(&gedf_cpu_heap);
567 gedf.release_master = atomic_read(&release_master_cpu);
568
569 for_each_online_cpu(cpu) {
570 entry = &per_cpu(gedf_cpu_entries, cpu);
571 heap_node_init(&entry->hn, entry);
572 entry->linked = NULL;
573 entry->scheduled = NULL;
574 entry->picked = 0;
575 if (cpu != gedf.release_master) {
576 TRACE("G-EDF: Initializing CPU #%d.\n", cpu);
577 update_cpu_position(entry);
578 } else {
579 TRACE("G-EDF: CPU %d is release master.\n", cpu);
580 }
581 }
582 return 0;
583}
584
585
586/* Plugin object */
587static struct sched_plugin gedf_plugin __cacheline_aligned_in_smp = {
588 .plugin_name = "G-EDF",
589 .finish_switch = gedf_finish_switch,
590 .tick = gedf_tick,
591 .task_new = gedf_task_new,
592 .complete_job = complete_job,
593 .task_exit = gedf_task_exit,
594 .schedule = gedf_schedule,
595 .task_wake_up = gedf_task_wake_up,
596 .task_block = gedf_task_block,
597 .admit_task = gedf_admit_task,
598 .activate_plugin = gedf_activate_plugin,
599};
600
601
602static int __init init_gedf(void)
603{
604 int cpu;
605 cpu_entry_t *entry;
606
607 cheap_init(&gedf_ready_queue, GEDF_MAX_TASKS, gedf_cheap_nodes);
608 /* initialize CPU state */
609 for (cpu = 0; cpu < NR_CPUS; cpu++) {
610 entry = &per_cpu(gedf_cpu_entries, cpu);
611 gedf_cpus[cpu] = entry;
612 entry->cpu = cpu;
613 entry->hn = &gedf_heap_node[cpu];
614 heap_node_init(&entry->hn, entry);
615 }
616 edf_domain_init(&gedf, NULL, gedf_release_jobs);
617 return register_sched_plugin(&gedf_plugin);
618}
619
620
621module_init(init_gedf);
diff --git a/litmus/sched_ghq_edf.c b/litmus/sched_ghq_edf.c
deleted file mode 100644
index a9b1d06dd9..0000000000
--- a/litmus/sched_ghq_edf.c
+++ /dev/null
@@ -1,720 +0,0 @@
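/* sched_ghq_edf.c -- non-mainline G-EDF plugin created for RTSS'09.
 *
 * Hierarchical-queue variant: each CPU owns a subqueue of ready jobs,
 * and a master queue orders the subqueues by their highest-priority
 * job.  Most insertions contend only for the local subqueue lock; the
 * master queue is touched only when a subqueue's top element changes.
 */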
1#include <linux/spinlock.h>
2#include <linux/percpu.h>
3#include <linux/sched.h>
4
5#include <litmus/litmus.h>
6#include <litmus/jobs.h>
7#include <litmus/sched_plugin.h>
8#include <litmus/edf_common.h>
9#include <litmus/sched_trace.h>
10
11#include <litmus/heap.h>
12
13#include <linux/module.h>
14
15/* cpu_entry_t - maintain the linked and scheduled state
16 */
17typedef struct {
18 int cpu;
19 struct task_struct* linked; /* only RT tasks */
20 int picked; /* linked was seen */
21 struct task_struct* scheduled; /* only RT tasks */
22 struct heap_node* hn;
23} cpu_entry_t;
24DEFINE_PER_CPU(cpu_entry_t, ghqedf_cpu_entries);
25
26DEFINE_SPINLOCK(ghqedf_cpu_lock); /* synchronize access to cpu heap */
27
28cpu_entry_t* ghqedf_cpus[NR_CPUS];
29
30/* the cpus queue themselves according to priority in here */
31static struct heap_node ghqedf_heap_node[NR_CPUS];
32static struct heap ghqedf_cpu_heap;
33
34static rt_domain_t ghqedf; /* used only for the release queue */
35
36struct subqueue {
37 struct heap queue;
38 struct task_struct* top;
39 struct heap_node* hn;
40 spinlock_t lock;
41};
42
43/* per-cpu sub queue */
44//DEFINE_PER_CPU(struct subqueue, ghqedf_subqueue);
45
46struct subqueue ghqedf_subqueue[NR_CPUS];
47
48/* heap nodes for subqueue::hn field */
49static struct heap_node ghqedf_subqueue_heap_node[NR_CPUS];
50
51/* queue of sub queues */
52struct heap master_queue;
53
54/* re-use ready queue lock */
55#define master_lock (ghqedf.ready_lock)
56
57static int subqueue_higher_prio(struct heap_node *_a, struct heap_node *_b)
58{
59 struct subqueue *a, *b;
60 a = _a->value;
61 b = _b->value;
62 return edf_higher_prio(a->top, b->top);
63}
64
65static void subqueues_init(void)
66{
67 int cpu;
68 struct subqueue *q;
69
70 heap_init(&master_queue);
71
72 for_each_online_cpu(cpu) {
73// q = &per_cpu(ghqedf_subqueue, cpu);
74 q = ghqedf_subqueue + cpu;
75 heap_init(&q->queue);
76 q->top = NULL;
77 q->hn = ghqedf_subqueue_heap_node + cpu;
78 heap_node_init(&q->hn, q);
79 spin_lock_init(&q->lock);
80 heap_insert(subqueue_higher_prio, &master_queue, q->hn);
81 }
82}
83
84static void __update_top(struct subqueue* q)
85{
86 struct heap_node *tmp;
87
88 tmp = heap_peek(edf_ready_order, &q->queue);
89 q->top = tmp ? tmp->value : NULL;
90}
91
92static void update_top(struct subqueue* q)
93{
94 spin_lock(&q->lock);
95 __update_top(q);
96 spin_unlock(&q->lock);
97}
98
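/* Merge heap 'h' into the local CPU's subqueue.  The master queue is
 * only updated (under master_lock) if the subqueue's highest-priority
 * job changed; if the subqueue node is not currently in the master
 * queue, another CPU is updating it concurrently and the re-insertion
 * is skipped.
 */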
99static void merge_into_ready_queue(struct heap *h)
100{
101// struct subqueue *q = &__get_cpu_var(ghqedf_subqueue);
102 struct subqueue *q = ghqedf_subqueue + smp_processor_id();
103 struct heap_node *tmp;
104 void *old_top;
105 int changed_top = 0;
106
107 spin_lock(&q->lock);
108 tmp = heap_peek(edf_ready_order, &q->queue);
109 old_top = tmp ? tmp->value : NULL;
110 heap_union(edf_ready_order, &q->queue, h);
111 /* is the new min the task that we just inserted? */
112 changed_top = !old_top ||
113 heap_peek(edf_ready_order, &q->queue)->value != old_top;
114 spin_unlock(&q->lock);
115 if (changed_top) {
116 /* need to update master queue */
117 spin_lock(&master_lock);
118 /* If it is not in the heap then it is already
119 * being updated concurrently, so we skip it.
120 */
121 if (likely(heap_node_in_heap(q->hn))) {
122 heap_delete(subqueue_higher_prio, &master_queue, q->hn);
123 update_top(q);
124 heap_insert(subqueue_higher_prio, &master_queue, q->hn);
125 } else
126 TRACE("not updating subqueue top\n");
127 spin_unlock(&master_lock);
128 }
129}
130
131static void add_to_ready_queue(struct task_struct *t)
132{
133 struct heap tmp;
134
135 TRACE_TASK(t, "adding to ready queue\n");
136 heap_init(&tmp);
137 heap_insert(edf_ready_order, &tmp, tsk_rt(t)->heap_node);
138 merge_into_ready_queue(&tmp);
139}
140
141
142static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
143{
144 cpu_entry_t *a, *b;
145 a = _a->value;
146 b = _b->value;
147 /* Note that a and b are inverted: we want the lowest-priority CPU at
148 * the top of the heap.
149 */
150 return edf_higher_prio(b->linked, a->linked);
151}
152
153static void remove_from_cpu_heap(cpu_entry_t* entry)
154{
155 if (likely(heap_node_in_heap(entry->hn)))
156 heap_delete(cpu_lower_prio, &ghqedf_cpu_heap, entry->hn);
157}
158
159/* update_cpu_position - Move the cpu entry to the correct place to maintain
160 * order in the cpu queue. Caller must hold ghqedf lock.
161 */
162static void update_cpu_position(cpu_entry_t *entry)
163{
164 remove_from_cpu_heap(entry);
165 heap_insert(cpu_lower_prio, &ghqedf_cpu_heap, entry->hn);
166}
167
168/* caller must hold ghqedf lock */
169static cpu_entry_t* lowest_prio_cpu(int take)
170{
171 struct heap_node* hn;
172 if (take)
173 hn = heap_take(cpu_lower_prio, &ghqedf_cpu_heap);
174 else
175 hn = heap_peek(cpu_lower_prio, &ghqedf_cpu_heap);
176 return hn ? hn->value : NULL;
177}
178
179
180/* link_task_to_cpu - Update the link of a CPU.
181 * Handles the case where the to-be-linked task is already
182 * scheduled on a different CPU.
183 */
184static noinline void link_task_to_cpu(struct task_struct* linked,
185 cpu_entry_t *entry)
186{
187 cpu_entry_t *sched = NULL;
188 struct task_struct* tmp;
189 int on_cpu;
190
191 BUG_ON(linked && !is_realtime(linked));
192
193 /* Currently linked task is set to be unlinked. */
194 if (entry->linked) {
195 entry->linked->rt_param.linked_on = NO_CPU;
196 }
197
198 /* Link new task to CPU. */
199 if (linked) {
200 set_rt_flags(linked, RT_F_RUNNING);
201 /* handle the case where the task is already scheduled somewhere! */
202 on_cpu = linked->rt_param.scheduled_on;
203 if (on_cpu != NO_CPU) {
204 sched = &per_cpu(ghqedf_cpu_entries, on_cpu);
205 /* this should only happen if not linked already */
206 BUG_ON(sched->linked == linked);
207
208 /* If we are already scheduled on the CPU to which we
209 * wanted to link, we don't need to do the swap --
210 * we just link ourselves to the CPU and depend on
211 * the caller to get things right.
212 *
213 * But only swap if the other node is in the queue.
214 * If it is not, then it is being updated
215 * concurrently and some other task was already
216 * picked for it.
217 */
218 if (entry != sched && heap_node_in_heap(sched->hn)) {
219 TRACE_TASK(linked,
220 "already scheduled on %d, "
221 "updating link.\n",
222 sched->cpu);
223 tmp = sched->linked;
224 linked->rt_param.linked_on = sched->cpu;
225 sched->linked = linked;
226 sched->picked = 1;
227 update_cpu_position(sched);
228 linked = tmp;
229 }
230 }
231 if (linked) /* might be NULL due to swap */
232 linked->rt_param.linked_on = entry->cpu;
233 }
234 entry->linked = linked;
235 entry->picked = entry == sched; /* set to one if we linked to the
236 * CPU that the task is
237 * executing on
238 */
239 if (linked)
240 TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
241 else
242 TRACE("NULL linked to %d.\n", entry->cpu);
243 update_cpu_position(entry);
244}
245
246/* unlink - Make sure a task is not linked any longer to an entry
247 * where it was linked before. Must hold ghqedf_lock.
248 */
249static noinline void unlink(struct task_struct* t)
250{
251 cpu_entry_t *entry;
252
253 if (t->rt_param.linked_on != NO_CPU) {
254 /* unlink */
255 entry = &per_cpu(ghqedf_cpu_entries, t->rt_param.linked_on);
256 t->rt_param.linked_on = NO_CPU;
257 link_task_to_cpu(NULL, entry);
258 }
259}
260
261
262/* preempt - force a CPU to reschedule
263 */
264static noinline void preempt(cpu_entry_t *entry)
265{
266 if (smp_processor_id() == entry->cpu)
267 set_tsk_need_resched(current);
268 else
269 smp_send_reschedule(entry->cpu);
270}
271
272/* requeue - Put an unlinked task into gsn-edf domain.
273 * Caller must hold ghqedf_lock.
274 *
275 * call unlocked, but with preemptions disabled!
276 */
277static noinline void requeue(struct task_struct* task)
278{
279 if (is_released(task, litmus_clock()))
280 add_to_ready_queue(task);
281 else
282 /* it has got to wait */
283 add_release(&ghqedf, task);
284}
285
286
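/* Peek at the globally highest-priority pending job via the master
 * queue.  If it should preempt 'last' (the lowest-priority CPU), that
 * CPU is dequeued and ghqedf_cpu_lock is dropped (the caller must
 * reacquire it), the job is removed from its subqueue, and the subqueue
 * is re-inserted into the master queue with its new top.  Returns the
 * job to be linked, or NULL if no preemption is required.
 */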
287static struct task_struct* take_if_preempt_required(cpu_entry_t* last)
288{
289 struct heap_node* tmp;
290 struct subqueue* q;
291 struct task_struct* t;
292 int preempt_required = 0;
293
294 spin_lock(&master_lock);
295 tmp = heap_peek(subqueue_higher_prio, &master_queue);
296 BUG_ON(!tmp); /* should be there */
297 q = tmp->value;
298
299 spin_lock(&q->lock);
300 tmp = heap_peek(edf_ready_order, &q->queue);
301 t = tmp ? tmp->value : NULL;
302 preempt_required = edf_higher_prio(t, last->linked);
303 if (preempt_required) {
304 /* take it out */
305 last = lowest_prio_cpu(1);
306 spin_unlock(&ghqedf_cpu_lock);
307 heap_delete(subqueue_higher_prio, &master_queue, q->hn);
308 }
309 /* drop the master lock while we update the subqueue */
310 spin_unlock(&master_lock);
311
312 if (preempt_required) {
313 heap_delete(edf_ready_order, &q->queue, tmp);
314 /* precompute, so that next lookup is O(1) */
315 __update_top(q);
316 spin_unlock(&q->lock);
317
318 /* re-insert with new priority */
319 spin_lock(&master_lock);
320 /* update, with right locking order */
321 update_top(q);
322 heap_insert(subqueue_higher_prio, &master_queue, q->hn);
323 spin_unlock(&master_lock);
324
325 return t;
326 } else {
327 spin_unlock(&q->lock);
328 return NULL;
329 }
330}
331
332
333/* check for any necessary preemptions */
334static void check_for_preemptions(void)
335{
336 int done = 0;
337 unsigned long flags;
338 struct task_struct *task, *unlinked;
339 cpu_entry_t* last;
340
341
342 local_irq_save(flags);
343 while (!done) {
344 unlinked = NULL;
345 spin_lock(&ghqedf_cpu_lock);
346 last = lowest_prio_cpu(0);
347 if (likely(last)) {
348 task = take_if_preempt_required(last);
349 if (task) {
350 TRACE_TASK(task, "removed from ready Q\n");
351 /* cpu lock was dropped, reacquire */
352 spin_lock(&ghqedf_cpu_lock);
353 if (last->linked && !last->picked)
354 /* can be requeued by us */
355 unlinked = last->linked;
356 TRACE("check_for_preemptions: "
357 "attempting to link task %d to %d\n",
358 task->pid, last->cpu);
359 link_task_to_cpu(task, last);
360 update_cpu_position(last);
361 } else
362 /* no preemption required */
363 done = 1;
364 } else
365 /* all gone, being checked elsewhere? */
366 done = 1;
367 spin_unlock(&ghqedf_cpu_lock);
368 if (unlinked)
369 /* stick it back into the queue */
370 requeue(unlinked);
371 if (last && !done)
372 /* we have a preemption, send IPI */
373 preempt(last);
374 }
375 TRACE("done with preemption checking\n");
376 local_irq_restore(flags);
377}
378
379/* ghqedf_job_arrival: task is either resumed or released
380 * call only unlocked!
381 */
382static noinline void ghqedf_job_arrival(struct task_struct* task)
383{
384 requeue(task);
385 check_for_preemptions();
386}
387
388static void ghqedf_release_jobs(rt_domain_t* rt, struct heap* tasks)
389{
390 unsigned long flags;
391
392 TRACE("release_jobs() invoked\n");
393 local_irq_save(flags);
394 /* insert unlocked */
395 merge_into_ready_queue(tasks);
396 local_irq_restore(flags);
397 check_for_preemptions();
398}
399
400/* caller holds ghqedf_lock */
401static noinline int job_completion(cpu_entry_t* entry, int forced)
402{
403
404 struct task_struct *t = entry->scheduled;
405
406 sched_trace_task_completion(t, forced);
407
408 TRACE_TASK(t, "job_completion().\n");
409
410 /* set flags */
411 set_rt_flags(t, RT_F_SLEEP);
412 /* prepare for next period */
413 prepare_for_next_period(t);
414 if (is_released(t, litmus_clock()))
415 sched_trace_task_release(t);
416
417
418 if (is_released(t, litmus_clock())){
419 /* we changed the priority, see if we need to preempt */
420 set_rt_flags(t, RT_F_RUNNING);
421 update_cpu_position(entry);
422 return 1;
423 }
424 else {
425 /* it has got to wait */
426 unlink(t);
427 add_release(&ghqedf, t);
428 return 0;
429 }
430}
431
432/* ghqedf_tick - this function is called for every local timer
433 * interrupt.
434 *
435 * Forces a reschedule if the currently scheduled real-time task has
436 * exhausted its budget.
437 */
438static void ghqedf_tick(struct task_struct* t)
439{
440 if (is_realtime(t) && budget_exhausted(t))
441 set_tsk_need_resched(t);
442}
443
444static struct task_struct* ghqedf_schedule(struct task_struct * prev)
445{
446 cpu_entry_t* entry = &__get_cpu_var(ghqedf_cpu_entries);
447 int out_of_time, sleep, preempt, exists, blocks;
448 struct task_struct* next = NULL;
449
450 /* Bail out early if we are the release master.
451 * The release master never schedules any real-time tasks.
452 */
453 if (ghqedf.release_master == entry->cpu)
454 return NULL;
455
456// TRACE_TASK(prev, "invoked ghqedf_schedule.\n");
457
458 /* sanity checking */
459 BUG_ON(entry->scheduled && entry->scheduled != prev);
460 BUG_ON(entry->scheduled && !is_realtime(prev));
461 BUG_ON(is_realtime(prev) && !entry->scheduled);
462
463 /* (0) Determine state */
464 exists = entry->scheduled != NULL;
465 blocks = exists && !is_running(entry->scheduled);
466 out_of_time = exists && budget_exhausted(entry->scheduled);
467 sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
468
469 spin_lock(&ghqedf_cpu_lock);
470
471 preempt = entry->scheduled != entry->linked;
472
473 if (exists)
474 TRACE_TASK(prev,
475 "blocks:%d out_of_time:%d sleep:%d preempt:%d "
476 "state:%d sig:%d\n",
477 blocks, out_of_time, sleep, preempt,
478 prev->state, signal_pending(prev));
479 if (preempt && entry->linked)
480 TRACE_TASK(prev, "will be preempted by %s/%d\n",
481 entry->linked->comm, entry->linked->pid);
482
483 /* If a task blocks we have no choice but to reschedule.
484 */
485 if (blocks)
486 unlink(entry->scheduled);
487
488
489 /* Any task that is preemptable and either exhausts its execution
490 * budget or wants to sleep completes. We may have to reschedule after
491 * this. Don't do a job completion if we block (can't have timers
492 * running for blocked jobs). Preemptions go first for the same reason.
493 */
494 if ((out_of_time || sleep) && !blocks && !preempt) {
495 if (job_completion(entry, !sleep)) {
496 /* Task might stay with us.
497 * Drop locks and check for preemptions.
498 */
499 spin_unlock(&ghqedf_cpu_lock);
500 /* anything to update ? */
501 check_for_preemptions();
502 spin_lock(&ghqedf_cpu_lock);
503 /* if something higher priority got linked,
504 * then we need to add the task into the
505 * ready queue (since it wasn't added by
506 * check_for_preemptions b/c picked==1).
507 */
508 if (entry->linked != prev)
509 add_to_ready_queue(prev);
510 }
511 }
512
513 /* Link pending task if we became unlinked.
514 * NOTE: Do not hold locks while performing ready queue updates
515 * since we want concurrent access to the queue.
516 */
517 if (!entry->linked) {
518 if (exists)
519 /* We are committed to descheduling; erase marker
520 * before we drop the lock.
521 */
522 tsk_rt(prev)->scheduled_on = NO_CPU;
523 spin_unlock(&ghqedf_cpu_lock);
524 check_for_preemptions(); /* update links */
525 spin_lock(&ghqedf_cpu_lock);
526 }
527
528 /* The final scheduling decision. Do we need to switch for some reason?
529 * If linked is different from scheduled, then select linked as next.
530 */
531 if (entry->linked != entry->scheduled) {
532 /* Schedule a linked job? */
533 if (entry->linked) {
534 entry->linked->rt_param.scheduled_on = entry->cpu;
535 entry->picked = 1;
536 next = entry->linked;
537 }
538 if (entry->scheduled)
539 entry->scheduled->rt_param.scheduled_on = NO_CPU;
540 } else
541 /* Only override Linux scheduler if we have a real-time task
542 * scheduled that needs to continue.
543 */
544 if (exists)
545 next = prev;
546
547 spin_unlock(&ghqedf_cpu_lock);
548 if (exists && preempt && !blocks)
549 /* stick preempted task back into the ready queue */
550 ghqedf_job_arrival(prev);
551
552 if (next)
553 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
554 else if (exists && !next)
555 TRACE("becomes idle at %llu.\n", litmus_clock());
556
557 return next;
558}
559
560
561/* _finish_switch - we just finished the switch away from prev
562 */
563static void ghqedf_finish_switch(struct task_struct *prev)
564{
565 cpu_entry_t* entry = &__get_cpu_var(ghqedf_cpu_entries);
566
567 entry->scheduled = is_realtime(current) ? current : NULL;
568 TRACE_TASK(prev, "switched away from\n");
569}
570
571
572/* Prepare a task for running in RT mode
573 */
574static void ghqedf_task_new(struct task_struct * t, int on_rq, int running)
575{
576 unsigned long flags;
577 cpu_entry_t* entry;
578
579 TRACE("ghqedf: task new %d\n", t->pid);
580
581 spin_lock_irqsave(&ghqedf_cpu_lock, flags);
582
583 /* setup job params */
584 release_at(t, litmus_clock());
585
586 if (running) {
587 entry = &per_cpu(ghqedf_cpu_entries, task_cpu(t));
588 BUG_ON(entry->scheduled);
589 if (entry->cpu != ghqedf.release_master) {
590 entry->scheduled = t;
591 t->rt_param.scheduled_on = task_cpu(t);
592 } else {
593 preempt(entry);
594 tsk_rt(t)->scheduled_on = NO_CPU;
595 }
596 } else {
597 tsk_rt(t)->scheduled_on = NO_CPU;
598 }
599 tsk_rt(t)->linked_on = NO_CPU;
600
601 spin_unlock_irqrestore(&ghqedf_cpu_lock, flags);
602
603 if (!running || entry->cpu == ghqedf.release_master)
604 ghqedf_job_arrival(t);
605}
606
607static void ghqedf_task_wake_up(struct task_struct *task)
608{
609 unsigned long flags;
610 lt_t now;
611
612 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
613
614 spin_lock_irqsave(&ghqedf_cpu_lock, flags);
615 now = litmus_clock();
616 if (is_tardy(task, now)) {
617 /* new sporadic release */
618 release_at(task, now);
619 sched_trace_task_release(task);
620 }
621 spin_unlock_irqrestore(&ghqedf_cpu_lock, flags);
622 ghqedf_job_arrival(task);
623}
624
625static void ghqedf_task_block(struct task_struct *t)
626{
627 TRACE_TASK(t, "block at %llu\n", litmus_clock());
628}
629
630static void ghqedf_task_exit(struct task_struct * t)
631{
632 unsigned long flags;
633
634 /* unlink if necessary */
635 spin_lock_irqsave(&ghqedf_cpu_lock, flags);
636 /* remove from CPU state, if necessary */
637 unlink(t);
638 if (tsk_rt(t)->scheduled_on != NO_CPU) {
639 ghqedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
640 tsk_rt(t)->scheduled_on = NO_CPU;
641 } else {
642 /* FIXME: If t is currently queued, then we need to
643 * dequeue it now; otherwise it will probably
644 * cause a crash once it is dequeued.
645 */
646 TRACE_TASK(t, "called ghqedf_task_exit(), "
647 "but is not scheduled!\n");
648 }
649 spin_unlock_irqrestore(&ghqedf_cpu_lock, flags);
650
651 TRACE_TASK(t, "RIP\n");
652}
653
654static long ghqedf_admit_task(struct task_struct* tsk)
655{
656 return 0;
657}
658
659
660static long ghqedf_activate_plugin(void)
661{
662 int cpu;
663 cpu_entry_t *entry;
664
665 heap_init(&ghqedf_cpu_heap);
666 subqueues_init();
667 ghqedf.release_master = atomic_read(&release_master_cpu);
668
669 for_each_online_cpu(cpu) {
670 entry = &per_cpu(ghqedf_cpu_entries, cpu);
671 heap_node_init(&entry->hn, entry);
672 entry->linked = NULL;
673 entry->scheduled = NULL;
674 entry->picked = 0;
675 if (cpu != ghqedf.release_master) {
676 TRACE("G-EDF: Initializing CPU #%d.\n", cpu);
677 update_cpu_position(entry);
678 } else {
679 TRACE("G-EDF: CPU %d is release master.\n", cpu);
680 }
681 }
682 return 0;
683}
684
685
686/* Plugin object */
687static struct sched_plugin ghqedf_plugin __cacheline_aligned_in_smp = {
688 .plugin_name = "GHQ-EDF",
689 .finish_switch = ghqedf_finish_switch,
690 .tick = ghqedf_tick,
691 .task_new = ghqedf_task_new,
692 .complete_job = complete_job,
693 .task_exit = ghqedf_task_exit,
694 .schedule = ghqedf_schedule,
695 .task_wake_up = ghqedf_task_wake_up,
696 .task_block = ghqedf_task_block,
697 .admit_task = ghqedf_admit_task,
698 .activate_plugin = ghqedf_activate_plugin,
699};
700
701
702static int __init init_ghqedf(void)
703{
704 int cpu;
705 cpu_entry_t *entry;
706
707 /* initialize CPU state */
708 for (cpu = 0; cpu < NR_CPUS; cpu++) {
709 entry = &per_cpu(ghqedf_cpu_entries, cpu);
710 ghqedf_cpus[cpu] = entry;
711 entry->cpu = cpu;
712 entry->hn = &ghqedf_heap_node[cpu];
713 heap_node_init(&entry->hn, entry);
714 }
715 edf_domain_init(&ghqedf, NULL, ghqedf_release_jobs);
716 return register_sched_plugin(&ghqedf_plugin);
717}
718
719
720module_init(init_ghqedf);
diff --git a/litmus/sched_gq_edf.c b/litmus/sched_gq_edf.c
deleted file mode 100644
index 7b6e8ddbe8..0000000000
--- a/litmus/sched_gq_edf.c
+++ /dev/null
@@ -1,606 +0,0 @@
1/* A quantum-based implementation of G-EDF.
2 *
3 * Based on GSN-EDF.
4 */
5
6#include <linux/spinlock.h>
7#include <linux/percpu.h>
8#include <linux/sched.h>
9
10#include <litmus/litmus.h>
11#include <litmus/jobs.h>
12#include <litmus/sched_plugin.h>
13#include <litmus/edf_common.h>
14#include <litmus/sched_trace.h>
15
16#include <litmus/heap.h>
17
18#include <linux/module.h>
19
20/* cpu_state_t - maintain the linked and scheduled state
21 */
22typedef struct {
23 int cpu;
24 struct task_struct* linked; /* only RT tasks */
25 struct task_struct* scheduled; /* only RT tasks */
26 struct task_struct* absentee; /* blocked quantum owner */
27 struct heap_node* hn;
28} cpu_state_t;
29DEFINE_PER_CPU(cpu_state_t, gq_cpu_entries);
30
31cpu_state_t* gq_cpus[NR_CPUS];
32
33/* the cpus queue themselves according to priority in here */
34static struct heap_node gq_heap_node[NR_CPUS];
35static struct heap gq_cpu_heap;
36/* jobs to be merged at the beginning of the next quantum */
37static struct heap gq_released_heap;
38
39
40static rt_domain_t gqedf;
41#define gq_lock (gqedf.ready_lock)
42
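/* Two-level locking: gq_lock (the rt_domain ready lock) protects the
 * CPU state and the ready queue, while gq_release_lock protects only
 * gq_released_heap.  Release callbacks merely stash newly released jobs
 * in gq_released_heap; they are merged into the ready queue at the next
 * quantum boundary by gq_tick().
 */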
43DEFINE_SPINLOCK(gq_release_lock);
44
45static void preempt(cpu_state_t *entry)
46{
47 if (smp_processor_id() == entry->cpu)
48 set_tsk_need_resched(current);
49 else
50 smp_send_reschedule(entry->cpu);
51}
52
53static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
54{
55 cpu_state_t *a, *b;
56 a = _a->value;
57 b = _b->value;
58 /* Note that a and b are inverted: we want the lowest-priority CPU at
59 * the top of the heap.
60 */
61 return edf_higher_prio(b->linked, a->linked);
62}
63
64/* update_cpu_position - Move the cpu entry to the correct place to maintain
65 * order in the cpu queue. Caller must hold gqedf lock.
66 */
67static void update_cpu_position(cpu_state_t *entry)
68{
69 if (likely(heap_node_in_heap(entry->hn)))
70 heap_delete(cpu_lower_prio, &gq_cpu_heap, entry->hn);
71 heap_insert(cpu_lower_prio, &gq_cpu_heap, entry->hn);
72}
73
74/* caller must hold gqedf lock */
75static cpu_state_t* lowest_prio_cpu(void)
76{
77 struct heap_node* hn;
78 hn = heap_peek(cpu_lower_prio, &gq_cpu_heap);
79 return hn->value; //hn ? hn->value : NULL;
80}
81
82/* link_task_to_cpu - Update the link of a CPU.
83 * Handles the case where the to-be-linked task is already
84 * scheduled on a different CPU.
85 */
86static noinline void link_task_to_cpu(struct task_struct* linked,
87 cpu_state_t *entry)
88{
89 cpu_state_t *sched;
90 struct task_struct* tmp;
91 int on_cpu;
92
93 BUG_ON(linked && !is_realtime(linked));
94 /* don't relink tasks that are already linked */
95 BUG_ON(linked && tsk_rt(linked)->linked_on != NO_CPU);
96
97 /* Currently linked task is set to be unlinked. */
98 if (entry->linked) {
99 entry->linked->rt_param.linked_on = NO_CPU;
100 }
101
102 /* Link new task to CPU. */
103 if (linked) {
104 set_rt_flags(linked, RT_F_RUNNING);
105 /* handle the case where the task is already scheduled somewhere! */
106 on_cpu = linked->rt_param.scheduled_on;
107 if (on_cpu != NO_CPU) {
108 sched = &per_cpu(gq_cpu_entries, on_cpu);
109 /* this should only happen if not linked already */
110 BUG_ON(sched->linked == linked);
111
112 /* If we are already scheduled on the CPU to which we
113 * wanted to link, we don't need to do the swap --
114 * we just link ourselves to the CPU and depend on
115 * the caller to get things right.
116 */
117 if (entry != sched) {
118 TRACE_TASK(linked,
119 "already scheduled on %d, updating link.\n",
120 sched->cpu);
121 tmp = sched->linked;
122 linked->rt_param.linked_on = sched->cpu;
123 sched->linked = linked;
124 update_cpu_position(sched);
125 linked = tmp;
126 }
127 }
128 if (linked) /* might be NULL due to swap */
129 linked->rt_param.linked_on = entry->cpu;
130 }
131 entry->linked = linked;
132 if (linked)
133 TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
134 else
135 TRACE("NULL linked to %d.\n", entry->cpu);
136 update_cpu_position(entry);
137}
138
139/* unlink - Make sure a task is not linked any longer to an entry
140 * where it was linked before. Must hold gq_lock.
141 */
142static noinline void unlink(struct task_struct* t)
143{
144 cpu_state_t *entry;
145
146 if (unlikely(!t)) {
147 TRACE_BUG_ON(!t);
148 return;
149 }
150
151 if (t->rt_param.linked_on != NO_CPU) {
152 /* unlink */
153 entry = &per_cpu(gq_cpu_entries, t->rt_param.linked_on);
154 t->rt_param.linked_on = NO_CPU;
155 link_task_to_cpu(NULL, entry);
156 } else if (is_queued(t)) {
157 /* This is an interesting situation: t is scheduled,
158 * but was just recently unlinked. It cannot be
159 * linked anywhere else (because then it would have
160 * been relinked to this CPU), thus it must be in some
161 * queue. We must remove it from the list in this
162 * case.
163 */
164 TRACE_TASK(t, "unlink() -> remove()\n");
165 remove(&gqedf, t);
166 }
167}
168
169
170/* requeue - Put an unlinked task into gsn-edf domain.
171 * Caller must hold gq_lock.
172 */
173static noinline void requeue(struct task_struct* task)
174{
175 BUG_ON(!task);
176 /* sanity check before insertion */
177 BUG_ON(is_queued(task));
178
179 if (is_released(task, litmus_clock()))
180 __add_ready(&gqedf, task);
181 else
182 /* it has got to wait */
183 add_release(&gqedf, task);
184}
185
186/* check for any necessary preemptions */
187static void link_highest_priority_jobs(void)
188{
189 struct task_struct *task;
190 cpu_state_t* last;
191
192 for(last = lowest_prio_cpu();
193// last &&
194 edf_preemption_needed(&gqedf, last->linked);
195 last = lowest_prio_cpu()) {
196 TRACE("last cpu:%d linked:%s/%d preempt:%d\n",
197 last->cpu,
198 last->linked ? last->linked->comm : "---",
199 last->linked ? last->linked->pid : 0,
200 edf_preemption_needed(&gqedf, last->linked));
201 /* preemption necessary */
202 task = __take_ready(&gqedf);
203 TRACE("attempting to link task %d to %d at %llu\n",
204 task->pid, last->cpu, litmus_clock());
205 if (last->linked) {
206 TRACE_TASK(last->linked, "requeued.\n");
207 requeue(last->linked);
208 }
209 link_task_to_cpu(task, last);
210 }
211}
212
213/* caller holds gq_lock */
214static void job_completion(struct task_struct *t, int forced)
215{
216
217 sched_trace_task_completion(t, forced);
218
219 TRACE_TASK(t, "job_completion(forced=%d), state:%d\n", forced,
220 t->state);
221
222 /* prepare for next period */
223 prepare_for_next_period(t);
224 if (is_released(t, litmus_clock()))
225 sched_trace_task_release(t);
226 /* unlink */
227 unlink(t);
228 /* requeue
229 * But don't requeue a blocking task. */
230 if (is_running(t))
231 requeue(t);
232 else
233 TRACE_TASK(t, "job_completion(): not requeued, is not running. "
234 "state:%d\n", t->state);
235}
236
237
238static void gq_add_released_queue(struct task_struct *t)
239{
240 spin_lock(&gq_release_lock);
241 heap_insert(edf_ready_order, &gq_released_heap, tsk_rt(t)->heap_node);
242 spin_unlock(&gq_release_lock);
243}
244
245/* caller holds gq_lock, irqs are disabled */
246static void merge_released_queue(void)
247{
248#ifdef CONFIG_SCHED_DEBUG_TRACE
249 struct heap_node* hn;
250 struct task_struct* t;
251#endif
252
253 spin_lock(&gq_release_lock);
254
255#ifdef CONFIG_SCHED_DEBUG_TRACE
256 /* do it individually (= slooow)
257 * so that we can trace each merge
258 */
259
260
261 while ((hn = heap_take(edf_ready_order, &gq_released_heap))) {
262 t = (struct task_struct*) hn->value;
263 TRACE_TASK(t, "merged into ready queue (is_released:%d)\n",
264 is_released(t, litmus_clock()));
265 __add_ready(&gqedf, t);
266 }
267#else
268 __merge_ready(&gqedf, &gq_released_heap);
269#endif
270
271 spin_unlock(&gq_release_lock);
272}
273
274/* gq_tick - called at every local timer interrupt, i.e., at every
275 * quantum boundary.
276 *
277 * Merges jobs released since the last quantum, re-links the
278 * highest-priority jobs, and reschedules if the linked task changed
279 * or the current task has exhausted its budget. */
280static void gq_tick(struct task_struct* t)
281{
282 unsigned long flags;
283 cpu_state_t* entry;
284
285 spin_lock_irqsave(&gq_lock, flags);
286 entry = &__get_cpu_var(gq_cpu_entries);
287 entry->absentee = NULL;
288
289 merge_released_queue();
290 /* update linked if required */
291 link_highest_priority_jobs();
292
293 if (entry->linked != entry->scheduled ||
294 (is_realtime(t) && budget_exhausted(t)))
295 /* let's reschedule */
296 set_tsk_need_resched(t);
297
298 spin_unlock_irqrestore(&gq_lock, flags);
299}
300
301static struct task_struct* gq_schedule(struct task_struct * prev)
302{
303 cpu_state_t* entry = &__get_cpu_var(gq_cpu_entries);
304 int sleep, preempt, exists, blocks, out_of_time;
305 struct task_struct* next = NULL;
306
307 /* Bail out early if we are the release master.
308 * The release master never schedules any real-time tasks.
309 */
310 if (gqedf.release_master == entry->cpu)
311 return NULL;
312
313 spin_lock(&gq_lock);
314
315 /* sanity checking */
316 BUG_ON(entry->scheduled && entry->scheduled != prev);
317 BUG_ON(entry->scheduled && !is_realtime(prev));
318 BUG_ON(is_realtime(prev) && !entry->scheduled);
319 BUG_ON(entry->scheduled && tsk_rt(entry->scheduled)->scheduled_on != entry->cpu);
320 BUG_ON(entry->scheduled && tsk_rt(entry->scheduled)->scheduled_on != entry->cpu);
321
322 /* Determine state */
323 exists = entry->scheduled != NULL;
324 blocks = exists && !is_running(entry->scheduled);
325 out_of_time = exists && budget_exhausted(entry->scheduled);
326 sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
327 preempt = entry->scheduled != entry->linked;
328
329 BUG_ON(blocks && sleep);
330
331 TRACE_TASK(prev, "invoked gq_schedule.\n");
332
333 if (exists)
334 TRACE_TASK(prev,
335 "blocks:%d sleep:%d preempt:%d "
336 "state:%d sig:%d out_of_time:%d\n",
337 blocks, sleep, preempt,
338 prev->state, signal_pending(prev),
339 out_of_time);
340 if (entry->linked && preempt)
341 TRACE_TASK(prev, "will be preempted by %s/%d\n",
342 entry->linked->comm, entry->linked->pid);
343
344
345 if (blocks) {
346 /* Record task identity so that the task
347 * can be rescheduled when it resumes,
348 * but only do so when prev has not been
349 * preempted anyway.
350 */
351 if (!preempt) {
352 entry->absentee = prev;
353 tsk_rt(prev)->last_cpu = entry->cpu;
354 }
355 unlink(entry->scheduled);
356 }
357
358 if (sleep || out_of_time)
359 job_completion(entry->scheduled, !sleep);
360
361 /* The final scheduling decision. Do we need to switch for some reason?
362 * If linked is different from scheduled, then select linked as next.
363 */
364 TRACE("gq: linked=%p scheduled=%p\n", entry->linked, entry->scheduled);
365 if (entry->linked != entry->scheduled) {
366 /* Schedule a linked job? */
367 if (entry->linked) {
368 entry->linked->rt_param.scheduled_on = entry->cpu;
369 next = entry->linked;
370 }
371 if (entry->scheduled) {
372 /* kick this task off the CPU */
373 entry->scheduled->rt_param.scheduled_on = NO_CPU;
374 TRACE_TASK(entry->scheduled, "scheduled_on <- NO_CPU\n");
375 }
376 } else
377 /* Only override Linux scheduler if we have a real-time task
378 * scheduled that needs to continue.
379 */
380 if (exists)
381 next = prev;
382
383 spin_unlock(&gq_lock);
384
385 TRACE("gq_lock released, next=0x%p\n", next);
386
387
388 if (next)
389 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
390 else if (exists && !next)
391 TRACE("becomes idle at %llu.\n", litmus_clock());
392
393
394 return next;
395}
396
397
398/* _finish_switch - we just finished the switch away from prev
399 */
400static void gq_finish_switch(struct task_struct *prev)
401{
402 cpu_state_t* entry = &__get_cpu_var(gq_cpu_entries);
403
404 entry->scheduled = is_realtime(current) ? current : NULL;
405 TRACE_TASK(prev, "switched away from\n");
406}
407
408
409/* Prepare a task for running in RT mode
410 */
411static void gq_task_new(struct task_struct * t, int on_rq, int running)
412{
413 unsigned long flags;
414 cpu_state_t* entry = NULL;
415 int on_rm = 0;
416
417 spin_lock_irqsave(&gq_lock, flags);
418
419 /* setup job params */
420 release_at(t, litmus_clock());
421
422 if (running) {
423 entry = &per_cpu(gq_cpu_entries, task_cpu(t));
424 BUG_ON(entry->scheduled);
425 on_rm = entry->cpu == gqedf.release_master;
426 }
427
428 TRACE_TASK(t, "gq edf: task new (running:%d on_rm:%d)\n",
429 running, on_rm);
430
431 if (running && on_rm)
432 preempt(entry);
433
434 if (running && !on_rm) {
435 /* just leave it where it is, CPU was real-time idle */
436 tsk_rt(t)->scheduled_on = task_cpu(t);
437 tsk_rt(t)->linked_on = task_cpu(t);
438 entry->scheduled = t;
439 if (entry->linked != NULL) {
440 /* Something raced and got assigned here.
441 * Kick it back into the queue, since t is
442 * already executing.
443 */
444 tsk_rt(entry->linked)->linked_on = NO_CPU;
445 __add_ready(&gqedf, entry->linked);
446 }
447 entry->linked = t;
448 }
449
450 if (!running || on_rm) {
451 /* should be released properly */
452 tsk_rt(t)->scheduled_on = NO_CPU;
453 tsk_rt(t)->linked_on = NO_CPU;
454 gq_add_released_queue(t);
455 }
456
457 spin_unlock_irqrestore(&gq_lock, flags);
458}
459
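/* Wake-up handling: a tardy task gets a fresh sporadic release and
 * waits in the released queue for the next quantum.  A released task
 * may resume immediately only if it is still the recorded absentee of
 * its last CPU and nothing else is linked there; otherwise it too waits
 * for the next quantum boundary.  A task that has not been released yet
 * is simply put back into the release queue.
 */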
460static void gq_task_wake_up(struct task_struct *task)
461{
462 unsigned long flags;
463 cpu_state_t* entry;
464 lt_t now;
465
466 spin_lock_irqsave(&gq_lock, flags);
467
468 now = litmus_clock();
469 if (is_tardy(task, now)) {
470 TRACE_TASK(task, "wake_up => new release\n");
471 /* Since task came back after its deadline, we
472 * assume that resuming indicates a new job release.
473 */
474 release_at(task, now);
475 sched_trace_task_release(task);
476 gq_add_released_queue(task);
477 } else if (is_released(task, now)) {
478 set_rt_flags(task, RT_F_RUNNING);
479 entry = &per_cpu(gq_cpu_entries, tsk_rt(task)->last_cpu);
480 /* check if task is still the quantum owner on its CPU */
481 if (entry->linked == NULL && entry->absentee == task) {
482 TRACE_TASK(task, "wake_up => is quantum owner\n");
483 /* can resume right away */
484 link_task_to_cpu(task, entry);
485 if (entry->scheduled != task)
486 preempt(entry);
487 } else {
488 /* becomes eligible at next quantum */
489 TRACE_TASK(task, "wake_up => released_queue\n");
490 gq_add_released_queue(task);
491 }
492 } else {
493 /* last suspension occurred together with a
494 * job completion
495 */
496 TRACE_TASK(task, "wake_up => not yet released!\n");
497 requeue(task);
498 }
499 spin_unlock_irqrestore(&gq_lock, flags);
500}
501
502static void gq_task_block(struct task_struct *t)
503{
504 TRACE_TASK(t, "block at %llu\n", litmus_clock());
505}
506
507
508static void gq_task_exit(struct task_struct * t)
509{
510 unsigned long flags;
511
512 /* unlink if necessary */
513 spin_lock_irqsave(&gq_lock, flags);
514 unlink(t);
515 if (tsk_rt(t)->scheduled_on != NO_CPU) {
516 gq_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
517 tsk_rt(t)->scheduled_on = NO_CPU;
518 }
519 spin_unlock_irqrestore(&gq_lock, flags);
520
521 BUG_ON(!is_realtime(t));
522 TRACE_TASK(t, "RIP\n");
523}
524
525
526
527static void gq_release_jobs(rt_domain_t* rt, struct heap* tasks)
528{
529 unsigned long flags;
530
531 spin_lock_irqsave(&gq_release_lock, flags);
532 TRACE("gq_release_jobs() at %llu\n", litmus_clock());
533
534 /* save tasks to queue so that they can be merged on next quantum */
535 heap_union(edf_ready_order, &gq_released_heap, tasks);
536
537 spin_unlock_irqrestore(&gq_release_lock, flags);
538}
539
540static long gq_admit_task(struct task_struct* tsk)
541{
542 return 0;
543}
544
545
546static long gq_activate_plugin(void)
547{
548 int cpu;
549 cpu_state_t *entry;
550
551 heap_init(&gq_cpu_heap);
552 heap_init(&gq_released_heap);
553 gqedf.release_master = atomic_read(&release_master_cpu);
554
555
556 for_each_online_cpu(cpu) {
557 entry = &per_cpu(gq_cpu_entries, cpu);
558 heap_node_init(&entry->hn, entry);
559 entry->linked = NULL;
560 entry->scheduled = NULL;
561 entry->absentee = NULL;
562 if (cpu != gqedf.release_master) {
563 TRACE("GQ-EDF: Initializing CPU #%d.\n", cpu);
564 update_cpu_position(entry);
565 } else {
566 TRACE("GQ-EDF: CPU %d is release master.\n", cpu);
567 }
568 }
569 return 0;
570}
571
572/* Plugin object */
573static struct sched_plugin gq_edf_plugin __cacheline_aligned_in_smp = {
574 .plugin_name = "GQ-EDF",
575 .finish_switch = gq_finish_switch,
576 .tick = gq_tick,
577 .task_new = gq_task_new,
578 .complete_job = complete_job,
579 .task_exit = gq_task_exit,
580 .schedule = gq_schedule,
581 .task_wake_up = gq_task_wake_up,
582 .task_block = gq_task_block,
583 .admit_task = gq_admit_task,
584 .activate_plugin = gq_activate_plugin,
585};
586
587
588static int __init init_gq_edf(void)
589{
590 int cpu;
591 cpu_state_t *entry;
592
593 /* initialize CPU state */
594 for (cpu = 0; cpu < NR_CPUS; cpu++) {
595 entry = &per_cpu(gq_cpu_entries, cpu);
596 gq_cpus[cpu] = entry;
597 entry->cpu = cpu;
598 entry->hn = &gq_heap_node[cpu];
599 heap_node_init(&entry->hn, entry);
600 }
601 edf_domain_init(&gqedf, NULL, gq_release_jobs);
602 return register_sched_plugin(&gq_edf_plugin);
603}
604
605
606module_init(init_gq_edf);