author    Bjoern Brandenburg <bbb@mpi-sws.org>  2015-08-09 07:18:55 -0400
committer Bjoern Brandenburg <bbb@mpi-sws.org>  2015-08-09 07:20:35 -0400
commit    02da1bac9739050917862c82bdc75c3a0eb43179 (patch)
tree      6d0d96376c3c78d4dce10659ac1b0525d0b4c45f
parent    47efe5234212e0f72369a6964dfd84d4e9968a11 (diff)
Add GSN-EDF scheduler plugin
-rw-r--r--  litmus/Makefile        |    1
-rw-r--r--  litmus/sched_gsn_edf.c | 1070
2 files changed, 1071 insertions(+), 0 deletions(-)
diff --git a/litmus/Makefile b/litmus/Makefile
index 4e53c4f69744..895cf3a2d599 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -19,6 +19,7 @@ obj-y = sched_plugin.o litmus.o \
 	binheap.o \
 	ctrldev.o \
 	uncachedev.o \
+	sched_gsn_edf.o \
 	sched_psn_edf.o
 
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
new file mode 100644
index 000000000000..6e2ff6fa689d
--- /dev/null
+++ b/litmus/sched_gsn_edf.c
@@ -0,0 +1,1070 @@
1/*
2 * litmus/sched_gsn_edf.c
3 *
4 * Implementation of the GSN-EDF scheduling algorithm.
5 *
6 * This version uses the simple approach and serializes all scheduling
7 * decisions by the use of a queue lock. This is probably not the
8 * best way to do it, but it should suffice for now.
9 */
10
11#include <linux/spinlock.h>
12#include <linux/percpu.h>
13#include <linux/sched.h>
14#include <linux/slab.h>
15
16#include <litmus/litmus.h>
17#include <litmus/jobs.h>
18#include <litmus/sched_plugin.h>
19#include <litmus/edf_common.h>
20#include <litmus/sched_trace.h>
21#include <litmus/trace.h>
22
23#include <litmus/preempt.h>
24#include <litmus/budget.h>
25
26#include <litmus/bheap.h>
27
28#ifdef CONFIG_SCHED_CPU_AFFINITY
29#include <litmus/affinity.h>
30#endif
31
32/* to set up domain/cpu mappings */
33#include <litmus/litmus_proc.h>
34
35#include <linux/module.h>
36
37/* Overview of GSN-EDF operations.
38 *
39 * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This
40 * description only covers how the individual operations are implemented in
41 * LITMUS.
42 *
43 * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
44 * structure (NOT the actually scheduled
45 * task). If there is another linked task To
46 * already it will set To->linked_on = NO_CPU
47 * (thereby removing its association with this
48 * CPU). However, it will not requeue the
49 * previously linked task (if any). It will set
50 * T's state to not completed and check whether
51 * it is already running somewhere else. If T
52 * is scheduled somewhere else it will link
53 * it to that CPU instead (and pull the linked
54 * task to cpu). T may be NULL.
55 *
56 * unlink(T) - Unlink removes T from all scheduler data
57 * structures. If it is linked to some CPU it
58 * will link NULL to that CPU. If it is
59 * currently queued in the gsnedf queue it will
60 * be removed from the rt_domain. It is safe to
61 * call unlink(T) if T is not linked. T may not
62 * be NULL.
63 *
 64 * requeue(T) - Requeue will insert T into the appropriate
 65 * queue. If T has already been released (or is
 66 * eligible for early releasing), it will go
 67 * into the ready queue. If T's release time
 68 * is in the future, it will go into the
 69 * release queue instead. That means that T's
 70 * release time/job number/etc. have to be
 71 * updated before requeue(T) is called. It is
 72 * not safe to call requeue(T) when T is
 73 * already queued. T may not be NULL.
 74 *
75 *
76 * gsnedf_job_arrival(T) - This is the catch all function when T enters
 77 * the system either after a suspension or at a
78 * job release. It will queue T (which means it
79 * is not safe to call gsnedf_job_arrival(T) if
80 * T is already queued) and then check whether a
81 * preemption is necessary. If a preemption is
82 * necessary it will update the linkage
 83 * accordingly and cause the scheduler to be invoked
84 * (either with an IPI or need_resched). It is
85 * safe to call gsnedf_job_arrival(T) if T's
 86 * next job has not actually been released yet
 87 * (release time in the future). T will be put
88 * on the release queue in that case.
89 *
90 * curr_job_completion() - Take care of everything that needs to be done
91 * to prepare the current task for its next
92 * release and place it in the right queue with
93 * gsnedf_job_arrival().
94 *
95 *
 96 * When we know that T is linked to a CPU, then link_task_to_cpu(NULL, CPU) is
 97 * equivalent to unlink(T). Note that if you unlink a task from a CPU, none of
 98 * these functions will automatically link a pending task from the ready queue
 99 * to the now-idle CPU. This is the job of the calling function (by means of
 100 * __take_ready). An illustrative call sequence is sketched below.
 101 */
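/* Illustrative sketch of how these operations compose on a job release (a
 * rough trace of the functions defined below, not a verbatim code path;
 * error handling and config-dependent branches are omitted):
 *
 *	gsnedf_release_jobs(&gsnedf, tasks);
 *		__merge_ready(&gsnedf, tasks);
 *		check_for_preemptions();
 *			last = lowest_prio_cpu();
 *			task = __take_ready(&gsnedf);
 *			requeue(last->linked);         // if the preempted job should run again
 *			link_task_to_cpu(task, last);
 *			preempt(last);                 // IPI/need_resched; gsnedf_schedule()
 *			                               // on that CPU then picks up ->linked
 */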
102
103
104/* cpu_entry_t - maintain the linked and scheduled state
105 */
106typedef struct {
107 int cpu;
108 struct task_struct* linked; /* only RT tasks */
109 struct task_struct* scheduled; /* only RT tasks */
110 struct bheap_node* hn;
111} cpu_entry_t;
112DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
113
114cpu_entry_t* gsnedf_cpus[NR_CPUS];
115
116/* the cpus queue themselves according to priority in here */
117static struct bheap_node gsnedf_heap_node[NR_CPUS];
118static struct bheap gsnedf_cpu_heap;
119
120static rt_domain_t gsnedf;
121#define gsnedf_lock (gsnedf.ready_lock)
122
123
124/* Uncomment this if you want to see all scheduling decisions in the
125 * TRACE() log.
126#define WANT_ALL_SCHED_EVENTS
127 */
128
129static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
130{
131 cpu_entry_t *a, *b;
132 a = _a->value;
133 b = _b->value;
134 /* Note that a and b are inverted: we want the lowest-priority CPU at
135 * the top of the heap.
136 */
137 return edf_higher_prio(b->linked, a->linked);
138}
139
140/* update_cpu_position - Move the cpu entry to the correct place to maintain
141 * order in the cpu queue. Caller must hold gsnedf lock.
142 */
143static void update_cpu_position(cpu_entry_t *entry)
144{
145 if (likely(bheap_node_in_heap(entry->hn)))
146 bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
147 bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
148}
149
150/* caller must hold gsnedf lock */
151static cpu_entry_t* lowest_prio_cpu(void)
152{
153 struct bheap_node* hn;
154 hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap);
155 return hn->value;
156}
157
158
159/* link_task_to_cpu - Update the link of a CPU.
160 * Handles the case where the to-be-linked task is already
161 * scheduled on a different CPU.
162 */
163static noinline void link_task_to_cpu(struct task_struct* linked,
164 cpu_entry_t *entry)
165{
166 cpu_entry_t *sched;
167 struct task_struct* tmp;
168 int on_cpu;
169
170 BUG_ON(linked && !is_realtime(linked));
171
172 /* Currently linked task is set to be unlinked. */
173 if (entry->linked) {
174 entry->linked->rt_param.linked_on = NO_CPU;
175 }
176
177 /* Link new task to CPU. */
178 if (linked) {
 179 /* handle the case where the task is already scheduled somewhere! */
180 on_cpu = linked->rt_param.scheduled_on;
181 if (on_cpu != NO_CPU) {
182 sched = &per_cpu(gsnedf_cpu_entries, on_cpu);
183 /* this should only happen if not linked already */
184 BUG_ON(sched->linked == linked);
185
186 /* If we are already scheduled on the CPU to which we
187 * wanted to link, we don't need to do the swap --
188 * we just link ourselves to the CPU and depend on
189 * the caller to get things right.
190 */
191 if (entry != sched) {
192 TRACE_TASK(linked,
193 "already scheduled on %d, updating link.\n",
194 sched->cpu);
195 tmp = sched->linked;
196 linked->rt_param.linked_on = sched->cpu;
197 sched->linked = linked;
198 update_cpu_position(sched);
199 linked = tmp;
200 }
201 }
202 if (linked) /* might be NULL due to swap */
203 linked->rt_param.linked_on = entry->cpu;
204 }
205 entry->linked = linked;
206#ifdef WANT_ALL_SCHED_EVENTS
207 if (linked)
208 TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
209 else
210 TRACE("NULL linked to %d.\n", entry->cpu);
211#endif
212 update_cpu_position(entry);
213}
214
215/* unlink - Make sure a task is not linked any longer to an entry
216 * where it was linked before. Must hold gsnedf_lock.
217 */
218static noinline void unlink(struct task_struct* t)
219{
220 cpu_entry_t *entry;
221
222 if (t->rt_param.linked_on != NO_CPU) {
223 /* unlink */
224 entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on);
225 t->rt_param.linked_on = NO_CPU;
226 link_task_to_cpu(NULL, entry);
227 } else if (is_queued(t)) {
228 /* This is an interesting situation: t is scheduled,
229 * but was just recently unlinked. It cannot be
230 * linked anywhere else (because then it would have
231 * been relinked to this CPU), thus it must be in some
232 * queue. We must remove it from the list in this
233 * case.
234 */
235 remove(&gsnedf, t);
236 }
237}
238
239
240/* preempt - force a CPU to reschedule
241 */
242static void preempt(cpu_entry_t *entry)
243{
244 preempt_if_preemptable(entry->scheduled, entry->cpu);
245}
246
247/* requeue - Put an unlinked task into gsn-edf domain.
248 * Caller must hold gsnedf_lock.
249 */
250static noinline void requeue(struct task_struct* task)
251{
252 BUG_ON(!task);
253 /* sanity check before insertion */
254 BUG_ON(is_queued(task));
255
256 if (is_early_releasing(task) || is_released(task, litmus_clock()))
257 __add_ready(&gsnedf, task);
258 else {
259 /* it has got to wait */
260 add_release(&gsnedf, task);
261 }
262}
263
264#ifdef CONFIG_SCHED_CPU_AFFINITY
265static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t *start)
266{
267 cpu_entry_t *affinity;
268
269 get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
270#ifdef CONFIG_RELEASE_MASTER
271 gsnedf.release_master,
272#else
273 NO_CPU,
274#endif
275 cpu_online_mask);
276
277 return(affinity);
278}
279#endif
280
281/* check for any necessary preemptions */
282static void check_for_preemptions(void)
283{
284 struct task_struct *task;
285 cpu_entry_t *last;
286
287
288#ifdef CONFIG_PREFER_LOCAL_LINKING
289 cpu_entry_t *local;
290
291 /* Before linking to other CPUs, check first whether the local CPU is
292 * idle. */
293 local = this_cpu_ptr(&gsnedf_cpu_entries);
294 task = __peek_ready(&gsnedf);
295
296 if (task && !local->linked
297#ifdef CONFIG_RELEASE_MASTER
298 && likely(local->cpu != gsnedf.release_master)
299#endif
300 ) {
301 task = __take_ready(&gsnedf);
302 TRACE_TASK(task, "linking to local CPU %d to avoid IPI\n", local->cpu);
303 link_task_to_cpu(task, local);
304 preempt(local);
305 }
306#endif
307
308 for (last = lowest_prio_cpu();
309 edf_preemption_needed(&gsnedf, last->linked);
310 last = lowest_prio_cpu()) {
311 /* preemption necessary */
312 task = __take_ready(&gsnedf);
313 TRACE("check_for_preemptions: attempting to link task %d to %d\n",
314 task->pid, last->cpu);
315
316#ifdef CONFIG_SCHED_CPU_AFFINITY
317 {
318 cpu_entry_t *affinity =
319 gsnedf_get_nearest_available_cpu(
320 &per_cpu(gsnedf_cpu_entries, task_cpu(task)));
321 if (affinity)
322 last = affinity;
323 else if (requeue_preempted_job(last->linked))
324 requeue(last->linked);
325 }
326#else
327 if (requeue_preempted_job(last->linked))
328 requeue(last->linked);
329#endif
330
331 link_task_to_cpu(task, last);
332 preempt(last);
333 }
334}
335
336/* gsnedf_job_arrival: task is either resumed or released */
337static noinline void gsnedf_job_arrival(struct task_struct* task)
338{
339 BUG_ON(!task);
340
341 requeue(task);
342 check_for_preemptions();
343}
344
345static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
346{
347 unsigned long flags;
348
349 raw_spin_lock_irqsave(&gsnedf_lock, flags);
350
351 __merge_ready(rt, tasks);
352 check_for_preemptions();
353
354 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
355}
356
357/* caller holds gsnedf_lock */
358static noinline void curr_job_completion(int forced)
359{
360 struct task_struct *t = current;
361 BUG_ON(!t);
362
363 sched_trace_task_completion(t, forced);
364
365 TRACE_TASK(t, "job_completion(forced=%d).\n", forced);
366
367 /* set flags */
368 tsk_rt(t)->completed = 0;
369 /* prepare for next period */
370 prepare_for_next_period(t);
371 if (is_early_releasing(t) || is_released(t, litmus_clock()))
372 sched_trace_task_release(t);
373 /* unlink */
374 unlink(t);
375 /* requeue
376 * But don't requeue a blocking task. */
377 if (is_current_running())
378 gsnedf_job_arrival(t);
379}
380
381/* Getting schedule() right is a bit tricky. schedule() may not make any
382 * assumptions on the state of the current task since it may be called for a
 383 * number of reasons. The reasons include: scheduler_tick() determined that a
 384 * reschedule was necessary, sys_exit_np() was called, some Linux subsystem
 385 * determined that one was needed, or even (in the worst case) a bug is hidden
 386 * somewhere. Thus, we must take extreme care to determine what the
387 * current state is.
388 *
 389 * The CPU could currently be scheduling a task (or not) and be linked to a task (or not).
390 *
391 * The following assertions for the scheduled task could hold:
392 *
393 * - !is_running(scheduled) // the job blocks
394 * - scheduled->timeslice == 0 // the job completed (forcefully)
395 * - is_completed() // the job completed (by syscall)
396 * - linked != scheduled // we need to reschedule (for any reason)
397 * - is_np(scheduled) // rescheduling must be delayed,
398 * sys_exit_np must be requested
399 *
400 * Any of these can occur together.
401 */
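/* A condensed sketch of how gsnedf_schedule() below maps these state bits to
 * actions (see the code for the exact conditions and ordering):
 *
 *	blocks                         -> unlink(scheduled)
 *	np && (out_of_time || preempt || sleep)
 *	                               -> unlink(scheduled) + request_exit_np()
 *	!np && (out_of_time || sleep)  -> curr_job_completion(!sleep)
 *	!entry->linked                 -> link_task_to_cpu(__take_ready(), entry)
 *	(!np || blocks) && linked != scheduled
 *	                               -> switch to entry->linked (may be NULL)
 */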
402static struct task_struct* gsnedf_schedule(struct task_struct * prev)
403{
404 cpu_entry_t* entry = this_cpu_ptr(&gsnedf_cpu_entries);
405 int out_of_time, sleep, preempt, np, exists, blocks;
406 struct task_struct* next = NULL;
407
408#ifdef CONFIG_RELEASE_MASTER
409 /* Bail out early if we are the release master.
410 * The release master never schedules any real-time tasks.
411 */
412 if (unlikely(gsnedf.release_master == entry->cpu)) {
413 sched_state_task_picked();
414 return NULL;
415 }
416#endif
417
418 raw_spin_lock(&gsnedf_lock);
419
420 /* sanity checking */
421 BUG_ON(entry->scheduled && entry->scheduled != prev);
422 BUG_ON(entry->scheduled && !is_realtime(prev));
423 BUG_ON(is_realtime(prev) && !entry->scheduled);
424
425 /* (0) Determine state */
426 exists = entry->scheduled != NULL;
427 blocks = exists && !is_current_running();
428 out_of_time = exists && budget_enforced(entry->scheduled)
429 && budget_exhausted(entry->scheduled);
430 np = exists && is_np(entry->scheduled);
431 sleep = exists && is_completed(entry->scheduled);
432 preempt = entry->scheduled != entry->linked;
433
434#ifdef WANT_ALL_SCHED_EVENTS
435 TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
436#endif
437
438 if (exists)
439 TRACE_TASK(prev,
440 "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
441 "state:%d sig:%d\n",
442 blocks, out_of_time, np, sleep, preempt,
443 prev->state, signal_pending(prev));
444 if (entry->linked && preempt)
445 TRACE_TASK(prev, "will be preempted by %s/%d\n",
446 entry->linked->comm, entry->linked->pid);
447
448
449 /* If a task blocks we have no choice but to reschedule.
450 */
451 if (blocks)
452 unlink(entry->scheduled);
453
454 /* Request a sys_exit_np() call if we would like to preempt but cannot.
455 * We need to make sure to update the link structure anyway in case
456 * that we are still linked. Multiple calls to request_exit_np() don't
457 * hurt.
458 */
459 if (np && (out_of_time || preempt || sleep)) {
460 unlink(entry->scheduled);
461 request_exit_np(entry->scheduled);
462 }
463
 464 /* Any task that is preemptable and either exhausts its execution
 465 * budget or signals completion ("sleeps") finishes its current job
 466 * here. We may have to reschedule afterwards. Don't do a job completion
 467 * if we block (we can't have timers running for blocked jobs).
 468 */
469 if (!np && (out_of_time || sleep))
470 curr_job_completion(!sleep);
471
 472 /* Link a pending task if we became unlinked.
473 */
474 if (!entry->linked)
475 link_task_to_cpu(__take_ready(&gsnedf), entry);
476
477 /* The final scheduling decision. Do we need to switch for some reason?
478 * If linked is different from scheduled, then select linked as next.
479 */
480 if ((!np || blocks) &&
481 entry->linked != entry->scheduled) {
482 /* Schedule a linked job? */
483 if (entry->linked) {
484 entry->linked->rt_param.scheduled_on = entry->cpu;
485 next = entry->linked;
486 TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
487 }
488 if (entry->scheduled) {
489 /* not gonna be scheduled soon */
490 entry->scheduled->rt_param.scheduled_on = NO_CPU;
491 TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
492 }
493 } else
494 /* Only override Linux scheduler if we have a real-time task
495 * scheduled that needs to continue.
496 */
497 if (exists)
498 next = prev;
499
500 sched_state_task_picked();
501
502 raw_spin_unlock(&gsnedf_lock);
503
504#ifdef WANT_ALL_SCHED_EVENTS
505 TRACE("gsnedf_lock released, next=0x%p\n", next);
506
507 if (next)
508 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
509 else if (exists && !next)
510 TRACE("becomes idle at %llu.\n", litmus_clock());
511#endif
512
513
514 return next;
515}
516
517
518/* _finish_switch - we just finished the switch away from prev
519 */
520static void gsnedf_finish_switch(struct task_struct *prev)
521{
522 cpu_entry_t* entry = this_cpu_ptr(&gsnedf_cpu_entries);
523
524 entry->scheduled = is_realtime(current) ? current : NULL;
525#ifdef WANT_ALL_SCHED_EVENTS
526 TRACE_TASK(prev, "switched away from\n");
527#endif
528}
529
530
531/* Prepare a task for running in RT mode
532 */
533static void gsnedf_task_new(struct task_struct * t, int on_rq, int is_scheduled)
534{
535 unsigned long flags;
536 cpu_entry_t* entry;
537
538 TRACE("gsn edf: task new %d\n", t->pid);
539
540 raw_spin_lock_irqsave(&gsnedf_lock, flags);
541
542 /* setup job params */
543 release_at(t, litmus_clock());
544
545 if (is_scheduled) {
546 entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t));
547 BUG_ON(entry->scheduled);
548
549#ifdef CONFIG_RELEASE_MASTER
550 if (entry->cpu != gsnedf.release_master) {
551#endif
552 entry->scheduled = t;
553 tsk_rt(t)->scheduled_on = task_cpu(t);
554#ifdef CONFIG_RELEASE_MASTER
555 } else {
556 /* do not schedule on release master */
557 preempt(entry); /* force resched */
558 tsk_rt(t)->scheduled_on = NO_CPU;
559 }
560#endif
561 } else {
562 t->rt_param.scheduled_on = NO_CPU;
563 }
564 t->rt_param.linked_on = NO_CPU;
565
566 if (on_rq || is_scheduled)
567 gsnedf_job_arrival(t);
568 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
569}
570
571static void gsnedf_task_wake_up(struct task_struct *task)
572{
573 unsigned long flags;
574 lt_t now;
575
576 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
577
578 raw_spin_lock_irqsave(&gsnedf_lock, flags);
579 now = litmus_clock();
580 if (is_sporadic(task) && is_tardy(task, now)) {
581 /* new sporadic release */
582 release_at(task, now);
583 sched_trace_task_release(task);
584 }
585 gsnedf_job_arrival(task);
586 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
587}
588
589static void gsnedf_task_block(struct task_struct *t)
590{
591 unsigned long flags;
592
593 TRACE_TASK(t, "block at %llu\n", litmus_clock());
594
595 /* unlink if necessary */
596 raw_spin_lock_irqsave(&gsnedf_lock, flags);
597 unlink(t);
598 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
599
600 BUG_ON(!is_realtime(t));
601}
602
603
604static void gsnedf_task_exit(struct task_struct * t)
605{
606 unsigned long flags;
607
608 /* unlink if necessary */
609 raw_spin_lock_irqsave(&gsnedf_lock, flags);
610 unlink(t);
611 if (tsk_rt(t)->scheduled_on != NO_CPU) {
612 gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
613 tsk_rt(t)->scheduled_on = NO_CPU;
614 }
615 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
616
617 BUG_ON(!is_realtime(t));
618 TRACE_TASK(t, "RIP\n");
619}
620
621
622static long gsnedf_admit_task(struct task_struct* tsk)
623{
624 return 0;
625}
626
627#ifdef CONFIG_LITMUS_LOCKING
628
629#include <litmus/fdso.h>
630
631/* called with IRQs off */
632static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
633{
634 int linked_on;
635 int check_preempt = 0;
636
637 raw_spin_lock(&gsnedf_lock);
638
639 TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
640 tsk_rt(t)->inh_task = prio_inh;
641
642 linked_on = tsk_rt(t)->linked_on;
643
644 /* If it is scheduled, then we need to reorder the CPU heap. */
645 if (linked_on != NO_CPU) {
646 TRACE_TASK(t, "%s: linked on %d\n",
647 __FUNCTION__, linked_on);
648 /* Holder is scheduled; need to re-order CPUs.
649 * We can't use heap_decrease() here since
650 * the cpu_heap is ordered in reverse direction, so
651 * it is actually an increase. */
652 bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
653 gsnedf_cpus[linked_on]->hn);
654 bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
655 gsnedf_cpus[linked_on]->hn);
656 } else {
657 /* holder may be queued: first stop queue changes */
658 raw_spin_lock(&gsnedf.release_lock);
659 if (is_queued(t)) {
660 TRACE_TASK(t, "%s: is queued\n",
661 __FUNCTION__);
662 /* We need to update the position of holder in some
 663 * heap. Note that this could be a release heap if
664 * budget enforcement is used and this job overran. */
665 check_preempt =
666 !bheap_decrease(edf_ready_order,
667 tsk_rt(t)->heap_node);
668 } else {
669 /* Nothing to do: if it is not queued and not linked
670 * then it is either sleeping or currently being moved
671 * by other code (e.g., a timer interrupt handler) that
672 * will use the correct priority when enqueuing the
673 * task. */
674 TRACE_TASK(t, "%s: is NOT queued => Done.\n",
675 __FUNCTION__);
676 }
677 raw_spin_unlock(&gsnedf.release_lock);
678
679 /* If holder was enqueued in a release heap, then the following
680 * preemption check is pointless, but we can't easily detect
681 * that case. If you want to fix this, then consider that
682 * simply adding a state flag requires O(n) time to update when
683 * releasing n tasks, which conflicts with the goal to have
684 * O(log n) merges. */
685 if (check_preempt) {
686 /* heap_decrease() hit the top level of the heap: make
687 * sure preemption checks get the right task, not the
688 * potentially stale cache. */
689 bheap_uncache_min(edf_ready_order,
690 &gsnedf.ready_queue);
691 check_for_preemptions();
692 }
693 }
694
695 raw_spin_unlock(&gsnedf_lock);
696}
697
698/* called with IRQs off */
699static void clear_priority_inheritance(struct task_struct* t)
700{
701 raw_spin_lock(&gsnedf_lock);
702
703 /* A job only stops inheriting a priority when it releases a
 704 * resource. Thus we can make the following assumption. */
705 BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
706
707 TRACE_TASK(t, "priority restored\n");
708 tsk_rt(t)->inh_task = NULL;
709
710 /* Check if rescheduling is necessary. We can't use heap_decrease()
711 * since the priority was effectively lowered. */
712 unlink(t);
713 gsnedf_job_arrival(t);
714
715 raw_spin_unlock(&gsnedf_lock);
716}
717
718
719/* ******************** FMLP support ********************** */
720
721/* struct for semaphore with priority inheritance */
722struct fmlp_semaphore {
723 struct litmus_lock litmus_lock;
724
725 /* current resource holder */
726 struct task_struct *owner;
727
728 /* highest-priority waiter */
729 struct task_struct *hp_waiter;
730
731 /* FIFO queue of waiting tasks */
732 wait_queue_head_t wait;
733};
734
735static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock)
736{
737 return container_of(lock, struct fmlp_semaphore, litmus_lock);
738}
739
740/* caller is responsible for locking */
741struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem,
742 struct task_struct* skip)
743{
744 struct list_head *pos;
745 struct task_struct *queued, *found = NULL;
746
747 list_for_each(pos, &sem->wait.task_list) {
748 queued = (struct task_struct*) list_entry(pos, wait_queue_t,
749 task_list)->private;
750
751 /* Compare task prios, find high prio task. */
752 if (queued != skip && edf_higher_prio(queued, found))
753 found = queued;
754 }
755 return found;
756}
757
758int gsnedf_fmlp_lock(struct litmus_lock* l)
759{
760 struct task_struct* t = current;
761 struct fmlp_semaphore *sem = fmlp_from_lock(l);
762 wait_queue_t wait;
763 unsigned long flags;
764
765 if (!is_realtime(t))
766 return -EPERM;
767
768 /* prevent nested lock acquisition --- not supported by FMLP */
769 if (tsk_rt(t)->num_locks_held)
770 return -EBUSY;
771
772 spin_lock_irqsave(&sem->wait.lock, flags);
773
774 if (sem->owner) {
775 /* resource is not free => must suspend and wait */
776
777 init_waitqueue_entry(&wait, t);
778
779 /* FIXME: interruptible would be nice some day */
780 set_task_state(t, TASK_UNINTERRUPTIBLE);
781
782 __add_wait_queue_tail_exclusive(&sem->wait, &wait);
783
784 /* check if we need to activate priority inheritance */
785 if (edf_higher_prio(t, sem->hp_waiter)) {
786 sem->hp_waiter = t;
787 if (edf_higher_prio(t, sem->owner))
788 set_priority_inheritance(sem->owner, sem->hp_waiter);
789 }
790
791 TS_LOCK_SUSPEND;
792
793 /* release lock before sleeping */
794 spin_unlock_irqrestore(&sem->wait.lock, flags);
795
796 /* We depend on the FIFO order. Thus, we don't need to recheck
797 * when we wake up; we are guaranteed to have the lock since
798 * there is only one wake up per release.
799 */
800
801 schedule();
802
803 TS_LOCK_RESUME;
804
805 /* Since we hold the lock, no other task will change
806 * ->owner. We can thus check it without acquiring the spin
807 * lock. */
808 BUG_ON(sem->owner != t);
809 } else {
810 /* it's ours now */
811 sem->owner = t;
812
813 spin_unlock_irqrestore(&sem->wait.lock, flags);
814 }
815
816 tsk_rt(t)->num_locks_held++;
817
818 return 0;
819}
820
821int gsnedf_fmlp_unlock(struct litmus_lock* l)
822{
823 struct task_struct *t = current, *next;
824 struct fmlp_semaphore *sem = fmlp_from_lock(l);
825 unsigned long flags;
826 int err = 0;
827
828 spin_lock_irqsave(&sem->wait.lock, flags);
829
830 if (sem->owner != t) {
831 err = -EINVAL;
832 goto out;
833 }
834
835 tsk_rt(t)->num_locks_held--;
836
837 /* check if there are jobs waiting for this resource */
838 next = __waitqueue_remove_first(&sem->wait);
839 if (next) {
 840 /* next becomes the resource holder */
841 sem->owner = next;
842 TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
843
844 /* determine new hp_waiter if necessary */
845 if (next == sem->hp_waiter) {
846 TRACE_TASK(next, "was highest-prio waiter\n");
847 /* next has the highest priority --- it doesn't need to
848 * inherit. However, we need to make sure that the
849 * next-highest priority in the queue is reflected in
850 * hp_waiter. */
851 sem->hp_waiter = find_hp_waiter(sem, next);
852 if (sem->hp_waiter)
853 TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n");
854 else
855 TRACE("no further waiters\n");
856 } else {
857 /* Well, if next is not the highest-priority waiter,
858 * then it ought to inherit the highest-priority
859 * waiter's priority. */
860 set_priority_inheritance(next, sem->hp_waiter);
861 }
862
863 /* wake up next */
864 wake_up_process(next);
865 } else
866 /* becomes available */
867 sem->owner = NULL;
868
869 /* we lose the benefit of priority inheritance (if any) */
870 if (tsk_rt(t)->inh_task)
871 clear_priority_inheritance(t);
872
873out:
874 spin_unlock_irqrestore(&sem->wait.lock, flags);
875
876 return err;
877}
878
879int gsnedf_fmlp_close(struct litmus_lock* l)
880{
881 struct task_struct *t = current;
882 struct fmlp_semaphore *sem = fmlp_from_lock(l);
883 unsigned long flags;
884
885 int owner;
886
887 spin_lock_irqsave(&sem->wait.lock, flags);
888
889 owner = sem->owner == t;
890
891 spin_unlock_irqrestore(&sem->wait.lock, flags);
892
893 if (owner)
894 gsnedf_fmlp_unlock(l);
895
896 return 0;
897}
898
899void gsnedf_fmlp_free(struct litmus_lock* lock)
900{
901 kfree(fmlp_from_lock(lock));
902}
903
904static struct litmus_lock_ops gsnedf_fmlp_lock_ops = {
905 .close = gsnedf_fmlp_close,
906 .lock = gsnedf_fmlp_lock,
907 .unlock = gsnedf_fmlp_unlock,
908 .deallocate = gsnedf_fmlp_free,
909};
910
911static struct litmus_lock* gsnedf_new_fmlp(void)
912{
913 struct fmlp_semaphore* sem;
914
915 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
916 if (!sem)
917 return NULL;
918
919 sem->owner = NULL;
920 sem->hp_waiter = NULL;
921 init_waitqueue_head(&sem->wait);
922 sem->litmus_lock.ops = &gsnedf_fmlp_lock_ops;
923
924 return &sem->litmus_lock;
925}
926
927/* **** lock constructor **** */
928
929
930static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
931 void* __user unused)
932{
933 int err = -ENXIO;
934
935 /* GSN-EDF currently only supports the FMLP for global resources. */
936 switch (type) {
937
938 case FMLP_SEM:
939 /* Flexible Multiprocessor Locking Protocol */
940 *lock = gsnedf_new_fmlp();
941 if (*lock)
942 err = 0;
943 else
944 err = -ENOMEM;
945 break;
946
 947 }
948
949 return err;
950}
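
/* From user space, an FMLP semaphore allocated through gsnedf_allocate_lock()
 * is reached via the FDSO layer. A minimal usage sketch, assuming the usual
 * liblitmus helpers (open_fmlp_sem(), litmus_lock(), litmus_unlock()); those
 * helpers and the fd/resource_id names are not part of this file:
 *
 *	int od = open_fmlp_sem(fd, resource_id);  // FMLP_SEM -> gsnedf_new_fmlp()
 *	litmus_lock(od);                          // -> gsnedf_fmlp_lock()
 *	// ... critical section ...
 *	litmus_unlock(od);                        // -> gsnedf_fmlp_unlock()
 */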
951
952#endif
953
954static struct domain_proc_info gsnedf_domain_proc_info;
955static long gsnedf_get_domain_proc_info(struct domain_proc_info **ret)
956{
957 *ret = &gsnedf_domain_proc_info;
958 return 0;
959}
960
961static void gsnedf_setup_domain_proc(void)
962{
963 int i, cpu;
964 int release_master =
965#ifdef CONFIG_RELEASE_MASTER
966 atomic_read(&release_master_cpu);
967#else
968 NO_CPU;
969#endif
970 int num_rt_cpus = num_online_cpus() - (release_master != NO_CPU);
971 struct cd_mapping *map;
972
 973 memset(&gsnedf_domain_proc_info, 0, sizeof(gsnedf_domain_proc_info));
974 init_domain_proc_info(&gsnedf_domain_proc_info, num_rt_cpus, 1);
975 gsnedf_domain_proc_info.num_cpus = num_rt_cpus;
976 gsnedf_domain_proc_info.num_domains = 1;
977
978 gsnedf_domain_proc_info.domain_to_cpus[0].id = 0;
979 for (cpu = 0, i = 0; cpu < num_online_cpus(); ++cpu) {
980 if (cpu == release_master)
981 continue;
982 map = &gsnedf_domain_proc_info.cpu_to_domains[i];
983 map->id = cpu;
984 cpumask_set_cpu(0, map->mask);
985 ++i;
986
987 /* add cpu to the domain */
988 cpumask_set_cpu(cpu,
989 gsnedf_domain_proc_info.domain_to_cpus[0].mask);
990 }
991}
992
993static long gsnedf_activate_plugin(void)
994{
995 int cpu;
996 cpu_entry_t *entry;
997
998 bheap_init(&gsnedf_cpu_heap);
999#ifdef CONFIG_RELEASE_MASTER
1000 gsnedf.release_master = atomic_read(&release_master_cpu);
1001#endif
1002
1003 for_each_online_cpu(cpu) {
1004 entry = &per_cpu(gsnedf_cpu_entries, cpu);
1005 bheap_node_init(&entry->hn, entry);
1006 entry->linked = NULL;
1007 entry->scheduled = NULL;
1008#ifdef CONFIG_RELEASE_MASTER
1009 if (cpu != gsnedf.release_master) {
1010#endif
1011 TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu);
1012 update_cpu_position(entry);
1013#ifdef CONFIG_RELEASE_MASTER
1014 } else {
1015 TRACE("GSN-EDF: CPU %d is release master.\n", cpu);
1016 }
1017#endif
1018 }
1019
1020 gsnedf_setup_domain_proc();
1021
1022 return 0;
1023}
1024
1025static long gsnedf_deactivate_plugin(void)
1026{
1027 destroy_domain_proc_info(&gsnedf_domain_proc_info);
1028 return 0;
1029}
1030
1031/* Plugin object */
1032static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
1033 .plugin_name = "GSN-EDF",
1034 .finish_switch = gsnedf_finish_switch,
1035 .task_new = gsnedf_task_new,
1036 .complete_job = complete_job,
1037 .task_exit = gsnedf_task_exit,
1038 .schedule = gsnedf_schedule,
1039 .task_wake_up = gsnedf_task_wake_up,
1040 .task_block = gsnedf_task_block,
1041 .admit_task = gsnedf_admit_task,
1042 .activate_plugin = gsnedf_activate_plugin,
1043 .deactivate_plugin = gsnedf_deactivate_plugin,
1044 .get_domain_proc_info = gsnedf_get_domain_proc_info,
1045#ifdef CONFIG_LITMUS_LOCKING
1046 .allocate_lock = gsnedf_allocate_lock,
1047#endif
1048};
1049
1050
1051static int __init init_gsn_edf(void)
1052{
1053 int cpu;
1054 cpu_entry_t *entry;
1055
1056 bheap_init(&gsnedf_cpu_heap);
1057 /* initialize CPU state */
1058 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1059 entry = &per_cpu(gsnedf_cpu_entries, cpu);
1060 gsnedf_cpus[cpu] = entry;
1061 entry->cpu = cpu;
1062 entry->hn = &gsnedf_heap_node[cpu];
1063 bheap_node_init(&entry->hn, entry);
1064 }
1065 edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs);
1066 return register_sched_plugin(&gsn_edf_plugin);
1067}
1068
1069
1070module_init(init_gsn_edf);