Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--  litmus/sched_gsn_edf.c | 688
1 file changed, 53 insertions(+), 635 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index e9c5e531b1ae..7876d707d939 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -14,7 +14,7 @@
14 14
15#include <litmus/litmus.h> 15#include <litmus/litmus.h>
16#include <litmus/jobs.h> 16#include <litmus/jobs.h>
17#include <litmus/sched_plugin.h> 17#include <litmus/sched_global_plugin.h>
18#include <litmus/edf_common.h> 18#include <litmus/edf_common.h>
19#include <litmus/sched_trace.h> 19#include <litmus/sched_trace.h>
20 20
@@ -24,578 +24,50 @@
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26 26
27/* Overview of GSN-EDF operations.
28 *
29 * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This
30 * description only covers how the individual operations are implemented in
31 * LITMUS.
32 *
33 * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
34 * structure (NOT the actually scheduled
35 * task). If there is another linked task To
36 * already it will set To->linked_on = NO_CPU
37 * (thereby removing its association with this
38 * CPU). However, it will not requeue the
39 * previously linked task (if any). It will set
40 * T's state to RT_F_RUNNING and check whether
41 * it is already running somewhere else. If T
42 * is scheduled somewhere else it will link
43 * it to that CPU instead (and pull the task
44 * previously linked there over to cpu). T may be NULL.
45 *
46 * unlink(T) - Unlink removes T from all scheduler data
47 * structures. If it is linked to some CPU it
48 * will link NULL to that CPU. If it is
49 * currently queued in the gsnedf queue it will
50 * be removed from the rt_domain. It is safe to
51 * call unlink(T) if T is not linked. T may not
52 * be NULL.
53 *
54 * requeue(T) - Requeue will insert T into the appropriate
55 * queue. If the system is in real-time mode and
56 * T is already released, it will go into the
57 * ready queue. If the system is not in
58 * real-time mode, then T will go into the
59 * release queue. If T's release time is in the
60 * future, it will go into the release
61 * queue. That means that T's release time/job
62 * no/etc. has to be updated before requeue(T) is
63 * called. It is not safe to call requeue(T)
64 * when T is already queued. T may not be NULL.
65 *
66 * gsnedf_job_arrival(T) - This is the catch all function when T enters
67 * the system after either a suspension or at a
68 * job release. It will queue T (which means it
69 * is not safe to call gsnedf_job_arrival(T) if
70 * T is already queued) and then check whether a
71 * preemption is necessary. If a preemption is
72 * necessary it will update the linkage
73 * accordingly and cause schedule() to be called
74 * (either with an IPI or need_resched). It is
75 * safe to call gsnedf_job_arrival(T) if T's
76 * next job has not been actually released yet
77 * (release time in the future). T will be put
78 * on the release queue in that case.
79 *
80 * job_completion(T) - Take care of everything that needs to be done
81 * to prepare T for its next release and place
82 * it in the right queue with
83 * gsnedf_job_arrival().
84 *
85 *
86 * When we know that T is linked to a CPU, then link_task_to_cpu(NULL, CPU) is
87 * equivalent to unlink(T). Note that if you unlink a task from a CPU, none of
88 * these functions will automatically propagate a pending task from the ready
89 * queue to the now-unlinked CPU. This is the job of the calling function (by
90 * means of __take_ready); see the sketch below this comment block.
91 */
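To make the caller-side contract above concrete, here is a minimal sketch of how a caller refills a CPU's link after an unlink. The helper name refill_cpu_link() is hypothetical (it does not exist in this file); the body mirrors the pattern used further down by gsnedf_schedule() and check_for_preemptions(), and it assumes the caller already holds gsnedf_lock.

static void refill_cpu_link(cpu_entry_t *entry)
{
	/* Clearing the link: link_task_to_cpu(NULL, entry) is equivalent to
	 * unlink() of whatever task was linked here. It neither requeues the
	 * old task nor pulls a replacement from the ready queue. */
	link_task_to_cpu(NULL, entry);

	/* Propagating the next pending job is the caller's responsibility,
	 * by means of __take_ready(). A NULL result is fine, since
	 * link_task_to_cpu() accepts T == NULL. */
	link_task_to_cpu(__take_ready(&gsnedf), entry);
}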
92 27
93
94/* cpu_entry_t - maintain the linked and scheduled state
95 */
96typedef struct {
97 int cpu;
98 struct task_struct* linked; /* only RT tasks */
99 struct task_struct* scheduled; /* only RT tasks */
100 struct bheap_node* hn;
101} cpu_entry_t;
102DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); 28DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
103 29
104cpu_entry_t* gsnedf_cpus[NR_CPUS]; 30#define gsnedf_lock (gsn_edf_plugin.domain.ready_lock)
105
106/* the cpus queue themselves according to priority in here */
107static struct bheap_node gsnedf_heap_node[NR_CPUS];
108static struct bheap gsnedf_cpu_heap;
109
110static rt_domain_t gsnedf;
111#define gsnedf_lock (gsnedf.ready_lock)
112
113
114/* Uncomment this if you want to see all scheduling decisions in the
115 * TRACE() log.
116#define WANT_ALL_SCHED_EVENTS
117 */
118
119static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
120{
121 cpu_entry_t *a, *b;
122 a = _a->value;
123 b = _b->value;
124 /* Note that a and b are inverted: we want the lowest-priority CPU at
125 * the top of the heap.
126 */
127 return edf_higher_prio(b->linked, a->linked);
128}
129
130/* update_cpu_position - Move the cpu entry to the correct place to maintain
131 * order in the cpu queue. Caller must hold gsnedf lock.
132 */
133static void update_cpu_position(cpu_entry_t *entry)
134{
135 if (likely(bheap_node_in_heap(entry->hn)))
136 bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
137 bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn);
138}
139 31
140/* caller must hold gsnedf lock */ 32#ifdef CONFIG_FMLP
141static cpu_entry_t* lowest_prio_cpu(void) 33static long gsnedf_pi_block(struct pi_semaphore *sem,
142{ 34 struct task_struct *new_waiter);
143 struct bheap_node* hn; 35static long gsnedf_inherit_priority(struct pi_semaphore *sem,
144 hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap); 36 struct task_struct *new_owner);
145 return hn->value; 37static long gsnedf_return_priority(struct pi_semaphore *sem);
146}
147
148
149/* link_task_to_cpu - Update the link of a CPU.
150 * Handles the case where the to-be-linked task is already
151 * scheduled on a different CPU.
152 */
153static noinline void link_task_to_cpu(struct task_struct* linked,
154 cpu_entry_t *entry)
155{
156 cpu_entry_t *sched;
157 struct task_struct* tmp;
158 int on_cpu;
159
160 BUG_ON(linked && !is_realtime(linked));
161
162 /* Currently linked task is set to be unlinked. */
163 if (entry->linked) {
164 entry->linked->rt_param.linked_on = NO_CPU;
165 }
166
167 /* Link new task to CPU. */
168 if (linked) {
169 set_rt_flags(linked, RT_F_RUNNING);
170		/* handle the case where the task is already scheduled somewhere! */
171 on_cpu = linked->rt_param.scheduled_on;
172 if (on_cpu != NO_CPU) {
173 sched = &per_cpu(gsnedf_cpu_entries, on_cpu);
174 /* this should only happen if not linked already */
175 BUG_ON(sched->linked == linked);
176
177 /* If we are already scheduled on the CPU to which we
178 * wanted to link, we don't need to do the swap --
179 * we just link ourselves to the CPU and depend on
180 * the caller to get things right.
181 */
182 if (entry != sched) {
183 TRACE_TASK(linked,
184 "already scheduled on %d, updating link.\n",
185 sched->cpu);
186 tmp = sched->linked;
187 linked->rt_param.linked_on = sched->cpu;
188 sched->linked = linked;
189 update_cpu_position(sched);
190 linked = tmp;
191 }
192 }
193 if (linked) /* might be NULL due to swap */
194 linked->rt_param.linked_on = entry->cpu;
195 }
196 entry->linked = linked;
197#ifdef WANT_ALL_SCHED_EVENTS
198 if (linked)
199 TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
200 else
201 TRACE("NULL linked to %d.\n", entry->cpu);
202#endif
203 update_cpu_position(entry);
204}
205
206/* unlink - Make sure a task is not linked any longer to an entry
207 * where it was linked before. Must hold gsnedf_lock.
208 */
209static noinline void unlink(struct task_struct* t)
210{
211 cpu_entry_t *entry;
212
213 if (t->rt_param.linked_on != NO_CPU) {
214 /* unlink */
215 entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on);
216 t->rt_param.linked_on = NO_CPU;
217 link_task_to_cpu(NULL, entry);
218 } else if (is_queued(t)) {
219 /* This is an interesting situation: t is scheduled,
220 * but was just recently unlinked. It cannot be
221 * linked anywhere else (because then it would have
222 * been relinked to this CPU), thus it must be in some
223 * queue. We must remove it from the list in this
224 * case.
225 */
226 remove(&gsnedf, t);
227 }
228}
229
230
231/* preempt - force a CPU to reschedule
232 */
233static void preempt(cpu_entry_t *entry)
234{
235 preempt_if_preemptable(entry->scheduled, entry->cpu);
236}
237
238/* requeue - Put an unlinked task into gsn-edf domain.
239 * Caller must hold gsnedf_lock.
240 */
241static noinline void requeue(struct task_struct* task)
242{
243 BUG_ON(!task);
244 /* sanity check before insertion */
245 BUG_ON(is_queued(task));
246
247 if (is_released(task, litmus_clock()))
248 __add_ready(&gsnedf, task);
249 else {
250 /* it has got to wait */
251 add_release(&gsnedf, task);
252 }
253}
254
255/* check for any necessary preemptions */
256static void check_for_preemptions(void)
257{
258 struct task_struct *task;
259 cpu_entry_t* last;
260
261 for(last = lowest_prio_cpu();
262 edf_preemption_needed(&gsnedf, last->linked);
263 last = lowest_prio_cpu()) {
264 /* preemption necessary */
265 task = __take_ready(&gsnedf);
266 TRACE("check_for_preemptions: attempting to link task %d to %d\n",
267 task->pid, last->cpu);
268 if (last->linked)
269 requeue(last->linked);
270 link_task_to_cpu(task, last);
271 preempt(last);
272 }
273}
274
275/* gsnedf_job_arrival: task is either resumed or released */
276static noinline void gsnedf_job_arrival(struct task_struct* task)
277{
278 BUG_ON(!task);
279
280 requeue(task);
281 check_for_preemptions();
282}
283
284static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
285{
286 unsigned long flags;
287
288 raw_spin_lock_irqsave(&gsnedf_lock, flags);
289
290 __merge_ready(rt, tasks);
291 check_for_preemptions();
292
293 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
294}
295
296/* caller holds gsnedf_lock */
297static noinline void job_completion(struct task_struct *t, int forced)
298{
299 BUG_ON(!t);
300
301 sched_trace_task_completion(t, forced);
302
303 TRACE_TASK(t, "job_completion().\n");
304
305 /* set flags */
306 set_rt_flags(t, RT_F_SLEEP);
307 /* prepare for next period */
308 prepare_for_next_period(t);
309 if (is_released(t, litmus_clock()))
310 sched_trace_task_release(t);
311 /* unlink */
312 unlink(t);
313 /* requeue
314 * But don't requeue a blocking task. */
315 if (is_running(t))
316 gsnedf_job_arrival(t);
317}
318
319/* gsnedf_tick - this function is called for every local timer
320 * interrupt.
321 *
322 * checks whether the current task has expired and checks
323 * whether we need to preempt it if it has not expired
324 */
325static void gsnedf_tick(struct task_struct* t)
326{
327 if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
328 if (!is_np(t)) {
329 /* np tasks will be preempted when they become
330 * preemptable again
331 */
332 litmus_reschedule_local();
333 TRACE("gsnedf_scheduler_tick: "
334 "%d is preemptable "
335 " => FORCE_RESCHED\n", t->pid);
336 } else if (is_user_np(t)) {
337 TRACE("gsnedf_scheduler_tick: "
338 "%d is non-preemptable, "
339 "preemption delayed.\n", t->pid);
340 request_exit_np(t);
341 }
342 }
343}
344
345/* Getting schedule() right is a bit tricky. schedule() may not make any
346 * assumptions on the state of the current task since it may be called for a
347 * number of reasons. The reasons include: a scheduler_tick() determined that it
348 * was necessary, sys_exit_np() was called, some Linux
349 * subsystem determined so, or even (in the worst case) there is a bug
350 * hidden somewhere. Thus, we must take extreme care to determine what the
351 * current state is.
352 *
353 * The CPU could currently be scheduling a task (or not), be linked (or not).
354 *
355 * The following assertions for the scheduled task could hold:
356 *
357 * - !is_running(scheduled) // the job blocks
358 * - scheduled->timeslice == 0 // the job completed (forcefully)
359 * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall)
360 * - linked != scheduled // we need to reschedule (for any reason)
361 * - is_np(scheduled) // rescheduling must be delayed,
362 * sys_exit_np must be requested
363 *
364 * Any of these can occur together.
365 */
366static struct task_struct* gsnedf_schedule(struct task_struct * prev)
367{
368 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
369 int out_of_time, sleep, preempt, np, exists, blocks;
370 struct task_struct* next = NULL;
371
372#ifdef CONFIG_RELEASE_MASTER
373 /* Bail out early if we are the release master.
374 * The release master never schedules any real-time tasks.
375 */
376 if (gsnedf.release_master == entry->cpu)
377 return NULL;
378#endif
379
380 raw_spin_lock(&gsnedf_lock);
381
382 /* sanity checking */
383 BUG_ON(entry->scheduled && entry->scheduled != prev);
384 BUG_ON(entry->scheduled && !is_realtime(prev));
385 BUG_ON(is_realtime(prev) && !entry->scheduled);
386
387 /* (0) Determine state */
388 exists = entry->scheduled != NULL;
389 blocks = exists && !is_running(entry->scheduled);
390 out_of_time = exists &&
391 budget_enforced(entry->scheduled) &&
392 budget_exhausted(entry->scheduled);
393 np = exists && is_np(entry->scheduled);
394 sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
395 preempt = entry->scheduled != entry->linked;
396
397#ifdef WANT_ALL_SCHED_EVENTS
398 TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
399#endif
400
401 if (exists)
402 TRACE_TASK(prev,
403 "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
404 "state:%d sig:%d\n",
405 blocks, out_of_time, np, sleep, preempt,
406 prev->state, signal_pending(prev));
407 if (entry->linked && preempt)
408 TRACE_TASK(prev, "will be preempted by %s/%d\n",
409 entry->linked->comm, entry->linked->pid);
410
411
412 /* If a task blocks we have no choice but to reschedule.
413 */
414 if (blocks)
415 unlink(entry->scheduled);
416
417 /* Request a sys_exit_np() call if we would like to preempt but cannot.
418 * We need to make sure to update the link structure anyway in case
419 * that we are still linked. Multiple calls to request_exit_np() don't
420 * hurt.
421 */
422 if (np && (out_of_time || preempt || sleep)) {
423 unlink(entry->scheduled);
424 request_exit_np(entry->scheduled);
425 }
426
427 /* Any task that is preemptable and either exhausts its execution
428 * budget or wants to sleep completes. We may have to reschedule after
429 * this. Don't do a job completion if we block (can't have timers running
430 * for blocked jobs). Preemptions go first for the same reason.
431 */
432 if (!np && (out_of_time || sleep) && !blocks && !preempt)
433 job_completion(entry->scheduled, !sleep);
434
435 /* Link pending task if we became unlinked.
436 */
437 if (!entry->linked)
438 link_task_to_cpu(__take_ready(&gsnedf), entry);
439
440 /* The final scheduling decision. Do we need to switch for some reason?
441 * If linked is different from scheduled, then select linked as next.
442 */
443 if ((!np || blocks) &&
444 entry->linked != entry->scheduled) {
445 /* Schedule a linked job? */
446 if (entry->linked) {
447 entry->linked->rt_param.scheduled_on = entry->cpu;
448 next = entry->linked;
449 }
450 if (entry->scheduled) {
451 /* not gonna be scheduled soon */
452 entry->scheduled->rt_param.scheduled_on = NO_CPU;
453 TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
454 }
455 } else
456 /* Only override Linux scheduler if we have a real-time task
457 * scheduled that needs to continue.
458 */
459 if (exists)
460 next = prev;
461
462 sched_state_task_picked();
463
464 raw_spin_unlock(&gsnedf_lock);
465
466#ifdef WANT_ALL_SCHED_EVENTS
467 TRACE("gsnedf_lock released, next=0x%p\n", next);
468
469 if (next)
470 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
471 else if (exists && !next)
472 TRACE("becomes idle at %llu.\n", litmus_clock());
473#endif
474
475
476 return next;
477}
478
479
480/* _finish_switch - we just finished the switch away from prev
481 */
482static void gsnedf_finish_switch(struct task_struct *prev)
483{
484 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
485
486 entry->scheduled = is_realtime(current) ? current : NULL;
487#ifdef WANT_ALL_SCHED_EVENTS
488 TRACE_TASK(prev, "switched away from\n");
489#endif
490}
491
492
493/* Prepare a task for running in RT mode
494 */
495static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
496{
497 unsigned long flags;
498 cpu_entry_t* entry;
499
500 TRACE("gsn edf: task new %d\n", t->pid);
501
502 raw_spin_lock_irqsave(&gsnedf_lock, flags);
503
504 /* setup job params */
505 release_at(t, litmus_clock());
506
507 if (running) {
508 entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t));
509 BUG_ON(entry->scheduled);
510
511#ifdef CONFIG_RELEASE_MASTER
512 if (entry->cpu != gsnedf.release_master) {
513#endif
514 entry->scheduled = t;
515 tsk_rt(t)->scheduled_on = task_cpu(t);
516#ifdef CONFIG_RELEASE_MASTER
517 } else {
518 /* do not schedule on release master */
519 preempt(entry); /* force resched */
520 tsk_rt(t)->scheduled_on = NO_CPU;
521 }
522#endif 38#endif
523 } else {
524 t->rt_param.scheduled_on = NO_CPU;
525 }
526 t->rt_param.linked_on = NO_CPU;
527
528 gsnedf_job_arrival(t);
529 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
530}
531 39
532static void gsnedf_task_wake_up(struct task_struct *task) 40/* GSN-EDF Plugin object */
533{ 41static struct sched_global_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
534 unsigned long flags; 42 .plugin = {
535 lt_t now; 43 .plugin_name = "GSN-EDF",
536 44 .finish_switch = gblv_finish_switch,
537 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); 45 .tick = gblv_tick,
538 46 .task_new = gblv_task_new,
539 raw_spin_lock_irqsave(&gsnedf_lock, flags); 47 .complete_job = complete_job,
540 /* We need to take suspensions because of semaphores into 48 .task_exit = gblv_task_exit,
541 * account! If a job resumes after being suspended due to acquiring 49 .schedule = gblv_schedule,
542 * a semaphore, it should never be treated as a new job release. 50 .task_wake_up = gblv_task_wake_up,
543 */ 51 .task_block = gblv_task_block,
544 if (get_rt_flags(task) == RT_F_EXIT_SEM) { 52 #ifdef CONFIG_FMLP
545 set_rt_flags(task, RT_F_RUNNING); 53 .fmlp_active = 1,
546 } else { 54 .pi_block = gsnedf_pi_block,
547 now = litmus_clock(); 55 .inherit_priority = gsnedf_inherit_priority,
548 if (is_tardy(task, now)) { 56 .return_priority = gsnedf_return_priority,
549 /* new sporadic release */ 57 #endif
550 release_at(task, now); 58 .admit_task = gblv_admit_task,
551 sched_trace_task_release(task); 59 .activate_plugin = gbl_activate_plugin
552 } 60 },
553 else { 61
554 if (task->rt.time_slice) { 62 .prio_order = edf_higher_prio,
555 /* came back in time before deadline 63 .take_ready = __take_ready,
556 */ 64 .add_ready = __add_ready,
557 set_rt_flags(task, RT_F_RUNNING); 65 .job_arrival = gblv_job_arrival,
558 } 66 .job_completion = gbl_job_completion
559 } 67};
560 }
561 gsnedf_job_arrival(task);
562 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
563}
564
565static void gsnedf_task_block(struct task_struct *t)
566{
567 unsigned long flags;
568
569 TRACE_TASK(t, "block at %llu\n", litmus_clock());
570
571 /* unlink if necessary */
572 raw_spin_lock_irqsave(&gsnedf_lock, flags);
573 unlink(t);
574 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
575
576 BUG_ON(!is_realtime(t));
577}
578
579
580static void gsnedf_task_exit(struct task_struct * t)
581{
582 unsigned long flags;
583
584 /* unlink if necessary */
585 raw_spin_lock_irqsave(&gsnedf_lock, flags);
586 unlink(t);
587 if (tsk_rt(t)->scheduled_on != NO_CPU) {
588 gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
589 tsk_rt(t)->scheduled_on = NO_CPU;
590 }
591 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
592 68
593 BUG_ON(!is_realtime(t));
594 TRACE_TASK(t, "RIP\n");
595}
596 69
597#ifdef CONFIG_FMLP 70#ifdef CONFIG_FMLP
598
599/* Update the queue position of a task that got its priority boosted via 71/* Update the queue position of a task that got its priority boosted via
600 * priority inheritance. */ 72 * priority inheritance. */
601static void update_queue_position(struct task_struct *holder) 73static void update_queue_position(struct task_struct *holder)
@@ -618,13 +90,13 @@ static void update_queue_position(struct task_struct *holder)
618 * We can't use heap_decrease() here since 90 * We can't use heap_decrease() here since
619 * the cpu_heap is ordered in reverse direction, so 91 * the cpu_heap is ordered in reverse direction, so
620 * it is actually an increase. */ 92 * it is actually an increase. */
621 bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, 93 bheap_delete(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap,
622 gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); 94 gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn);
623 bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, 95 bheap_insert(gbl_cpu_lower_prio, &gsn_edf_plugin.cpu_heap,
624 gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); 96 gsn_edf_plugin.cpus[tsk_rt(holder)->linked_on]->hn);
625 } else { 97 } else {
626 /* holder may be queued: first stop queue changes */ 98 /* holder may be queued: first stop queue changes */
627 raw_spin_lock(&gsnedf.release_lock); 99 raw_spin_lock(&gsn_edf_plugin.domain.release_lock);
628 if (is_queued(holder)) { 100 if (is_queued(holder)) {
629 TRACE_TASK(holder, "%s: is queued\n", 101 TRACE_TASK(holder, "%s: is queued\n",
630 __FUNCTION__); 102 __FUNCTION__);
@@ -642,7 +114,7 @@ static void update_queue_position(struct task_struct *holder)
642 TRACE_TASK(holder, "%s: is NOT queued => Done.\n", 114 TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
643 __FUNCTION__); 115 __FUNCTION__);
644 } 116 }
645 raw_spin_unlock(&gsnedf.release_lock); 117 raw_spin_unlock(&gsn_edf_plugin.domain.release_lock);
646 118
647 /* If holder was enqueued in a release heap, then the following 119 /* If holder was enqueued in a release heap, then the following
648 * preemption check is pointless, but we can't easily detect 120 * preemption check is pointless, but we can't easily detect
@@ -654,9 +126,9 @@ static void update_queue_position(struct task_struct *holder)
654 /* heap_decrease() hit the top level of the heap: make 126 /* heap_decrease() hit the top level of the heap: make
655 * sure preemption checks get the right task, not the 127 * sure preemption checks get the right task, not the
656 * potentially stale cache. */ 128 * potentially stale cache. */
657 bheap_uncache_min(edf_ready_order, 129 bheap_uncache_min(gbl_ready_order,
658 &gsnedf.ready_queue); 130 &gsn_edf_plugin.domain.ready_queue);
659 check_for_preemptions(); 131 gbl_check_for_preemptions();
660 } 132 }
661 } 133 }
662} 134}
@@ -740,8 +212,8 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
740 t->rt_param.inh_task = NULL; 212 t->rt_param.inh_task = NULL;
741 213
742 /* Check if rescheduling is necessary */ 214 /* Check if rescheduling is necessary */
743 unlink(t); 215 gbl_unlink(t);
744 gsnedf_job_arrival(t); 216 gsn_edf_plugin.job_arrival(t);
745 raw_spin_unlock(&gsnedf_lock); 217 raw_spin_unlock(&gsnedf_lock);
746 } 218 }
747 219
@@ -750,78 +222,24 @@ static long gsnedf_return_priority(struct pi_semaphore *sem)
750 222
751#endif 223#endif
752 224
753static long gsnedf_admit_task(struct task_struct* tsk)
754{
755 return 0;
756}
757
758static long gsnedf_activate_plugin(void)
759{
760 int cpu;
761 cpu_entry_t *entry;
762
763 bheap_init(&gsnedf_cpu_heap);
764#ifdef CONFIG_RELEASE_MASTER
765 gsnedf.release_master = atomic_read(&release_master_cpu);
766#endif
767
768 for_each_online_cpu(cpu) {
769 entry = &per_cpu(gsnedf_cpu_entries, cpu);
770 bheap_node_init(&entry->hn, entry);
771 entry->linked = NULL;
772 entry->scheduled = NULL;
773#ifdef CONFIG_RELEASE_MASTER
774 if (cpu != gsnedf.release_master) {
775#endif
776 TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu);
777 update_cpu_position(entry);
778#ifdef CONFIG_RELEASE_MASTER
779 } else {
780 TRACE("GSN-EDF: CPU %d is release master.\n", cpu);
781 }
782#endif
783 }
784 return 0;
785}
786
787/* Plugin object */
788static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
789 .plugin_name = "GSN-EDF",
790 .finish_switch = gsnedf_finish_switch,
791 .tick = gsnedf_tick,
792 .task_new = gsnedf_task_new,
793 .complete_job = complete_job,
794 .task_exit = gsnedf_task_exit,
795 .schedule = gsnedf_schedule,
796 .task_wake_up = gsnedf_task_wake_up,
797 .task_block = gsnedf_task_block,
798#ifdef CONFIG_FMLP
799 .fmlp_active = 1,
800 .pi_block = gsnedf_pi_block,
801 .inherit_priority = gsnedf_inherit_priority,
802 .return_priority = gsnedf_return_priority,
803#endif
804 .admit_task = gsnedf_admit_task,
805 .activate_plugin = gsnedf_activate_plugin,
806};
807
808 225
809static int __init init_gsn_edf(void) 226static int __init init_gsn_edf(void)
810{ 227{
811 int cpu; 228 int cpu;
812 cpu_entry_t *entry; 229 cpu_entry_t *entry;
813 230
814 bheap_init(&gsnedf_cpu_heap); 231 bheap_init(&gsn_edf_plugin.cpu_heap);
815 /* initialize CPU state */ 232 /* initialize CPU state */
816 for (cpu = 0; cpu < NR_CPUS; cpu++) { 233 for (cpu = 0; cpu < NR_CPUS; cpu++) {
817 entry = &per_cpu(gsnedf_cpu_entries, cpu); 234 entry = &per_cpu(gsnedf_cpu_entries, cpu);
818 gsnedf_cpus[cpu] = entry; 235 gsn_edf_plugin.cpus[cpu] = entry;
819 entry->cpu = cpu; 236 entry->cpu = cpu;
820 entry->hn = &gsnedf_heap_node[cpu]; 237 entry->hn = &gsn_edf_plugin.heap_node[cpu];
821 bheap_node_init(&entry->hn, entry); 238 bheap_node_init(&entry->hn, entry);
822 } 239 }
823 edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); 240 gbl_domain_init(&gsn_edf_plugin, NULL, gbl_release_jobs);
824 return register_sched_plugin(&gsn_edf_plugin); 241
242 return register_sched_plugin(&gsn_edf_plugin.plugin);
825} 243}
826 244
827 245
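A note on the converted (right-hand) side of this diff: GSN-EDF now only supplies its EDF-specific hooks, while the generic gblv_*/gbl_* helpers provide the shared scheduling machinery. The definition of struct sched_global_plugin lives in <litmus/sched_global_plugin.h>, which is not part of this diff, so the sketch below is only inferred from the member accesses visible above (.plugin, .domain, .cpu_heap, .cpus[], .heap_node[] and the five hooks); field order, parameter names, and the exact hook signatures are assumptions.

/* Inferred sketch only -- see <litmus/sched_global_plugin.h> for the real definition. */
struct sched_global_plugin {
	struct sched_plugin	plugin;		/* embedded; registered via register_sched_plugin(&gsn_edf_plugin.plugin) */
	rt_domain_t		domain;		/* supplies ready_lock, release_lock, ready_queue */
	struct bheap		cpu_heap;	/* CPUs keyed by priority of their linked task */
	struct bheap_node	heap_node[NR_CPUS];
	cpu_entry_t		*cpus[NR_CPUS];

	/* policy hooks plugged in by gsn_edf_plugin above */
	int (*prio_order)(struct task_struct *first, struct task_struct *second);	/* edf_higher_prio */
	struct task_struct* (*take_ready)(rt_domain_t *rt);				/* __take_ready */
	void (*add_ready)(rt_domain_t *rt, struct task_struct *new_task);		/* __add_ready */
	void (*job_arrival)(struct task_struct *task);					/* gblv_job_arrival */
	void (*job_completion)(struct task_struct *task, int forced);			/* gbl_job_completion */
};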