author		Jeremy Erickson <jerickso@cs.unc.edu>	2010-09-23 13:04:09 -0400
committer	Jeremy Erickson <jerickso@cs.unc.edu>	2010-09-23 13:05:06 -0400
commit		fee982671d23523fee2e45f4e21f085c81e4fac2 (patch)
tree		74a24b058c5cb4976ce5d05499d0dd421518f885
parent		136a08dbe8c28e751b01e932420f715edb229f6b (diff)
Initial updates to add criticality
-rw-r--r--	include/litmus/rt_param.h	1
-rw-r--r--	litmus/Makefile			3
-rw-r--r--	litmus/sched_mc.c		691
3 files changed, 694 insertions, 1 deletions
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index a7a183f34a80..ea7f45ede132 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -38,6 +38,7 @@ struct rt_task {
 	lt_t		period;
 	lt_t		phase;
 	unsigned int	cpu;
+	unsigned int	crit;
 	task_class_t	cls;
 	budget_policy_t	budget_policy;	/* ignored by pfair */
 };
diff --git a/litmus/Makefile b/litmus/Makefile
index 30369787ece2..a5d04aa6539f 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -13,7 +13,8 @@ obj-y = sched_plugin.o litmus.o \
 	    bheap.o \
 	    ctrldev.o \
 	    sched_gsn_edf.o \
-	    sched_psn_edf.o
+	    sched_psn_edf.o \
+	    sched_mc.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
new file mode 100644
index 000000000000..ea4c703424af
--- /dev/null
+++ b/litmus/sched_mc.c
@@ -0,0 +1,691 @@
/*
 * litmus/sched_mc.c
 *
 * Implementation of the Mixed Criticality scheduling algorithm.
 *
 * (Per Mollison, Erickson, Anderson, Baruah, Scoredos 2010)
 *
 * This version uses the simple approach and serializes all scheduling
 * decisions by the use of a queue lock. This is probably not the
 * best way to do it, but it should suffice for now.
 */
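
/* The serialization mentioned above is provided by mc_lock (defined
 * below): job releases, wake-ups, blocks, exits, and schedule() decisions
 * are all performed while holding it.
 */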

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>

#include <litmus/bheap.h>

#include <linux/module.h>

/* Overview of MC operations.
 *
 * For a detailed explanation of MC, see the paper cited at the top of this
 * file. This description only covers how the individual operations are
 * implemented in LITMUS.
 *
 * link_task_to_cpu(T, cpu)	- Low-level operation to update the linkage
 *				  structure (NOT the actually scheduled
 *				  task). If there is another linked task To
 *				  already it will set To->linked_on = NO_CPU
 *				  (thereby removing its association with this
 *				  CPU). However, it will not requeue the
 *				  previously linked task (if any). It will set
 *				  T's state to RT_F_RUNNING and check whether
 *				  it is already running somewhere else. If T
 *				  is scheduled somewhere else it will link
 *				  it to that CPU instead (and pull the linked
 *				  task to cpu). T may be NULL.
 *
 * unlink(T)			- Unlink removes T from all scheduler data
 *				  structures. If it is linked to some CPU it
 *				  will link NULL to that CPU. If it is
 *				  currently queued in the mc queue it will
 *				  be removed from the rt_domain. It is safe to
 *				  call unlink(T) if T is not linked. T may not
 *				  be NULL.
 *
 * requeue(T)			- Requeue will insert T into the appropriate
 *				  queue. If the system is in real-time mode
 *				  and T is released already, it will go into
 *				  the ready queue. If the system is not in
 *				  real-time mode, T will go into the release
 *				  queue. If T's release time is in the
 *				  future, it will go into the release
 *				  queue. That means that T's release time/job
 *				  no/etc. has to be updated before requeue(T)
 *				  is called. It is not safe to call requeue(T)
 *				  when T is already queued. T may not be NULL.
 *
 * mc_job_arrival(T)		- This is the catch-all function when T enters
 *				  the system after either a suspension or at a
 *				  job release. It will queue T (which means it
 *				  is not safe to call mc_job_arrival(T) if
 *				  T is already queued) and then check whether a
 *				  preemption is necessary. If a preemption is
 *				  necessary it will update the linkage
 *				  accordingly and cause schedule() to be called
 *				  (either with an IPI or need_resched). It is
 *				  safe to call mc_job_arrival(T) if T's
 *				  next job has not actually been released yet
 *				  (release time in the future). T will be put
 *				  on the release queue in that case.
 *
 * job_completion(T)		- Take care of everything that needs to be done
 *				  to prepare T for its next release and place
 *				  it in the right queue with
 *				  mc_job_arrival().
 *
 *
 * When we know that T is linked to CPU then link_task_to_cpu(NULL, CPU) is
 * equivalent to unlink(T). Note that if you unlink a task from a CPU none of
 * the functions will automatically propagate pending tasks from the ready
 * queue to a linked task. This is the job of the calling function (by means
 * of __take_ready).
 */
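
/* As a quick orientation, a job release takes roughly the following path
 * through the functions below (a sketch of the existing call chain, not
 * additional behavior):
 *
 *   mc_release_jobs()
 *     __merge_ready()           merge the released jobs into the ready queue
 *     check_for_preemptions()   while the lowest-priority linked task can be
 *       __take_ready()            preempted: pull the ready-queue head,
 *       requeue()                 requeue the task it displaces (if any),
 *       link_task_to_cpu()        update the linkage, and
 *       preempt()                 trigger a reschedule on that CPU
 */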


/* cpu_entry_t - maintain the linked and scheduled state
 */
typedef struct {
	int			cpu;
	struct task_struct*	linked;		/* only RT tasks */
	struct task_struct*	scheduled;	/* only RT tasks */
	atomic_t		will_schedule;	/* prevent unneeded IPIs */
	struct bheap_node*	hn;
} cpu_entry_t;
DEFINE_PER_CPU(cpu_entry_t, mc_cpu_entries);

cpu_entry_t* mc_cpus[NR_CPUS];

#define set_will_schedule() \
	(atomic_set(&__get_cpu_var(mc_cpu_entries).will_schedule, 1))
#define clear_will_schedule() \
	(atomic_set(&__get_cpu_var(mc_cpu_entries).will_schedule, 0))
#define test_will_schedule(cpu) \
	(atomic_read(&per_cpu(mc_cpu_entries, cpu).will_schedule))


/* the cpus queue themselves according to priority in here */
static struct bheap_node mc_heap_node[NR_CPUS];
static struct bheap      mc_cpu_heap;

static rt_domain_t mc;
DEFINE_PER_CPU(rt_domain_t, crit_b);
static rt_domain_t crit_c;
static rt_domain_t crit_d;
#define mc_lock (mc.ready_lock)
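
/* The per-criticality domains above are not used yet in this initial
 * version; everything below still runs through the single EDF domain mc.
 * That crit_b is per-CPU while crit_c and crit_d are global suggests that
 * level-B work is intended to be scheduled per processor and levels C/D
 * globally, but that is only implied by the declarations.
 */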


/* Uncomment this if you want to see all scheduling decisions in the
 * TRACE() log.
#define WANT_ALL_SCHED_EVENTS
 */

static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	cpu_entry_t *a, *b;
	a = _a->value;
	b = _b->value;
	/* Note that a and b are inverted: we want the lowest-priority CPU at
	 * the top of the heap.
	 */
	return edf_higher_prio(b->linked, a->linked);
}

/* update_cpu_position - Move the cpu entry to the correct place to maintain
 *                       order in the cpu queue. Caller must hold mc lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
	if (likely(bheap_node_in_heap(entry->hn)))
		bheap_delete(cpu_lower_prio, &mc_cpu_heap, entry->hn);
	bheap_insert(cpu_lower_prio, &mc_cpu_heap, entry->hn);
}

/* caller must hold mc lock */
static cpu_entry_t* lowest_prio_cpu(void)
{
	struct bheap_node* hn;
	hn = bheap_peek(cpu_lower_prio, &mc_cpu_heap);
	return hn->value;
}


/* link_task_to_cpu - Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
				      cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	BUG_ON(linked && !is_realtime(linked));

	/* Currently linked task is set to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		set_rt_flags(linked, RT_F_RUNNING);
		/* handle the case where the task is already scheduled
		 * somewhere else */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(mc_cpu_entries, on_cpu);
			/* this should only happen if not linked already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;
#ifdef WANT_ALL_SCHED_EVENTS
	if (linked)
		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
	else
		TRACE("NULL linked to %d.\n", entry->cpu);
#endif
	update_cpu_position(entry);
}
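
/* Example of the swap handled above (a sketch of the existing logic, not
 * additional behavior): if T is to be linked to CPU 0 but is still
 * scheduled on CPU 1, T is linked to CPU 1 instead and CPU 1's previously
 * linked task is handed over to CPU 0, which avoids migrating a task that
 * is already running.
 */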

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold mc_lock.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (unlikely(!t)) {
		TRACE_BUG_ON(!t);
		return;
	}

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(mc_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked. It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 */
		remove(&mc, t);
	}
}


/* preempt - force a CPU to reschedule
 */
static void preempt(cpu_entry_t *entry)
{
	preempt_if_preemptable(entry->scheduled, entry->cpu);
}

/* requeue - Put an unlinked task into the MC domain.
 *           Caller must hold mc_lock.
 */
static noinline void requeue(struct task_struct* task)
{
	BUG_ON(!task);
	/* sanity check before insertion */
	BUG_ON(is_queued(task));

	if (is_released(task, litmus_clock()))
		__add_ready(&mc, task);
	else {
		/* it has got to wait */
		add_release(&mc, task);
	}
}

/* check for any necessary preemptions */
static void check_for_preemptions(void)
{
	struct task_struct *task;
	cpu_entry_t* last;

	for (last = lowest_prio_cpu();
	     edf_preemption_needed(&mc, last->linked);
	     last = lowest_prio_cpu()) {
		/* preemption necessary */
		task = __take_ready(&mc);
		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
		      task->pid, last->cpu);
		if (last->linked)
			requeue(last->linked);
		link_task_to_cpu(task, last);
		preempt(last);
	}
}
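
/* Note that check_for_preemptions() re-peeks the CPU heap on every
 * iteration: link_task_to_cpu() reorders the heap, so the next pass sees
 * the new lowest-priority CPU. The loop stops once the head of the ready
 * queue no longer has higher priority than that CPU's linked task.
 */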

/* mc_job_arrival: task is either resumed or released */
static noinline void mc_job_arrival(struct task_struct* task)
{
	BUG_ON(!task);

	requeue(task);
	check_for_preemptions();
}

static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mc_lock, flags);

	__merge_ready(rt, tasks);
	check_for_preemptions();

	raw_spin_unlock_irqrestore(&mc_lock, flags);
}

/* caller holds mc_lock */
static noinline void job_completion(struct task_struct *t, int forced)
{
	BUG_ON(!t);

	sched_trace_task_completion(t, forced);

	TRACE_TASK(t, "job_completion().\n");

	/* set flags */
	set_rt_flags(t, RT_F_SLEEP);
	/* prepare for next period */
	prepare_for_next_period(t);
	if (is_released(t, litmus_clock()))
		sched_trace_task_release(t);
	/* unlink */
	unlink(t);
	/* requeue
	 * But don't requeue a blocking task. */
	if (is_running(t))
		mc_job_arrival(t);
}

/* mc_tick - this function is called for every local timer
 *           interrupt.
 *
 *           checks whether the current task has expired and checks
 *           whether we need to preempt it if it has not expired
 */
static void mc_tick(struct task_struct* t)
{
	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
		if (!is_np(t)) {
			/* np tasks will be preempted when they become
			 * preemptable again
			 */
			set_tsk_need_resched(t);
			set_will_schedule();
			TRACE("mc_scheduler_tick: "
			      "%d is preemptable "
			      " => FORCE_RESCHED\n", t->pid);
		} else if (is_user_np(t)) {
			TRACE("mc_scheduler_tick: "
			      "%d is non-preemptable, "
			      "preemption delayed.\n", t->pid);
			request_exit_np(t);
		}
	}
}

/* Getting schedule() right is a bit tricky. schedule() may not make any
 * assumptions on the state of the current task since it may be called for a
 * number of reasons. The reasons include a scheduler_tick() having determined
 * that it was necessary, because sys_exit_np() was called, because some Linux
 * subsystem determined so, or even (in the worst case) because there is a bug
 * hidden somewhere. Thus, we must take extreme care to determine what the
 * current state is.
 *
 * The CPU could currently be scheduling a task (or not), be linked (or not).
 *
 * The following assertions for the scheduled task could hold:
 *
 *	- !is_running(scheduled)	// the job blocks
 *	- scheduled->timeslice == 0	// the job completed (forcefully)
 *	- get_rt_flag() == RT_F_SLEEP	// the job completed (by syscall)
 *	- linked != scheduled		// we need to reschedule (for any reason)
 *	- is_np(scheduled)		// rescheduling must be delayed,
 *					   sys_exit_np must be requested
 *
 * Any of these can occur together.
 */
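/* Each of the assertions above corresponds to one of the state flags
 * computed at the top of mc_schedule(): blocks, out_of_time, sleep,
 * preempt and np, respectively ("exists" merely records whether a
 * real-time task is scheduled at all).
 */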
static struct task_struct* mc_schedule(struct task_struct * prev)
{
	cpu_entry_t* entry = &__get_cpu_var(mc_cpu_entries);
	int out_of_time, sleep, preempt, np, exists, blocks;
	struct task_struct* next = NULL;

#ifdef CONFIG_RELEASE_MASTER
	/* Bail out early if we are the release master.
	 * The release master never schedules any real-time tasks.
	 */
	if (mc.release_master == entry->cpu)
		return NULL;
#endif

	raw_spin_lock(&mc_lock);
	clear_will_schedule();

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists      = entry->scheduled != NULL;
	blocks      = exists && !is_running(entry->scheduled);
	out_of_time = exists &&
		      budget_enforced(entry->scheduled) &&
		      budget_exhausted(entry->scheduled);
	np          = exists && is_np(entry->scheduled);
	sleep       = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
	preempt     = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked mc_schedule.\n");
#endif

	if (exists)
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
			   "state:%d sig:%d\n",
			   blocks, out_of_time, np, sleep, preempt,
			   prev->state, signal_pending(prev));
	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);


	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		unlink(entry->scheduled);

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (can't have timers running
	 * for blocked jobs). Preemptions go first for the same reason.
	 */
	if (!np && (out_of_time || sleep) && !blocks && !preempt)
		job_completion(entry->scheduled, !sleep);

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&mc), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) &&
	    entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	raw_spin_unlock(&mc_lock);

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("mc_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif


	return next;
}
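
/* In short: if the linked and scheduled tasks differ (and the scheduled
 * task is preemptable or has blocked), mc_schedule() returns the linked
 * task (possibly NULL) and marks the old task as no longer scheduled;
 * otherwise it keeps the previous real-time task, or returns NULL to let
 * Linux run background work.
 */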


/* _finish_switch - we just finished the switch away from prev
 */
static void mc_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* entry = &__get_cpu_var(mc_cpu_entries);

	entry->scheduled = is_realtime(current) ? current : NULL;
#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "switched away from\n");
#endif
}


/* Prepare a task for running in RT mode
 */
static void mc_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long flags;
	cpu_entry_t* entry;

	TRACE("mc: task new %d\n", t->pid);

	raw_spin_lock_irqsave(&mc_lock, flags);

	/* setup job params */
	release_at(t, litmus_clock());

	if (running) {
		entry = &per_cpu(mc_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);

#ifdef CONFIG_RELEASE_MASTER
		if (entry->cpu != mc.release_master) {
#endif
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			/* do not schedule on release master */
			preempt(entry); /* force resched */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
#endif
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on = NO_CPU;

	mc_job_arrival(t);
	raw_spin_unlock_irqrestore(&mc_lock, flags);
}

static void mc_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	lt_t now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	raw_spin_lock_irqsave(&mc_lock, flags);
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
		set_rt_flags(task, RT_F_RUNNING);
	} else {
		now = litmus_clock();
		if (is_tardy(task, now)) {
			/* new sporadic release */
			release_at(task, now);
			sched_trace_task_release(task);
		}
		else {
			if (task->rt.time_slice) {
				/* came back in time before deadline
				 */
				set_rt_flags(task, RT_F_RUNNING);
			}
		}
	}
	mc_job_arrival(task);
	raw_spin_unlock_irqrestore(&mc_lock, flags);
}

static void mc_task_block(struct task_struct *t)
{
	unsigned long flags;

	TRACE_TASK(t, "block at %llu\n", litmus_clock());

	/* unlink if necessary */
	raw_spin_lock_irqsave(&mc_lock, flags);
	unlink(t);
	raw_spin_unlock_irqrestore(&mc_lock, flags);

	BUG_ON(!is_realtime(t));
}


static void mc_task_exit(struct task_struct * t)
{
	unsigned long flags;

	/* unlink if necessary */
	raw_spin_lock_irqsave(&mc_lock, flags);
	unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		mc_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	raw_spin_unlock_irqrestore(&mc_lock, flags);

	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}

static long mc_admit_task(struct task_struct* tsk)
{
	return 0;
}

static long mc_activate_plugin(void)
{
	int cpu;
	cpu_entry_t *entry;

	bheap_init(&mc_cpu_heap);
#ifdef CONFIG_RELEASE_MASTER
	mc.release_master = atomic_read(&release_master_cpu);
#endif

	for_each_online_cpu(cpu) {
		entry = &per_cpu(mc_cpu_entries, cpu);
		bheap_node_init(&entry->hn, entry);
		atomic_set(&entry->will_schedule, 0);
		entry->linked    = NULL;
		entry->scheduled = NULL;
#ifdef CONFIG_RELEASE_MASTER
		if (cpu != mc.release_master) {
#endif
			TRACE("MC: Initializing CPU #%d.\n", cpu);
			update_cpu_position(entry);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			TRACE("MC: CPU %d is release master.\n", cpu);
		}
#endif
	}
	return 0;
}

/* Plugin object */
static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "MC",
	.finish_switch		= mc_finish_switch,
	.tick			= mc_tick,
	.task_new		= mc_task_new,
	.complete_job		= complete_job,
	.task_exit		= mc_task_exit,
	.schedule		= mc_schedule,
	.task_wake_up		= mc_task_wake_up,
	.task_block		= mc_task_block,
#ifdef CONFIG_FMLP
	.fmlp_active		= 1,
	.pi_block		= mc_pi_block,
	.inherit_priority	= mc_inherit_priority,
	.return_priority	= mc_return_priority,
#endif
	.admit_task		= mc_admit_task,
	.activate_plugin	= mc_activate_plugin,
};


static int __init init_mc(void)
{
	int cpu;
	cpu_entry_t *entry;

	bheap_init(&mc_cpu_heap);
	/* initialize CPU state */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		entry = &per_cpu(mc_cpu_entries, cpu);
		mc_cpus[cpu] = entry;
		atomic_set(&entry->will_schedule, 0);
		entry->cpu = cpu;
		entry->hn  = &mc_heap_node[cpu];
		bheap_node_init(&entry->hn, entry);
	}
	edf_domain_init(&mc, NULL, mc_release_jobs);
	return register_sched_plugin(&mc_plugin);
}


module_init(init_mc);