author		John Calandrino <jmc@cs.unc.edu>	2008-05-18 16:28:30 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2008-05-18 16:31:39 -0400
commit		37f3d488cc35844eb6d8c4e94e79f1680fcd3af8
tree		767dcc76b35d740d22b7c4ce9c918e91532f6bd8
parent		ea837a9913f3728285e00a269f4c6314c50f3be9
Add the C-EDF plugin.
-rw-r--r--	litmus/Makefile	2
-rwxr-xr-x	litmus/sched_cedf.c	716
2 files changed, 717 insertions(+), 1 deletion(-)
diff --git a/litmus/Makefile b/litmus/Makefile
index 0e3bedd165..bfe393eb56 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -4,7 +4,7 @@
 
 obj-y = sched_plugin.o litmus.o sched_trace.o \
 	edf_common.o jobs.o\
-	sched_gsn_edf.o sched_psn_edf.o \
+	sched_gsn_edf.o sched_psn_edf.o sched_cedf.o \
 	rt_domain.o fdso.o sync.o \
 	fmlp.o srp.o norqlock.o
 
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
new file mode 100755
index 0000000000..2cfa0b38ac
--- /dev/null
+++ b/litmus/sched_cedf.c
@@ -0,0 +1,716 @@
1/*
2 * litmus/sched_cedf.c
3 *
4 * Implementation of the Clustered EDF (C-EDF) scheduling algorithm.
5 * Linking is included so that support for synchronization (e.g., through
6 * the implementation of a "CSN-EDF" algorithm) can be added later if desired.
7 *
8 * This version uses the simple approach and serializes all scheduling
9 * decisions by the use of a queue lock. This is probably not the
10 * best way to do it, but it should suffice for now.
11 */
12
13#include <linux/spinlock.h>
14#include <linux/percpu.h>
15#include <linux/sched.h>
16#include <linux/list.h>
17
18#include <litmus/litmus.h>
19#include <litmus/jobs.h>
20#include <litmus/sched_plugin.h>
21#include <litmus/edf_common.h>
22#include <litmus/sched_trace.h>
23
24#include <linux/module.h>
25
26/* Overview of C-EDF operations.
27 *
28 * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage
29 * structure (NOT the actually scheduled
30 * task). If there is another linked task To
31 * already, it will set To->linked_on = NO_CPU
32 * (thereby removing its association with this
33 * CPU). However, it will not requeue the
34 * previously linked task (if any). It will set
35 * T's state to RT_F_RUNNING and check whether
36 * it is already running somewhere else. If T
37 * is scheduled somewhere else it will link
38 * it to that CPU instead (and pull the linked
39 * task to cpu). T may be NULL.
40 *
41 * unlink(T) - Unlink removes T from all scheduler data
42 * structures. If it is linked to some CPU it
43 * will link NULL to that CPU. If it is
44 * currently queued in the cedf queue for
45 * a partition, it will be removed from
46 * the T->rt_list. It is safe to call
47 * unlink(T) if T is not linked. T may not
48 * be NULL.
49 *
50 * requeue(T) - Requeue will insert T into the appropriate
51 * queue. If T has been released already, it
52 * will go into the ready queue. If T's
53 * release time is still in the future, it
54 * will go into the release queue, i.e.,
55 * requeue() never releases a job itself.
56 * That means that T's release time/job
57 * no/etc. has to be updated before requeue(T)
58 * is called. It is not safe to call
59 * requeue(T) when T is already queued.
60 * T may not be NULL.
61 *
62 * cedf_job_arrival(T) - This is the catch-all function when T enters
63 * the system after either a suspension or at a
64 * job release. It will queue T (which means it
65 * is not safe to call cedf_job_arrival(T) if
66 * T is already queued) and then check whether a
67 * preemption is necessary. If a preemption is
68 * necessary it will update the linkage
69 * accordingly and cause schedule() to be called
70 * (either with an IPI or need_resched). It is
71 * safe to call cedf_job_arrival(T) if T's
72 * next job has not been actually released yet
73 * (release time in the future). T will be put
74 * on the release queue in that case.
75 *
76 * job_completion(T) - Take care of everything that needs to be done
77 * to prepare T for its next release and place
78 * it in the right queue with
79 * cedf_job_arrival().
80 *
81 *
82 * When we know that T is linked to CPU, then link_task_to_cpu(NULL, CPU) is
83 * equivalent to unlink(T). Note that if you unlink a task from a CPU, none of
84 * these functions will automatically pull a pending task from the ready queue
85 * and link it in its place. That is the job of the calling function (by means
86 * of __take_ready).
87 */
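/* A minimal sketch of how these operations compose on a job release
 * (condensed from cedf_job_release()/cedf_job_arrival() below; all names are
 * the ones used in this file, and everything runs under the cluster lock):
 *
 *	spin_lock_irqsave(&cedf->slock, flags);
 *	requeue(t);                              // t enters ready/release queue
 *	last = tail of cedf_cpu_queue;           // lowest-priority CPU
 *	if (edf_preemption_needed(edf, last->linked)) {
 *		t = __take_ready(edf);           // highest-priority pending job
 *		if (last->linked)
 *			requeue(last->linked);   // displaced job is put back
 *		link_task_to_cpu(t, last);       // update linkage only
 *		preempt(last);                   // force schedule() via IPI/flag
 *	}
 *	spin_unlock_irqrestore(&cedf->slock, flags);
 */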
88
89/* cpu_entry_t - maintain the linked and scheduled state
90 */
91typedef struct {
92 int cpu;
93 struct task_struct* linked; /* only RT tasks */
94 struct task_struct* scheduled; /* only RT tasks */
95 struct list_head list;
96 atomic_t will_schedule; /* prevent unneeded IPIs */
97} cpu_entry_t;
98DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries);
99
100cpu_entry_t* cedf_cpu_entries_array[NR_CPUS];
101
102#define set_will_schedule() \
103 (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 1))
104#define clear_will_schedule() \
105 (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 0))
106#define test_will_schedule(cpu) \
107 (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule))
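/* Illustrative use of the will_schedule flag (a condensed view of what
 * cedf_tick() and preempt() below actually do, not additional logic):
 *
 *	// local CPU has already decided to reschedule:
 *	set_tsk_need_resched(t);
 *	set_will_schedule();
 *
 *	// remote preemption: skip the IPI if that CPU will reschedule anyway
 *	if (!test_will_schedule(entry->cpu))
 *		smp_send_reschedule(entry->cpu);
 */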
108
109#define NO_CPU 0xffffffff
110
111/* Cluster size -- currently four. This is a variable to allow for
112 * the possibility of changing the cluster size online in the future.
113 */
114int cluster_size = 4;
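/* Worked example: with cluster_size == 4 and the initialization in
 * init_cedf() below, CPUs 0-3 share one cedf_domain_t, CPUs 4-7 the next,
 * and so on; i.e., CPU n belongs to the cluster whose first_cpu is
 * (n / cluster_size) * cluster_size.
 */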
115
116typedef struct {
117 rt_domain_t domain;
118 int first_cpu;
119 int last_cpu;
120
121 /* the cpus queue themselves according to priority in here */
122 struct list_head cedf_cpu_queue;
123
124 /* per-partition spinlock: protects the domain and
125 * serializes scheduling decisions
126 */
127#define slock domain.ready_lock
128} cedf_domain_t;
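/* Note on the #define above: spin_lock(&cedf->slock) expands to
 * spin_lock(&cedf->domain.ready_lock), so a single per-cluster lock covers
 * both ready-queue manipulation and scheduling decisions. Sketch of the
 * locking pattern used throughout this file:
 *
 *	spin_lock_irqsave(&cedf->slock, flags);    // == domain.ready_lock
 *	...                                        // queue ops + linking
 *	spin_unlock_irqrestore(&cedf->slock, flags);
 */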
129
130DEFINE_PER_CPU(cedf_domain_t*, cedf_domains) = NULL;
131
132cedf_domain_t* cedf_domains_array[NR_CPUS];
133
134
135/* These are defined similarly to partitioning, except that a
136 * task's partition is any cpu of the cluster to which it
137 * is assigned, typically the lowest-numbered cpu.
138 */
139#define local_edf (&__get_cpu_var(cedf_domains)->domain)
140#define local_cedf __get_cpu_var(cedf_domains)
141#define remote_edf(cpu) (&per_cpu(cedf_domains, cpu)->domain)
142#define remote_cedf(cpu) per_cpu(cedf_domains, cpu)
143#define task_edf(task) remote_edf(get_partition(task))
144#define task_cedf(task) remote_cedf(get_partition(task))
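/* Illustrative expansion (assuming a task T with get_partition(T) == 5 and
 * cluster_size == 4, i.e., at least 8 CPUs):
 *
 *	task_cedf(T) -> remote_cedf(5) -> per_cpu(cedf_domains, 5),
 *	                which is the domain covering CPUs 4-7,
 *	task_edf(T)  -> &task_cedf(T)->domain (the cluster's rt_domain_t).
 */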
145
146/* update_cpu_position - Move the cpu entry to the correct place to maintain
147 * order in the cpu queue. Caller must hold cedf lock.
148 *
149 * This really should be a heap.
150 */
151static void update_cpu_position(cpu_entry_t *entry)
152{
153 cpu_entry_t *other;
154 struct list_head *cedf_cpu_queue =
155 &(remote_cedf(entry->cpu))->cedf_cpu_queue;
156 struct list_head *pos;
157
158 BUG_ON(!cedf_cpu_queue);
159
160 if (likely(in_list(&entry->list)))
161 list_del(&entry->list);
162 /* if this CPU has no linked real-time job, it just moves
163 * to the end of the queue
164 */
165 if (entry->linked) {
166 list_for_each(pos, cedf_cpu_queue) {
167 other = list_entry(pos, cpu_entry_t, list);
168 if (edf_higher_prio(entry->linked, other->linked)) {
169 __list_add(&entry->list, pos->prev, pos);
170 return;
171 }
172 }
173 }
174 /* if we get this far we have the lowest priority job */
175 list_add_tail(&entry->list, cedf_cpu_queue);
176}
177
178/* link_task_to_cpu - Update the link of a CPU.
179 * Handles the case where the to-be-linked task is already
180 * scheduled on a different CPU.
181 */
182static noinline void link_task_to_cpu(struct task_struct* linked,
183 cpu_entry_t *entry)
184{
185 cpu_entry_t *sched;
186 struct task_struct* tmp;
187 int on_cpu;
188
189 BUG_ON(linked && !is_realtime(linked));
190
191 /* Cannot link task to a CPU that doesn't belong to its partition... */
192 BUG_ON(linked && remote_cedf(entry->cpu) != task_cedf(linked));
193
194 /* Currently linked task is set to be unlinked. */
195 if (entry->linked) {
196 entry->linked->rt_param.linked_on = NO_CPU;
197 }
198
199 /* Link new task to CPU. */
200 if (linked) {
201 set_rt_flags(linked, RT_F_RUNNING);
202 /* handle the case that the task is already scheduled somewhere else! */
203 on_cpu = linked->rt_param.scheduled_on;
204 if (on_cpu != NO_CPU) {
205 sched = &per_cpu(cedf_cpu_entries, on_cpu);
206 /* this should only happen if not linked already */
207 BUG_ON(sched->linked == linked);
208
209 /* If we are already scheduled on the CPU to which we
210 * wanted to link, we don't need to do the swap --
211 * we just link ourselves to the CPU and depend on
212 * the caller to get things right.
213 */
214 if (entry != sched) {
215 tmp = sched->linked;
216 linked->rt_param.linked_on = sched->cpu;
217 sched->linked = linked;
218 update_cpu_position(sched);
219 linked = tmp;
220 }
221 }
222 if (linked) /* might be NULL due to swap */
223 linked->rt_param.linked_on = entry->cpu;
224 }
225 entry->linked = linked;
226
227 if (entry->linked)
228 TRACE_TASK(entry->linked, "linked to CPU %d, state:%d\n",
229 entry->cpu, entry->linked->state);
230 else
231 TRACE("NULL linked to CPU %d\n", entry->cpu);
232
233 update_cpu_position(entry);
234}
235
236/* unlink - Make sure a task is not linked any longer to an entry
237 * where it was linked before. Must hold cedf_lock.
238 */
239static noinline void unlink(struct task_struct* t)
240{
241 cpu_entry_t *entry;
242
243 if (unlikely(!t)) {
244 TRACE_BUG_ON(!t);
245 return;
246 }
247
248 if (t->rt_param.linked_on != NO_CPU) {
249 /* unlink */
250 entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
251 t->rt_param.linked_on = NO_CPU;
252 link_task_to_cpu(NULL, entry);
253 } else if (in_list(&t->rt_list)) {
254 /* This is an interesting situation: t is scheduled,
255 * but was just recently unlinked. It cannot be
256 * linked anywhere else (because then it would have
257 * been relinked to this CPU), thus it must be in some
258 * queue. We must remove it from the list in this
259 * case.
260 */
261 list_del(&t->rt_list);
262 }
263}
264
265
266/* preempt - force a CPU to reschedule
267 */
268static noinline void preempt(cpu_entry_t *entry)
269{
270 /* We cannot make the is_np() decision here if it is a remote CPU
271 * because requesting exit_np() requires that we currently use the
272 * address space of the task. Thus, in the remote case we just send
273 * the IPI and let schedule() handle the problem.
274 */
275
276 if (smp_processor_id() == entry->cpu) {
277 if (entry->scheduled && is_np(entry->scheduled))
278 request_exit_np(entry->scheduled);
279 else
280 set_tsk_need_resched(current);
281 } else
282 /* if it is a remote CPU, we have to defer the
283 * decision to that CPU
284 * FIXME: We could save a few IPIs here if we leave the flag
285 * set while we are waiting for an np_exit().
286 */
287 if (!test_will_schedule(entry->cpu))
288 smp_send_reschedule(entry->cpu);
289}
290
291/* requeue - Put an unlinked task into c-edf domain.
292 * Caller must hold cedf_lock.
293 */
294static noinline void requeue(struct task_struct* task)
295{
296 cedf_domain_t* cedf;
297 rt_domain_t* edf;
298
299 BUG_ON(!task);
300 /* sanity check rt_list before insertion */
301 BUG_ON(in_list(&task->rt_list));
302
303 /* Get correct real-time domain. */
304 cedf = task_cedf(task);
305 edf = &cedf->domain;
306
307 if (get_rt_flags(task) == RT_F_SLEEP) {
308 /* this task has expired;
309 * _schedule has already taken care of updating
310 * the release time and
311 * deadline. We only need to check whether it has been released.
312 */
313 if (is_released(task, litmus_clock()))
314 __add_ready(edf, task);
315 else {
316 /* it has got to wait */
317 add_release(edf, task);
318 }
319
320 } else
321 /* this is a forced preemption,
322 * so the task stays in the ready_queue;
323 * we only need to make it available to others again
324 */
325 __add_ready(edf, task);
326}
327
328/* cedf_job_arrival: task is either resumed or released */
329static noinline void cedf_job_arrival(struct task_struct* task)
330{
331 cpu_entry_t* last;
332 cedf_domain_t* cedf;
333 rt_domain_t* edf;
334 struct list_head *cedf_cpu_queue;
335
336 BUG_ON(!task);
337
338 /* Get correct real-time domain. */
339 cedf = task_cedf(task);
340 edf = &cedf->domain;
341 cedf_cpu_queue = &cedf->cedf_cpu_queue;
342
343 BUG_ON(!cedf);
344 BUG_ON(!edf);
345 BUG_ON(!cedf_cpu_queue);
346 BUG_ON(list_empty(cedf_cpu_queue));
347
348 /* first queue arriving job */
349 requeue(task);
350
351 /* then check for any necessary preemptions */
352 last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list);
353 if (edf_preemption_needed(edf, last->linked)) {
354 /* preemption necessary */
355 task = __take_ready(edf);
356 TRACE("job_arrival: task %d linked to %d, state:%d\n",
357 task->pid, last->cpu, task->state);
358 if (last->linked)
359 requeue(last->linked);
360
361 link_task_to_cpu(task, last);
362 preempt(last);
363 }
364}
365
366/* check for current job releases */
367static void cedf_job_release(struct task_struct* t, rt_domain_t* _)
368{
369 cedf_domain_t* cedf = task_cedf(t);
370 unsigned long flags;
371
372 BUG_ON(!t);
373 BUG_ON(!cedf);
374
375 spin_lock_irqsave(&cedf->slock, flags);
376 sched_trace_job_release(t);
377 cedf_job_arrival(t);
378 spin_unlock_irqrestore(&cedf->slock, flags);
379}
380
381/* cedf_tick - this function is called for every local timer
382 * interrupt.
383 *
384 * checks whether the current task has expired and checks
385 * whether we need to preempt it if it has not expired
386 */
387static void cedf_tick(struct task_struct* t)
388{
389 BUG_ON(!t);
390
391 if (is_realtime(t) && budget_exhausted(t)) {
392 if (!is_np(t)) {
393 /* np tasks will be preempted when they become
394 * preemptable again
395 */
396 set_tsk_need_resched(t);
397 set_will_schedule();
398 TRACE("cedf_scheduler_tick: "
399 "%d is preemptable (state:%d) "
400 " => FORCE_RESCHED\n", t->pid, t->state);
401 } else {
402 TRACE("cedf_scheduler_tick: "
403 "%d is non-preemptable (state:%d), "
404 "preemption delayed.\n", t->pid, t->state);
405 request_exit_np(t);
406 }
407 }
408}
409
410/* caller holds cedf_lock */
411static noinline void job_completion(struct task_struct *t)
412{
413 BUG_ON(!t);
414
415 sched_trace_job_completion(t);
416
417 TRACE_TASK(t, "job_completion(). [state:%d]\n", t->state);
418
419 /* set flags */
420 set_rt_flags(t, RT_F_SLEEP);
421 /* prepare for next period */
422 prepare_for_next_period(t);
423 /* unlink */
424 unlink(t);
425 /* requeue
426 * But don't requeue a blocking task. */
427 if (is_running(t))
428 cedf_job_arrival(t);
429}
430
431/* Getting schedule() right is a bit tricky. schedule() may not make any
432 * assumptions on the state of the current task since it may be called for a
433 * number of reasons. The reasons include: a scheduler_tick() determined that
434 * rescheduling was necessary, sys_exit_np() was called, some Linux
435 * subsystem determined so, or even (in the worst case) there is a bug
436 * hidden somewhere. Thus, we must take extreme care to determine what the
437 * current state is.
438 *
439 * The CPU could currently be scheduling a task (or not) and be linked to a task (or not).
440 *
441 * The following assertions for the scheduled task could hold:
442 *
443 * - !is_running(scheduled) // the job blocks
444 * - scheduled->time_slice == 0 // the job completed (forcefully)
445 * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall)
446 * - linked != scheduled // we need to reschedule (for any reason)
447 * - is_np(scheduled) // rescheduling must be delayed,
448 * sys_exit_np must be requested
449 *
450 * Any of these can occur together.
451 */
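/* Two illustrative cases for the state bits computed below (sketch only):
 *
 * 1) prev exhausted its budget, is preemptable, and is still the linked job:
 *    exists=1, blocks=0, out_of_time=1, np=0, preempt=0
 *    -> job_completion(prev), then a pending job (if any) is linked and
 *       returned as next.
 *
 * 2) prev blocked (e.g., on a semaphore):
 *    exists=1, blocks=1
 *    -> unlink(prev), no job_completion (no budget enforcement for blocked
 *       jobs), and a pending job (if any) is linked in its place.
 */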
452static struct task_struct* cedf_schedule(struct task_struct * prev)
453{
454 cedf_domain_t* cedf = local_cedf;
455 rt_domain_t* edf = &cedf->domain;
456 cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries);
457 int out_of_time, sleep, preempt, np,
458 exists, blocks;
459 struct task_struct* next = NULL;
460
461 BUG_ON(!prev);
462 BUG_ON(!cedf);
463 BUG_ON(!edf);
464 BUG_ON(!entry);
465 BUG_ON(cedf != remote_cedf(entry->cpu));
466 BUG_ON(is_realtime(prev) && cedf != task_cedf(prev));
467
468 /* Will be released in finish_switch. */
469 spin_lock(&cedf->slock);
470 clear_will_schedule();
471
472 /* sanity checking */
473 BUG_ON(entry->scheduled && entry->scheduled != prev);
474 BUG_ON(entry->scheduled && !is_realtime(prev));
475 BUG_ON(is_realtime(prev) && !entry->scheduled);
476
477 /* (0) Determine state */
478 exists = entry->scheduled != NULL;
479 blocks = exists && !is_running(entry->scheduled);
480 out_of_time = exists && budget_exhausted(entry->scheduled);
481 np = exists && is_np(entry->scheduled);
482 sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
483 preempt = entry->scheduled != entry->linked;
484
485 /* If a task blocks we have no choice but to reschedule.
486 */
487 if (blocks)
488 unlink(entry->scheduled);
489
490 /* Request a sys_exit_np() call if we would like to preempt but cannot.
491 * We need to make sure to update the link structure anyway in case
492 * that we are still linked. Multiple calls to request_exit_np() don't
493 * hurt.
494 */
495 if (np && (out_of_time || preempt || sleep)) {
496 unlink(entry->scheduled);
497 request_exit_np(entry->scheduled);
498 }
499
500 /* Any task that is preemptable and either exhausts its execution
501 * budget or wants to sleep completes. We may have to reschedule after
502 * this. Don't do a job completion if the job blocks (can't have timers
503 * running for blocked jobs). Preemptions go first for the same reason.
504 */
505 if (!np && (out_of_time || sleep) && !blocks && !preempt)
506 job_completion(entry->scheduled);
507
508 /* Link pending task if we became unlinked.
509 */
510 if (!entry->linked)
511 link_task_to_cpu(__take_ready(edf), entry);
512
513 /* The final scheduling decision. Do we need to switch for some reason?
514 * If linked is different from scheduled, select linked as next.
515 */
516 if ((!np || blocks) &&
517 entry->linked != entry->scheduled) {
518 /* Schedule a linked job? */
519 if (entry->linked) {
520 entry->linked->rt_param.scheduled_on = entry->cpu;
521 next = entry->linked;
522 }
523 if (entry->scheduled) {
524 /* not gonna be scheduled soon */
525 entry->scheduled->rt_param.scheduled_on = NO_CPU;
526 TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
527 }
528 } else
529 /* Only override the Linux scheduler if we have a real-time task
530 * scheduled that needs to continue.
531 */
532 if (exists)
533 next = prev;
534
535 spin_unlock(&cedf->slock);
536
537 return next;
538}
539
540/* _finish_switch - we just finished the switch away from prev
541 */
542static void cedf_finish_switch(struct task_struct *prev)
543{
544 cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries);
545
546 BUG_ON(!prev);
547 BUG_ON(!entry);
548
549 entry->scheduled = is_realtime(current) ? current : NULL;
550}
551
552/* Prepare a task for running in RT mode
553 */
554static void cedf_task_new(struct task_struct *t, int on_rq, int running)
555{
556 unsigned long flags;
557 cedf_domain_t* cedf = task_cedf(t);
558 cpu_entry_t* entry;
559
560 BUG_ON(!cedf);
561
562 spin_lock_irqsave(&cedf->slock, flags);
563 if (running) {
564 entry = &per_cpu(cedf_cpu_entries, task_cpu(t));
565 BUG_ON(!entry);
566 BUG_ON(entry->scheduled);
567 entry->scheduled = t;
568 t->rt_param.scheduled_on = task_cpu(t);
569 } else
570 t->rt_param.scheduled_on = NO_CPU;
571 t->rt_param.linked_on = NO_CPU;
572
573 /* setup job params */
574 release_at(t, litmus_clock());
575
576 cedf_job_arrival(t);
577 spin_unlock_irqrestore(&cedf->slock, flags);
578}
579
580
581static void cedf_task_wake_up(struct task_struct *task)
582{
583 unsigned long flags;
584 cedf_domain_t* cedf;
585 lt_t now;
586
587 BUG_ON(!task);
588
589 cedf = task_cedf(task);
590 BUG_ON(!cedf);
591
592 spin_lock_irqsave(&cedf->slock, flags);
593 /* We need to take suspensions because of semaphores into
594 * account! If a job resumes after being suspended due to acquiring
595 * a semaphore, it should never be treated as a new job release.
596 */
597 if (get_rt_flags(task) == RT_F_EXIT_SEM) {
598 set_rt_flags(task, RT_F_RUNNING);
599 } else {
600 now = litmus_clock();
601 if (is_tardy(task, now)) {
602 /* new sporadic release */
603 release_at(task, now);
604 sched_trace_job_release(task);
605 }
606 else if (task->time_slice)
607 /* came back in time before deadline
608 */
609 set_rt_flags(task, RT_F_RUNNING);
610 }
611 cedf_job_arrival(task);
612 spin_unlock_irqrestore(&cedf->slock, flags);
613}
614
615
616static void cedf_task_block(struct task_struct *t)
617{
618 unsigned long flags;
619
620 BUG_ON(!t);
621
622 /* unlink if necessary */
623 spin_lock_irqsave(&task_cedf(t)->slock, flags);
624 unlink(t);
625 spin_unlock_irqrestore(&task_cedf(t)->slock, flags);
626
627 BUG_ON(!is_realtime(t));
628 BUG_ON(t->rt_list.next != LIST_POISON1);
629 BUG_ON(t->rt_list.prev != LIST_POISON2);
630}
631
632static void cedf_task_exit(struct task_struct * t)
633{
634 unsigned long flags;
635
636 BUG_ON(!t);
637
638 /* unlink if necessary */
639 spin_lock_irqsave(&task_cedf(t)->slock, flags);
640 unlink(t);
641 spin_unlock_irqrestore(&task_cedf(t)->slock, flags);
642
643 BUG_ON(!is_realtime(t));
644 TRACE_TASK(t, "RIP\n");
645 BUG_ON(t->rt_list.next != LIST_POISON1);
646 BUG_ON(t->rt_list.prev != LIST_POISON2);
647}
648
649static long cedf_admit_task(struct task_struct* tsk)
650{
651 return (task_cpu(tsk) >= task_cedf(tsk)->first_cpu &&
652 task_cpu(tsk) <= task_cedf(tsk)->last_cpu) ? 0 : -EINVAL;
653}
654
655
656/* Plugin object */
657static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
658 .plugin_name = "C-EDF",
659 .finish_switch = cedf_finish_switch,
660 .tick = cedf_tick,
661 .task_new = cedf_task_new,
662 .complete_job = complete_job,
663 .task_exit = cedf_task_exit,
664 .schedule = cedf_schedule,
665 .task_wake_up = cedf_task_wake_up,
666 .task_block = cedf_task_block,
667 .admit_task = cedf_admit_task
668};
669
670static void cedf_domain_init(int first_cpu, int last_cpu)
671{
672 int cpu;
673
674 /* Create new domain for this cluster. */
675 cedf_domain_t *new_cedf_domain = kmalloc(sizeof(cedf_domain_t),
676 GFP_KERNEL);
677
678 /* Initialize cluster domain. */
679 edf_domain_init(&new_cedf_domain->domain, NULL,
680 cedf_job_release);
681 new_cedf_domain->first_cpu = first_cpu;
682 new_cedf_domain->last_cpu = last_cpu;
683 INIT_LIST_HEAD(&new_cedf_domain->cedf_cpu_queue);
684
685 /* Assign all cpus in cluster to point to this domain. */
686 for (cpu = first_cpu; cpu <= last_cpu; cpu++) {
687 remote_cedf(cpu) = new_cedf_domain;
688 cedf_domains_array[cpu] = new_cedf_domain;
689 }
690}
691
692static int __init init_cedf(void)
693{
694 int cpu;
695 cpu_entry_t *entry;
696
697 /* initialize CPU state */
698 for (cpu = 0; cpu < NR_CPUS; cpu++) {
699 entry = &per_cpu(cedf_cpu_entries, cpu);
700 cedf_cpu_entries_array[cpu] = entry;
701 atomic_set(&entry->will_schedule, 0);
702 entry->linked = NULL;
703 entry->scheduled = NULL;
704 entry->cpu = cpu;
705 INIT_LIST_HEAD(&entry->list);
706 }
707
708 /* initialize all cluster domains */
709 for (cpu = 0; cpu < NR_CPUS; cpu += cluster_size)
710 cedf_domain_init(cpu, cpu+cluster_size-1);
711
712 return register_sched_plugin(&cedf_plugin);
713}
714
715module_init(init_cedf);
716