-rw-r--r--  include/litmus/rt_param.h    34
-rw-r--r--  litmus/Makefile               3
-rw-r--r--  litmus/sched_edf_fm.c       565
3 files changed, 601 insertions, 1 deletion
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 1290e2939e33..6f43deacb3e1 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -35,6 +35,31 @@ typedef enum {
 	PRECISE_ENFORCEMENT /* NOT IMPLEMENTED - enforced with hrtimers */
 } budget_policy_t;
 
+
+/* The parameters for the EDF-fm scheduling algorithm.
+ * Each task may be fixed or migratory. Migratory tasks may
+ * migrate between 2 (contiguous) CPUs only. NR_CPUS_EDF_FM = 2.
+ */
+#define NR_CPUS_EDF_FM	2
+
+struct edffm_params {
+	/* EDF-fm: on which CPUs may a migratory task execute? */
+	unsigned int	cpus[NR_CPUS_EDF_FM];
+	/* How many CPUs are used by this task?
+	 * fixed = 0, migratory = (NR_CPUS_EDF_FM - 1)
+	 * Efficient way to allow writing cpus[nr_cpus].
+	 */
+	unsigned int	nr_cpus;
+	/* Fraction of this task's exec_cost that each CPU should handle.
+	 * Each fraction is stored as num/denom in a 2 x NR_CPUS_EDF_FM
+	 * matrix: fraction[0][i] is the numerator and fraction[1][i]
+	 * the denominator of the share assigned to cpus[i].
+	 */
+	lt_t		fraction[2][NR_CPUS_EDF_FM];
+};
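+
+/* Illustrative example (made-up values, not taken from the EDF-fm paper):
+ * a migratory task that runs on CPUs 0 and 1, with shares 2/3 on cpus[0]
+ * and 1/3 on cpus[1], could be described as:
+ *
+ *	struct edffm_params p = {
+ *		.cpus     = { 0, 1 },
+ *		.nr_cpus  = 1,          <- NR_CPUS_EDF_FM - 1, i.e. migratory
+ *		.fraction = {
+ *			{ 2, 1 },       <- numerators for cpus[0], cpus[1]
+ *			{ 3, 3 },       <- denominators for cpus[0], cpus[1]
+ *		},
+ *	};
+ */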
+
 /* Parameters for NPS-F semi-partitioned scheduling algorithm.
  * Each (cpu, budget) entry defines the share ('budget' in ns, a % of
  * the slot_length) of the notional processor on the CPU 'cpu'.
@@ -86,6 +111,9 @@ struct rt_task {
 
 	/* parameters used by the semi-partitioned algorithms */
 	union {
+		/* EDF-Fm; defined in sched_edf_fm.c */
+		struct edffm_params fm;
+
 		/* NPS-F; defined in sched_npsf.c
 		 * id for the server (notional processor) that holds
 		 * this task; the same npfs_id can be assigned to "the same"
@@ -243,6 +271,12 @@ struct rt_param {
 
 	/* runtime info for the semi-part plugins */
 	union {
+		/* EDF-Fm runtime information
+		 * number of jobs handled by this cpu
+		 * (to determine next cpu for a migrating task)
+		 */
+		unsigned int	cpu_job_no[NR_CPUS_EDF_FM];
+
 		/* EDF-WM runtime information */
 		struct {
 			/* at which exec time did the current slice start? */
diff --git a/litmus/Makefile b/litmus/Makefile
index f26736964479..5533a58eb684 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -16,7 +16,8 @@ obj-y = sched_plugin.o litmus.o \
 	sched_gsn_edf.o \
 	sched_psn_edf.o \
 	sched_edf_wm.o \
-	sched_npsf.o
+	sched_npsf.o \
+	sched_edf_fm.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
diff --git a/litmus/sched_edf_fm.c b/litmus/sched_edf_fm.c
new file mode 100644
index 000000000000..b7210725c072
--- /dev/null
+++ b/litmus/sched_edf_fm.c
@@ -0,0 +1,565 @@
+/*
+ * litmus/sched_edf_fm.c
+ *
+ * Implementation of the EDF-fm scheduling algorithm.
+ */
+
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <linux/module.h>
+
+#include <litmus/litmus.h>
+#include <litmus/jobs.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/edf_common.h>
+
+typedef struct {
+	rt_domain_t		domain;
+	int			cpu;
+	struct task_struct*	scheduled; /* only RT tasks */
+/* domain lock */
+#define slock domain.ready_lock
+} edffm_domain_t;
+
+DEFINE_PER_CPU(edffm_domain_t, edffm_domains);
+
+#define local_edffm		(&__get_cpu_var(edffm_domains))
+#define remote_edf(cpu)		(&per_cpu(edffm_domains, cpu).domain)
+#define remote_edffm(cpu)	(&per_cpu(edffm_domains, cpu))
+#define task_edf(task)		remote_edf(get_partition(task))
+#define task_edffm(task)	remote_edffm(get_partition(task))
+
+#define edffm_params(t)		(t->rt_param.task_params.semi_part.fm)
+
+/* Is the task a migratory task? */
+#define is_migrat_task(task)	(edffm_params(task).nr_cpus)
+/* t is on the wrong CPU (it should be requeued properly) */
+#define wrong_cpu(t)	(is_migrat_task((t)) && task_cpu((t)) != get_partition((t)))
+/* Get next CPU */
+#define migrat_next_cpu(t)	\
+	((tsk_rt(t)->task_params.cpu == edffm_params(t).cpus[0]) ? \
+		edffm_params(t).cpus[1] : \
+		edffm_params(t).cpus[0])
+/* Get current cpu */
+#define migrat_cur_cpu(t)	\
+	((tsk_rt(t)->task_params.cpu == edffm_params(t).cpus[0]) ? \
+		edffm_params(t).cpus[0] : \
+		edffm_params(t).cpus[1])
+/* Manipulate share for current cpu */
+#define cur_cpu_fract_num(t)	\
+	((tsk_rt(t)->task_params.cpu == edffm_params(t).cpus[0]) ? \
+		edffm_params(t).fraction[0][0] : \
+		edffm_params(t).fraction[0][1])
+#define cur_cpu_fract_den(t)	\
+	((tsk_rt(t)->task_params.cpu == edffm_params(t).cpus[0]) ? \
+		edffm_params(t).fraction[1][0] : \
+		edffm_params(t).fraction[1][1])
+/* Get job number for current cpu */
+#define cur_cpu_job_no(t)	\
+	((tsk_rt(t)->task_params.cpu == edffm_params(t).cpus[0]) ? \
+		tsk_rt(t)->semi_part.cpu_job_no[0] : \
+		tsk_rt(t)->semi_part.cpu_job_no[1])
+/* What is the current cpu position in the array? */
+#define edffm_cpu_pos(cpu,t)	\
+	((cpu == edffm_params(t).cpus[0]) ? \
+		0 : 1)
+
+/*
+ * EDF-fm: migratory tasks have higher prio than fixed, EDF in both classes.
+ * (Both first and second may be NULL).
+ */
+int edffm_higher_prio(struct task_struct* first, struct task_struct* second)
+{
+	if ((first && edffm_params(first).nr_cpus) ||
+	    (second && edffm_params(second).nr_cpus)) {
+		if ((first && edffm_params(first).nr_cpus) &&
+		    (second && edffm_params(second).nr_cpus))
+			/* both are migrating */
+			return edf_higher_prio(first, second);
+
+		if (first && edffm_params(first).nr_cpus)
+			/* first is migrating */
+			return 1;
+		else
+			/* second is migrating */
+			return 0;
+	}
+
+	/* both are fixed or not real time */
+	return edf_higher_prio(first, second);
+}
+
+/* need_to_preempt - check whether the task t needs to be preempted
+ *                   call only with irqs disabled and with ready_lock acquired
+ */
+int edffm_preemption_needed(rt_domain_t* rt, struct task_struct *t)
+{
+	/* we need the read lock for edf_ready_queue */
+	/* no need to preempt if there is nothing pending */
+	if (!__jobs_pending(rt))
+		return 0;
+	/* we need to reschedule if t doesn't exist */
+	if (!t)
+		return 1;
+
+	/* make sure to get non-rt stuff out of the way */
+	return !is_realtime(t) || edffm_higher_prio(__next_ready(rt), t);
+}
+
+/* we assume the lock is being held */
+static void preempt(edffm_domain_t *edffm)
+{
+	preempt_if_preemptable(edffm->scheduled, edffm->cpu);
+}
+
+static void edffm_release_jobs(rt_domain_t* rt, struct bheap* tasks)
+{
+	unsigned long flags;
+	edffm_domain_t *edffm = container_of(rt, edffm_domain_t, domain);
+
+	raw_spin_lock_irqsave(&edffm->slock, flags);
+
+	__merge_ready(rt, tasks);
+
+	if (edffm_preemption_needed(rt, edffm->scheduled))
+		preempt(edffm);
+
+	raw_spin_unlock_irqrestore(&edffm->slock, flags);
+}
+
+/* EDF-fm uses the "release_master" field to force the next release for
+ * the task 'task' to happen on a remote CPU. The remote cpu for task is
+ * previously set up during job_completion() taking into consideration
+ * whether a task is a migratory task or not.
+ */
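+/* Note: rt->tobe_lock is held across the temporary change of
+ * rt->release_master below, so concurrent release-queue updates cannot
+ * observe the borrowed value before it is reset to NO_CPU.
+ */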
+static inline void
+edffm_add_release_remote(struct task_struct *task)
+{
+	unsigned long flags;
+	rt_domain_t *rt = task_edf(task);
+
+	raw_spin_lock_irqsave(&rt->tobe_lock, flags);
+
+	/* "modify" destination cpu */
+	rt->release_master = get_partition(task);
+
+	TRACE_TASK(task, "Add remote release: smp_proc_id = %d, cpu = %d, remote = %d\n",
+			smp_processor_id(), task_cpu(task), rt->release_master);
+
+	/* trigger future release */
+	__add_release(rt, task);
+
+	/* reset proper release_master and unlock */
+	rt->release_master = NO_CPU;
+	raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
+}
+
+/* Acquire both ready-queue locks in a fixed (address) order to avoid
+ * ABBA deadlock. This is called with interrupts disabled and with the
+ * rq->lock held (from schedule()).
+ */
+static noinline void double_domain_lock(edffm_domain_t *dom1, edffm_domain_t *dom2)
+{
+	if (dom1 == dom2) {
+		/* fake */
+		raw_spin_lock(&dom1->slock);
+	} else {
+		if (dom1 < dom2) {
+			raw_spin_lock(&dom1->slock);
+			raw_spin_lock(&dom2->slock);
+			TRACE("acquired %d and %d\n", dom1->cpu, dom2->cpu);
+		} else {
+			raw_spin_lock(&dom2->slock);
+			raw_spin_lock(&dom1->slock);
+			TRACE("acquired %d and %d\n", dom2->cpu, dom1->cpu);
+		}
+	}
+}
+
+/* Directly insert a task in a remote ready queue. This function
+ * should only be called if this task is a migrating task and its
+ * last job for this CPU just completed (a new one is released for
+ * a remote CPU), but the new job is already tardy.
+ */
+static noinline void insert_task_in_remote_ready(struct task_struct *task)
+{
+	edffm_domain_t *this = remote_edffm(task_cpu(task));
+	edffm_domain_t *remote = remote_edffm(get_partition(task));
+
+	BUG_ON(get_partition(task) != remote->cpu);
+
+	TRACE_TASK(task, "Migrate From P%d -> To P%d\n",
+			this->cpu, remote->cpu);
+	TRACE_TASK(task, "Inserting in remote ready queue\n");
+
+	WARN_ON(!irqs_disabled());
+
+	raw_spin_unlock(&this->slock);
+	mb();
+	TRACE_TASK(task, "edffm_lock %d released\n", this->cpu);
+
+	/* lock both ready queues */
+	double_domain_lock(this, remote);
+	mb();
+
+	__add_ready(&remote->domain, task);
+
+	/* release remote but keep ours */
+	raw_spin_unlock(&remote->slock);
+	TRACE_TASK(task, "edffm_lock %d released\n", remote->cpu);
+
+	/* ask remote cpu to reschedule, we are already rescheduling on this */
+	preempt(remote);
+}
+
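+/* requeue() distinguishes four cases for a task t:
+ *  - already released, on the right CPU -> add to the local ready queue
+ *  - already released, on the wrong CPU -> push to the remote ready queue
+ *    (insert_task_in_remote_ready())
+ *  - not yet released, on the right CPU -> program a local release
+ *  - not yet released, on the wrong CPU -> program a remote release
+ *    (edffm_add_release_remote())
+ */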
+static void requeue(struct task_struct* t, rt_domain_t *edf)
+{
+	if (t->state != TASK_RUNNING)
+		TRACE_TASK(t, "requeue: !TASK_RUNNING\n");
+
+	set_rt_flags(t, RT_F_RUNNING);
+	if (is_released(t, litmus_clock())) {
+		if (wrong_cpu(t)) {
+			/* this should only happen if t just completed, but
+			 * its next release is already tardy, so it should be
+			 * migrated and inserted in the remote ready queue
+			 */
+			TRACE_TASK(t, "Migrating task already released, "
+					"move from P%d to P%d\n",
+					task_cpu(t), get_partition(t));
+
+			insert_task_in_remote_ready(t);
+		} else {
+			/* not a migrat task or the job is on the right CPU */
+			__add_ready(edf, t);
+		}
+	} else {
+		if (wrong_cpu(t)) {
+
+			TRACE_TASK(t, "Migrating task, adding remote release\n");
+			edffm_add_release_remote(t);
+		} else {
+			TRACE_TASK(t, "Adding local release\n");
+			add_release(edf, t);
+		}
+	}
+}
+
+/* Update statistics for the _current_ job.
+ *	- job_no was incremented _before_ starting this job
+ *	  (release_at / prepare_for_next_period)
+ *	- cpu_job_no is incremented when the job completes
+ */
+static void update_job_counter(struct task_struct *t)
+{
+	int cpu_pos;
+
+	/* Which CPU counter should be incremented? */
+	cpu_pos = edffm_cpu_pos(t->rt_param.task_params.cpu, t);
+	t->rt_param.semi_part.cpu_job_no[cpu_pos]++;
+
+	TRACE_TASK(t, "job_no = %d, cpu_job_no(pos %d) = %d, cpu %d\n",
+			t->rt_param.job_params.job_no, cpu_pos, cur_cpu_job_no(t),
+			t->rt_param.task_params.cpu);
+}
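+
+/* For example (made-up numbers): a migratory task that has released 10
+ * jobs in total (job_no = 10), having run 6 of them on cpus[0] and 4 on
+ * cpus[1], would have cpu_job_no = { 6, 4 }.
+ */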
+
+/* What is the next cpu for this job? (eq. 8, in EDF-Fm paper) */
+static int next_cpu_for_job(struct task_struct *t)
+{
+	BUG_ON(!is_migrat_task(t));
+
+	TRACE_TASK(t, "%u = %u * %u / %u\n",
+			t->rt_param.job_params.job_no, cur_cpu_job_no(t),
+			cur_cpu_fract_den(t), cur_cpu_fract_num(t));
+	if ((t->rt_param.job_params.job_no) ==
+			(((lt_t) cur_cpu_job_no(t) * cur_cpu_fract_den(t)) /
+			cur_cpu_fract_num(t)))
+		return edffm_params(t).cpus[0];
+
+	return edffm_params(t).cpus[1];
+}
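+
+/* Worked example for the test above (illustrative numbers only): if the
+ * task's share on the current CPU is num/den = 2/3 and it has already
+ * completed cpu_job_no = 4 jobs there, the right-hand side evaluates to
+ * 4 * 3 / 2 = 6 (integer division); the function returns cpus[0] only
+ * when job_no equals 6, and cpus[1] for any other job number.
+ */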
+
+/* If needed (the share for task t on this CPU is exhausted), updates
+ * the task_params.cpu for the _migrating_ task t
+ */
+static void change_migrat_cpu_if_needed(struct task_struct *t)
+{
+	BUG_ON(!is_migrat_task(t));
+	/* EDF-fm: if it is a migrating task and it has already executed
+	 * the required number of jobs on this CPU, we need to move it
+	 * on its next CPU; changing the cpu here will affect the requeue
+	 * and the next release
+	 */
+	if (unlikely(next_cpu_for_job(t) != migrat_cur_cpu(t))) {
+
+		tsk_rt(t)->task_params.cpu = migrat_next_cpu(t);
+		TRACE_TASK(t, "EDF-fm: will migrate job %d -> %d\n",
+				task_cpu(t), tsk_rt(t)->task_params.cpu);
+		return;
+	}
+
+	TRACE_TASK(t, "EDF-fm: job will stay on %d -> %d\n",
+			task_cpu(t), tsk_rt(t)->task_params.cpu);
+}
+
+static void job_completion(struct task_struct* t, int forced)
+{
+	sched_trace_task_completion(t, forced);
+	TRACE_TASK(t, "job_completion().\n");
+
+	if (unlikely(is_migrat_task(t))) {
+		update_job_counter(t);
+		change_migrat_cpu_if_needed(t);
+	}
+
+	set_rt_flags(t, RT_F_SLEEP);
+	prepare_for_next_period(t);
+}
+
+static void edffm_tick(struct task_struct *t)
+{
+	edffm_domain_t *edffm = local_edffm;
+
+	BUG_ON(is_realtime(t) && t != edffm->scheduled);
+
+	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
+		set_tsk_need_resched(t);
+		TRACE("edffm_scheduler_tick: "
+			"%d is preemptable "
+			" => FORCE_RESCHED\n", t->pid);
+	}
+}
+
+static struct task_struct* edffm_schedule(struct task_struct * prev)
+{
+	edffm_domain_t* edffm = local_edffm;
+	rt_domain_t* edf = &edffm->domain;
+	struct task_struct* next;
+
+	int out_of_time, sleep, preempt, exists, blocks, change_cpu, resched;
+
+	raw_spin_lock(&edffm->slock);
+
+	BUG_ON(edffm->scheduled && edffm->scheduled != prev);
+	BUG_ON(edffm->scheduled && !is_realtime(prev));
+
+	/* (0) Determine state */
+	exists = edffm->scheduled != NULL;
+	blocks = exists && !is_running(edffm->scheduled);
+	out_of_time = exists &&
+		budget_enforced(edffm->scheduled) &&
+		budget_exhausted(edffm->scheduled);
+	sleep = exists && get_rt_flags(edffm->scheduled) == RT_F_SLEEP;
+	change_cpu = exists && wrong_cpu(edffm->scheduled);
+	preempt = edffm_preemption_needed(edf, prev);
+
+	BUG_ON(blocks && change_cpu);
+
+	if (exists)
+		TRACE_TASK(prev,
+			"blocks:%d out_of_time:%d sleep:%d preempt:%d "
+			"wrong_cpu:%d state:%d sig:%d\n",
+			blocks, out_of_time, sleep, preempt,
+			change_cpu, prev->state, signal_pending(prev));
+
+	/* If we need to preempt do so. */
+	resched = preempt;
+
+	/* If a task blocks we have no choice but to reschedule. */
+	if (blocks)
+		resched = 1;
+
+	/* If the scheduled task has just woken up tardy and its wake-up
+	 * raced with this invocation of schedule(), a new job has already
+	 * been released; the task must be enqueued on a remote ready queue
+	 * and a new task selected for this CPU.
+	 */
+	if (change_cpu)
+		resched = 1;
+
+	/* Any task that is preemptable and either exhausts its execution
+	 * budget or wants to sleep completes. We may have to reschedule after
+	 * this.
+	 */
+	if ((out_of_time || sleep) && !blocks) {
+		job_completion(edffm->scheduled, !sleep);
+		resched = 1;
+	}
+
+	/* The final scheduling decision. Do we need to switch for some reason?
+	 * Switch if we are in RT mode and have no task or if we need to
+	 * resched.
+	 */
+	next = NULL;
+	if (resched || !exists) {
+
+		if (edffm->scheduled && !blocks)
+			requeue(edffm->scheduled, edf);
+		next = __take_ready(edf);
+	} else
+		/* Only override Linux scheduler if we have a real-time task
+		 * scheduled that needs to continue.
+		 */
+		if (exists)
+			next = prev;
+
+	if (next) {
+		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
+		set_rt_flags(next, RT_F_RUNNING);
+	} else {
+		TRACE("becoming idle at %llu\n", litmus_clock());
+	}
+
+	edffm->scheduled = next;
+	raw_spin_unlock(&edffm->slock);
+
+	return next;
+}
+
+/* Prepare a task for running in RT mode
+ */
+static void edffm_task_new(struct task_struct * t, int on_rq, int running)
+{
+	rt_domain_t* edf = task_edf(t);
+	edffm_domain_t* edffm = task_edffm(t);
+	unsigned long flags;
+
+	TRACE_TASK(t, "EDF-fm: task new, cpu = %d\n",
+			t->rt_param.task_params.cpu);
+
+	release_at(t, litmus_clock());
+	update_job_counter(t);
+
+	/* The task should be running in the queue, otherwise signal
+	 * code will try to wake it up with fatal consequences.
+	 */
+	raw_spin_lock_irqsave(&edffm->slock, flags);
+	if (running) {
+		/* there shouldn't be anything else running at the time */
+		BUG_ON(edffm->scheduled);
+		edffm->scheduled = t;
+	} else {
+		requeue(t, edf);
+		/* maybe we have to reschedule */
+		preempt(edffm);
+	}
+	raw_spin_unlock_irqrestore(&edffm->slock, flags);
+}
+
+static void edffm_task_wake_up(struct task_struct *task)
+{
+	unsigned long flags;
+	edffm_domain_t* edffm = task_edffm(task);
+	rt_domain_t* edf = task_edf(task);
+	lt_t now;
+
+	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
+
+	TRACE_TASK(task, "acquire edffm %d\n", edffm->cpu);
+	raw_spin_lock_irqsave(&edffm->slock, flags);
+
+	BUG_ON(edffm != task_edffm(task));
+	BUG_ON(is_queued(task));
+
+	now = litmus_clock();
+	if (is_tardy(task, now)) {
+		if (unlikely(is_migrat_task(task))) {
+			/* a new job will be released.
+			 * Update current job counter */
+			update_job_counter(task);
+			/* Switch CPU if needed */
+			change_migrat_cpu_if_needed(task);
+		}
+		/* new sporadic release */
+		TRACE_TASK(task, "release new\n");
+		release_at(task, now);
+		sched_trace_task_release(task);
+	}
+
+	/* Only add to ready queue if it is not the currently-scheduled
+	 * task. This could be the case if a task was woken up concurrently
+	 * on a remote CPU before the executing CPU got around to actually
+	 * de-scheduling the task, i.e., wake_up() raced with schedule()
+	 * and won.
+	 */
+	if (edffm->scheduled != task)
+		requeue(task, edf);
+
+	raw_spin_unlock_irqrestore(&edffm->slock, flags);
+	TRACE_TASK(task, "release edffm %d\n", edffm->cpu);
+	TRACE_TASK(task, "wake up done\n");
+}
+
+static void edffm_task_block(struct task_struct *t)
+{
+	TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);
+
+	BUG_ON(!is_realtime(t));
+	if (is_queued(t)) {
+		edffm_domain_t *edffm = local_edffm;
+		TRACE_TASK(t, "task blocked, race with wakeup, "
+				"remove from queue %d\n", edffm->cpu);
+		remove(&edffm->domain, t);
+	}
+}
+
+static void edffm_task_exit(struct task_struct * t)
+{
+	unsigned long flags;
+	edffm_domain_t* edffm = task_edffm(t);
+	rt_domain_t* edf;
+
+	raw_spin_lock_irqsave(&edffm->slock, flags);
+	if (is_queued(t)) {
+		/* dequeue */
+		edf = task_edf(t);
+		remove(edf, t);
+	}
+	if (edffm->scheduled == t)
+		edffm->scheduled = NULL;
+
+	TRACE_TASK(t, "RIP\n");
+
+	preempt(edffm);
+	raw_spin_unlock_irqrestore(&edffm->slock, flags);
+}
+
+static long edffm_admit_task(struct task_struct* tsk)
+{
+	return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
+}
+
+/* Plugin object */
+static struct sched_plugin edffm_plugin __cacheline_aligned_in_smp = {
+	.plugin_name		= "EDF-fm",
+	.tick			= edffm_tick,
+	.task_new		= edffm_task_new,
+	.complete_job		= complete_job,
+	.task_exit		= edffm_task_exit,
+	.schedule		= edffm_schedule,
+	.task_wake_up		= edffm_task_wake_up,
+	.task_block		= edffm_task_block,
+	.admit_task		= edffm_admit_task
+};
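+
+/* Once registered below, the plugin is available under the name "EDF-fm";
+ * with the standard LITMUS^RT userspace tools it can then be activated at
+ * run time (e.g. by writing the name to /proc/litmus/active_plugin or via
+ * the setsched helper, depending on the userspace version in use).
+ */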
+
+static int __init init_edffm(void)
+{
+	int i;
+	edffm_domain_t *edffm;
+
+	/* Note, broken if num_online_cpus() may change */
+	for (i = 0; i < num_online_cpus(); i++) {
+		edffm = remote_edffm(i);
+		edffm->cpu = i;
+		edffm->scheduled = NULL;
+		edf_domain_init(&edffm->domain, NULL, edffm_release_jobs);
+	}
+
+	return register_sched_plugin(&edffm_plugin);
+}
+
+module_init(init_edffm);
+