author     Andrea Bastoni <bastoni@cs.unc.edu>   2010-09-22 15:49:07 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2010-09-22 15:49:07 -0400
commit     f49ec60b4d6394cf199837bbe4c10258e5a877c7 (patch)
tree       ef3e5d7fa7140d9d5dcde98bae9cbba9c4a7767e
parent     5bb94208c31339c352c2aca20f8450f8dd5a464e (diff)
parent     06b23f7b33c61cf1f03acb2d19ddf5dc6c57a810 (diff)
Merge branch 'wip-edffm' into wip-npsf-merge
Conflicts:
	include/litmus/rt_param.h
	litmus/Makefile
-rw-r--r--  include/litmus/rt_param.h  |  64
-rw-r--r--  litmus/Makefile            |   3
-rw-r--r--  litmus/sched_edf_fm.c      | 562
3 files changed, 614 insertions, 15 deletions
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 789494ff584f..be8f5528ff87 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -33,6 +33,42 @@ typedef enum {
 	PRECISE_ENFORCEMENT /* NOT IMPLEMENTED - enforced with hrtimers */
 } budget_policy_t;
 
+
+/* Parameters for EDF-Fm scheduling algorithm.
+ * Each task may be fixed or migratory. Migratory tasks may
+ * migrate on 2 (contiguous) CPU only. NR_CPUS_EDF_FM = 2.
+ */
+#define NR_CPUS_EDF_FM	2
+
+struct edffm_params {
+	/* EDF-fm where can a migratory task execute? */
+	unsigned int	cpus[NR_CPUS_EDF_FM];
+	/* how many cpus are used by this task?
+	 * fixed = 0, migratory = (NR_CPUS_EDF_FM - 1)
+	 * Efficient way to allow writing cpus[nr_cpus].
+	 */
+	unsigned int	nr_cpus;
+	/* Fraction of this task exec_cost that each CPU should handle.
+	 * We keep the fraction divided in num/denom : a matrix of
+	 * (NR_CPUS_EDF_FM rows) x (2 columns).
+	 * The first column is the numerator of the fraction.
+	 * The second column is the denominator.
+	 * In EDF-fm this is a 2*2 matrix
+	 */
+	lt_t		fraction[2][NR_CPUS_EDF_FM];
+};
+
+/* NPS-F semi-part. plugin.
+ * Each (cpu, budget) entry defines the share ('budget' in ns, a % of
+ * the slot_length) of the notional processor on the CPU 'cpu'.
+ * This structure is used by the library - syscall interface in order
+ * to go through the overhead of a syscall only once per server.
+ */
+struct npsf_budgets {
+	int	cpu;
+	lt_t	budget;
+};
+
 struct rt_task {
 	lt_t		exec_cost;
 	lt_t		period;
@@ -43,26 +79,16 @@ struct rt_task {
 
 	/* parameters used by the semi-partitioned algorithms */
 	union {
-		/* NPS-F */
-		/* The id for the server (notional processor) that holds
+		/* EDF-Fm; defined in sched_edf_fm.c */
+		struct edffm_params fm;
+		/* NPS-F: id for the server (notional processor) that holds
 		 * this task; the same npfs_id can be assigned to "the same"
 		 * server split on different cpus
 		 * */
 		int npsf_id;
 	} semi_part;
 };
 
-/* NPS-F semi-part. plugin.
- * Each (cpu, budget) entry defines the share ('budget' in ns, a % of
- * the slot_length) of the notional processor on the CPU 'cpu'.
- * This structure is used by the library - syscall interface in order
- * to go through the overhead of a syscall only once per server.
- */
-struct npsf_budgets {
-	int	cpu;
-	lt_t	budget;
-};
-
 /* The definition of the data that is shared between the kernel and real-time
  * tasks via a shared page (see litmus/ctrldev.c).
  *
@@ -205,6 +231,16 @@ struct rt_param {
 
 	/* Pointer to the page shared between userspace and kernel. */
 	struct control_page * ctrl_page;
+
+	/* runtime info for the semi-part plugins */
+	union {
+		struct {
+			/* EDF-fm: number of jobs handled by this cpu
+			 * (to determine next cpu for a migrating task)
+			 */
+			unsigned int	cpu_job_no[NR_CPUS_EDF_FM];
+		} fm;
+	} semi_part;
 };
 
 /* Possible RT flags */
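
As an illustration of the new per-task parameters (not part of the patch: the helper name setup_migratory_example and the 1/3 : 2/3 split are made up, and the sketch assumes the header above is visible to userspace, e.g. through a liblitmus-style build), a migratory EDF-fm task spanning CPUs 0 and 1 could be described like this:

	#include <litmus/rt_param.h>

	/* Illustrative only: mark a task as migratory between CPUs 0 and 1,
	 * serving 1/3 of its jobs on CPU 0 and 2/3 on CPU 1.
	 */
	static void setup_migratory_example(struct rt_task *tp)
	{
		struct edffm_params *fm = &tp->semi_part.fm;

		tp->cpu = 0;				/* start on the first of the two CPUs */
		fm->nr_cpus = NR_CPUS_EDF_FM - 1;	/* non-zero marks the task as migratory */
		fm->cpus[0] = 0;
		fm->cpus[1] = 1;

		/* fraction[0][i] / fraction[1][i] is the share of jobs served by cpus[i],
		 * matching the cur_cpu_fract_num()/cur_cpu_fract_den() macros in the plugin */
		fm->fraction[0][0] = 1;	fm->fraction[1][0] = 3;	/* 1/3 on CPU 0 */
		fm->fraction[0][1] = 2;	fm->fraction[1][1] = 3;	/* 2/3 on CPU 1 */
	}
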
diff --git a/litmus/Makefile b/litmus/Makefile
index fbd15ccf6423..89c22198a53a 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -14,7 +14,8 @@ obj-y = sched_plugin.o litmus.o \
 	   ctrldev.o \
 	   sched_gsn_edf.o \
 	   sched_psn_edf.o \
-	   sched_npsf.o
+	   sched_npsf.o \
+	   sched_edf_fm.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
diff --git a/litmus/sched_edf_fm.c b/litmus/sched_edf_fm.c
new file mode 100644
index 000000000000..e14f8588c7a4
--- /dev/null
+++ b/litmus/sched_edf_fm.c
@@ -0,0 +1,562 @@
+/*
+ * litmus/sched_edf_fm.c
+ *
+ * Implementation of the EDF-fm scheduling algorithm.
+ */
+
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <linux/module.h>
+
+#include <litmus/litmus.h>
+#include <litmus/jobs.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/edf_common.h>
+
+typedef struct {
+	rt_domain_t		domain;
+	int			cpu;
+	struct task_struct*	scheduled; /* only RT tasks */
+/* domain lock */
+#define slock domain.ready_lock
+} edffm_domain_t;
+
+DEFINE_PER_CPU(edffm_domain_t, edffm_domains);
+
+#define local_edffm		(&__get_cpu_var(edffm_domains))
+#define remote_edf(cpu)		(&per_cpu(edffm_domains, cpu).domain)
+#define remote_edffm(cpu)	(&per_cpu(edffm_domains, cpu))
+#define task_edf(task)		remote_edf(get_partition(task))
+#define task_edffm(task)	remote_edffm(get_partition(task))
+
+#define edffm_params(t)		(t->rt_param.task_params.semi_part.fm)
+
+/* Is the task a migratory task? */
+#define is_migrat_task(task)	(edffm_params(task).nr_cpus)
+/* t is on the wrong CPU (it should be requeued properly) */
+#define wrong_cpu(t)	is_migrat_task((t)) && task_cpu((t)) != get_partition((t))
+/* Get next CPU */
+#define migrat_next_cpu(t)	\
+	((tsk_rt(t)->task_params.cpu == edffm_params(t).cpus[0]) ? \
+		edffm_params(t).cpus[1] : \
+		edffm_params(t).cpus[0])
+/* Get current cpu */
+#define migrat_cur_cpu(t)	\
+	((tsk_rt(t)->task_params.cpu == edffm_params(t).cpus[0]) ? \
+		edffm_params(t).cpus[0] : \
+		edffm_params(t).cpus[1])
+/* Manipulate share for current cpu */
+#define cur_cpu_fract_num(t)	\
+	((tsk_rt(t)->task_params.cpu == edffm_params(t).cpus[0]) ? \
+		edffm_params(t).fraction[0][0] : \
+		edffm_params(t).fraction[0][1])
+#define cur_cpu_fract_den(t)	\
+	((tsk_rt(t)->task_params.cpu == edffm_params(t).cpus[0]) ? \
+		edffm_params(t).fraction[1][0] : \
+		edffm_params(t).fraction[1][1])
+/* Get job number for current cpu */
+#define cur_cpu_job_no(t)	\
+	((tsk_rt(t)->task_params.cpu == edffm_params(t).cpus[0]) ? \
+		tsk_rt(t)->semi_part.fm.cpu_job_no[0] : \
+		tsk_rt(t)->semi_part.fm.cpu_job_no[1])
+/* What is the current cpu position in the array? */
+#define edffm_cpu_pos(cpu,t)	\
+	((cpu == edffm_params(t).cpus[0]) ? \
+		0 : 1)
+
+/*
+ * EDF-fm: migratory tasks have higher prio than fixed, EDF in both classes.
+ * (Both first and second may be NULL).
+ */
+int edffm_higher_prio(struct task_struct* first, struct task_struct* second)
+{
+	if ((first && edffm_params(first).nr_cpus) ||
+			(second && edffm_params(second).nr_cpus)) {
+		if ((first && edffm_params(first).nr_cpus) &&
+				(second && edffm_params(second).nr_cpus))
+			/* both are migrating */
+			return edf_higher_prio(first, second);
+
+		if (first && edffm_params(first).nr_cpus)
+			/* first is migrating */
+			return 1;
+		else
+			/* second is migrating */
+			return 0;
+	}
+
+	/* both are fixed or not real time */
+	return edf_higher_prio(first, second);
+}
+
+/* need_to_preempt - check whether the task t needs to be preempted
+ * call only with irqs disabled and with ready_lock acquired
+ */
+int edffm_preemption_needed(rt_domain_t* rt, struct task_struct *t)
+{
+	/* we need the read lock for edf_ready_queue */
+	/* no need to preempt if there is nothing pending */
+	if (!__jobs_pending(rt))
+		return 0;
+	/* we need to reschedule if t doesn't exist */
+	if (!t)
+		return 1;
+
+	/* make sure to get non-rt stuff out of the way */
+	return !is_realtime(t) || edffm_higher_prio(__next_ready(rt), t);
+}
+
+/* we assume the lock is being held */
+static void preempt(edffm_domain_t *edffm)
+{
+	preempt_if_preemptable(edffm->scheduled, edffm->cpu);
+}
+
+static void edffm_release_jobs(rt_domain_t* rt, struct bheap* tasks)
+{
+	unsigned long flags;
+	edffm_domain_t *edffm = container_of(rt, edffm_domain_t, domain);
+
+	raw_spin_lock_irqsave(&edffm->slock, flags);
+
+	__merge_ready(rt, tasks);
+
+	if (edffm_preemption_needed(rt, edffm->scheduled))
+		preempt(edffm);
+
+	raw_spin_unlock_irqrestore(&edffm->slock, flags);
+}
+
+/* EDF-fm uses the "release_master" field to force the next release for
+ * the task 'task' to happen on a remote CPU. The remote cpu for task is
+ * previously set up during job_completion() taking into consideration
+ * whether a task is a migratory task or not.
+ */
+static inline void
+edffm_add_release_remote(struct task_struct *task)
+{
+	unsigned long flags;
+	rt_domain_t *rt = task_edf(task);
+
+	raw_spin_lock_irqsave(&rt->tobe_lock, flags);
+
+	/* "modify" destination cpu */
+	rt->release_master = get_partition(task);
+
+	TRACE_TASK(task, "Add remote release: smp_proc_id = %d, cpu = %d, remote = %d\n",
+			smp_processor_id(), task_cpu(task), rt->release_master);
+
+	/* trigger future release */
+	__add_release(rt, task);
+
+	/* reset proper release_master and unlock */
+	rt->release_master = NO_CPU;
+	raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
+}
+
+/* perform double ready_queue locking in an orderwise fashion
+ * this is called with: interrupt disabled and rq->lock held (from
+ * schedule())
+ */
+static noinline void double_domain_lock(edffm_domain_t *dom1, edffm_domain_t *dom2)
+{
+	if (dom1 == dom2) {
+		/* fake */
+		raw_spin_lock(&dom1->slock);
+	} else {
+		if (dom1 < dom2) {
+			raw_spin_lock(&dom1->slock);
+			raw_spin_lock(&dom2->slock);
+			TRACE("acquired %d and %d\n", dom1->cpu, dom2->cpu);
+		} else {
+			raw_spin_lock(&dom2->slock);
+			raw_spin_lock(&dom1->slock);
+			TRACE("acquired %d and %d\n", dom2->cpu, dom1->cpu);
+		}
+	}
+}
+
+/* Directly insert a task in a remote ready queue. This function
+ * should only be called if this task is a migrating task and its
+ * last job for this CPU just completed (a new one is released for
+ * a remote CPU), but the new job is already tardy.
+ */
+static noinline void insert_task_in_remote_ready(struct task_struct *task)
+{
+	edffm_domain_t *this = remote_edffm(task_cpu(task));
+	edffm_domain_t *remote = remote_edffm(get_partition(task));
+
+	BUG_ON(get_partition(task) != remote->cpu);
+
+	TRACE_TASK(task, "Migrate From P%d -> To P%d\n",
+			this->cpu, remote->cpu);
+	TRACE_TASK(task, "Inserting in remote ready queue\n");
+
+	WARN_ON(!irqs_disabled());
+
+	raw_spin_unlock(&this->slock);
+	mb();
+	TRACE_TASK(task,"edffm_lock %d released\n", this->cpu);
+
+	/* lock both ready queues */
+	double_domain_lock(this, remote);
+	mb();
+
+	__add_ready(&remote->domain, task);
+
+	/* release remote but keep ours */
+	raw_spin_unlock(&remote->slock);
+	TRACE_TASK(task,"edffm_lock %d released\n", remote->cpu);
+
+	/* ask remote cpu to reschedule, we are already rescheduling on this */
+	preempt(remote);
+}
+
+static void requeue(struct task_struct* t, rt_domain_t *edf)
+{
+	if (t->state != TASK_RUNNING)
+		TRACE_TASK(t, "requeue: !TASK_RUNNING\n");
+
+	set_rt_flags(t, RT_F_RUNNING);
+	if (is_released(t, litmus_clock())) {
+		if (wrong_cpu(t)) {
+			/* this should only happen if t just completed, but
+			 * its next release is already tardy, so it should be
+			 * migrated and inserted in the remote ready queue
+			 */
+			TRACE_TASK(t, "Migrating task already released, "
+					"move from P%d to P%d\n",
+					task_cpu(t), get_partition(t));
+
+			insert_task_in_remote_ready(t);
+		} else {
+			/* not a migrat task or the job is on the right CPU */
+			__add_ready(edf, t);
+		}
+	} else {
+		if (wrong_cpu(t)) {
+
+			TRACE_TASK(t, "Migrating task, adding remote release\n");
+			edffm_add_release_remote(t);
+		} else {
+			TRACE_TASK(t, "Adding local release\n");
+			add_release(edf, t);
+		}
+	}
+}
+
+/* Update statistics for the _current_ job.
+ * - job_no was incremented _before_ starting this job
+ *   (release_at / prepare_for_next_period)
+ * - cpu_job_no is incremented when the job completes
+ */
+static void update_job_counter(struct task_struct *t)
+{
+	int cpu_pos;
+
+	/* Which CPU counter should be incremented? */
+	cpu_pos = edffm_cpu_pos(t->rt_param.task_params.cpu, t);
+	t->rt_param.semi_part.fm.cpu_job_no[cpu_pos]++;
+
+	TRACE_TASK(t, "job_no = %d, cpu_job_no(pos %d) = %d, cpu %d\n",
+			t->rt_param.job_params.job_no, cpu_pos, cur_cpu_job_no(t),
+			t->rt_param.task_params.cpu);
+}
+
+/* What is the next cpu for this job? (eq. 8, in EDF-Fm paper) */
+static int next_cpu_for_job(struct task_struct *t)
+{
+	BUG_ON(!is_migrat_task(t));
+
+	if ((t->rt_param.job_params.job_no) ==
+			(((lt_t) cur_cpu_job_no(t) * cur_cpu_fract_den(t)) /
+			cur_cpu_fract_num(t)))
+		return edffm_params(t).cpus[0];
+
+	return edffm_params(t).cpus[1];
+}
+
+/* If needed (the share for task t on this CPU is exhausted), updates
+ * the task_params.cpu for the _migrating_ task t
+ */
+static void change_migrat_cpu_if_needed(struct task_struct *t)
+{
+	BUG_ON(!is_migrat_task(t));
+	/* EDF-fm: if it is a migrating task and it has already executed
+	 * the required number of jobs on this CPU, we need to move it
+	 * on its next CPU; changing the cpu here will affect the requeue
+	 * and the next release
+	 */
+	if (unlikely(next_cpu_for_job(t) != migrat_cur_cpu(t))) {
+
+		tsk_rt(t)->task_params.cpu = migrat_next_cpu(t);
+		TRACE_TASK(t, "EDF-fm: will migrate job %d -> %d\n",
+				task_cpu(t), tsk_rt(t)->task_params.cpu);
+		return;
+	}
+
+	TRACE_TASK(t, "EDF-fm: job will stay on %d -> %d\n",
+			task_cpu(t), tsk_rt(t)->task_params.cpu);
+}
+
+static void job_completion(struct task_struct* t, int forced)
+{
+	sched_trace_task_completion(t,forced);
+	TRACE_TASK(t, "job_completion().\n");
+
+	if (unlikely(is_migrat_task(t))) {
+		update_job_counter(t);
+		change_migrat_cpu_if_needed(t);
+	}
+
+	set_rt_flags(t, RT_F_SLEEP);
+	prepare_for_next_period(t);
+}
+
+static void edffm_tick(struct task_struct *t)
+{
+	edffm_domain_t *edffm = local_edffm;
+
+	BUG_ON(is_realtime(t) && t != edffm->scheduled);
+
+	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
+		set_tsk_need_resched(t);
+		TRACE("edffm_scheduler_tick: "
+			"%d is preemptable "
+			" => FORCE_RESCHED\n", t->pid);
+	}
+}
+
+static struct task_struct* edffm_schedule(struct task_struct * prev)
+{
+	edffm_domain_t* edffm = local_edffm;
+	rt_domain_t* edf = &edffm->domain;
+	struct task_struct* next;
+
+	int out_of_time, sleep, preempt, exists, blocks, change_cpu, resched;
+
+	raw_spin_lock(&edffm->slock);
+
+	BUG_ON(edffm->scheduled && edffm->scheduled != prev);
+	BUG_ON(edffm->scheduled && !is_realtime(prev));
+
+	/* (0) Determine state */
+	exists = edffm->scheduled != NULL;
+	blocks = exists && !is_running(edffm->scheduled);
+	out_of_time = exists &&
+		budget_enforced(edffm->scheduled) &&
+		budget_exhausted(edffm->scheduled);
+	sleep = exists && get_rt_flags(edffm->scheduled) == RT_F_SLEEP;
+	change_cpu = exists && wrong_cpu(edffm->scheduled);
+	preempt = edffm_preemption_needed(edf, prev);
+
+	BUG_ON(blocks && change_cpu);
+
+	if (exists)
+		TRACE_TASK(prev,
+			"blocks:%d out_of_time:%d sleep:%d preempt:%d "
+			"wrong_cpu:%d state:%d sig:%d\n",
+			blocks, out_of_time, sleep, preempt,
+			change_cpu, prev->state, signal_pending(prev));
+
+	/* If we need to preempt do so. */
+	resched = preempt;
+
+	/* If a task blocks we have no choice but to reschedule. */
+	if (blocks)
+		resched = 1;
+
+	/* If a task has just woken up, it was tardy and the wake up
+	 * raced with this schedule, a new job has already been released,
+	 * but scheduled should be enqueued on a remote ready queue, and a
+	 * new task should be selected for the current queue.
+	 */
+	if (change_cpu)
+		resched = 1;
+
+	/* Any task that is preemptable and either exhausts its execution
+	 * budget or wants to sleep completes. We may have to reschedule after
+	 * this.
+	 */
+	if ((out_of_time || sleep) && !blocks) {
+		job_completion(edffm->scheduled, !sleep);
+		resched = 1;
+	}
+
+	/* The final scheduling decision. Do we need to switch for some reason?
+	 * Switch if we are in RT mode and have no task or if we need to
+	 * resched.
+	 */
+	next = NULL;
+	if (resched || !exists) {
+
+		if (edffm->scheduled && !blocks)
+			requeue(edffm->scheduled, edf);
+		next = __take_ready(edf);
+	} else
+		/* Only override Linux scheduler if we have a real-time task
+		 * scheduled that needs to continue.
+		 */
+		if (exists)
+			next = prev;
+
+	if (next) {
+		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
+		set_rt_flags(next, RT_F_RUNNING);
+	} else {
+		TRACE("becoming idle at %llu\n", litmus_clock());
+	}
+
+	edffm->scheduled = next;
+	raw_spin_unlock(&edffm->slock);
+
+	return next;
+}
+
+/* Prepare a task for running in RT mode
+ */
+static void edffm_task_new(struct task_struct * t, int on_rq, int running)
+{
+	rt_domain_t* edf = task_edf(t);
+	edffm_domain_t* edffm = task_edffm(t);
+	unsigned long flags;
+
+	TRACE_TASK(t, "EDF-fm: task new, cpu = %d\n",
+			t->rt_param.task_params.cpu);
+
+	release_at(t, litmus_clock());
+	update_job_counter(t);
+
+	/* The task should be running in the queue, otherwise signal
+	 * code will try to wake it up with fatal consequences.
+	 */
+	raw_spin_lock_irqsave(&edffm->slock, flags);
+	if (running) {
+		/* there shouldn't be anything else running at the time */
+		BUG_ON(edffm->scheduled);
+		edffm->scheduled = t;
+	} else {
+		requeue(t, edf);
+		/* maybe we have to reschedule */
+		preempt(edffm);
+	}
+	raw_spin_unlock_irqrestore(&edffm->slock, flags);
+}
+
+static void edffm_task_wake_up(struct task_struct *task)
+{
+	unsigned long flags;
+	edffm_domain_t* edffm = task_edffm(task);
+	rt_domain_t* edf = task_edf(task);
+	lt_t now;
+
+	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
+
+	TRACE_TASK(task, "acquire edffm %d\n", edffm->cpu);
+	raw_spin_lock_irqsave(&edffm->slock, flags);
+
+	BUG_ON(edffm != task_edffm(task));
+	BUG_ON(is_queued(task));
+
+	now = litmus_clock();
+	if (is_tardy(task, now)) {
+		if (unlikely(is_migrat_task(task))) {
+			/* a new job will be released.
+			 * Update current job counter */
+			update_job_counter(task);
+			/* Switch CPU if needed */
+			change_migrat_cpu_if_needed(task);
+		}
+		/* new sporadic release */
+		TRACE_TASK(task, "release new\n");
+		release_at(task, now);
+		sched_trace_task_release(task);
+	}
+
+	/* Only add to ready queue if it is not the currently-scheduled
+	 * task. This could be the case if a task was woken up concurrently
+	 * on a remote CPU before the executing CPU got around to actually
+	 * de-scheduling the task, i.e., wake_up() raced with schedule()
+	 * and won.
+	 */
+	if (edffm->scheduled != task)
+		requeue(task, edf);
+
+	raw_spin_unlock_irqrestore(&edffm->slock, flags);
+	TRACE_TASK(task, "release edffm %d\n", edffm->cpu);
+	TRACE_TASK(task, "wake up done\n");
+}
+
+static void edffm_task_block(struct task_struct *t)
+{
+	TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);
+
+	BUG_ON(!is_realtime(t));
+	if (is_queued(t)) {
+		edffm_domain_t *edffm = local_edffm;
+		TRACE_TASK(t, "task blocked, race with wakeup, "
+				"remove from queue %d\n", edffm->cpu);
+		remove(&edffm->domain, t);
+	}
+}
+
+static void edffm_task_exit(struct task_struct * t)
+{
+	unsigned long flags;
+	edffm_domain_t* edffm = task_edffm(t);
+	rt_domain_t* edf;
+
+	raw_spin_lock_irqsave(&edffm->slock, flags);
+	if (is_queued(t)) {
+		/* dequeue */
+		edf = task_edf(t);
+		remove(edf, t);
+	}
+	if (edffm->scheduled == t)
+		edffm->scheduled = NULL;
+
+	TRACE_TASK(t, "RIP\n");
+
+	preempt(edffm);
+	raw_spin_unlock_irqrestore(&edffm->slock, flags);
+}
+
+static long edffm_admit_task(struct task_struct* tsk)
+{
+	return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
+}
+
+/* Plugin object */
+static struct sched_plugin edffm_plugin __cacheline_aligned_in_smp = {
+	.plugin_name		= "EDF-fm",
+	.tick			= edffm_tick,
+	.task_new		= edffm_task_new,
+	.complete_job		= complete_job,
+	.task_exit		= edffm_task_exit,
+	.schedule		= edffm_schedule,
+	.task_wake_up		= edffm_task_wake_up,
+	.task_block		= edffm_task_block,
+	.admit_task		= edffm_admit_task
+};
+
+static int __init init_edffm(void)
+{
+	int i;
+	edffm_domain_t *edffm;
+
+	/* Note, broken if num_online_cpus() may change */
+	for (i = 0; i < num_online_cpus(); i++) {
+		edffm = remote_edffm(i);
+		edffm->cpu = i;
+		edffm->scheduled = NULL;
+		edf_domain_init(&edffm->domain, NULL, edffm_release_jobs);
+	}
+
+	return register_sched_plugin(&edffm_plugin);
+}
+
+module_init(init_edffm);
+
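
For the job-to-CPU assignment rule used by next_cpu_for_job() above (eq. 8 in the EDF-fm paper), the standalone sketch below works through the comparison once with made-up numbers (a 1/3 share on cpus[0], two jobs already completed there, job 6 just finished); it only mirrors the arithmetic of that test and is not kernel code:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long num = 1, den = 3;	/* fraction[0][0] / fraction[1][0]: 1/3 share on cpus[0] */
		unsigned int cpu_job_no = 2;		/* jobs already completed on cpus[0] */
		unsigned int job_no = 6;		/* number of the job that just completed */

		/* same comparison as next_cpu_for_job() */
		if (job_no == (cpu_job_no * den) / num)
			printf("next job is assigned to cpus[0]\n");	/* here: 6 == (2 * 3) / 1 */
		else
			printf("next job is assigned to cpus[1]\n");
		return 0;
	}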