author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-12-07 14:58:52 -0500
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-12-07 14:58:52 -0500
commit		4a36db417c95a0ce3e70d2896d0d81b98d478b53 (patch)
tree		2236e4ae4cee35e5539b6dec727f965788ba4b5f
parent		51c41c7b109e6da035c42ca85d6da44586a18359 (diff)
Remove C-EDF plugin.
-rw-r--r--	litmus/Makefile		1
-rw-r--r--	litmus/sched_cedf.c	703
2 files changed, 0 insertions, 704 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index 837f697004..b5ac4ca345 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -9,7 +9,6 @@ obj-y = sched_plugin.o litmus.o \
9 | heap.o \ | 9 | heap.o \ |
10 | sched_gsn_edf.o \ | 10 | sched_gsn_edf.o \ |
11 | sched_psn_edf.o \ | 11 | sched_psn_edf.o \ |
12 | sched_cedf.o \ | ||
13 | sched_pfair.o | 12 | sched_pfair.o |
14 | 13 | ||
15 | obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o | 14 | obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o |
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
deleted file mode 100644
index 96e145a8a7..0000000000
--- a/litmus/sched_cedf.c
+++ /dev/null
@@ -1,703 +0,0 @@
1 | /* | ||
2 | * litmus/sched_cedf.c | ||
3 | * | ||
4 | * Implementation of the Clustered EDF (C-EDF) scheduling algorithm. | ||
5 | * Linking is included so that support for synchronization (e.g., through | ||
6 | * the implementation of a "CSN-EDF" algorithm) can be added later if desired. | ||
7 | * | ||
8 | * This version uses the simple approach and serializes all scheduling | ||
9 | * decisions by the use of a queue lock. This is probably not the | ||
10 | * best way to do it, but it should suffice for now. | ||
11 | */ | ||
12 | |||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/percpu.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/list.h> | ||
17 | |||
18 | #include <litmus/litmus.h> | ||
19 | #include <litmus/jobs.h> | ||
20 | #include <litmus/sched_plugin.h> | ||
21 | #include <litmus/edf_common.h> | ||
22 | #include <litmus/sched_trace.h> | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | |||
26 | /* Overview of C-EDF operations. | ||
27 | * | ||
28 | * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage | ||
29 | * structure (NOT the actually scheduled | ||
30 | * task). If there is another linked task To | ||
31 | * already it will set To->linked_on = NO_CPU | ||
32 | * (thereby removing its association with this | ||
33 | * CPU). However, it will not requeue the | ||
34 | * previously linked task (if any). It will set | ||
35 | * T's state to RT_F_RUNNING and check whether | ||
36 | * it is already running somewhere else. If T | ||
37 | * is scheduled somewhere else it will link | ||
38 | * it to that CPU instead (and pull the linked | ||
39 | * task to cpu). T may be NULL. | ||
40 | * | ||
41 | * unlink(T) - Unlink removes T from all scheduler data | ||
42 | * structures. If it is linked to some CPU it | ||
43 | * will link NULL to that CPU. If it is | ||
44 | * currently queued in the cedf queue for | ||
45 | * a partition, it will be removed from | ||
46 | * the rt_domain. It is safe to call | ||
47 | * unlink(T) if T is not linked. T may not | ||
48 | * be NULL. | ||
49 | * | ||
50 | * requeue(T) - Requeue will insert T into the appropriate | ||
51 | * queue. If the system is in real-time mode and | ||
52 | * T is released already, it will go into the | ||
53 | * ready queue. If the system is not in | ||
54 | * real-time mode, T will go into the | ||
55 | * release queue. If T's release time is in the | ||
56 | * future, it will go into the release | ||
57 | * queue. That means that T's release time/job | ||
58 | * no/etc. has to be updated before requeue(T) is | ||
59 | * called. It is not safe to call requeue(T) | ||
60 | * when T is already queued. T may not be NULL. | ||
61 | * | ||
62 | * cedf_job_arrival(T) - This is the catch-all function when T enters | ||
63 | * the system, either after a suspension or at a | ||
64 | * job release. It will queue T (which means it | ||
65 | * is not safe to call cedf_job_arrival(T) if | ||
66 | * T is already queued) and then check whether a | ||
67 | * preemption is necessary. If a preemption is | ||
68 | * necessary it will update the linkage | ||
69 | * accordingly and cause schedule() to be called | ||
70 | * (either with an IPI or need_resched). It is | ||
71 | * safe to call cedf_job_arrival(T) if T's | ||
72 | * next job has not been actually released yet | ||
73 | * (release time in the future). T will be put | ||
74 | * on the release queue in that case. | ||
75 | * | ||
76 | * job_completion(T) - Take care of everything that needs to be done | ||
77 | * to prepare T for its next release and place | ||
78 | * it in the right queue with | ||
79 | * cedf_job_arrival(). | ||
80 | * | ||
81 | * | ||
82 | * When we know that T is linked to CPU then link_task_to_cpu(NULL, CPU) is | ||
83 | * equivalent to unlink(T). Note that if you unlink a task from a CPU none of | ||
84 | * the functions will automatically propagate pending tasks from the ready queue | ||
85 | * to a linked task. This is the job of the calling function (by means of | ||
86 | * __take_ready). | ||
87 | */ | ||
88 | |||
89 | /* cpu_entry_t - maintain the linked and scheduled state | ||
90 | */ | ||
91 | typedef struct { | ||
92 | int cpu; | ||
93 | struct task_struct* linked; /* only RT tasks */ | ||
94 | struct task_struct* scheduled; /* only RT tasks */ | ||
95 | struct list_head list; | ||
96 | atomic_t will_schedule; /* prevent unneeded IPIs */ | ||
97 | } cpu_entry_t; | ||
98 | DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries); | ||
99 | |||
100 | cpu_entry_t* cedf_cpu_entries_array[NR_CPUS]; | ||
101 | |||
102 | #define set_will_schedule() \ | ||
103 | (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 1)) | ||
104 | #define clear_will_schedule() \ | ||
105 | (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 0)) | ||
106 | #define test_will_schedule(cpu) \ | ||
107 | (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) | ||
108 | |||
109 | /* Cluster size -- currently four. This is a variable to allow for | ||
110 | * the possibility of changing the cluster size online in the future. | ||
111 | */ | ||
112 | int cluster_size = 4; | ||
113 | |||
114 | typedef struct { | ||
115 | rt_domain_t domain; | ||
116 | int first_cpu; | ||
117 | int last_cpu; | ||
118 | |||
119 | /* the cpus queue themselves according to priority in here */ | ||
120 | struct list_head cedf_cpu_queue; | ||
121 | |||
122 | /* per-partition spinlock: protects the domain and | ||
123 | * serializes scheduling decisions | ||
124 | */ | ||
125 | #define slock domain.ready_lock | ||
126 | } cedf_domain_t; | ||
127 | |||
128 | DEFINE_PER_CPU(cedf_domain_t*, cedf_domains) = NULL; | ||
129 | |||
130 | cedf_domain_t* cedf_domains_array[NR_CPUS]; | ||
131 | |||
132 | |||
133 | /* These are defined similarly to partitioning, except that a | ||
134 | * task's partition is any cpu of the cluster to which it | ||
135 | * is assigned, typically the lowest-numbered cpu. | ||
136 | */ | ||
137 | #define local_edf (&__get_cpu_var(cedf_domains)->domain) | ||
138 | #define local_cedf __get_cpu_var(cedf_domains) | ||
139 | #define remote_edf(cpu) (&per_cpu(cedf_domains, cpu)->domain) | ||
140 | #define remote_cedf(cpu) per_cpu(cedf_domains, cpu) | ||
141 | #define task_edf(task) remote_edf(get_partition(task)) | ||
142 | #define task_cedf(task) remote_cedf(get_partition(task)) | ||
143 | |||
144 | /* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
145 | * order in the cpu queue. Caller must hold cedf lock. | ||
146 | * | ||
147 | * This really should be a heap. | ||
148 | */ | ||
149 | static void update_cpu_position(cpu_entry_t *entry) | ||
150 | { | ||
151 | cpu_entry_t *other; | ||
152 | struct list_head *cedf_cpu_queue = | ||
153 | &(remote_cedf(entry->cpu))->cedf_cpu_queue; | ||
154 | struct list_head *pos; | ||
155 | |||
156 | BUG_ON(!cedf_cpu_queue); | ||
157 | |||
158 | if (likely(in_list(&entry->list))) | ||
159 | list_del(&entry->list); | ||
160 | /* if we do not execute real-time jobs we just move | ||
161 | * to the end of the queue | ||
162 | */ | ||
163 | if (entry->linked) { | ||
164 | list_for_each(pos, cedf_cpu_queue) { | ||
165 | other = list_entry(pos, cpu_entry_t, list); | ||
166 | if (edf_higher_prio(entry->linked, other->linked)) { | ||
167 | __list_add(&entry->list, pos->prev, pos); | ||
168 | return; | ||
169 | } | ||
170 | } | ||
171 | } | ||
172 | /* if we get this far we have the lowest priority job */ | ||
173 | list_add_tail(&entry->list, cedf_cpu_queue); | ||
174 | } | ||
175 | |||
176 | /* link_task_to_cpu - Update the link of a CPU. | ||
177 | * Handles the case where the to-be-linked task is already | ||
178 | * scheduled on a different CPU. | ||
179 | */ | ||
180 | static noinline void link_task_to_cpu(struct task_struct* linked, | ||
181 | cpu_entry_t *entry) | ||
182 | { | ||
183 | cpu_entry_t *sched; | ||
184 | struct task_struct* tmp; | ||
185 | int on_cpu; | ||
186 | |||
187 | BUG_ON(linked && !is_realtime(linked)); | ||
188 | |||
189 | /* Cannot link task to a CPU that doesn't belong to its partition... */ | ||
190 | BUG_ON(linked && remote_cedf(entry->cpu) != task_cedf(linked)); | ||
191 | |||
192 | /* Currently linked task is set to be unlinked. */ | ||
193 | if (entry->linked) { | ||
194 | entry->linked->rt_param.linked_on = NO_CPU; | ||
195 | } | ||
196 | |||
197 | /* Link new task to CPU. */ | ||
198 | if (linked) { | ||
199 | set_rt_flags(linked, RT_F_RUNNING); | ||
200 | /* handle the case where the task is already scheduled somewhere! */ | ||
201 | on_cpu = linked->rt_param.scheduled_on; | ||
202 | if (on_cpu != NO_CPU) { | ||
203 | sched = &per_cpu(cedf_cpu_entries, on_cpu); | ||
204 | /* this should only happen if not linked already */ | ||
205 | BUG_ON(sched->linked == linked); | ||
206 | |||
207 | /* If we are already scheduled on the CPU to which we | ||
208 | * wanted to link, we don't need to do the swap -- | ||
209 | * we just link ourselves to the CPU and depend on | ||
210 | * the caller to get things right. | ||
211 | */ | ||
212 | if (entry != sched) { | ||
213 | tmp = sched->linked; | ||
214 | linked->rt_param.linked_on = sched->cpu; | ||
215 | sched->linked = linked; | ||
216 | update_cpu_position(sched); | ||
217 | linked = tmp; | ||
218 | } | ||
219 | } | ||
220 | if (linked) /* might be NULL due to swap */ | ||
221 | linked->rt_param.linked_on = entry->cpu; | ||
222 | } | ||
223 | entry->linked = linked; | ||
224 | |||
225 | if (entry->linked) | ||
226 | TRACE_TASK(entry->linked, "linked to CPU %d, state:%d\n", | ||
227 | entry->cpu, entry->linked->state); | ||
228 | else | ||
229 | TRACE("NULL linked to CPU %d\n", entry->cpu); | ||
230 | |||
231 | update_cpu_position(entry); | ||
232 | } | ||
233 | |||
234 | /* unlink - Make sure a task is not linked any longer to an entry | ||
235 | * where it was linked before. Must hold cedf_lock. | ||
236 | */ | ||
237 | static noinline void unlink(struct task_struct* t) | ||
238 | { | ||
239 | cpu_entry_t *entry; | ||
240 | |||
241 | if (unlikely(!t)) { | ||
242 | TRACE_BUG_ON(!t); | ||
243 | return; | ||
244 | } | ||
245 | |||
246 | if (t->rt_param.linked_on != NO_CPU) { | ||
247 | /* unlink */ | ||
248 | entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on); | ||
249 | t->rt_param.linked_on = NO_CPU; | ||
250 | link_task_to_cpu(NULL, entry); | ||
251 | } else if (is_queued(t)) { | ||
252 | /* This is an interesting situation: t is scheduled, | ||
253 | * but was just recently unlinked. It cannot be | ||
254 | * linked anywhere else (because then it would have | ||
255 | * been relinked to this CPU), thus it must be in some | ||
256 | * queue. We must remove it from the list in this | ||
257 | * case. | ||
258 | */ | ||
259 | remove(task_edf(t), t); | ||
260 | } | ||
261 | } | ||
262 | |||
263 | |||
264 | /* preempt - force a CPU to reschedule | ||
265 | */ | ||
266 | static noinline void preempt(cpu_entry_t *entry) | ||
267 | { | ||
268 | /* We cannot make the is_np() decision here if it is a remote CPU | ||
269 | * because requesting exit_np() requires that we currently use the | ||
270 | * address space of the task. Thus, in the remote case we just send | ||
271 | * the IPI and let schedule() handle the problem. | ||
272 | */ | ||
273 | |||
274 | if (smp_processor_id() == entry->cpu) { | ||
275 | if (entry->scheduled && is_np(entry->scheduled)) | ||
276 | request_exit_np(entry->scheduled); | ||
277 | else | ||
278 | set_tsk_need_resched(current); | ||
279 | } else | ||
280 | /* in case that it is a remote CPU we have to defer the | ||
281 | * decision to the remote CPU | ||
282 | * FIXME: We could save a few IPI's here if we leave the flag | ||
283 | * set when we are waiting for a np_exit(). | ||
284 | */ | ||
285 | if (!test_will_schedule(entry->cpu)) | ||
286 | smp_send_reschedule(entry->cpu); | ||
287 | } | ||
288 | |||
289 | /* requeue - Put an unlinked task into c-edf domain. | ||
290 | * Caller must hold cedf_lock. | ||
291 | */ | ||
292 | static noinline void requeue(struct task_struct* task) | ||
293 | { | ||
294 | cedf_domain_t* cedf; | ||
295 | rt_domain_t* edf; | ||
296 | |||
297 | BUG_ON(!task); | ||
298 | /* sanity check rt_list before insertion */ | ||
299 | BUG_ON(is_queued(task)); | ||
300 | |||
301 | /* Get correct real-time domain. */ | ||
302 | cedf = task_cedf(task); | ||
303 | edf = &cedf->domain; | ||
304 | |||
305 | if (is_released(task, litmus_clock())) | ||
306 | __add_ready(edf, task); | ||
307 | else { | ||
308 | /* it has got to wait */ | ||
309 | add_release(edf, task); | ||
310 | } | ||
311 | } | ||
312 | |||
313 | static void check_for_preemptions(cedf_domain_t* cedf) | ||
314 | { | ||
315 | cpu_entry_t *last; | ||
316 | struct task_struct *task; | ||
317 | struct list_head *cedf_cpu_queue; | ||
318 | cedf_cpu_queue = &cedf->cedf_cpu_queue; | ||
319 | |||
320 | for(last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list); | ||
321 | edf_preemption_needed(&cedf->domain, last->linked); | ||
322 | last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list)) { | ||
323 | /* preemption necessary */ | ||
324 | task = __take_ready(&cedf->domain); | ||
325 | TRACE("check_for_preemptions: task %d linked to %d, state:%d\n", | ||
326 | task->pid, last->cpu, task->state); | ||
327 | if (last->linked) | ||
328 | requeue(last->linked); | ||
329 | link_task_to_cpu(task, last); | ||
330 | preempt(last); | ||
331 | } | ||
332 | |||
333 | } | ||
334 | |||
335 | /* cedf_job_arrival: task is either resumed or released */ | ||
336 | static noinline void cedf_job_arrival(struct task_struct* task) | ||
337 | { | ||
338 | cedf_domain_t* cedf; | ||
339 | rt_domain_t* edf; | ||
340 | |||
341 | BUG_ON(!task); | ||
342 | |||
343 | /* Get correct real-time domain. */ | ||
344 | cedf = task_cedf(task); | ||
345 | edf = &cedf->domain; | ||
346 | |||
347 | /* first queue arriving job */ | ||
348 | requeue(task); | ||
349 | |||
350 | /* then check for any necessary preemptions */ | ||
351 | check_for_preemptions(cedf); | ||
352 | } | ||
353 | |||
354 | /* check for current job releases */ | ||
355 | static void cedf_release_jobs(rt_domain_t* rt, struct heap* tasks) | ||
356 | { | ||
357 | cedf_domain_t* cedf = container_of(rt, cedf_domain_t, domain); | ||
358 | unsigned long flags; | ||
359 | |||
360 | spin_lock_irqsave(&cedf->slock, flags); | ||
361 | |||
362 | __merge_ready(&cedf->domain, tasks); | ||
363 | check_for_preemptions(cedf); | ||
364 | spin_unlock_irqrestore(&cedf->slock, flags); | ||
365 | } | ||
366 | |||
367 | /* cedf_tick - this function is called for every local timer | ||
368 | * interrupt. | ||
369 | * | ||
370 | * checks whether the current task's budget has expired and, | ||
371 | * if so, whether we need to preempt it | ||
372 | */ | ||
373 | static void cedf_tick(struct task_struct* t) | ||
374 | { | ||
375 | BUG_ON(!t); | ||
376 | |||
377 | if (is_realtime(t) && budget_exhausted(t)) { | ||
378 | if (!is_np(t)) { | ||
379 | /* np tasks will be preempted when they become | ||
380 | * preemptable again | ||
381 | */ | ||
382 | set_tsk_need_resched(t); | ||
383 | set_will_schedule(); | ||
384 | TRACE("cedf_scheduler_tick: " | ||
385 | "%d is preemptable (state:%d) " | ||
386 | " => FORCE_RESCHED\n", t->pid, t->state); | ||
387 | } else { | ||
388 | TRACE("cedf_scheduler_tick: " | ||
389 | "%d is non-preemptable (state:%d), " | ||
390 | "preemption delayed.\n", t->pid, t->state); | ||
391 | request_exit_np(t); | ||
392 | } | ||
393 | } | ||
394 | } | ||
395 | |||
396 | /* caller holds cedf_lock */ | ||
397 | static noinline void job_completion(struct task_struct *t, int forced) | ||
398 | { | ||
399 | BUG_ON(!t); | ||
400 | |||
401 | sched_trace_task_completion(t, forced); | ||
402 | |||
403 | TRACE_TASK(t, "job_completion(). [state:%d]\n", t->state); | ||
404 | |||
405 | /* set flags */ | ||
406 | set_rt_flags(t, RT_F_SLEEP); | ||
407 | /* prepare for next period */ | ||
408 | prepare_for_next_period(t); | ||
409 | /* unlink */ | ||
410 | unlink(t); | ||
411 | /* requeue | ||
412 | * But don't requeue a blocking task. */ | ||
413 | if (is_running(t)) | ||
414 | cedf_job_arrival(t); | ||
415 | } | ||
416 | |||
417 | /* Getting schedule() right is a bit tricky. schedule() may not make any | ||
418 | * assumptions on the state of the current task since it may be called for a | ||
419 | * number of reasons. The reasons include that a scheduler_tick() determined it | ||
420 | * was necessary, that sys_exit_np() was called, that some Linux | ||
421 | * subsystem determined so, or even (in the worst case) that there is a bug | ||
422 | * hidden somewhere. Thus, we must take extreme care to determine what the | ||
423 | * current state is. | ||
424 | * | ||
425 | * The CPU could currently be scheduling a task (or not), be linked (or not). | ||
426 | * | ||
427 | * The following assertions for the scheduled task could hold: | ||
428 | * | ||
429 | * - !is_running(scheduled) // the job blocks | ||
430 | * - scheduled->timeslice == 0 // the job completed (forcefully) | ||
431 | * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | ||
432 | * - linked != scheduled // we need to reschedule (for any reason) | ||
433 | * - is_np(scheduled) // rescheduling must be delayed, | ||
434 | * sys_exit_np must be requested | ||
435 | * | ||
436 | * Any of these can occur together. | ||
437 | */ | ||
438 | static struct task_struct* cedf_schedule(struct task_struct * prev) | ||
439 | { | ||
440 | cedf_domain_t* cedf = local_cedf; | ||
441 | rt_domain_t* edf = &cedf->domain; | ||
442 | cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); | ||
443 | int out_of_time, sleep, preempt, np, | ||
444 | exists, blocks; | ||
445 | struct task_struct* next = NULL; | ||
446 | |||
447 | BUG_ON(!prev); | ||
448 | BUG_ON(!cedf); | ||
449 | BUG_ON(!edf); | ||
450 | BUG_ON(!entry); | ||
451 | BUG_ON(cedf != remote_cedf(entry->cpu)); | ||
452 | BUG_ON(is_realtime(prev) && cedf != task_cedf(prev)); | ||
453 | |||
454 | /* Will be released in finish_switch. */ | ||
455 | spin_lock(&cedf->slock); | ||
456 | clear_will_schedule(); | ||
457 | |||
458 | /* sanity checking */ | ||
459 | BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
460 | BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
461 | BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
462 | |||
463 | /* (0) Determine state */ | ||
464 | exists = entry->scheduled != NULL; | ||
465 | blocks = exists && !is_running(entry->scheduled); | ||
466 | out_of_time = exists && budget_exhausted(entry->scheduled); | ||
467 | np = exists && is_np(entry->scheduled); | ||
468 | sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
469 | preempt = entry->scheduled != entry->linked; | ||
470 | |||
471 | /* If a task blocks we have no choice but to reschedule. | ||
472 | */ | ||
473 | if (blocks) | ||
474 | unlink(entry->scheduled); | ||
475 | |||
476 | /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
477 | * We need to make sure to update the link structure anyway in case | ||
478 | * that we are still linked. Multiple calls to request_exit_np() don't | ||
479 | * hurt. | ||
480 | */ | ||
481 | if (np && (out_of_time || preempt || sleep)) { | ||
482 | unlink(entry->scheduled); | ||
483 | request_exit_np(entry->scheduled); | ||
484 | } | ||
485 | |||
486 | /* Any task that is preemptable and either exhausts its execution | ||
487 | * budget or wants to sleep completes. We may have to reschedule after | ||
488 | * this. Don't do a job completion if it blocks (we can't have timers | ||
489 | * running for blocked jobs). Preemptions go first for the same reason. | ||
490 | */ | ||
491 | if (!np && (out_of_time || sleep) && !blocks && !preempt) | ||
492 | job_completion(entry->scheduled, !sleep); | ||
493 | |||
494 | /* Link pending task if we became unlinked. | ||
495 | */ | ||
496 | if (!entry->linked) | ||
497 | link_task_to_cpu(__take_ready(edf), entry); | ||
498 | |||
499 | /* The final scheduling decision. Do we need to switch for some reason? | ||
500 | * If linked is different from scheduled, select linked as next. | ||
501 | */ | ||
502 | if ((!np || blocks) && | ||
503 | entry->linked != entry->scheduled) { | ||
504 | /* Schedule a linked job? */ | ||
505 | if (entry->linked) { | ||
506 | entry->linked->rt_param.scheduled_on = entry->cpu; | ||
507 | next = entry->linked; | ||
508 | } | ||
509 | if (entry->scheduled) { | ||
510 | /* not gonna be scheduled soon */ | ||
511 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
512 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | ||
513 | } | ||
514 | } else | ||
515 | /* Only override the Linux scheduler if we have a real-time task | ||
516 | * scheduled that needs to continue. | ||
517 | */ | ||
518 | if (exists) | ||
519 | next = prev; | ||
520 | |||
521 | spin_unlock(&cedf->slock); | ||
522 | |||
523 | return next; | ||
524 | } | ||
525 | |||
526 | /* _finish_switch - we just finished the switch away from prev | ||
527 | */ | ||
528 | static void cedf_finish_switch(struct task_struct *prev) | ||
529 | { | ||
530 | cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); | ||
531 | |||
532 | BUG_ON(!prev); | ||
533 | BUG_ON(!entry); | ||
534 | |||
535 | entry->scheduled = is_realtime(current) ? current : NULL; | ||
536 | } | ||
537 | |||
538 | /* Prepare a task for running in RT mode | ||
539 | */ | ||
540 | static void cedf_task_new(struct task_struct *t, int on_rq, int running) | ||
541 | { | ||
542 | unsigned long flags; | ||
543 | cedf_domain_t* cedf = task_cedf(t); | ||
544 | cpu_entry_t* entry; | ||
545 | |||
546 | BUG_ON(!cedf); | ||
547 | |||
548 | spin_lock_irqsave(&cedf->slock, flags); | ||
549 | if (running) { | ||
550 | entry = &per_cpu(cedf_cpu_entries, task_cpu(t)); | ||
551 | BUG_ON(!entry); | ||
552 | BUG_ON(entry->scheduled); | ||
553 | entry->scheduled = t; | ||
554 | t->rt_param.scheduled_on = task_cpu(t); | ||
555 | } else | ||
556 | t->rt_param.scheduled_on = NO_CPU; | ||
557 | t->rt_param.linked_on = NO_CPU; | ||
558 | |||
559 | /* setup job params */ | ||
560 | release_at(t, litmus_clock()); | ||
561 | |||
562 | cedf_job_arrival(t); | ||
563 | spin_unlock_irqrestore(&cedf->slock, flags); | ||
564 | } | ||
565 | |||
566 | |||
567 | static void cedf_task_wake_up(struct task_struct *task) | ||
568 | { | ||
569 | unsigned long flags; | ||
570 | cedf_domain_t* cedf; | ||
571 | lt_t now; | ||
572 | |||
573 | BUG_ON(!task); | ||
574 | |||
575 | cedf = task_cedf(task); | ||
576 | BUG_ON(!cedf); | ||
577 | |||
578 | spin_lock_irqsave(&cedf->slock, flags); | ||
579 | /* We need to take suspensions because of semaphores into | ||
580 | * account! If a job resumes after being suspended due to acquiring | ||
581 | * a semaphore, it should never be treated as a new job release. | ||
582 | */ | ||
583 | if (get_rt_flags(task) == RT_F_EXIT_SEM) { | ||
584 | set_rt_flags(task, RT_F_RUNNING); | ||
585 | } else { | ||
586 | now = litmus_clock(); | ||
587 | if (is_tardy(task, now)) { | ||
588 | /* new sporadic release */ | ||
589 | release_at(task, now); | ||
590 | sched_trace_task_release(task); | ||
591 | } | ||
592 | else if (task->time_slice) | ||
593 | /* came back in time before deadline | ||
594 | */ | ||
595 | set_rt_flags(task, RT_F_RUNNING); | ||
596 | } | ||
597 | cedf_job_arrival(task); | ||
598 | spin_unlock_irqrestore(&cedf->slock, flags); | ||
599 | } | ||
600 | |||
601 | |||
602 | static void cedf_task_block(struct task_struct *t) | ||
603 | { | ||
604 | unsigned long flags; | ||
605 | |||
606 | BUG_ON(!t); | ||
607 | |||
608 | /* unlink if necessary */ | ||
609 | spin_lock_irqsave(&task_cedf(t)->slock, flags); | ||
610 | unlink(t); | ||
611 | spin_unlock_irqrestore(&task_cedf(t)->slock, flags); | ||
612 | |||
613 | BUG_ON(!is_realtime(t)); | ||
614 | } | ||
615 | |||
616 | static void cedf_task_exit(struct task_struct * t) | ||
617 | { | ||
618 | unsigned long flags; | ||
619 | |||
620 | BUG_ON(!t); | ||
621 | |||
622 | /* unlink if necessary */ | ||
623 | spin_lock_irqsave(&task_cedf(t)->slock, flags); | ||
624 | unlink(t); | ||
625 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
626 | cedf_cpu_entries_array[tsk_rt(t)->scheduled_on]-> | ||
627 | scheduled = NULL; | ||
628 | tsk_rt(t)->scheduled_on = NO_CPU; | ||
629 | } | ||
630 | spin_unlock_irqrestore(&task_cedf(t)->slock, flags); | ||
631 | |||
632 | BUG_ON(!is_realtime(t)); | ||
633 | TRACE_TASK(t, "RIP\n"); | ||
634 | } | ||
635 | |||
636 | static long cedf_admit_task(struct task_struct* tsk) | ||
637 | { | ||
638 | return (task_cpu(tsk) >= task_cedf(tsk)->first_cpu && | ||
639 | task_cpu(tsk) <= task_cedf(tsk)->last_cpu) ? 0 : -EINVAL; | ||
640 | } | ||
641 | |||
642 | |||
643 | /* Plugin object */ | ||
644 | static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | ||
645 | .plugin_name = "C-EDF", | ||
646 | .finish_switch = cedf_finish_switch, | ||
647 | .tick = cedf_tick, | ||
648 | .task_new = cedf_task_new, | ||
649 | .complete_job = complete_job, | ||
650 | .task_exit = cedf_task_exit, | ||
651 | .schedule = cedf_schedule, | ||
652 | .task_wake_up = cedf_task_wake_up, | ||
653 | .task_block = cedf_task_block, | ||
654 | .admit_task = cedf_admit_task | ||
655 | }; | ||
656 | |||
657 | static void cedf_domain_init(int first_cpu, int last_cpu) | ||
658 | { | ||
659 | int cpu; | ||
660 | |||
661 | /* Create new domain for this cluster. */ | ||
662 | cedf_domain_t *new_cedf_domain = kmalloc(sizeof(*new_cedf_domain), | ||
663 | GFP_KERNEL); | ||
664 | |||
665 | /* Initialize cluster domain. */ | ||
666 | edf_domain_init(&new_cedf_domain->domain, NULL, | ||
667 | cedf_release_jobs); | ||
668 | new_cedf_domain->first_cpu = first_cpu; | ||
669 | new_cedf_domain->last_cpu = last_cpu; | ||
670 | INIT_LIST_HEAD(&new_cedf_domain->cedf_cpu_queue); | ||
671 | |||
672 | /* Assign all cpus in cluster to point to this domain. */ | ||
673 | for (cpu = first_cpu; cpu <= last_cpu; cpu++) { | ||
674 | remote_cedf(cpu) = new_cedf_domain; | ||
675 | cedf_domains_array[cpu] = new_cedf_domain; | ||
676 | } | ||
677 | } | ||
678 | |||
679 | static int __init init_cedf(void) | ||
680 | { | ||
681 | int cpu; | ||
682 | cpu_entry_t *entry; | ||
683 | |||
684 | /* initialize CPU state */ | ||
685 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
686 | entry = &per_cpu(cedf_cpu_entries, cpu); | ||
687 | cedf_cpu_entries_array[cpu] = entry; | ||
688 | atomic_set(&entry->will_schedule, 0); | ||
689 | entry->linked = NULL; | ||
690 | entry->scheduled = NULL; | ||
691 | entry->cpu = cpu; | ||
692 | INIT_LIST_HEAD(&entry->list); | ||
693 | } | ||
694 | |||
695 | /* initialize all cluster domains */ | ||
696 | for (cpu = 0; cpu < NR_CPUS; cpu += cluster_size) | ||
697 | cedf_domain_init(cpu, cpu+cluster_size-1); | ||
698 | |||
699 | return register_sched_plugin(&cedf_plugin); | ||
700 | } | ||
701 | |||
702 | module_init(init_cedf); | ||
703 | |||
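
For reference, a minimal user-space sketch (not part of the removed file or of this commit) of the static CPU-to-cluster mapping that the deleted init_cedf()/cedf_domain_init() pair established: each run of cluster_size consecutive CPUs shares one cedf_domain_t spanning [first_cpu, last_cpu]. NR_CPUS is given a placeholder value here and the printed output is purely illustrative.

	#include <stdio.h>

	#define NR_CPUS 16                 /* placeholder value for this sketch */
	static const int cluster_size = 4; /* matches the removed plugin's default */

	int main(void)
	{
		int cpu;

		/* Mirror the loop in init_cedf(): CPUs 0..3 form one cluster,
		 * CPUs 4..7 the next, and so on. */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			int first_cpu = (cpu / cluster_size) * cluster_size;
			int last_cpu  = first_cpu + cluster_size - 1;

			printf("cpu %2d -> cluster [%d, %d]\n",
			       cpu, first_cpu, last_cpu);
		}
		return 0;
	}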