Diffstat (limited to 'litmus/sched_cedf.c')
 -rw-r--r--  litmus/sched_cedf.c | 772
 1 file changed, 772 insertions, 0 deletions
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
new file mode 100644
index 000000000000..e57a11afda16
--- /dev/null
+++ b/litmus/sched_cedf.c
@@ -0,0 +1,772 @@
1 | /* | ||
2 | * litmus/sched_cedf.c | ||
3 | * | ||
4 | * Implementation of the C-EDF scheduling algorithm. | ||
5 | * | ||
6 | * This implementation is based on G-EDF: | ||
7 | * - CPUs are clustered around L2 or L3 caches. | ||
8 | * - Cluster topology is automatically detected (this is arch dependent
9 | * and currently works only on x86, and only with modern
10 | * CPUs that export cpuid4 information).
11 | * - The plugin _does not_ attempt to put tasks in the right cluster, i.e.,
12 | * the programmer needs to be aware of the topology to place tasks
13 | * in the desired cluster (see the placement sketch after this comment).
14 | * - The default clustering is around the L2 cache (cache index = 2);
15 | * supported clusterings are: L1 (private cache: P-EDF), L2, L3, ALL (all
16 | * online CPUs are placed in a single cluster).
17 | * | ||
18 | * For details on functions, take a look at sched_gsn_edf.c | ||
19 | * | ||
20 | * Currently, we do not support changes in the number of online cpus. | ||
21 | * If num_online_cpus() changes dynamically, the plugin is broken.
22 | * | ||
23 | * This version uses the simple approach and serializes all scheduling | ||
24 | * decisions by the use of a queue lock. This is probably not the | ||
25 | * best way to do it, but it should suffice for now. | ||
26 | */ | ||
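
/* Placement sketch (illustrative, not part of this file): the cluster of a
 * task is derived from its task_params.cpu field (see task_cpu_cluster() and
 * cedf_admit_task() below), so a task must migrate to a CPU of the desired
 * cluster and set that CPU as its partition before becoming a real-time
 * task. A minimal userspace sketch, assuming liblitmus-style helpers (the
 * exact names are assumptions):
 *
 *	struct rt_task param;
 *	init_rt_task_param(&param);
 *	param.exec_cost = ms2ns(10);
 *	param.period    = ms2ns(100);
 *	param.cpu       = 2;               // any CPU of the target cluster
 *	be_migrate_to(param.cpu);          // run inside the target cluster
 *	set_rt_task_param(gettid(), &param);
 *	task_mode(LITMUS_RT_TASK);         // switch to real-time mode
 */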
27 | |||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/percpu.h> | ||
30 | #include <linux/sched.h> | ||
31 | |||
32 | #include <litmus/litmus.h> | ||
33 | #include <litmus/jobs.h> | ||
34 | #include <litmus/sched_plugin.h> | ||
35 | #include <litmus/edf_common.h> | ||
36 | #include <litmus/sched_trace.h> | ||
37 | |||
38 | #include <litmus/bheap.h> | ||
39 | |||
40 | #include <linux/module.h> | ||
41 | |||
42 | /* forward declaration... a funny thing with C ;) */ | ||
43 | struct clusterdomain; | ||
44 | |||
45 | /* cpu_entry_t - maintain the linked and scheduled state | ||
46 | * | ||
47 | * A cpu also contains a pointer to the cedf_domain_t cluster | ||
48 | * that owns it (struct clusterdomain*) | ||
49 | */ | ||
50 | typedef struct { | ||
51 | int cpu; | ||
52 | struct clusterdomain* cluster; /* owning cluster */ | ||
53 | struct task_struct* linked; /* only RT tasks */ | ||
54 | struct task_struct* scheduled; /* only RT tasks */ | ||
55 | atomic_t will_schedule; /* prevent unneeded IPIs */ | ||
56 | struct bheap_node* hn; | ||
57 | } cpu_entry_t; | ||
58 | |||
59 | /* one cpu_entry_t per CPU */ | ||
60 | DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries); | ||
61 | |||
62 | #define set_will_schedule() \ | ||
63 | (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 1)) | ||
64 | #define clear_will_schedule() \ | ||
65 | (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 0)) | ||
66 | #define test_will_schedule(cpu) \ | ||
67 | (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) | ||
68 | |||
69 | /* | ||
70 | * In C-EDF there is a cedf domain _per_ cluster | ||
71 | * The number of clusters is determined dynamically from the total
72 | * number of CPUs and the cluster size.
73 | */ | ||
74 | typedef struct clusterdomain { | ||
75 | /* rt_domain for this cluster */ | ||
76 | rt_domain_t domain; | ||
77 | /* cpus in this cluster */ | ||
78 | cpu_entry_t* *cpus; | ||
79 | /* map of this cluster's cpus */
80 | cpumask_var_t cpu_map; | ||
81 | /* the cpus queue themselves according to priority in here */ | ||
82 | struct bheap_node *heap_node; | ||
83 | struct bheap cpu_heap; | ||
84 | /* lock for this cluster */ | ||
85 | #define lock domain.ready_lock | ||
86 | } cedf_domain_t; | ||
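
/* Note: "lock" above is a macro, not a struct member; cluster->lock expands
 * to cluster->domain.ready_lock, i.e., the ready-queue lock of the embedded
 * rt_domain_t doubles as the per-cluster scheduler lock. For example,
 *	spin_lock_irqsave(&cluster->lock, flags);
 * and
 *	spin_lock_irqsave(&cluster->domain.ready_lock, flags);
 * (the latter is used in cedf_task_new() below) acquire the same lock.
 */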
87 | |||
88 | /* a cedf_domain per cluster; allocation is done at init/activation time */ | ||
89 | cedf_domain_t *cedf; | ||
90 | |||
91 | #define remote_cluster(cpu) ((cedf_domain_t *) per_cpu(cedf_cpu_entries, cpu).cluster) | ||
92 | #define task_cpu_cluster(task) remote_cluster(get_partition(task)) | ||
93 | |||
94 | /* Uncomment WANT_ALL_SCHED_EVENTS if you want to see all scheduling | ||
95 | * decisions in the TRACE() log; uncomment VERBOSE_INIT for verbose | ||
96 | * information during the initialization of the plugin (e.g., topology) | ||
97 | #define WANT_ALL_SCHED_EVENTS | ||
98 | */ | ||
99 | #define VERBOSE_INIT | ||
100 | |||
101 | static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | ||
102 | { | ||
103 | cpu_entry_t *a, *b; | ||
104 | a = _a->value; | ||
105 | b = _b->value; | ||
106 | /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
107 | * the top of the heap. | ||
108 | */ | ||
109 | return edf_higher_prio(b->linked, a->linked); | ||
110 | } | ||
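
/* Consequently, bheap_peek() on cpu_heap (see lowest_prio_cpu() below) yields
 * the CPU whose linked task has the lowest EDF priority; a CPU with no linked
 * task compares as lowest of all, so idle CPUs are preferred targets when
 * linking newly arrived jobs.
 */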
111 | |||
112 | /* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
113 | * order in the cpu queue. Caller must hold the cluster lock.
114 | */ | ||
115 | static void update_cpu_position(cpu_entry_t *entry) | ||
116 | { | ||
117 | cedf_domain_t *cluster = entry->cluster; | ||
118 | |||
119 | if (likely(bheap_node_in_heap(entry->hn))) | ||
120 | bheap_delete(cpu_lower_prio, | ||
121 | &cluster->cpu_heap, | ||
122 | entry->hn); | ||
123 | |||
124 | bheap_insert(cpu_lower_prio, &cluster->cpu_heap, entry->hn); | ||
125 | } | ||
126 | |||
127 | /* caller must hold the cluster lock */
128 | static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster) | ||
129 | { | ||
130 | struct bheap_node* hn; | ||
131 | hn = bheap_peek(cpu_lower_prio, &cluster->cpu_heap); | ||
132 | return hn->value; | ||
133 | } | ||
134 | |||
135 | |||
136 | /* link_task_to_cpu - Update the link of a CPU. | ||
137 | * Handles the case where the to-be-linked task is already | ||
138 | * scheduled on a different CPU. | ||
139 | */ | ||
140 | static noinline void link_task_to_cpu(struct task_struct* linked, | ||
141 | cpu_entry_t *entry) | ||
142 | { | ||
143 | cpu_entry_t *sched; | ||
144 | struct task_struct* tmp; | ||
145 | int on_cpu; | ||
146 | |||
147 | BUG_ON(linked && !is_realtime(linked)); | ||
148 | |||
149 | /* Mark the currently linked task (if any) as no longer linked. */
150 | if (entry->linked) { | ||
151 | entry->linked->rt_param.linked_on = NO_CPU; | ||
152 | } | ||
153 | |||
154 | /* Link new task to CPU. */ | ||
155 | if (linked) { | ||
156 | set_rt_flags(linked, RT_F_RUNNING); | ||
157 | /* handle the case where the task is already scheduled somewhere! */
158 | on_cpu = linked->rt_param.scheduled_on; | ||
159 | if (on_cpu != NO_CPU) { | ||
160 | sched = &per_cpu(cedf_cpu_entries, on_cpu); | ||
161 | /* this should only happen if not linked already */ | ||
162 | BUG_ON(sched->linked == linked); | ||
163 | |||
164 | /* If we are already scheduled on the CPU to which we | ||
165 | * wanted to link, we don't need to do the swap -- | ||
166 | * we just link ourselves to the CPU and depend on | ||
167 | * the caller to get things right. | ||
168 | */ | ||
169 | if (entry != sched) { | ||
170 | TRACE_TASK(linked, | ||
171 | "already scheduled on %d, updating link.\n", | ||
172 | sched->cpu); | ||
173 | tmp = sched->linked; | ||
174 | linked->rt_param.linked_on = sched->cpu; | ||
175 | sched->linked = linked; | ||
176 | update_cpu_position(sched); | ||
177 | linked = tmp; | ||
178 | } | ||
179 | } | ||
180 | if (linked) /* might be NULL due to swap */ | ||
181 | linked->rt_param.linked_on = entry->cpu; | ||
182 | } | ||
183 | entry->linked = linked; | ||
184 | #ifdef WANT_ALL_SCHED_EVENTS | ||
185 | if (linked) | ||
186 | TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | ||
187 | else | ||
188 | TRACE("NULL linked to %d.\n", entry->cpu); | ||
189 | #endif | ||
190 | update_cpu_position(entry); | ||
191 | } | ||
192 | |||
193 | /* unlink - Make sure a task is not linked any longer to an entry | ||
194 | * where it was linked before. Caller must hold the cluster lock.
195 | */ | ||
196 | static noinline void unlink(struct task_struct* t) | ||
197 | { | ||
198 | cpu_entry_t *entry; | ||
199 | |||
200 | if (unlikely(!t)) { | ||
201 | TRACE_BUG_ON(!t); | ||
202 | return; | ||
203 | } | ||
204 | |||
205 | |||
206 | if (t->rt_param.linked_on != NO_CPU) { | ||
207 | /* unlink */ | ||
208 | entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on); | ||
209 | t->rt_param.linked_on = NO_CPU; | ||
210 | link_task_to_cpu(NULL, entry); | ||
211 | } else if (is_queued(t)) { | ||
212 | /* This is an interesting situation: t is scheduled, | ||
213 | * but was just recently unlinked. It cannot be | ||
214 | * linked anywhere else (because then it would have | ||
215 | * been relinked to this CPU), thus it must be in some | ||
216 | * queue. We must remove it from the list in this | ||
217 | * case. | ||
218 | * | ||
219 | * In the C-EDF case it should be somewhere in a queue of
220 | * its cluster's domain, which we can obtain via
221 | * task_cpu_cluster().
222 | */ | ||
223 | remove(&(task_cpu_cluster(t))->domain, t); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | |||
228 | /* preempt - force a CPU to reschedule | ||
229 | */ | ||
230 | static void preempt(cpu_entry_t *entry) | ||
231 | { | ||
232 | preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
233 | } | ||
234 | |||
235 | /* requeue - Put an unlinked task back into its cluster's domain.
236 | * Caller must hold the cluster lock.
237 | */ | ||
238 | static noinline void requeue(struct task_struct* task) | ||
239 | { | ||
240 | cedf_domain_t *cluster = task_cpu_cluster(task); | ||
241 | BUG_ON(!task); | ||
242 | /* sanity check before insertion */ | ||
243 | BUG_ON(is_queued(task)); | ||
244 | |||
245 | if (is_released(task, litmus_clock())) | ||
246 | __add_ready(&cluster->domain, task); | ||
247 | else { | ||
248 | /* it has got to wait */ | ||
249 | add_release(&cluster->domain, task); | ||
250 | } | ||
251 | } | ||
252 | |||
253 | /* check for any necessary preemptions */ | ||
254 | static void check_for_preemptions(cedf_domain_t *cluster) | ||
255 | { | ||
256 | struct task_struct *task; | ||
257 | cpu_entry_t* last; | ||
258 | |||
259 | for(last = lowest_prio_cpu(cluster); | ||
260 | edf_preemption_needed(&cluster->domain, last->linked); | ||
261 | last = lowest_prio_cpu(cluster)) { | ||
262 | /* preemption necessary */ | ||
263 | task = __take_ready(&cluster->domain); | ||
264 | TRACE("check_for_preemptions: attempting to link task %d to %d\n", | ||
265 | task->pid, last->cpu); | ||
266 | if (last->linked) | ||
267 | requeue(last->linked); | ||
268 | link_task_to_cpu(task, last); | ||
269 | preempt(last); | ||
270 | } | ||
271 | } | ||
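
/* Worked example (two CPUs in one cluster): CPU0 is linked to a job with
 * deadline 50, CPU1 to a job with deadline 90, and a job with deadline 70 is
 * released. lowest_prio_cpu() returns CPU1; 70 < 90, so the deadline-90 job
 * is requeued, the deadline-70 job is linked to CPU1, and CPU1 is preempted.
 * On the next iteration the requeued deadline-90 job no longer beats the new
 * lowest-priority CPU (CPU1, now at deadline 70), so the loop terminates.
 */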
272 | |||
273 | /* cedf_job_arrival: task is either resumed or released */ | ||
274 | static noinline void cedf_job_arrival(struct task_struct* task) | ||
275 | { | ||
276 | cedf_domain_t *cluster = task_cpu_cluster(task); | ||
277 | BUG_ON(!task); | ||
278 | |||
279 | requeue(task); | ||
280 | check_for_preemptions(cluster); | ||
281 | } | ||
282 | |||
283 | static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | ||
284 | { | ||
285 | cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain); | ||
286 | unsigned long flags; | ||
287 | |||
288 | spin_lock_irqsave(&cluster->lock, flags); | ||
289 | |||
290 | __merge_ready(&cluster->domain, tasks); | ||
291 | check_for_preemptions(cluster); | ||
292 | |||
293 | spin_unlock_irqrestore(&cluster->lock, flags); | ||
294 | } | ||
295 | |||
296 | /* caller holds the cluster lock */
297 | static noinline void job_completion(struct task_struct *t, int forced) | ||
298 | { | ||
299 | BUG_ON(!t); | ||
300 | |||
301 | sched_trace_task_completion(t, forced); | ||
302 | |||
303 | TRACE_TASK(t, "job_completion().\n"); | ||
304 | |||
305 | /* set flags */ | ||
306 | set_rt_flags(t, RT_F_SLEEP); | ||
307 | /* prepare for next period */ | ||
308 | prepare_for_next_period(t); | ||
309 | if (is_released(t, litmus_clock())) | ||
310 | sched_trace_task_release(t); | ||
311 | /* unlink */ | ||
312 | unlink(t); | ||
313 | /* requeue | ||
314 | * But don't requeue a blocking task. */ | ||
315 | if (is_running(t)) | ||
316 | cedf_job_arrival(t); | ||
317 | } | ||
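
/* The "forced" flag distinguishes budget-exhaustion completions from
 * voluntary ones: cedf_schedule() below invokes job_completion(t, !sleep),
 * so forced is true when the budget ran out before the job signalled
 * completion itself (the RT_F_SLEEP case set by the job-completion syscall).
 */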
318 | |||
319 | /* cedf_tick - this function is called for every local timer | ||
320 | * interrupt. | ||
321 | * | ||
322 | * checks whether the current task has exhausted its budget and, if so,
323 | * either forces a reschedule or requests exit from a non-preemptive section
324 | */ | ||
325 | static void cedf_tick(struct task_struct* t) | ||
326 | { | ||
327 | if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { | ||
328 | if (!is_np(t)) { | ||
329 | /* np tasks will be preempted when they become | ||
330 | * preemptable again | ||
331 | */ | ||
332 | set_tsk_need_resched(t); | ||
333 | set_will_schedule(); | ||
334 | TRACE("cedf_scheduler_tick: " | ||
335 | "%d is preemptable " | ||
336 | " => FORCE_RESCHED\n", t->pid); | ||
337 | } else if (is_user_np(t)) { | ||
338 | TRACE("cedf_scheduler_tick: " | ||
339 | "%d is non-preemptable, " | ||
340 | "preemption delayed.\n", t->pid); | ||
341 | request_exit_np(t); | ||
342 | } | ||
343 | } | ||
344 | } | ||
345 | |||
346 | /* Getting schedule() right is a bit tricky. schedule() may not make any | ||
347 | * assumptions on the state of the current task since it may be called for a | ||
348 | * number of reasons. The reasons include a scheduler_tick() that determined
349 | * a reschedule was necessary, a call to sys_exit_np(), some Linux
350 | * subsystem determining so, or even (in the worst case) a bug
351 | * hidden somewhere. Thus, we must take extreme care to determine what the | ||
352 | * current state is. | ||
353 | * | ||
354 | * The CPU could currently be scheduling a task (or not), be linked (or not). | ||
355 | * | ||
356 | * The following assertions for the scheduled task could hold: | ||
357 | * | ||
358 | * - !is_running(scheduled) // the job blocks | ||
359 | * - scheduled->timeslice == 0 // the job completed (forcefully) | ||
360 | * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | ||
361 | * - linked != scheduled // we need to reschedule (for any reason) | ||
362 | * - is_np(scheduled) // rescheduling must be delayed, | ||
363 | * sys_exit_np must be requested | ||
364 | * | ||
365 | * Any of these can occur together. | ||
366 | */ | ||
367 | static struct task_struct* cedf_schedule(struct task_struct * prev) | ||
368 | { | ||
369 | cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); | ||
370 | cedf_domain_t *cluster = entry->cluster; | ||
371 | int out_of_time, sleep, preempt, np, exists, blocks; | ||
372 | struct task_struct* next = NULL; | ||
373 | |||
374 | spin_lock(&cluster->lock); | ||
375 | clear_will_schedule(); | ||
376 | |||
377 | /* sanity checking */ | ||
378 | BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
379 | BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
380 | BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
381 | |||
382 | /* (0) Determine state */ | ||
383 | exists = entry->scheduled != NULL; | ||
384 | blocks = exists && !is_running(entry->scheduled); | ||
385 | out_of_time = exists && | ||
386 | budget_enforced(entry->scheduled) && | ||
387 | budget_exhausted(entry->scheduled); | ||
388 | np = exists && is_np(entry->scheduled); | ||
389 | sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
390 | preempt = entry->scheduled != entry->linked; | ||
391 | |||
392 | #ifdef WANT_ALL_SCHED_EVENTS | ||
393 | TRACE_TASK(prev, "invoked cedf_schedule.\n"); | ||
394 | #endif | ||
395 | |||
396 | if (exists) | ||
397 | TRACE_TASK(prev, | ||
398 | "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | ||
399 | "state:%d sig:%d\n", | ||
400 | blocks, out_of_time, np, sleep, preempt, | ||
401 | prev->state, signal_pending(prev)); | ||
402 | if (entry->linked && preempt) | ||
403 | TRACE_TASK(prev, "will be preempted by %s/%d\n", | ||
404 | entry->linked->comm, entry->linked->pid); | ||
405 | |||
406 | |||
407 | /* If a task blocks we have no choice but to reschedule. | ||
408 | */ | ||
409 | if (blocks) | ||
410 | unlink(entry->scheduled); | ||
411 | |||
412 | /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
413 | * We need to make sure to update the link structure anyway in case | ||
414 | * that we are still linked. Multiple calls to request_exit_np() don't | ||
415 | * hurt. | ||
416 | */ | ||
417 | if (np && (out_of_time || preempt || sleep)) { | ||
418 | unlink(entry->scheduled); | ||
419 | request_exit_np(entry->scheduled); | ||
420 | } | ||
421 | |||
422 | /* Any task that is preemptable and either exhausts its execution
423 | * budget or wants to sleep completes its current job. We may have to
424 | * reschedule afterwards. Don't do a job completion if we block (can't have
425 | * timers running for blocked jobs). Preemptions go first for the same reason.
426 | */ | ||
427 | if (!np && (out_of_time || sleep) && !blocks && !preempt) | ||
428 | job_completion(entry->scheduled, !sleep); | ||
429 | |||
430 | /* Link pending task if we became unlinked. | ||
431 | */ | ||
432 | if (!entry->linked) | ||
433 | link_task_to_cpu(__take_ready(&cluster->domain), entry); | ||
434 | |||
435 | /* The final scheduling decision. Do we need to switch for some reason? | ||
436 | * If linked is different from scheduled, then select linked as next. | ||
437 | */ | ||
438 | if ((!np || blocks) && | ||
439 | entry->linked != entry->scheduled) { | ||
440 | /* Schedule a linked job? */ | ||
441 | if (entry->linked) { | ||
442 | entry->linked->rt_param.scheduled_on = entry->cpu; | ||
443 | next = entry->linked; | ||
444 | } | ||
445 | if (entry->scheduled) { | ||
446 | /* not gonna be scheduled soon */ | ||
447 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
448 | TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | ||
449 | } | ||
450 | } else | ||
451 | /* Only override the Linux scheduler if we have a real-time task
452 | * scheduled that needs to continue. | ||
453 | */ | ||
454 | if (exists) | ||
455 | next = prev; | ||
456 | |||
457 | spin_unlock(&cluster->lock); | ||
458 | |||
459 | #ifdef WANT_ALL_SCHED_EVENTS | ||
460 | TRACE("cedf_lock released, next=0x%p\n", next); | ||
461 | |||
462 | if (next) | ||
463 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
464 | else if (exists && !next) | ||
465 | TRACE("becomes idle at %llu.\n", litmus_clock()); | ||
466 | #endif | ||
467 | |||
468 | |||
469 | return next; | ||
470 | } | ||
471 | |||
472 | |||
473 | /* _finish_switch - we just finished the switch away from prev | ||
474 | */ | ||
475 | static void cedf_finish_switch(struct task_struct *prev) | ||
476 | { | ||
477 | cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); | ||
478 | |||
479 | entry->scheduled = is_realtime(current) ? current : NULL; | ||
480 | #ifdef WANT_ALL_SCHED_EVENTS | ||
481 | TRACE_TASK(prev, "switched away from\n"); | ||
482 | #endif | ||
483 | } | ||
484 | |||
485 | |||
486 | /* Prepare a task for running in RT mode | ||
487 | */ | ||
488 | static void cedf_task_new(struct task_struct * t, int on_rq, int running) | ||
489 | { | ||
490 | unsigned long flags; | ||
491 | cpu_entry_t* entry; | ||
492 | cedf_domain_t* cluster; | ||
493 | |||
494 | TRACE("gsn edf: task new %d\n", t->pid); | ||
495 | |||
496 | /* the cluster doesn't change even if t is running */ | ||
497 | cluster = task_cpu_cluster(t); | ||
498 | |||
499 | spin_lock_irqsave(&cluster->domain.ready_lock, flags); | ||
500 | |||
501 | /* setup job params */ | ||
502 | release_at(t, litmus_clock()); | ||
503 | |||
504 | if (running) { | ||
505 | entry = &per_cpu(cedf_cpu_entries, task_cpu(t)); | ||
506 | BUG_ON(entry->scheduled); | ||
507 | |||
508 | entry->scheduled = t; | ||
509 | tsk_rt(t)->scheduled_on = task_cpu(t); | ||
510 | } else { | ||
511 | t->rt_param.scheduled_on = NO_CPU; | ||
512 | } | ||
513 | t->rt_param.linked_on = NO_CPU; | ||
514 | |||
515 | cedf_job_arrival(t); | ||
516 | spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags); | ||
517 | } | ||
518 | |||
519 | static void cedf_task_wake_up(struct task_struct *task) | ||
520 | { | ||
521 | unsigned long flags; | ||
522 | lt_t now; | ||
523 | cedf_domain_t *cluster; | ||
524 | |||
525 | TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
526 | |||
527 | cluster = task_cpu_cluster(task); | ||
528 | |||
529 | spin_lock_irqsave(&cluster->lock, flags); | ||
530 | /* We need to take suspensions because of semaphores into | ||
531 | * account! If a job resumes after being suspended due to acquiring | ||
532 | * a semaphore, it should never be treated as a new job release. | ||
533 | */ | ||
534 | if (get_rt_flags(task) == RT_F_EXIT_SEM) { | ||
535 | set_rt_flags(task, RT_F_RUNNING); | ||
536 | } else { | ||
537 | now = litmus_clock(); | ||
538 | if (is_tardy(task, now)) { | ||
539 | /* new sporadic release */ | ||
540 | release_at(task, now); | ||
541 | sched_trace_task_release(task); | ||
542 | } | ||
543 | else { | ||
544 | if (task->rt.time_slice) { | ||
545 | /* came back in time before deadline | ||
546 | */ | ||
547 | set_rt_flags(task, RT_F_RUNNING); | ||
548 | } | ||
549 | } | ||
550 | } | ||
551 | cedf_job_arrival(task); | ||
552 | spin_unlock_irqrestore(&cluster->lock, flags); | ||
553 | } | ||
554 | |||
555 | static void cedf_task_block(struct task_struct *t) | ||
556 | { | ||
557 | unsigned long flags; | ||
558 | cedf_domain_t *cluster; | ||
559 | |||
560 | TRACE_TASK(t, "block at %llu\n", litmus_clock()); | ||
561 | |||
562 | cluster = task_cpu_cluster(t); | ||
563 | |||
564 | /* unlink if necessary */ | ||
565 | spin_lock_irqsave(&cluster->lock, flags); | ||
566 | unlink(t); | ||
567 | spin_unlock_irqrestore(&cluster->lock, flags); | ||
568 | |||
569 | BUG_ON(!is_realtime(t)); | ||
570 | } | ||
571 | |||
572 | |||
573 | static void cedf_task_exit(struct task_struct * t) | ||
574 | { | ||
575 | unsigned long flags; | ||
576 | cedf_domain_t *cluster = task_cpu_cluster(t); | ||
577 | |||
578 | /* unlink if necessary */ | ||
579 | spin_lock_irqsave(&cluster->lock, flags); | ||
580 | unlink(t); | ||
581 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
582 | cluster->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
583 | tsk_rt(t)->scheduled_on = NO_CPU; | ||
584 | } | ||
585 | spin_unlock_irqrestore(&cluster->lock, flags); | ||
586 | |||
587 | BUG_ON(!is_realtime(t)); | ||
588 | TRACE_TASK(t, "RIP\n"); | ||
589 | } | ||
590 | |||
591 | static long cedf_admit_task(struct task_struct* tsk) | ||
592 | { | ||
593 | return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; | ||
594 | } | ||
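
/* Admission merely checks that the task already executes on the CPU named in
 * task_params.cpu; since task_cpu_cluster() derives the cluster from the same
 * field, an admitted task starts out inside its designated cluster.
 */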
595 | |||
596 | /* total number of clusters */
597 | static int num_clusters;
598 | /* we do not support clusters of different sizes */
599 | static unsigned int cluster_size; | ||
600 | |||
601 | #ifdef VERBOSE_INIT | ||
602 | static void print_cluster_topology(cpumask_var_t mask, int cpu) | ||
603 | { | ||
604 | int chk; | ||
605 | char buf[255]; | ||
606 | |||
607 | chk = cpulist_scnprintf(buf, 254, mask); | ||
608 | buf[chk] = '\0'; | ||
609 | printk(KERN_INFO "CPU = %d, shared cpu(s) = %s\n", cpu, buf); | ||
610 | |||
611 | } | ||
612 | #endif | ||
613 | |||
614 | static int clusters_allocated = 0; | ||
615 | |||
616 | static void cleanup_cedf(void) | ||
617 | { | ||
618 | int i; | ||
619 | |||
620 | if (clusters_allocated) { | ||
621 | for (i = 0; i < num_clusters; i++) { | ||
622 | kfree(cedf[i].cpus); | ||
623 | kfree(cedf[i].heap_node); | ||
624 | free_cpumask_var(cedf[i].cpu_map); | ||
625 | } | ||
626 | |||
627 | kfree(cedf); | ||
628 | } | ||
629 | } | ||
630 | |||
631 | static long cedf_activate_plugin(void) | ||
632 | { | ||
633 | int i, j, cpu, ccpu, cpu_count; | ||
634 | cpu_entry_t *entry; | ||
635 | |||
636 | cpumask_var_t mask; | ||
637 | int chk = 0; | ||
638 | |||
639 | /* de-allocate old clusters, if any */ | ||
640 | cleanup_cedf(); | ||
641 | |||
642 | printk(KERN_INFO "C-EDF: Activate Plugin, cache index = %d\n", | ||
643 | cluster_cache_index); | ||
644 | |||
645 | /* need to get cluster_size first */ | ||
646 | if(!zalloc_cpumask_var(&mask, GFP_ATOMIC)) | ||
647 | return -ENOMEM; | ||
648 | |||
649 | if (unlikely(cluster_cache_index == num_online_cpus())) { | ||
650 | |||
651 | cluster_size = num_online_cpus(); | ||
652 | } else { | ||
653 | |||
654 | chk = get_shared_cpu_map(mask, 0, cluster_cache_index); | ||
655 | if (chk) { | ||
656 | /* if chk != 0 then it is the max allowed index */ | ||
657 | printk(KERN_INFO "C-EDF: Cannot support cache index = %d\n", | ||
658 | cluster_cache_index); | ||
659 | printk(KERN_INFO "C-EDF: Using cache index = %d\n", | ||
660 | chk); | ||
661 | cluster_cache_index = chk; | ||
662 | } | ||
663 | |||
664 | cluster_size = cpumask_weight(mask); | ||
665 | } | ||
666 | |||
667 | if ((num_online_cpus() % cluster_size) != 0) { | ||
668 | /* this can't be right, some cpus are left out */ | ||
669 | printk(KERN_ERR "C-EDF: Trying to group %d cpus in %d!\n", | ||
670 | num_online_cpus(), cluster_size); | ||
671 | return -1; | ||
672 | } | ||
673 | |||
674 | num_clusters = num_online_cpus() / cluster_size; | ||
675 | printk(KERN_INFO "C-EDF: %d cluster(s) of size = %d\n", | ||
676 | num_clusters, cluster_size); | ||
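
/* Example: on a 24-CPU machine where each L3 cache is shared by 6 CPUs,
 * cluster_cache_index = 3 gives cluster_size = 6 and thus
 * num_clusters = 24 / 6 = 4. Using cluster_cache_index == num_online_cpus()
 * (the "ALL" configuration) yields a single 24-CPU cluster.
 */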
677 | |||
678 | /* initialize clusters */ | ||
679 | cedf = kmalloc(num_clusters * sizeof(cedf_domain_t), GFP_ATOMIC); | ||
680 | for (i = 0; i < num_clusters; i++) { | ||
681 | |||
682 | cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t), | ||
683 | GFP_ATOMIC); | ||
684 | cedf[i].heap_node = kmalloc( | ||
685 | cluster_size * sizeof(struct bheap_node), | ||
686 | GFP_ATOMIC); | ||
687 | bheap_init(&(cedf[i].cpu_heap)); | ||
688 | edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs); | ||
689 | |||
690 | if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) | ||
691 | return -ENOMEM; | ||
692 | } | ||
693 | |||
694 | /* cycle through the clusters and add cpus to them */
695 | for (i = 0; i < num_clusters; i++) { | ||
696 | |||
697 | for_each_online_cpu(cpu) { | ||
698 | /* check if the cpu is already in a cluster */ | ||
699 | for (j = 0; j < num_clusters; j++) | ||
700 | if (cpumask_test_cpu(cpu, cedf[j].cpu_map)) | ||
701 | break; | ||
702 | /* if it is already in a cluster (j < num_clusters), go to next cpu;
703 | * this also avoids reading past the end of the cedf[] array */
704 | if (j < num_clusters)
705 | continue;
705 | |||
706 | /* this cpu isn't in any cluster */ | ||
707 | /* get the shared cpus */ | ||
708 | if (unlikely(cluster_cache_index == num_online_cpus())) | ||
709 | cpumask_copy(mask, cpu_online_mask); | ||
710 | else | ||
711 | get_shared_cpu_map(mask, cpu, cluster_cache_index); | ||
712 | |||
713 | cpumask_copy(cedf[i].cpu_map, mask); | ||
714 | #ifdef VERBOSE_INIT | ||
715 | print_cluster_topology(mask, cpu); | ||
716 | #endif | ||
717 | /* add cpus to current cluster and init cpu_entry_t */ | ||
718 | cpu_count = 0; | ||
719 | for_each_cpu(ccpu, cedf[i].cpu_map) { | ||
720 | |||
721 | entry = &per_cpu(cedf_cpu_entries, ccpu); | ||
722 | cedf[i].cpus[cpu_count] = entry; | ||
723 | atomic_set(&entry->will_schedule, 0); | ||
724 | entry->cpu = ccpu; | ||
725 | entry->cluster = &cedf[i]; | ||
726 | entry->hn = &(cedf[i].heap_node[cpu_count]); | ||
727 | bheap_node_init(&entry->hn, entry); | ||
728 | |||
729 | cpu_count++; | ||
730 | |||
731 | entry->linked = NULL; | ||
732 | entry->scheduled = NULL; | ||
733 | update_cpu_position(entry); | ||
734 | } | ||
735 | /* done with this cluster */ | ||
736 | break; | ||
737 | } | ||
738 | } | ||
739 | |||
740 | free_cpumask_var(mask); | ||
741 | clusters_allocated = 1; | ||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | /* Plugin object */ | ||
746 | static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | ||
747 | .plugin_name = "C-EDF", | ||
748 | .finish_switch = cedf_finish_switch, | ||
749 | .tick = cedf_tick, | ||
750 | .task_new = cedf_task_new, | ||
751 | .complete_job = complete_job, | ||
752 | .task_exit = cedf_task_exit, | ||
753 | .schedule = cedf_schedule, | ||
754 | .task_wake_up = cedf_task_wake_up, | ||
755 | .task_block = cedf_task_block, | ||
756 | .admit_task = cedf_admit_task, | ||
757 | .activate_plugin = cedf_activate_plugin, | ||
758 | }; | ||
759 | |||
760 | |||
761 | static int __init init_cedf(void) | ||
762 | { | ||
763 | return register_sched_plugin(&cedf_plugin); | ||
764 | } | ||
765 | |||
766 | static void clean_cedf(void) | ||
767 | { | ||
768 | cleanup_cedf(); | ||
769 | } | ||
770 | |||
771 | module_init(init_cedf); | ||
772 | module_exit(clean_cedf); | ||