Diffstat (limited to 'litmus/sched_cedf.c')
-rw-r--r--  litmus/sched_cedf.c  889
1 file changed, 889 insertions, 0 deletions
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
new file mode 100644
index 000000000000..cf41d5ec2e28
--- /dev/null
+++ b/litmus/sched_cedf.c
@@ -0,0 +1,889 @@
/*
 * litmus/sched_cedf.c
 *
 * Implementation of the C-EDF scheduling algorithm.
 *
 * This implementation is based on G-EDF:
 * - CPUs are clustered around L2 or L3 caches.
 * - Cluster topology is detected automatically (this is arch-dependent
 *   and currently works only on x86, and only with modern CPUs that
 *   export cpuid4 information).
 * - The plugin _does not_ attempt to put tasks in the right cluster,
 *   i.e., the programmer needs to be aware of the topology to place
 *   tasks in the desired cluster.
 * - The default clustering is around the L2 cache (cache index = 2).
 *   Supported cluster levels are: L1 (private cache: P-EDF), L2, L3,
 *   and ALL (all online CPUs are placed in a single cluster).
 *
 * For details on individual functions, take a look at sched_gsn_edf.c.
 *
 * Currently, we do not support changes in the number of online CPUs.
 * If num_online_cpus() changes at runtime, the plugin breaks.
 *
 * This version takes the simple approach and serializes all scheduling
 * decisions within a cluster by means of a single queue lock. This is
 * probably not the best way to do it, but it should suffice for now.
 */
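
/* Example (a sketch; the numbers are illustrative, not from this file): on
 * a two-socket x86 machine with eight cores per socket, a private L2 per
 * core, and one shared L3 per socket, cluster_config = L3 yields two
 * clusters of eight CPUs each, L2 degenerates to sixteen single-CPU
 * clusters (i.e., P-EDF-like behavior), and GLOBAL_CLUSTER yields a single
 * sixteen-CPU cluster (i.e., G-EDF).
 */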

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <linux/module.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/preempt.h>
#include <litmus/budget.h>
#include <litmus/np.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>

#include <litmus/clustered.h>

#include <litmus/bheap.h>

#ifdef CONFIG_SCHED_CPU_AFFINITY
#include <litmus/affinity.h>
#endif

/* to configure the cluster size */
#include <litmus/litmus_proc.h>
#include <linux/uaccess.h>

/* Reference configuration variable. Determines which cache level is used to
 * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that
 * all CPUs form a single cluster (just like GSN-EDF).
 */
static enum cache_level cluster_config = GLOBAL_CLUSTER;

struct clusterdomain;

/* cpu_entry_t - maintain the linked and scheduled state
 *
 * A cpu also contains a pointer to the cedf_domain_t cluster
 * that owns it (struct clusterdomain*)
 */
typedef struct {
	int cpu;
	struct clusterdomain *cluster;	/* owning cluster */
	struct task_struct *linked;	/* only RT tasks */
	struct task_struct *scheduled;	/* only RT tasks */
	atomic_t will_schedule;		/* prevent unneeded IPIs */
	struct bheap_node *hn;
} cpu_entry_t;

/* one cpu_entry_t per CPU */
DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries);

/*
 * In C-EDF there is a cedf domain _per_ cluster.
 * The number of clusters is determined dynamically according to the
 * total number of CPUs and the cluster size.
 */
typedef struct clusterdomain {
	/* rt_domain for this cluster */
	rt_domain_t domain;
	/* cpus in this cluster */
	cpu_entry_t **cpus;
	/* map of this cluster's cpus */
	cpumask_var_t cpu_map;
	/* the cpus queue themselves according to priority in here */
	struct bheap_node *heap_node;
	struct bheap cpu_heap;
	/* lock for this cluster */
#define cluster_lock domain.ready_lock
} cedf_domain_t;

/* a cedf_domain per cluster; allocation is done at init/activation time */
cedf_domain_t *cedf;

#define remote_cluster(cpu)	((cedf_domain_t *) per_cpu(cedf_cpu_entries, cpu).cluster)
#define task_cpu_cluster(task)	remote_cluster(get_partition(task))

/* Uncomment WANT_ALL_SCHED_EVENTS if you want to see all scheduling
 * decisions in the TRACE() log; uncomment VERBOSE_INIT for verbose
 * information during the initialization of the plugin (e.g., topology)
#define WANT_ALL_SCHED_EVENTS
 */
#define VERBOSE_INIT

static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	cpu_entry_t *a, *b;
	a = _a->value;
	b = _b->value;
	/* Note that a and b are inverted: we want the lowest-priority CPU at
	 * the top of the heap.
	 */
	return edf_higher_prio(b->linked, a->linked);
}
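
/* Worked example (a sketch): suppose CPU0 is linked to a job with deadline 5
 * and CPU1 to a job with deadline 10. edf_higher_prio(b->linked, a->linked)
 * then orders CPU1 ahead of CPU0, so bheap_peek() returns the CPU whose
 * linked job is cheapest to preempt. An idle CPU (linked == NULL) loses
 * every EDF comparison and therefore surfaces at the top first.
 */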

/* update_cpu_position - Move the cpu entry to the correct place to maintain
 * order in the cpu queue. Caller must hold cedf lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
	cedf_domain_t *cluster = entry->cluster;

	if (likely(bheap_node_in_heap(entry->hn)))
		bheap_delete(cpu_lower_prio,
			     &cluster->cpu_heap,
			     entry->hn);

	bheap_insert(cpu_lower_prio, &cluster->cpu_heap, entry->hn);
}

/* caller must hold cedf lock */
static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster)
{
	struct bheap_node* hn;
	hn = bheap_peek(cpu_lower_prio, &cluster->cpu_heap);
	return hn->value;
}


/* link_task_to_cpu - Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
				      cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	BUG_ON(linked && !is_realtime(linked));

	/* Unlink whatever task is currently linked to this CPU. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		/* handle the case where the task is already scheduled
		 * somewhere else! */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(cedf_cpu_entries, on_cpu);
			/* this should only happen if it is not linked
			 * there already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;
#ifdef WANT_ALL_SCHED_EVENTS
	if (linked)
		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
	else
		TRACE("NULL linked to %d.\n", entry->cpu);
#endif
	update_cpu_position(entry);
}
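
/* Example (a sketch): suppose task T is still scheduled on CPU1 (its
 * scheduled_on is 1) when a preemption decision tries to link it to CPU0.
 * The code above instead links T back to CPU1 and hands CPU1's previously
 * linked task (tmp, possibly NULL) over to CPU0. This swap avoids a
 * needless migration while T is still running on CPU1.
 */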

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold cedf_lock.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked. It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 *
		 * In the C-EDF case it should be somewhere in the queue
		 * for its cluster's domain, which we can obtain via
		 * task_cpu_cluster().
		 */
		remove(&(task_cpu_cluster(t))->domain, t);
	}
}


/* preempt - force a CPU to reschedule
 */
static void preempt(cpu_entry_t *entry)
{
	preempt_if_preemptable(entry->scheduled, entry->cpu);
}

/* requeue - Put an unlinked task into its cluster's cedf domain.
 *           Caller must hold cedf_lock.
 */
static noinline void requeue(struct task_struct* task)
{
	cedf_domain_t *cluster = task_cpu_cluster(task);
	BUG_ON(!task);
	/* sanity check before insertion */
	BUG_ON(is_queued(task));

	if (is_early_releasing(task) || is_released(task, litmus_clock()))
		__add_ready(&cluster->domain, task);
	else {
		/* it has got to wait */
		add_release(&cluster->domain, task);
	}
}

#ifdef CONFIG_SCHED_CPU_AFFINITY
static cpu_entry_t* cedf_get_nearest_available_cpu(
				cedf_domain_t *cluster, cpu_entry_t *start)
{
	cpu_entry_t *affinity;

	get_nearest_available_cpu(affinity, start, cedf_cpu_entries,
#ifdef CONFIG_RELEASE_MASTER
				  cluster->domain.release_master,
#else
				  NO_CPU,
#endif
				  cluster->cpu_map);

	/* make sure CPU is in our cluster */
	if (affinity && cpumask_test_cpu(affinity->cpu, cluster->cpu_map))
		return(affinity);
	else
		return(NULL);
}
#endif


/* check for any necessary preemptions */
static void check_for_preemptions(cedf_domain_t *cluster)
{
	struct task_struct *task;
	cpu_entry_t *last;

#ifdef CONFIG_PREFER_LOCAL_LINKING
	cpu_entry_t *local;

	/* Before linking to other CPUs, check first whether the local CPU is
	 * idle. */
	local = this_cpu_ptr(&cedf_cpu_entries);
	task = __peek_ready(&cluster->domain);

	if (task && !local->linked
#ifdef CONFIG_RELEASE_MASTER
	    && likely(local->cpu != cluster->domain.release_master)
#endif
		) {
		task = __take_ready(&cluster->domain);
		TRACE_TASK(task, "linking to local CPU %d to avoid IPI\n", local->cpu);
		link_task_to_cpu(task, local);
		preempt(local);
	}
#endif

	for (last = lowest_prio_cpu(cluster);
	     edf_preemption_needed(&cluster->domain, last->linked);
	     last = lowest_prio_cpu(cluster)) {
		/* preemption necessary */
		task = __take_ready(&cluster->domain);
		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
		      task->pid, last->cpu);
#ifdef CONFIG_SCHED_CPU_AFFINITY
		{
			cpu_entry_t *affinity =
				cedf_get_nearest_available_cpu(cluster,
					&per_cpu(cedf_cpu_entries, task_cpu(task)));
			if (affinity)
				last = affinity;
			else if (requeue_preempted_job(last->linked))
				requeue(last->linked);
		}
#else
		if (requeue_preempted_job(last->linked))
			requeue(last->linked);
#endif
		link_task_to_cpu(task, last);
		preempt(last);
	}
}
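
/* Worked example (illustrative): assume a two-CPU cluster whose CPUs are
 * linked to jobs with deadlines 8 and 12, and a newly released job with
 * deadline 6 at the head of the ready queue. lowest_prio_cpu() yields the
 * CPU linked to deadline 12; edf_preemption_needed() holds, so that job is
 * requeued (if still runnable) and the deadline-6 job is linked in its
 * place. The loop then re-checks deadline 8 against the next queue head and
 * stops as soon as the head no longer beats the worst linked job.
 */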

/* cedf_job_arrival: task is either resumed or released */
static noinline void cedf_job_arrival(struct task_struct* task)
{
	cedf_domain_t *cluster = task_cpu_cluster(task);
	BUG_ON(!task);

	requeue(task);
	check_for_preemptions(cluster);
}

static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
{
	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
	unsigned long flags;

	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);

	__merge_ready(&cluster->domain, tasks);
	check_for_preemptions(cluster);

	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
}

/* caller holds cedf_lock */
static noinline void current_job_completion(int forced)
{
	struct task_struct *t = current;

	sched_trace_task_completion(t, forced);

	TRACE_TASK(t, "job_completion(forced=%d).\n", forced);

	/* set flags */
	tsk_rt(t)->completed = 0;
	/* prepare for next period */
	prepare_for_next_period(t);
	if (is_early_releasing(t) || is_released(t, litmus_clock()))
		sched_trace_task_release(t);
	/* requeue after unlinking,
	 * but don't requeue a blocking task. */
	unlink(t);
	if (is_current_running())
		cedf_job_arrival(t);
}

/* Getting schedule() right is a bit tricky. schedule() may not make any
 * assumptions on the state of the current task since it may be called for a
 * number of reasons. The reasons include: a scheduler_tick() determined that
 * it was necessary, sys_exit_np() was called, some Linux subsystem determined
 * so, or even (in the worst case) because there is a bug hidden somewhere.
 * Thus, we must take extreme care to determine what the current state is.
 *
 * The CPU could currently be scheduling a task (or not), be linked (or not).
 *
 * The following assertions for the scheduled task could hold:
 *
 * - !is_running(scheduled)        // the job blocks
 * - scheduled->timeslice == 0     // the job completed (forcefully)
 * - is_completed()                // the job completed (by syscall)
 * - linked != scheduled           // we need to reschedule (for any reason)
 * - is_np(scheduled)              // rescheduling must be delayed,
 *                                    sys_exit_np must be requested
 *
 * Any of these can occur together.
 */
static struct task_struct* cedf_schedule(struct task_struct * prev)
{
	cpu_entry_t* entry = this_cpu_ptr(&cedf_cpu_entries);
	cedf_domain_t *cluster = entry->cluster;
	int out_of_time, sleep, preempt, np, exists, blocks;
	struct task_struct* next = NULL;

#ifdef CONFIG_RELEASE_MASTER
	/* Bail out early if we are the release master.
	 * The release master never schedules any real-time tasks.
	 */
	if (unlikely(cluster->domain.release_master == entry->cpu)) {
		sched_state_task_picked();
		return NULL;
	}
#endif

	raw_spin_lock(&cluster->cluster_lock);

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists      = entry->scheduled != NULL;
	blocks      = exists && !is_current_running();
	out_of_time = exists && budget_enforced(entry->scheduled)
	                     && budget_exhausted(entry->scheduled);
	np          = exists && is_np(entry->scheduled);
	sleep       = exists && is_completed(entry->scheduled);
	preempt     = entry->scheduled != entry->linked;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked cedf_schedule.\n");
#endif

	if (exists)
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
			   "state:%d sig:%d\n",
			   blocks, out_of_time, np, sleep, preempt,
			   prev->state, signal_pending(prev));
	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		unlink(entry->scheduled);

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (can't have timers
	 * running for blocked jobs).
	 */
	if (!np && (out_of_time || sleep))
		current_job_completion(!sleep);

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&cluster->domain), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) &&
	    entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	sched_state_task_picked();
	raw_spin_unlock(&cluster->cluster_lock);

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("cedf_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif

	return next;
}


/* _finish_switch - we just finished the switch away from prev
 */
static void cedf_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* entry = this_cpu_ptr(&cedf_cpu_entries);

	entry->scheduled = is_realtime(current) ? current : NULL;
#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "switched away from\n");
#endif
}


/* Prepare a task for running in RT mode
 */
static void cedf_task_new(struct task_struct * t, int on_rq, int is_scheduled)
{
	unsigned long flags;
	cpu_entry_t* entry;
	cedf_domain_t* cluster;

	TRACE("C-EDF: task new %d\n", t->pid);

	/* the cluster doesn't change even if t is scheduled */
	cluster = task_cpu_cluster(t);

	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);

	/* setup job params */
	release_at(t, litmus_clock());

	if (is_scheduled) {
		entry = &per_cpu(cedf_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);

#ifdef CONFIG_RELEASE_MASTER
		if (entry->cpu != cluster->domain.release_master) {
#endif
			entry->scheduled = t;
			tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
		} else {
			/* do not schedule on release master */
			preempt(entry); /* force resched */
			tsk_rt(t)->scheduled_on = NO_CPU;
		}
#endif
	} else {
		t->rt_param.scheduled_on = NO_CPU;
	}
	t->rt_param.linked_on = NO_CPU;

	if (on_rq || is_scheduled)
		cedf_job_arrival(t);
	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
}

static void cedf_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	lt_t now;
	cedf_domain_t *cluster;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	cluster = task_cpu_cluster(task);

	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
	now = litmus_clock();
	if (is_sporadic(task) && is_tardy(task, now)) {
		inferred_sporadic_job_release_at(task, now);
	}
	cedf_job_arrival(task);
	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
}

static void cedf_task_block(struct task_struct *t)
{
	unsigned long flags;
	cedf_domain_t *cluster;

	TRACE_TASK(t, "block at %llu\n", litmus_clock());

	cluster = task_cpu_cluster(t);

	/* unlink if necessary */
	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
	unlink(t);
	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);

	BUG_ON(!is_realtime(t));
}


static void cedf_task_exit(struct task_struct * t)
{
	unsigned long flags;
	cedf_domain_t *cluster = task_cpu_cluster(t);

	/* unlink if necessary */
	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
	unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		cpu_entry_t *cpu;
		cpu = &per_cpu(cedf_cpu_entries, tsk_rt(t)->scheduled_on);
		cpu->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);

	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}

static long cedf_admit_task(struct task_struct* tsk)
{
	return (remote_cluster(task_cpu(tsk)) == task_cpu_cluster(tsk)) ?
			0 : -EINVAL;
}
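
/* Admission requires that the task is already executing on a CPU inside the
 * cluster it has been assigned to (via its rt_param cpu field). A minimal
 * userspace sketch, assuming liblitmus (the helper names below come from
 * liblitmus, not from this file, and cluster_id is illustrative):
 *
 *	struct rt_task param;
 *	init_rt_task_param(&param);
 *	param.cpu = domain_to_first_cpu(cluster_id);
 *	... also set exec_cost, period, etc., then:
 *	set_rt_task_param(gettid(), &param);
 *	be_migrate_to_domain(cluster_id);
 *	task_mode(LITMUS_RT_TASK);
 *
 * Without the migration step, cedf_admit_task() returns -EINVAL.
 */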

/* total number of clusters */
static int num_clusters;
/* we do not support clusters of different sizes */
static unsigned int cluster_size;

#ifdef VERBOSE_INIT
static void print_cluster_topology(cpumask_var_t mask, int cpu)
{
	printk(KERN_INFO "CPU = %d, shared cpu(s) = %*pbl\n", cpu,
	       cpumask_pr_args(mask));
}
#endif

static int clusters_allocated = 0;

static void cleanup_cedf(void)
{
	int i;

	if (clusters_allocated) {
		for (i = 0; i < num_clusters; i++) {
			kfree(cedf[i].cpus);
			kfree(cedf[i].heap_node);
			free_cpumask_var(cedf[i].cpu_map);
		}

		kfree(cedf);
	}
}

static struct domain_proc_info cedf_domain_proc_info;

static long cedf_get_domain_proc_info(struct domain_proc_info **ret)
{
	*ret = &cedf_domain_proc_info;
	return 0;
}

static void cedf_setup_domain_proc(void)
{
	int i, cpu, domain;
#ifdef CONFIG_RELEASE_MASTER
	int release_master = atomic_read(&release_master_cpu);
	/* skip over the domain with the release master if cluster size is 1 */
	int skip_domain = (1 == cluster_size && release_master != NO_CPU) ?
			release_master : NO_CPU;
#else
	int release_master = NO_CPU;
	int skip_domain = NO_CPU;
#endif
	int num_rt_cpus = num_online_cpus() - (release_master != NO_CPU);
	int num_rt_domains = num_clusters - (skip_domain != NO_CPU);
	struct cd_mapping *map;

	memset(&cedf_domain_proc_info, 0, sizeof(cedf_domain_proc_info));
	init_domain_proc_info(&cedf_domain_proc_info, num_rt_cpus, num_rt_domains);
	cedf_domain_proc_info.num_cpus = num_rt_cpus;
	cedf_domain_proc_info.num_domains = num_rt_domains;

	for (cpu = 0, i = 0; cpu < num_online_cpus(); ++cpu) {
		if (cpu == release_master)
			continue;
		map = &cedf_domain_proc_info.cpu_to_domains[i];
		/* pointer math to figure out the domain index */
		domain = remote_cluster(cpu) - cedf;
		map->id = cpu;
		cpumask_set_cpu(domain, map->mask);
		++i;
	}

	for (domain = 0, i = 0; domain < num_clusters; ++domain) {
		if (domain == skip_domain)
			continue;
		map = &cedf_domain_proc_info.domain_to_cpus[i];
		map->id = i;
		cpumask_copy(map->mask, cedf[domain].cpu_map);
		++i;
	}
}

static long cedf_activate_plugin(void)
{
	int i, j, cpu, ccpu, cpu_count;
	cpu_entry_t *entry;

	cpumask_var_t mask;
	int chk = 0;

	/* de-allocate old clusters, if any */
	cleanup_cedf();

	printk(KERN_INFO "C-EDF: Activate Plugin, cluster configuration = %d\n",
	       cluster_config);

	/* need to get cluster_size first */
	if (!zalloc_cpumask_var(&mask, GFP_ATOMIC))
		return -ENOMEM;

	if (cluster_config == GLOBAL_CLUSTER) {
		cluster_size = num_online_cpus();
	} else {
		chk = get_shared_cpu_map(mask, 0, cluster_config);
		if (chk) {
			/* if chk != 0 then it is the max allowed index */
			printk(KERN_INFO "C-EDF: Cluster configuration = %d "
			       "is not supported on this hardware.\n",
			       cluster_config);
			/* User should notice that the configuration failed, so
			 * let's bail out. */
			return -EINVAL;
		}

		cluster_size = cpumask_weight(mask);
	}

	if ((num_online_cpus() % cluster_size) != 0) {
		/* this can't be right, some cpus are left out */
		printk(KERN_ERR "C-EDF: Trying to group %d CPUs into clusters of size %d!\n",
		       num_online_cpus(), cluster_size);
		return -1;
	}
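
	/* Example (illustrative numbers): 16 online CPUs with an L3 shared by
	 * 8 cores gives cluster_size = 8 and hence num_clusters = 2 below;
	 * 6 online CPUs with cluster_size = 4 would fail the check above
	 * because two CPUs would be left without a cluster. */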

	num_clusters = num_online_cpus() / cluster_size;
	printk(KERN_INFO "C-EDF: %d cluster(s) of size = %d\n",
	       num_clusters, cluster_size);

	/* initialize clusters */
	cedf = kmalloc(num_clusters * sizeof(cedf_domain_t), GFP_ATOMIC);
	for (i = 0; i < num_clusters; i++) {

		cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t),
				       GFP_ATOMIC);
		cedf[i].heap_node = kmalloc(
				cluster_size * sizeof(struct bheap_node),
				GFP_ATOMIC);
		bheap_init(&(cedf[i].cpu_heap));
		edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs);

		if (!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC))
			return -ENOMEM;
#ifdef CONFIG_RELEASE_MASTER
		cedf[i].domain.release_master = atomic_read(&release_master_cpu);
#endif
	}

	/* cycle through clusters and add cpus to them */
	for (i = 0; i < num_clusters; i++) {

		for_each_online_cpu(cpu) {
			/* check if the cpu is already in a cluster */
			for (j = 0; j < num_clusters; j++)
				if (cpumask_test_cpu(cpu, cedf[j].cpu_map))
					break;
			/* if it is in a cluster go to next cpu */
			if (j < num_clusters &&
			    cpumask_test_cpu(cpu, cedf[j].cpu_map))
				continue;

			/* this cpu isn't in any cluster */
			/* get the shared cpus */
			if (unlikely(cluster_config == GLOBAL_CLUSTER))
				cpumask_copy(mask, cpu_online_mask);
			else
				get_shared_cpu_map(mask, cpu, cluster_config);

			cpumask_copy(cedf[i].cpu_map, mask);
#ifdef VERBOSE_INIT
			print_cluster_topology(mask, cpu);
#endif
			/* add cpus to current cluster and init cpu_entry_t */
			cpu_count = 0;
			for_each_cpu(ccpu, cedf[i].cpu_map) {

				entry = &per_cpu(cedf_cpu_entries, ccpu);
				cedf[i].cpus[cpu_count] = entry;
				atomic_set(&entry->will_schedule, 0);
				entry->cpu = ccpu;
				entry->cluster = &cedf[i];
				entry->hn = &(cedf[i].heap_node[cpu_count]);
				bheap_node_init(&entry->hn, entry);

				cpu_count++;

				entry->linked = NULL;
				entry->scheduled = NULL;
#ifdef CONFIG_RELEASE_MASTER
				/* only add CPUs that should schedule jobs */
				if (entry->cpu != entry->cluster->domain.release_master)
#endif
					update_cpu_position(entry);
			}
			/* done with this cluster */
			break;
		}
	}

	clusters_allocated = 1;
	free_cpumask_var(mask);

	cedf_setup_domain_proc();

	return 0;
}

static long cedf_deactivate_plugin(void)
{
	destroy_domain_proc_info(&cedf_domain_proc_info);
	return 0;
}

/* Plugin object */
static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "C-EDF",
	.finish_switch		= cedf_finish_switch,
	.task_new		= cedf_task_new,
	.complete_job		= complete_job,
	.task_exit		= cedf_task_exit,
	.schedule		= cedf_schedule,
	.task_wake_up		= cedf_task_wake_up,
	.task_block		= cedf_task_block,
	.admit_task		= cedf_admit_task,
	.activate_plugin	= cedf_activate_plugin,
	.deactivate_plugin	= cedf_deactivate_plugin,
	.get_domain_proc_info	= cedf_get_domain_proc_info,
};

static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL;

static int __init init_cedf(void)
{
	int err, fs;

	err = register_sched_plugin(&cedf_plugin);
	if (!err) {
		fs = make_plugin_proc_dir(&cedf_plugin, &cedf_dir);
		if (!fs)
			cluster_file = create_cluster_file(cedf_dir, &cluster_config);
		else
			printk(KERN_ERR "Could not allocate C-EDF procfs dir.\n");
	}
	return err;
}

static void clean_cedf(void)
{
	cleanup_cedf();
	if (cluster_file)
		remove_proc_entry("cluster", cedf_dir);
	if (cedf_dir)
		remove_plugin_proc_dir(&cedf_plugin);
}

module_init(init_cedf);
module_exit(clean_cedf);