author     Glenn Elliott <gelliott@cs.unc.edu>    2011-06-21 01:29:34 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>    2011-08-27 11:58:39 -0400
commit     592eaca1409e55407e980f71b2ec604ca3610ba5 (patch)
tree       43cadd8d3f9cc150a7b108696bfabcddcff55650 /litmus/sched_cedf.c
parent     fb8d6602af1cbc09115544056b872b976c6349c3 (diff)
Avoid needlessly costly migrations. CONFIG_SCHED_CPU_AFFINITY
Given a choice between several available CPUs (unlinked) on which
to schedule a task, let the scheduler select the CPU closest to
where that task was previously scheduled. Hopefully, this will
reduce cache migration penalties.
Notes: SCHED_CPU_AFFINITY depends on x86 (only x86 is supported
at this time). Also, PFair/PD^2 does not make use of this feature.
Signed-off-by: Andrea Bastoni <bastoni@cs.unc.edu>
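The nearest-CPU lookup itself, get_nearest_available_cpu(), lives in litmus/affinity.h and is not part of this diff. As a rough mental model only (the function names, toy topology, and neighborhood sizes below are illustrative assumptions, not LITMUS^RT code), the selection can be pictured as walking outward from the CPU the task last ran on through progressively larger cache-sharing neighborhoods (SMT sibling, shared L2, shared L3, whole socket) and taking the first idle CPU found:

/*
 * Illustrative sketch only; this is NOT the litmus/affinity.h code.
 * Toy 8-CPU topology: CPUs grouped in pairs at levels 0/1 (SMT, L2),
 * quads at level 2 (L3), and a single socket at level 3.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS   8
#define NR_LEVELS 4

static int neighborhood_size(int level)
{
	static const int sizes[NR_LEVELS] = { 2, 2, 4, 8 };
	return sizes[level];
}

/* Return the idle CPU closest to `start`, or -1 if every CPU is busy. */
static int nearest_available_cpu(const bool idle[NR_CPUS], int start)
{
	for (int level = 0; level < NR_LEVELS; level++) {
		int size = neighborhood_size(level);
		int base = start - (start % size);

		for (int cpu = base; cpu < base + size; cpu++)
			if (idle[cpu])
				return cpu;	/* first hit at the smallest level wins */
	}
	return -1;
}

int main(void)
{
	/* The task last ran on CPU 2; only CPUs 3 and 6 are idle. */
	bool idle[NR_CPUS] = { false, false, false, true,
			       false, false, true,  false };

	printf("nearest idle CPU to 2: %d\n", nearest_available_cpu(idle, 2));
	return 0;
}

In the patch below, the candidate produced by this kind of walk is only accepted if it also lies inside the task's C-EDF cluster.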
Diffstat (limited to 'litmus/sched_cedf.c')
-rw-r--r--   litmus/sched_cedf.c   33
1 file changed, 33 insertions(+), 0 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 73fe1c442a0d..e29a9fe2a8e8 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -43,6 +43,10 @@
 
 #include <litmus/bheap.h>
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+#include <litmus/affinity.h>
+#endif
+
 /* to configure the cluster size */
 #include <litmus/litmus_proc.h>
 #include <linux/uaccess.h>
@@ -257,6 +261,23 @@ static noinline void requeue(struct task_struct* task)
 	}
 }
 
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+static cpu_entry_t* cedf_get_nearest_available_cpu(
+				cedf_domain_t *cluster, cpu_entry_t* start)
+{
+	cpu_entry_t* affinity;
+
+	get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1);
+
+	/* make sure CPU is in our cluster */
+	if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
+		return(affinity);
+	else
+		return(NULL);
+}
+#endif
+
+
 /* check for any necessary preemptions */
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
@@ -270,8 +291,20 @@ static void check_for_preemptions(cedf_domain_t *cluster)
 		task = __take_ready(&cluster->domain);
 		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
 		      task->pid, last->cpu);
+#ifdef CONFIG_SCHED_CPU_AFFINITY
+		{
+			cpu_entry_t* affinity =
+					cedf_get_nearest_available_cpu(cluster,
+						&per_cpu(cedf_cpu_entries, task_cpu(task)));
+			if(affinity)
+				last = affinity;
+			else if(last->linked)
+				requeue(last->linked);
+		}
+#else
 		if (last->linked)
 			requeue(last->linked);
+#endif
 		link_task_to_cpu(task, last);
 		preempt(last);
 	}
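The cpu_isset() guard in cedf_get_nearest_available_cpu() matters because the topologically nearest idle CPU can belong to a different cluster; in that case check_for_preemptions() keeps its original behavior and requeues the task linked to the lowest-priority CPU. A minimal user-space analogue of that guard, using glibc's cpu_set_t rather than the kernel cpumask API and with a made-up cluster layout, looks like this:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t cluster0;	/* pretend CPUs 0-3 form one C-EDF cluster */
	int candidate = 4;	/* nearest idle CPU returned by the affinity walk */

	CPU_ZERO(&cluster0);
	for (int cpu = 0; cpu < 4; cpu++)
		CPU_SET(cpu, &cluster0);

	/* mirrors the cpu_isset(affinity->cpu, *cluster->cpu_map) check */
	if (CPU_ISSET(candidate, &cluster0))
		printf("CPU %d accepted: link the task here\n", candidate);
	else
		printf("CPU %d rejected: outside the cluster, keep 'last'\n",
		       candidate);
	return 0;
}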