author     Glenn Elliott <gelliott@cs.unc.edu>        2011-06-22 01:30:25 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>        2011-08-27 12:04:53 -0400
commit     b4c52e27caa701a16e120b43a0e70ca6529a58a4 (patch)
tree       677c7326db2a1334b28870c2cf4dc3a9795be08e /litmus/sched_cedf.c
parent     b751e4e17e667f11404fc2f290416c0df050e964 (diff)
C-EDF: Make migration affinity work with Release Master
Needed to update C-EDF to handle the release master. Also updated
get_nearest_available_cpu() to take NO_CPU instead of -1 to indicate
that there is no release master. Although NO_CPU is 0xffffffff (-1 in
two's complement), we still translate this value to -1 in case the
definition of NO_CPU ever changes.
Signed-off-by: Andrea Bastoni <bastoni@cs.unc.edu>
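
For context, the NO_CPU-to-(-1) translation described above amounts to mapping the sentinel value to a plain signed -1 before the nearest-CPU search runs. Below is a minimal, self-contained user-space sketch of just that mapping; normalize_release_master() and the standalone NO_CPU definition are hypothetical stand-ins for illustration, not the actual macro in the LITMUS^RT sources.

#include <assert.h>

/* Hypothetical stand-in for the kernel-side sentinel; per the commit
 * message, NO_CPU is 0xffffffff, i.e. -1 in two's complement. */
#define NO_CPU 0xffffffffu

/* Sketch of the translation: callers pass NO_CPU when there is no
 * release master, and the helper converts it to -1 internally, so the
 * search logic keeps working even if NO_CPU's numeric value is ever
 * redefined. */
static int normalize_release_master(unsigned int release_master)
{
	return (release_master == NO_CPU) ? -1 : (int)release_master;
}

int main(void)
{
	assert(normalize_release_master(NO_CPU) == -1); /* no release master */
	assert(normalize_release_master(2) == 2);       /* CPU 2 is the release master */
	return 0;
}

Whether a caller passes NO_CPU or the cluster's actual release master is decided at compile time by CONFIG_RELEASE_MASTER, as the hunk below shows.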
Diffstat (limited to 'litmus/sched_cedf.c')
-rw-r--r-- | litmus/sched_cedf.c | 16 |
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 0707059597d6..690b94dbd686 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -263,11 +263,17 @@ static noinline void requeue(struct task_struct* task)
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 static cpu_entry_t* cedf_get_nearest_available_cpu(
-				cedf_domain_t *cluster, cpu_entry_t* start)
+				cedf_domain_t *cluster, cpu_entry_t *start)
 {
-	cpu_entry_t* affinity;
+	cpu_entry_t *affinity;
 
-	get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1);
+	get_nearest_available_cpu(affinity, start, cedf_cpu_entries,
+#ifdef CONFIG_RELEASE_MASTER
+			cluster->domain.release_master
+#else
+			NO_CPU
+#endif
+		);
 
 	/* make sure CPU is in our cluster */
 	if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
@@ -282,7 +288,7 @@ static cpu_entry_t* cedf_get_nearest_available_cpu(
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
 
 	for(last = lowest_prio_cpu(cluster);
 	    edf_preemption_needed(&cluster->domain, last->linked);
@@ -293,7 +299,7 @@ static void check_for_preemptions(cedf_domain_t *cluster)
 			  task->pid, last->cpu);
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 		{
-			cpu_entry_t* affinity =
+			cpu_entry_t *affinity =
 				cedf_get_nearest_available_cpu(cluster,
 					&per_cpu(cedf_cpu_entries, task_cpu(task)));
 			if(affinity)