author     Glenn Elliott <gelliott@cs.unc.edu>  2011-06-22 01:30:25 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2011-08-27 12:04:53 -0400
commit     b4c52e27caa701a16e120b43a0e70ca6529a58a4
tree       677c7326db2a1334b28870c2cf4dc3a9795be08e
parent     b751e4e17e667f11404fc2f290416c0df050e964
C-EDF: Make migration affinity work with Release Master
Update C-EDF to handle the release master. Also update
get_nearest_available_cpu() to take NO_CPU instead of -1 to indicate
that there is no release master. Although NO_CPU is 0xffffffff (-1 in
two's complement), the macro still translates this value to -1
internally, in case the definition of NO_CPU ever changes.
Signed-off-by: Andrea Bastoni <bastoni@cs.unc.edu>
-rw-r--r--  include/litmus/affinity.h  | 15
-rw-r--r--  litmus/affinity.c          |  6
-rw-r--r--  litmus/sched_cedf.c        | 16
-rw-r--r--  litmus/sched_gsn_edf.c     | 10
4 files changed, 26 insertions(+), 21 deletions(-)
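
The sentinel translation described in the commit message can be illustrated with a minimal userspace sketch (standard C only; may_schedule_on() and the sample values are hypothetical, not part of the patch):

#include <stdio.h>

#define NO_CPU 0xffffffffU	/* sentinel: "there is no release master" */

/* Hypothetical helper mirroring the macro's normalization: map the
 * unsigned NO_CPU sentinel to plain -1 so the skip-test below stays
 * correct even if the numeric value of NO_CPU is ever redefined. */
static int may_schedule_on(int cpu, unsigned int release_master)
{
	int rm = (release_master == NO_CPU) ? -1 : (int)release_master;
	return cpu != rm;	/* never link work to the release master */
}

int main(void)
{
	printf("%d\n", may_schedule_on(2, NO_CPU));	/* 1: no RM configured */
	printf("%d\n", may_schedule_on(2, 2));		/* 0: CPU 2 is the RM */
	return 0;
}

Normalizing the sentinel once up front keeps the comparison in the inner search loop cheap and immune to a future redefinition of NO_CPU.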
diff --git a/include/litmus/affinity.h b/include/litmus/affinity.h
index 5eee0eaa170d..ca2e442eb547 100644
--- a/include/litmus/affinity.h
+++ b/include/litmus/affinity.h
@@ -24,12 +24,12 @@ void init_topology(void); /* called by Litmus module's _init_litmus() */
 
 /* Works like:
 void get_nearest_available_cpu(
-	cpu_entry_t* nearest,
-	cpu_entry_t* start,
-	cpu_entry_t* entries,
+	cpu_entry_t **nearest,
+	cpu_entry_t *start,
+	cpu_entry_t *entries,
 	int release_master)
 
-Set release_master = -1 for no RM.
+Set release_master = NO_CPU for no Release Master.
 
 We use a macro here to exploit the fact that C-EDF and G-EDF
 have similar structures for their cpu_entry_t structs, even though
@@ -48,13 +48,14 @@ dissertation.)
 	} else { \
 		int __level; \
 		int __cpu; \
-		struct neighborhood* __neighbors = &neigh_info[(start)->cpu]; \
+		int __release_master = ((release_master) == NO_CPU) ? -1 : (release_master); \
+		struct neighborhood *__neighbors = &neigh_info[(start)->cpu]; \
 		\
 		for (__level = 0; (__level < NUM_CACHE_LEVELS) && !(nearest); ++__level) { \
 			if (__neighbors->size[__level] > 1) { \
 				for_each_cpu(__cpu, __neighbors->neighbors[__level]) { \
-					if (__cpu != (release_master)) { \
-						cpu_entry_t* __entry = &per_cpu((entries), __cpu); \
+					if (__cpu != __release_master) { \
+						cpu_entry_t *__entry = &per_cpu((entries), __cpu); \
 						if (!__entry->linked) { \
 							(nearest) = __entry; \
 							break; \
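
The comment fix above changes the pseudo-prototype's first parameter to cpu_entry_t **nearest because the macro assigns to its nearest argument; written as a real function, that requires an out-parameter. A hedged userspace sketch of the equivalence, with cpu_entry_t and find_idle_neighbor() as illustrative stand-ins (the linear scan abbreviates the cache-level search):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the plugins' per-CPU state. */
typedef struct cpu_entry {
	int cpu;
	void *linked;	/* NULL when the CPU has nothing linked (idle) */
} cpu_entry_t;

/* As a function, the macro's `nearest` must be cpu_entry_t ** so the
 * callee can store its result, which is what the corrected comment
 * documents. */
static void find_idle_neighbor(cpu_entry_t **nearest, cpu_entry_t *entries,
			       int nr_cpus, int release_master)
{
	int cpu;

	*nearest = NULL;
	for (cpu = 0; cpu < nr_cpus && !*nearest; ++cpu)
		if (cpu != release_master && !entries[cpu].linked)
			*nearest = &entries[cpu];
}

int main(void)
{
	cpu_entry_t entries[3] = { {0, "busy"}, {1, NULL}, {2, NULL} };
	cpu_entry_t *nearest;

	find_idle_neighbor(&nearest, entries, 3, 1);	/* CPU 1 is the RM */
	printf("nearest idle: CPU %d\n", nearest ? nearest->cpu : -1);	/* CPU 2 */
	return 0;
}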
diff --git a/litmus/affinity.c b/litmus/affinity.c
index 9adab7a3bcd7..3fa6dd789400 100644
--- a/litmus/affinity.c
+++ b/litmus/affinity.c
@@ -16,8 +16,6 @@ void init_topology(void) {
 
 	for_each_online_cpu(cpu) {
 		for (i = 0; i < depth; ++i) {
-			long unsigned int firstbits;
-
 			chk = get_shared_cpu_map((struct cpumask *)&neigh_info[cpu].neighbors[i], cpu, i);
 			if (chk) {
 				/* failed */
@@ -27,9 +25,9 @@ void init_topology(void) {
 				neigh_info[cpu].size[i] =
 					cpumask_weight((struct cpumask *)&neigh_info[cpu].neighbors[i]);
 			}
-			firstbits = *neigh_info[cpu].neighbors[i]->bits;
 			printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
-				cpu, neigh_info[cpu].size[i], i, firstbits);
+				cpu, neigh_info[cpu].size[i], i,
+				*cpumask_bits(neigh_info[cpu].neighbors[i]));
 		}
 
 		/* set data for non-existent levels */
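
The printk change drops the firstbits temporary in favor of the kernel's cpumask_bits() accessor; either way, only the first unsigned long of the mask is printed (CPUs 0-63 on a 64-bit build). A userspace analogue of the accessor pattern (fake_cpumask and the sample mask are made up for illustration):

#include <stdio.h>

#define FAKE_NR_CPUS 64
struct fake_cpumask { unsigned long bits[(FAKE_NR_CPUS + 63) / 64]; };
/* mirrors the kernel's cpumask_bits(): expose the bit array through an
 * accessor instead of reaching into ->bits at every call site */
#define fake_cpumask_bits(m) ((m)->bits)

int main(void)
{
	/* hypothetical neighborhood: CPUs 4-7 share a cache level */
	struct fake_cpumask neighbors = { .bits = { 0xf0UL } };

	printf("mask = %lx\n", *fake_cpumask_bits(&neighbors));	/* prints f0 */
	return 0;
}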
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 0707059597d6..690b94dbd686 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -263,11 +263,17 @@ static noinline void requeue(struct task_struct* task)
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 static cpu_entry_t* cedf_get_nearest_available_cpu(
-				cedf_domain_t *cluster, cpu_entry_t* start)
+				cedf_domain_t *cluster, cpu_entry_t *start)
 {
-	cpu_entry_t* affinity;
+	cpu_entry_t *affinity;
 
-	get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1);
+	get_nearest_available_cpu(affinity, start, cedf_cpu_entries,
+#ifdef CONFIG_RELEASE_MASTER
+			cluster->domain.release_master
+#else
+			NO_CPU
+#endif
+			);
 
 	/* make sure CPU is in our cluster */
 	if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
@@ -282,7 +288,7 @@ static cpu_entry_t* cedf_get_nearest_available_cpu(
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
 
 	for(last = lowest_prio_cpu(cluster);
 	    edf_preemption_needed(&cluster->domain, last->linked);
@@ -293,7 +299,7 @@ static void check_for_preemptions(cedf_domain_t *cluster)
 			task->pid, last->cpu);
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 		{
-			cpu_entry_t* affinity =
+			cpu_entry_t *affinity =
 					cedf_get_nearest_available_cpu(cluster,
 							&per_cpu(cedf_cpu_entries, task_cpu(task)));
 			if(affinity)
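
Unlike G-EDF, C-EDF must reject a nearest CPU that lies outside the calling cluster, which is what the cpu_isset() test above does. A rough single-word-bitmask sketch of that filtering step (all names and values hypothetical):

#include <stdio.h>

/* Hypothetical model: a cluster's cpu_map as one bitmask word. */
static int cpu_in_cluster(int cpu, unsigned long cpu_map)
{
	return (cpu_map >> cpu) & 1UL;
}

int main(void)
{
	unsigned long cluster_map = 0x0fUL;	/* this cluster owns CPUs 0-3 */
	int nearest = 5;	/* suppose the topology search returned CPU 5 */

	/* mirror of the check above: accept the nearest CPU only if it
	 * falls inside our own cluster; otherwise discard it */
	printf("%s\n", cpu_in_cluster(nearest, cluster_map)
			? "use nearest" : "nearest outside cluster, discard");
	return 0;
}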
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 17926e9fccdc..467f8b284de4 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -258,15 +258,15 @@ static noinline void requeue(struct task_struct* task)
 }
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
-static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
+static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t *start)
 {
-	cpu_entry_t* affinity;
+	cpu_entry_t *affinity;
 
 	get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
 #ifdef CONFIG_RELEASE_MASTER
 			gsnedf.release_master
 #else
-			-1
+			NO_CPU
 #endif
 			);
 
@@ -278,7 +278,7 @@ static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
 static void check_for_preemptions(void)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
 
 	for (last = lowest_prio_cpu();
 	     edf_preemption_needed(&gsnedf, last->linked);
@@ -290,7 +290,7 @@ static void check_for_preemptions(void)
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 	{
-		cpu_entry_t* affinity =
+		cpu_entry_t *affinity =
 			gsnedf_get_nearest_available_cpu(
 				&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
 		if (affinity)