Diffstat (limited to 'litmus')
-rw-r--r--  litmus/affinity.c       6
-rw-r--r--  litmus/sched_cedf.c    16
-rw-r--r--  litmus/sched_gsn_edf.c 10
3 files changed, 18 insertions, 14 deletions
diff --git a/litmus/affinity.c b/litmus/affinity.c
index 9adab7a3bcd7..3fa6dd789400 100644
--- a/litmus/affinity.c
+++ b/litmus/affinity.c
@@ -16,8 +16,6 @@ void init_topology(void) {
 
 	for_each_online_cpu(cpu) {
 		for (i = 0; i < depth; ++i) {
-			long unsigned int firstbits;
-
 			chk = get_shared_cpu_map((struct cpumask *)&neigh_info[cpu].neighbors[i], cpu, i);
 			if (chk) {
 				/* failed */
@@ -27,9 +25,9 @@ void init_topology(void) {
 			neigh_info[cpu].size[i] =
 				cpumask_weight((struct cpumask *)&neigh_info[cpu].neighbors[i]);
 		}
-		firstbits = *neigh_info[cpu].neighbors[i]->bits;
 		printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
-			cpu, neigh_info[cpu].size[i], i, firstbits);
+			cpu, neigh_info[cpu].size[i], i,
+			*cpumask_bits(neigh_info[cpu].neighbors[i]));
 	}
 
 	/* set data for non-existent levels */
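Note: cpumask_bits() is the stock kernel accessor for a cpumask's underlying unsigned long array, so dereferencing it yields the first word of the mask directly and the local firstbits temporary becomes unnecessary. A minimal user-space sketch of the same accessor pattern (the struct layout and NR_CPUS value below are illustrative stand-ins, not the kernel's exact definitions):

#include <stdio.h>

/* Illustrative stand-ins for the kernel's types; real cpumasks live in
 * <linux/cpumask.h> and size the array from the configured NR_CPUS. */
#define NR_CPUS 64
struct cpumask { unsigned long bits[(NR_CPUS + 63) / 64]; };

/* Same shape as the kernel macro: expose the raw word array. */
#define cpumask_bits(maskp) ((maskp)->bits)

int main(void)
{
	struct cpumask neighbors = { .bits = { 0xf0 } }; /* CPUs 4-7 set */

	/* Print the first word of the mask directly, as the patched
	 * printk does, instead of copying it into a local first. */
	printf("mask = %lx\n", *cpumask_bits(&neighbors));
	return 0;
}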
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 0707059597d6..690b94dbd686 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -263,11 +263,17 @@ static noinline void requeue(struct task_struct* task)
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 static cpu_entry_t* cedf_get_nearest_available_cpu(
-	cedf_domain_t *cluster, cpu_entry_t* start)
+	cedf_domain_t *cluster, cpu_entry_t *start)
 {
-	cpu_entry_t* affinity;
+	cpu_entry_t *affinity;
 
-	get_nearest_available_cpu(affinity, start, cedf_cpu_entries, -1);
+	get_nearest_available_cpu(affinity, start, cedf_cpu_entries,
+#ifdef CONFIG_RELEASE_MASTER
+		cluster->domain.release_master
+#else
+		NO_CPU
+#endif
+		);
 
 	/* make sure CPU is in our cluster */
 	if (affinity && cpu_isset(affinity->cpu, *cluster->cpu_map))
@@ -282,7 +288,7 @@ static cpu_entry_t* cedf_get_nearest_available_cpu(
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
 
 	for(last = lowest_prio_cpu(cluster);
 	    edf_preemption_needed(&cluster->domain, last->linked);
@@ -293,7 +299,7 @@ static void check_for_preemptions(cedf_domain_t *cluster)
 			task->pid, last->cpu);
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 		{
-			cpu_entry_t* affinity =
+			cpu_entry_t *affinity =
 					cedf_get_nearest_available_cpu(cluster,
 					&per_cpu(cedf_cpu_entries, task_cpu(task)));
 			if(affinity)
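Note: this brings sched_cedf.c in line with what sched_gsn_edf.c already did, letting the preprocessor pick the release-master argument inside the get_nearest_available_cpu() invocation; under C-EDF the release master comes from the cluster's own domain rather than a global, and a hard-coded -1 no longer stands in for it. A compilable sketch of the conditional-argument idiom, using a hypothetical nearest_cpu() helper and an assumed NO_CPU value in place of the LITMUS^RT internals:

#include <stdio.h>

#define NO_CPU (~0u)	/* sentinel for "no release master"; the
			 * kernel's exact definition may differ */

/* Hypothetical stand-in for the scheduler's lookup helper. */
static unsigned int nearest_cpu(unsigned int start, unsigned int release_master)
{
	/* skip the release master, which never runs real-time tasks */
	return (start == release_master) ? start + 1 : start;
}

int main(void)
{
	unsigned int cpu = nearest_cpu(0,
#ifdef CONFIG_RELEASE_MASTER
		0		/* e.g. CPU 0 dedicated to releases */
#else
		NO_CPU		/* no dedicated release CPU configured */
#endif
		);
	printf("selected CPU %u\n", cpu);
	return 0;
}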
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 17926e9fccdc..467f8b284de4 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -258,15 +258,15 @@ static noinline void requeue(struct task_struct* task)
 }
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
-static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
+static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t *start)
 {
-	cpu_entry_t* affinity;
+	cpu_entry_t *affinity;
 
 	get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
 #ifdef CONFIG_RELEASE_MASTER
 		gsnedf.release_master
 #else
-		-1
+		NO_CPU
 #endif
 		);
 
@@ -278,7 +278,7 @@ static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
 static void check_for_preemptions(void)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
 
 	for (last = lowest_prio_cpu();
 	     edf_preemption_needed(&gsnedf, last->linked);
@@ -290,7 +290,7 @@ static void check_for_preemptions(void)
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 	{
-		cpu_entry_t* affinity =
+		cpu_entry_t *affinity =
 			gsnedf_get_nearest_available_cpu(
 				&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
 		if (affinity)
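Note: swapping the bare -1 for NO_CPU matters because CPU ids in this code are unsigned; -1 only matched through implicit conversion to UINT_MAX and invites -Wsign-compare warnings, while the named sentinel documents the intent. A small sketch of the pitfall, assuming an illustrative NO_CPU definition (the kernel's exact value may differ):

#include <stdio.h>

/* Assumed definition; LITMUS^RT defines its own NO_CPU sentinel and
 * the exact value here is illustrative. */
#define NO_CPU (~0u)

int main(void)
{
	unsigned int release_master = NO_CPU;

	/* A bare -1 only matches because it converts to UINT_MAX;
	 * compilers flag the mixed signedness under -Wsign-compare. */
	if (release_master == (unsigned int)-1)
		printf("-1 converts to %u\n", release_master);

	/* The named sentinel says what is meant and keeps types unsigned. */
	if (release_master == NO_CPU)
		printf("no dedicated release CPU\n");

	return 0;
}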