Diffstat (limited to 'litmus')
-rw-r--r--	litmus/affinity.c	6
-rw-r--r--	litmus/sched_cedf.c	8
-rw-r--r--	litmus/sched_gsn_edf.c	8
3 files changed, 10 insertions, 12 deletions
diff --git a/litmus/affinity.c b/litmus/affinity.c
index 9adab7a3bcd7..3fa6dd789400 100644
--- a/litmus/affinity.c
+++ b/litmus/affinity.c
@@ -16,8 +16,6 @@ void init_topology(void) {
 
 	for_each_online_cpu(cpu) {
 		for (i = 0; i < depth; ++i) {
-			long unsigned int firstbits;
-
 			chk = get_shared_cpu_map((struct cpumask *)&neigh_info[cpu].neighbors[i], cpu, i);
 			if (chk) {
 				/* failed */
@@ -27,9 +25,9 @@ void init_topology(void) {
 			neigh_info[cpu].size[i] =
 				cpumask_weight((struct cpumask *)&neigh_info[cpu].neighbors[i]);
 			}
-			firstbits = *neigh_info[cpu].neighbors[i]->bits;
 			printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
-				cpu, neigh_info[cpu].size[i], i, firstbits);
+				cpu, neigh_info[cpu].size[i], i,
+				*cpumask_bits(neigh_info[cpu].neighbors[i]));
 		}
 
 		/* set data for non-existent levels */
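
The affinity.c change replaces a direct dereference of the cpumask's internal bits array with the cpumask_bits() accessor from <linux/cpumask.h>, dropping the firstbits temporary in the process. Going through the accessor rather than ->bits keeps the code correct regardless of how the mask is represented (e.g. a plain struct cpumask vs. cpumask_var_t). A minimal sketch of the before/after, using a hypothetical neighborhood struct in place of the real neigh_info layout:

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	/* Hypothetical stand-in for the per-CPU neighborhood data. */
	struct neighborhood {
		struct cpumask *neighbors[4];
		int size[4];
	};

	static void print_level_mask(struct neighborhood *info, int cpu, int level)
	{
		/* Old form reached into the representation directly:
		 *   unsigned long firstbits = *info->neighbors[level]->bits;
		 * New form goes through the accessor; dereferencing
		 * cpumask_bits() yields the first word of the mask. */
		printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
			cpu, info->size[level], level,
			*cpumask_bits(info->neighbors[level]));
	}
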
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 352ae27a6afa..690b94dbd686 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -263,9 +263,9 @@ static noinline void requeue(struct task_struct* task)
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 static cpu_entry_t* cedf_get_nearest_available_cpu(
-				cedf_domain_t *cluster, cpu_entry_t* start)
+				cedf_domain_t *cluster, cpu_entry_t *start)
 {
-	cpu_entry_t* affinity;
+	cpu_entry_t *affinity;
 
 	get_nearest_available_cpu(affinity, start, cedf_cpu_entries,
 #ifdef CONFIG_RELEASE_MASTER
@@ -288,7 +288,7 @@ static cpu_entry_t* cedf_get_nearest_available_cpu(
 static void check_for_preemptions(cedf_domain_t *cluster)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
 
 	for(last = lowest_prio_cpu(cluster);
 		edf_preemption_needed(&cluster->domain, last->linked);
@@ -299,7 +299,7 @@ static void check_for_preemptions(cedf_domain_t *cluster)
 				task->pid, last->cpu);
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 		{
-			cpu_entry_t* affinity =
+			cpu_entry_t *affinity =
 					cedf_get_nearest_available_cpu(cluster,
 						&per_cpu(cedf_cpu_entries, task_cpu(task)));
 			if(affinity)
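
The sched_cedf.c hunks are pure style cleanup: the asterisk moves from the type to the identifier (cpu_entry_t *last rather than cpu_entry_t* last), matching the kernel's coding style. Attaching the star to each name avoids a classic misreading when several variables share one declaration:

	cpu_entry_t* a, b;	/* misleading: only a is a pointer, b is a struct */
	cpu_entry_t *a, *b;	/* kernel style: both are clearly pointers */
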
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index ada0e0af7f52..467f8b284de4 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -258,9 +258,9 @@ static noinline void requeue(struct task_struct* task)
 }
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
-static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
+static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t *start)
 {
-	cpu_entry_t* affinity;
+	cpu_entry_t *affinity;
 
 	get_nearest_available_cpu(affinity, start, gsnedf_cpu_entries,
 #ifdef CONFIG_RELEASE_MASTER
@@ -278,7 +278,7 @@ static cpu_entry_t* gsnedf_get_nearest_available_cpu(cpu_entry_t* start)
 static void check_for_preemptions(void)
 {
 	struct task_struct *task;
-	cpu_entry_t* last;
+	cpu_entry_t *last;
 
 	for (last = lowest_prio_cpu();
 		edf_preemption_needed(&gsnedf, last->linked);
@@ -290,7 +290,7 @@ static void check_for_preemptions(void)
 
 #ifdef CONFIG_SCHED_CPU_AFFINITY
 		{
-			cpu_entry_t* affinity =
+			cpu_entry_t *affinity =
 					gsnedf_get_nearest_available_cpu(
 						&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
 			if (affinity)
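
In both plugins, the affinity-aware path seeds the nearest-CPU search with the entry of the CPU the task last ran on, obtained via per_cpu() indexed by task_cpu(). A minimal sketch of that per-CPU lookup pattern, with a hypothetical entry type and table standing in for gsnedf_cpu_entries:

	#include <linux/percpu.h>
	#include <linux/sched.h>

	struct example_entry {
		int cpu;
		struct task_struct *linked;
	};

	/* One entry per CPU, analogous to gsnedf_cpu_entries above. */
	static DEFINE_PER_CPU(struct example_entry, example_entries);

	static struct example_entry *entry_of_last_cpu(struct task_struct *t)
	{
		/* task_cpu() reports the CPU t last ran on; per_cpu()
		 * selects that CPU's instance of the variable. */
		return &per_cpu(example_entries, task_cpu(t));
	}
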