author      Rusty Russell <rusty@rustcorp.com.au>   2009-03-13 00:19:46 -0400
committer   Rusty Russell <rusty@rustcorp.com.au>   2009-03-13 00:19:46 -0400
commit      c69fc56de1df5769f2ec69c915c7ad5afe63804c
tree        18cc8d2ad5d6643edf8b73a3a7d26c55b2125d25
parent      d95c3578120e5bc4784069439f00ccb1b5f87717
cpumask: use topology_core_cpumask/topology_thread_cpumask instead of cpu_core_map/cpu_sibling_map
Impact: cleanup
This is presumably what those definitions are for, and while all archs
define cpu_core_map/cpu_sibling_map today, that's changing (e.g. x86
wants to change it to a pointer).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
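
The point of the accessors is that each architecture can keep whatever backing representation it likes behind them. A minimal sketch of the idea, assuming x86-style definitions (illustrative only, not the verbatim arch headers):

/* Illustrative only; the real definitions live in each arch's
 * <asm/topology.h>, with generic fallbacks in <linux/topology.h>.
 */

/* Today: cpu_sibling_map is a per-cpu struct cpumask, handed out
 * by address.
 */
#define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))

/* After the change x86 wants: cpu_sibling_map becomes a per-cpu
 * pointer (cpumask_var_t), handed out as-is. Callers that go
 * through the accessor never notice the difference.
 */
#define topology_thread_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))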
-rw-r--r--   block/blk.h      2
-rw-r--r--   kernel/sched.c   8
2 files changed, 5 insertions, 5 deletions
diff --git a/block/blk.h b/block/blk.h
index 0dce92c37496..3ee94358b43d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -102,7 +102,7 @@ static inline int blk_cpu_to_group(int cpu)
 	const struct cpumask *mask = cpu_coregroup_mask(cpu);
 	return cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	return first_cpu(per_cpu(cpu_sibling_map, cpu));
+	return cpumask_first(topology_thread_cpumask(cpu));
 #else
 	return cpu;
 #endif
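
Worth noting that this hunk is not a pure rename: the old first_cpu() took a cpumask by value, whereas cpumask_first() takes a const pointer, which is exactly what a pointer-backed cpu_sibling_map needs. A simplified sketch of the two, assuming the <linux/cpumask.h> of this era (not verbatim):

/* Old-style helper: the mask is passed by value, so the caller must
 * have a full struct cpumask in hand -- awkward once the map becomes
 * a pointer. (Simplified; the real macro goes through __first_cpu().)
 */
#define first_cpu(src)		cpumask_first(&(src))

/* New-style helper: works through a const pointer, so any expression
 * yielding a struct cpumask * -- including topology_thread_cpumask()
 * -- can be handed in directly.
 */
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
	return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}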
diff --git a/kernel/sched.c b/kernel/sched.c
index 0a76d0b6f215..5dabd80c3c15 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7249,7 +7249,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 {
 	int group;
 
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 	if (sg)
 		*sg = &per_cpu(sched_group_core, group).sg;
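
The converted line keeps the same semantics: cpumask_and(dst, src1, src2) stores the bitwise AND of the two source masks in dst. A worked example with made-up numbers (cpu 4 with SMT siblings {4, 5}, cpu_map covering cpus 0-7):

/* topology_thread_cpumask(4) -> { 4, 5 }
 * cpu_map                    -> { 0, 1, 2, 3, 4, 5, 6, 7 }
 */
cpumask_and(mask, topology_thread_cpumask(4), cpu_map);
/* mask is now { 4, 5 }; the group representative is its first bit. */
group = cpumask_first(mask);	/* -> 4 */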
@@ -7278,7 +7278,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #else
 	group = cpu;
@@ -7621,7 +7621,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		SD_INIT(sd, SIBLING);
 		set_domain_attribute(sd, attr);
 		cpumask_and(sched_domain_span(sd),
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7632,7 +7632,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
 		cpumask_and(this_sibling_map,
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		if (i != cpumask_first(this_sibling_map))
 			continue;
 
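
For code outside the scheduler, the same accessor gives a stable way to walk a CPU's hardware threads regardless of how the arch stores the map. A minimal, hypothetical example (the helper name is ours, not part of this patch):

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical helper: count the SMT siblings of @cpu (including
 * @cpu itself) as reported by the topology mask. Illustrative only.
 */
static unsigned int count_thread_siblings(int cpu)
{
	unsigned int n = 0;
	int sibling;

	for_each_cpu(sibling, topology_thread_cpumask(cpu))
		n++;

	return n;
}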