author     Mike Travis <travis@sgi.com>                           2007-10-16 04:24:05 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-16 12:42:50 -0400
commit     d5a7430ddcdb598261d70f7eb1bf450b5be52085
tree       3b94672e0dbc2bff125de3266908f1a47a17b795  /arch/x86
parent     083576112940fda783d716fd5ccc744f81667b2f
Convert cpu_sibling_map to be a per cpu variable
Convert cpu_sibling_map from a static array sized by NR_CPUS to a per_cpu
variable. This saves sizeof(cpumask_t) bytes for every unused CPU slot.
Access is mostly from startup and CPU hotplug functions.
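
[Editorial note: in practice the conversion just swaps direct array indexing
for the generic per-CPU accessors. A minimal before/after sketch of the
pattern, condensed from the hunks below:

	/* Before: a static table with one cpumask_t per possible CPU, used or not. */
	cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
	EXPORT_SYMBOL(cpu_sibling_map);

	policy->cpus = cpu_sibling_map[policy->cpu];

	/* After: each CPU's mask lives in that CPU's per-cpu area. */
	DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
	EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);

To put a number on the saving: a cpumask_t is a bitmap of NR_CPUS bits, so
with NR_CPUS = 1024 (an example configuration) the static table occupies
1024 * 128 bytes = 128 KiB no matter how many CPUs exist, while the per-cpu
form costs 128 bytes per CPU the kernel actually sets up a per-cpu area for.]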
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86')
 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c   |  2
 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c |  2
 arch/x86/kernel/io_apic_32.c                |  4
 arch/x86/kernel/smpboot_32.c                | 36
 arch/x86/kernel/smpboot_64.c                | 26
 arch/x86/oprofile/op_model_p4.c             |  2
 arch/x86/xen/smp.c                          |  4
 7 files changed, 38 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 8eb414b906d2..793eae854f4f 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -200,7 +200,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	unsigned int i;
 
 #ifdef CONFIG_SMP
-	policy->cpus = cpu_sibling_map[policy->cpu];
+	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
 	/* Errata workaround */
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 36685e8f7be1..14d68aa301ee 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -322,7 +322,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
 	/* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-	policy->cpus = cpu_sibling_map[policy->cpu];
+	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
 	cpus_allowed = current->cpus_allowed;
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index e2f4a1c68547..4ee1e5ee9b57 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -378,7 +378,7 @@ static struct irq_cpu_info {
 
 #define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
 
-#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))
 
 static cpumask_t balance_irq_affinity[NR_IRQS] = {
 	[0 ... NR_IRQS-1] = CPU_MASK_ALL
@@ -598,7 +598,7 @@ tryanotherirq:
 	 * (A+B)/2 vs B
 	 */
 	load = CPU_IRQ(min_loaded) >> 1;
-	for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
+	for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) {
 		if (load > CPU_IRQ(j)) {
 			/* This won't change cpu_sibling_map[min_loaded] */
 			load = CPU_IRQ(j);
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 4cbab48ba865..31fc08bd15ef 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -70,8 +70,8 @@ EXPORT_SYMBOL(smp_num_siblings);
 int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_core_map);
@@ -319,8 +319,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		for_each_cpu_mask(i, cpu_sibling_setup_map) {
 			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
 			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
-				cpu_set(i, cpu_sibling_map[cpu]);
-				cpu_set(cpu, cpu_sibling_map[i]);
+				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 				cpu_set(i, per_cpu(cpu_core_map, cpu));
 				cpu_set(cpu, per_cpu(cpu_core_map, i));
 				cpu_set(i, c[cpu].llc_shared_map);
@@ -328,13 +328,13 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 			}
 		}
 	} else {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 	}
 
 	cpu_set(cpu, c[cpu].llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
+		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
 		c[cpu].booted_cores = 1;
 		return;
 	}
@@ -351,12 +351,12 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 			/*
 			 * Does this new cpu bringup a new core?
 			 */
-			if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
 				/*
 				 * for each core in package, increment
 				 * the booted_cores for this new cpu
 				 */
-				if (first_cpu(cpu_sibling_map[i]) == i)
+				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
 					c[cpu].booted_cores++;
 				/*
 				 * increment the core count for all
@@ -983,7 +983,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		printk(KERN_NOTICE "Local APIC not detected."
 				   " Using dummy APIC emulation.\n");
 		map_cpu_to_logical_apicid();
-		cpu_set(0, cpu_sibling_map[0]);
+		cpu_set(0, per_cpu(cpu_sibling_map, 0));
 		cpu_set(0, per_cpu(cpu_core_map, 0));
 		return;
 	}
@@ -1008,7 +1008,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
 		smpboot_clear_io_apic_irqs();
 		phys_cpu_present_map = physid_mask_of_physid(0);
-		cpu_set(0, cpu_sibling_map[0]);
+		cpu_set(0, per_cpu(cpu_sibling_map, 0));
 		cpu_set(0, per_cpu(cpu_core_map, 0));
 		return;
 	}
@@ -1023,7 +1023,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
 		smpboot_clear_io_apic_irqs();
 		phys_cpu_present_map = physid_mask_of_physid(0);
-		cpu_set(0, cpu_sibling_map[0]);
+		cpu_set(0, per_cpu(cpu_sibling_map, 0));
 		cpu_set(0, per_cpu(cpu_core_map, 0));
 		return;
 	}
@@ -1102,15 +1102,15 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 	Dprintk("Boot done.\n");
 
 	/*
-	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
+	 * construct cpu_sibling_map, so that we can tell sibling CPUs
 	 * efficiently.
 	 */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(per_cpu(cpu_sibling_map, cpu));
 		cpus_clear(per_cpu(cpu_core_map, cpu));
 	}
 
-	cpu_set(0, cpu_sibling_map[0]);
+	cpu_set(0, per_cpu(cpu_sibling_map, 0));
 	cpu_set(0, per_cpu(cpu_core_map, 0));
 
 	smpboot_setup_io_apic();
@@ -1153,13 +1153,13 @@ void remove_siblinginfo(int cpu)
 		/*
 		 * last thread sibling in this cpu core going down
 		 */
-		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
 			c[sibling].booted_cores--;
 	}
 
-	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-		cpu_clear(cpu, cpu_sibling_map[sibling]);
-	cpus_clear(cpu_sibling_map[cpu]);
+	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 	cpus_clear(per_cpu(cpu_core_map, cpu));
 	c[cpu].phys_proc_id = 0;
 	c[cpu].cpu_core_id = 0;
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 6723c8622828..0faa0a0af272 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -91,8 +91,8 @@ EXPORT_SYMBOL(cpu_data);
 int smp_threads_ready;
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_core_map);
@@ -262,8 +262,8 @@ static inline void set_cpu_sibling_map(int cpu)
 		for_each_cpu_mask(i, cpu_sibling_setup_map) {
 			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
 			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
-				cpu_set(i, cpu_sibling_map[cpu]);
-				cpu_set(cpu, cpu_sibling_map[i]);
+				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 				cpu_set(i, per_cpu(cpu_core_map, cpu));
 				cpu_set(cpu, per_cpu(cpu_core_map, i));
 				cpu_set(i, c[cpu].llc_shared_map);
@@ -271,13 +271,13 @@ static inline void set_cpu_sibling_map(int cpu)
 			}
 		}
 	} else {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 	}
 
 	cpu_set(cpu, c[cpu].llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
+		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
 		c[cpu].booted_cores = 1;
 		return;
 	}
@@ -294,12 +294,12 @@ static inline void set_cpu_sibling_map(int cpu)
 			/*
 			 * Does this new cpu bringup a new core?
 			 */
-			if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
 				/*
 				 * for each core in package, increment
 				 * the booted_cores for this new cpu
 				 */
-				if (first_cpu(cpu_sibling_map[i]) == i)
+				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
 					c[cpu].booted_cores++;
 				/*
 				 * increment the core count for all
@@ -735,7 +735,7 @@ static __init void disable_smp(void)
 		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
 	else
 		phys_cpu_present_map = physid_mask_of_physid(0);
-	cpu_set(0, cpu_sibling_map[0]);
+	cpu_set(0, per_cpu(cpu_sibling_map, 0));
 	cpu_set(0, per_cpu(cpu_core_map, 0));
 }
 
@@ -976,13 +976,13 @@ static void remove_siblinginfo(int cpu)
 		/*
 		 * last thread sibling in this cpu core going down
 		 */
-		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
 			c[sibling].booted_cores--;
 	}
 
-	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-		cpu_clear(cpu, cpu_sibling_map[sibling]);
-	cpus_clear(cpu_sibling_map[cpu]);
+	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 	cpus_clear(per_cpu(cpu_core_map, cpu));
 	c[cpu].phys_proc_id = 0;
 	c[cpu].cpu_core_id = 0;
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 47925927b12f..56b4757a1f47 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -379,7 +379,7 @@ static unsigned int get_stagger(void)
 {
 #ifdef CONFIG_SMP
 	int cpu = smp_processor_id();
-	return (cpu != first_cpu(cpu_sibling_map[cpu]));
+	return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
 #endif
 	return 0;
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 539d42530fc4..4fa33c27ccb6 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -147,7 +147,7 @@ void __init xen_smp_prepare_boot_cpu(void)
 	make_lowmem_page_readwrite(&per_cpu__gdt_page);
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(per_cpu(cpu_sibling_map, cpu));
 		/*
 		 * cpu_core_map lives in a per cpu area that is cleared
 		 * when the per cpu array is allocated.
@@ -164,7 +164,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	unsigned cpu;
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(per_cpu(cpu_sibling_map, cpu));
 		/*
 		 * cpu_core_map will be zeroed when the per
 		 * cpu area is allocated.