author     Mike Travis <travis@sgi.com>              2007-10-19 14:35:04 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2007-10-19 14:35:04 -0400
commit     92cb7612aee39642d109b8d935ad265e602c0563 (patch)
tree       307f4183226f52418bd6842b5d970f03524ad1c1 /arch/x86
parent     f1df280f53d7c3ce8613a3b25d1efe009b9860dd (diff)
x86: convert cpuinfo_x86 array to a per_cpu array
cpu_data is currently an array defined using NR_CPUS.  This means that we
overallocate, since we will rarely really use the maximum number of
configured cpus.  When the NR_CPUS count is raised to 4096 the size of
cpu_data becomes 3,145,728 bytes.

These changes were adopted from the sparc64 (and ia64) code.

An additional field was added to cpuinfo_x86 to be a non-ambiguous cpu
index.  This corresponds to the index into a cpumask_t as well as the
per_cpu index.  It's used in various places like show_cpuinfo().

cpu_data is defined to be the boot_cpu_data structure for the NON-SMP
case.

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Dmitry Torokhov <dtor@mail.ru>
Cc: "Antonino A. Daplas" <adaplas@pol.net>
Cc: Mark M. Hoffman <mhoffman@lightlink.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
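The accessor that replaces direct array indexing is defined on the include
side, which is outside the arch/x86 diffstat shown below.  A minimal sketch
of the pattern, assuming the usual per-cpu machinery (the names cpu_data()
and cpu_info match the call sites in this patch; the rest is simplified):

	/* sketch only -- simplified from the processor.h side of the change */
	#ifdef CONFIG_SMP
	/* one cpuinfo_x86 per possible cpu, kept in the per-cpu area */
	DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
	#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
	#define current_cpu_data	cpu_data(smp_processor_id())
	#else
	/* UP: every index maps to the boot cpu's data */
	#define cpu_data(cpu)		boot_cpu_data
	#define current_cpu_data	boot_cpu_data
	#endif

Call sites then convert mechanically: cpu_data[n].field becomes
cpu_data(n).field, &cpu_data[n] becomes &cpu_data(n), and code that
recovered a cpu number by pointer arithmetic (c - cpu_data) uses the new
c->cpu_index field instead, as the hunks below show.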
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/acpi/cstate.c                    |  4
-rw-r--r--  arch/x86/kernel/acpi/processor.c                 |  2
-rw-r--r--  arch/x86/kernel/alternative.c                    |  6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c       |  4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/e_powersaver.c       |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/elanfreq.c           |  4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c           |  4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longrun.c            |  4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c        |  4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k6.c        |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c        |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/sc520_freq.c         |  4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c |  6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-lib.c      |  2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c            |  8
-rw-r--r--  arch/x86/kernel/cpu/proc.c                       | 11
-rw-r--r--  arch/x86/kernel/cpuid.c                          |  2
-rw-r--r--  arch/x86/kernel/mce_64.c                         |  3
-rw-r--r--  arch/x86/kernel/mce_amd_64.c                     |  4
-rw-r--r--  arch/x86/kernel/microcode.c                      |  6
-rw-r--r--  arch/x86/kernel/msr.c                            |  2
-rw-r--r--  arch/x86/kernel/setup_64.c                       | 20
-rw-r--r--  arch/x86/kernel/smp_32.c                         |  2
-rw-r--r--  arch/x86/kernel/smpboot_32.c                     | 51
-rw-r--r--  arch/x86/kernel/smpboot_64.c                     | 45
-rw-r--r--  arch/x86/kernel/tsc_32.c                         |  8
-rw-r--r--  arch/x86/kernel/tsc_64.c                         |  4
-rw-r--r--  arch/x86/kernel/vsyscall_64.c                    |  2
-rw-r--r--  arch/x86/lib/delay_32.c                          |  2
-rw-r--r--  arch/x86/lib/delay_64.c                          |  3
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c              | 12
31 files changed, 125 insertions, 110 deletions
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 2d39f55d29a..10b67170b13 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -29,7 +29,7 @@
 void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
 					unsigned int cpu)
 {
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	flags->bm_check = 0;
 	if (num_online_cpus() == 1)
@@ -72,7 +72,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
 				struct acpi_processor_cx *cx, struct acpi_power_register *reg)
 {
 	struct cstate_entry *percpu_entry;
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	cpumask_t saved_mask;
 	int retval;
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index b54fded4983..2ed0a4ce62f 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -63,7 +63,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
 void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
 {
 	unsigned int cpu = pr->id;
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	pr->pdc = NULL;
 	if (c->x86_vendor == X86_VENDOR_INTEL)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 3bd2688bd44..d6405e0842b 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -357,14 +357,14 @@ void alternatives_smp_switch(int smp)
 	if (smp) {
 		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
 		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_lock(mod->locks, mod->locks_end,
 					      mod->text, mod->text_end);
 	} else {
 		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
 		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
 		list_for_each_entry(mod, &smp_alt_modules, next)
 			alternatives_smp_unlock(mod->locks, mod->locks_end,
 						mod->text, mod->text_end);
@@ -432,7 +432,7 @@ void __init alternative_instructions(void)
 	if (1 == num_possible_cpus()) {
 		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
 		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
-		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
+		set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
 		alternatives_smp_unlock(__smp_locks, __smp_locks_end,
 					_text, _etext);
 	}
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 2ca43ba32bc..fea0af0476b 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -77,7 +77,7 @@ static unsigned int acpi_pstate_strict;
 
 static int check_est_cpu(unsigned int cpuid)
 {
-	struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
+	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
 
 	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
 	    !cpu_has(cpu, X86_FEATURE_EST))
@@ -560,7 +560,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	unsigned int cpu = policy->cpu;
 	struct acpi_cpufreq_data *data;
 	unsigned int result = 0;
-	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
 	struct acpi_processor_performance *perf;
 
 	dprintk("acpi_cpufreq_cpu_init\n");
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index c11baaf9f2b..326a4c81f68 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -305,7 +305,7 @@ static struct cpufreq_driver eps_driver = {
 
 static int __init eps_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	/* This driver will work only on Centaur C7 processors with
 	 * Enhanced SpeedStep/PowerSaver registers */
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
index 1e7ae7dafcf..94619c22f56 100644
--- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
@@ -199,7 +199,7 @@ static int elanfreq_target (struct cpufreq_policy *policy,
 
 static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	unsigned int i;
 	int result;
 
@@ -280,7 +280,7 @@ static struct cpufreq_driver elanfreq_driver = {
 
 static int __init elanfreq_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	/* Test if we have the right hardware */
 	if ((c->x86_vendor != X86_VENDOR_AMD) ||
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 5045f5d583c..749d00cb2eb 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -780,7 +780,7 @@ static int longhaul_setup_southbridge(void)
 
 static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	char *cpuname=NULL;
 	int ret;
 	u32 lo, hi;
@@ -959,7 +959,7 @@ static struct cpufreq_driver longhaul_driver = {
 
 static int __init longhaul_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
 		return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index b2689514295..af4a867a097 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -172,7 +172,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
 	u32 save_lo, save_hi;
 	u32 eax, ebx, ecx, edx;
 	u32 try_hi;
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (!low_freq || !high_freq)
 		return -EINVAL;
@@ -298,7 +298,7 @@ static struct cpufreq_driver longrun_driver = {
  */
 static int __init longrun_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
 	    !cpu_has(c, X86_FEATURE_LONGRUN))
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 793eae854f4..14791ec55cf 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -195,7 +195,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 
 static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
 	int cpuid = 0;
 	unsigned int i;
 
@@ -279,7 +279,7 @@ static struct cpufreq_driver p4clockmod_driver = {
 
 static int __init cpufreq_p4_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	int ret;
 
 	/*
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index 6d028533931..42405b4e34e 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -215,7 +215,7 @@ static struct cpufreq_driver powernow_k6_driver = {
  */
 static int __init powernow_k6_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
 		((c->x86_model != 12) && (c->x86_model != 13)))
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index f3686a5f230..b5a9863d6cd 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -114,7 +114,7 @@ static int check_fsb(unsigned int fsbspeed)
 
 static int check_powernow(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	unsigned int maxei, eax, ebx, ecx, edx;
 
 	if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) {
diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
index d9f3e90a7ae..42da9bd677d 100644
--- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
+++ b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
@@ -102,7 +102,7 @@ static int sc520_freq_target (struct cpufreq_policy *policy,
 
 static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	int result;
 
 	/* capability check */
@@ -151,7 +151,7 @@ static struct cpufreq_driver sc520_freq_driver = {
 
 static int __init sc520_freq_init(void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	int err;
 
 	/* Test if we have the right hardware */
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 811d4743854..3031f119619 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -230,7 +230,7 @@ static struct cpu_model models[] =
 
 static int centrino_cpu_init_table(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
 	struct cpu_model *model;
 
 	for(model = models; model->cpu_id != NULL; model++)
@@ -340,7 +340,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 
 static int centrino_cpu_init(struct cpufreq_policy *policy)
 {
-	struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
+	struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
 	unsigned freq;
 	unsigned l, h;
 	int ret;
@@ -612,7 +612,7 @@ static struct cpufreq_driver centrino_driver = {
  */
 static int __init centrino_init(void)
 {
-	struct cpuinfo_x86 *cpu = cpu_data;
+	struct cpuinfo_x86 *cpu = &cpu_data(0);
 
 	if (!cpu_has(cpu, X86_FEATURE_EST))
 		return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index b1acc8ce316..76c3ab0da46 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -228,7 +228,7 @@ EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);
 
 unsigned int speedstep_detect_processor (void)
 {
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 	u32 ebx, msr_lo, msr_hi;
 
 	dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 35c7ebb5742..9921b01fe19 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -295,7 +295,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
 	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
 #ifdef CONFIG_X86_HT
-	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
+	unsigned int cpu = c->cpu_index;
 #endif
 
 	if (c->cpuid_level > 3) {
@@ -459,7 +459,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
 	int index_msb, i;
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
@@ -470,8 +470,8 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	index_msb = get_count_order(num_threads_sharing);
 
 	for_each_online_cpu(i) {
-		if (c[i].apicid >> index_msb ==
-		    c[cpu].apicid >> index_msb) {
+		if (cpu_data(i).apicid >> index_msb ==
+		    c->apicid >> index_msb) {
 			cpu_set(i, this_leaf->shared_cpu_map);
 			if (i != cpu && cpuid4_info[i]) {
 				sibling_leaf = CPUID4_INFO_IDX(i, index);
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 879a0f789b1..2d42b414b77 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -85,12 +85,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		/* nothing */
 	};
 	struct cpuinfo_x86 *c = v;
-	int i, n = c - cpu_data;
+	int i, n = 0;
 	int fpu_exception;
 
 #ifdef CONFIG_SMP
 	if (!cpu_online(n))
 		return 0;
+	n = c->cpu_index;
 #endif
 	seq_printf(m, "processor\t: %d\n"
 		   "vendor_id\t: %s\n"
@@ -175,11 +176,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+	if (*pos == 0)	/* just in case, cpu 0 is not the first */
+		*pos = first_cpu(cpu_possible_map);
+	if ((*pos) < NR_CPUS && cpu_possible(*pos))
+		return &cpu_data(*pos);
+	return NULL;
 }
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	++*pos;
+	*pos = next_cpu(*pos, cpu_possible_map);
 	return c_start(m, pos);
 }
 static void c_stop(struct seq_file *m, void *v)
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 2086c727fb0..05c9936a16c 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -114,7 +114,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
 static int cpuid_open(struct inode *inode, struct file *file)
 {
 	unsigned int cpu = iminor(file->f_path.dentry->d_inode);
-	struct cpuinfo_x86 *c = &(cpu_data)[cpu];
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	if (cpu >= NR_CPUS || !cpu_online(cpu))
 		return -ENXIO;	/* No such CPU */
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c
index 66e6b797b2c..2cf20de5bec 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/mce_64.c
@@ -799,7 +799,8 @@ static __cpuinit int mce_create_device(unsigned int cpu)
 {
 	int err;
 	int i;
-	if (!mce_available(&cpu_data[cpu]))
+
+	if (!mce_available(&cpu_data(cpu)))
 		return -EIO;
 
 	memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c
index 0d2afd96aca..752fb16a817 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/mce_amd_64.c
@@ -472,11 +472,11 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	sprintf(name, "threshold_bank%i", bank);
 
 #ifdef CONFIG_SMP
-	if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) {	/* symlink */
+	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
 		i = first_cpu(per_cpu(cpu_core_map, cpu));
 
 		/* first core not up yet */
-		if (cpu_data[i].cpu_core_id)
+		if (cpu_data(i).cpu_core_id)
 			goto out;
 
 		/* already linked */
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 09cf7811035..09c315214a5 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -132,7 +132,7 @@ static struct ucode_cpu_info {
 
 static void collect_cpu_info(int cpu_num)
 {
-	struct cpuinfo_x86 *c = cpu_data + cpu_num;
+	struct cpuinfo_x86 *c = &cpu_data(cpu_num);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
 	unsigned int val[2];
 
@@ -522,7 +522,7 @@ static struct platform_device *microcode_pdev;
 static int cpu_request_microcode(int cpu)
 {
 	char name[30];
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	const struct firmware *firmware;
 	void *buf;
 	unsigned long size;
@@ -570,7 +570,7 @@ static int cpu_request_microcode(int cpu)
 
 static int apply_microcode_check_cpu(int cpu)
 {
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	cpumask_t old;
 	unsigned int val[2];
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index e18e516cf54..ee6eba4ecfe 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -112,7 +112,7 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
 static int msr_open(struct inode *inode, struct file *file)
 {
 	unsigned int cpu = iminor(file->f_path.dentry->d_inode);
-	struct cpuinfo_x86 *c = &(cpu_data)[cpu];
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	if (cpu >= NR_CPUS || !cpu_online(cpu))
 		return -ENXIO;	/* No such CPU */
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 94630c66470..0f0d9d4a665 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -534,7 +534,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 		   but in the same order as the HT nodeids.
 		   If that doesn't result in a usable node fall back to the
 		   path for the previous case.  */
-		int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
+		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
 		if (ht_nodeid >= 0 &&
 		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
 			node = apicid_to_node[ht_nodeid];
@@ -858,6 +858,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
 
 #ifdef CONFIG_SMP
 	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
+	c->cpu_index = 0;
 #endif
 }
 
@@ -964,6 +965,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	struct cpuinfo_x86 *c = v;
+	int cpu = 0;
 
 	/*
 	 * These flag bits must match the definitions in <asm/cpufeature.h>.
@@ -1042,8 +1044,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 
 #ifdef CONFIG_SMP
-	if (!cpu_online(c-cpu_data))
+	if (!cpu_online(c->cpu_index))
 		return 0;
+	cpu = c->cpu_index;
 #endif
 
 	seq_printf(m,"processor\t: %u\n"
@@ -1051,7 +1054,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		   "cpu family\t: %d\n"
 		   "model\t\t: %d\n"
 		   "model name\t: %s\n",
-		   (unsigned)(c-cpu_data),
+		   (unsigned)cpu,
 		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
 		   c->x86,
 		   (int)c->x86_model,
@@ -1063,7 +1066,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "stepping\t: unknown\n");
 
 	if (cpu_has(c,X86_FEATURE_TSC)) {
-		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
+		unsigned int freq = cpufreq_quick_get((unsigned)cpu);
 		if (!freq)
 			freq = cpu_khz;
 		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
@@ -1076,7 +1079,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 #ifdef CONFIG_SMP
 	if (smp_num_siblings * c->x86_max_cores > 1) {
-		int cpu = c - cpu_data;
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
 		seq_printf(m, "siblings\t: %d\n",
 			   cpus_weight(per_cpu(cpu_core_map, cpu)));
@@ -1134,12 +1136,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
-	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
+	if (*pos == 0)	/* just in case, cpu 0 is not the first */
+		*pos = first_cpu(cpu_possible_map);
+	if ((*pos) < NR_CPUS && cpu_possible(*pos))
+		return &cpu_data(*pos);
+	return NULL;
 }
 
 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	++*pos;
+	*pos = next_cpu(*pos, cpu_possible_map);
 	return c_start(m, pos);
 }
 
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 4974c3d2a0a..2621ca3b2e4 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -610,7 +610,7 @@ static void stop_this_cpu (void * dummy)
 	 */
 	cpu_clear(smp_processor_id(), cpu_online_map);
 	disable_local_APIC();
-	if (cpu_data[smp_processor_id()].hlt_works_ok)
+	if (cpu_data(smp_processor_id()).hlt_works_ok)
 		for(;;) halt();
 	for (;;);
 }
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 47bccfc283d..7b8fdfa169d 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -89,8 +89,8 @@ EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;
 
 /* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 /*
  * The following static array is used during kernel startup
@@ -158,9 +158,10 @@ void __init smp_alloc_memory(void)
 
 void __cpuinit smp_store_cpu_info(int id)
 {
-	struct cpuinfo_x86 *c = cpu_data + id;
+	struct cpuinfo_x86 *c = &cpu_data(id);
 
 	*c = boot_cpu_data;
+	c->cpu_index = id;
 	if (id!=0)
 		identify_secondary_cpu(c);
 	/*
@@ -302,7 +303,7 @@ static int cpucount;
 /* maps the cpu to the sched domain representing multi-core */
 cpumask_t cpu_coregroup_map(int cpu)
 {
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	/*
 	 * For perf, we return last level cache shared map.
 	 * And for power savings, we return cpu_core_map
@@ -319,41 +320,41 @@ static cpumask_t cpu_sibling_setup_map;
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
 	int i;
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	cpu_set(cpu, cpu_sibling_setup_map);
 
 	if (smp_num_siblings > 1) {
 		for_each_cpu_mask(i, cpu_sibling_setup_map) {
-			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
-			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
+			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
 				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
 				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 				cpu_set(i, per_cpu(cpu_core_map, cpu));
 				cpu_set(cpu, per_cpu(cpu_core_map, i));
-				cpu_set(i, c[cpu].llc_shared_map);
-				cpu_set(cpu, c[i].llc_shared_map);
+				cpu_set(i, c->llc_shared_map);
+				cpu_set(cpu, cpu_data(i).llc_shared_map);
 			}
 		}
 	} else {
 		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 	}
 
-	cpu_set(cpu, c[cpu].llc_shared_map);
+	cpu_set(cpu, c->llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
 		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-		c[cpu].booted_cores = 1;
+		c->booted_cores = 1;
 		return;
 	}
 
 	for_each_cpu_mask(i, cpu_sibling_setup_map) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpu_set(i, c[cpu].llc_shared_map);
-			cpu_set(cpu, c[i].llc_shared_map);
+			cpu_set(i, c->llc_shared_map);
+			cpu_set(cpu, cpu_data(i).llc_shared_map);
 		}
-		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
+		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 			cpu_set(i, per_cpu(cpu_core_map, cpu));
 			cpu_set(cpu, per_cpu(cpu_core_map, i));
 			/*
@@ -365,15 +366,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 				 * the booted_cores for this new cpu
 				 */
 				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-					c[cpu].booted_cores++;
+					c->booted_cores++;
 				/*
 				 * increment the core count for all
 				 * the other cpus in this package
 				 */
 				if (i != cpu)
-					c[i].booted_cores++;
-			} else if (i != cpu && !c[cpu].booted_cores)
-				c[cpu].booted_cores = c[i].booted_cores;
+					cpu_data(i).booted_cores++;
+			} else if (i != cpu && !c->booted_cores)
+				c->booted_cores = cpu_data(i).booted_cores;
 		}
 	}
 }
@@ -852,7 +853,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		/* number CPUs logically, starting from 1 (BSP is 0) */
 		Dprintk("OK.\n");
 		printk("CPU%d: ", cpu);
-		print_cpu_info(&cpu_data[cpu]);
+		print_cpu_info(&cpu_data(cpu));
 		Dprintk("CPU has booted.\n");
 	} else {
 		boot_error= 1;
@@ -969,7 +970,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 	 */
 	smp_store_cpu_info(0); /* Final full version of the data */
 	printk("CPU%d: ", 0);
-	print_cpu_info(&cpu_data[0]);
+	print_cpu_info(&cpu_data(0));
 
 	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
 	boot_cpu_logical_apicid = logical_smp_processor_id();
@@ -1092,7 +1093,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 	Dprintk("Before bogomips.\n");
 	for (cpu = 0; cpu < NR_CPUS; cpu++)
 		if (cpu_isset(cpu, cpu_callout_map))
-			bogosum += cpu_data[cpu].loops_per_jiffy;
+			bogosum += cpu_data(cpu).loops_per_jiffy;
 	printk(KERN_INFO
 		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 		cpucount+1,
@@ -1162,7 +1163,7 @@ void __init native_smp_prepare_boot_cpu(void)
 void remove_siblinginfo(int cpu)
 {
 	int sibling;
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
 		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -1170,15 +1171,15 @@ void remove_siblinginfo(int cpu)
 		 * last thread sibling in this cpu core going down
 		 */
 		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-			c[sibling].booted_cores--;
+			cpu_data(sibling).booted_cores--;
 	}
 
 	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
 		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
 	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 	cpus_clear(per_cpu(cpu_core_map, cpu));
-	c[cpu].phys_proc_id = 0;
-	c[cpu].cpu_core_id = 0;
+	c->phys_proc_id = 0;
+	c->cpu_core_id = 0;
 	cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 7c8f5864318..fd1fff6a35a 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -84,8 +84,8 @@ cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);
 
 /* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 /* Set when the idlers are all forked */
 int smp_threads_ready;
@@ -138,9 +138,10 @@ static unsigned long __cpuinit setup_trampoline(void)
 
 static void __cpuinit smp_store_cpu_info(int id)
 {
-	struct cpuinfo_x86 *c = cpu_data + id;
+	struct cpuinfo_x86 *c = &cpu_data(id);
 
 	*c = boot_cpu_data;
+	c->cpu_index = id;
 	identify_cpu(c);
 	print_cpu_info(c);
 }
@@ -237,7 +238,7 @@ void __cpuinit smp_callin(void)
 /* maps the cpu to the sched domain representing multi-core */
 cpumask_t cpu_coregroup_map(int cpu)
 {
-	struct cpuinfo_x86 *c = cpu_data + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	/*
 	 * For perf, we return last level cache shared map.
 	 * And for power savings, we return cpu_core_map
@@ -254,41 +255,41 @@ static cpumask_t cpu_sibling_setup_map;
 static inline void set_cpu_sibling_map(int cpu)
 {
 	int i;
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	cpu_set(cpu, cpu_sibling_setup_map);
 
 	if (smp_num_siblings > 1) {
 		for_each_cpu_mask(i, cpu_sibling_setup_map) {
-			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
-			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
+			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
 				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
 				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 				cpu_set(i, per_cpu(cpu_core_map, cpu));
 				cpu_set(cpu, per_cpu(cpu_core_map, i));
-				cpu_set(i, c[cpu].llc_shared_map);
-				cpu_set(cpu, c[i].llc_shared_map);
+				cpu_set(i, c->llc_shared_map);
+				cpu_set(cpu, cpu_data(i).llc_shared_map);
 			}
 		}
 	} else {
 		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 	}
 
-	cpu_set(cpu, c[cpu].llc_shared_map);
+	cpu_set(cpu, c->llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
 		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-		c[cpu].booted_cores = 1;
+		c->booted_cores = 1;
 		return;
 	}
 
 	for_each_cpu_mask(i, cpu_sibling_setup_map) {
 		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
 		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-			cpu_set(i, c[cpu].llc_shared_map);
-			cpu_set(cpu, c[i].llc_shared_map);
+			cpu_set(i, c->llc_shared_map);
+			cpu_set(cpu, cpu_data(i).llc_shared_map);
 		}
-		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
+		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
 			cpu_set(i, per_cpu(cpu_core_map, cpu));
 			cpu_set(cpu, per_cpu(cpu_core_map, i));
 			/*
@@ -300,15 +301,15 @@ static inline void set_cpu_sibling_map(int cpu)
 				 * the booted_cores for this new cpu
 				 */
 				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-					c[cpu].booted_cores++;
+					c->booted_cores++;
 				/*
 				 * increment the core count for all
 				 * the other cpus in this package
 				 */
 				if (i != cpu)
-					c[i].booted_cores++;
-			} else if (i != cpu && !c[cpu].booted_cores)
-				c[cpu].booted_cores = c[i].booted_cores;
+					cpu_data(i).booted_cores++;
+			} else if (i != cpu && !c->booted_cores)
+				c->booted_cores = cpu_data(i).booted_cores;
 		}
 	}
 }
@@ -989,7 +990,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 static void remove_siblinginfo(int cpu)
 {
 	int sibling;
-	struct cpuinfo_x86 *c = cpu_data;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
 	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
 		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -997,15 +998,15 @@ static void remove_siblinginfo(int cpu)
 		 * last thread sibling in this cpu core going down
 		 */
 		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-			c[sibling].booted_cores--;
+			cpu_data(sibling).booted_cores--;
 	}
 
 	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
 		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
 	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 	cpus_clear(per_cpu(cpu_core_map, cpu));
-	c[cpu].phys_proc_id = 0;
-	c[cpu].cpu_core_id = 0;
+	c->phys_proc_id = 0;
+	c->cpu_core_id = 0;
 	cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e87a3939ed4..b8a7cf67143 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -181,8 +181,8 @@ int recalibrate_cpu_khz(void)
 	if (cpu_has_tsc) {
 		cpu_khz = calculate_cpu_khz();
 		tsc_khz = cpu_khz;
-		cpu_data[0].loops_per_jiffy =
-			cpufreq_scale(cpu_data[0].loops_per_jiffy,
+		cpu_data(0).loops_per_jiffy =
+			cpufreq_scale(cpu_data(0).loops_per_jiffy,
 					cpu_khz_old, cpu_khz);
 		return 0;
 	} else
@@ -215,7 +215,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 			return 0;
 		}
 		ref_freq = freq->old;
-		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+		loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
 		cpu_khz_ref = cpu_khz;
 	}
 
@@ -223,7 +223,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
 	    (val == CPUFREQ_RESUMECHANGE)) {
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-			cpu_data[freq->cpu].loops_per_jiffy =
+			cpu_data(freq->cpu).loops_per_jiffy =
 				cpufreq_scale(loops_per_jiffy_ref,
 						ref_freq, freq->new);
 
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 9f22e542c37..9c70af45b42 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -73,13 +73,13 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpufreq_freqs *freq = data;
 	unsigned long *lpj, dummy;
 
-	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
+	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
 		return 0;
 
 	lpj = &dummy;
 	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
 #ifdef CONFIG_SMP
-		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
+		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
 #else
 		lpj = &boot_cpu_data.loops_per_jiffy;
 #endif
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 4a2c340ab0f..78f2250963a 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -291,7 +291,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
 #ifdef CONFIG_NUMA
 	node = cpu_to_node(cpu);
 #endif
-	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
+	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
 		write_rdtscp_aux((node << 12) | cpu);
 
 	/* Store cpu number in limit so that it can be loaded quickly
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
index f6edb11364d..952e7a89c2a 100644
--- a/arch/x86/lib/delay_32.c
+++ b/arch/x86/lib/delay_32.c
@@ -82,7 +82,7 @@ inline void __const_udelay(unsigned long xloops)
 	__asm__("mull %0"
 		:"=d" (xloops), "=&a" (d0)
 		:"1" (xloops), "0"
-		(cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
+		(cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
 
 	__delay(++xloops);
 }
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
index 2dbebd30834..0ebbfb9e7c7 100644
--- a/arch/x86/lib/delay_64.c
+++ b/arch/x86/lib/delay_64.c
@@ -40,7 +40,8 @@ EXPORT_SYMBOL(__delay);
 
 inline void __const_udelay(unsigned long xloops)
 {
-	__delay(((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) + 1);
+	__delay(((xloops * HZ *
+		cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
 }
 EXPORT_SYMBOL(__const_udelay);
 
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index e4928aa6bdf..f93a730b44d 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -36,8 +36,8 @@ static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR
 
 /* per CPU data structure (for /proc/cpuinfo et al), visible externally
  * indexed physically */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU(cpuinfo_x86, cpu_info) __cacheline_aligned;
+EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 /* physical ID of the CPU used to boot the system */
 unsigned char boot_cpu_id;
@@ -430,7 +430,7 @@ find_smp_config(void)
 void __init
 smp_store_cpu_info(int id)
 {
-	struct cpuinfo_x86 *c=&cpu_data[id];
+	struct cpuinfo_x86 *c = &cpu_data(id);
 
 	*c = boot_cpu_data;
 
@@ -634,7 +634,7 @@ do_boot_cpu(__u8 cpu)
 			   cpu, smp_processor_id()));
 
 	printk("CPU%d: ", cpu);
-	print_cpu_info(&cpu_data[cpu]);
+	print_cpu_info(&cpu_data(cpu));
 	wmb();
 	cpu_set(cpu, cpu_callout_map);
 	cpu_set(cpu, cpu_present_map);
@@ -683,7 +683,7 @@ smp_boot_cpus(void)
 	 */
 	smp_store_cpu_info(boot_cpu_id);
 	printk("CPU%d: ", boot_cpu_id);
-	print_cpu_info(&cpu_data[boot_cpu_id]);
+	print_cpu_info(&cpu_data(boot_cpu_id));
 
 	if(is_cpu_quad()) {
 		/* booting on a Quad CPU */
@@ -714,7 +714,7 @@ smp_boot_cpus(void)
 	unsigned long bogosum = 0;
 	for (i = 0; i < NR_CPUS; i++)
 		if (cpu_isset(i, cpu_online_map))
-			bogosum += cpu_data[i].loops_per_jiffy;
+			bogosum += cpu_data(i).loops_per_jiffy;
 	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 		cpucount+1,
 		bogosum/(500000/HZ),