author		Ingo Molnar <mingo@elte.hu>	2008-09-05 03:19:50 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-05 03:19:50 -0400
commit		446d27338d3b422dd3dfe496d0f362230994d059 (patch)
tree		3452a8c7c487675020b2a2137635bc7dfede57fe /arch
parent		accf0fa697eeb5ff4c2360edc4da5b10abac0b7b (diff)
parent		0a488a53d7ca46ac638c30079072c57e50cfcc7b (diff)

Merge branch 'x86/cpu' into x86/core
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/Makefile		|    4
-rw-r--r--	arch/x86/kernel/cpu/amd.c		|   12
-rw-r--r--	arch/x86/kernel/cpu/amd_64.c		|    4
-rw-r--r--	arch/x86/kernel/cpu/centaur.c		|   14
-rw-r--r--	arch/x86/kernel/cpu/centaur_64.c	|    3
-rw-r--r--	arch/x86/kernel/cpu/common.c		|  589
-rw-r--r--	arch/x86/kernel/cpu/common_64.c		|  363
-rw-r--r--	arch/x86/kernel/cpu/cpu.h		|   18
-rw-r--r--	arch/x86/kernel/cpu/cyrix.c		|   38
-rw-r--r--	arch/x86/kernel/cpu/intel.c		|    3
-rw-r--r--	arch/x86/kernel/cpu/intel_64.c		|    3
-rw-r--r--	arch/x86/kernel/cpu/transmeta.c		|    3
-rw-r--r--	arch/x86/kernel/cpu/umc.c		|    3
-rw-r--r--	arch/x86/kernel/paravirt.c		|    1
-rw-r--r--	arch/x86/kernel/traps_64.c		|    5
-rw-r--r--	arch/x86/kernel/vmlinux_32.lds.S	|    8
-rw-r--r--	arch/x86/kernel/vmlinux_64.lds.S	|    9
17 files changed, 627 insertions(+), 453 deletions(-)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 3ede19a4e0b2..403e689df0b8 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -8,14 +8,14 @@ obj-y += proc.o capflags.o powerflags.o
 obj-$(CONFIG_X86_32)	+= common.o bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)	+= common_64.o bugs_64.o
 
+obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
+obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
 obj-$(CONFIG_CPU_SUP_AMD_32)		+= amd.o
 obj-$(CONFIG_CPU_SUP_AMD_64)		+= amd_64.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
 obj-$(CONFIG_CPU_SUP_CENTAUR_32)	+= centaur.o
 obj-$(CONFIG_CPU_SUP_CENTAUR_64)	+= centaur_64.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
-obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
-obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 
 obj-$(CONFIG_X86_MCE)	+= mcheck/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index cae9cabc3031..d64ea6097ca7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		if (c->x86_power & (1<<8))
 			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
+
+	/* Set MTRR capability flag if appropriate */
+	if (c->x86_model == 13 || c->x86_model == 9 ||
+	    (c->x86_model == 8 && c->x86_mask >= 8))
+		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 				mbytes);
 		}
 
-		/* Set MTRR capability flag if appropriate */
-		if (c->x86_model == 13 || c->x86_model == 9 ||
-		    (c->x86_model == 8 && c->x86_mask >= 8))
-			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 		break;
 	}
 
@@ -297,6 +298,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
 	.c_size_cache	= amd_size_cache,
+	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
+cpu_dev_register(amd_cpu_dev);
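
The K6 MTRR check moves from init_amd() into early_init_amd() because the c_early_init hook runs before MTRR/PAT setup, so only capability bits set there are visible to that code. Under the new scheme a vendor driver plugs in roughly like this (a sketch in kernel context; the example_* names are hypothetical, the field names follow the cpu_dev struct in cpu.h below):

static void __cpuinit early_init_example(struct cpuinfo_x86 *c)
{
	/* Runs from early_identify_cpu(), before MTRR/PAT setup. */
	set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}

static struct cpu_dev example_cpu_dev __cpuinitdata = {
	.c_vendor	= "Example",
	.c_ident	= { "GenuineExample" },
	.c_early_init	= early_init_example,
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,	/* a real driver uses its own vendor ID */
};

cpu_dev_register(example_cpu_dev);
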
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
index d1692b2a41ff..d1c721c0c49f 100644
--- a/arch/x86/kernel/cpu/amd_64.c
+++ b/arch/x86/kernel/cpu/amd_64.c
@@ -218,7 +218,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_ident	= { "AuthenticAMD" },
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
+	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
-
+cpu_dev_register(amd_cpu_dev);
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index e0f45edd6a55..e5f6d89521bf 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -314,6 +314,16 @@ enum {
 	EAMD3D		= 1<<20,
 };
 
+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+	switch (c->x86) {
+	case 5:
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		break;
+	}
+}
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
 
@@ -462,8 +472,10 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
+	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
 	.c_size_cache	= centaur_size_cache,
+	.c_x86_vendor	= X86_VENDOR_CENTAUR,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
+cpu_dev_register(centaur_cpu_dev);
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
index 1d181c40e2e1..49cfc6d2f2fb 100644
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ b/arch/x86/kernel/cpu/centaur_64.c
@@ -29,7 +29,8 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
+	.c_x86_vendor	= X86_VENDOR_CENTAUR,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
+cpu_dev_register(centaur_cpu_dev);
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c63ec65f484c..008c73796bbb 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -22,6 +22,8 @@
 
 #include "cpu.h"
 
+static struct cpu_dev *this_cpu __cpuinitdata;
+
 DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
@@ -58,12 +60,124 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
-__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
-
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
-struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static int __init cachesize_setup(char *str)
+{
+	get_option(&str, &cachesize_override);
+	return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table only is used unless init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
+ *
+ */
+
+/* Look up CPU names by table lookup. */
+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+{
+	struct cpu_model_info *info;
+
+	if (c->x86_model >= 16)
+		return NULL;	/* Range check */
+
+	if (!this_cpu)
+		return NULL;
+
+	info = this_cpu->c_models;
+
+	while (info && info->family) {
+		if (info->family == c->x86)
+			return info->model_names[c->x86_model];
+		info++;
+	}
+	return NULL;		/* Not found */
+}
+
+static int __init x86_fxsr_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_FXSR);
+	setup_clear_cpu_cap(X86_FEATURE_XMM);
+	return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+static int __init x86_sep_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_SEP);
+	return 1;
+}
+__setup("nosep", x86_sep_setup);
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+	u32 f1, f2;
+
+	asm("pushfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "movl %0,%1\n\t"
+	    "xorl %2,%0\n\t"
+	    "pushl %0\n\t"
+	    "popfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "popfl\n\t"
+	    : "=&r" (f1), "=&r" (f2)
+	    : "ir" (flag));
+
+	return ((f1^f2) & flag) != 0;
+}
+
+/* Probe for the CPUID instruction */
+static int __cpuinit have_cpuid_p(void)
+{
+	return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
+		/* Disable processor serial number */
+		unsigned long lo, hi;
+		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+		lo |= 0x200000;
+		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+		printk(KERN_NOTICE "CPU serial number disabled.\n");
+		clear_cpu_cap(c, X86_FEATURE_PN);
+
+		/* Disabling the serial number may affect the cpuid level */
+		c->cpuid_level = cpuid_eax(0);
+	}
+}
+
+static int __init x86_serial_nr_setup(char *s)
+{
+	disable_x86_serial_nr = 0;
+	return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
+__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
+
+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(void)
+{
+	struct desc_ptr gdt_descr;
+
+	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+}
+
+static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -81,22 +195,15 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
+	.c_x86_vendor = X86_VENDOR_UNKNOWN,
 };
-static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
-
-static int __init cachesize_setup(char *str)
-{
-	get_option(&str, &cachesize_override);
-	return 1;
-}
-__setup("cachesize=", cachesize_setup);
 
 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
 
-	if (cpuid_eax(0x80000000) < 0x80000004)
+	if (c->extended_cpuid_level < 0x80000004)
 		return 0;
 
 	v = (unsigned int *) c->x86_model_id;
@@ -120,24 +227,23 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	return 1;
 }
 
-
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ecx, edx, l2size;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
-	n = cpuid_eax(0x80000000);
+	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
-		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 				edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size = (ecx>>24)+(edx>>24);
+		c->x86_cache_size = (ecx>>24) + (edx>>24);
 	}
 
 	if (n < 0x80000006)	/* Some chips just has a large L1. */
 		return;
 
-	ecx = cpuid_ecx(0x80000006);
+	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 	l2size = ecx >> 16;
 
 	/* do processor-specific cache resizing */
@@ -154,112 +260,90 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 	c->x86_cache_size = l2size;
 
 	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
 			l2size, ecx & 0xFF);
 }
 
-/*
- * Naming convention should be: <Name> [(<Codename>)]
- * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
- */
-
-/* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
-{
-	struct cpu_model_info *info;
-
-	if (c->x86_model >= 16)
-		return NULL;	/* Range check */
-
-	if (!this_cpu)
-		return NULL;
-
-	info = this_cpu->c_models;
-
-	while (info && info->family) {
-		if (info->family == c->x86)
-			return info->model_names[c->x86_model];
-		info++;
-	}
-	return NULL;		/* Not found */
-}
-
+#ifdef CONFIG_X86_HT
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+{
+	u32 eax, ebx, ecx, edx;
+	int index_msb, core_bits;
+
+	if (!cpu_has(c, X86_FEATURE_HT))
+		return;
+
+	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+		goto out;
+
+	cpuid(1, &eax, &ebx, &ecx, &edx);
+
+	smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+	if (smp_num_siblings == 1) {
+		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
+	} else if (smp_num_siblings > 1) {
+
+		if (smp_num_siblings > NR_CPUS) {
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+					smp_num_siblings);
+			smp_num_siblings = 1;
+			return;
+		}
+
+		index_msb = get_count_order(smp_num_siblings);
+		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
+
+
+		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+
+		index_msb = get_count_order(smp_num_siblings);
+
+		core_bits = get_count_order(c->x86_max_cores);
+
+		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+					       ((1 << core_bits) - 1);
+	}
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+out:
+	if ((c->x86_max_cores * smp_num_siblings) > 1) {
+		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
+		       c->cpu_core_id);
+	}
+}
+#endif
+
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
 	static int printed;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		if (cpu_devs[i]) {
-			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
-			    (cpu_devs[i]->c_ident[1] &&
-			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
-				c->x86_vendor = i;
-				if (!early)
-					this_cpu = cpu_devs[i];
-				return;
-			}
+		if (!cpu_devs[i])
+			break;
+
+		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+		    (cpu_devs[i]->c_ident[1] &&
+		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+			this_cpu = cpu_devs[i];
+			c->x86_vendor = this_cpu->c_x86_vendor;
+			return;
 		}
 	}
+
 	if (!printed) {
 		printed++;
 		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
+
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
 	this_cpu = &default_cpu;
 }
 
-
-static int __init x86_fxsr_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_FXSR);
-	setup_clear_cpu_cap(X86_FEATURE_XMM);
-	return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
-
-static int __init x86_sep_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_SEP);
-	return 1;
-}
-__setup("nosep", x86_sep_setup);
-
-
-/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
-{
-	u32 f1, f2;
-
-	asm("pushfl\n\t"
-	    "pushfl\n\t"
-	    "popl %0\n\t"
-	    "movl %0,%1\n\t"
-	    "xorl %2,%0\n\t"
-	    "pushl %0\n\t"
-	    "popfl\n\t"
-	    "pushfl\n\t"
-	    "popl %0\n\t"
-	    "popfl\n\t"
-	    : "=&r" (f1), "=&r" (f2)
-	    : "ir" (flag));
-
-	return ((f1^f2) & flag) != 0;
-}
-
-
-/* Probe for the CPUID instruction */
-static int __cpuinit have_cpuid_p(void)
-{
-	return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
-void __init cpu_detect(struct cpuinfo_x86 *c)
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -268,50 +352,47 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 	      (unsigned int *)&c->x86_vendor_id[4]);
 
 	c->x86 = 4;
+	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-		c->x86 = (tfms >> 8) & 15;
-		c->x86_model = (tfms >> 4) & 15;
+		c->x86 = (tfms >> 8) & 0xf;
+		c->x86_model = (tfms >> 4) & 0xf;
+		c->x86_mask = tfms & 0xf;
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		c->x86_mask = tfms & 15;
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
 		if (cap0 & (1<<19)) {
-			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+			c->x86_cache_alignment = c->x86_clflush_size;
 		}
 	}
 }
-static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
+
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
-	unsigned int ebx;
+	u32 ebx;
 
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-	if (have_cpuid_p()) {
-		/* Intel-defined flags: level 0x00000001 */
-		if (c->cpuid_level >= 0x00000001) {
-			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-			c->x86_capability[0] = capability;
-			c->x86_capability[4] = excap;
-		}
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 capability, excap;
+		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+		c->x86_capability[0] = capability;
+		c->x86_capability[4] = excap;
+	}
 
-		/* AMD-defined flags: level 0x80000001 */
-		xlvl = cpuid_eax(0x80000000);
-		if ((xlvl & 0xffff0000) == 0x80000000) {
-			if (xlvl >= 0x80000001) {
-				c->x86_capability[1] = cpuid_edx(0x80000001);
-				c->x86_capability[6] = cpuid_ecx(0x80000001);
-			}
-		}
-
-	}
-
-}
+	/* AMD-defined flags: level 0x80000001 */
+	xlvl = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = xlvl;
+	if ((xlvl & 0xffff0000) == 0x80000000) {
+		if (xlvl >= 0x80000001) {
+			c->x86_capability[1] = cpuid_edx(0x80000001);
+			c->x86_capability[6] = cpuid_ecx(0x80000001);
+		}
+	}
+}
 
 /*
  * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -321,25 +402,54 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
  * WARNING: this function is only called on the BP.  Don't add code here
  * that is supposed to run on all CPUs.
  */
-static void __init early_cpu_detect(void)
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
-	c->x86_cache_alignment = 32;
 	c->x86_clflush_size = 32;
+	c->x86_cache_alignment = c->x86_clflush_size;
 
 	if (!have_cpuid_p())
 		return;
 
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	c->extended_cpuid_level = 0;
+
 	cpu_detect(c);
 
-	get_cpu_vendor(c, 1);
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
 
-	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
-	    cpu_devs[c->x86_vendor]->c_early_init)
-		cpu_devs[c->x86_vendor]->c_early_init(c);
+	if (this_cpu->c_early_init)
+		this_cpu->c_early_init(c);
 
-	early_get_cap(c);
+	validate_pat_support(c);
+}
+
+void __init early_cpu_init(void)
+{
+	struct cpu_dev **cdev;
+	int count = 0;
+
+	printk("KERNEL supported cpus:\n");
+	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+		struct cpu_dev *cpudev = *cdev;
+		unsigned int j;
+
+		if (count >= X86_VENDOR_NUM)
+			break;
+		cpu_devs[count] = cpudev;
+		count++;
+
+		for (j = 0; j < 2; j++) {
+			if (!cpudev->c_ident[j])
+				continue;
+			printk("  %s %s\n", cpudev->c_vendor,
+				cpudev->c_ident[j]);
+		}
+	}
+
+	early_identify_cpu(&boot_cpu_data);
 }
 
 /*
@@ -373,86 +483,33 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
-	u32 tfms, xlvl;
-	unsigned int ebx;
-
-	if (have_cpuid_p()) {
-		/* Get vendor name */
-		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-		      (unsigned int *)&c->x86_vendor_id[0],
-		      (unsigned int *)&c->x86_vendor_id[8],
-		      (unsigned int *)&c->x86_vendor_id[4]);
-
-		get_cpu_vendor(c, 0);
-		/* Initialize the standard set of capabilities */
-		/* Note that the vendor-specific code below might override */
-		/* Intel-defined flags: level 0x00000001 */
-		if (c->cpuid_level >= 0x00000001) {
-			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-			c->x86_capability[0] = capability;
-			c->x86_capability[4] = excap;
-			c->x86 = (tfms >> 8) & 15;
-			c->x86_model = (tfms >> 4) & 15;
-			if (c->x86 == 0xf)
-				c->x86 += (tfms >> 20) & 0xff;
-			if (c->x86 >= 0x6)
-				c->x86_model += ((tfms >> 16) & 0xF) << 4;
-			c->x86_mask = tfms & 15;
-			c->initial_apicid = (ebx >> 24) & 0xFF;
-#ifdef CONFIG_X86_HT
-			c->apicid = phys_pkg_id(c->initial_apicid, 0);
-			c->phys_proc_id = c->initial_apicid;
-#else
-			c->apicid = c->initial_apicid;
-#endif
-			if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
-				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
-		} else {
-			/* Have CPUID level 0 only - unheard of */
-			c->x86 = 4;
-		}
+	if (!have_cpuid_p())
+		return;
 
-		/* AMD-defined flags: level 0x80000001 */
-		xlvl = cpuid_eax(0x80000000);
-		if ((xlvl & 0xffff0000) == 0x80000000) {
-			if (xlvl >= 0x80000001) {
-				c->x86_capability[1] = cpuid_edx(0x80000001);
-				c->x86_capability[6] = cpuid_ecx(0x80000001);
-			}
-			if (xlvl >= 0x80000004)
-				get_model_name(c); /* Default name */
-		}
+	c->extended_cpuid_level = 0;
 
-		init_scattered_cpuid_features(c);
-		detect_nopl(c);
-	}
-}
+	cpu_detect(c);
 
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-{
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
+	get_cpu_vendor(c);
 
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
-}
+	get_cpu_cap(c);
 
-static int __init x86_serial_nr_setup(char *s)
-{
-	disable_x86_serial_nr = 0;
-	return 1;
-}
-__setup("serialnumber", x86_serial_nr_setup);
+	if (c->cpuid_level >= 0x00000001) {
+		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
+#ifdef CONFIG_X86_HT
+		c->apicid = phys_pkg_id(c->initial_apicid, 0);
+		c->phys_proc_id = c->initial_apicid;
+#else
+		c->apicid = c->initial_apicid;
+#endif
+	}
 
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
 
+	init_scattered_cpuid_features(c);
+	detect_nopl(c);
+}
 
 /*
  * This does the hard work of actually picking apart the CPU stuff...
@@ -529,7 +586,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for (i = 0 ; i < NCAPINTS ; i++)
+		for (i = 0; i < NCAPINTS; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
@@ -558,51 +615,48 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
-#ifdef CONFIG_X86_HT
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-	int index_msb, core_bits;
-
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-		return;
-
-	smp_num_siblings = (ebx & 0xff0000) >> 16;
-
-	if (smp_num_siblings == 1) {
-		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
-
-		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of the "
-					"siblings %d", smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
-
-		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-
-		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
-		       c->phys_proc_id);
-
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-		index_msb = get_count_order(smp_num_siblings) ;
-
-		core_bits = get_count_order(c->x86_max_cores);
-
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
-
-		if (c->x86_max_cores > 1)
-			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
-			       c->cpu_core_id);
-	}
-}
-#endif
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+		}
+	}
+}
+
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+	int num;
+
+	get_option(&arg, &num);
+
+	if (num > 0)
+		show_msr = num;
+	return 1;
+}
+__setup("show_msr=", setup_show_msr);
 
 static __init int setup_noclflush(char *arg)
 {
@@ -621,17 +675,25 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		vendor = c->x86_vendor_id;
 
 	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-		printk("%s ", vendor);
+		printk(KERN_CONT "%s ", vendor);
 
-	if (!c->x86_model_id[0])
-		printk("%d86", c->x86);
+	if (c->x86_model_id[0])
+		printk(KERN_CONT "%s", c->x86_model_id);
 	else
-		printk("%s", c->x86_model_id);
+		printk(KERN_CONT "%d86", c->x86);
 
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(" stepping %02x\n", c->x86_mask);
+		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
@@ -647,19 +709,6 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-void __init early_cpu_init(void)
-{
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start ;
-	     cvdev < __x86cpuvendor_end   ;
-	     cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-
-	early_cpu_detect();
-	validate_pat_support(&boot_cpu_data);
-}
-
 /* Make sure %fs is initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
@@ -668,18 +717,6 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	return regs;
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
-{
-	struct desc_ptr gdt_descr;
-
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-}
-
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
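
The family/model/stepping decoding that cpu_detect() now shares between the 32-bit and 64-bit files can be cross-checked from user space. A minimal sketch using GCC's <cpuid.h> (runs on any x86 Linux host; it mirrors the kernel's bit twiddling but is not kernel code):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model, stepping;

	/* CPUID leaf 1: EAX carries the version information. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* Base fields first, exactly as cpu_detect() does... */
	family   = (eax >> 8) & 0xf;
	model    = (eax >> 4) & 0xf;
	stepping = eax & 0xf;

	/* ...then fold in the extended family/model fields. */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;
	if (family >= 0x6)
		model += ((eax >> 16) & 0xf) << 4;

	printf("family %#x model %#x stepping %#x\n", family, model, stepping);
	return 0;
}
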
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index 35d11efdf1fe..ae007b3521cb 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -37,6 +37,8 @@
 
 #include "cpu.h"
 
+static struct cpu_dev *this_cpu __cpuinitdata;
+
 /* We need valid kernel segments for data and code in long mode too
  * IRET will check the segment types  kkeil 2000/10/28
  * Also sysret mandates a special GDT layout
@@ -66,7 +68,7 @@ void switch_to_new_gdt(void)
 	load_gdt(&gdt_descr);
 }
 
-struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -76,12 +78,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
+	.c_x86_vendor = X86_VENDOR_UNKNOWN,
 };
-static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
+	char *p, *q;
 
 	if (c->extended_cpuid_level < 0x80000004)
 		return 0;
@@ -91,35 +94,49 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 	c->x86_model_id[48] = 0;
+
+	/* Intel chips right-justify this string for some dumb reason;
+	   undo that brain damage */
+	p = q = &c->x86_model_id[0];
+	while (*p == ' ')
+		p++;
+	if (p != q) {
+		while (*p)
+			*q++ = *p++;
+		while (q <= &c->x86_model_id[48])
+			*q++ = '\0';	/* Zero-pad the rest */
+	}
+
 	return 1;
 }
 
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ebx, ecx, edx;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
 	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
-		       "D cache %dK (%d bytes/line)\n",
-		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+				edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
 		c->x86_cache_size = (ecx>>24) + (edx>>24);
 		/* On K8 L1 TLB is inclusive, so don't count it */
 		c->x86_tlbsize = 0;
 	}
 
-	if (n >= 0x80000006) {
-		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-		ecx = cpuid_ecx(0x80000006);
-		c->x86_cache_size = ecx >> 16;
-		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+	if (n < 0x80000006)	/* Some chips just has a large L1. */
+		return;
 
-		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-		       c->x86_cache_size, ecx & 0xFF);
-	}
+	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
+	l2size = ecx >> 16;
+	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+
+	c->x86_cache_size = l2size;
+
+	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+			l2size, ecx & 0xFF);
 }
 
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -128,14 +145,13 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
 
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 		goto out;
 
+	cpuid(1, &eax, &ebx, &ecx, &edx);
+
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
@@ -143,8 +159,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	} else if (smp_num_siblings > 1) {
 
 		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of "
-			       "siblings %d", smp_num_siblings);
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+					smp_num_siblings);
 			smp_num_siblings = 1;
 			return;
 		}
@@ -161,6 +177,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		c->cpu_core_id = phys_pkg_id(index_msb) &
 					       ((1 << core_bits) - 1);
 	}
+
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
 		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
@@ -168,7 +185,6 @@ out:
 		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
 		       c->cpu_core_id);
 	}
-
 #endif
 }
 
@@ -179,41 +195,148 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 	static int printed;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		if (cpu_devs[i]) {
-			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
-			    (cpu_devs[i]->c_ident[1] &&
-			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
-				c->x86_vendor = i;
-				this_cpu = cpu_devs[i];
-				return;
-			}
+		if (!cpu_devs[i])
+			break;
+
+		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+		    (cpu_devs[i]->c_ident[1] &&
+		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+			this_cpu = cpu_devs[i];
+			c->x86_vendor = this_cpu->c_x86_vendor;
+			return;
 		}
 	}
+
 	if (!printed) {
 		printed++;
 		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
+
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	this_cpu = &default_cpu;
+}
+
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+{
+	/* Get vendor name */
+	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
+	      (unsigned int *)&c->x86_vendor_id[0],
+	      (unsigned int *)&c->x86_vendor_id[8],
+	      (unsigned int *)&c->x86_vendor_id[4]);
+
+	c->x86 = 4;
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 junk, tfms, cap0, misc;
+		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+		c->x86 = (tfms >> 8) & 0xf;
+		c->x86_model = (tfms >> 4) & 0xf;
+		c->x86_mask = tfms & 0xf;
+		if (c->x86 == 0xf)
+			c->x86 += (tfms >> 20) & 0xff;
+		if (c->x86 >= 0x6)
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+		if (cap0 & (1<<19)) {
+			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+			c->x86_cache_alignment = c->x86_clflush_size;
+		}
+	}
 }
 
-static void __init early_cpu_support_print(void)
+
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
-	int i,j;
-	struct cpu_dev *cpu_devx;
+	u32 tfms, xlvl;
+	u32 ebx;
+
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 capability, excap;
+
+		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+		c->x86_capability[0] = capability;
+		c->x86_capability[4] = excap;
+	}
+
+	/* AMD-defined flags: level 0x80000001 */
+	xlvl = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = xlvl;
+	if ((xlvl & 0xffff0000) == 0x80000000) {
+		if (xlvl >= 0x80000001) {
+			c->x86_capability[1] = cpuid_edx(0x80000001);
+			c->x86_capability[6] = cpuid_ecx(0x80000001);
+		}
+	}
+
+	/* Transmeta-defined flags: level 0x80860001 */
+	xlvl = cpuid_eax(0x80860000);
+	if ((xlvl & 0xffff0000) == 0x80860000) {
+		/* Don't set x86_cpuid_level here for now to not confuse. */
+		if (xlvl >= 0x80860001)
+			c->x86_capability[2] = cpuid_edx(0x80860001);
+	}
+
+	if (c->extended_cpuid_level >= 0x80000007)
+		c->x86_power = cpuid_edx(0x80000007);
+
+	if (c->extended_cpuid_level >= 0x80000008) {
+		u32 eax = cpuid_eax(0x80000008);
+
+		c->x86_virt_bits = (eax >> 8) & 0xff;
+		c->x86_phys_bits = eax & 0xff;
+	}
+}
+
+/* Do some early cpuid on the boot CPU to get some parameter that are
+   needed before check_bugs. Everything advanced is in identify_cpu
+   below. */
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+{
+
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
+
+	if (this_cpu->c_early_init)
+		this_cpu->c_early_init(c);
+
+	validate_pat_support(c);
+}
+
+void __init early_cpu_init(void)
+{
+	struct cpu_dev **cdev;
+	int count = 0;
 
 	printk("KERNEL supported cpus:\n");
-	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		cpu_devx = cpu_devs[i];
-		if (!cpu_devx)
-			continue;
+	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+		struct cpu_dev *cpudev = *cdev;
+		unsigned int j;
+
+		if (count >= X86_VENDOR_NUM)
+			break;
+		cpu_devs[count] = cpudev;
+		count++;
+
 		for (j = 0; j < 2; j++) {
-			if (!cpu_devx->c_ident[j])
+			if (!cpudev->c_ident[j])
 				continue;
-			printk("  %s %s\n", cpu_devx->c_vendor,
-				cpu_devx->c_ident[j]);
+			printk("  %s %s\n", cpudev->c_vendor,
+				cpudev->c_ident[j]);
 		}
 	}
+
+	early_identify_cpu(&boot_cpu_data);
 }
 
 /*
@@ -249,111 +372,26 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
-
-void __init early_cpu_init(void)
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start ;
-	     cvdev < __x86cpuvendor_end   ;
-	     cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-	early_cpu_support_print();
-	early_identify_cpu(&boot_cpu_data);
-}
-
-/* Do some early cpuid on the boot CPU to get some parameter that are
-   needed before check_bugs. Everything advanced is in identify_cpu
-   below. */
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
-{
-	u32 tfms, xlvl;
-
-	c->loops_per_jiffy = loops_per_jiffy;
-	c->x86_cache_size = -1;
-	c->x86_vendor = X86_VENDOR_UNKNOWN;
-	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-	c->x86_vendor_id[0] = '\0'; /* Unset */
-	c->x86_model_id[0] = '\0';  /* Unset */
-	c->x86_clflush_size = 64;
-	c->x86_cache_alignment = c->x86_clflush_size;
-	c->x86_max_cores = 1;
-	c->x86_coreid_bits = 0;
 	c->extended_cpuid_level = 0;
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
-	/* Get vendor name */
-	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-	      (unsigned int *)&c->x86_vendor_id[0],
-	      (unsigned int *)&c->x86_vendor_id[8],
-	      (unsigned int *)&c->x86_vendor_id[4]);
+	cpu_detect(c);
 
 	get_cpu_vendor(c);
 
-	/* Initialize the standard set of capabilities */
-	/* Note that the vendor-specific code below might override */
-
-	/* Intel-defined flags: level 0x00000001 */
-	if (c->cpuid_level >= 0x00000001) {
-		__u32 misc;
-		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-		      &c->x86_capability[0]);
-		c->x86 = (tfms >> 8) & 0xf;
-		c->x86_model = (tfms >> 4) & 0xf;
-		c->x86_mask = tfms & 0xf;
-		if (c->x86 == 0xf)
-			c->x86 += (tfms >> 20) & 0xff;
-		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
-			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-	} else {
-		/* Have CPUID level 0 only - unheard of */
-		c->x86 = 4;
-	}
+	get_cpu_cap(c);
 
 	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
 #ifdef CONFIG_SMP
 	c->phys_proc_id = c->initial_apicid;
 #endif
-	/* AMD-defined flags: level 0x80000001 */
-	xlvl = cpuid_eax(0x80000000);
-	c->extended_cpuid_level = xlvl;
-	if ((xlvl & 0xffff0000) == 0x80000000) {
-		if (xlvl >= 0x80000001) {
-			c->x86_capability[1] = cpuid_edx(0x80000001);
-			c->x86_capability[6] = cpuid_ecx(0x80000001);
-		}
-		if (xlvl >= 0x80000004)
-			get_model_name(c); /* Default name */
-	}
 
-	/* Transmeta-defined flags: level 0x80860001 */
-	xlvl = cpuid_eax(0x80860000);
-	if ((xlvl & 0xffff0000) == 0x80860000) {
-		/* Don't set x86_cpuid_level here for now to not confuse. */
-		if (xlvl >= 0x80860001)
-			c->x86_capability[2] = cpuid_edx(0x80860001);
-	}
-
-	if (c->extended_cpuid_level >= 0x80000007)
-		c->x86_power = cpuid_edx(0x80000007);
-
-	if (c->extended_cpuid_level >= 0x80000008) {
-		u32 eax = cpuid_eax(0x80000008);
-
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-	}
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
 
+	init_scattered_cpuid_features(c);
 	detect_nopl(c);
-
-	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
-	    cpu_devs[c->x86_vendor]->c_early_init)
-		cpu_devs[c->x86_vendor]->c_early_init(c);
-
-	validate_pat_support(c);
 }
 
@@ -363,9 +401,19 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
-	early_identify_cpu(c);
+	c->loops_per_jiffy = loops_per_jiffy;
+	c->x86_cache_size = -1;
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
+	c->x86_vendor_id[0] = '\0'; /* Unset */
+	c->x86_model_id[0] = '\0';  /* Unset */
+	c->x86_max_cores = 1;
+	c->x86_coreid_bits = 0;
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
-	init_scattered_cpuid_features(c);
+	generic_identify(c);
 
 	c->apicid = phys_pkg_id(0);
 
@@ -411,7 +459,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
 }
 
-void __cpuinit identify_boot_cpu(void)
+void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
 }
@@ -423,6 +471,49 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+		}
+	}
+}
+
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+	int num;
+
+	get_option(&arg, &num);
+
+	if (num > 0)
+		show_msr = num;
+	return 1;
+}
+__setup("show_msr=", setup_show_msr);
+
 static __init int setup_noclflush(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
@@ -439,6 +530,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
 		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
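
The show_msr= dump added above walks fixed MSR ranges with rdmsrl_amd_safe(). The same registers can be cross-checked from user space through the msr driver; a sketch, assuming CONFIG_X86_MSR is enabled and the program runs as root:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return 1;
	/* The msr device interprets the file offset as the MSR index;
	 * 0x10 is IA32_TIME_STAMP_COUNTER. */
	if (pread(fd, &val, sizeof(val), 0x10) == sizeof(val))
		printf(" MSR%08x: %016llx\n", 0x10, (unsigned long long)val);
	close(fd);
	return 0;
}
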
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 4d894e8565fe..3cc9d92afd8f 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -21,21 +21,15 @@ struct cpu_dev {
 	void		(*c_init)(struct cpuinfo_x86 * c);
 	void		(*c_identify)(struct cpuinfo_x86 * c);
 	unsigned int	(*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
+	int		c_x86_vendor;
 };
 
-extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
+#define cpu_dev_register(cpu_devX) \
+	static struct cpu_dev *__cpu_dev_##cpu_devX __used \
+	__attribute__((__section__(".x86_cpu_dev.init"))) = \
+	&cpu_devX;
 
-struct cpu_vendor_dev {
-	int vendor;
-	struct cpu_dev *cpu_dev;
-};
-
-#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
-	static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
-	__attribute__((__section__(".x86cpuvendor.init"))) = \
-	{ cpu_vendor_id, cpu_dev }
-
-extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
+extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
 
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
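
The new cpu_dev_register() relies on the compiler placing one pointer per vendor into a dedicated ELF section, which the linker script then brackets with __x86_cpu_dev_start/__x86_cpu_dev_end; early_cpu_init() simply walks the pointers in between. A minimal user-space sketch of the same pattern (the section name cpu_dev_init is hypothetical; GCC on ELF only auto-emits __start_*/__stop_* symbols when the section name is a valid identifier, which is why the kernel's ".x86_cpu_dev.init" needs explicit symbols in vmlinux.lds):

#include <stdio.h>

struct cpu_dev {
	const char *c_vendor;
};

/* Drop a pointer to the descriptor into a dedicated section. */
#define cpu_dev_register(dev) \
	static struct cpu_dev *__cpu_dev_##dev \
	__attribute__((used, section("cpu_dev_init"))) = &dev

/* Linker-provided bounds of the section. */
extern struct cpu_dev *__start_cpu_dev_init[], *__stop_cpu_dev_init[];

static struct cpu_dev demo_cpu_dev = { .c_vendor = "Demo" };
cpu_dev_register(demo_cpu_dev);

int main(void)
{
	struct cpu_dev **cdev;

	/* Same walk that early_cpu_init() does over the kernel section. */
	for (cdev = __start_cpu_dev_init; cdev < __stop_cpu_dev_init; cdev++)
		printf("registered: %s\n", (*cdev)->c_vendor);
	return 0;
}
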
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index ada50505a5c8..3f8c7283d816 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -15,13 +15,11 @@
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;
-	unsigned long flags;
 
 	/* we test for DEVID by checking whether CCR3 is writable */
-	local_irq_save(flags);
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, ccr3 ^ 0x80);
 	getCx86(0xc0);   /* dummy to change bus */
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 		*dir0 = getCx86(CX86_DIR0);
 		*dir1 = getCx86(CX86_DIR1);
 	}
-	local_irq_restore(flags);
 }
 
+static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__do_cyrix_devid(dir0, dir1);
+	local_irq_restore(flags);
+}
 /*
  * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
  * order to identify the Cyrix CPU model after we're out of setup.c
@@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void)
 	local_irq_restore(flags);
 }
 
+static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+{
+	unsigned char dir0, dir0_msn, dir1 = 0;
+
+	__do_cyrix_devid(&dir0, &dir1);
+	dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
+
+	switch (dir0_msn) {
+	case 3: /* 6x86/6x86L */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	case 5: /* 6x86MX/M II */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	}
+}
 
 static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 {
@@ -416,16 +439,19 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Cyrix",
 	.c_ident	= { "CyrixInstead" },
+	.c_early_init	= early_init_cyrix,
 	.c_init		= init_cyrix,
 	.c_identify	= cyrix_identify,
+	.c_x86_vendor	= X86_VENDOR_CYRIX,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
+cpu_dev_register(cyrix_cpu_dev);
 
 static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
 	.c_vendor	= "NSC",
 	.c_ident	= { "Geode by NSC" },
 	.c_init		= init_nsc,
+	.c_x86_vendor	= X86_VENDOR_NSC,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);
+cpu_dev_register(nsc_cpu_dev);
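
Splitting do_cyrix_devid() into a raw helper plus an irq-safe wrapper lets early_init_cyrix() probe the DEVID registers without the local_irq_save() machinery, which is unnecessary that early in boot. The shape of the split, reduced to its essentials (a kernel-context sketch; the probe names are illustrative):

/* Raw helper: assumes the caller has dealt with interrupts. */
static void __do_probe(unsigned char *out)
{
	/* ...touch hardware registers here... */
	*out = 0;
}

/* Wrapper: safe to call from any context. */
static void do_probe(unsigned char *out)
{
	unsigned long flags;

	local_irq_save(flags);
	__do_probe(out);
	local_irq_restore(flags);
}
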
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 77618c717d76..c5ac08124adc 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -303,9 +303,10 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
 	.c_size_cache	= intel_size_cache,
+	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
+cpu_dev_register(intel_cpu_dev);
 
 /* arch_initcall(intel_cpu_init); */
 
diff --git a/arch/x86/kernel/cpu/intel_64.c b/arch/x86/kernel/cpu/intel_64.c
index 1019c58d39f0..0a8128a240df 100644
--- a/arch/x86/kernel/cpu/intel_64.c
+++ b/arch/x86/kernel/cpu/intel_64.c
@@ -90,6 +90,7 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_ident	= { "GenuineIntel" },
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
+	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
 
+cpu_dev_register(intel_cpu_dev);
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index b911a2c61b8f..7c46e6ecedca 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -102,6 +102,7 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
 	.c_ident	= { "GenuineTMx86", "TransmetaCPU" },
 	.c_init		= init_transmeta,
 	.c_identify	= transmeta_identify,
+	.c_x86_vendor	= X86_VENDOR_TRANSMETA,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev);
+cpu_dev_register(transmeta_cpu_dev);
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index b1fc90989d75..e777f79e0960 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -19,7 +19,8 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = {
 		}
 	  },
 	},
+	.c_x86_vendor	= X86_VENDOR_UMC,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev);
+cpu_dev_register(umc_cpu_dev);
 
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 4090cd6f8436..6b0bb73998dd 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -330,6 +330,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
+	.read_msr_amd = native_read_msr_amd_safe,
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index 38eb76156a47..b42068fb7b76 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -339,9 +339,8 @@ static void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp, char *log_lvl)
 {
-	printk("\nCall Trace:\n");
+	printk("Call Trace:\n");
 	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
-	printk("\n");
 }
 
 void show_trace(struct task_struct *task, struct pt_regs *regs,
@@ -386,6 +385,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		printk(" %016lx", *stack++);
 		touch_nmi_watchdog();
 	}
+	printk("\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
@@ -443,7 +443,6 @@ void show_registers(struct pt_regs *regs)
 		printk("Stack: ");
 		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
 				regs->bp, "");
-		printk("\n");
 
 		printk(KERN_EMERG "Code: ");
 
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index af5bdad84604..21b4f7eefaab 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -140,10 +140,10 @@ SECTIONS
 		*(.con_initcall.init)
 		__con_initcall_end = .;
 	}
-	.x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
-		__x86cpuvendor_start = .;
-		*(.x86cpuvendor.init)
-		__x86cpuvendor_end = .;
+	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
+		__x86_cpu_dev_start = .;
+		*(.x86_cpu_dev.init)
+		__x86_cpu_dev_end = .;
 	}
 	SECURITY_INIT
 	. = ALIGN(4);
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 63e5c1a22e88..201e81a91a95 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -168,13 +168,12 @@ SECTIONS
 		*(.con_initcall.init)
 	}
 	__con_initcall_end = .;
-	. = ALIGN(16);
-	__x86cpuvendor_start = .;
-	.x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
-		*(.x86cpuvendor.init)
+	__x86_cpu_dev_start = .;
+	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
+		*(.x86_cpu_dev.init)
 	}
-	__x86cpuvendor_end = .;
 	SECURITY_INIT
+	__x86_cpu_dev_end = .;
 
 	. = ALIGN(8);
 	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {