author		Ingo Molnar <mingo@elte.hu>	2008-09-05 03:27:23 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-05 03:27:23 -0400
commit		0c8c708a7e6b52556ee9079041f504191ec5f0e9 (patch)
tree		fb8da2f5e70ac1d21d41d5a900e48d42c1c999eb /arch/x86/kernel/cpu/common.c
parent		b380b0d4f7dffcc235c0facefa537d4655619101 (diff)
parent		d3d0ba7b8fb8f57c33207adcb41f40c176148c03 (diff)

Merge branch 'x86/core' into x86/unify-cpu-detect
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--	arch/x86/kernel/cpu/common.c	626
1 file changed, 352 insertions(+), 274 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 80ab20d4fa39..7d5a07f0fd24 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,6 +13,7 @@
 #include <asm/mtrr.h>
 #include <asm/mce.h>
 #include <asm/pat.h>
+#include <asm/asm.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -21,7 +22,9 @@
 
 #include "cpu.h"
 
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
+static struct cpu_dev *this_cpu __cpuinitdata;
+
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
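
Each GDT initializer above packs an 8-byte segment descriptor into two 32-bit words. A standalone sketch of the decoding, using the GDT_ENTRY_KERNEL_CS value from the hunk (illustrative, not part of the patch):

/* Decode the two 32-bit words of a GDT descriptor (sketch). */
#include <stdio.h>

int main(void)
{
	unsigned int lo = 0x0000ffff, hi = 0x00cf9a00;	/* kernel CS above */

	unsigned int limit = (lo & 0xffff) | (hi & 0x000f0000);	/* 0xfffff */
	unsigned int base  = (lo >> 16) | ((hi & 0xff) << 16)
			     | (hi & 0xff000000);		/* 0 */
	unsigned int dpl   = (hi >> 13) & 3;	/* 0 = ring 0 (0xfa00 -> 3) */
	int granularity_4k = (hi >> 23) & 1;	/* 1: limit counted in 4K pages */

	/* limit 0xfffff in 4K units at base 0 -> flat 4 GiB code segment */
	printf("base=%#x limit=%#x dpl=%u g=%d\n", base, limit, dpl, granularity_4k);
	return 0;
}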
@@ -57,12 +60,124 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
-__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
-
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_serial_nr __cpuinitdata = 1;
 
-struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static int __init cachesize_setup(char *str)
+{
+	get_option(&str, &cachesize_override);
+	return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table only is used unless init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
+ *
+ */
+
+/* Look up CPU names by table lookup. */
+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+{
+	struct cpu_model_info *info;
+
+	if (c->x86_model >= 16)
+		return NULL;	/* Range check */
+
+	if (!this_cpu)
+		return NULL;
+
+	info = this_cpu->c_models;
+
+	while (info && info->family) {
+		if (info->family == c->x86)
+			return info->model_names[c->x86_model];
+		info++;
+	}
+	return NULL;		/* Not found */
+}
+
+static int __init x86_fxsr_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_FXSR);
+	setup_clear_cpu_cap(X86_FEATURE_XMM);
+	return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+static int __init x86_sep_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_SEP);
+	return 1;
+}
+__setup("nosep", x86_sep_setup);
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+	u32 f1, f2;
+
+	asm("pushfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "movl %0,%1\n\t"
+	    "xorl %2,%0\n\t"
+	    "pushl %0\n\t"
+	    "popfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "popfl\n\t"
+	    : "=&r" (f1), "=&r" (f2)
+	    : "ir" (flag));
+
+	return ((f1^f2) & flag) != 0;
+}
+
+/* Probe for the CPUID instruction */
+static int __cpuinit have_cpuid_p(void)
+{
+	return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
+		/* Disable processor serial number */
+		unsigned long lo, hi;
+		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+		lo |= 0x200000;
+		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+		printk(KERN_NOTICE "CPU serial number disabled.\n");
+		clear_cpu_cap(c, X86_FEATURE_PN);
+
+		/* Disabling the serial number may affect the cpuid level */
+		c->cpuid_level = cpuid_eax(0);
+	}
+}
+
+static int __init x86_serial_nr_setup(char *s)
+{
+	disable_x86_serial_nr = 0;
+	return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
+__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
+
+/* Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one. */
+void switch_to_new_gdt(void)
+{
+	struct desc_ptr gdt_descr;
+
+	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.size = GDT_SIZE - 1;
+	load_gdt(&gdt_descr);
+	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+}
+
+static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
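
flag_is_changeable_p() above exploits the fact that EFLAGS.ID (bit 21) can only be toggled on CPUs that implement CPUID. The same probe works in user space; a minimal sketch assuming a 32-bit build (compile with -m32; not part of the patch):

#include <stdio.h>

#define X86_EFLAGS_ID 0x00200000	/* ID flag, bit 21 */

static int eflags_bit_changeable(unsigned int flag)
{
	unsigned int f1, f2;

	__asm__("pushfl\n\t"		/* save original EFLAGS      */
		"pushfl\n\t"
		"popl %0\n\t"		/* f1 = EFLAGS               */
		"movl %0,%1\n\t"	/* f2 = f1                   */
		"xorl %2,%0\n\t"	/* toggle the bit under test */
		"pushl %0\n\t"
		"popfl\n\t"		/* try to write it back      */
		"pushfl\n\t"
		"popl %0\n\t"		/* f1 = EFLAGS after write   */
		"popfl"			/* restore original EFLAGS   */
		: "=&r" (f1), "=&r" (f2)
		: "ir" (flag));

	return ((f1 ^ f2) & flag) != 0;
}

int main(void)
{
	printf("CPUID %s\n",
	       eflags_bit_changeable(X86_EFLAGS_ID) ? "present" : "absent");
	return 0;
}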
@@ -80,22 +195,15 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
+	.c_x86_vendor = X86_VENDOR_UNKNOWN,
 };
-static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
-
-static int __init cachesize_setup(char *str)
-{
-	get_option(&str, &cachesize_override);
-	return 1;
-}
-__setup("cachesize=", cachesize_setup);
 
 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
 
-	if (cpuid_eax(0x80000000) < 0x80000004)
+	if (c->extended_cpuid_level < 0x80000004)
 		return 0;
 
 	v = (unsigned int *) c->x86_model_id;
@@ -119,24 +227,23 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	return 1;
 }
 
-
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ecx, edx, l2size;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
-	n = cpuid_eax(0x80000000);
+	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
-		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
 		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
-		c->x86_cache_size = (ecx>>24)+(edx>>24);
+		c->x86_cache_size = (ecx>>24) + (edx>>24);
 	}
 
 	if (n < 0x80000006)	/* Some chips just has a large L1. */
 		return;
 
-	ecx = cpuid_ecx(0x80000006);
+	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 	l2size = ecx >> 16;
 
 	/* do processor-specific cache resizing */
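
For reference, the register layout read here: leaf 0x80000005 reports L1 sizes in the top byte of ECX/EDX and line sizes in the bottom byte, and leaf 0x80000006 reports the L2 size in the top half of ECX. A tiny worked example with a hypothetical ECX value (not from the patch):

	unsigned int ecx = 0x02004140;		/* hypothetical leaf 0x80000006 ECX */
	unsigned int l2size = ecx >> 16;	/* 0x0200 = 512 (KB)      */
	unsigned int line   = ecx & 0xFF;	/* 0x40   = 64 bytes/line */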
@@ -153,112 +260,90 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 	c->x86_cache_size = l2size;
 
 	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
 	       l2size, ecx & 0xFF);
 }
 
-/*
- * Naming convention should be: <Name> [(<Codename>)]
- * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
- */
-
-/* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+#ifdef CONFIG_X86_HT
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
-	struct cpu_model_info *info;
+	u32 eax, ebx, ecx, edx;
+	int index_msb, core_bits;
 
-	if (c->x86_model >= 16)
-		return NULL;	/* Range check */
+	if (!cpu_has(c, X86_FEATURE_HT))
+		return;
 
-	if (!this_cpu)
-		return NULL;
+	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+		goto out;
 
-	info = this_cpu->c_models;
+	cpuid(1, &eax, &ebx, &ecx, &edx);
 
-	while (info && info->family) {
-		if (info->family == c->x86)
-			return info->model_names[c->x86_model];
-		info++;
+	smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+	if (smp_num_siblings == 1) {
+		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+	} else if (smp_num_siblings > 1) {
+
+		if (smp_num_siblings > NR_CPUS) {
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+			       smp_num_siblings);
+			smp_num_siblings = 1;
+			return;
+		}
+
+		index_msb = get_count_order(smp_num_siblings);
+		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
+
+
+		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+
+		index_msb = get_count_order(smp_num_siblings);
+
+		core_bits = get_count_order(c->x86_max_cores);
+
+		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+					     ((1 << core_bits) - 1);
 	}
-	return NULL;		/* Not found */
-}
 
+out:
+	if ((c->x86_max_cores * smp_num_siblings) > 1) {
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+		       c->cpu_core_id);
+	}
+}
+#endif
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
 	static int printed;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		if (cpu_devs[i]) {
-			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
-			    (cpu_devs[i]->c_ident[1] &&
-			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
-				c->x86_vendor = i;
-				if (!early)
+		if (!cpu_devs[i])
+			break;
+
+		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+		    (cpu_devs[i]->c_ident[1] &&
+		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
 			this_cpu = cpu_devs[i];
-				return;
-			}
+			c->x86_vendor = this_cpu->c_x86_vendor;
+			return;
 		}
 	}
+
 	if (!printed) {
 		printed++;
 		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
+
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
 	this_cpu = &default_cpu;
 }
 
-
-static int __init x86_fxsr_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_FXSR);
-	setup_clear_cpu_cap(X86_FEATURE_XMM);
-	return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
-
-static int __init x86_sep_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_SEP);
-	return 1;
-}
-__setup("nosep", x86_sep_setup);
-
-
-/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
-{
-	u32 f1, f2;
-
-	asm("pushfl\n\t"
-	    "pushfl\n\t"
-	    "popl %0\n\t"
-	    "movl %0,%1\n\t"
-	    "xorl %2,%0\n\t"
-	    "pushl %0\n\t"
-	    "popfl\n\t"
-	    "pushfl\n\t"
-	    "popl %0\n\t"
-	    "popfl\n\t"
-	    : "=&r" (f1), "=&r" (f2)
-	    : "ir" (flag));
-
-	return ((f1^f2) & flag) != 0;
-}
-
-
-/* Probe for the CPUID instruction */
-static int __cpuinit have_cpuid_p(void)
-{
-	return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
-void __init cpu_detect(struct cpuinfo_x86 *c)
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
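
The new detect_ht() decomposes the initial APIC ID into package and core fields: get_count_order() rounds the sibling count up to a power-of-two bit width, and phys_pkg_id() shifts the APIC ID right by that width. A self-contained sketch of the arithmetic for a hypothetical 2-core, 2-thread package (values invented for illustration):

#include <stdio.h>

static int count_order(unsigned int x)	/* smallest n with (1 << n) >= x */
{
	int n = 0;
	while ((1u << n) < x)
		n++;
	return n;
}

int main(void)
{
	unsigned int apicid = 6;	/* 0b0110: pkg 1, core 1, thread 0 */
	unsigned int siblings = 4;	/* logical CPUs per package */
	unsigned int max_cores = 2;

	int index_msb = count_order(siblings);			/* 2 */
	unsigned int phys_proc_id = apicid >> index_msb;	/* 1 */

	siblings /= max_cores;			/* threads per core = 2 */
	index_msb = count_order(siblings);	/* 1 */
	int core_bits = count_order(max_cores);	/* 1 */
	unsigned int core_id =
		(apicid >> index_msb) & ((1u << core_bits) - 1);	/* 1 */

	printf("pkg %u core %u\n", phys_proc_id, core_id);
	return 0;
}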
@@ -267,50 +352,47 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
 	      (unsigned int *)&c->x86_vendor_id[4]);
 
 	c->x86 = 4;
+	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
-		c->x86 = (tfms >> 8) & 15;
-		c->x86_model = (tfms >> 4) & 15;
+		c->x86 = (tfms >> 8) & 0xf;
+		c->x86_model = (tfms >> 4) & 0xf;
+		c->x86_mask = tfms & 0xf;
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		c->x86_mask = tfms & 15;
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
 		if (cap0 & (1<<19)) {
-			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+			c->x86_cache_alignment = c->x86_clflush_size;
 		}
 	}
 }
-static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
+
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
-	unsigned int ebx;
+	u32 ebx;
 
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-	if (have_cpuid_p()) {
-		/* Intel-defined flags: level 0x00000001 */
-		if (c->cpuid_level >= 0x00000001) {
-			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-			c->x86_capability[0] = capability;
-			c->x86_capability[4] = excap;
-		}
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 capability, excap;
+		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+		c->x86_capability[0] = capability;
+		c->x86_capability[4] = excap;
+	}
 
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
-	if ((xlvl & 0xffff0000) == 0x80000000) {
-		if (xlvl >= 0x80000001) {
-			c->x86_capability[1] = cpuid_edx(0x80000001);
-			c->x86_capability[6] = cpuid_ecx(0x80000001);
-		}
+	c->extended_cpuid_level = xlvl;
+	if ((xlvl & 0xffff0000) == 0x80000000) {
+		if (xlvl >= 0x80000001) {
+			c->x86_capability[1] = cpuid_edx(0x80000001);
+			c->x86_capability[6] = cpuid_ecx(0x80000001);
+		}
 		}
-
 	}
-
 }
-
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
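
cpu_detect() splits the leaf-1 EAX signature ("tfms") into family, model, and stepping, with the extended fields folded in for family 0xf and model extension for family >= 6. A worked example with a hypothetical signature value (0x000006fb decodes to family 6, model 15, stepping 11):

	unsigned int tfms = 0x000006fb;			/* hypothetical CPUID.1 EAX */
	unsigned int family   = (tfms >> 8) & 0xf;	/* 6  */
	unsigned int model    = (tfms >> 4) & 0xf;	/* 15 */
	unsigned int stepping = tfms & 0xf;		/* 11 */

	if (family == 0xf)
		family += (tfms >> 20) & 0xff;		/* extended family */
	if (family >= 0x6)
		model += ((tfms >> 16) & 0xf) << 4;	/* extended model: +0 here */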
@@ -320,109 +402,114 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
  * WARNING: this function is only called on the BP. Don't add code here
  * that is supposed to run on all CPUs.
  */
-static void __init early_cpu_detect(void)
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
-	c->x86_cache_alignment = 32;
 	c->x86_clflush_size = 32;
+	c->x86_cache_alignment = c->x86_clflush_size;
 
 	if (!have_cpuid_p())
 		return;
 
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	c->extended_cpuid_level = 0;
+
 	cpu_detect(c);
 
-	get_cpu_vendor(c, 1);
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
 
-	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
-	    cpu_devs[c->x86_vendor]->c_early_init)
-		cpu_devs[c->x86_vendor]->c_early_init(c);
+	if (this_cpu->c_early_init)
+		this_cpu->c_early_init(c);
 
-	early_get_cap(c);
+	validate_pat_support(c);
 }
 
-static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+void __init early_cpu_init(void)
 {
-	u32 tfms, xlvl;
-	unsigned int ebx;
+	struct cpu_dev **cdev;
+	int count = 0;
 
-	if (have_cpuid_p()) {
-		/* Get vendor name */
-		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-		      (unsigned int *)&c->x86_vendor_id[0],
-		      (unsigned int *)&c->x86_vendor_id[8],
-		      (unsigned int *)&c->x86_vendor_id[4]);
-
-		get_cpu_vendor(c, 0);
-		/* Initialize the standard set of capabilities */
-		/* Note that the vendor-specific code below might override */
-		/* Intel-defined flags: level 0x00000001 */
-		if (c->cpuid_level >= 0x00000001) {
-			u32 capability, excap;
-			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-			c->x86_capability[0] = capability;
-			c->x86_capability[4] = excap;
-			c->x86 = (tfms >> 8) & 15;
-			c->x86_model = (tfms >> 4) & 15;
-			if (c->x86 == 0xf)
-				c->x86 += (tfms >> 20) & 0xff;
-			if (c->x86 >= 0x6)
-				c->x86_model += ((tfms >> 16) & 0xF) << 4;
-			c->x86_mask = tfms & 15;
-			c->initial_apicid = (ebx >> 24) & 0xFF;
-#ifdef CONFIG_X86_HT
-			c->apicid = phys_pkg_id(c->initial_apicid, 0);
-			c->phys_proc_id = c->initial_apicid;
-#else
-			c->apicid = c->initial_apicid;
-#endif
-			if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
-				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
-		} else {
-			/* Have CPUID level 0 only - unheard of */
-			c->x86 = 4;
-		}
-
-		/* AMD-defined flags: level 0x80000001 */
-		xlvl = cpuid_eax(0x80000000);
-		if ((xlvl & 0xffff0000) == 0x80000000) {
-			if (xlvl >= 0x80000001) {
-				c->x86_capability[1] = cpuid_edx(0x80000001);
-				c->x86_capability[6] = cpuid_ecx(0x80000001);
-			}
-			if (xlvl >= 0x80000004)
-				get_model_name(c); /* Default name */
+	printk("KERNEL supported cpus:\n");
+	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+		struct cpu_dev *cpudev = *cdev;
+		unsigned int j;
+
+		if (count >= X86_VENDOR_NUM)
+			break;
+		cpu_devs[count] = cpudev;
+		count++;
+
+		for (j = 0; j < 2; j++) {
+			if (!cpudev->c_ident[j])
+				continue;
+			printk(" %s %s\n", cpudev->c_vendor,
+				cpudev->c_ident[j]);
 		}
-
-	init_scattered_cpuid_features(c);
 	}
 
+	early_identify_cpu(&boot_cpu_data);
 }
 
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+/*
+ * The NOPL instruction is supposed to exist on all CPUs with
+ * family >= 6, unfortunately, that's not true in practice because
+ * of early VIA chips and (more importantly) broken virtualizers that
+ * are not easy to detect. Hence, probe for it based on first
+ * principles.
+ */
+static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
+	const u32 nopl_signature = 0x888c53b1; /* Random number */
+	u32 has_nopl = nopl_signature;
+
+	clear_cpu_cap(c, X86_FEATURE_NOPL);
+	if (c->x86 >= 6) {
+		asm volatile("\n"
+			     "1:	.byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+			     "2:\n"
+			     "	.section .fixup,\"ax\"\n"
+			     "3:	xor %0,%0\n"
+			     "	jmp 2b\n"
+			     "	.previous\n"
+			     _ASM_EXTABLE(1b,3b)
+			     : "+a" (has_nopl));
+
+		if (has_nopl == nopl_signature)
+			set_cpu_cap(c, X86_FEATURE_NOPL);
 	}
 }
 
-static int __init x86_serial_nr_setup(char *s)
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
-	disable_x86_serial_nr = 0;
-	return 1;
-}
-__setup("serialnumber", x86_serial_nr_setup);
+	if (!have_cpuid_p())
+		return;
 
+	c->extended_cpuid_level = 0;
 
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
+
+	if (c->cpuid_level >= 0x00000001) {
+		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
+#ifdef CONFIG_X86_HT
+		c->apicid = phys_pkg_id(c->initial_apicid, 0);
+		c->phys_proc_id = c->initial_apicid;
+#else
+		c->apicid = c->initial_apicid;
+#endif
+	}
+
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
+
+	init_scattered_cpuid_features(c);
+	detect_nopl(c);
+}
 
 /*
  * This does the hard work of actually picking apart the CPU stuff...
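
detect_nopl() executes the 3-byte NOPL encoding and relies on the kernel's exception table (_ASM_EXTABLE) to zero the signature register if the instruction faults. The same idea can be demonstrated in user space by catching SIGILL instead; a sketch under that substitution (not the kernel's mechanism):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf probe_env;

static void sigill_handler(int sig)
{
	siglongjmp(probe_env, 1);	/* faulting probe: unwind */
}

static int cpu_has_nopl(void)
{
	struct sigaction sa = { .sa_handler = sigill_handler };
	int ok = 0;

	sigaction(SIGILL, &sa, NULL);
	if (sigsetjmp(probe_env, 1) == 0) {
		asm volatile(".byte 0x0f,0x1f,0xc0");	/* nopl %eax */
		ok = 1;	/* instruction retired without faulting */
	}
	return ok;
}

int main(void)
{
	printf("NOPL %s\n", cpu_has_nopl() ? "supported" : "not supported");
	return 0;
}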
@@ -499,7 +586,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (c != &boot_cpu_data) {
 		/* AND the already accumulated flags with these */
-		for (i = 0 ; i < NCAPINTS ; i++)
+		for (i = 0; i < NCAPINTS; i++)
 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
 	}
 
@@ -528,51 +615,48 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
-#ifdef CONFIG_X86_HT
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
-{
-	u32 eax, ebx, ecx, edx;
-	int index_msb, core_bits;
-
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
-		return;
-
-	smp_num_siblings = (ebx & 0xff0000) >> 16;
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
 
-	if (smp_num_siblings == 1) {
-		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
 
-		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of the "
-					"siblings %d", smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
 		}
+	}
+}
 
-		index_msb = get_count_order(smp_num_siblings);
-		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-
-		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-		       c->phys_proc_id);
-
-		smp_num_siblings = smp_num_siblings / c->x86_max_cores;
-
-		index_msb = get_count_order(smp_num_siblings) ;
-
-		core_bits = get_count_order(c->x86_max_cores);
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+	int num;
 
-		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-					       ((1 << core_bits) - 1);
+	get_option(&arg, &num);
 
-		if (c->x86_max_cores > 1)
-			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-			       c->cpu_core_id);
-	}
+	if (num > 0)
+		show_msr = num;
+	return 1;
 }
-#endif
+__setup("show_msr=", setup_show_msr);
 
 static __init int setup_noclflush(char *arg)
 {
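
The new show_msr= parameter is consumed by print_cpu_info() in the next hunk: booting with it set dumps the four MSR ranges above for the first N CPUs via rdmsrl_amd_safe(). A hypothetical boot entry (illustrative paths):

	linux /boot/vmlinuz root=/dev/sda1 show_msr=1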
@@ -591,17 +675,25 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		vendor = c->x86_vendor_id;
 
 	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
-		printk("%s ", vendor);
+		printk(KERN_CONT "%s ", vendor);
 
-	if (!c->x86_model_id[0])
-		printk("%d86", c->x86);
+	if (c->x86_model_id[0])
+		printk(KERN_CONT "%s", c->x86_model_id);
 	else
-		printk("%s", c->x86_model_id);
+		printk(KERN_CONT "%d86", c->x86);
 
 	if (c->x86_mask || c->cpuid_level >= 0)
-		printk(" stepping %02x\n", c->x86_mask);
+		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
-		printk("\n");
+		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
@@ -617,19 +709,6 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
-void __init early_cpu_init(void)
-{
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start ;
-	     cvdev < __x86cpuvendor_end ;
-	     cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-
-	early_cpu_detect();
-	validate_pat_support(&boot_cpu_data);
-}
-
 /* Make sure %fs is initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
@@ -638,18 +717,6 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	return regs;
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
-{
-	struct desc_ptr gdt_descr;
-
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-	gdt_descr.size = GDT_SIZE - 1;
-	load_gdt(&gdt_descr);
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-}
-
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
@@ -709,9 +776,20 @@ void __cpuinit cpu_init(void)
 	/*
 	 * Force FPU initialization:
 	 */
-	current_thread_info()->status = 0;
+	if (cpu_has_xsave)
+		current_thread_info()->status = TS_XSAVE;
+	else
+		current_thread_info()->status = 0;
 	clear_used_math();
 	mxcsr_feature_mask_init();
+
+	/*
+	 * Boot processor to setup the FP and extended state context info.
+	 */
+	if (!smp_processor_id())
+		init_thread_xstate();
+
+	xsave_init();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
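
The cpu_has_xsave test in the last hunk corresponds to the XSAVE feature bit, CPUID leaf 1 ECX bit 26. A user-space sketch of the same check using GCC's <cpuid.h> (not part of the patch; the kernel uses its own cpu_has_* machinery):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the leaf is unsupported */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		printf("XSAVE %s\n", (ecx & (1u << 26)) ? "yes" : "no");
	return 0;
}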