author		Yinghai Lu <yhlu.kernel@gmail.com>	2008-09-07 20:58:54 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-08 09:32:02 -0400
commit		6c62aa4a3c12989a1f1fcbbe6f1ee5d4de4b2300 (patch)
tree		40e722bdcf16231a59175ca7c98c00aa7b95e560 /arch
parent		8d71a2ea0ad4ef9b9076ffd44726bad1f0ccf59b (diff)
x86: make amd.c have 64bit support code
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/amd.c	137
1 file changed, 126 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 3c8090d10053..32e73520adf7 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -16,6 +16,7 @@
 
 #include "cpu.h"
 
+#ifdef CONFIG_X86_32
 /*
  * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
  * misexecution of code under Linux. Owners of such processors should
@@ -177,6 +178,26 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 
 	set_cpu_cap(c, X86_FEATURE_K7);
 }
+#endif
+
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+static int __cpuinit nearby_node(int apicid)
+{
+	int i, node;
+
+	for (i = apicid - 1; i >= 0; i--) {
+		node = apicid_to_node[i];
+		if (node != NUMA_NO_NODE && node_online(node))
+			return node;
+	}
+	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
+		node = apicid_to_node[i];
+		if (node != NUMA_NO_NODE && node_online(node))
+			return node;
+	}
+	return first_node(node_online_map); /* Shouldn't happen */
+}
+#endif
 
 /*
  * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
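The nearby_node() helper added in this hunk scans outward from a CPU's APIC ID for the closest apicid_to_node[] entry that refers to an online node. A minimal user-space sketch of the same scan, with a mock table and a mock node_online() standing in for the kernel's (hypothetical values, for illustration only):

#include <stdio.h>

#define MAX_LOCAL_APIC	16
#define NUMA_NO_NODE	(-1)

/* Mock table: only APIC IDs 2 and 9 map to online nodes. */
static int apicid_to_node[MAX_LOCAL_APIC] = {
	-1, -1, 0, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1
};

static int node_online(int node)
{
	return node == 0 || node == 1;	/* pretend nodes 0 and 1 are online */
}

/* Same shape as the kernel helper: walk down, then up, from apicid. */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return 0;	/* stand-in for first_node(node_online_map) */
}

int main(void)
{
	printf("apicid 5 -> node %d\n", nearby_node(5));	/* finds node 0 via apicid 2 */
	printf("apicid 8 -> node %d\n", nearby_node(8));	/* also node 0: downward scan wins */
	return 0;
}

Note that the downward scan runs to exhaustion before the upward one begins, so a match below the APIC ID wins even when a numerically nearer entry exists above it, as the apicid-8 case shows.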
@@ -196,6 +217,42 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 #endif
 }
 
+static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+{
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+	int cpu = smp_processor_id();
+	int node;
+	unsigned apicid = hard_smp_processor_id();
+
+	node = c->phys_proc_id;
+	if (apicid_to_node[apicid] != NUMA_NO_NODE)
+		node = apicid_to_node[apicid];
+	if (!node_online(node)) {
+		/* Two possibilities here:
+		   - The CPU is missing memory and no node was created.
+		     In that case try picking one from a nearby CPU
+		   - The APIC IDs differ from the HyperTransport node IDs
+		     which the K8 northbridge parsing fills in.
+		     Assume they are all increased by a constant offset,
+		     but in the same order as the HT nodeids.
+		     If that doesn't result in a usable node fall back to the
+		     path for the previous case. */
+
+		int ht_nodeid = c->initial_apicid;
+
+		if (ht_nodeid >= 0 &&
+		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+			node = apicid_to_node[ht_nodeid];
+		/* Pick a nearby node */
+		if (!node_online(node))
+			node = nearby_node(apicid);
+	}
+	numa_set_node(cpu, node);
+
+	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+#endif
+}
+
 static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
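srat_detect_node(), added above, layers three fallbacks: the firmware-provided apicid_to_node[] table, then the initial APIC ID reinterpreted as a HyperTransport node ID, then the nearest online node. Reusing the mock table and helpers from the previous sketch, the decision chain condenses to (a sketch of the logic, not the kernel code path):

/* Condensed decision chain of srat_detect_node(); apicid_to_node[],
 * node_online() and nearby_node() are the mock stand-ins defined in
 * the previous sketch. */
static int pick_node(int apicid, int phys_proc_id, int initial_apicid)
{
	int node = phys_proc_id;		/* default: physical package id */

	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];	/* 1st: firmware (SRAT/K8) table */

	if (!node_online(node)) {
		/* 2nd: treat the initial APIC ID as an HT node id + offset */
		int ht_nodeid = initial_apicid;

		if (ht_nodeid >= 0 && apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		if (!node_online(node))
			node = nearby_node(apicid);	/* 3rd: closest online node */
	}
	return node;
}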
@@ -226,13 +283,19 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
 	early_init_amd_mc(c);
 
+	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
 	if (c->x86_power & (1<<8))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
+#else
 	/* Set MTRR capability flag if appropriate */
-	if (c->x86_model == 13 || c->x86_model == 9 ||
-	    (c->x86_model == 8 && c->x86_mask >= 8))
-		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
+	if (c->x86 == 5)
+		if (c->x86_model == 13 || c->x86_model == 9 ||
+		    (c->x86_model == 8 && c->x86_mask >= 8))
+			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
+#endif
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
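The reworked 32-bit branch guards the K6 MTRR models with an explicit family check: now that amd.c serves every AMD family, matching on model alone could also hit a later-family part whose model number happens to be 8, 9, or 13. A sketch of how family and model are decoded from the CPUID leaf 1 signature, following the AMD field layout (the raw sample values are hypothetical):

#include <stdio.h>

static unsigned int fam(unsigned int eax)
{
	unsigned int f = (eax >> 8) & 0xf;	/* base family, bits 8-11 */

	if (f == 0xf)
		f += (eax >> 20) & 0xff;	/* extended family, bits 20-27 */
	return f;
}

static unsigned int model(unsigned int eax)
{
	unsigned int m = (eax >> 4) & 0xf;	/* base model, bits 4-7 */

	if (((eax >> 8) & 0xf) == 0xf)
		m += ((eax >> 16) & 0xf) << 4;	/* extended model, bits 16-19 */
	return m;
}

int main(void)
{
	unsigned int k6_2  = 0x0580;	/* family 5, model 8: a K6-2 */
	unsigned int k8_m8 = 0x0f80;	/* family 0xf, model 8: not a K6 */

	printf("family %u model %u\n", fam(k6_2), model(k6_2));
	printf("family %u model %u\n", fam(k8_m8), model(k8_m8));
	return 0;
}

Both signatures decode to model 8, which is exactly the ambiguity the new "if (c->x86 == 5)" guard removes.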
@@ -257,17 +320,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	early_init_amd(c);
 
 	/*
-	 * FIXME: We should handle the K5 here. Set up the write
-	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
-	 * no bus pipeline)
-	 */
-
-	/*
 	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
 	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
 	 */
 	clear_cpu_cap(c, 0*32+31);
 
+#ifdef CONFIG_X86_64
+	/* On C+ stepping K8 rep microcode works well for copy/memset */
+	if (c->x86 == 0xf) {
+		u32 level;
+
+		level = cpuid_eax(1);
+		if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
+			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+	}
+	if (c->x86 == 0x10 || c->x86 == 0x11)
+		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+#else
+
+	/*
+	 * FIXME: We should handle the K5 here. Set up the write
+	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
+	 * no bus pipeline)
+	 */
+
 	switch (c->x86) {
 	case 4:
 		init_amd_k5(c);
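The constants compared against cpuid_eax(1) in the new 64-bit branch are raw family/model/stepping signatures: 0x0f48, for instance, is family 0xf, model 4, stepping 8. A standalone sketch of the same range test with illustrative signatures (mock values, not read from real hardware):

#include <stdio.h>

/* Mirror of the K8 rep-microcode test: true only inside the two
 * signature windows the patch checks. */
static int k8_rep_good(unsigned int sig)
{
	return (sig >= 0x0f48 && sig < 0x0f50) || sig >= 0x0f58;
}

int main(void)
{
	printf("0x0f41 -> %d\n", k8_rep_good(0x0f41));	/* early revision: no */
	printf("0x0f48 -> %d\n", k8_rep_good(0x0f48));	/* first window: yes */
	printf("0x0f51 -> %d\n", k8_rep_good(0x0f51));	/* in the gap: no */
	printf("0x0f58 -> %d\n", k8_rep_good(0x0f58));	/* later revision: yes */
	return 0;
}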
@@ -283,7 +359,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	/* K6s reports MCEs but don't actually have all the MSRs */
 	if (c->x86 < 6)
 		clear_cpu_cap(c, X86_FEATURE_MCE);
+#endif
 
+	/* Enable workaround for FXSAVE leak */
 	if (c->x86 >= 6)
 		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
 
@@ -300,10 +378,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	display_cacheinfo(c);
 
 	/* Multi core CPU? */
-	if (c->extended_cpuid_level >= 0x80000008)
+	if (c->extended_cpuid_level >= 0x80000008) {
 		amd_detect_cmp(c);
+		srat_detect_node(c);
+	}
 
+#ifdef CONFIG_X86_32
 	detect_ht(c);
+#endif
 
 	if (c->extended_cpuid_level >= 0x80000006) {
 		if ((c->x86 >= 0x0f) && (cpuid_edx(0x80000006) & 0xf000))
@@ -319,8 +401,38 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		/* MFENCE stops RDTSC speculation */
 		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 	}
+
+#ifdef CONFIG_X86_64
+	if (c->x86 == 0x10) {
+		/* do this for boot cpu */
+		if (c == &boot_cpu_data)
+			check_enable_amd_mmconf_dmi();
+
+		fam10h_check_enable_mmcfg();
+	}
+
+	if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
+		unsigned long long tseg;
+
+		/*
+		 * Split up direct mapping around the TSEG SMM area.
+		 * Don't do it for gbpages because there seems very little
+		 * benefit in doing so.
+		 */
+		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+			if ((tseg>>PMD_SHIFT) <
+				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
+				((tseg>>PMD_SHIFT) <
+				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
+				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
+				set_memory_4k((unsigned long)__va(tseg), 1);
+		}
+	}
+#endif
 }
 
+#ifdef CONFIG_X86_32
 static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
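The TSEG test in the hunk above works in 2MB-page units: a pfn limit shifted right by PMD_SHIFT - PAGE_SHIFT becomes a count of 2MB mappings, and 1ULL << (32 - PMD_SHIFT) is the 4GB boundary in the same units. A self-contained sketch of the comparison with made-up memory sizes (mock limits, not the kernel's max_*_pfn_mapped variables):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21	/* 2MB pages on x86-64 */

/* Does the TSEG SMM base fall inside the kernel's direct mapping?
 * Same comparison as the patch, with caller-supplied pfn limits. */
static int tseg_needs_4k_split(unsigned long long tseg,
			       unsigned long long max_low_pfn_mapped,
			       unsigned long long max_pfn_mapped)
{
	unsigned long long tseg_pmd = tseg >> PMD_SHIFT;

	return tseg_pmd < (max_low_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)) ||
	       (tseg_pmd < (max_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)) &&
		tseg_pmd >= (1ULL << (32 - PMD_SHIFT)));
}

int main(void)
{
	/* Hypothetical box: 2GB mapped below 4GB, 6GB mapped in total. */
	unsigned long long low_pfns = 0x80000000ULL >> PAGE_SHIFT;
	unsigned long long all_pfns = 0x180000000ULL >> PAGE_SHIFT;

	/* TSEG just under 2GB: inside the low mapping, needs the split. */
	printf("%d\n", tseg_needs_4k_split(0x7f000000ULL, low_pfns, all_pfns));
	/* TSEG at 7GB: above everything that is mapped, left alone. */
	printf("%d\n", tseg_needs_4k_split(0x1c0000000ULL, low_pfns, all_pfns));
	return 0;
}

With these numbers the first call prints 1 (TSEG's 2MB page is part of the direct map, so it is remapped with 4k pages) and the second prints 0.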
@@ -333,10 +445,12 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
 	}
 	return size;
 }
+#endif
 
 static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
+#ifdef CONFIG_X86_32
 	.c_models = {
 		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
 		{
@@ -349,9 +463,10 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 		}
 	},
 	},
+	.c_size_cache	= amd_size_cache,
+#endif
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
-	.c_size_cache	= amd_size_cache,
 	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 