diff options
author | Robert Richter <robert.richter@amd.com> | 2010-10-25 10:58:34 -0400 |
---|---|---|
committer | Robert Richter <robert.richter@amd.com> | 2010-10-25 10:58:34 -0400 |
commit | 4cafc4b8d7219b70e15f22e4a51b3ce847810caf (patch) | |
tree | 8051ea3f36f0682d08f47df8e35e14ca7eb7a5d7 /arch/x86/kernel/smpboot.c | |
parent | b47fad3bfb5940cc3e28a1c69716f6dc44e4b7e6 (diff) | |
parent | dbd1e66e04558a582e673bc4a9cd933ce0228d93 (diff) |
Merge branch 'oprofile/core' into oprofile/x86
Conflicts:
arch/x86/oprofile/op_model_amd.c
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch/x86/kernel/smpboot.c')
-rw-r--r-- | arch/x86/kernel/smpboot.c | 118 |
1 files changed, 103 insertions, 15 deletions
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8b3bfc4dd708..dfb50890b5b7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -62,7 +62,7 @@ | |||
62 | #include <asm/pgtable.h> | 62 | #include <asm/pgtable.h> |
63 | #include <asm/tlbflush.h> | 63 | #include <asm/tlbflush.h> |
64 | #include <asm/mtrr.h> | 64 | #include <asm/mtrr.h> |
65 | #include <asm/vmi.h> | 65 | #include <asm/mwait.h> |
66 | #include <asm/apic.h> | 66 | #include <asm/apic.h> |
67 | #include <asm/setup.h> | 67 | #include <asm/setup.h> |
68 | #include <asm/uv/uv.h> | 68 | #include <asm/uv/uv.h> |
@@ -311,7 +311,6 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
311 | __flush_tlb_all(); | 311 | __flush_tlb_all(); |
312 | #endif | 312 | #endif |
313 | 313 | ||
314 | vmi_bringup(); | ||
315 | cpu_init(); | 314 | cpu_init(); |
316 | preempt_disable(); | 315 | preempt_disable(); |
317 | smp_callin(); | 316 | smp_callin(); |
@@ -324,9 +323,9 @@ notrace static void __cpuinit start_secondary(void *unused) | |||
324 | check_tsc_sync_target(); | 323 | check_tsc_sync_target(); |
325 | 324 | ||
326 | if (nmi_watchdog == NMI_IO_APIC) { | 325 | if (nmi_watchdog == NMI_IO_APIC) { |
327 | legacy_pic->chip->mask(0); | 326 | legacy_pic->mask(0); |
328 | enable_NMI_through_LVT0(); | 327 | enable_NMI_through_LVT0(); |
329 | legacy_pic->chip->unmask(0); | 328 | legacy_pic->unmask(0); |
330 | } | 329 | } |
331 | 330 | ||
332 | /* This must be done before setting cpu_online_mask */ | 331 | /* This must be done before setting cpu_online_mask */ |
@@ -397,6 +396,19 @@ void __cpuinit smp_store_cpu_info(int id) | |||
397 | identify_secondary_cpu(c); | 396 | identify_secondary_cpu(c); |
398 | } | 397 | } |
399 | 398 | ||
399 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | ||
400 | { | ||
401 | struct cpuinfo_x86 *c1 = &cpu_data(cpu1); | ||
402 | struct cpuinfo_x86 *c2 = &cpu_data(cpu2); | ||
403 | |||
404 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); | ||
405 | cpumask_set_cpu(cpu2, cpu_sibling_mask(cpu1)); | ||
406 | cpumask_set_cpu(cpu1, cpu_core_mask(cpu2)); | ||
407 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); | ||
408 | cpumask_set_cpu(cpu1, c2->llc_shared_map); | ||
409 | cpumask_set_cpu(cpu2, c1->llc_shared_map); | ||
410 | } | ||
411 | |||
400 | 412 | ||
401 | void __cpuinit set_cpu_sibling_map(int cpu) | 413 | void __cpuinit set_cpu_sibling_map(int cpu) |
402 | { | 414 | { |
@@ -409,14 +421,13 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
409 | for_each_cpu(i, cpu_sibling_setup_mask) { | 421 | for_each_cpu(i, cpu_sibling_setup_mask) { |
410 | struct cpuinfo_x86 *o = &cpu_data(i); | 422 | struct cpuinfo_x86 *o = &cpu_data(i); |
411 | 423 | ||
412 | if (c->phys_proc_id == o->phys_proc_id && | 424 | if (cpu_has(c, X86_FEATURE_TOPOEXT)) { |
413 | c->cpu_core_id == o->cpu_core_id) { | 425 | if (c->phys_proc_id == o->phys_proc_id && |
414 | cpumask_set_cpu(i, cpu_sibling_mask(cpu)); | 426 | c->compute_unit_id == o->compute_unit_id) |
415 | cpumask_set_cpu(cpu, cpu_sibling_mask(i)); | 427 | link_thread_siblings(cpu, i); |
416 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 428 | } else if (c->phys_proc_id == o->phys_proc_id && |
417 | cpumask_set_cpu(cpu, cpu_core_mask(i)); | 429 | c->cpu_core_id == o->cpu_core_id) { |
418 | cpumask_set_cpu(i, c->llc_shared_map); | 430 | link_thread_siblings(cpu, i); |
419 | cpumask_set_cpu(cpu, o->llc_shared_map); | ||
420 | } | 431 | } |
421 | } | 432 | } |
422 | } else { | 433 | } else { |
@@ -1109,8 +1120,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1109 | } | 1120 | } |
1110 | set_cpu_sibling_map(0); | 1121 | set_cpu_sibling_map(0); |
1111 | 1122 | ||
1112 | enable_IR_x2apic(); | ||
1113 | default_setup_apic_routing(); | ||
1114 | 1123 | ||
1115 | if (smp_sanity_check(max_cpus) < 0) { | 1124 | if (smp_sanity_check(max_cpus) < 0) { |
1116 | printk(KERN_INFO "SMP disabled\n"); | 1125 | printk(KERN_INFO "SMP disabled\n"); |
@@ -1118,6 +1127,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1118 | goto out; | 1127 | goto out; |
1119 | } | 1128 | } |
1120 | 1129 | ||
1130 | default_setup_apic_routing(); | ||
1131 | |||
1121 | preempt_disable(); | 1132 | preempt_disable(); |
1122 | if (read_apic_id() != boot_cpu_physical_apicid) { | 1133 | if (read_apic_id() != boot_cpu_physical_apicid) { |
1123 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", | 1134 | panic("Boot APIC ID in local APIC unexpected (%d vs %d)", |
@@ -1383,11 +1394,88 @@ void play_dead_common(void) | |||
1383 | local_irq_disable(); | 1394 | local_irq_disable(); |
1384 | } | 1395 | } |
1385 | 1396 | ||
1397 | /* | ||
1398 | * We need to flush the caches before going to sleep, lest we have | ||
1399 | * dirty data in our caches when we come back up. | ||
1400 | */ | ||
1401 | static inline void mwait_play_dead(void) | ||
1402 | { | ||
1403 | unsigned int eax, ebx, ecx, edx; | ||
1404 | unsigned int highest_cstate = 0; | ||
1405 | unsigned int highest_subcstate = 0; | ||
1406 | int i; | ||
1407 | void *mwait_ptr; | ||
1408 | |||
1409 | if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT)) | ||
1410 | return; | ||
1411 | if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH)) | ||
1412 | return; | ||
1413 | if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | ||
1414 | return; | ||
1415 | |||
1416 | eax = CPUID_MWAIT_LEAF; | ||
1417 | ecx = 0; | ||
1418 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
1419 | |||
1420 | /* | ||
1421 | * eax will be 0 if EDX enumeration is not valid. | ||
1422 | * Initialized below to cstate, sub_cstate value when EDX is valid. | ||
1423 | */ | ||
1424 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { | ||
1425 | eax = 0; | ||
1426 | } else { | ||
1427 | edx >>= MWAIT_SUBSTATE_SIZE; | ||
1428 | for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { | ||
1429 | if (edx & MWAIT_SUBSTATE_MASK) { | ||
1430 | highest_cstate = i; | ||
1431 | highest_subcstate = edx & MWAIT_SUBSTATE_MASK; | ||
1432 | } | ||
1433 | } | ||
1434 | eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | | ||
1435 | (highest_subcstate - 1); | ||
1436 | } | ||
1437 | |||
1438 | /* | ||
1439 | * This should be a memory location in a cache line which is | ||
1440 | * unlikely to be touched by other processors. The actual | ||
1441 | * content is immaterial as it is not actually modified in any way. | ||
1442 | */ | ||
1443 | mwait_ptr = &current_thread_info()->flags; | ||
1444 | |||
1445 | wbinvd(); | ||
1446 | |||
1447 | while (1) { | ||
1448 | /* | ||
1449 | * The CLFLUSH is a workaround for erratum AAI65 for | ||
1450 | * the Xeon 7400 series. It's not clear it is actually | ||
1451 | * needed, but it should be harmless in either case. | ||
1452 | * The WBINVD is insufficient due to the spurious-wakeup | ||
1453 | * case where we return around the loop. | ||
1454 | */ | ||
1455 | clflush(mwait_ptr); | ||
1456 | __monitor(mwait_ptr, 0, 0); | ||
1457 | mb(); | ||
1458 | __mwait(eax, 0); | ||
1459 | } | ||
1460 | } | ||
1461 | |||
1462 | static inline void hlt_play_dead(void) | ||
1463 | { | ||
1464 | if (current_cpu_data.x86 >= 4) | ||
1465 | wbinvd(); | ||
1466 | |||
1467 | while (1) { | ||
1468 | native_halt(); | ||
1469 | } | ||
1470 | } | ||
1471 | |||
1386 | void native_play_dead(void) | 1472 | void native_play_dead(void) |
1387 | { | 1473 | { |
1388 | play_dead_common(); | 1474 | play_dead_common(); |
1389 | tboot_shutdown(TB_SHUTDOWN_WFS); | 1475 | tboot_shutdown(TB_SHUTDOWN_WFS); |
1390 | wbinvd_halt(); | 1476 | |
1477 | mwait_play_dead(); /* Only returns on failure */ | ||
1478 | hlt_play_dead(); | ||
1391 | } | 1479 | } |
1392 | 1480 | ||
1393 | #else /* ... !CONFIG_HOTPLUG_CPU */ | 1481 | #else /* ... !CONFIG_HOTPLUG_CPU */ |