author     Suresh Siddha <suresh.b.siddha@intel.com>   2009-08-19 21:05:36 -0400
committer  H. Peter Anvin <hpa@zytor.com>              2009-08-21 19:25:55 -0400
commit     d0af9eed5aa91b6b7b5049cae69e5ea956fd85c3
tree       b9214db00ba734e5b943165082c30336f7a8425a
parent     269c861baa2fe7c114c3bc7831292758d29eb336
x86, pat/mtrr: Rendezvous all the cpus for MTRR/PAT init
The SDM Vol 3a section titled "MTRR considerations in MP systems" specifies
the need to synchronize all the logical CPUs while initializing or updating
the MTRRs.

Currently the Linux kernel performs this synchronization only when a single
MTRR register is programmed/updated. During an AP online (during
boot/cpu-online/resume), where we initialize all the MTRR/PAT registers, we
don't follow this synchronization algorithm.

This can lead to a scenario where, during a dynamic cpu online, the incoming
logical cpu initializes MTRR/PAT with caches disabled (cr0.cd=1) while its
logical HT sibling continues to run (also with caches disabled, because of
cr0.cd=1 on its sibling).

Starting with Westmere, VMX transitions with cr0.cd=1 don't work properly
(because of certain VMX performance optimizations), and the above scenario
(one logical cpu doing VMX activity while another logical cpu comes online)
can result in a system crash.

Fix the MTRR initialization by rendezvousing all the CPUs. During boot and
resume, we delay the MTRR/PAT init for the APs until all the logical CPUs
have come online; the rendezvous process at the end of AP bringup then
initializes MTRR/PAT for all the APs.

For a dynamic single-cpu online, we synchronize all the logical CPUs and do
the MTRR/PAT init on the AP that is coming online.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
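
[Editorial note] A minimal userspace sketch of the rendezvous idea the patch
relies on, not part of the patch itself: pthread barriers stand in for the
kernel's IPI-plus-spin-wait machinery in set_mtrr()/ipi_handler(), and
apply_mtrr_pat_state() is a hypothetical stand-in for mtrr_if->set_all().

    /* Build with: gcc -pthread rendezvous.c */
    #include <pthread.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static pthread_barrier_t rendezvous;

    static void apply_mtrr_pat_state(long cpu)
    {
            /* In the kernel this runs with interrupts off and caches
             * disabled (cr0.cd=1); here it is just a placeholder. */
            printf("cpu%ld: programming MTRR/PAT state\n", cpu);
    }

    static void *cpu_thread(void *arg)
    {
            long cpu = (long)arg;

            pthread_barrier_wait(&rendezvous);  /* wait for all CPUs */
            apply_mtrr_pat_state(cpu);          /* update in lockstep */
            pthread_barrier_wait(&rendezvous);  /* nobody leaves early */
            return NULL;
    }

    int main(void)
    {
            pthread_t tid[NR_CPUS];

            pthread_barrier_init(&rendezvous, NULL, NR_CPUS);
            for (long i = 0; i < NR_CPUS; i++)
                    pthread_create(&tid[i], NULL, cpu_thread, (void *)i);
            for (int i = 0; i < NR_CPUS; i++)
                    pthread_join(tid[i], NULL);
            pthread_barrier_destroy(&rendezvous);
            return 0;
    }

The second barrier mirrors the "Wait for the others" loop in set_mtrr(): no
CPU resumes normal work until every CPU has finished programming its state.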
-rw-r--r--   arch/x86/include/asm/mtrr.h       7
-rw-r--r--   arch/x86/kernel/cpu/mtrr/main.c  46
-rw-r--r--   arch/x86/kernel/smpboot.c        14
-rw-r--r--   arch/x86/power/cpu.c              2
-rw-r--r--   kernel/cpu.c                     14
5 files changed, 73 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index a51ada8467de..d5366ec5cb8f 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -121,8 +121,12 @@ extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
 extern void mtrr_ap_init(void);
 extern void mtrr_bp_init(void);
+extern void set_mtrr_aps_delayed_init(void);
+extern void mtrr_aps_init(void);
+extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
+extern u32 mtrr_aps_delayed_init;
 # else
 static inline u8 mtrr_type_lookup(u64 addr, u64 end)
 {
@@ -161,6 +165,9 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
+#define set_mtrr_aps_delayed_init() do {} while (0)
+#define mtrr_aps_init() do {} while (0)
+#define mtrr_bp_restore() do {} while (0)
 # endif
 
 #ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 7af0f88a4163..7339be0aa580 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -58,6 +58,7 @@ unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
 
 u64 size_or_mask, size_and_mask;
+u32 mtrr_aps_delayed_init;
 
 static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
 
@@ -163,7 +164,10 @@ static void ipi_handler(void *info)
 	if (data->smp_reg != ~0U) {
 		mtrr_if->set(data->smp_reg, data->smp_base,
 			     data->smp_size, data->smp_type);
-	} else {
+	} else if (mtrr_aps_delayed_init) {
+		/*
+		 * Initialize the MTRRs in addition to the synchronisation.
+		 */
 		mtrr_if->set_all();
 	}
 
@@ -265,6 +269,8 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
 	 */
 	if (reg != ~0U)
 		mtrr_if->set(reg, base, size, type);
+	else if (!mtrr_aps_delayed_init)
+		mtrr_if->set_all();
 
 	/* Wait for the others */
 	while (atomic_read(&data.count))
@@ -721,9 +727,7 @@ void __init mtrr_bp_init(void)
 
 void mtrr_ap_init(void)
 {
-	unsigned long flags;
-
-	if (!mtrr_if || !use_intel())
+	if (!use_intel() || mtrr_aps_delayed_init)
 		return;
 	/*
 	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries
@@ -738,11 +742,7 @@ void mtrr_ap_init(void)
 	 * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
 	 *    lock to prevent mtrr entry changes
 	 */
-	local_irq_save(flags);
-
-	mtrr_if->set_all();
-
-	local_irq_restore(flags);
+	set_mtrr(~0U, 0, 0, 0);
 }
 
 /**
@@ -753,6 +753,34 @@ void mtrr_save_state(void)
 	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
 }
 
+void set_mtrr_aps_delayed_init(void)
+{
+	if (!use_intel())
+		return;
+
+	mtrr_aps_delayed_init = 1;
+}
+
+/*
+ * MTRR initialization for all AP's
+ */
+void mtrr_aps_init(void)
+{
+	if (!use_intel())
+		return;
+
+	set_mtrr(~0U, 0, 0, 0);
+	mtrr_aps_delayed_init = 0;
+}
+
+void mtrr_bp_restore(void)
+{
+	if (!use_intel())
+		return;
+
+	mtrr_if->set_all();
+}
+
 static int __init mtrr_init_finialize(void)
 {
 	if (!mtrr_if)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2fecda69ee64..d720b7e0cf3d 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1116,9 +1116,22 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
 	if (is_uv_system())
 		uv_system_init();
+
+	set_mtrr_aps_delayed_init();
 out:
 	preempt_enable();
 }
+
+void arch_enable_nonboot_cpus_begin(void)
+{
+	set_mtrr_aps_delayed_init();
+}
+
+void arch_enable_nonboot_cpus_end(void)
+{
+	mtrr_aps_init();
+}
+
 /*
  * Early setup to make printk work.
  */
@@ -1140,6 +1153,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 	setup_ioapic_dest();
 #endif
 	check_nmi_watchdog();
+	mtrr_aps_init();
 }
 
 static int __initdata setup_possible_cpus = -1;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index b3d20b9cac63..417c9f5b4afa 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -242,7 +242,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 	fix_processor_context();
 
 	do_fpu_end();
-	mtrr_ap_init();
+	mtrr_bp_restore();
 
 #ifdef CONFIG_X86_OLD_MCE
 	mcheck_init(&boot_cpu_data);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8ce10043e4ac..f5f9485b8c0f 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -413,6 +413,14 @@ int disable_nonboot_cpus(void)
 	return error;
 }
 
+void __weak arch_enable_nonboot_cpus_begin(void)
+{
+}
+
+void __weak arch_enable_nonboot_cpus_end(void)
+{
+}
+
 void __ref enable_nonboot_cpus(void)
 {
 	int cpu, error;
@@ -424,6 +432,9 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
+
+	arch_enable_nonboot_cpus_begin();
+
 	for_each_cpu(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
@@ -432,6 +443,9 @@ void __ref enable_nonboot_cpus(void)
 		}
 		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
 	}
+
+	arch_enable_nonboot_cpus_end();
+
 	cpumask_clear(frozen_cpus);
 out:
 	cpu_maps_update_done();
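
[Editorial note] The arch_enable_nonboot_cpus_begin()/_end() stubs added to
kernel/cpu.c are weak symbols: the x86 definitions in smpboot.c override
them at link time, while every other architecture keeps the no-op defaults.
A minimal standalone illustration of that mechanism, using a hypothetical
symbol name and GCC/Clang attribute syntax:

    #include <stdio.h>

    /* Weak default: a no-op, like the kernel/cpu.c stubs above. */
    void __attribute__((weak)) arch_hook(void)
    {
    }

    int main(void)
    {
            /*
             * With no strong definition linked in, the weak no-op runs.
             * Linking another object file that defines arch_hook()
             * without the weak attribute silently replaces this default,
             * the same way x86 overrides the CPU-onlining hooks.
             */
            arch_hook();
            puts("resume path continues");
            return 0;
    }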