author     Suresh Siddha <suresh.b.siddha@intel.com>    2009-08-19 21:05:36 -0400
committer  H. Peter Anvin <hpa@zytor.com>               2009-08-21 19:25:55 -0400
commit     d0af9eed5aa91b6b7b5049cae69e5ea956fd85c3 (patch)
tree       b9214db00ba734e5b943165082c30336f7a8425a /arch/x86/include/asm/mtrr.h
parent     269c861baa2fe7c114c3bc7831292758d29eb336 (diff)
x86, pat/mtrr: Rendezvous all the cpus for MTRR/PAT init
The SDM Vol 3a section titled "MTRR considerations in MP systems" specifies the need to synchronize the logical CPUs while initializing/updating MTRR.

Currently the Linux kernel does this synchronization of all CPUs only when a single MTRR register is programmed/updated. During an AP online (during boot/cpu-online/resume), where we initialize all the MTRR/PAT registers, we don't follow this synchronization algorithm.

This can lead to a scenario where, during a dynamic CPU online, that logical CPU is initializing MTRR/PAT with the cache disabled (cr0.cd=1) while the other logical HT sibling continues to run (also effectively with the cache disabled, because cr0.cd=1 is set on its sibling). Starting with Westmere, VMX transitions with cr0.cd=1 don't work properly (because of some VMX performance optimizations), and the above scenario (one logical CPU doing VMX activity while another logical CPU comes online) can result in a system crash.

Fix the MTRR initialization by doing a rendezvous of all the CPUs. During boot and resume, we delay the MTRR/PAT init for the APs until all the logical CPUs have come online, and the rendezvous process at the end of AP bringup initializes MTRR/PAT for all the APs. For a dynamic single-CPU online, we synchronize all the logical CPUs and do the MTRR/PAT init on the AP that is coming online.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
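A minimal, hedged sketch of the policy described above (a userspace model, not the kernel implementation): it shows the delayed-init flag and the two paths, deferring per-AP init during boot/resume versus doing an immediate rendezvous for a single hot-plugged CPU. The names mtrr_aps_delayed_init, set_mtrr_aps_delayed_init(), mtrr_ap_init() and mtrr_aps_init() mirror the declarations added below; the rendezvous helper and the sequence in main() are purely illustrative assumptions.

#include <stdio.h>
#include <stdbool.h>

/* Mirrors the u32 flag declared in the header; bool is enough for this model. */
static bool mtrr_aps_delayed_init;

/* Stand-in for the cross-CPU MTRR/PAT rendezvous the commit message describes. */
static void mtrr_rendezvous_all_cpus(void)
{
        printf("rendezvous: all CPUs update MTRR/PAT together\n");
}

/* Boot/resume path: ask mtrr_ap_init() to defer until every AP is online. */
static void set_mtrr_aps_delayed_init(void)
{
        mtrr_aps_delayed_init = true;
}

/* Called as each AP comes up. */
static void mtrr_ap_init(void)
{
        if (mtrr_aps_delayed_init)
                return;                 /* boot/resume: handled later in bulk */
        mtrr_rendezvous_all_cpus();     /* dynamic single-CPU online */
}

/* Called once at the end of AP bringup (boot/resume). */
static void mtrr_aps_init(void)
{
        if (!mtrr_aps_delayed_init)
                return;
        mtrr_rendezvous_all_cpus();     /* one rendezvous for all APs */
        mtrr_aps_delayed_init = false;
}

int main(void)
{
        /* Boot: per-AP init is skipped, one rendezvous at the end. */
        set_mtrr_aps_delayed_init();
        mtrr_ap_init();                 /* AP 1 online */
        mtrr_ap_init();                 /* AP 2 online */
        mtrr_aps_init();                /* end of AP bringup */

        /* Later hotplug of a single CPU: rendezvous right away. */
        mtrr_ap_init();
        return 0;
}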
Diffstat (limited to 'arch/x86/include/asm/mtrr.h')
-rw-r--r--  arch/x86/include/asm/mtrr.h  7
1 file changed, 7 insertions, 0 deletions
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index a51ada8467de..d5366ec5cb8f 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -121,8 +121,12 @@ extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
 extern void mtrr_ap_init(void);
 extern void mtrr_bp_init(void);
+extern void set_mtrr_aps_delayed_init(void);
+extern void mtrr_aps_init(void);
+extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
+extern u32 mtrr_aps_delayed_init;
 # else
 static inline u8 mtrr_type_lookup(u64 addr, u64 end)
 {
@@ -161,6 +165,9 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
+#define set_mtrr_aps_delayed_init() do {} while (0)
+#define mtrr_aps_init() do {} while (0)
+#define mtrr_bp_restore() do {} while (0)
 # endif
 
 #ifdef CONFIG_COMPAT
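When CONFIG_MTRR is disabled, the "# else" branch turns the new entry points into empty do {} while (0) statements, so call sites need no #ifdefs of their own. A minimal standalone sketch of that pattern (the caller and the commented-out CONFIG_MTRR toggle are illustrative, not part of the patch):

#include <stdio.h>

/* #define CONFIG_MTRR */          /* uncomment to require a real implementation */

#ifdef CONFIG_MTRR
void mtrr_aps_init(void);          /* real implementation would live elsewhere */
#else
/* same idiom as the header: the call compiles away to an empty statement */
#define mtrr_aps_init() do {} while (0)
#endif

int main(void)
{
        mtrr_aps_init();           /* valid in either configuration, no #ifdef here */
        printf("SMP bringup finished\n");
        return 0;
}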