author		Borislav Petkov <petkovbb@googlemail.com>	2009-08-31 03:50:09 -0400
committer	H. Peter Anvin <hpa@zytor.com>			2009-08-31 18:14:26 -0400
commit		132ec92f3f70fe365c1f4b8d46e66cf8a2a16880
tree		f8e3f3ab5541f583030b0bcd5f3f81ca338f77a2	/arch/x86/include
parent		366d19e181be873c70f4aafca3931d77d781ccd7
x86, msr: Add rd/wrmsr interfaces with preset registers
native_{rdmsr,wrmsr}_safe_regs are two new interfaces which allow
presetting of a subset of eight x86 GPRs before executing the rd/wrmsr
instructions. This is needed at least on AMD K8 for accessing an erratum
workaround MSR.
Originally based on an idea by H. Peter Anvin.
Signed-off-by: Borislav Petkov <petkovbb@gmail.com>
LKML-Reference: <1251705011-18636-1-git-send-email-petkovbb@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
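
A minimal usage sketch (not part of the commit): the series later converts the existing rdmsrl_amd_safe() helper, visible in the second msr.h hunk below, to the new interface. The eight u32 slots are assumed here to map to the GPRs in eax, ecx, edx, ebx, esp, ebp, esi, edi order; the assembly implementation is outside this 'arch/x86/include' diffstat, so treat the layout, the msr variable and the 0x9c5a203a EDI preset as illustrative.

	u32 gprs[8] = { 0 };
	u64 val;
	int err;

	gprs[1] = msr;			/* ecx: MSR number to read (hypothetical local) */
	gprs[7] = 0x9c5a203a;		/* edi: preset used for the AMD erratum-workaround MSR */

	err = rdmsr_safe_regs(gprs);	/* 0 on success, -EFAULT if the access faulted */
	if (!err)
		val = gprs[0] | ((u64)gprs[2] << 32);	/* eax = low half, edx = high half */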
Diffstat (limited to 'arch/x86/include')
 -rw-r--r--	arch/x86/include/asm/msr.h		13
 -rw-r--r--	arch/x86/include/asm/paravirt.h		16
 2 files changed, 29 insertions, 0 deletions
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 48ad9d29484a..184d4a113961 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -113,6 +113,9 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 
 extern unsigned long long native_read_tsc(void);
 
+extern int native_rdmsr_safe_regs(u32 *regs);
+extern int native_wrmsr_safe_regs(u32 *regs);
+
 static __always_inline unsigned long long __native_read_tsc(void)
 {
 	DECLARE_ARGS(val, low, high);
@@ -189,6 +192,16 @@ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 	return err;
 }
 
+static inline int rdmsr_safe_regs(u32 *regs)
+{
+	return native_rdmsr_safe_regs(regs);
+}
+
+static inline int wrmsr_safe_regs(u32 *regs)
+{
+	return native_wrmsr_safe_regs(regs);
+}
+
 #define rdtscl(low)						\
 	((low) = (u32)__native_read_tsc())
 
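The two wrappers added to msr.h above are the non-paravirt path: they forward straight to the native_*_safe_regs externs. For the write direction, under the same assumed register layout as in the earlier sketch, a caller would split the 64-bit value across eax/edx before invoking wrmsr_safe_regs(); msr and val are again hypothetical locals:

	u32 gprs[8] = { 0 };
	int err;

	gprs[0] = (u32)val;		/* eax: low 32 bits of the value */
	gprs[1] = msr;			/* ecx: MSR number to write */
	gprs[2] = (u32)(val >> 32);	/* edx: high 32 bits of the value */
	gprs[7] = 0x9c5a203a;		/* edi: preset, as on the read side */

	err = wrmsr_safe_regs(gprs);	/* 0 on success, -EFAULT on fault */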
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4fb37c8a0832..1705944e0374 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -168,7 +168,9 @@ struct pv_cpu_ops {
 	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
 	u64 (*read_msr_amd)(unsigned int msr, int *err);
 	u64 (*read_msr)(unsigned int msr, int *err);
+	int (*rdmsr_regs)(u32 *regs);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+	int (*wrmsr_regs)(u32 *regs);
 
 	u64 (*read_tsc)(void);
 	u64 (*read_pmc)(int counter);
@@ -820,6 +822,12 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
+
+static inline int paravirt_rdmsr_regs(u32 *regs)
+{
+	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
+}
+
 static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
@@ -829,6 +837,11 @@ static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
 }
 
+static inline int paravirt_wrmsr_regs(u32 *regs)
+{
+	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
+}
+
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr, val1, val2)					\
 do {								\
@@ -862,6 +875,9 @@ do { \
 	_err;							\
 })
 
+#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
+#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)
+
 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 {
 	int err;
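
On CONFIG_PARAVIRT kernels the same rdmsr_safe_regs()/wrmsr_safe_regs() names resolve to the PVOP wrappers above, so each backend must populate the two new pv_cpu_ops hooks; for the native backend that presumably means pointing them at the functions declared in msr.h. The paravirt.c side is outside this 'arch/x86/include' diffstat, so the fragment below is only a sketch:

	/* illustrative fragment of the native pv_cpu_ops initializer */
	.rdmsr_regs = native_rdmsr_safe_regs,
	.wrmsr_regs = native_wrmsr_safe_regs,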