author     Ingo Molnar <mingo@kernel.org>  2012-07-05 15:12:11 -0400
committer  Ingo Molnar <mingo@kernel.org>  2012-07-05 15:12:11 -0400
commit     b0338e99b2a775c157e3e795f49fdcfb6c257f7a (patch)
tree       59573cd20718ebb5145b05938ef3fb428bde65c7
parent     90574ebb7e6e0f7f74636ee87315890ba88d6a4a (diff)
parent     715c85b1fc824e9cd0ea07d6ceb80d2262f32e90 (diff)
Merge branch 'x86/cpu' into perf/core
Merge this branch because we changed the wrmsr*_safe() API and there's
a conflict.
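In short: checking_wrmsrl() becomes wrmsrl_safe(), and the
native_{rdmsr,wrmsr}_safe_regs() helpers lose their native_ prefix. A
minimal before/after sketch of a call site (the MSR constant is taken
from the process_64.c hunk below):

	/* before */
	ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);

	/* after */
	ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);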
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/x86/include/asm/msr.h             | 44
-rw-r--r--  arch/x86/include/asm/paravirt.h        | 39
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  |  2
-rw-r--r--  arch/x86/kernel/cpu/amd.c              | 39
-rw-r--r--  arch/x86/kernel/cpu/common.c           |  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c       |  2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c |  6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c    | 14
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c    |  4
-rw-r--r--  arch/x86/kernel/paravirt.c             |  2
-rw-r--r--  arch/x86/kernel/process_64.c           |  4
-rw-r--r--  arch/x86/lib/msr-reg-export.c          |  4
-rw-r--r--  arch/x86/lib/msr-reg.S                 | 10
-rw-r--r--  arch/x86/vdso/vdso32-setup.c           |  6
-rw-r--r--  arch/x86/xen/enlighten.c               |  2
15 files changed, 65 insertions(+), 115 deletions(-)
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index e489c1475be9..813ed103f45e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -115,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 
 extern unsigned long long native_read_tsc(void);
 
-extern int native_rdmsr_safe_regs(u32 regs[8]);
-extern int native_wrmsr_safe_regs(u32 regs[8]);
+extern int rdmsr_safe_regs(u32 regs[8]);
+extern int wrmsr_safe_regs(u32 regs[8]);
 
 static __always_inline unsigned long long __native_read_tsc(void)
 {
@@ -187,43 +187,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
         return err;
 }
 
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-        u32 gprs[8] = { 0 };
-        int err;
-
-        gprs[1] = msr;
-        gprs[7] = 0x9c5a203a;
-
-        err = native_rdmsr_safe_regs(gprs);
-
-        *p = gprs[0] | ((u64)gprs[2] << 32);
-
-        return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-        u32 gprs[8] = { 0 };
-
-        gprs[0] = (u32)val;
-        gprs[1] = msr;
-        gprs[2] = val >> 32;
-        gprs[7] = 0x9c5a203a;
-
-        return native_wrmsr_safe_regs(gprs);
-}
-
-static inline int rdmsr_safe_regs(u32 regs[8])
-{
-        return native_rdmsr_safe_regs(regs);
-}
-
-static inline int wrmsr_safe_regs(u32 regs[8])
-{
-        return native_wrmsr_safe_regs(regs);
-}
-
 #define rdtscl(low) \
         ((low) = (u32)__native_read_tsc())
 
@@ -250,8 +213,7 @@ do { \
 
 #endif /* !CONFIG_PARAVIRT */
 
-
-#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
                                              (u32)((val) >> 32))
 
 #define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
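The renamed wrmsrl_safe() keeps checking_wrmsrl()'s behavior: it splits
the 64-bit value into the low/high u32 halves that wrmsr_safe() takes
and returns its error code. A small usage sketch, reusing the MSR and
mask from the amd.c hunk below:

	u64 mask;

	if (!rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask)) {
		mask |= (1 << 10);
		/* expands to wrmsr_safe(msr, (u32)val, (u32)(val >> 32)) */
		wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
	}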
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 14ce05dfe04e..0b47ddb6f00b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -128,21 +128,11 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
         return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
 
-static inline int paravirt_rdmsr_regs(u32 *regs)
-{
-        return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
-}
-
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
         return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
 }
 
-static inline int paravirt_wrmsr_regs(u32 *regs)
-{
-        return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
-}
-
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr, val1, val2) \
 do { \
@@ -176,9 +166,6 @@ do { \
         _err; \
 })
 
-#define rdmsr_safe_regs(regs)   paravirt_rdmsr_regs(regs)
-#define wrmsr_safe_regs(regs)   paravirt_wrmsr_regs(regs)
-
 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 {
         int err;
@@ -186,32 +173,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
         *p = paravirt_read_msr(msr, &err);
         return err;
 }
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-        u32 gprs[8] = { 0 };
-        int err;
-
-        gprs[1] = msr;
-        gprs[7] = 0x9c5a203a;
-
-        err = paravirt_rdmsr_regs(gprs);
-
-        *p = gprs[0] | ((u64)gprs[2] << 32);
-
-        return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-        u32 gprs[8] = { 0 };
-
-        gprs[0] = (u32)val;
-        gprs[1] = msr;
-        gprs[2] = val >> 32;
-        gprs[7] = 0x9c5a203a;
-
-        return paravirt_wrmsr_regs(gprs);
-}
 
 static inline u64 paravirt_read_tsc(void)
 {
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8e8b9a4987ee..8613cbb7ba41 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -153,9 +153,7 @@ struct pv_cpu_ops {
         /* MSR, PMC and TSR operations.
            err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
         u64 (*read_msr)(unsigned int msr, int *err);
-        int (*rdmsr_regs)(u32 *regs);
         int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
-        int (*wrmsr_regs)(u32 *regs);
 
         u64 (*read_tsc)(void);
         u64 (*read_pmc)(int counter);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 146bb6218eec..9d92e19039f0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -19,6 +19,39 @@
 
 #include "cpu.h"
 
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+        struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+        u32 gprs[8] = { 0 };
+        int err;
+
+        WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+        gprs[1] = msr;
+        gprs[7] = 0x9c5a203a;
+
+        err = rdmsr_safe_regs(gprs);
+
+        *p = gprs[0] | ((u64)gprs[2] << 32);
+
+        return err;
+}
+
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+        struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+        u32 gprs[8] = { 0 };
+
+        WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+        gprs[0] = (u32)val;
+        gprs[1] = msr;
+        gprs[2] = val >> 32;
+        gprs[7] = 0x9c5a203a;
+
+        return wrmsr_safe_regs(gprs);
+}
+
 #ifdef CONFIG_X86_32
 /*
  * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
@@ -586,9 +619,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
             !cpu_has(c, X86_FEATURE_TOPOEXT)) {
                 u64 val;
 
-                if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+                if (!rdmsrl_safe(0xc0011005, &val)) {
                         val |= 1ULL << 54;
-                        wrmsrl_amd_safe(0xc0011005, val);
+                        wrmsrl_safe(0xc0011005, val);
                         rdmsrl(0xc0011005, val);
                         if (val & (1ULL << 54)) {
                                 set_cpu_cap(c, X86_FEATURE_TOPOEXT);
@@ -679,7 +712,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                 err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
                 if (err == 0) {
                         mask |= (1 << 10);
-                        checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+                        wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
                 }
         }
 
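For context, the relocated AMD helpers drive the *_safe_regs() interface
using the register layout documented in msr-reg.S (gprs[] maps to eax,
ecx, edx, ebx, esp, ebp, esi, edi). A hedged sketch of a raw read, with
the MSR number taken from the hunk above:

	u32 gprs[8] = { 0 };
	u64 val;

	gprs[1] = 0xc0011005;	/* MSR number, passed in %ecx */
	gprs[7] = 0x9c5a203a;	/* AMD K8 pass code, in %edi */
	if (!rdmsr_safe_regs(gprs))
		val = gprs[0] | ((u64)gprs[2] << 32);	/* result in %edx:%eax */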
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6b9333b429ba..5bbc082c47ad 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -947,7 +947,7 @@ static void __cpuinit __print_cpu_msr(void)
                 index_max = msr_range_array[i].max;
 
                 for (index = index_min; index < index_max; index++) {
-                        if (rdmsrl_amd_safe(index, &val))
+                        if (rdmsrl_safe(index, &val))
                                 continue;
                         printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
                 }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 69d5feadab27..e677d9923f4f 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -211,7 +211,7 @@ static bool check_hw_exists(void)
          * that don't trap on the MSR access and always return 0s.
          */
         val = 0xabcdUL;
-        ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+        ret = wrmsrl_safe(x86_pmu_event_addr(0), val);
         ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
         if (ret || val != val_new)
                 goto msr_fail;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index e23e71f25264..8408e37f5fa4 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1003,11 +1003,11 @@ static void intel_pmu_reset(void)
         printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-                checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
-                checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
+                wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
+                wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
         }
         for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
-                checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+                wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
 
         if (ds)
                 ds->bts_index = ds->bts_buffer_base;
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 47124a73dd73..6c82e4037989 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
          * So at moment let leave metrics turned on forever -- it's
          * ok for now but need to be revisited!
          *
-         * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
-         * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
+         * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0);
+         * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
          */
 }
 
@@ -909,7 +909,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
          * state we need to clear P4_CCCR_OVF, otherwise interrupt get
          * asserted again and again
          */
-        (void)checking_wrmsrl(hwc->config_base,
+        (void)wrmsrl_safe(hwc->config_base,
                 (u64)(p4_config_unpack_cccr(hwc->config)) &
                         ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
 }
@@ -943,8 +943,8 @@ static void p4_pmu_enable_pebs(u64 config)
 
         bind = &p4_pebs_bind_map[idx];
 
-        (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
-        (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
+        (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
+        (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
 }
 
 static void p4_pmu_enable_event(struct perf_event *event)
@@ -978,8 +978,8 @@ static void p4_pmu_enable_event(struct perf_event *event)
          */
         p4_pmu_enable_pebs(hwc->config);
 
-        (void)checking_wrmsrl(escr_addr, escr_conf);
-        (void)checking_wrmsrl(hwc->config_base,
+        (void)wrmsrl_safe(escr_addr, escr_conf);
+        (void)wrmsrl_safe(hwc->config_base,
                         (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 32bcfc7dd230..e4dd0f7a0453 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -71,7 +71,7 @@ p6_pmu_disable_event(struct perf_event *event)
         if (cpuc->enabled)
                 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-        (void)checking_wrmsrl(hwc->config_base, val);
+        (void)wrmsrl_safe(hwc->config_base, val);
 }
 
 static void p6_pmu_enable_event(struct perf_event *event)
@@ -84,7 +84,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
         if (cpuc->enabled)
                 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-        (void)checking_wrmsrl(hwc->config_base, val);
+        (void)wrmsrl_safe(hwc->config_base, val);
 }
 
 PMU_FORMAT_ATTR(event, "config:0-7" );
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 9ce885996fd7..17fff18a1031 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -352,9 +352,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
         .wbinvd = native_wbinvd,
         .read_msr = native_read_msr_safe,
-        .rdmsr_regs = native_rdmsr_safe_regs,
         .write_msr = native_write_msr_safe,
-        .wrmsr_regs = native_wrmsr_safe_regs,
         .read_tsc = native_read_tsc,
         .read_pmc = native_read_pmc,
         .read_tscp = native_read_tscp,
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 61cdf7fdf099..3e215ba68766 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -466,7 +466,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                 task->thread.gs = addr;
                 if (doit) {
                         load_gs_index(0);
-                        ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
+                        ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
                 }
         }
         put_cpu();
@@ -494,7 +494,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                         /* set the selector to 0 to not confuse
                            __switch_to */
                         loadsegment(fs, 0);
-                        ret = checking_wrmsrl(MSR_FS_BASE, addr);
+                        ret = wrmsrl_safe(MSR_FS_BASE, addr);
                 }
         }
         put_cpu();
diff --git a/arch/x86/lib/msr-reg-export.c b/arch/x86/lib/msr-reg-export.c
index a311cc59b65d..8d6ef78b5d01 100644
--- a/arch/x86/lib/msr-reg-export.c
+++ b/arch/x86/lib/msr-reg-export.c
@@ -1,5 +1,5 @@
 #include <linux/module.h>
 #include <asm/msr.h>
 
-EXPORT_SYMBOL(native_rdmsr_safe_regs);
-EXPORT_SYMBOL(native_wrmsr_safe_regs);
+EXPORT_SYMBOL(rdmsr_safe_regs);
+EXPORT_SYMBOL(wrmsr_safe_regs);
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index 69fa10623f21..f6d13eefad10 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -6,13 +6,13 @@
 
 #ifdef CONFIG_X86_64
 /*
- * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
+ * int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
  *
  * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
  *
  */
 .macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
         CFI_STARTPROC
         pushq_cfi %rbx
         pushq_cfi %rbp
@@ -45,13 +45,13 @@ ENTRY(native_\op\()_safe_regs)
 
         _ASM_EXTABLE(1b, 3b)
         CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
 .endm
 
 #else /* X86_32 */
 
 .macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
         CFI_STARTPROC
         pushl_cfi %ebx
         pushl_cfi %ebp
@@ -92,7 +92,7 @@ ENTRY(native_\op\()_safe_regs)
 
         _ASM_EXTABLE(1b, 3b)
         CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
 .endm
 
 #endif
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 66e6d9359826..0faad646f5fd 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -205,9 +205,9 @@ void syscall32_cpu_init(void)
 {
         /* Load these always in case some future AMD CPU supports
            SYSENTER from compat mode too. */
-        checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-        checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-        checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+        wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+        wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
 
         wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index ff962d4b821e..ed7d54985d0c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1124,9 +1124,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
         .wbinvd = native_wbinvd,
 
         .read_msr = native_read_msr_safe,
-        .rdmsr_regs = native_rdmsr_safe_regs,
         .write_msr = xen_write_msr_safe,
-        .wrmsr_regs = native_wrmsr_safe_regs,
 
         .read_tsc = native_read_tsc,
         .read_pmc = native_read_pmc,