author:    Mike Frysinger <vapier@gentoo.org>    2008-01-01 13:12:15 -0500
committer: Ingo Molnar <mingo@elte.hu>           2008-01-01 13:12:15 -0500
commit:    56986d4210e5077d67e4eff538a316a6cc4e5158 (patch)
tree:      f638e1297e3bfac457259f57b26bebe9aeeee1c7 /include/asm-x86/msr.h
parent:    e0c4ae06495494a38843da8445e2b6e1f59b9253 (diff)
x86: fix asm-x86/msr.h for user-space export
Use __asm__ and __volatile__ in code that is exported to userspace.
Wrap kernel functions with __KERNEL__ so they get scrubbed.

No code changed:

   text    data     bss      dec     hex filename
9681036 1698924 3407872 14787832  e1a4f8 vmlinux.before
9681036 1698924 3407872 14787832  e1a4f8 vmlinux.after

Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
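The first half of the change matters because userspace may compile the exported header under a strict ISO C mode, where GCC disables the `asm` extension keyword but always accepts the reserved `__asm__`/`__volatile__` spellings. A minimal sketch of the failure mode (hypothetical test.c, not part of the patch; build with `gcc -std=c99 -o tsc test.c` on x86-64):

#include <stdio.h>

int main(void)
{
	unsigned int lo, hi;

	/* Same pattern as the exported rdtsc() macro in this header.
	 * With plain `asm volatile(...)' this line would fail to
	 * compile under -std=c99, since `asm' is a GNU extension
	 * keyword; `__asm__ __volatile__' is always available. */
	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));

	printf("TSC: %#llx\n", ((unsigned long long)hi << 32) | lo);
	return 0;
}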
Diffstat (limited to 'include/asm-x86/msr.h')
 include/asm-x86/msr.h | 74
 1 file changed, 39 insertions(+), 35 deletions(-)
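The second half of the change relies on the header-export step (`make headers_install`), which filters exported headers through scripts/unifdef so that everything inside a `#ifdef __KERNEL__` block is removed from the copy userspace sees. A schematic sketch of the guard pattern this patch applies (names hypothetical):

/* Visible both in-tree and in the installed userspace header: */
#define rdtsc_example(low, high) \
	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#ifdef __KERNEL__
/* Kernel-only: references -EFAULT, the exception-table fixup
 * machinery, etc., none of which exist for userspace builds,
 * so unifdef scrubs this whole block out of the exported copy. */
#define wrmsr_safe_example(msr, a, b)	({ 0; })
#endif /* __KERNEL__ */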
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index ba4b31432120..664a2fa7adc9 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -191,38 +191,6 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 
 #define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
 
-/* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__; \
-	asm volatile("2: wrmsr ; xorl %0,%0\n" \
-		     "1:\n\t" \
-		     ".section .fixup,\"ax\"\n\t" \
-		     "3: movl %4,%0 ; jmp 1b\n\t" \
-		     ".previous\n\t" \
-		     ".section __ex_table,\"a\"\n" \
-		     " .align 8\n\t" \
-		     " .quad 2b,3b\n\t" \
-		     ".previous" \
-		     : "=a" (ret__) \
-		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
-	ret__; })
-
-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
-
-#define rdmsr_safe(msr,a,b) \
-	({ int ret__; \
-	  asm volatile ("1: rdmsr\n" \
-			"2:\n" \
-			".section .fixup,\"ax\"\n" \
-			"3: movl %4,%0\n" \
-			" jmp 2b\n" \
-			".previous\n" \
-			".section __ex_table,\"a\"\n" \
-			" .align 8\n" \
-			" .quad 1b,3b\n" \
-			".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
-			:"c"(msr), "i"(-EIO), "0"(0)); \
-	  ret__; })
-
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
 
@@ -230,17 +198,17 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
      __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
 
 #define rdtscp(low,high,aux) \
-     asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
+     __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
 
 #define rdtscll(val) do { \
      unsigned int __a,__d; \
-     asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+     __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
      (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
 } while(0)
 
 #define rdtscpll(val, aux) do { \
      unsigned long __a, __d; \
-     asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
+     __asm__ __volatile__ (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
      (val) = (__d << 32) | __a; \
 } while (0)
 
@@ -253,6 +221,7 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
      : "=a" (low), "=d" (high) \
      : "c" (counter))
 
+
 static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
 			 unsigned int *ecx, unsigned int *edx)
 {
@@ -320,6 +289,40 @@ static inline unsigned int cpuid_edx(unsigned int op)
 	return edx;
 }
 
+#ifdef __KERNEL__
+
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__; \
+	asm volatile("2: wrmsr ; xorl %0,%0\n" \
+		     "1:\n\t" \
+		     ".section .fixup,\"ax\"\n\t" \
+		     "3: movl %4,%0 ; jmp 1b\n\t" \
+		     ".previous\n\t" \
+		     ".section __ex_table,\"a\"\n" \
+		     " .align 8\n\t" \
+		     " .quad 2b,3b\n\t" \
+		     ".previous" \
+		     : "=a" (ret__) \
+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
+	ret__; })
+
+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+
+#define rdmsr_safe(msr,a,b) \
+	({ int ret__; \
+	  asm volatile ("1: rdmsr\n" \
+			"2:\n" \
+			".section .fixup,\"ax\"\n" \
+			"3: movl %4,%0\n" \
+			" jmp 2b\n" \
+			".previous\n" \
+			".section __ex_table,\"a\"\n" \
+			" .align 8\n" \
+			" .quad 1b,3b\n" \
+			".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
+			:"c"(msr), "i"(-EIO), "0"(0)); \
+	  ret__; })
+
 #ifdef CONFIG_SMP
 void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
@@ -343,6 +346,7 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 	return wrmsr_safe(msr_no, l, h);
 }
 #endif /* CONFIG_SMP */
+#endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
 #endif /* !__i386__ */
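For context, the exception-handled variants moved under __KERNEL__ are called like this from kernel code. A sketch only (the MSR number is an illustrative example, not taken from the patch; assumes kernel context where u32 and these macros are available):

static int probe_msr_example(void)
{
	u32 lo, hi;

	/* rdmsr_safe() returns 0 on success and -EIO if the rdmsr
	 * faults: the __ex_table entry redirects the #GP to the
	 * fixup stub instead of letting it kill the kernel. */
	if (rdmsr_safe(0xc0000080 /* MSR_EFER, for illustration */, &lo, &hi))
		return -1;	/* MSR not implemented on this CPU */

	/* wrmsr_safe() likewise traps the fault, returning -EFAULT. */
	return wrmsr_safe(0xc0000080 /* MSR_EFER, for illustration */, lo, hi);
}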