path: root/arch/x86/include/asm/uaccess.h
author    Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>    2009-01-23 18:49:41 -0500
committer H. Peter Anvin <hpa@linux.intel.com>             2009-01-23 20:17:36 -0500
commit    fe40c0af3cff3ea461cf25bddb979abc7279d4df (patch)
tree      be37f58bce212299476186147e44dbd486be34a1 /arch/x86/include/asm/uaccess.h
parent    cc86c9e0dc1a41451240b948bb39d46bb2536ae8 (diff)
x86: uaccess: introduce try and catch framework
Impact: introduce new uaccess exception handling framework

Introduce {get|put}_user_try and {get|put}_user_catch as a new uaccess
exception handling framework.  {get|put}_user_try begins an exception
block, and {get|put}_user_catch(err) ends the block and sets err if an
exception occurred in a {get|put}_user_ex() within the block.  The
exception is stored in thread_info->uaccess_err.

Example usage of this framework:

int func()
{
	int err = 0;

	get_user_try {
		get_user_ex(...);
		get_user_ex(...);
		:
	} get_user_catch(err);

	return err;
}

Note: get_user_ex() does not clear the value when an exception occurs;
this differs from the behavior of __get_user(), but I think it doesn't
matter.

Signed-off-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
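For the write direction, the put_user side follows the same pattern. The
sketch below only illustrates the API added by this patch; the function
name, struct, and field names are hypothetical:

int fill_user_frame(struct user_frame __user *frame, long ax, long ip)
{
	int err = 0;

	put_user_try {
		/* each put_user_ex() is a single store with an
		 * exception-table entry; no per-access error check */
		put_user_ex(ax, &frame->ax);
		put_user_ex(ip, &frame->ip);
	} put_user_catch(err);

	return err;	/* non-zero if any access above faulted */
}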
Diffstat (limited to 'arch/x86/include/asm/uaccess.h')
-rw-r--r--   arch/x86/include/asm/uaccess.h   103
1 file changed, 103 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 69d2757cca9b..0ec6de4bcb0b 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -199,12 +199,22 @@ extern int __get_user_bad(void);
 		     : "=r" (err)					\
 		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
 
+#define __put_user_asm_ex_u64(x, addr)					\
+	asm volatile("1:	movl %%eax,0(%1)\n"			\
+		     "2:	movl %%edx,4(%1)\n"			\
+		     "3:\n"						\
+		     _ASM_EXTABLE(1b, 2b - 1b)				\
+		     _ASM_EXTABLE(2b, 3b - 2b)				\
+		     : : "A" (x), "r" (addr))
+
 #define __put_user_x8(x, ptr, __ret_pu)					\
 	asm volatile("call __put_user_8" : "=a" (__ret_pu)		\
 		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 #else
 #define __put_user_asm_u64(x, ptr, retval) \
 	__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+#define __put_user_asm_ex_u64(x, addr)	\
+	__put_user_asm_ex(x, addr, "q", "", "Zr")
 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
 #endif
 
@@ -286,6 +296,27 @@ do { \
 	}								\
 } while (0)
 
+#define __put_user_size_ex(x, ptr, size)				\
+do {									\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	case 1:								\
+		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
+		break;							\
+	case 2:								\
+		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
+		break;							\
+	case 4:								\
+		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
+		break;							\
+	case 8:								\
+		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
+		break;							\
+	default:							\
+		__put_user_bad();					\
+	}								\
+} while (0)
+
 #else
 
 #define __put_user_size(x, ptr, size, retval, errret)			\
@@ -311,9 +342,12 @@ do { \
 
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
+#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
 #else
 #define __get_user_asm_u64(x, ptr, retval, errret) \
 	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_ex_u64(x, ptr) \
+	 __get_user_asm_ex(x, ptr, "q", "", "=r")
 #endif
 
 #define __get_user_size(x, ptr, size, retval, errret)			\
@@ -350,6 +384,33 @@ do { \
 		     : "=r" (err), ltype(x)				\
 		     : "m" (__m(addr)), "i" (errret), "0" (err))
 
+#define __get_user_size_ex(x, ptr, size)				\
+do {									\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	case 1:								\
+		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
+		break;							\
+	case 2:								\
+		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
+		break;							\
+	case 4:								\
+		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
+		break;							\
+	case 8:								\
+		__get_user_asm_ex_u64(x, ptr);				\
+		break;							\
+	default:							\
+		(x) = __get_user_bad();					\
+	}								\
+} while (0)
+
+#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
+	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
+		     "2:\n"						\
+		     _ASM_EXTABLE(1b, 2b - 1b)				\
+		     : ltype(x) : "m" (__m(addr)))
+
 #define __put_user_nocheck(x, ptr, size)				\
 ({									\
 	int __pu_err;							\
@@ -385,6 +446,26 @@ struct __large_struct { unsigned long buf[100]; };
 		     _ASM_EXTABLE(1b, 3b)				\
 		     : "=r"(err)					\
 		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
+	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
+		     "2:\n"						\
+		     _ASM_EXTABLE(1b, 2b - 1b)				\
+		     : : ltype(x), "m" (__m(addr)))
+
+/*
+ * uaccess_try and catch
+ */
+#define uaccess_try	do {						\
+	int prev_err = current_thread_info()->uaccess_err;		\
+	current_thread_info()->uaccess_err = 0;				\
+	barrier();
+
+#define uaccess_catch(err)						\
+	(err) |= current_thread_info()->uaccess_err;			\
+	current_thread_info()->uaccess_err = prev_err;			\
+} while (0)
+
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
  * @x:   Variable to store result.
@@ -408,6 +489,7 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define __get_user(x, ptr)						\
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
 /**
  * __put_user: - Write a simple value into user space, with less checking.
  * @x:   Value to copy to user space.
@@ -435,6 +517,27 @@ struct __large_struct { unsigned long buf[100]; };
 #define __put_user_unaligned __put_user
 
 /*
+ * {get|put}_user_try and catch
+ *
+ * get_user_try {
+ *	get_user_ex(...);
+ * } get_user_catch(err)
+ */
+#define get_user_try		uaccess_try
+#define get_user_catch(err)	uaccess_catch(err)
+#define put_user_try		uaccess_try
+#define put_user_catch(err)	uaccess_catch(err)
+
+#define get_user_ex(x, ptr)	do {					\
+	unsigned long __gue_val;					\
+	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
+	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
+} while (0)
+
+#define put_user_ex(x, ptr)						\
+	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+/*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
 #ifdef CONFIG_X86_INTEL_USERCOPY
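Roughly, a get_user_try/get_user_catch(err) pair expands to the following
(a simplified sketch based on the uaccess_try and uaccess_catch macros in
this patch; the fault-time code that records an error into
thread_info->uaccess_err lives outside this header):

do {
	int prev_err = current_thread_info()->uaccess_err;
	current_thread_info()->uaccess_err = 0;		/* uaccess_try */
	barrier();

	/* get_user_ex()/put_user_ex() bodies go here: each is a single
	 * mov with an _ASM_EXTABLE entry and no error check of its own */

	(err) |= current_thread_info()->uaccess_err;	/* uaccess_catch(err) */
	current_thread_info()->uaccess_err = prev_err;
} while (0);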