Diffstat (limited to 'arch/x86/include')
 arch/x86/include/asm/thread_info.h |   1 +
 arch/x86/include/asm/uaccess.h     | 103 +++++++++++++++++++++++++++++++++++
 2 files changed, 104 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 98789647baa9..3f90aeb456bc 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,6 +40,7 @@ struct thread_info {
 	 */
 	__u8 supervisor_stack[0];
 #endif
+	int uaccess_err;
 };
 
 #define INIT_THREAD_INFO(tsk) \
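The one-line thread_info.h change is the core of the scheme: rather than threading an error register through every user access, a fault inside a try region is recorded in this new per-thread field. A hypothetical illustration of the two touch points (the field name is from this patch; the flow is inferred from the macros introduced below):

	/* On a faulting access, the exception fixup records the error: */
	current_thread_info()->uaccess_err = -EFAULT;

	/* At the end of the region, uaccess_catch() folds it into the
	 * caller's error variable: */
	(err) |= current_thread_info()->uaccess_err;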
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 69d2757cca9b..0ec6de4bcb0b 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -199,12 +199,22 @@ extern int __get_user_bad(void);
 		     : "=r" (err)	\
 		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
 
+#define __put_user_asm_ex_u64(x, addr)	\
+	asm volatile("1:	movl %%eax,0(%1)\n"	\
+		     "2:	movl %%edx,4(%1)\n"	\
+		     "3:\n"	\
+		     _ASM_EXTABLE(1b, 2b - 1b)	\
+		     _ASM_EXTABLE(2b, 3b - 2b)	\
+		     : : "A" (x), "r" (addr))
+
 #define __put_user_x8(x, ptr, __ret_pu)	\
 	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
 		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 #else
 #define __put_user_asm_u64(x, ptr, retval) \
 	__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+#define __put_user_asm_ex_u64(x, addr)	\
+	__put_user_asm_ex(x, addr, "q", "", "Zr")
 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
 #endif
 
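The extable entries above are unusual: the second argument of _ASM_EXTABLE() is normally a recovery address, but here 2b - 1b (and 3b - 2b) is the byte length of the potentially faulting mov. No real fixup address can be that small, so the page-fault path can recognize the encoding, skip the faulting instruction, and flag the thread instead of branching to a recovery stub. This page is limited to arch/x86/include, so the companion fixup_exception() change is not shown; a sketch of its expected shape, assuming it matches the rest of this series (not taken from this diff):

	/* arch/x86/mm/extable.c, sketch only */
	fixup = search_exception_tables(regs->ip);
	if (fixup) {
		/* A "fixup" below 16 is an instruction length, not an
		 * address: record the fault in the thread and resume
		 * just past the faulting instruction. */
		if (fixup->fixup < 16) {
			current_thread_info()->uaccess_err = -EFAULT;
			regs->ip += fixup->fixup;
			return 1;
		}
		regs->ip = fixup->fixup;
		return 1;
	}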
@@ -286,6 +296,27 @@ do {	\
 	}	\
 } while (0)
 
+#define __put_user_size_ex(x, ptr, size)	\
+do {	\
+	__chk_user_ptr(ptr);	\
+	switch (size) {	\
+	case 1:	\
+		__put_user_asm_ex(x, ptr, "b", "b", "iq");	\
+		break;	\
+	case 2:	\
+		__put_user_asm_ex(x, ptr, "w", "w", "ir");	\
+		break;	\
+	case 4:	\
+		__put_user_asm_ex(x, ptr, "l", "k", "ir");	\
+		break;	\
+	case 8:	\
+		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
+		break;	\
+	default:	\
+		__put_user_bad();	\
+	}	\
+} while (0)
+
 #else
 
 #define __put_user_size(x, ptr, size, retval, errret)	\
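For a 4-byte store, the whole of __put_user_size_ex() collapses to one mov plus one exception-table entry; roughly (an illustrative hand-expansion of the macros above, not literal compiler output):

	/* __put_user_asm_ex(x, ptr, "l", "k", "ir") becomes: */
	asm volatile("1:	movl %k0,%1\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b - 1b)
		     : : "ir" (x), "m" (__m(ptr)));

Compared with __put_user_size() below, there is no "=r" (err) output and no -EFAULT immediate: the error bookkeeping has moved entirely off the fast path.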
@@ -311,9 +342,12 @@ do {	\
 
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
+#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
 #else
 #define __get_user_asm_u64(x, ptr, retval, errret) \
 	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_ex_u64(x, ptr) \
+	__get_user_asm_ex(x, ptr, "q", "", "=r")
 #endif
 
 #define __get_user_size(x, ptr, size, retval, errret)	\
@@ -350,6 +384,33 @@ do {	\
 		     : "=r" (err), ltype(x)	\
 		     : "m" (__m(addr)), "i" (errret), "0" (err))
 
+#define __get_user_size_ex(x, ptr, size)	\
+do {	\
+	__chk_user_ptr(ptr);	\
+	switch (size) {	\
+	case 1:	\
+		__get_user_asm_ex(x, ptr, "b", "b", "=q");	\
+		break;	\
+	case 2:	\
+		__get_user_asm_ex(x, ptr, "w", "w", "=r");	\
+		break;	\
+	case 4:	\
+		__get_user_asm_ex(x, ptr, "l", "k", "=r");	\
+		break;	\
+	case 8:	\
+		__get_user_asm_ex_u64(x, ptr);	\
+		break;	\
+	default:	\
+		(x) = __get_user_bad();	\
+	}	\
+} while (0)
+
+#define __get_user_asm_ex(x, addr, itype, rtype, ltype)	\
+	asm volatile("1:	mov"itype" %1,%"rtype"0\n"	\
+		     "2:\n"	\
+		     _ASM_EXTABLE(1b, 2b - 1b)	\
+		     : ltype(x) : "m" (__m(addr)))
+
 #define __put_user_nocheck(x, ptr, size)	\
 ({	\
 	int __pu_err;	\
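__get_user_asm_ex() is worth comparing with __get_user_asm() just above it: the err output, the errret immediate, and the out-of-line recovery stub that zeroes the result are all gone; only the extable entry remains. One consequence, inferred from the macros rather than stated in the patch: on a fault the destination register is simply left unmodified, so a value read through the _ex variants should only be consumed once the catch reports success. The 4-byte expansion is roughly:

	/* __get_user_asm_ex(x, addr, "l", "k", "=r") becomes: */
	asm volatile("1:	movl %1,%k0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b - 1b)
		     : "=r" (x) : "m" (__m(addr)));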
@@ -385,6 +446,26 @@ struct __large_struct { unsigned long buf[100]; };
 		     _ASM_EXTABLE(1b, 3b)	\
 		     : "=r"(err)	\
 		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex(x, addr, itype, rtype, ltype)	\
+	asm volatile("1:	mov"itype" %"rtype"0,%1\n"	\
+		     "2:\n"	\
+		     _ASM_EXTABLE(1b, 2b - 1b)	\
+		     : : ltype(x), "m" (__m(addr)))
+
+/*
+ * uaccess_try and catch
+ */
+#define uaccess_try	do {	\
+	int prev_err = current_thread_info()->uaccess_err;	\
+	current_thread_info()->uaccess_err = 0;	\
+	barrier();
+
+#define uaccess_catch(err)	\
+	(err) |= current_thread_info()->uaccess_err;	\
+	current_thread_info()->uaccess_err = prev_err;	\
+} while (0)
+
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
  * @x: Variable to store result.
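uaccess_try and uaccess_catch are deliberately unbalanced: the former opens a do { block, saves and clears uaccess_err (saving is what lets regions nest), and issues barrier() so the compiler cannot hoist the guarded accesses above the clear; the latter ORs any recorded fault into the caller's variable, restores the saved value, and closes the block. A minimal hypothetical caller:

	int err = 0;

	uaccess_try {
		/* *_ex() accesses go here; no per-access error checks */
	} uaccess_catch(err);

	if (err)
		return -EFAULT;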
@@ -408,6 +489,7 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define __get_user(x, ptr) \
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
 /**
  * __put_user: - Write a simple value into user space, with less checking.
  * @x: Value to copy to user space.
@@ -435,6 +517,27 @@ struct __large_struct { unsigned long buf[100]; };
 #define __put_user_unaligned __put_user
 
 /*
+ * {get|put}_user_try and catch
+ *
+ * get_user_try {
+ *	get_user_ex(...);
+ * } get_user_catch(err)
+ */
+#define get_user_try		uaccess_try
+#define get_user_catch(err)	uaccess_catch(err)
+#define put_user_try		uaccess_try
+#define put_user_catch(err)	uaccess_catch(err)
+
+#define get_user_ex(x, ptr)	do {	\
+	unsigned long __gue_val;	\
+	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
+	(x) = (__force __typeof__(*(ptr)))__gue_val;	\
+} while (0)
+
+#define put_user_ex(x, ptr)	\
+	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+/*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
 #ifdef CONFIG_X86_INTEL_USERCOPY
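As a hypothetical end-to-end user, a signal-frame reader of the kind this series targets (ia32 sigcontext restore) can batch a run of fixed-offset field reads under a single access_ok() check and one catch; the function name below is illustrative:

	static int restore_two_regs(struct sigcontext __user *sc,
				    unsigned int *ax, unsigned int *cx)
	{
		int err = 0;

		if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
			return -EFAULT;

		get_user_try {
			get_user_ex(*ax, &sc->ax);
			get_user_ex(*cx, &sc->cx);
		} get_user_catch(err);

		return err;
	}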