Diffstat (limited to 'arch/x86/include/asm/uaccess.h')
-rw-r--r--  arch/x86/include/asm/uaccess.h  115
1 file changed, 109 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 4340055b7559..0ec6de4bcb0b 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
 
 #define __get_user_x(size, ret, x, ptr)                 \
         asm volatile("call __get_user_" #size           \
-                     : "=a" (ret),"=d" (x)              \
+                     : "=a" (ret), "=d" (x)             \
                      : "0" (ptr))                       \
 
 /* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
 
 #define __put_user_x(size, x, ptr, __ret_pu)                    \
         asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
-                     :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 
 
 
 #ifdef CONFIG_X86_32
-#define __put_user_u64(x, addr, err)                    \
+#define __put_user_asm_u64(x, addr, err)                \
         asm volatile("1: movl %%eax,0(%2)\n"            \
                      "2: movl %%edx,4(%2)\n"            \
                      "3:\n"                             \
@@ -199,12 +199,22 @@ extern int __get_user_bad(void);
                      : "=r" (err)                       \
                      : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
 
+#define __put_user_asm_ex_u64(x, addr)                  \
+        asm volatile("1: movl %%eax,0(%1)\n"            \
+                     "2: movl %%edx,4(%1)\n"            \
+                     "3:\n"                             \
+                     _ASM_EXTABLE(1b, 2b - 1b)          \
+                     _ASM_EXTABLE(2b, 3b - 2b)          \
+                     : : "A" (x), "r" (addr))
+
 #define __put_user_x8(x, ptr, __ret_pu)                  \
         asm volatile("call __put_user_8" : "=a" (__ret_pu) \
                      : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 #else
-#define __put_user_u64(x, ptr, retval) \
+#define __put_user_asm_u64(x, ptr, retval) \
         __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+#define __put_user_asm_ex_u64(x, addr) \
+        __put_user_asm_ex(x, addr, "q", "", "Zr")
 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
 #endif
 
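Note on the new "_ex" variants: unlike __put_user_asm_u64() above, __put_user_asm_ex_u64() has no error output operand and no inline error label. Each _ASM_EXTABLE() entry pairs a possibly-faulting movl with the small offset to the next label (2b - 1b, 3b - 2b), i.e. the length of that instruction. The fixup code that consumes these entries lives outside this header; below is only a minimal sketch of the idea, assuming the x86 fault-fixup path treats such small "fixup" values as skip lengths (the function name is hypothetical):

    /* Sketch only: how a fixup routine could consume the entries above.
     * A fixup value far too small to be a kernel address is read as the
     * length of the faulting mov: record the error for uaccess_catch()
     * and resume at the next instruction instead of jumping to fixup code.
     */
    static int fixup_uaccess_ex(struct pt_regs *regs,
                                const struct exception_table_entry *e)
    {
            if (e->fixup < 16) {            /* skip length, not an address */
                    current_thread_info()->uaccess_err = -EFAULT;
                    regs->ip += e->fixup;   /* step over the faulting mov */
                    return 1;
            }
            regs->ip = e->fixup;            /* classic fixup: jump to handler */
            return 1;
    }
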
@@ -276,10 +286,31 @@ do { \
                 __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                 break;                                          \
         case 4:                                                 \
-                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\
+                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                 break;                                          \
         case 8:                                                 \
-                __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
+                __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval); \
+                break;                                          \
+        default:                                                \
+                __put_user_bad();                               \
+        }                                                       \
+} while (0)
+
+#define __put_user_size_ex(x, ptr, size)                        \
+do {                                                            \
+        __chk_user_ptr(ptr);                                    \
+        switch (size) {                                         \
+        case 1:                                                 \
+                __put_user_asm_ex(x, ptr, "b", "b", "iq");      \
+                break;                                          \
+        case 2:                                                 \
+                __put_user_asm_ex(x, ptr, "w", "w", "ir");      \
+                break;                                          \
+        case 4:                                                 \
+                __put_user_asm_ex(x, ptr, "l", "k", "ir");      \
+                break;                                          \
+        case 8:                                                 \
+                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
                 break;                                          \
         default:                                                \
                 __put_user_bad();                               \
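For illustration, with the __put_user_asm_ex() helper added further down in this patch, the 4-byte case of __put_user_size_ex() boils down to roughly the following (expansion shown for clarity only):

    /* __put_user_asm_ex(x, ptr, "l", "k", "ir"), expanded: a single store,
     * one exception-table entry, and no error output operand.
     */
    asm volatile("1: movl %k0,%1\n"
                 "2:\n"
                 _ASM_EXTABLE(1b, 2b - 1b)
                 : : "ir" (x), "m" (__m(ptr)));
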
@@ -311,9 +342,12 @@ do { \
 
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
+#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
 #else
 #define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_ex_u64(x, ptr) \
+        __get_user_asm_ex(x, ptr, "q", "", "=r")
 #endif
 
 #define __get_user_size(x, ptr, size, retval, errret)           \
@@ -350,6 +384,33 @@ do { \
                      : "=r" (err), ltype(x)             \
                      : "m" (__m(addr)), "i" (errret), "0" (err))
 
+#define __get_user_size_ex(x, ptr, size)                        \
+do {                                                            \
+        __chk_user_ptr(ptr);                                    \
+        switch (size) {                                         \
+        case 1:                                                 \
+                __get_user_asm_ex(x, ptr, "b", "b", "=q");      \
+                break;                                          \
+        case 2:                                                 \
+                __get_user_asm_ex(x, ptr, "w", "w", "=r");      \
+                break;                                          \
+        case 4:                                                 \
+                __get_user_asm_ex(x, ptr, "l", "k", "=r");      \
+                break;                                          \
+        case 8:                                                 \
+                __get_user_asm_ex_u64(x, ptr);                  \
+                break;                                          \
+        default:                                                \
+                (x) = __get_user_bad();                         \
+        }                                                       \
+} while (0)
+
+#define __get_user_asm_ex(x, addr, itype, rtype, ltype)         \
+        asm volatile("1: mov"itype" %1,%"rtype"0\n"             \
+                     "2:\n"                                     \
+                     _ASM_EXTABLE(1b, 2b - 1b)                  \
+                     : ltype(x) : "m" (__m(addr)))
+
 #define __put_user_nocheck(x, ptr, size)                        \
 ({                                                              \
         int __pu_err;                                           \
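The load side mirrors the store side: __get_user_size_ex() selects an operand size and __get_user_asm_ex() emits one mov plus one exception-table entry, with no error register. For example, the 4-byte case expands to roughly:

    /* __get_user_asm_ex(x, ptr, "l", "k", "=r"), expanded. */
    asm volatile("1: movl %1,%k0\n"
                 "2:\n"
                 _ASM_EXTABLE(1b, 2b - 1b)
                 : "=r" (x) : "m" (__m(ptr)));
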
@@ -385,6 +446,26 @@ struct __large_struct { unsigned long buf[100]; };
                      _ASM_EXTABLE(1b, 3b)               \
                      : "=r"(err)                        \
                      : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex(x, addr, itype, rtype, ltype)         \
+        asm volatile("1: mov"itype" %"rtype"0,%1\n"             \
+                     "2:\n"                                     \
+                     _ASM_EXTABLE(1b, 2b - 1b)                  \
+                     : : ltype(x), "m" (__m(addr)))
+
+/*
+ * uaccess_try and catch
+ */
+#define uaccess_try     do {                                    \
+        int prev_err = current_thread_info()->uaccess_err;      \
+        current_thread_info()->uaccess_err = 0;                 \
+        barrier();
+
+#define uaccess_catch(err)                                      \
+        (err) |= current_thread_info()->uaccess_err;            \
+        current_thread_info()->uaccess_err = prev_err;          \
+} while (0)
+
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
  * @x: Variable to store result.
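uaccess_try deliberately opens a do { ... block that only uaccess_catch() closes, so the two must always be paired. Written out, a try/catch region expands to the following shape (illustrative expansion of the two macros above):

    do {
            int prev_err = current_thread_info()->uaccess_err;
            current_thread_info()->uaccess_err = 0;
            barrier();

            /* ... get_user_ex()/put_user_ex() accesses go here ... */

            (err) |= current_thread_info()->uaccess_err;    /* collect any fault */
            current_thread_info()->uaccess_err = prev_err;  /* restore for nesting */
    } while (0)
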
@@ -408,6 +489,7 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define __get_user(x, ptr)                              \
         __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
 /**
  * __put_user: - Write a simple value into user space, with less checking.
  * @x: Value to copy to user space.
@@ -435,6 +517,27 @@ struct __large_struct { unsigned long buf[100]; };
 #define __put_user_unaligned __put_user
 
 /*
+ * {get|put}_user_try and catch
+ *
+ * get_user_try {
+ *      get_user_ex(...);
+ * } get_user_catch(err)
+ */
+#define get_user_try            uaccess_try
+#define get_user_catch(err)     uaccess_catch(err)
+#define put_user_try            uaccess_try
+#define put_user_catch(err)     uaccess_catch(err)
+
+#define get_user_ex(x, ptr)     do {                                    \
+        unsigned long __gue_val;                                        \
+        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
+        (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
+} while (0)
+
+#define put_user_ex(x, ptr)                                             \
+        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+/*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
 #ifdef CONFIG_X86_INTEL_USERCOPY
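A sketch of how a caller might use the new interface, following the pattern in the comment above; the intended callers are outside this header, and the frame and field names here are made up for illustration:

    int err = 0;

    put_user_try {
            put_user_ex(regs->ip, &frame->ip);      /* hypothetical fields */
            put_user_ex(regs->sp, &frame->sp);
            put_user_ex(sig, &frame->sig);
    } put_user_catch(err);

    if (err)
            return -EFAULT;         /* one check for the whole group */
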