diff options
author | Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com> | 2009-01-30 21:16:46 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2009-02-04 20:28:21 -0500 |
commit | 18114f61359ac05e3aa797d53d63f40db41f798d (patch) | |
tree | 7eccf7f9377329b9f0b91ac7b72a7a6344f7c4f1 /arch/x86/include/asm/uaccess.h | |
parent | 019a1369667c3978f9644982ebe6d261dd2bbc40 (diff) |
x86: uaccess: use errret as error value in __put_user_size()
Impact: cleanup
In __put_user_size() macro errret is used for error value.
But if size is 8, errret isn't passed to __put_user_asm_u64().
This behavior is inconsistent.
Signed-off-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/include/asm/uaccess.h')
-rw-r--r-- | arch/x86/include/asm/uaccess.h | 11 |
1 files changed, 6 insertions, 5 deletions
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index b9a24155f7af..b685ece89d5c 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -186,7 +186,7 @@ extern int __get_user_bad(void); | |||
186 | 186 | ||
187 | 187 | ||
188 | #ifdef CONFIG_X86_32 | 188 | #ifdef CONFIG_X86_32 |
189 | #define __put_user_asm_u64(x, addr, err) \ | 189 | #define __put_user_asm_u64(x, addr, err, errret) \ |
190 | asm volatile("1: movl %%eax,0(%2)\n" \ | 190 | asm volatile("1: movl %%eax,0(%2)\n" \ |
191 | "2: movl %%edx,4(%2)\n" \ | 191 | "2: movl %%edx,4(%2)\n" \ |
192 | "3:\n" \ | 192 | "3:\n" \ |
@@ -197,7 +197,7 @@ extern int __get_user_bad(void); | |||
197 | _ASM_EXTABLE(1b, 4b) \ | 197 | _ASM_EXTABLE(1b, 4b) \ |
198 | _ASM_EXTABLE(2b, 4b) \ | 198 | _ASM_EXTABLE(2b, 4b) \ |
199 | : "=r" (err) \ | 199 | : "=r" (err) \ |
200 | : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) | 200 | : "A" (x), "r" (addr), "i" (errret), "0" (err)) |
201 | 201 | ||
202 | #define __put_user_asm_ex_u64(x, addr) \ | 202 | #define __put_user_asm_ex_u64(x, addr) \ |
203 | asm volatile("1: movl %%eax,0(%1)\n" \ | 203 | asm volatile("1: movl %%eax,0(%1)\n" \ |
@@ -211,8 +211,8 @@ extern int __get_user_bad(void); | |||
211 | asm volatile("call __put_user_8" : "=a" (__ret_pu) \ | 211 | asm volatile("call __put_user_8" : "=a" (__ret_pu) \ |
212 | : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") | 212 | : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") |
213 | #else | 213 | #else |
214 | #define __put_user_asm_u64(x, ptr, retval) \ | 214 | #define __put_user_asm_u64(x, ptr, retval, errret) \ |
215 | __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT) | 215 | __put_user_asm(x, ptr, retval, "q", "", "Zr", errret) |
216 | #define __put_user_asm_ex_u64(x, addr) \ | 216 | #define __put_user_asm_ex_u64(x, addr) \ |
217 | __put_user_asm_ex(x, addr, "q", "", "Zr") | 217 | __put_user_asm_ex(x, addr, "q", "", "Zr") |
218 | #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) | 218 | #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) |
@@ -289,7 +289,8 @@ do { \ | |||
289 | __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ | 289 | __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \ |
290 | break; \ | 290 | break; \ |
291 | case 8: \ | 291 | case 8: \ |
292 | __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval); \ | 292 | __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \ |
293 | errret); \ | ||
293 | break; \ | 294 | break; \ |
294 | default: \ | 295 | default: \ |
295 | __put_user_bad(); \ | 296 | __put_user_bad(); \ |