-rw-r--r--  arch/alpha/include/asm/uaccess.h    |  86
-rw-r--r--  arch/arm/include/asm/uaccess.h      |  96
-rw-r--r--  arch/arm64/include/asm/uaccess.h    |   4
-rw-r--r--  arch/avr32/include/asm/uaccess.h    |  24
-rw-r--r--  arch/blackfin/include/asm/uaccess.h |  32
-rw-r--r--  arch/cris/include/asm/uaccess.h     | 117
-rw-r--r--  arch/frv/include/asm/segment.h      |   2
-rw-r--r--  arch/ia64/include/asm/uaccess.h     |  11
-rw-r--r--  arch/m32r/include/asm/uaccess.h     |  88
-rw-r--r--  arch/m68k/include/asm/segment.h     |   2
-rw-r--r--  arch/m68k/include/asm/uaccess_mm.h  |  40
-rw-r--r--  arch/metag/include/asm/uaccess.h    |  25
-rw-r--r--  arch/openrisc/include/asm/uaccess.h |   4
-rw-r--r--  arch/parisc/include/asm/uaccess.h   | 116
-rw-r--r--  arch/sh/include/asm/segment.h       |   2
-rw-r--r--  arch/sh/include/asm/uaccess.h       |   4
-rw-r--r--  arch/sh/include/asm/uaccess_64.h    |   8
-rw-r--r--  arch/sparc/include/asm/uaccess_32.h | 339
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h | 222
-rw-r--r--  arch/x86/include/asm/uaccess.h      |   2
-rw-r--r--  arch/xtensa/include/asm/uaccess.h   |  90
21 files changed, 700 insertions, 614 deletions
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 766fdfde2b7a..9b0d40093c9a 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -27,7 +27,7 @@ | |||
27 | #define get_ds() (KERNEL_DS) | 27 | #define get_ds() (KERNEL_DS) |
28 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | 28 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) |
29 | 29 | ||
30 | #define segment_eq(a,b) ((a).seg == (b).seg) | 30 | #define segment_eq(a, b) ((a).seg == (b).seg) |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * Is a address valid? This does a straightforward calculation rather | 33 | * Is a address valid? This does a straightforward calculation rather |
@@ -39,13 +39,13 @@ | |||
39 | * - AND "addr+size" doesn't have any high-bits set | 39 | * - AND "addr+size" doesn't have any high-bits set |
40 | * - OR we are in kernel mode. | 40 | * - OR we are in kernel mode. |
41 | */ | 41 | */ |
42 | #define __access_ok(addr,size,segment) \ | 42 | #define __access_ok(addr, size, segment) \ |
43 | (((segment).seg & (addr | size | (addr+size))) == 0) | 43 | (((segment).seg & (addr | size | (addr+size))) == 0) |
44 | 44 | ||
45 | #define access_ok(type,addr,size) \ | 45 | #define access_ok(type, addr, size) \ |
46 | ({ \ | 46 | ({ \ |
47 | __chk_user_ptr(addr); \ | 47 | __chk_user_ptr(addr); \ |
48 | __access_ok(((unsigned long)(addr)),(size),get_fs()); \ | 48 | __access_ok(((unsigned long)(addr)), (size), get_fs()); \ |
49 | }) | 49 | }) |
50 | 50 | ||
51 | /* | 51 | /* |
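A worked illustration of the bitmask test above (an editorial aside, not part of the patch): if the segment limit has exactly the address bits set that a user pointer may never have, the single AND rejects a high address, an absurd size, and a sum that spills into the forbidden bits, while KERNEL_DS (seg == 0) accepts everything. The 42-bit limit below is a made-up value for the demo.

#include <stdint.h>
#include <stdio.h>

#define USER_SEG   (~((1ULL << 42) - 1))	/* hypothetical: bits a user address may not have */
#define KERNEL_SEG 0ULL				/* KERNEL_DS: nothing is forbidden */

/* Same shape as __access_ok(addr, size, segment) in the hunk above. */
static int access_ok_demo(uint64_t seg, uint64_t addr, uint64_t size)
{
	return (seg & (addr | size | (addr + size))) == 0;
}

int main(void)
{
	printf("%d\n", access_ok_demo(USER_SEG, 0x10000, 0x1000));	/* 1: in range */
	printf("%d\n", access_ok_demo(USER_SEG, 1ULL << 50, 0x1000));	/* 0: high bits in addr */
	printf("%d\n", access_ok_demo(USER_SEG, 0x10000, 1ULL << 50));	/* 0: absurd size */
	printf("%d\n", access_ok_demo(KERNEL_SEG, 1ULL << 50, 0x1000));	/* 1: kernel mode */
	return 0;
}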
@@ -60,20 +60,20 @@ | |||
60 | * (a) re-use the arguments for side effects (sizeof/typeof is ok) | 60 | * (a) re-use the arguments for side effects (sizeof/typeof is ok) |
61 | * (b) require any knowledge of processes at this stage | 61 | * (b) require any knowledge of processes at this stage |
62 | */ | 62 | */ |
63 | #define put_user(x,ptr) \ | 63 | #define put_user(x, ptr) \ |
64 | __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs()) | 64 | __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs()) |
65 | #define get_user(x,ptr) \ | 65 | #define get_user(x, ptr) \ |
66 | __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs()) | 66 | __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs()) |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * The "__xxx" versions do not do address space checking, useful when | 69 | * The "__xxx" versions do not do address space checking, useful when |
70 | * doing multiple accesses to the same area (the programmer has to do the | 70 | * doing multiple accesses to the same area (the programmer has to do the |
71 | * checks by hand with "access_ok()") | 71 | * checks by hand with "access_ok()") |
72 | */ | 72 | */ |
73 | #define __put_user(x,ptr) \ | 73 | #define __put_user(x, ptr) \ |
74 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 74 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
75 | #define __get_user(x,ptr) \ | 75 | #define __get_user(x, ptr) \ |
76 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | 76 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to | 79 | * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to |
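The comment above is the contract behind the two families. A hedged sketch of the intended pattern (struct demo_req, demo_read_req and their fields are invented for the example): one explicit access_ok() over the whole object, then the unchecked __get_user() for the individual fields.

#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical request layout, for illustration only. */
struct demo_req {
	u32 a;
	u32 b;
};

static long demo_read_req(struct demo_req __user *u, struct demo_req *k)
{
	long err = 0;

	/* One range check for the whole object ... */
	if (!access_ok(VERIFY_READ, u, sizeof(*u)))
		return -EFAULT;

	/* ... then the unchecked variants, which skip the range check. */
	err |= __get_user(k->a, &u->a);
	err |= __get_user(k->b, &u->b);

	return err ? -EFAULT : 0;
}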
@@ -84,7 +84,7 @@ | |||
84 | 84 | ||
85 | extern void __get_user_unknown(void); | 85 | extern void __get_user_unknown(void); |
86 | 86 | ||
87 | #define __get_user_nocheck(x,ptr,size) \ | 87 | #define __get_user_nocheck(x, ptr, size) \ |
88 | ({ \ | 88 | ({ \ |
89 | long __gu_err = 0; \ | 89 | long __gu_err = 0; \ |
90 | unsigned long __gu_val; \ | 90 | unsigned long __gu_val; \ |
@@ -96,16 +96,16 @@ extern void __get_user_unknown(void); | |||
96 | case 8: __get_user_64(ptr); break; \ | 96 | case 8: __get_user_64(ptr); break; \ |
97 | default: __get_user_unknown(); break; \ | 97 | default: __get_user_unknown(); break; \ |
98 | } \ | 98 | } \ |
99 | (x) = (__typeof__(*(ptr))) __gu_val; \ | 99 | (x) = (__force __typeof__(*(ptr))) __gu_val; \ |
100 | __gu_err; \ | 100 | __gu_err; \ |
101 | }) | 101 | }) |
102 | 102 | ||
103 | #define __get_user_check(x,ptr,size,segment) \ | 103 | #define __get_user_check(x, ptr, size, segment) \ |
104 | ({ \ | 104 | ({ \ |
105 | long __gu_err = -EFAULT; \ | 105 | long __gu_err = -EFAULT; \ |
106 | unsigned long __gu_val = 0; \ | 106 | unsigned long __gu_val = 0; \ |
107 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | 107 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ |
108 | if (__access_ok((unsigned long)__gu_addr,size,segment)) { \ | 108 | if (__access_ok((unsigned long)__gu_addr, size, segment)) { \ |
109 | __gu_err = 0; \ | 109 | __gu_err = 0; \ |
110 | switch (size) { \ | 110 | switch (size) { \ |
111 | case 1: __get_user_8(__gu_addr); break; \ | 111 | case 1: __get_user_8(__gu_addr); break; \ |
@@ -115,7 +115,7 @@ extern void __get_user_unknown(void); | |||
115 | default: __get_user_unknown(); break; \ | 115 | default: __get_user_unknown(); break; \ |
116 | } \ | 116 | } \ |
117 | } \ | 117 | } \ |
118 | (x) = (__typeof__(*(ptr))) __gu_val; \ | 118 | (x) = (__force __typeof__(*(ptr))) __gu_val; \ |
119 | __gu_err; \ | 119 | __gu_err; \ |
120 | }) | 120 | }) |
121 | 121 | ||
@@ -201,31 +201,31 @@ struct __large_struct { unsigned long buf[100]; }; | |||
201 | 201 | ||
202 | extern void __put_user_unknown(void); | 202 | extern void __put_user_unknown(void); |
203 | 203 | ||
204 | #define __put_user_nocheck(x,ptr,size) \ | 204 | #define __put_user_nocheck(x, ptr, size) \ |
205 | ({ \ | 205 | ({ \ |
206 | long __pu_err = 0; \ | 206 | long __pu_err = 0; \ |
207 | __chk_user_ptr(ptr); \ | 207 | __chk_user_ptr(ptr); \ |
208 | switch (size) { \ | 208 | switch (size) { \ |
209 | case 1: __put_user_8(x,ptr); break; \ | 209 | case 1: __put_user_8(x, ptr); break; \ |
210 | case 2: __put_user_16(x,ptr); break; \ | 210 | case 2: __put_user_16(x, ptr); break; \ |
211 | case 4: __put_user_32(x,ptr); break; \ | 211 | case 4: __put_user_32(x, ptr); break; \ |
212 | case 8: __put_user_64(x,ptr); break; \ | 212 | case 8: __put_user_64(x, ptr); break; \ |
213 | default: __put_user_unknown(); break; \ | 213 | default: __put_user_unknown(); break; \ |
214 | } \ | 214 | } \ |
215 | __pu_err; \ | 215 | __pu_err; \ |
216 | }) | 216 | }) |
217 | 217 | ||
218 | #define __put_user_check(x,ptr,size,segment) \ | 218 | #define __put_user_check(x, ptr, size, segment) \ |
219 | ({ \ | 219 | ({ \ |
220 | long __pu_err = -EFAULT; \ | 220 | long __pu_err = -EFAULT; \ |
221 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | 221 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ |
222 | if (__access_ok((unsigned long)__pu_addr,size,segment)) { \ | 222 | if (__access_ok((unsigned long)__pu_addr, size, segment)) { \ |
223 | __pu_err = 0; \ | 223 | __pu_err = 0; \ |
224 | switch (size) { \ | 224 | switch (size) { \ |
225 | case 1: __put_user_8(x,__pu_addr); break; \ | 225 | case 1: __put_user_8(x, __pu_addr); break; \ |
226 | case 2: __put_user_16(x,__pu_addr); break; \ | 226 | case 2: __put_user_16(x, __pu_addr); break; \ |
227 | case 4: __put_user_32(x,__pu_addr); break; \ | 227 | case 4: __put_user_32(x, __pu_addr); break; \ |
228 | case 8: __put_user_64(x,__pu_addr); break; \ | 228 | case 8: __put_user_64(x, __pu_addr); break; \ |
229 | default: __put_user_unknown(); break; \ | 229 | default: __put_user_unknown(); break; \ |
230 | } \ | 230 | } \ |
231 | } \ | 231 | } \ |
@@ -237,7 +237,7 @@ extern void __put_user_unknown(void); | |||
237 | * instead of writing: this is because they do not write to | 237 | * instead of writing: this is because they do not write to |
238 | * any memory gcc knows about, so there are no aliasing issues | 238 | * any memory gcc knows about, so there are no aliasing issues |
239 | */ | 239 | */ |
240 | #define __put_user_64(x,addr) \ | 240 | #define __put_user_64(x, addr) \ |
241 | __asm__ __volatile__("1: stq %r2,%1\n" \ | 241 | __asm__ __volatile__("1: stq %r2,%1\n" \ |
242 | "2:\n" \ | 242 | "2:\n" \ |
243 | ".section __ex_table,\"a\"\n" \ | 243 | ".section __ex_table,\"a\"\n" \ |
@@ -247,7 +247,7 @@ __asm__ __volatile__("1: stq %r2,%1\n" \ | |||
247 | : "=r"(__pu_err) \ | 247 | : "=r"(__pu_err) \ |
248 | : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) | 248 | : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) |
249 | 249 | ||
250 | #define __put_user_32(x,addr) \ | 250 | #define __put_user_32(x, addr) \ |
251 | __asm__ __volatile__("1: stl %r2,%1\n" \ | 251 | __asm__ __volatile__("1: stl %r2,%1\n" \ |
252 | "2:\n" \ | 252 | "2:\n" \ |
253 | ".section __ex_table,\"a\"\n" \ | 253 | ".section __ex_table,\"a\"\n" \ |
@@ -260,7 +260,7 @@ __asm__ __volatile__("1: stl %r2,%1\n" \ | |||
260 | #ifdef __alpha_bwx__ | 260 | #ifdef __alpha_bwx__ |
261 | /* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ | 261 | /* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ |
262 | 262 | ||
263 | #define __put_user_16(x,addr) \ | 263 | #define __put_user_16(x, addr) \ |
264 | __asm__ __volatile__("1: stw %r2,%1\n" \ | 264 | __asm__ __volatile__("1: stw %r2,%1\n" \ |
265 | "2:\n" \ | 265 | "2:\n" \ |
266 | ".section __ex_table,\"a\"\n" \ | 266 | ".section __ex_table,\"a\"\n" \ |
@@ -270,7 +270,7 @@ __asm__ __volatile__("1: stw %r2,%1\n" \ | |||
270 | : "=r"(__pu_err) \ | 270 | : "=r"(__pu_err) \ |
271 | : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) | 271 | : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) |
272 | 272 | ||
273 | #define __put_user_8(x,addr) \ | 273 | #define __put_user_8(x, addr) \ |
274 | __asm__ __volatile__("1: stb %r2,%1\n" \ | 274 | __asm__ __volatile__("1: stb %r2,%1\n" \ |
275 | "2:\n" \ | 275 | "2:\n" \ |
276 | ".section __ex_table,\"a\"\n" \ | 276 | ".section __ex_table,\"a\"\n" \ |
@@ -283,7 +283,7 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ | |||
283 | /* Unfortunately, we can't get an unaligned access trap for the sub-word | 283 | /* Unfortunately, we can't get an unaligned access trap for the sub-word |
284 | write, so we have to do a general unaligned operation. */ | 284 | write, so we have to do a general unaligned operation. */ |
285 | 285 | ||
286 | #define __put_user_16(x,addr) \ | 286 | #define __put_user_16(x, addr) \ |
287 | { \ | 287 | { \ |
288 | long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \ | 288 | long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \ |
289 | __asm__ __volatile__( \ | 289 | __asm__ __volatile__( \ |
@@ -308,13 +308,13 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ | |||
308 | " .long 4b - .\n" \ | 308 | " .long 4b - .\n" \ |
309 | " lda $31, 5b-4b(%0)\n" \ | 309 | " lda $31, 5b-4b(%0)\n" \ |
310 | ".previous" \ | 310 | ".previous" \ |
311 | : "=r"(__pu_err), "=&r"(__pu_tmp1), \ | 311 | : "=r"(__pu_err), "=&r"(__pu_tmp1), \ |
312 | "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ | 312 | "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ |
313 | "=&r"(__pu_tmp4) \ | 313 | "=&r"(__pu_tmp4) \ |
314 | : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \ | 314 | : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \ |
315 | } | 315 | } |
316 | 316 | ||
317 | #define __put_user_8(x,addr) \ | 317 | #define __put_user_8(x, addr) \ |
318 | { \ | 318 | { \ |
319 | long __pu_tmp1, __pu_tmp2; \ | 319 | long __pu_tmp1, __pu_tmp2; \ |
320 | __asm__ __volatile__( \ | 320 | __asm__ __volatile__( \ |
@@ -330,7 +330,7 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ | |||
330 | " .long 2b - .\n" \ | 330 | " .long 2b - .\n" \ |
331 | " lda $31, 3b-2b(%0)\n" \ | 331 | " lda $31, 3b-2b(%0)\n" \ |
332 | ".previous" \ | 332 | ".previous" \ |
333 | : "=r"(__pu_err), \ | 333 | : "=r"(__pu_err), \ |
334 | "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ | 334 | "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ |
335 | : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ | 335 | : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ |
336 | } | 336 | } |
@@ -366,7 +366,7 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len) | |||
366 | : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) | 366 | : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) |
367 | : __module_address(__copy_user) | 367 | : __module_address(__copy_user) |
368 | "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) | 368 | "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) |
369 | : "$1","$2","$3","$4","$5","$28","memory"); | 369 | : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); |
370 | 370 | ||
371 | return __cu_len; | 371 | return __cu_len; |
372 | } | 372 | } |
@@ -379,15 +379,15 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali | |||
379 | return len; | 379 | return len; |
380 | } | 380 | } |
381 | 381 | ||
382 | #define __copy_to_user(to,from,n) \ | 382 | #define __copy_to_user(to, from, n) \ |
383 | ({ \ | 383 | ({ \ |
384 | __chk_user_ptr(to); \ | 384 | __chk_user_ptr(to); \ |
385 | __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \ | 385 | __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \ |
386 | }) | 386 | }) |
387 | #define __copy_from_user(to,from,n) \ | 387 | #define __copy_from_user(to, from, n) \ |
388 | ({ \ | 388 | ({ \ |
389 | __chk_user_ptr(from); \ | 389 | __chk_user_ptr(from); \ |
390 | __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \ | 390 | __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \ |
391 | }) | 391 | }) |
392 | 392 | ||
393 | #define __copy_to_user_inatomic __copy_to_user | 393 | #define __copy_to_user_inatomic __copy_to_user |
@@ -418,7 +418,7 @@ __clear_user(void __user *to, long len) | |||
418 | : "=r"(__cl_len), "=r"(__cl_to) | 418 | : "=r"(__cl_len), "=r"(__cl_to) |
419 | : __module_address(__do_clear_user) | 419 | : __module_address(__do_clear_user) |
420 | "0"(__cl_len), "1"(__cl_to) | 420 | "0"(__cl_len), "1"(__cl_to) |
421 | : "$1","$2","$3","$4","$5","$28","memory"); | 421 | : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); |
422 | return __cl_len; | 422 | return __cl_len; |
423 | } | 423 | } |
424 | 424 | ||
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 4767eb9caa78..ce0786efd26c 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -73,7 +73,7 @@ static inline void set_fs(mm_segment_t fs) | |||
73 | modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); | 73 | modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); |
74 | } | 74 | } |
75 | 75 | ||
76 | #define segment_eq(a,b) ((a) == (b)) | 76 | #define segment_eq(a, b) ((a) == (b)) |
77 | 77 | ||
78 | #define __addr_ok(addr) ({ \ | 78 | #define __addr_ok(addr) ({ \ |
79 | unsigned long flag; \ | 79 | unsigned long flag; \ |
@@ -84,7 +84,7 @@ static inline void set_fs(mm_segment_t fs) | |||
84 | (flag == 0); }) | 84 | (flag == 0); }) |
85 | 85 | ||
86 | /* We use 33-bit arithmetic here... */ | 86 | /* We use 33-bit arithmetic here... */ |
87 | #define __range_ok(addr,size) ({ \ | 87 | #define __range_ok(addr, size) ({ \ |
88 | unsigned long flag, roksum; \ | 88 | unsigned long flag, roksum; \ |
89 | __chk_user_ptr(addr); \ | 89 | __chk_user_ptr(addr); \ |
90 | __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \ | 90 | __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \ |
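Spelled out in portable C, the "33-bit arithmetic" above amounts to forming addr + size with one extra bit so that a sum wrapping past 2^32 still fails the comparison; the real check is the two-instruction asm sequence in the hunk, and the limit below is a made-up TASK_SIZE-style value.

#include <stdint.h>
#include <stdio.h>

/* Equivalent of __range_ok(addr, size): returns 0 for "ok". */
static int range_ok(uint32_t addr, uint32_t size, uint32_t limit)
{
	uint64_t end = (uint64_t)addr + size;	/* the 33rd bit lives here */

	return end <= limit ? 0 : 1;
}

int main(void)
{
	const uint32_t limit = 0xbf000000;	/* hypothetical user-space limit */

	printf("%d\n", range_ok(0x00010000, 0x1000, limit));	/* 0: fits */
	printf("%d\n", range_ok(0xbefff000, 0x2000, limit));	/* 1: crosses the limit */
	printf("%d\n", range_ok(0xfffff000, 0x2000, limit));	/* 1: wraps past 2^32 */
	return 0;
}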
@@ -123,7 +123,7 @@ extern int __get_user_64t_4(void *); | |||
123 | #define __GUP_CLOBBER_32t_8 "lr", "cc" | 123 | #define __GUP_CLOBBER_32t_8 "lr", "cc" |
124 | #define __GUP_CLOBBER_8 "lr", "cc" | 124 | #define __GUP_CLOBBER_8 "lr", "cc" |
125 | 125 | ||
126 | #define __get_user_x(__r2,__p,__e,__l,__s) \ | 126 | #define __get_user_x(__r2, __p, __e, __l, __s) \ |
127 | __asm__ __volatile__ ( \ | 127 | __asm__ __volatile__ ( \ |
128 | __asmeq("%0", "r0") __asmeq("%1", "r2") \ | 128 | __asmeq("%0", "r0") __asmeq("%1", "r2") \ |
129 | __asmeq("%3", "r1") \ | 129 | __asmeq("%3", "r1") \ |
@@ -134,7 +134,7 @@ extern int __get_user_64t_4(void *); | |||
134 | 134 | ||
135 | /* narrowing a double-word get into a single 32bit word register: */ | 135 | /* narrowing a double-word get into a single 32bit word register: */ |
136 | #ifdef __ARMEB__ | 136 | #ifdef __ARMEB__ |
137 | #define __get_user_x_32t(__r2, __p, __e, __l, __s) \ | 137 | #define __get_user_x_32t(__r2, __p, __e, __l, __s) \ |
138 | __get_user_x(__r2, __p, __e, __l, 32t_8) | 138 | __get_user_x(__r2, __p, __e, __l, 32t_8) |
139 | #else | 139 | #else |
140 | #define __get_user_x_32t __get_user_x | 140 | #define __get_user_x_32t __get_user_x |
@@ -158,7 +158,7 @@ extern int __get_user_64t_4(void *); | |||
158 | #endif | 158 | #endif |
159 | 159 | ||
160 | 160 | ||
161 | #define __get_user_check(x,p) \ | 161 | #define __get_user_check(x, p) \ |
162 | ({ \ | 162 | ({ \ |
163 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ | 163 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ |
164 | register const typeof(*(p)) __user *__p asm("r0") = (p);\ | 164 | register const typeof(*(p)) __user *__p asm("r0") = (p);\ |
@@ -196,10 +196,10 @@ extern int __get_user_64t_4(void *); | |||
196 | __e; \ | 196 | __e; \ |
197 | }) | 197 | }) |
198 | 198 | ||
199 | #define get_user(x,p) \ | 199 | #define get_user(x, p) \ |
200 | ({ \ | 200 | ({ \ |
201 | might_fault(); \ | 201 | might_fault(); \ |
202 | __get_user_check(x,p); \ | 202 | __get_user_check(x, p); \ |
203 | }) | 203 | }) |
204 | 204 | ||
205 | extern int __put_user_1(void *, unsigned int); | 205 | extern int __put_user_1(void *, unsigned int); |
@@ -207,7 +207,7 @@ extern int __put_user_2(void *, unsigned int); | |||
207 | extern int __put_user_4(void *, unsigned int); | 207 | extern int __put_user_4(void *, unsigned int); |
208 | extern int __put_user_8(void *, unsigned long long); | 208 | extern int __put_user_8(void *, unsigned long long); |
209 | 209 | ||
210 | #define __put_user_x(__r2,__p,__e,__l,__s) \ | 210 | #define __put_user_x(__r2, __p, __e, __l, __s) \ |
211 | __asm__ __volatile__ ( \ | 211 | __asm__ __volatile__ ( \ |
212 | __asmeq("%0", "r0") __asmeq("%2", "r2") \ | 212 | __asmeq("%0", "r0") __asmeq("%2", "r2") \ |
213 | __asmeq("%3", "r1") \ | 213 | __asmeq("%3", "r1") \ |
@@ -216,7 +216,7 @@ extern int __put_user_8(void *, unsigned long long); | |||
216 | : "0" (__p), "r" (__r2), "r" (__l) \ | 216 | : "0" (__p), "r" (__r2), "r" (__l) \ |
217 | : "ip", "lr", "cc") | 217 | : "ip", "lr", "cc") |
218 | 218 | ||
219 | #define __put_user_check(x,p) \ | 219 | #define __put_user_check(x, p) \ |
220 | ({ \ | 220 | ({ \ |
221 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ | 221 | unsigned long __limit = current_thread_info()->addr_limit - 1; \ |
222 | const typeof(*(p)) __user *__tmp_p = (p); \ | 222 | const typeof(*(p)) __user *__tmp_p = (p); \ |
@@ -242,10 +242,10 @@ extern int __put_user_8(void *, unsigned long long); | |||
242 | __e; \ | 242 | __e; \ |
243 | }) | 243 | }) |
244 | 244 | ||
245 | #define put_user(x,p) \ | 245 | #define put_user(x, p) \ |
246 | ({ \ | 246 | ({ \ |
247 | might_fault(); \ | 247 | might_fault(); \ |
248 | __put_user_check(x,p); \ | 248 | __put_user_check(x, p); \ |
249 | }) | 249 | }) |
250 | 250 | ||
251 | #else /* CONFIG_MMU */ | 251 | #else /* CONFIG_MMU */ |
@@ -255,21 +255,21 @@ extern int __put_user_8(void *, unsigned long long); | |||
255 | */ | 255 | */ |
256 | #define USER_DS KERNEL_DS | 256 | #define USER_DS KERNEL_DS |
257 | 257 | ||
258 | #define segment_eq(a,b) (1) | 258 | #define segment_eq(a, b) (1) |
259 | #define __addr_ok(addr) ((void)(addr),1) | 259 | #define __addr_ok(addr) ((void)(addr), 1) |
260 | #define __range_ok(addr,size) ((void)(addr),0) | 260 | #define __range_ok(addr, size) ((void)(addr), 0) |
261 | #define get_fs() (KERNEL_DS) | 261 | #define get_fs() (KERNEL_DS) |
262 | 262 | ||
263 | static inline void set_fs(mm_segment_t fs) | 263 | static inline void set_fs(mm_segment_t fs) |
264 | { | 264 | { |
265 | } | 265 | } |
266 | 266 | ||
267 | #define get_user(x,p) __get_user(x,p) | 267 | #define get_user(x, p) __get_user(x, p) |
268 | #define put_user(x,p) __put_user(x,p) | 268 | #define put_user(x, p) __put_user(x, p) |
269 | 269 | ||
270 | #endif /* CONFIG_MMU */ | 270 | #endif /* CONFIG_MMU */ |
271 | 271 | ||
272 | #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) | 272 | #define access_ok(type, addr, size) (__range_ok(addr, size) == 0) |
273 | 273 | ||
274 | #define user_addr_max() \ | 274 | #define user_addr_max() \ |
275 | (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs()) | 275 | (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs()) |
@@ -283,35 +283,35 @@ static inline void set_fs(mm_segment_t fs) | |||
283 | * error occurs, and leave it unchanged on success. Note that these | 283 | * error occurs, and leave it unchanged on success. Note that these |
284 | * versions are void (ie, don't return a value as such). | 284 | * versions are void (ie, don't return a value as such). |
285 | */ | 285 | */ |
286 | #define __get_user(x,ptr) \ | 286 | #define __get_user(x, ptr) \ |
287 | ({ \ | 287 | ({ \ |
288 | long __gu_err = 0; \ | 288 | long __gu_err = 0; \ |
289 | __get_user_err((x),(ptr),__gu_err); \ | 289 | __get_user_err((x), (ptr), __gu_err); \ |
290 | __gu_err; \ | 290 | __gu_err; \ |
291 | }) | 291 | }) |
292 | 292 | ||
293 | #define __get_user_error(x,ptr,err) \ | 293 | #define __get_user_error(x, ptr, err) \ |
294 | ({ \ | 294 | ({ \ |
295 | __get_user_err((x),(ptr),err); \ | 295 | __get_user_err((x), (ptr), err); \ |
296 | (void) 0; \ | 296 | (void) 0; \ |
297 | }) | 297 | }) |
298 | 298 | ||
299 | #define __get_user_err(x,ptr,err) \ | 299 | #define __get_user_err(x, ptr, err) \ |
300 | do { \ | 300 | do { \ |
301 | unsigned long __gu_addr = (unsigned long)(ptr); \ | 301 | unsigned long __gu_addr = (unsigned long)(ptr); \ |
302 | unsigned long __gu_val; \ | 302 | unsigned long __gu_val; \ |
303 | __chk_user_ptr(ptr); \ | 303 | __chk_user_ptr(ptr); \ |
304 | might_fault(); \ | 304 | might_fault(); \ |
305 | switch (sizeof(*(ptr))) { \ | 305 | switch (sizeof(*(ptr))) { \ |
306 | case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ | 306 | case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \ |
307 | case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ | 307 | case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \ |
308 | case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \ | 308 | case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \ |
309 | default: (__gu_val) = __get_user_bad(); \ | 309 | default: (__gu_val) = __get_user_bad(); \ |
310 | } \ | 310 | } \ |
311 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 311 | (x) = (__typeof__(*(ptr)))__gu_val; \ |
312 | } while (0) | 312 | } while (0) |
313 | 313 | ||
314 | #define __get_user_asm_byte(x,addr,err) \ | 314 | #define __get_user_asm_byte(x, addr, err) \ |
315 | __asm__ __volatile__( \ | 315 | __asm__ __volatile__( \ |
316 | "1: " TUSER(ldrb) " %1,[%2],#0\n" \ | 316 | "1: " TUSER(ldrb) " %1,[%2],#0\n" \ |
317 | "2:\n" \ | 317 | "2:\n" \ |
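The distinction drawn above is that __get_user() returns its error while __get_user_error() is void and folds the error into a variable the caller supplies, which suits long unrolled sequences. A hedged sketch of that pattern (struct demo_frame and demo_restore are invented; the shape mirrors how signal-restore code accumulates a single error):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical user-visible register frame, for illustration only. */
struct demo_frame {
	unsigned long r0;
	unsigned long r1;
	unsigned long pc;
};

static int demo_restore(struct demo_frame __user *frame,
			unsigned long *r0, unsigned long *r1, unsigned long *pc)
{
	int err = 0;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		return -EFAULT;

	/* Each call sets err only if its access faults and leaves it
	 * untouched on success, so one final test covers the lot. */
	__get_user_error(*r0, &frame->r0, err);
	__get_user_error(*r1, &frame->r1, err);
	__get_user_error(*pc, &frame->pc, err);

	return err ? -EFAULT : 0;
}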
@@ -330,7 +330,7 @@ do { \ | |||
330 | : "cc") | 330 | : "cc") |
331 | 331 | ||
332 | #ifndef __ARMEB__ | 332 | #ifndef __ARMEB__ |
333 | #define __get_user_asm_half(x,__gu_addr,err) \ | 333 | #define __get_user_asm_half(x, __gu_addr, err) \ |
334 | ({ \ | 334 | ({ \ |
335 | unsigned long __b1, __b2; \ | 335 | unsigned long __b1, __b2; \ |
336 | __get_user_asm_byte(__b1, __gu_addr, err); \ | 336 | __get_user_asm_byte(__b1, __gu_addr, err); \ |
@@ -338,7 +338,7 @@ do { \ | |||
338 | (x) = __b1 | (__b2 << 8); \ | 338 | (x) = __b1 | (__b2 << 8); \ |
339 | }) | 339 | }) |
340 | #else | 340 | #else |
341 | #define __get_user_asm_half(x,__gu_addr,err) \ | 341 | #define __get_user_asm_half(x, __gu_addr, err) \ |
342 | ({ \ | 342 | ({ \ |
343 | unsigned long __b1, __b2; \ | 343 | unsigned long __b1, __b2; \ |
344 | __get_user_asm_byte(__b1, __gu_addr, err); \ | 344 | __get_user_asm_byte(__b1, __gu_addr, err); \ |
@@ -347,7 +347,7 @@ do { \ | |||
347 | }) | 347 | }) |
348 | #endif | 348 | #endif |
349 | 349 | ||
350 | #define __get_user_asm_word(x,addr,err) \ | 350 | #define __get_user_asm_word(x, addr, err) \ |
351 | __asm__ __volatile__( \ | 351 | __asm__ __volatile__( \ |
352 | "1: " TUSER(ldr) " %1,[%2],#0\n" \ | 352 | "1: " TUSER(ldr) " %1,[%2],#0\n" \ |
353 | "2:\n" \ | 353 | "2:\n" \ |
@@ -365,35 +365,35 @@ do { \ | |||
365 | : "r" (addr), "i" (-EFAULT) \ | 365 | : "r" (addr), "i" (-EFAULT) \ |
366 | : "cc") | 366 | : "cc") |
367 | 367 | ||
368 | #define __put_user(x,ptr) \ | 368 | #define __put_user(x, ptr) \ |
369 | ({ \ | 369 | ({ \ |
370 | long __pu_err = 0; \ | 370 | long __pu_err = 0; \ |
371 | __put_user_err((x),(ptr),__pu_err); \ | 371 | __put_user_err((x), (ptr), __pu_err); \ |
372 | __pu_err; \ | 372 | __pu_err; \ |
373 | }) | 373 | }) |
374 | 374 | ||
375 | #define __put_user_error(x,ptr,err) \ | 375 | #define __put_user_error(x, ptr, err) \ |
376 | ({ \ | 376 | ({ \ |
377 | __put_user_err((x),(ptr),err); \ | 377 | __put_user_err((x), (ptr), err); \ |
378 | (void) 0; \ | 378 | (void) 0; \ |
379 | }) | 379 | }) |
380 | 380 | ||
381 | #define __put_user_err(x,ptr,err) \ | 381 | #define __put_user_err(x, ptr, err) \ |
382 | do { \ | 382 | do { \ |
383 | unsigned long __pu_addr = (unsigned long)(ptr); \ | 383 | unsigned long __pu_addr = (unsigned long)(ptr); \ |
384 | __typeof__(*(ptr)) __pu_val = (x); \ | 384 | __typeof__(*(ptr)) __pu_val = (x); \ |
385 | __chk_user_ptr(ptr); \ | 385 | __chk_user_ptr(ptr); \ |
386 | might_fault(); \ | 386 | might_fault(); \ |
387 | switch (sizeof(*(ptr))) { \ | 387 | switch (sizeof(*(ptr))) { \ |
388 | case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ | 388 | case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \ |
389 | case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ | 389 | case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \ |
390 | case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \ | 390 | case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \ |
391 | case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \ | 391 | case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \ |
392 | default: __put_user_bad(); \ | 392 | default: __put_user_bad(); \ |
393 | } \ | 393 | } \ |
394 | } while (0) | 394 | } while (0) |
395 | 395 | ||
396 | #define __put_user_asm_byte(x,__pu_addr,err) \ | 396 | #define __put_user_asm_byte(x, __pu_addr, err) \ |
397 | __asm__ __volatile__( \ | 397 | __asm__ __volatile__( \ |
398 | "1: " TUSER(strb) " %1,[%2],#0\n" \ | 398 | "1: " TUSER(strb) " %1,[%2],#0\n" \ |
399 | "2:\n" \ | 399 | "2:\n" \ |
@@ -411,22 +411,22 @@ do { \ | |||
411 | : "cc") | 411 | : "cc") |
412 | 412 | ||
413 | #ifndef __ARMEB__ | 413 | #ifndef __ARMEB__ |
414 | #define __put_user_asm_half(x,__pu_addr,err) \ | 414 | #define __put_user_asm_half(x, __pu_addr, err) \ |
415 | ({ \ | 415 | ({ \ |
416 | unsigned long __temp = (unsigned long)(x); \ | 416 | unsigned long __temp = (__force unsigned long)(x); \ |
417 | __put_user_asm_byte(__temp, __pu_addr, err); \ | 417 | __put_user_asm_byte(__temp, __pu_addr, err); \ |
418 | __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ | 418 | __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ |
419 | }) | 419 | }) |
420 | #else | 420 | #else |
421 | #define __put_user_asm_half(x,__pu_addr,err) \ | 421 | #define __put_user_asm_half(x, __pu_addr, err) \ |
422 | ({ \ | 422 | ({ \ |
423 | unsigned long __temp = (unsigned long)(x); \ | 423 | unsigned long __temp = (__force unsigned long)(x); \ |
424 | __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ | 424 | __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ |
425 | __put_user_asm_byte(__temp, __pu_addr + 1, err); \ | 425 | __put_user_asm_byte(__temp, __pu_addr + 1, err); \ |
426 | }) | 426 | }) |
427 | #endif | 427 | #endif |
428 | 428 | ||
429 | #define __put_user_asm_word(x,__pu_addr,err) \ | 429 | #define __put_user_asm_word(x, __pu_addr, err) \ |
430 | __asm__ __volatile__( \ | 430 | __asm__ __volatile__( \ |
431 | "1: " TUSER(str) " %1,[%2],#0\n" \ | 431 | "1: " TUSER(str) " %1,[%2],#0\n" \ |
432 | "2:\n" \ | 432 | "2:\n" \ |
@@ -451,7 +451,7 @@ do { \ | |||
451 | #define __reg_oper1 "%R2" | 451 | #define __reg_oper1 "%R2" |
452 | #endif | 452 | #endif |
453 | 453 | ||
454 | #define __put_user_asm_dword(x,__pu_addr,err) \ | 454 | #define __put_user_asm_dword(x, __pu_addr, err) \ |
455 | __asm__ __volatile__( \ | 455 | __asm__ __volatile__( \ |
456 | ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \ | 456 | ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \ |
457 | ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \ | 457 | ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \ |
@@ -480,9 +480,9 @@ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void | |||
480 | extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); | 480 | extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); |
481 | extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); | 481 | extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); |
482 | #else | 482 | #else |
483 | #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) | 483 | #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0) |
484 | #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) | 484 | #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0) |
485 | #define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) | 485 | #define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0) |
486 | #endif | 486 | #endif |
487 | 487 | ||
488 | static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) | 488 | static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) |
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 3bf8f4e99a51..07e1ba449bf1 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -63,7 +63,7 @@ static inline void set_fs(mm_segment_t fs) | |||
63 | current_thread_info()->addr_limit = fs; | 63 | current_thread_info()->addr_limit = fs; |
64 | } | 64 | } |
65 | 65 | ||
66 | #define segment_eq(a,b) ((a) == (b)) | 66 | #define segment_eq(a, b) ((a) == (b)) |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * Return 1 if addr < current->addr_limit, 0 otherwise. | 69 | * Return 1 if addr < current->addr_limit, 0 otherwise. |
@@ -147,7 +147,7 @@ do { \ | |||
147 | default: \ | 147 | default: \ |
148 | BUILD_BUG(); \ | 148 | BUILD_BUG(); \ |
149 | } \ | 149 | } \ |
150 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 150 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
151 | } while (0) | 151 | } while (0) |
152 | 152 | ||
153 | #define __get_user(x, ptr) \ | 153 | #define __get_user(x, ptr) \ |
diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
index 245b2ee213c9..a46f7cf3e1ea 100644
--- a/arch/avr32/include/asm/uaccess.h
+++ b/arch/avr32/include/asm/uaccess.h
@@ -26,7 +26,7 @@ typedef struct { | |||
26 | * For historical reasons (Data Segment Register?), these macros are misnamed. | 26 | * For historical reasons (Data Segment Register?), these macros are misnamed. |
27 | */ | 27 | */ |
28 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) | 28 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) |
29 | #define segment_eq(a,b) ((a).is_user_space == (b).is_user_space) | 29 | #define segment_eq(a, b) ((a).is_user_space == (b).is_user_space) |
30 | 30 | ||
31 | #define USER_ADDR_LIMIT 0x80000000 | 31 | #define USER_ADDR_LIMIT 0x80000000 |
32 | 32 | ||
@@ -108,8 +108,8 @@ static inline __kernel_size_t __copy_from_user(void *to, | |||
108 | * | 108 | * |
109 | * Returns zero on success, or -EFAULT on error. | 109 | * Returns zero on success, or -EFAULT on error. |
110 | */ | 110 | */ |
111 | #define put_user(x,ptr) \ | 111 | #define put_user(x, ptr) \ |
112 | __put_user_check((x),(ptr),sizeof(*(ptr))) | 112 | __put_user_check((x), (ptr), sizeof(*(ptr))) |
113 | 113 | ||
114 | /* | 114 | /* |
115 | * get_user: - Get a simple variable from user space. | 115 | * get_user: - Get a simple variable from user space. |
@@ -128,8 +128,8 @@ static inline __kernel_size_t __copy_from_user(void *to, | |||
128 | * Returns zero on success, or -EFAULT on error. | 128 | * Returns zero on success, or -EFAULT on error. |
129 | * On error, the variable @x is set to zero. | 129 | * On error, the variable @x is set to zero. |
130 | */ | 130 | */ |
131 | #define get_user(x,ptr) \ | 131 | #define get_user(x, ptr) \ |
132 | __get_user_check((x),(ptr),sizeof(*(ptr))) | 132 | __get_user_check((x), (ptr), sizeof(*(ptr))) |
133 | 133 | ||
134 | /* | 134 | /* |
135 | * __put_user: - Write a simple value into user space, with less checking. | 135 | * __put_user: - Write a simple value into user space, with less checking. |
@@ -150,8 +150,8 @@ static inline __kernel_size_t __copy_from_user(void *to, | |||
150 | * | 150 | * |
151 | * Returns zero on success, or -EFAULT on error. | 151 | * Returns zero on success, or -EFAULT on error. |
152 | */ | 152 | */ |
153 | #define __put_user(x,ptr) \ | 153 | #define __put_user(x, ptr) \ |
154 | __put_user_nocheck((x),(ptr),sizeof(*(ptr))) | 154 | __put_user_nocheck((x), (ptr), sizeof(*(ptr))) |
155 | 155 | ||
156 | /* | 156 | /* |
157 | * __get_user: - Get a simple variable from user space, with less checking. | 157 | * __get_user: - Get a simple variable from user space, with less checking. |
@@ -173,8 +173,8 @@ static inline __kernel_size_t __copy_from_user(void *to, | |||
173 | * Returns zero on success, or -EFAULT on error. | 173 | * Returns zero on success, or -EFAULT on error. |
174 | * On error, the variable @x is set to zero. | 174 | * On error, the variable @x is set to zero. |
175 | */ | 175 | */ |
176 | #define __get_user(x,ptr) \ | 176 | #define __get_user(x, ptr) \ |
177 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | 177 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
178 | 178 | ||
179 | extern int __get_user_bad(void); | 179 | extern int __get_user_bad(void); |
180 | extern int __put_user_bad(void); | 180 | extern int __put_user_bad(void); |
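Both the checking and the non-checking variants documented above report failure purely through the 0 / -EFAULT return value. A hedged usage sketch (demo_read_int and demo_write_int are invented helpers):

#include <linux/uaccess.h>
#include <linux/errno.h>

static int demo_read_int(int __user *uptr, int *val)
{
	return get_user(*val, uptr);	/* 0 on success, -EFAULT on fault */
}

static int demo_write_int(int __user *uptr, int val)
{
	return put_user(val, uptr);	/* likewise 0 or -EFAULT */
}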
@@ -191,7 +191,7 @@ extern int __put_user_bad(void); | |||
191 | default: __gu_err = __get_user_bad(); break; \ | 191 | default: __gu_err = __get_user_bad(); break; \ |
192 | } \ | 192 | } \ |
193 | \ | 193 | \ |
194 | x = (typeof(*(ptr)))__gu_val; \ | 194 | x = (__force typeof(*(ptr)))__gu_val; \ |
195 | __gu_err; \ | 195 | __gu_err; \ |
196 | }) | 196 | }) |
197 | 197 | ||
@@ -222,7 +222,7 @@ extern int __put_user_bad(void); | |||
222 | } else { \ | 222 | } else { \ |
223 | __gu_err = -EFAULT; \ | 223 | __gu_err = -EFAULT; \ |
224 | } \ | 224 | } \ |
225 | x = (typeof(*(ptr)))__gu_val; \ | 225 | x = (__force typeof(*(ptr)))__gu_val; \ |
226 | __gu_err; \ | 226 | __gu_err; \ |
227 | }) | 227 | }) |
228 | 228 | ||
@@ -278,7 +278,7 @@ extern int __put_user_bad(void); | |||
278 | __pu_err); \ | 278 | __pu_err); \ |
279 | break; \ | 279 | break; \ |
280 | case 8: \ | 280 | case 8: \ |
281 | __put_user_asm("d", __pu_addr, __pu_val, \ | 281 | __put_user_asm("d", __pu_addr, __pu_val, \ |
282 | __pu_err); \ | 282 | __pu_err); \ |
283 | break; \ | 283 | break; \ |
284 | default: \ | 284 | default: \ |
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 57701c3b8a59..90612a7f2cf3 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -27,7 +27,7 @@ static inline void set_fs(mm_segment_t fs) | |||
27 | current_thread_info()->addr_limit = fs; | 27 | current_thread_info()->addr_limit = fs; |
28 | } | 28 | } |
29 | 29 | ||
30 | #define segment_eq(a,b) ((a) == (b)) | 30 | #define segment_eq(a, b) ((a) == (b)) |
31 | 31 | ||
32 | #define VERIFY_READ 0 | 32 | #define VERIFY_READ 0 |
33 | #define VERIFY_WRITE 1 | 33 | #define VERIFY_WRITE 1 |
@@ -68,11 +68,11 @@ struct exception_table_entry { | |||
68 | * use the right size if we just have the right pointer type. | 68 | * use the right size if we just have the right pointer type. |
69 | */ | 69 | */ |
70 | 70 | ||
71 | #define put_user(x,p) \ | 71 | #define put_user(x, p) \ |
72 | ({ \ | 72 | ({ \ |
73 | int _err = 0; \ | 73 | int _err = 0; \ |
74 | typeof(*(p)) _x = (x); \ | 74 | typeof(*(p)) _x = (x); \ |
75 | typeof(*(p)) __user *_p = (p); \ | 75 | typeof(*(p)) __user *_p = (p); \ |
76 | if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\ | 76 | if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\ |
77 | _err = -EFAULT; \ | 77 | _err = -EFAULT; \ |
78 | } \ | 78 | } \ |
@@ -89,10 +89,10 @@ struct exception_table_entry { | |||
89 | break; \ | 89 | break; \ |
90 | case 8: { \ | 90 | case 8: { \ |
91 | long _xl, _xh; \ | 91 | long _xl, _xh; \ |
92 | _xl = ((long *)&_x)[0]; \ | 92 | _xl = ((__force long *)&_x)[0]; \ |
93 | _xh = ((long *)&_x)[1]; \ | 93 | _xh = ((__force long *)&_x)[1]; \ |
94 | __put_user_asm(_xl, ((long __user *)_p)+0, ); \ | 94 | __put_user_asm(_xl, ((__force long __user *)_p)+0, );\ |
95 | __put_user_asm(_xh, ((long __user *)_p)+1, ); \ | 95 | __put_user_asm(_xh, ((__force long __user *)_p)+1, );\ |
96 | } break; \ | 96 | } break; \ |
97 | default: \ | 97 | default: \ |
98 | _err = __put_user_bad(); \ | 98 | _err = __put_user_bad(); \ |
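The 8-byte case above simply stores the value as two 32-bit halves, and the added __force casts only tell sparse that peeking at the representation is intentional. The same reinterpretation in plain, runnable C (little-endian layout assumed, as on Blackfin):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t x = 0x1122334455667788ULL;
	uint32_t halves[2];

	/* View the 64-bit value as two 32-bit words, as the case-8 branch
	 * of put_user() does with its two 4-byte stores. */
	memcpy(halves, &x, sizeof(halves));

	printf("low  = 0x%08x\n", (unsigned)halves[0]);	/* 0x55667788 */
	printf("high = 0x%08x\n", (unsigned)halves[1]);	/* 0x11223344 */
	return 0;
}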
@@ -102,7 +102,7 @@ struct exception_table_entry { | |||
102 | _err; \ | 102 | _err; \ |
103 | }) | 103 | }) |
104 | 104 | ||
105 | #define __put_user(x,p) put_user(x,p) | 105 | #define __put_user(x, p) put_user(x, p) |
106 | static inline int bad_user_access_length(void) | 106 | static inline int bad_user_access_length(void) |
107 | { | 107 | { |
108 | panic("bad_user_access_length"); | 108 | panic("bad_user_access_length"); |
@@ -121,10 +121,10 @@ static inline int bad_user_access_length(void) | |||
121 | 121 | ||
122 | #define __ptr(x) ((unsigned long __force *)(x)) | 122 | #define __ptr(x) ((unsigned long __force *)(x)) |
123 | 123 | ||
124 | #define __put_user_asm(x,p,bhw) \ | 124 | #define __put_user_asm(x, p, bhw) \ |
125 | __asm__ (#bhw"[%1] = %0;\n\t" \ | 125 | __asm__ (#bhw"[%1] = %0;\n\t" \ |
126 | : /* no outputs */ \ | 126 | : /* no outputs */ \ |
127 | :"d" (x),"a" (__ptr(p)) : "memory") | 127 | :"d" (x), "a" (__ptr(p)) : "memory") |
128 | 128 | ||
129 | #define get_user(x, ptr) \ | 129 | #define get_user(x, ptr) \ |
130 | ({ \ | 130 | ({ \ |
@@ -136,10 +136,10 @@ static inline int bad_user_access_length(void) | |||
136 | BUILD_BUG_ON(ptr_size >= 8); \ | 136 | BUILD_BUG_ON(ptr_size >= 8); \ |
137 | switch (ptr_size) { \ | 137 | switch (ptr_size) { \ |
138 | case 1: \ | 138 | case 1: \ |
139 | __get_user_asm(_val, _p, B,(Z)); \ | 139 | __get_user_asm(_val, _p, B, (Z)); \ |
140 | break; \ | 140 | break; \ |
141 | case 2: \ | 141 | case 2: \ |
142 | __get_user_asm(_val, _p, W,(Z)); \ | 142 | __get_user_asm(_val, _p, W, (Z)); \ |
143 | break; \ | 143 | break; \ |
144 | case 4: \ | 144 | case 4: \ |
145 | __get_user_asm(_val, _p, , ); \ | 145 | __get_user_asm(_val, _p, , ); \ |
@@ -147,11 +147,11 @@ static inline int bad_user_access_length(void) | |||
147 | } \ | 147 | } \ |
148 | } else \ | 148 | } else \ |
149 | _err = -EFAULT; \ | 149 | _err = -EFAULT; \ |
150 | x = (typeof(*(ptr)))_val; \ | 150 | x = (__force typeof(*(ptr)))_val; \ |
151 | _err; \ | 151 | _err; \ |
152 | }) | 152 | }) |
153 | 153 | ||
154 | #define __get_user(x,p) get_user(x,p) | 154 | #define __get_user(x, p) get_user(x, p) |
155 | 155 | ||
156 | #define __get_user_bad() (bad_user_access_length(), (-EFAULT)) | 156 | #define __get_user_bad() (bad_user_access_length(), (-EFAULT)) |
157 | 157 | ||
@@ -168,10 +168,10 @@ static inline int bad_user_access_length(void) | |||
168 | #define __copy_to_user_inatomic __copy_to_user | 168 | #define __copy_to_user_inatomic __copy_to_user |
169 | #define __copy_from_user_inatomic __copy_from_user | 169 | #define __copy_from_user_inatomic __copy_from_user |
170 | 170 | ||
171 | #define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n))\ | 171 | #define copy_to_user_ret(to, from, n, retval) ({ if (copy_to_user(to, from, n))\ |
172 | return retval; }) | 172 | return retval; }) |
173 | 173 | ||
174 | #define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\ | 174 | #define copy_from_user_ret(to, from, n, retval) ({ if (copy_from_user(to, from, n))\ |
175 | return retval; }) | 175 | return retval; }) |
176 | 176 | ||
177 | static inline unsigned long __must_check | 177 | static inline unsigned long __must_check |
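The _ret macros above hide a return statement in the caller: if the copy fails, the enclosing function returns retval immediately. A hedged sketch of how that reads at a call site (demo_ioctl and struct demo_args are invented):

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical argument block, for illustration only. */
struct demo_args {
	u32 flags;
};

static int demo_ioctl(void __user *argp)
{
	struct demo_args a;

	/* Expands to: if (copy_from_user(...)) return -EFAULT; */
	copy_from_user_ret(&a, argp, sizeof(a), -EFAULT);
	a.flags |= 1;
	copy_to_user_ret(argp, &a, sizeof(a), -EFAULT);
	return 0;
}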
diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h
index 914540801c5e..a21344ab8616 100644
--- a/arch/cris/include/asm/uaccess.h
+++ b/arch/cris/include/asm/uaccess.h
@@ -47,12 +47,13 @@ | |||
47 | #define get_fs() (current_thread_info()->addr_limit) | 47 | #define get_fs() (current_thread_info()->addr_limit) |
48 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | 48 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) |
49 | 49 | ||
50 | #define segment_eq(a,b) ((a).seg == (b).seg) | 50 | #define segment_eq(a, b) ((a).seg == (b).seg) |
51 | 51 | ||
52 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) | 52 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) |
53 | #define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) | 53 | #define __user_ok(addr, size) \ |
54 | #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) | 54 | (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) |
55 | #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) | 55 | #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) |
56 | #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) | ||
56 | 57 | ||
57 | #include <arch/uaccess.h> | 58 | #include <arch/uaccess.h> |
58 | 59 | ||
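Why __user_ok() above is written as two comparisons instead of addr + size <= TASK_SIZE: by checking size first and then comparing addr against TASK_SIZE - size, no sum is formed that could wrap around. A standalone demonstration (the TASK_SIZE value is made up for the example):

#include <stdio.h>

#define TASK_SIZE 0xb0000000UL		/* hypothetical user-space limit */

/* Same shape as the CRIS __user_ok(addr, size) in the hunk above. */
static int user_ok(unsigned long addr, unsigned long size)
{
	return size <= TASK_SIZE && addr <= TASK_SIZE - size;
}

int main(void)
{
	printf("%d\n", user_ok(0x1000, 0x100));		/* 1: fine */
	printf("%d\n", user_ok(0xafffff00, 0x200));	/* 0: crosses the limit */
	printf("%d\n", user_ok(0xfffffff0, 0x20));	/* 0: would wrap a 32-bit sum */
	return 0;
}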
@@ -92,56 +93,56 @@ struct exception_table_entry | |||
92 | * CRIS, we can just do these as direct assignments. (Of course, the | 93 | * CRIS, we can just do these as direct assignments. (Of course, the |
93 | * exception handling means that it's no longer "just"...) | 94 | * exception handling means that it's no longer "just"...) |
94 | */ | 95 | */ |
95 | #define get_user(x,ptr) \ | 96 | #define get_user(x, ptr) \ |
96 | __get_user_check((x),(ptr),sizeof(*(ptr))) | 97 | __get_user_check((x), (ptr), sizeof(*(ptr))) |
97 | #define put_user(x,ptr) \ | 98 | #define put_user(x, ptr) \ |
98 | __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 99 | __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
99 | 100 | ||
100 | #define __get_user(x,ptr) \ | 101 | #define __get_user(x, ptr) \ |
101 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | 102 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
102 | #define __put_user(x,ptr) \ | 103 | #define __put_user(x, ptr) \ |
103 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 104 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
104 | 105 | ||
105 | extern long __put_user_bad(void); | 106 | extern long __put_user_bad(void); |
106 | 107 | ||
107 | #define __put_user_size(x,ptr,size,retval) \ | 108 | #define __put_user_size(x, ptr, size, retval) \ |
108 | do { \ | 109 | do { \ |
109 | retval = 0; \ | 110 | retval = 0; \ |
110 | switch (size) { \ | 111 | switch (size) { \ |
111 | case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \ | 112 | case 1: __put_user_asm(x, ptr, retval, "move.b"); break; \ |
112 | case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \ | 113 | case 2: __put_user_asm(x, ptr, retval, "move.w"); break; \ |
113 | case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \ | 114 | case 4: __put_user_asm(x, ptr, retval, "move.d"); break; \ |
114 | case 8: __put_user_asm_64(x,ptr,retval); break; \ | 115 | case 8: __put_user_asm_64(x, ptr, retval); break; \ |
115 | default: __put_user_bad(); \ | 116 | default: __put_user_bad(); \ |
116 | } \ | 117 | } \ |
117 | } while (0) | 118 | } while (0) |
118 | 119 | ||
119 | #define __get_user_size(x,ptr,size,retval) \ | 120 | #define __get_user_size(x, ptr, size, retval) \ |
120 | do { \ | 121 | do { \ |
121 | retval = 0; \ | 122 | retval = 0; \ |
122 | switch (size) { \ | 123 | switch (size) { \ |
123 | case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \ | 124 | case 1: __get_user_asm(x, ptr, retval, "move.b"); break; \ |
124 | case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \ | 125 | case 2: __get_user_asm(x, ptr, retval, "move.w"); break; \ |
125 | case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \ | 126 | case 4: __get_user_asm(x, ptr, retval, "move.d"); break; \ |
126 | case 8: __get_user_asm_64(x,ptr,retval); break; \ | 127 | case 8: __get_user_asm_64(x, ptr, retval); break; \ |
127 | default: (x) = __get_user_bad(); \ | 128 | default: (x) = __get_user_bad(); \ |
128 | } \ | 129 | } \ |
129 | } while (0) | 130 | } while (0) |
130 | 131 | ||
131 | #define __put_user_nocheck(x,ptr,size) \ | 132 | #define __put_user_nocheck(x, ptr, size) \ |
132 | ({ \ | 133 | ({ \ |
133 | long __pu_err; \ | 134 | long __pu_err; \ |
134 | __put_user_size((x),(ptr),(size),__pu_err); \ | 135 | __put_user_size((x), (ptr), (size), __pu_err); \ |
135 | __pu_err; \ | 136 | __pu_err; \ |
136 | }) | 137 | }) |
137 | 138 | ||
138 | #define __put_user_check(x,ptr,size) \ | 139 | #define __put_user_check(x, ptr, size) \ |
139 | ({ \ | 140 | ({ \ |
140 | long __pu_err = -EFAULT; \ | 141 | long __pu_err = -EFAULT; \ |
141 | __typeof__(*(ptr)) *__pu_addr = (ptr); \ | 142 | __typeof__(*(ptr)) *__pu_addr = (ptr); \ |
142 | if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ | 143 | if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ |
143 | __put_user_size((x),__pu_addr,(size),__pu_err); \ | 144 | __put_user_size((x), __pu_addr, (size), __pu_err); \ |
144 | __pu_err; \ | 145 | __pu_err; \ |
145 | }) | 146 | }) |
146 | 147 | ||
147 | struct __large_struct { unsigned long buf[100]; }; | 148 | struct __large_struct { unsigned long buf[100]; }; |
@@ -149,21 +150,21 @@ struct __large_struct { unsigned long buf[100]; }; | |||
149 | 150 | ||
150 | 151 | ||
151 | 152 | ||
152 | #define __get_user_nocheck(x,ptr,size) \ | 153 | #define __get_user_nocheck(x, ptr, size) \ |
153 | ({ \ | 154 | ({ \ |
154 | long __gu_err, __gu_val; \ | 155 | long __gu_err, __gu_val; \ |
155 | __get_user_size(__gu_val,(ptr),(size),__gu_err); \ | 156 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ |
156 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 157 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
157 | __gu_err; \ | 158 | __gu_err; \ |
158 | }) | 159 | }) |
159 | 160 | ||
160 | #define __get_user_check(x,ptr,size) \ | 161 | #define __get_user_check(x, ptr, size) \ |
161 | ({ \ | 162 | ({ \ |
162 | long __gu_err = -EFAULT, __gu_val = 0; \ | 163 | long __gu_err = -EFAULT, __gu_val = 0; \ |
163 | const __typeof__(*(ptr)) *__gu_addr = (ptr); \ | 164 | const __typeof__(*(ptr)) *__gu_addr = (ptr); \ |
164 | if (access_ok(VERIFY_READ,__gu_addr,size)) \ | 165 | if (access_ok(VERIFY_READ, __gu_addr, size)) \ |
165 | __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ | 166 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ |
166 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 167 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
167 | __gu_err; \ | 168 | __gu_err; \ |
168 | }) | 169 | }) |
169 | 170 | ||
@@ -180,7 +181,7 @@ static inline unsigned long | |||
180 | __generic_copy_to_user(void __user *to, const void *from, unsigned long n) | 181 | __generic_copy_to_user(void __user *to, const void *from, unsigned long n) |
181 | { | 182 | { |
182 | if (access_ok(VERIFY_WRITE, to, n)) | 183 | if (access_ok(VERIFY_WRITE, to, n)) |
183 | return __copy_user(to,from,n); | 184 | return __copy_user(to, from, n); |
184 | return n; | 185 | return n; |
185 | } | 186 | } |
186 | 187 | ||
@@ -188,7 +189,7 @@ static inline unsigned long | |||
188 | __generic_copy_from_user(void *to, const void __user *from, unsigned long n) | 189 | __generic_copy_from_user(void *to, const void __user *from, unsigned long n) |
189 | { | 190 | { |
190 | if (access_ok(VERIFY_READ, from, n)) | 191 | if (access_ok(VERIFY_READ, from, n)) |
191 | return __copy_user_zeroing(to,from,n); | 192 | return __copy_user_zeroing(to, from, n); |
192 | return n; | 193 | return n; |
193 | } | 194 | } |
194 | 195 | ||
@@ -196,7 +197,7 @@ static inline unsigned long | |||
196 | __generic_clear_user(void __user *to, unsigned long n) | 197 | __generic_clear_user(void __user *to, unsigned long n) |
197 | { | 198 | { |
198 | if (access_ok(VERIFY_WRITE, to, n)) | 199 | if (access_ok(VERIFY_WRITE, to, n)) |
199 | return __do_clear_user(to,n); | 200 | return __do_clear_user(to, n); |
200 | return n; | 201 | return n; |
201 | } | 202 | } |
202 | 203 | ||
@@ -373,29 +374,31 @@ static inline unsigned long | |||
373 | __generic_copy_from_user_nocheck(void *to, const void __user *from, | 374 | __generic_copy_from_user_nocheck(void *to, const void __user *from, |
374 | unsigned long n) | 375 | unsigned long n) |
375 | { | 376 | { |
376 | return __copy_user_zeroing(to,from,n); | 377 | return __copy_user_zeroing(to, from, n); |
377 | } | 378 | } |
378 | 379 | ||
379 | static inline unsigned long | 380 | static inline unsigned long |
380 | __generic_copy_to_user_nocheck(void __user *to, const void *from, | 381 | __generic_copy_to_user_nocheck(void __user *to, const void *from, |
381 | unsigned long n) | 382 | unsigned long n) |
382 | { | 383 | { |
383 | return __copy_user(to,from,n); | 384 | return __copy_user(to, from, n); |
384 | } | 385 | } |
385 | 386 | ||
386 | static inline unsigned long | 387 | static inline unsigned long |
387 | __generic_clear_user_nocheck(void __user *to, unsigned long n) | 388 | __generic_clear_user_nocheck(void __user *to, unsigned long n) |
388 | { | 389 | { |
389 | return __do_clear_user(to,n); | 390 | return __do_clear_user(to, n); |
390 | } | 391 | } |
391 | 392 | ||
392 | /* without checking */ | 393 | /* without checking */ |
393 | 394 | ||
394 | #define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n)) | 395 | #define __copy_to_user(to, from, n) \ |
395 | #define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n)) | 396 | __generic_copy_to_user_nocheck((to), (from), (n)) |
397 | #define __copy_from_user(to, from, n) \ | ||
398 | __generic_copy_from_user_nocheck((to), (from), (n)) | ||
396 | #define __copy_to_user_inatomic __copy_to_user | 399 | #define __copy_to_user_inatomic __copy_to_user |
397 | #define __copy_from_user_inatomic __copy_from_user | 400 | #define __copy_from_user_inatomic __copy_from_user |
398 | #define __clear_user(to,n) __generic_clear_user_nocheck((to),(n)) | 401 | #define __clear_user(to, n) __generic_clear_user_nocheck((to), (n)) |
399 | 402 | ||
400 | #define strlen_user(str) strnlen_user((str), 0x7ffffffe) | 403 | #define strlen_user(str) strnlen_user((str), 0x7ffffffe) |
401 | 404 | ||
diff --git a/arch/frv/include/asm/segment.h b/arch/frv/include/asm/segment.h
index a2320a4a0042..4377c89a57f5 100644
--- a/arch/frv/include/asm/segment.h
+++ b/arch/frv/include/asm/segment.h
@@ -31,7 +31,7 @@ typedef struct { | |||
31 | 31 | ||
32 | #define get_ds() (KERNEL_DS) | 32 | #define get_ds() (KERNEL_DS) |
33 | #define get_fs() (__current_thread_info->addr_limit) | 33 | #define get_fs() (__current_thread_info->addr_limit) |
34 | #define segment_eq(a,b) ((a).seg == (b).seg) | 34 | #define segment_eq(a, b) ((a).seg == (b).seg) |
35 | #define __kernel_ds_p() segment_eq(get_fs(), KERNEL_DS) | 35 | #define __kernel_ds_p() segment_eq(get_fs(), KERNEL_DS) |
36 | #define get_addr_limit() (get_fs().seg) | 36 | #define get_addr_limit() (get_fs().seg) |
37 | 37 | ||
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 103bedc59644..4f3fb6ccbf21 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -169,10 +169,11 @@ do { \ | |||
169 | (err) = ia64_getreg(_IA64_REG_R8); \ | 169 | (err) = ia64_getreg(_IA64_REG_R8); \ |
170 | (val) = ia64_getreg(_IA64_REG_R9); \ | 170 | (val) = ia64_getreg(_IA64_REG_R9); \ |
171 | } while (0) | 171 | } while (0) |
172 | # define __put_user_size(val, addr, n, err) \ | 172 | # define __put_user_size(val, addr, n, err) \ |
173 | do { \ | 173 | do { \ |
174 | __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \ | 174 | __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, \ |
175 | (err) = ia64_getreg(_IA64_REG_R8); \ | 175 | (__force unsigned long) (val)); \ |
176 | (err) = ia64_getreg(_IA64_REG_R8); \ | ||
176 | } while (0) | 177 | } while (0) |
177 | #endif /* !ASM_SUPPORTED */ | 178 | #endif /* !ASM_SUPPORTED */ |
178 | 179 | ||
@@ -197,7 +198,7 @@ extern void __get_user_unknown (void); | |||
197 | case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \ | 198 | case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \ |
198 | default: __get_user_unknown(); break; \ | 199 | default: __get_user_unknown(); break; \ |
199 | } \ | 200 | } \ |
200 | (x) = (__typeof__(*(__gu_ptr))) __gu_val; \ | 201 | (x) = (__force __typeof__(*(__gu_ptr))) __gu_val; \ |
201 | __gu_err; \ | 202 | __gu_err; \ |
202 | }) | 203 | }) |
203 | 204 | ||
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 84fe7ba53035..71adff209405 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -54,7 +54,7 @@ static inline void set_fs(mm_segment_t s) | |||
54 | 54 | ||
55 | #endif /* not CONFIG_MMU */ | 55 | #endif /* not CONFIG_MMU */ |
56 | 56 | ||
57 | #define segment_eq(a,b) ((a).seg == (b).seg) | 57 | #define segment_eq(a, b) ((a).seg == (b).seg) |
58 | 58 | ||
59 | #define __addr_ok(addr) \ | 59 | #define __addr_ok(addr) \ |
60 | ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg)) | 60 | ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg)) |
@@ -68,7 +68,7 @@ static inline void set_fs(mm_segment_t s) | |||
68 | * | 68 | * |
69 | * This needs 33-bit arithmetic. We have a carry... | 69 | * This needs 33-bit arithmetic. We have a carry... |
70 | */ | 70 | */ |
71 | #define __range_ok(addr,size) ({ \ | 71 | #define __range_ok(addr, size) ({ \ |
72 | unsigned long flag, roksum; \ | 72 | unsigned long flag, roksum; \ |
73 | __chk_user_ptr(addr); \ | 73 | __chk_user_ptr(addr); \ |
74 | asm ( \ | 74 | asm ( \ |
@@ -103,7 +103,7 @@ static inline void set_fs(mm_segment_t s) | |||
103 | * this function, memory access functions may still return -EFAULT. | 103 | * this function, memory access functions may still return -EFAULT. |
104 | */ | 104 | */ |
105 | #ifdef CONFIG_MMU | 105 | #ifdef CONFIG_MMU |
106 | #define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) | 106 | #define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0)) |
107 | #else | 107 | #else |
108 | static inline int access_ok(int type, const void *addr, unsigned long size) | 108 | static inline int access_ok(int type, const void *addr, unsigned long size) |
109 | { | 109 | { |
@@ -167,8 +167,8 @@ extern int fixup_exception(struct pt_regs *regs); | |||
167 | * Returns zero on success, or -EFAULT on error. | 167 | * Returns zero on success, or -EFAULT on error. |
168 | * On error, the variable @x is set to zero. | 168 | * On error, the variable @x is set to zero. |
169 | */ | 169 | */ |
170 | #define get_user(x,ptr) \ | 170 | #define get_user(x, ptr) \ |
171 | __get_user_check((x),(ptr),sizeof(*(ptr))) | 171 | __get_user_check((x), (ptr), sizeof(*(ptr))) |
172 | 172 | ||
173 | /** | 173 | /** |
174 | * put_user: - Write a simple value into user space. | 174 | * put_user: - Write a simple value into user space. |
@@ -186,8 +186,8 @@ extern int fixup_exception(struct pt_regs *regs); | |||
186 | * | 186 | * |
187 | * Returns zero on success, or -EFAULT on error. | 187 | * Returns zero on success, or -EFAULT on error. |
188 | */ | 188 | */ |
189 | #define put_user(x,ptr) \ | 189 | #define put_user(x, ptr) \ |
190 | __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 190 | __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
191 | 191 | ||
192 | /** | 192 | /** |
193 | * __get_user: - Get a simple variable from user space, with less checking. | 193 | * __get_user: - Get a simple variable from user space, with less checking. |
@@ -209,41 +209,41 @@ extern int fixup_exception(struct pt_regs *regs); | |||
209 | * Returns zero on success, or -EFAULT on error. | 209 | * Returns zero on success, or -EFAULT on error. |
210 | * On error, the variable @x is set to zero. | 210 | * On error, the variable @x is set to zero. |
211 | */ | 211 | */ |
212 | #define __get_user(x,ptr) \ | 212 | #define __get_user(x, ptr) \ |
213 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | 213 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
214 | 214 | ||
215 | #define __get_user_nocheck(x,ptr,size) \ | 215 | #define __get_user_nocheck(x, ptr, size) \ |
216 | ({ \ | 216 | ({ \ |
217 | long __gu_err = 0; \ | 217 | long __gu_err = 0; \ |
218 | unsigned long __gu_val; \ | 218 | unsigned long __gu_val; \ |
219 | might_fault(); \ | 219 | might_fault(); \ |
220 | __get_user_size(__gu_val,(ptr),(size),__gu_err); \ | 220 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ |
221 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 221 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
222 | __gu_err; \ | 222 | __gu_err; \ |
223 | }) | 223 | }) |
224 | 224 | ||
225 | #define __get_user_check(x,ptr,size) \ | 225 | #define __get_user_check(x, ptr, size) \ |
226 | ({ \ | 226 | ({ \ |
227 | long __gu_err = -EFAULT; \ | 227 | long __gu_err = -EFAULT; \ |
228 | unsigned long __gu_val = 0; \ | 228 | unsigned long __gu_val = 0; \ |
229 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | 229 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ |
230 | might_fault(); \ | 230 | might_fault(); \ |
231 | if (access_ok(VERIFY_READ,__gu_addr,size)) \ | 231 | if (access_ok(VERIFY_READ, __gu_addr, size)) \ |
232 | __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ | 232 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ |
233 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 233 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
234 | __gu_err; \ | 234 | __gu_err; \ |
235 | }) | 235 | }) |
236 | 236 | ||
237 | extern long __get_user_bad(void); | 237 | extern long __get_user_bad(void); |
238 | 238 | ||
239 | #define __get_user_size(x,ptr,size,retval) \ | 239 | #define __get_user_size(x, ptr, size, retval) \ |
240 | do { \ | 240 | do { \ |
241 | retval = 0; \ | 241 | retval = 0; \ |
242 | __chk_user_ptr(ptr); \ | 242 | __chk_user_ptr(ptr); \ |
243 | switch (size) { \ | 243 | switch (size) { \ |
244 | case 1: __get_user_asm(x,ptr,retval,"ub"); break; \ | 244 | case 1: __get_user_asm(x, ptr, retval, "ub"); break; \ |
245 | case 2: __get_user_asm(x,ptr,retval,"uh"); break; \ | 245 | case 2: __get_user_asm(x, ptr, retval, "uh"); break; \ |
246 | case 4: __get_user_asm(x,ptr,retval,""); break; \ | 246 | case 4: __get_user_asm(x, ptr, retval, ""); break; \ |
247 | default: (x) = __get_user_bad(); \ | 247 | default: (x) = __get_user_bad(); \ |
248 | } \ | 248 | } \ |
249 | } while (0) | 249 | } while (0) |
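For reference, a usage sketch of the interfaces whose kernel-doc appears above (hypothetical code, not part of this patch): get_user()/put_user() return 0 on success and -EFAULT on error, and on error get_user() zeroes the kernel-side variable.

#include <linux/errno.h>
#include <linux/uaccess.h>

static int double_user_int(int __user *uptr)
{
        int val;

        if (get_user(val, uptr))        /* fetch one int from user space */
                return -EFAULT;
        val *= 2;
        return put_user(val, uptr);     /* 0 on success, -EFAULT on fault */
}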
@@ -288,26 +288,26 @@ do { \ | |||
288 | * | 288 | * |
289 | * Returns zero on success, or -EFAULT on error. | 289 | * Returns zero on success, or -EFAULT on error. |
290 | */ | 290 | */ |
291 | #define __put_user(x,ptr) \ | 291 | #define __put_user(x, ptr) \ |
292 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 292 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
293 | 293 | ||
294 | 294 | ||
295 | #define __put_user_nocheck(x,ptr,size) \ | 295 | #define __put_user_nocheck(x, ptr, size) \ |
296 | ({ \ | 296 | ({ \ |
297 | long __pu_err; \ | 297 | long __pu_err; \ |
298 | might_fault(); \ | 298 | might_fault(); \ |
299 | __put_user_size((x),(ptr),(size),__pu_err); \ | 299 | __put_user_size((x), (ptr), (size), __pu_err); \ |
300 | __pu_err; \ | 300 | __pu_err; \ |
301 | }) | 301 | }) |
302 | 302 | ||
303 | 303 | ||
304 | #define __put_user_check(x,ptr,size) \ | 304 | #define __put_user_check(x, ptr, size) \ |
305 | ({ \ | 305 | ({ \ |
306 | long __pu_err = -EFAULT; \ | 306 | long __pu_err = -EFAULT; \ |
307 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | 307 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ |
308 | might_fault(); \ | 308 | might_fault(); \ |
309 | if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ | 309 | if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ |
310 | __put_user_size((x),__pu_addr,(size),__pu_err); \ | 310 | __put_user_size((x), __pu_addr, (size), __pu_err); \ |
311 | __pu_err; \ | 311 | __pu_err; \ |
312 | }) | 312 | }) |
313 | 313 | ||
@@ -366,15 +366,15 @@ do { \ | |||
366 | 366 | ||
367 | extern void __put_user_bad(void); | 367 | extern void __put_user_bad(void); |
368 | 368 | ||
369 | #define __put_user_size(x,ptr,size,retval) \ | 369 | #define __put_user_size(x, ptr, size, retval) \ |
370 | do { \ | 370 | do { \ |
371 | retval = 0; \ | 371 | retval = 0; \ |
372 | __chk_user_ptr(ptr); \ | 372 | __chk_user_ptr(ptr); \ |
373 | switch (size) { \ | 373 | switch (size) { \ |
374 | case 1: __put_user_asm(x,ptr,retval,"b"); break; \ | 374 | case 1: __put_user_asm(x, ptr, retval, "b"); break; \ |
375 | case 2: __put_user_asm(x,ptr,retval,"h"); break; \ | 375 | case 2: __put_user_asm(x, ptr, retval, "h"); break; \ |
376 | case 4: __put_user_asm(x,ptr,retval,""); break; \ | 376 | case 4: __put_user_asm(x, ptr, retval, ""); break; \ |
377 | case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ | 377 | case 8: __put_user_u64((__typeof__(*ptr))(x), ptr, retval); break;\ |
378 | default: __put_user_bad(); \ | 378 | default: __put_user_bad(); \ |
379 | } \ | 379 | } \ |
380 | } while (0) | 380 | } while (0) |
@@ -421,7 +421,7 @@ struct __large_struct { unsigned long buf[100]; }; | |||
421 | 421 | ||
422 | /* Generic arbitrary sized copy. */ | 422 | /* Generic arbitrary sized copy. */ |
423 | /* Return the number of bytes NOT copied. */ | 423 | /* Return the number of bytes NOT copied. */ |
424 | #define __copy_user(to,from,size) \ | 424 | #define __copy_user(to, from, size) \ |
425 | do { \ | 425 | do { \ |
426 | unsigned long __dst, __src, __c; \ | 426 | unsigned long __dst, __src, __c; \ |
427 | __asm__ __volatile__ ( \ | 427 | __asm__ __volatile__ ( \ |
@@ -478,7 +478,7 @@ do { \ | |||
478 | : "r14", "memory"); \ | 478 | : "r14", "memory"); \ |
479 | } while (0) | 479 | } while (0) |
480 | 480 | ||
481 | #define __copy_user_zeroing(to,from,size) \ | 481 | #define __copy_user_zeroing(to, from, size) \ |
482 | do { \ | 482 | do { \ |
483 | unsigned long __dst, __src, __c; \ | 483 | unsigned long __dst, __src, __c; \ |
484 | __asm__ __volatile__ ( \ | 484 | __asm__ __volatile__ ( \ |
@@ -548,14 +548,14 @@ do { \ | |||
548 | static inline unsigned long __generic_copy_from_user_nocheck(void *to, | 548 | static inline unsigned long __generic_copy_from_user_nocheck(void *to, |
549 | const void __user *from, unsigned long n) | 549 | const void __user *from, unsigned long n) |
550 | { | 550 | { |
551 | __copy_user_zeroing(to,from,n); | 551 | __copy_user_zeroing(to, from, n); |
552 | return n; | 552 | return n; |
553 | } | 553 | } |
554 | 554 | ||
555 | static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, | 555 | static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, |
556 | const void *from, unsigned long n) | 556 | const void *from, unsigned long n) |
557 | { | 557 | { |
558 | __copy_user(to,from,n); | 558 | __copy_user(to, from, n); |
559 | return n; | 559 | return n; |
560 | } | 560 | } |
561 | 561 | ||
@@ -576,8 +576,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon | |||
576 | * Returns number of bytes that could not be copied. | 576 | * Returns number of bytes that could not be copied. |
577 | * On success, this will be zero. | 577 | * On success, this will be zero. |
578 | */ | 578 | */ |
579 | #define __copy_to_user(to,from,n) \ | 579 | #define __copy_to_user(to, from, n) \ |
580 | __generic_copy_to_user_nocheck((to),(from),(n)) | 580 | __generic_copy_to_user_nocheck((to), (from), (n)) |
581 | 581 | ||
582 | #define __copy_to_user_inatomic __copy_to_user | 582 | #define __copy_to_user_inatomic __copy_to_user |
583 | #define __copy_from_user_inatomic __copy_from_user | 583 | #define __copy_from_user_inatomic __copy_from_user |
@@ -595,10 +595,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon | |||
595 | * Returns number of bytes that could not be copied. | 595 | * Returns number of bytes that could not be copied. |
596 | * On success, this will be zero. | 596 | * On success, this will be zero. |
597 | */ | 597 | */ |
598 | #define copy_to_user(to,from,n) \ | 598 | #define copy_to_user(to, from, n) \ |
599 | ({ \ | 599 | ({ \ |
600 | might_fault(); \ | 600 | might_fault(); \ |
601 | __generic_copy_to_user((to),(from),(n)); \ | 601 | __generic_copy_to_user((to), (from), (n)); \ |
602 | }) | 602 | }) |
603 | 603 | ||
604 | /** | 604 | /** |
@@ -617,8 +617,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon | |||
617 | * If some data could not be copied, this function will pad the copied | 617 | * If some data could not be copied, this function will pad the copied |
618 | * data to the requested size using zero bytes. | 618 | * data to the requested size using zero bytes. |
619 | */ | 619 | */ |
620 | #define __copy_from_user(to,from,n) \ | 620 | #define __copy_from_user(to, from, n) \ |
621 | __generic_copy_from_user_nocheck((to),(from),(n)) | 621 | __generic_copy_from_user_nocheck((to), (from), (n)) |
622 | 622 | ||
623 | /** | 623 | /** |
624 | * copy_from_user: - Copy a block of data from user space. | 624 | * copy_from_user: - Copy a block of data from user space. |
@@ -636,10 +636,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon | |||
636 | * If some data could not be copied, this function will pad the copied | 636 | * If some data could not be copied, this function will pad the copied |
637 | * data to the requested size using zero bytes. | 637 | * data to the requested size using zero bytes. |
638 | */ | 638 | */ |
639 | #define copy_from_user(to,from,n) \ | 639 | #define copy_from_user(to, from, n) \ |
640 | ({ \ | 640 | ({ \ |
641 | might_fault(); \ | 641 | might_fault(); \ |
642 | __generic_copy_from_user((to),(from),(n)); \ | 642 | __generic_copy_from_user((to), (from), (n)); \ |
643 | }) | 643 | }) |
644 | 644 | ||
645 | long __must_check strncpy_from_user(char *dst, const char __user *src, | 645 | long __must_check strncpy_from_user(char *dst, const char __user *src, |
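A usage sketch for the copy routines documented above (hypothetical, not part of this patch): they return the number of bytes that could NOT be copied, so zero means success, and copy_from_user() zero-pads the destination when only part of the source was readable.

#include <linux/errno.h>
#include <linux/uaccess.h>

struct sample {
        int a;
        int b;
};

static int fetch_sample(struct sample *dst, const void __user *src)
{
        return copy_from_user(dst, src, sizeof(*dst)) ? -EFAULT : 0;
}

static int publish_sample(void __user *dst, const struct sample *src)
{
        return copy_to_user(dst, src, sizeof(*src)) ? -EFAULT : 0;
}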
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index 0fa80e97ed2d..98216b8111f0 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h | |||
@@ -58,7 +58,7 @@ static inline mm_segment_t get_ds(void) | |||
58 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | 58 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #define segment_eq(a,b) ((a).seg == (b).seg) | 61 | #define segment_eq(a, b) ((a).seg == (b).seg) |
62 | 62 | ||
63 | #endif /* __ASSEMBLY__ */ | 63 | #endif /* __ASSEMBLY__ */ |
64 | 64 | ||
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 15901db435b9..d228601b3afc 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h | |||
@@ -128,25 +128,25 @@ asm volatile ("\n" \ | |||
128 | #define put_user(x, ptr) __put_user(x, ptr) | 128 | #define put_user(x, ptr) __put_user(x, ptr) |
129 | 129 | ||
130 | 130 | ||
131 | #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ | 131 | #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ |
132 | type __gu_val; \ | 132 | type __gu_val; \ |
133 | asm volatile ("\n" \ | 133 | asm volatile ("\n" \ |
134 | "1: "MOVES"."#bwl" %2,%1\n" \ | 134 | "1: "MOVES"."#bwl" %2,%1\n" \ |
135 | "2:\n" \ | 135 | "2:\n" \ |
136 | " .section .fixup,\"ax\"\n" \ | 136 | " .section .fixup,\"ax\"\n" \ |
137 | " .even\n" \ | 137 | " .even\n" \ |
138 | "10: move.l %3,%0\n" \ | 138 | "10: move.l %3,%0\n" \ |
139 | " sub.l %1,%1\n" \ | 139 | " sub.l %1,%1\n" \ |
140 | " jra 2b\n" \ | 140 | " jra 2b\n" \ |
141 | " .previous\n" \ | 141 | " .previous\n" \ |
142 | "\n" \ | 142 | "\n" \ |
143 | " .section __ex_table,\"a\"\n" \ | 143 | " .section __ex_table,\"a\"\n" \ |
144 | " .align 4\n" \ | 144 | " .align 4\n" \ |
145 | " .long 1b,10b\n" \ | 145 | " .long 1b,10b\n" \ |
146 | " .previous" \ | 146 | " .previous" \ |
147 | : "+d" (res), "=&" #reg (__gu_val) \ | 147 | : "+d" (res), "=&" #reg (__gu_val) \ |
148 | : "m" (*(ptr)), "i" (err)); \ | 148 | : "m" (*(ptr)), "i" (err)); \ |
149 | (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \ | 149 | (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \ |
150 | }) | 150 | }) |
151 | 151 | ||
152 | #define __get_user(x, ptr) \ | 152 | #define __get_user(x, ptr) \ |
@@ -188,7 +188,7 @@ asm volatile ("\n" \ | |||
188 | "+a" (__gu_ptr) \ | 188 | "+a" (__gu_ptr) \ |
189 | : "i" (-EFAULT) \ | 189 | : "i" (-EFAULT) \ |
190 | : "memory"); \ | 190 | : "memory"); \ |
191 | (x) = (typeof(*(ptr)))__gu_val; \ | 191 | (x) = (__force typeof(*(ptr)))__gu_val; \ |
192 | break; \ | 192 | break; \ |
193 | } */ \ | 193 | } */ \ |
194 | default: \ | 194 | default: \ |
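Background for the ".section __ex_table" directives visible in the m68k asm above (conceptual sketch, not code from this patch): each ".long 1b,10b" records a pair of addresses, the possibly-faulting instruction and its fixup. On a fault the handler searches this table and resumes at the fixup, which here loads the error code and zeroes the destination before jumping back to label 2:. Several architectures, m68k included, use this absolute-address form of entry:

struct exception_table_entry {
        unsigned long insn;     /* address of the faulting instruction */
        unsigned long fixup;    /* address to continue at after the fault */
};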
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 0748b0a97986..8282cbce7e39 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h | |||
@@ -107,18 +107,23 @@ extern long __put_user_asm_w(unsigned int x, void __user *addr); | |||
107 | extern long __put_user_asm_d(unsigned int x, void __user *addr); | 107 | extern long __put_user_asm_d(unsigned int x, void __user *addr); |
108 | extern long __put_user_asm_l(unsigned long long x, void __user *addr); | 108 | extern long __put_user_asm_l(unsigned long long x, void __user *addr); |
109 | 109 | ||
110 | #define __put_user_size(x, ptr, size, retval) \ | 110 | #define __put_user_size(x, ptr, size, retval) \ |
111 | do { \ | 111 | do { \ |
112 | retval = 0; \ | 112 | retval = 0; \ |
113 | switch (size) { \ | 113 | switch (size) { \ |
114 | case 1: \ | 114 | case 1: \ |
115 | retval = __put_user_asm_b((unsigned int)x, ptr); break; \ | 115 | retval = __put_user_asm_b((__force unsigned int)x, ptr);\ |
116 | break; \ | ||
116 | case 2: \ | 117 | case 2: \ |
117 | retval = __put_user_asm_w((unsigned int)x, ptr); break; \ | 118 | retval = __put_user_asm_w((__force unsigned int)x, ptr);\ |
119 | break; \ | ||
118 | case 4: \ | 120 | case 4: \ |
119 | retval = __put_user_asm_d((unsigned int)x, ptr); break; \ | 121 | retval = __put_user_asm_d((__force unsigned int)x, ptr);\ |
122 | break; \ | ||
120 | case 8: \ | 123 | case 8: \ |
121 | retval = __put_user_asm_l((unsigned long long)x, ptr); break; \ | 124 | retval = __put_user_asm_l((__force unsigned long long)x,\ |
125 | ptr); \ | ||
126 | break; \ | ||
122 | default: \ | 127 | default: \ |
123 | __put_user_bad(); \ | 128 | __put_user_bad(); \ |
124 | } \ | 129 | } \ |
@@ -135,7 +140,7 @@ extern long __get_user_bad(void); | |||
135 | ({ \ | 140 | ({ \ |
136 | long __gu_err, __gu_val; \ | 141 | long __gu_err, __gu_val; \ |
137 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ | 142 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ |
138 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 143 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
139 | __gu_err; \ | 144 | __gu_err; \ |
140 | }) | 145 | }) |
141 | 146 | ||
@@ -145,7 +150,7 @@ extern long __get_user_bad(void); | |||
145 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | 150 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ |
146 | if (access_ok(VERIFY_READ, __gu_addr, size)) \ | 151 | if (access_ok(VERIFY_READ, __gu_addr, size)) \ |
147 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ | 152 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ |
148 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 153 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
149 | __gu_err; \ | 154 | __gu_err; \ |
150 | }) | 155 | }) |
151 | 156 | ||
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index ab2e7a198a4c..a6bd07ca3d6c 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h | |||
@@ -192,7 +192,7 @@ struct __large_struct { | |||
192 | ({ \ | 192 | ({ \ |
193 | long __gu_err, __gu_val; \ | 193 | long __gu_err, __gu_val; \ |
194 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ | 194 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ |
195 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 195 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
196 | __gu_err; \ | 196 | __gu_err; \ |
197 | }) | 197 | }) |
198 | 198 | ||
@@ -202,7 +202,7 @@ struct __large_struct { | |||
202 | const __typeof__(*(ptr)) * __gu_addr = (ptr); \ | 202 | const __typeof__(*(ptr)) * __gu_addr = (ptr); \ |
203 | if (access_ok(VERIFY_READ, __gu_addr, size)) \ | 203 | if (access_ok(VERIFY_READ, __gu_addr, size)) \ |
204 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ | 204 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ |
205 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 205 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
206 | __gu_err; \ | 206 | __gu_err; \ |
207 | }) | 207 | }) |
208 | 208 | ||
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index a5cb070b54bf..0abdd4c607ed 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #define KERNEL_DS ((mm_segment_t){0}) | 17 | #define KERNEL_DS ((mm_segment_t){0}) |
18 | #define USER_DS ((mm_segment_t){1}) | 18 | #define USER_DS ((mm_segment_t){1}) |
19 | 19 | ||
20 | #define segment_eq(a,b) ((a).seg == (b).seg) | 20 | #define segment_eq(a, b) ((a).seg == (b).seg) |
21 | 21 | ||
22 | #define get_ds() (KERNEL_DS) | 22 | #define get_ds() (KERNEL_DS) |
23 | #define get_fs() (current_thread_info()->addr_limit) | 23 | #define get_fs() (current_thread_info()->addr_limit) |
@@ -42,14 +42,14 @@ static inline long access_ok(int type, const void __user * addr, | |||
42 | #if !defined(CONFIG_64BIT) | 42 | #if !defined(CONFIG_64BIT) |
43 | #define LDD_KERNEL(ptr) BUILD_BUG() | 43 | #define LDD_KERNEL(ptr) BUILD_BUG() |
44 | #define LDD_USER(ptr) BUILD_BUG() | 44 | #define LDD_USER(ptr) BUILD_BUG() |
45 | #define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr) | 45 | #define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr) |
46 | #define STD_USER(x, ptr) __put_user_asm64(x,ptr) | 46 | #define STD_USER(x, ptr) __put_user_asm64(x, ptr) |
47 | #define ASM_WORD_INSN ".word\t" | 47 | #define ASM_WORD_INSN ".word\t" |
48 | #else | 48 | #else |
49 | #define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr) | 49 | #define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr) |
50 | #define LDD_USER(ptr) __get_user_asm("ldd",ptr) | 50 | #define LDD_USER(ptr) __get_user_asm("ldd", ptr) |
51 | #define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr) | 51 | #define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr) |
52 | #define STD_USER(x, ptr) __put_user_asm("std",x,ptr) | 52 | #define STD_USER(x, ptr) __put_user_asm("std", x, ptr) |
53 | #define ASM_WORD_INSN ".dword\t" | 53 | #define ASM_WORD_INSN ".dword\t" |
54 | #endif | 54 | #endif |
55 | 55 | ||
@@ -80,68 +80,68 @@ struct exception_data { | |||
80 | unsigned long fault_addr; | 80 | unsigned long fault_addr; |
81 | }; | 81 | }; |
82 | 82 | ||
83 | #define __get_user(x,ptr) \ | 83 | #define __get_user(x, ptr) \ |
84 | ({ \ | 84 | ({ \ |
85 | register long __gu_err __asm__ ("r8") = 0; \ | 85 | register long __gu_err __asm__ ("r8") = 0; \ |
86 | register long __gu_val __asm__ ("r9") = 0; \ | 86 | register long __gu_val __asm__ ("r9") = 0; \ |
87 | \ | 87 | \ |
88 | if (segment_eq(get_fs(),KERNEL_DS)) { \ | 88 | if (segment_eq(get_fs(), KERNEL_DS)) { \ |
89 | switch (sizeof(*(ptr))) { \ | 89 | switch (sizeof(*(ptr))) { \ |
90 | case 1: __get_kernel_asm("ldb",ptr); break; \ | 90 | case 1: __get_kernel_asm("ldb", ptr); break; \ |
91 | case 2: __get_kernel_asm("ldh",ptr); break; \ | 91 | case 2: __get_kernel_asm("ldh", ptr); break; \ |
92 | case 4: __get_kernel_asm("ldw",ptr); break; \ | 92 | case 4: __get_kernel_asm("ldw", ptr); break; \ |
93 | case 8: LDD_KERNEL(ptr); break; \ | 93 | case 8: LDD_KERNEL(ptr); break; \ |
94 | default: BUILD_BUG(); break; \ | 94 | default: BUILD_BUG(); break; \ |
95 | } \ | 95 | } \ |
96 | } \ | 96 | } \ |
97 | else { \ | 97 | else { \ |
98 | switch (sizeof(*(ptr))) { \ | 98 | switch (sizeof(*(ptr))) { \ |
99 | case 1: __get_user_asm("ldb",ptr); break; \ | 99 | case 1: __get_user_asm("ldb", ptr); break; \ |
100 | case 2: __get_user_asm("ldh",ptr); break; \ | 100 | case 2: __get_user_asm("ldh", ptr); break; \ |
101 | case 4: __get_user_asm("ldw",ptr); break; \ | 101 | case 4: __get_user_asm("ldw", ptr); break; \ |
102 | case 8: LDD_USER(ptr); break; \ | 102 | case 8: LDD_USER(ptr); break; \ |
103 | default: BUILD_BUG(); break; \ | 103 | default: BUILD_BUG(); break; \ |
104 | } \ | 104 | } \ |
105 | } \ | 105 | } \ |
106 | \ | 106 | \ |
107 | (x) = (__typeof__(*(ptr))) __gu_val; \ | 107 | (x) = (__force __typeof__(*(ptr))) __gu_val; \ |
108 | __gu_err; \ | 108 | __gu_err; \ |
109 | }) | 109 | }) |
110 | 110 | ||
111 | #define __get_kernel_asm(ldx,ptr) \ | 111 | #define __get_kernel_asm(ldx, ptr) \ |
112 | __asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \ | 112 | __asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \ |
113 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ | 113 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ |
114 | : "=r"(__gu_val), "=r"(__gu_err) \ | 114 | : "=r"(__gu_val), "=r"(__gu_err) \ |
115 | : "r"(ptr), "1"(__gu_err) \ | 115 | : "r"(ptr), "1"(__gu_err) \ |
116 | : "r1"); | 116 | : "r1"); |
117 | 117 | ||
118 | #define __get_user_asm(ldx,ptr) \ | 118 | #define __get_user_asm(ldx, ptr) \ |
119 | __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \ | 119 | __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \ |
120 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\ | 120 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ |
121 | : "=r"(__gu_val), "=r"(__gu_err) \ | 121 | : "=r"(__gu_val), "=r"(__gu_err) \ |
122 | : "r"(ptr), "1"(__gu_err) \ | 122 | : "r"(ptr), "1"(__gu_err) \ |
123 | : "r1"); | 123 | : "r1"); |
124 | 124 | ||
125 | #define __put_user(x,ptr) \ | 125 | #define __put_user(x, ptr) \ |
126 | ({ \ | 126 | ({ \ |
127 | register long __pu_err __asm__ ("r8") = 0; \ | 127 | register long __pu_err __asm__ ("r8") = 0; \ |
128 | __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ | 128 | __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ |
129 | \ | 129 | \ |
130 | if (segment_eq(get_fs(),KERNEL_DS)) { \ | 130 | if (segment_eq(get_fs(), KERNEL_DS)) { \ |
131 | switch (sizeof(*(ptr))) { \ | 131 | switch (sizeof(*(ptr))) { \ |
132 | case 1: __put_kernel_asm("stb",__x,ptr); break; \ | 132 | case 1: __put_kernel_asm("stb", __x, ptr); break; \ |
133 | case 2: __put_kernel_asm("sth",__x,ptr); break; \ | 133 | case 2: __put_kernel_asm("sth", __x, ptr); break; \ |
134 | case 4: __put_kernel_asm("stw",__x,ptr); break; \ | 134 | case 4: __put_kernel_asm("stw", __x, ptr); break; \ |
135 | case 8: STD_KERNEL(__x,ptr); break; \ | 135 | case 8: STD_KERNEL(__x, ptr); break; \ |
136 | default: BUILD_BUG(); break; \ | 136 | default: BUILD_BUG(); break; \ |
137 | } \ | 137 | } \ |
138 | } \ | 138 | } \ |
139 | else { \ | 139 | else { \ |
140 | switch (sizeof(*(ptr))) { \ | 140 | switch (sizeof(*(ptr))) { \ |
141 | case 1: __put_user_asm("stb",__x,ptr); break; \ | 141 | case 1: __put_user_asm("stb", __x, ptr); break; \ |
142 | case 2: __put_user_asm("sth",__x,ptr); break; \ | 142 | case 2: __put_user_asm("sth", __x, ptr); break; \ |
143 | case 4: __put_user_asm("stw",__x,ptr); break; \ | 143 | case 4: __put_user_asm("stw", __x, ptr); break; \ |
144 | case 8: STD_USER(__x,ptr); break; \ | 144 | case 8: STD_USER(__x, ptr); break; \ |
145 | default: BUILD_BUG(); break; \ | 145 | default: BUILD_BUG(); break; \ |
146 | } \ | 146 | } \ |
147 | } \ | 147 | } \ |
@@ -159,18 +159,18 @@ struct exception_data { | |||
159 | * r8/r9 are already listed as err/val. | 159 | * r8/r9 are already listed as err/val. |
160 | */ | 160 | */ |
161 | 161 | ||
162 | #define __put_kernel_asm(stx,x,ptr) \ | 162 | #define __put_kernel_asm(stx, x, ptr) \ |
163 | __asm__ __volatile__ ( \ | 163 | __asm__ __volatile__ ( \ |
164 | "\n1:\t" stx "\t%2,0(%1)\n\t" \ | 164 | "\n1:\t" stx "\t%2,0(%1)\n\t" \ |
165 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\ | 165 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ |
166 | : "=r"(__pu_err) \ | 166 | : "=r"(__pu_err) \ |
167 | : "r"(ptr), "r"(x), "0"(__pu_err) \ | 167 | : "r"(ptr), "r"(x), "0"(__pu_err) \ |
168 | : "r1") | 168 | : "r1") |
169 | 169 | ||
170 | #define __put_user_asm(stx,x,ptr) \ | 170 | #define __put_user_asm(stx, x, ptr) \ |
171 | __asm__ __volatile__ ( \ | 171 | __asm__ __volatile__ ( \ |
172 | "\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \ | 172 | "\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \ |
173 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\ | 173 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ |
174 | : "=r"(__pu_err) \ | 174 | : "=r"(__pu_err) \ |
175 | : "r"(ptr), "r"(x), "0"(__pu_err) \ | 175 | : "r"(ptr), "r"(x), "0"(__pu_err) \ |
176 | : "r1") | 176 | : "r1") |
@@ -178,23 +178,23 @@ struct exception_data { | |||
178 | 178 | ||
179 | #if !defined(CONFIG_64BIT) | 179 | #if !defined(CONFIG_64BIT) |
180 | 180 | ||
181 | #define __put_kernel_asm64(__val,ptr) do { \ | 181 | #define __put_kernel_asm64(__val, ptr) do { \ |
182 | __asm__ __volatile__ ( \ | 182 | __asm__ __volatile__ ( \ |
183 | "\n1:\tstw %2,0(%1)" \ | 183 | "\n1:\tstw %2,0(%1)" \ |
184 | "\n2:\tstw %R2,4(%1)\n\t" \ | 184 | "\n2:\tstw %R2,4(%1)\n\t" \ |
185 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ | 185 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ |
186 | ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ | 186 | ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ |
187 | : "=r"(__pu_err) \ | 187 | : "=r"(__pu_err) \ |
188 | : "r"(ptr), "r"(__val), "0"(__pu_err) \ | 188 | : "r"(ptr), "r"(__val), "0"(__pu_err) \ |
189 | : "r1"); \ | 189 | : "r1"); \ |
190 | } while (0) | 190 | } while (0) |
191 | 191 | ||
192 | #define __put_user_asm64(__val,ptr) do { \ | 192 | #define __put_user_asm64(__val, ptr) do { \ |
193 | __asm__ __volatile__ ( \ | 193 | __asm__ __volatile__ ( \ |
194 | "\n1:\tstw %2,0(%%sr3,%1)" \ | 194 | "\n1:\tstw %2,0(%%sr3,%1)" \ |
195 | "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \ | 195 | "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \ |
196 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ | 196 | ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ |
197 | ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ | 197 | ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ |
198 | : "=r"(__pu_err) \ | 198 | : "=r"(__pu_err) \ |
199 | : "r"(ptr), "r"(__val), "0"(__pu_err) \ | 199 | : "r"(ptr), "r"(__val), "0"(__pu_err) \ |
200 | : "r1"); \ | 200 | : "r1"); \ |
@@ -211,8 +211,8 @@ extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long); | |||
211 | extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long); | 211 | extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long); |
212 | extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long); | 212 | extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long); |
213 | extern long strncpy_from_user(char *, const char __user *, long); | 213 | extern long strncpy_from_user(char *, const char __user *, long); |
214 | extern unsigned lclear_user(void __user *,unsigned long); | 214 | extern unsigned lclear_user(void __user *, unsigned long); |
215 | extern long lstrnlen_user(const char __user *,long); | 215 | extern long lstrnlen_user(const char __user *, long); |
216 | /* | 216 | /* |
217 | * Complex access routines -- macros | 217 | * Complex access routines -- macros |
218 | */ | 218 | */ |
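A usage sketch for strncpy_from_user(), declared in the hunk above (hypothetical code): it returns the length of the copied string excluding the trailing NUL, the full count if the source did not fit, or -EFAULT if the user pointer faults, so callers still need a truncation check.

#include <linux/errno.h>
#include <linux/uaccess.h>

static long grab_name(char *buf, long buflen, const char __user *uname)
{
        long len = strncpy_from_user(buf, uname, buflen);

        if (len < 0)
                return len;             /* -EFAULT */
        if (len == buflen)
                return -ENAMETOOLONG;   /* no NUL within buflen bytes */
        return len;
}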
diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h index 5e2725f4ac49..ff795d3a6909 100644 --- a/arch/sh/include/asm/segment.h +++ b/arch/sh/include/asm/segment.h | |||
@@ -23,7 +23,7 @@ typedef struct { | |||
23 | #define USER_DS KERNEL_DS | 23 | #define USER_DS KERNEL_DS |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #define segment_eq(a,b) ((a).seg == (b).seg) | 26 | #define segment_eq(a, b) ((a).seg == (b).seg) |
27 | 27 | ||
28 | #define get_ds() (KERNEL_DS) | 28 | #define get_ds() (KERNEL_DS) |
29 | 29 | ||
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 9486376605f4..a49635c51266 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h | |||
@@ -60,7 +60,7 @@ struct __large_struct { unsigned long buf[100]; }; | |||
60 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | 60 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ |
61 | __chk_user_ptr(ptr); \ | 61 | __chk_user_ptr(ptr); \ |
62 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ | 62 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ |
63 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 63 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
64 | __gu_err; \ | 64 | __gu_err; \ |
65 | }) | 65 | }) |
66 | 66 | ||
@@ -71,7 +71,7 @@ struct __large_struct { unsigned long buf[100]; }; | |||
71 | const __typeof__(*(ptr)) *__gu_addr = (ptr); \ | 71 | const __typeof__(*(ptr)) *__gu_addr = (ptr); \ |
72 | if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \ | 72 | if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \ |
73 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ | 73 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ |
74 | (x) = (__typeof__(*(ptr)))__gu_val; \ | 74 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
75 | __gu_err; \ | 75 | __gu_err; \ |
76 | }) | 76 | }) |
77 | 77 | ||
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index 2e07e0f40c6a..c01376c76b86 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h | |||
@@ -59,19 +59,19 @@ do { \ | |||
59 | switch (size) { \ | 59 | switch (size) { \ |
60 | case 1: \ | 60 | case 1: \ |
61 | retval = __put_user_asm_b((void *)&x, \ | 61 | retval = __put_user_asm_b((void *)&x, \ |
62 | (long)ptr); \ | 62 | (__force long)ptr); \ |
63 | break; \ | 63 | break; \ |
64 | case 2: \ | 64 | case 2: \ |
65 | retval = __put_user_asm_w((void *)&x, \ | 65 | retval = __put_user_asm_w((void *)&x, \ |
66 | (long)ptr); \ | 66 | (__force long)ptr); \ |
67 | break; \ | 67 | break; \ |
68 | case 4: \ | 68 | case 4: \ |
69 | retval = __put_user_asm_l((void *)&x, \ | 69 | retval = __put_user_asm_l((void *)&x, \ |
70 | (long)ptr); \ | 70 | (__force long)ptr); \ |
71 | break; \ | 71 | break; \ |
72 | case 8: \ | 72 | case 8: \ |
73 | retval = __put_user_asm_q((void *)&x, \ | 73 | retval = __put_user_asm_q((void *)&x, \ |
74 | (long)ptr); \ | 74 | (__force long)ptr); \ |
75 | break; \ | 75 | break; \ |
76 | default: \ | 76 | default: \ |
77 | __put_user_unknown(); \ | 77 | __put_user_unknown(); \ |
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 9634d086fc56..64ee103dc29d 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h | |||
@@ -37,7 +37,7 @@ | |||
37 | #define get_fs() (current->thread.current_ds) | 37 | #define get_fs() (current->thread.current_ds) |
38 | #define set_fs(val) ((current->thread.current_ds) = (val)) | 38 | #define set_fs(val) ((current->thread.current_ds) = (val)) |
39 | 39 | ||
40 | #define segment_eq(a,b) ((a).seg == (b).seg) | 40 | #define segment_eq(a, b) ((a).seg == (b).seg) |
41 | 41 | ||
42 | /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test | 42 | /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test |
43 | * can be fairly lightweight. | 43 | * can be fairly lightweight. |
@@ -46,8 +46,8 @@ | |||
46 | */ | 46 | */ |
47 | #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) | 47 | #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) |
48 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) | 48 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) |
49 | #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size))) | 49 | #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) |
50 | #define access_ok(type, addr, size) \ | 50 | #define access_ok(type, addr, size) \ |
51 | ({ (void)(type); __access_ok((unsigned long)(addr), size); }) | 51 | ({ (void)(type); __access_ok((unsigned long)(addr), size); }) |
52 | 52 | ||
53 | /* | 53 | /* |
@@ -91,158 +91,221 @@ void __ret_efault(void); | |||
91 | * of a performance impact. Thus we have a few rather ugly macros here, | 91 | * of a performance impact. Thus we have a few rather ugly macros here, |
92 | * and hide all the ugliness from the user. | 92 | * and hide all the ugliness from the user. |
93 | */ | 93 | */ |
94 | #define put_user(x,ptr) ({ \ | 94 | #define put_user(x, ptr) ({ \ |
95 | unsigned long __pu_addr = (unsigned long)(ptr); \ | 95 | unsigned long __pu_addr = (unsigned long)(ptr); \ |
96 | __chk_user_ptr(ptr); \ | 96 | __chk_user_ptr(ptr); \ |
97 | __put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); }) | 97 | __put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \ |
98 | 98 | }) | |
99 | #define get_user(x,ptr) ({ \ | 99 | |
100 | unsigned long __gu_addr = (unsigned long)(ptr); \ | 100 | #define get_user(x, ptr) ({ \ |
101 | __chk_user_ptr(ptr); \ | 101 | unsigned long __gu_addr = (unsigned long)(ptr); \ |
102 | __get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); }) | 102 | __chk_user_ptr(ptr); \ |
103 | __get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \ | ||
104 | }) | ||
103 | 105 | ||
104 | /* | 106 | /* |
105 | * The "__xxx" versions do not do address space checking, useful when | 107 | * The "__xxx" versions do not do address space checking, useful when |
106 | * doing multiple accesses to the same area (the user has to do the | 108 | * doing multiple accesses to the same area (the user has to do the |
107 | * checks by hand with "access_ok()") | 109 | * checks by hand with "access_ok()") |
108 | */ | 110 | */ |
109 | #define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | 111 | #define __put_user(x, ptr) \ |
110 | #define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr))) | 112 | __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) |
113 | #define __get_user(x, ptr) \ | ||
114 | __get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr))) | ||
111 | 115 | ||
112 | struct __large_struct { unsigned long buf[100]; }; | 116 | struct __large_struct { unsigned long buf[100]; }; |
113 | #define __m(x) ((struct __large_struct __user *)(x)) | 117 | #define __m(x) ((struct __large_struct __user *)(x)) |
114 | 118 | ||
115 | #define __put_user_check(x,addr,size) ({ \ | 119 | #define __put_user_check(x, addr, size) ({ \ |
116 | register int __pu_ret; \ | 120 | register int __pu_ret; \ |
117 | if (__access_ok(addr,size)) { \ | 121 | if (__access_ok(addr, size)) { \ |
118 | switch (size) { \ | 122 | switch (size) { \ |
119 | case 1: __put_user_asm(x,b,addr,__pu_ret); break; \ | 123 | case 1: \ |
120 | case 2: __put_user_asm(x,h,addr,__pu_ret); break; \ | 124 | __put_user_asm(x, b, addr, __pu_ret); \ |
121 | case 4: __put_user_asm(x,,addr,__pu_ret); break; \ | 125 | break; \ |
122 | case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ | 126 | case 2: \ |
123 | default: __pu_ret = __put_user_bad(); break; \ | 127 | __put_user_asm(x, h, addr, __pu_ret); \ |
124 | } } else { __pu_ret = -EFAULT; } __pu_ret; }) | 128 | break; \ |
125 | 129 | case 4: \ | |
126 | #define __put_user_nocheck(x,addr,size) ({ \ | 130 | __put_user_asm(x, , addr, __pu_ret); \ |
127 | register int __pu_ret; \ | 131 | break; \ |
128 | switch (size) { \ | 132 | case 8: \ |
129 | case 1: __put_user_asm(x,b,addr,__pu_ret); break; \ | 133 | __put_user_asm(x, d, addr, __pu_ret); \ |
130 | case 2: __put_user_asm(x,h,addr,__pu_ret); break; \ | 134 | break; \ |
131 | case 4: __put_user_asm(x,,addr,__pu_ret); break; \ | 135 | default: \ |
132 | case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ | 136 | __pu_ret = __put_user_bad(); \ |
133 | default: __pu_ret = __put_user_bad(); break; \ | 137 | break; \ |
134 | } __pu_ret; }) | 138 | } \ |
135 | 139 | } else { \ | |
136 | #define __put_user_asm(x,size,addr,ret) \ | 140 | __pu_ret = -EFAULT; \ |
141 | } \ | ||
142 | __pu_ret; \ | ||
143 | }) | ||
144 | |||
145 | #define __put_user_nocheck(x, addr, size) ({ \ | ||
146 | register int __pu_ret; \ | ||
147 | switch (size) { \ | ||
148 | case 1: __put_user_asm(x, b, addr, __pu_ret); break; \ | ||
149 | case 2: __put_user_asm(x, h, addr, __pu_ret); break; \ | ||
150 | case 4: __put_user_asm(x, , addr, __pu_ret); break; \ | ||
151 | case 8: __put_user_asm(x, d, addr, __pu_ret); break; \ | ||
152 | default: __pu_ret = __put_user_bad(); break; \ | ||
153 | } \ | ||
154 | __pu_ret; \ | ||
155 | }) | ||
156 | |||
157 | #define __put_user_asm(x, size, addr, ret) \ | ||
137 | __asm__ __volatile__( \ | 158 | __asm__ __volatile__( \ |
138 | "/* Put user asm, inline. */\n" \ | 159 | "/* Put user asm, inline. */\n" \ |
139 | "1:\t" "st"#size " %1, %2\n\t" \ | 160 | "1:\t" "st"#size " %1, %2\n\t" \ |
140 | "clr %0\n" \ | 161 | "clr %0\n" \ |
141 | "2:\n\n\t" \ | 162 | "2:\n\n\t" \ |
142 | ".section .fixup,#alloc,#execinstr\n\t" \ | 163 | ".section .fixup,#alloc,#execinstr\n\t" \ |
143 | ".align 4\n" \ | 164 | ".align 4\n" \ |
144 | "3:\n\t" \ | 165 | "3:\n\t" \ |
145 | "b 2b\n\t" \ | 166 | "b 2b\n\t" \ |
146 | " mov %3, %0\n\t" \ | 167 | " mov %3, %0\n\t" \ |
147 | ".previous\n\n\t" \ | 168 | ".previous\n\n\t" \ |
148 | ".section __ex_table,#alloc\n\t" \ | 169 | ".section __ex_table,#alloc\n\t" \ |
149 | ".align 4\n\t" \ | 170 | ".align 4\n\t" \ |
150 | ".word 1b, 3b\n\t" \ | 171 | ".word 1b, 3b\n\t" \ |
151 | ".previous\n\n\t" \ | 172 | ".previous\n\n\t" \ |
152 | : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ | 173 | : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ |
153 | "i" (-EFAULT)) | 174 | "i" (-EFAULT)) |
154 | 175 | ||
155 | int __put_user_bad(void); | 176 | int __put_user_bad(void); |
156 | 177 | ||
157 | #define __get_user_check(x,addr,size,type) ({ \ | 178 | #define __get_user_check(x, addr, size, type) ({ \ |
158 | register int __gu_ret; \ | 179 | register int __gu_ret; \ |
159 | register unsigned long __gu_val; \ | 180 | register unsigned long __gu_val; \ |
160 | if (__access_ok(addr,size)) { \ | 181 | if (__access_ok(addr, size)) { \ |
161 | switch (size) { \ | 182 | switch (size) { \ |
162 | case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ | 183 | case 1: \ |
163 | case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ | 184 | __get_user_asm(__gu_val, ub, addr, __gu_ret); \ |
164 | case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \ | 185 | break; \ |
165 | case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \ | 186 | case 2: \ |
166 | default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ | 187 | __get_user_asm(__gu_val, uh, addr, __gu_ret); \ |
167 | } } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; }) | 188 | break; \ |
168 | 189 | case 4: \ | |
169 | #define __get_user_check_ret(x,addr,size,type,retval) ({ \ | 190 | __get_user_asm(__gu_val, , addr, __gu_ret); \ |
170 | register unsigned long __gu_val __asm__ ("l1"); \ | 191 | break; \ |
171 | if (__access_ok(addr,size)) { \ | 192 | case 8: \ |
172 | switch (size) { \ | 193 | __get_user_asm(__gu_val, d, addr, __gu_ret); \ |
173 | case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ | 194 | break; \ |
174 | case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ | 195 | default: \ |
175 | case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \ | 196 | __gu_val = 0; \ |
176 | case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \ | 197 | __gu_ret = __get_user_bad(); \ |
177 | default: if (__get_user_bad()) return retval; \ | 198 | break; \ |
178 | } x = (type) __gu_val; } else return retval; }) | 199 | } \ |
179 | 200 | } else { \ | |
180 | #define __get_user_nocheck(x,addr,size,type) ({ \ | 201 | __gu_val = 0; \ |
181 | register int __gu_ret; \ | 202 | __gu_ret = -EFAULT; \ |
182 | register unsigned long __gu_val; \ | 203 | } \ |
183 | switch (size) { \ | 204 | x = (__force type) __gu_val; \ |
184 | case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ | 205 | __gu_ret; \ |
185 | case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ | 206 | }) |
186 | case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \ | 207 | |
187 | case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \ | 208 | #define __get_user_check_ret(x, addr, size, type, retval) ({ \ |
188 | default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ | 209 | register unsigned long __gu_val __asm__ ("l1"); \ |
189 | } x = (type) __gu_val; __gu_ret; }) | 210 | if (__access_ok(addr, size)) { \ |
190 | 211 | switch (size) { \ | |
191 | #define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \ | 212 | case 1: \ |
192 | register unsigned long __gu_val __asm__ ("l1"); \ | 213 | __get_user_asm_ret(__gu_val, ub, addr, retval); \ |
193 | switch (size) { \ | 214 | break; \ |
194 | case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ | 215 | case 2: \ |
195 | case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ | 216 | __get_user_asm_ret(__gu_val, uh, addr, retval); \ |
196 | case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \ | 217 | break; \ |
197 | case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \ | 218 | case 4: \ |
198 | default: if (__get_user_bad()) return retval; \ | 219 | __get_user_asm_ret(__gu_val, , addr, retval); \ |
199 | } x = (type) __gu_val; }) | 220 | break; \ |
200 | 221 | case 8: \ | |
201 | #define __get_user_asm(x,size,addr,ret) \ | 222 | __get_user_asm_ret(__gu_val, d, addr, retval); \ |
223 | break; \ | ||
224 | default: \ | ||
225 | if (__get_user_bad()) \ | ||
226 | return retval; \ | ||
227 | } \ | ||
228 | x = (__force type) __gu_val; \ | ||
229 | } else \ | ||
230 | return retval; \ | ||
231 | }) | ||
232 | |||
233 | #define __get_user_nocheck(x, addr, size, type) ({ \ | ||
234 | register int __gu_ret; \ | ||
235 | register unsigned long __gu_val; \ | ||
236 | switch (size) { \ | ||
237 | case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \ | ||
238 | case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \ | ||
239 | case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break; \ | ||
240 | case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break; \ | ||
241 | default: \ | ||
242 | __gu_val = 0; \ | ||
243 | __gu_ret = __get_user_bad(); \ | ||
244 | break; \ | ||
245 | } \ | ||
246 | x = (__force type) __gu_val; \ | ||
247 | __gu_ret; \ | ||
248 | }) | ||
249 | |||
250 | #define __get_user_nocheck_ret(x, addr, size, type, retval) ({ \ | ||
251 | register unsigned long __gu_val __asm__ ("l1"); \ | ||
252 | switch (size) { \ | ||
253 | case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \ | ||
254 | case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \ | ||
255 | case 4: __get_user_asm_ret(__gu_val, , addr, retval); break; \ | ||
256 | case 8: __get_user_asm_ret(__gu_val, d, addr, retval); break; \ | ||
257 | default: \ | ||
258 | if (__get_user_bad()) \ | ||
259 | return retval; \ | ||
260 | } \ | ||
261 | x = (__force type) __gu_val; \ | ||
262 | }) | ||
263 | |||
264 | #define __get_user_asm(x, size, addr, ret) \ | ||
202 | __asm__ __volatile__( \ | 265 | __asm__ __volatile__( \ |
203 | "/* Get user asm, inline. */\n" \ | 266 | "/* Get user asm, inline. */\n" \ |
204 | "1:\t" "ld"#size " %2, %1\n\t" \ | 267 | "1:\t" "ld"#size " %2, %1\n\t" \ |
205 | "clr %0\n" \ | 268 | "clr %0\n" \ |
206 | "2:\n\n\t" \ | 269 | "2:\n\n\t" \ |
207 | ".section .fixup,#alloc,#execinstr\n\t" \ | 270 | ".section .fixup,#alloc,#execinstr\n\t" \ |
208 | ".align 4\n" \ | 271 | ".align 4\n" \ |
209 | "3:\n\t" \ | 272 | "3:\n\t" \ |
210 | "clr %1\n\t" \ | 273 | "clr %1\n\t" \ |
211 | "b 2b\n\t" \ | 274 | "b 2b\n\t" \ |
212 | " mov %3, %0\n\n\t" \ | 275 | " mov %3, %0\n\n\t" \ |
213 | ".previous\n\t" \ | 276 | ".previous\n\t" \ |
214 | ".section __ex_table,#alloc\n\t" \ | 277 | ".section __ex_table,#alloc\n\t" \ |
215 | ".align 4\n\t" \ | 278 | ".align 4\n\t" \ |
216 | ".word 1b, 3b\n\n\t" \ | 279 | ".word 1b, 3b\n\n\t" \ |
217 | ".previous\n\t" \ | 280 | ".previous\n\t" \ |
218 | : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \ | 281 | : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \ |
219 | "i" (-EFAULT)) | 282 | "i" (-EFAULT)) |
220 | 283 | ||
221 | #define __get_user_asm_ret(x,size,addr,retval) \ | 284 | #define __get_user_asm_ret(x, size, addr, retval) \ |
222 | if (__builtin_constant_p(retval) && retval == -EFAULT) \ | 285 | if (__builtin_constant_p(retval) && retval == -EFAULT) \ |
223 | __asm__ __volatile__( \ | 286 | __asm__ __volatile__( \ |
224 | "/* Get user asm ret, inline. */\n" \ | 287 | "/* Get user asm ret, inline. */\n" \ |
225 | "1:\t" "ld"#size " %1, %0\n\n\t" \ | 288 | "1:\t" "ld"#size " %1, %0\n\n\t" \ |
226 | ".section __ex_table,#alloc\n\t" \ | 289 | ".section __ex_table,#alloc\n\t" \ |
227 | ".align 4\n\t" \ | 290 | ".align 4\n\t" \ |
228 | ".word 1b,__ret_efault\n\n\t" \ | 291 | ".word 1b,__ret_efault\n\n\t" \ |
229 | ".previous\n\t" \ | 292 | ".previous\n\t" \ |
230 | : "=&r" (x) : "m" (*__m(addr))); \ | 293 | : "=&r" (x) : "m" (*__m(addr))); \ |
231 | else \ | 294 | else \ |
232 | __asm__ __volatile__( \ | 295 | __asm__ __volatile__( \ |
233 | "/* Get user asm ret, inline. */\n" \ | 296 | "/* Get user asm ret, inline. */\n" \ |
234 | "1:\t" "ld"#size " %1, %0\n\n\t" \ | 297 | "1:\t" "ld"#size " %1, %0\n\n\t" \ |
235 | ".section .fixup,#alloc,#execinstr\n\t" \ | 298 | ".section .fixup,#alloc,#execinstr\n\t" \ |
236 | ".align 4\n" \ | 299 | ".align 4\n" \ |
237 | "3:\n\t" \ | 300 | "3:\n\t" \ |
238 | "ret\n\t" \ | 301 | "ret\n\t" \ |
239 | " restore %%g0, %2, %%o0\n\n\t" \ | 302 | " restore %%g0, %2, %%o0\n\n\t" \ |
240 | ".previous\n\t" \ | 303 | ".previous\n\t" \ |
241 | ".section __ex_table,#alloc\n\t" \ | 304 | ".section __ex_table,#alloc\n\t" \ |
242 | ".align 4\n\t" \ | 305 | ".align 4\n\t" \ |
243 | ".word 1b, 3b\n\n\t" \ | 306 | ".word 1b, 3b\n\n\t" \ |
244 | ".previous\n\t" \ | 307 | ".previous\n\t" \ |
245 | : "=&r" (x) : "m" (*__m(addr)), "i" (retval)) | 308 | : "=&r" (x) : "m" (*__m(addr)), "i" (retval)) |
246 | 309 | ||
247 | int __get_user_bad(void); | 310 | int __get_user_bad(void); |
248 | 311 | ||
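As the comment near the top of this file's hunks explains, the double-underscore variants skip the address-space check, so the caller performs a single access_ok() and then issues the cheaper __get_user()/__put_user() accesses itself. A hypothetical usage sketch (era-appropriate three-argument access_ok(), as used elsewhere in this patch):

#include <linux/errno.h>
#include <linux/uaccess.h>

struct pair {
        int x;
        int y;
};

static int read_pair(struct pair *out, const struct pair __user *in)
{
        if (!access_ok(VERIFY_READ, in, sizeof(*in)))
                return -EFAULT;
        if (__get_user(out->x, &in->x))
                return -EFAULT;
        return __get_user(out->y, &in->y);
}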
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index c990a5e577f0..a35194b7dba0 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h | |||
@@ -41,11 +41,11 @@ | |||
41 | #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) | 41 | #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) |
42 | #define get_ds() (KERNEL_DS) | 42 | #define get_ds() (KERNEL_DS) |
43 | 43 | ||
44 | #define segment_eq(a,b) ((a).seg == (b).seg) | 44 | #define segment_eq(a, b) ((a).seg == (b).seg) |
45 | 45 | ||
46 | #define set_fs(val) \ | 46 | #define set_fs(val) \ |
47 | do { \ | 47 | do { \ |
48 | current_thread_info()->current_ds =(val).seg; \ | 48 | current_thread_info()->current_ds = (val).seg; \ |
49 | __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ | 49 | __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ |
50 | } while(0) | 50 | } while(0) |
51 | 51 | ||
@@ -88,121 +88,135 @@ void __retl_efault(void); | |||
88 | * of a performance impact. Thus we have a few rather ugly macros here, | 88 | * of a performance impact. Thus we have a few rather ugly macros here, |
89 | * and hide all the ugliness from the user. | 89 | * and hide all the ugliness from the user. |
90 | */ | 90 | */ |
91 | #define put_user(x,ptr) ({ \ | 91 | #define put_user(x, ptr) ({ \ |
92 | unsigned long __pu_addr = (unsigned long)(ptr); \ | 92 | unsigned long __pu_addr = (unsigned long)(ptr); \ |
93 | __chk_user_ptr(ptr); \ | 93 | __chk_user_ptr(ptr); \ |
94 | __put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); }) | 94 | __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\ |
95 | }) | ||
95 | 96 | ||
96 | #define get_user(x,ptr) ({ \ | 97 | #define get_user(x, ptr) ({ \ |
97 | unsigned long __gu_addr = (unsigned long)(ptr); \ | 98 | unsigned long __gu_addr = (unsigned long)(ptr); \ |
98 | __chk_user_ptr(ptr); \ | 99 | __chk_user_ptr(ptr); \ |
99 | __get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); }) | 100 | __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\ |
101 | }) | ||
100 | 102 | ||
101 | #define __put_user(x,ptr) put_user(x,ptr) | 103 | #define __put_user(x, ptr) put_user(x, ptr) |
102 | #define __get_user(x,ptr) get_user(x,ptr) | 104 | #define __get_user(x, ptr) get_user(x, ptr) |
103 | 105 | ||
104 | struct __large_struct { unsigned long buf[100]; }; | 106 | struct __large_struct { unsigned long buf[100]; }; |
105 | #define __m(x) ((struct __large_struct *)(x)) | 107 | #define __m(x) ((struct __large_struct *)(x)) |
106 | 108 | ||
107 | #define __put_user_nocheck(data,addr,size) ({ \ | 109 | #define __put_user_nocheck(data, addr, size) ({ \ |
108 | register int __pu_ret; \ | 110 | register int __pu_ret; \ |
109 | switch (size) { \ | 111 | switch (size) { \ |
110 | case 1: __put_user_asm(data,b,addr,__pu_ret); break; \ | 112 | case 1: __put_user_asm(data, b, addr, __pu_ret); break; \ |
111 | case 2: __put_user_asm(data,h,addr,__pu_ret); break; \ | 113 | case 2: __put_user_asm(data, h, addr, __pu_ret); break; \ |
112 | case 4: __put_user_asm(data,w,addr,__pu_ret); break; \ | 114 | case 4: __put_user_asm(data, w, addr, __pu_ret); break; \ |
113 | case 8: __put_user_asm(data,x,addr,__pu_ret); break; \ | 115 | case 8: __put_user_asm(data, x, addr, __pu_ret); break; \ |
114 | default: __pu_ret = __put_user_bad(); break; \ | 116 | default: __pu_ret = __put_user_bad(); break; \ |
115 | } __pu_ret; }) | 117 | } \ |
116 | 118 | __pu_ret; \ | |
117 | #define __put_user_asm(x,size,addr,ret) \ | 119 | }) |
120 | |||
121 | #define __put_user_asm(x, size, addr, ret) \ | ||
118 | __asm__ __volatile__( \ | 122 | __asm__ __volatile__( \ |
119 | "/* Put user asm, inline. */\n" \ | 123 | "/* Put user asm, inline. */\n" \ |
120 | "1:\t" "st"#size "a %1, [%2] %%asi\n\t" \ | 124 | "1:\t" "st"#size "a %1, [%2] %%asi\n\t" \ |
121 | "clr %0\n" \ | 125 | "clr %0\n" \ |
122 | "2:\n\n\t" \ | 126 | "2:\n\n\t" \ |
123 | ".section .fixup,#alloc,#execinstr\n\t" \ | 127 | ".section .fixup,#alloc,#execinstr\n\t" \ |
124 | ".align 4\n" \ | 128 | ".align 4\n" \ |
125 | "3:\n\t" \ | 129 | "3:\n\t" \ |
126 | "sethi %%hi(2b), %0\n\t" \ | 130 | "sethi %%hi(2b), %0\n\t" \ |
127 | "jmpl %0 + %%lo(2b), %%g0\n\t" \ | 131 | "jmpl %0 + %%lo(2b), %%g0\n\t" \ |
128 | " mov %3, %0\n\n\t" \ | 132 | " mov %3, %0\n\n\t" \ |
129 | ".previous\n\t" \ | 133 | ".previous\n\t" \ |
130 | ".section __ex_table,\"a\"\n\t" \ | 134 | ".section __ex_table,\"a\"\n\t" \ |
131 | ".align 4\n\t" \ | 135 | ".align 4\n\t" \ |
132 | ".word 1b, 3b\n\t" \ | 136 | ".word 1b, 3b\n\t" \ |
133 | ".previous\n\n\t" \ | 137 | ".previous\n\n\t" \ |
134 | : "=r" (ret) : "r" (x), "r" (__m(addr)), \ | 138 | : "=r" (ret) : "r" (x), "r" (__m(addr)), \ |
135 | "i" (-EFAULT)) | 139 | "i" (-EFAULT)) |
136 | 140 | ||
137 | int __put_user_bad(void); | 141 | int __put_user_bad(void); |
138 | 142 | ||
139 | #define __get_user_nocheck(data,addr,size,type) ({ \ | 143 | #define __get_user_nocheck(data, addr, size, type) ({ \ |
140 | register int __gu_ret; \ | 144 | register int __gu_ret; \ |
141 | register unsigned long __gu_val; \ | 145 | register unsigned long __gu_val; \ |
142 | switch (size) { \ | 146 | switch (size) { \ |
143 | case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ | 147 | case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \ |
144 | case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ | 148 | case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \ |
145 | case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \ | 149 | case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \ |
146 | case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \ | 150 | case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \ |
147 | default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ | 151 | default: \ |
148 | } data = (type) __gu_val; __gu_ret; }) | 152 | __gu_val = 0; \ |
149 | 153 | __gu_ret = __get_user_bad(); \ | |
150 | #define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \ | 154 | break; \ |
151 | register unsigned long __gu_val __asm__ ("l1"); \ | 155 | } \ |
152 | switch (size) { \ | 156 | data = (__force type) __gu_val; \ |
153 | case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ | 157 | __gu_ret; \ |
154 | case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ | 158 | }) |
155 | case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \ | 159 | |
156 | case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \ | 160 | #define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \ |
157 | default: if (__get_user_bad()) return retval; \ | 161 | register unsigned long __gu_val __asm__ ("l1"); \ |
158 | } data = (type) __gu_val; }) | 162 | switch (size) { \ |
159 | 163 | case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \ | |
160 | #define __get_user_asm(x,size,addr,ret) \ | 164 | case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \ |
165 | case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break; \ | ||
166 | case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break; \ | ||
167 | default: \ | ||
168 | if (__get_user_bad()) \ | ||
169 | return retval; \ | ||
170 | } \ | ||
171 | data = (__force type) __gu_val; \ | ||
172 | }) | ||
173 | |||
174 | #define __get_user_asm(x, size, addr, ret) \ | ||
161 | __asm__ __volatile__( \ | 175 | __asm__ __volatile__( \ |
162 | "/* Get user asm, inline. */\n" \ | 176 | "/* Get user asm, inline. */\n" \ |
163 | "1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \ | 177 | "1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \ |
164 | "clr %0\n" \ | 178 | "clr %0\n" \ |
165 | "2:\n\n\t" \ | 179 | "2:\n\n\t" \ |
166 | ".section .fixup,#alloc,#execinstr\n\t" \ | 180 | ".section .fixup,#alloc,#execinstr\n\t" \ |
167 | ".align 4\n" \ | 181 | ".align 4\n" \ |
168 | "3:\n\t" \ | 182 | "3:\n\t" \ |
169 | "sethi %%hi(2b), %0\n\t" \ | 183 | "sethi %%hi(2b), %0\n\t" \ |
170 | "clr %1\n\t" \ | 184 | "clr %1\n\t" \ |
171 | "jmpl %0 + %%lo(2b), %%g0\n\t" \ | 185 | "jmpl %0 + %%lo(2b), %%g0\n\t" \ |
172 | " mov %3, %0\n\n\t" \ | 186 | " mov %3, %0\n\n\t" \ |
173 | ".previous\n\t" \ | 187 | ".previous\n\t" \ |
174 | ".section __ex_table,\"a\"\n\t" \ | 188 | ".section __ex_table,\"a\"\n\t" \ |
175 | ".align 4\n\t" \ | 189 | ".align 4\n\t" \ |
176 | ".word 1b, 3b\n\n\t" \ | 190 | ".word 1b, 3b\n\n\t" \ |
177 | ".previous\n\t" \ | 191 | ".previous\n\t" \ |
178 | : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ | 192 | : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ |
179 | "i" (-EFAULT)) | 193 | "i" (-EFAULT)) |
180 | 194 | ||
181 | #define __get_user_asm_ret(x,size,addr,retval) \ | 195 | #define __get_user_asm_ret(x, size, addr, retval) \ |
182 | if (__builtin_constant_p(retval) && retval == -EFAULT) \ | 196 | if (__builtin_constant_p(retval) && retval == -EFAULT) \ |
183 | __asm__ __volatile__( \ | 197 | __asm__ __volatile__( \ |
184 | "/* Get user asm ret, inline. */\n" \ | 198 | "/* Get user asm ret, inline. */\n" \ |
185 | "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ | 199 | "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ |
186 | ".section __ex_table,\"a\"\n\t" \ | 200 | ".section __ex_table,\"a\"\n\t" \ |
187 | ".align 4\n\t" \ | 201 | ".align 4\n\t" \ |
188 | ".word 1b,__ret_efault\n\n\t" \ | 202 | ".word 1b,__ret_efault\n\n\t" \ |
189 | ".previous\n\t" \ | 203 | ".previous\n\t" \ |
190 | : "=r" (x) : "r" (__m(addr))); \ | 204 | : "=r" (x) : "r" (__m(addr))); \ |
191 | else \ | 205 | else \ |
192 | __asm__ __volatile__( \ | 206 | __asm__ __volatile__( \ |
193 | "/* Get user asm ret, inline. */\n" \ | 207 | "/* Get user asm ret, inline. */\n" \ |
194 | "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ | 208 | "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ |
195 | ".section .fixup,#alloc,#execinstr\n\t" \ | 209 | ".section .fixup,#alloc,#execinstr\n\t" \ |
196 | ".align 4\n" \ | 210 | ".align 4\n" \ |
197 | "3:\n\t" \ | 211 | "3:\n\t" \ |
198 | "ret\n\t" \ | 212 | "ret\n\t" \ |
199 | " restore %%g0, %2, %%o0\n\n\t" \ | 213 | " restore %%g0, %2, %%o0\n\n\t" \ |
200 | ".previous\n\t" \ | 214 | ".previous\n\t" \ |
201 | ".section __ex_table,\"a\"\n\t" \ | 215 | ".section __ex_table,\"a\"\n\t" \ |
202 | ".align 4\n\t" \ | 216 | ".align 4\n\t" \ |
203 | ".word 1b, 3b\n\n\t" \ | 217 | ".word 1b, 3b\n\n\t" \ |
204 | ".previous\n\t" \ | 218 | ".previous\n\t" \ |
205 | : "=r" (x) : "r" (__m(addr)), "i" (retval)) | 219 | : "=r" (x) : "r" (__m(addr)), "i" (retval)) |
206 | 220 | ||
207 | int __get_user_bad(void); | 221 | int __get_user_bad(void); |
208 | 222 | ||
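For context on the *_ret variants just above: when the user load at label 1 faults, the exception-table entry sends control either to __ret_efault or to a fixup stub that returns retval straight out of the enclosing function, instead of writing an error code into a variable. Callers of the ordinary get_user() path see the error as a return value; a minimal caller-side sketch, with a hypothetical helper name that is not part of this patch:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical helper: fetch one u32 from user space or fail cleanly. */
static int read_u32_from_user(u32 __user *uptr, u32 *out)
{
	u32 val;

	/* get_user() returns 0 on success and -EFAULT if the access faults. */
	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}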
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 0d592e0a5b84..ace9dec050b1 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -179,7 +179,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) | |||
179 | asm volatile("call __get_user_%P3" \ | 179 | asm volatile("call __get_user_%P3" \ |
180 | : "=a" (__ret_gu), "=r" (__val_gu) \ | 180 | : "=a" (__ret_gu), "=r" (__val_gu) \ |
181 | : "0" (ptr), "i" (sizeof(*(ptr)))); \ | 181 | : "0" (ptr), "i" (sizeof(*(ptr)))); \ |
182 | (x) = (__typeof__(*(ptr))) __val_gu; \ | 182 | (x) = (__force __typeof__(*(ptr))) __val_gu; \ |
183 | __ret_gu; \ | 183 | __ret_gu; \ |
184 | }) | 184 | }) |
185 | 185 | ||
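The one substantive-looking change in this x86 hunk is the added __force, and it affects only sparse checking, not generated code: get_user() reads the user value into a plain integer and then casts it back to the destination type, and when that type is a sparse __bitwise type (such as __le32) or an annotated pointer, the plain cast draws a sparse warning. __force marks the conversion as intentional. A hedged illustration of the kind of caller that benefits, with a hypothetical helper name:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>

/* Hypothetical: read a little-endian value stored by user space. */
static int read_le32(const __le32 __user *uptr, u32 *out)
{
	__le32 raw;	/* __bitwise type; sparse tracks its endianness */

	if (get_user(raw, uptr))
		return -EFAULT;

	*out = le32_to_cpu(raw);
	return 0;
}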
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 876eb380aa26..147b26ed9c91 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h | |||
@@ -182,13 +182,13 @@ | |||
182 | #define get_fs() (current->thread.current_ds) | 182 | #define get_fs() (current->thread.current_ds) |
183 | #define set_fs(val) (current->thread.current_ds = (val)) | 183 | #define set_fs(val) (current->thread.current_ds = (val)) |
184 | 184 | ||
185 | #define segment_eq(a,b) ((a).seg == (b).seg) | 185 | #define segment_eq(a, b) ((a).seg == (b).seg) |
186 | 186 | ||
187 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) | 187 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) |
188 | #define __user_ok(addr,size) \ | 188 | #define __user_ok(addr, size) \ |
189 | (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) | 189 | (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) |
190 | #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) | 190 | #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) |
191 | #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) | 191 | #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) |
192 | 192 | ||
193 | /* | 193 | /* |
194 | * These are the main single-value transfer routines. They | 194 | * These are the main single-value transfer routines. They |
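__user_ok() above is written as (size <= TASK_SIZE) && (addr <= TASK_SIZE - size) rather than the more obvious addr + size <= TASK_SIZE: the subtraction form cannot wrap around, while the addition form can be fooled by a huge size. A standalone userspace sketch of the difference, with TASK_SIZE replaced by an arbitrary stand-in limit:

#include <limits.h>
#include <stdio.h>

#define LIMIT 0xC0000000UL	/* stand-in for TASK_SIZE */

static int naive_ok(unsigned long addr, unsigned long size)
{
	return addr + size <= LIMIT;	/* addr + size may overflow and pass */
}

static int safe_ok(unsigned long addr, unsigned long size)
{
	return size <= LIMIT && addr <= LIMIT - size;	/* no overflow possible */
}

int main(void)
{
	/* Chosen so addr + size wraps: the naive check wrongly accepts it. */
	unsigned long addr = 0xBFFFFFF0UL;
	unsigned long size = ULONG_MAX - 0xBFFFFFE0UL;

	printf("naive=%d safe=%d\n", naive_ok(addr, size), safe_ok(addr, size));
	return 0;
}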
@@ -204,8 +204,8 @@ | |||
204 | * (a) re-use the arguments for side effects (sizeof is ok) | 204 | * (a) re-use the arguments for side effects (sizeof is ok) |
205 | * (b) require any knowledge of processes at this stage | 205 | * (b) require any knowledge of processes at this stage |
206 | */ | 206 | */ |
207 | #define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr))) | 207 | #define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr))) |
208 | #define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr))) | 208 | #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr))) |
209 | 209 | ||
210 | /* | 210 | /* |
211 | * The "__xxx" versions of the user access functions are versions that | 211 | * The "__xxx" versions of the user access functions are versions that |
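As the comment above says, put_user() and get_user() move a single scalar, evaluate their pointer argument only once, and return 0 on success or -EFAULT on a faulting access. A minimal caller-side sketch (hypothetical function, not from this patch):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical: read an int from user space, double it, write it back. */
static int double_user_int(int __user *uptr)
{
	int val;
	int err;

	err = get_user(val, uptr);	/* checked variant: does its own access_ok() */
	if (err)
		return err;

	return put_user(val * 2, uptr);
}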
@@ -213,39 +213,39 @@ | |||
213 | * with a separate "access_ok()" call (this is used when we do multiple | 213 | * with a separate "access_ok()" call (this is used when we do multiple |
214 | * accesses to the same area of user memory). | 214 | * accesses to the same area of user memory). |
215 | */ | 215 | */ |
216 | #define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr))) | 216 | #define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr))) |
217 | #define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | 217 | #define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
218 | 218 | ||
219 | 219 | ||
220 | extern long __put_user_bad(void); | 220 | extern long __put_user_bad(void); |
221 | 221 | ||
222 | #define __put_user_nocheck(x,ptr,size) \ | 222 | #define __put_user_nocheck(x, ptr, size) \ |
223 | ({ \ | 223 | ({ \ |
224 | long __pu_err; \ | 224 | long __pu_err; \ |
225 | __put_user_size((x),(ptr),(size),__pu_err); \ | 225 | __put_user_size((x), (ptr), (size), __pu_err); \ |
226 | __pu_err; \ | 226 | __pu_err; \ |
227 | }) | 227 | }) |
228 | 228 | ||
229 | #define __put_user_check(x,ptr,size) \ | 229 | #define __put_user_check(x, ptr, size) \ |
230 | ({ \ | 230 | ({ \ |
231 | long __pu_err = -EFAULT; \ | 231 | long __pu_err = -EFAULT; \ |
232 | __typeof__(*(ptr)) *__pu_addr = (ptr); \ | 232 | __typeof__(*(ptr)) *__pu_addr = (ptr); \ |
233 | if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ | 233 | if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ |
234 | __put_user_size((x),__pu_addr,(size),__pu_err); \ | 234 | __put_user_size((x), __pu_addr, (size), __pu_err); \ |
235 | __pu_err; \ | 235 | __pu_err; \ |
236 | }) | 236 | }) |
237 | 237 | ||
238 | #define __put_user_size(x,ptr,size,retval) \ | 238 | #define __put_user_size(x, ptr, size, retval) \ |
239 | do { \ | 239 | do { \ |
240 | int __cb; \ | 240 | int __cb; \ |
241 | retval = 0; \ | 241 | retval = 0; \ |
242 | switch (size) { \ | 242 | switch (size) { \ |
243 | case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \ | 243 | case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break; \ |
244 | case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \ | 244 | case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \ |
245 | case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \ | 245 | case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \ |
246 | case 8: { \ | 246 | case 8: { \ |
247 | __typeof__(*ptr) __v64 = x; \ | 247 | __typeof__(*ptr) __v64 = x; \ |
248 | retval = __copy_to_user(ptr,&__v64,8); \ | 248 | retval = __copy_to_user(ptr, &__v64, 8); \ |
249 | break; \ | 249 | break; \ |
250 | } \ | 250 | } \ |
251 | default: __put_user_bad(); \ | 251 | default: __put_user_bad(); \ |
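The double-underscore variants defined here skip the access_ok() check, as described in the comment at the top of this hunk; the intended pattern is one explicit access_ok() covering a region, followed by several unchecked accesses into it. A sketch under that assumption (hypothetical function name):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical: store two adjacent u32s after a single range check. */
static int put_user_pair(u32 __user *uptr, u32 a, u32 b)
{
	if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(u32)))
		return -EFAULT;

	if (__put_user(a, uptr))	/* unchecked: relies on access_ok() above */
		return -EFAULT;

	return __put_user(b, uptr + 1);
}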
@@ -316,35 +316,35 @@ __asm__ __volatile__( \ | |||
316 | :"=r" (err), "=r" (cb) \ | 316 | :"=r" (err), "=r" (cb) \ |
317 | :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err)) | 317 | :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err)) |
318 | 318 | ||
319 | #define __get_user_nocheck(x,ptr,size) \ | 319 | #define __get_user_nocheck(x, ptr, size) \ |
320 | ({ \ | 320 | ({ \ |
321 | long __gu_err, __gu_val; \ | 321 | long __gu_err, __gu_val; \ |
322 | __get_user_size(__gu_val,(ptr),(size),__gu_err); \ | 322 | __get_user_size(__gu_val, (ptr), (size), __gu_err); \ |
323 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ | 323 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
324 | __gu_err; \ | 324 | __gu_err; \ |
325 | }) | 325 | }) |
326 | 326 | ||
327 | #define __get_user_check(x,ptr,size) \ | 327 | #define __get_user_check(x, ptr, size) \ |
328 | ({ \ | 328 | ({ \ |
329 | long __gu_err = -EFAULT, __gu_val = 0; \ | 329 | long __gu_err = -EFAULT, __gu_val = 0; \ |
330 | const __typeof__(*(ptr)) *__gu_addr = (ptr); \ | 330 | const __typeof__(*(ptr)) *__gu_addr = (ptr); \ |
331 | if (access_ok(VERIFY_READ,__gu_addr,size)) \ | 331 | if (access_ok(VERIFY_READ, __gu_addr, size)) \ |
332 | __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ | 332 | __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ |
333 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ | 333 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
334 | __gu_err; \ | 334 | __gu_err; \ |
335 | }) | 335 | }) |
336 | 336 | ||
337 | extern long __get_user_bad(void); | 337 | extern long __get_user_bad(void); |
338 | 338 | ||
339 | #define __get_user_size(x,ptr,size,retval) \ | 339 | #define __get_user_size(x, ptr, size, retval) \ |
340 | do { \ | 340 | do { \ |
341 | int __cb; \ | 341 | int __cb; \ |
342 | retval = 0; \ | 342 | retval = 0; \ |
343 | switch (size) { \ | 343 | switch (size) { \ |
344 | case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ | 344 | case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\ |
345 | case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ | 345 | case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\ |
346 | case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ | 346 | case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\ |
347 | case 8: retval = __copy_from_user(&x,ptr,8); break; \ | 347 | case 8: retval = __copy_from_user(&x, ptr, 8); break; \ |
348 | default: (x) = __get_user_bad(); \ | 348 | default: (x) = __get_user_bad(); \ |
349 | } \ | 349 | } \ |
350 | } while (0) | 350 | } while (0) |
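__get_user_size() and __put_user_size() dispatch on sizeof(*(ptr)), a compile-time constant, so the switch collapses to a single case; an unsupported size falls into __get_user_bad()/__put_user_bad(), which are declared but never defined, turning the mistake into a link error. A standalone sketch of that trick with hypothetical names (it relies on the compiler discarding the dead default branch):

/* Deliberately never defined anywhere: referencing it is a link error. */
extern long hypothetical_bad_size(void);

/* Copy an object of a supported size; unsupported sizes fail at link time. */
#define fetch_sized(dst, src)						\
do {									\
	switch (sizeof(*(src))) {					\
	case 1: case 2: case 4: case 8:					\
		__builtin_memcpy(&(dst), (src), sizeof(*(src)));	\
		break;							\
	default:							\
		hypothetical_bad_size();				\
	}								\
} while (0)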
@@ -390,19 +390,19 @@ __asm__ __volatile__( \ | |||
390 | */ | 390 | */ |
391 | 391 | ||
392 | extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n); | 392 | extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n); |
393 | #define __copy_user(to,from,size) __xtensa_copy_user(to,from,size) | 393 | #define __copy_user(to, from, size) __xtensa_copy_user(to, from, size) |
394 | 394 | ||
395 | 395 | ||
396 | static inline unsigned long | 396 | static inline unsigned long |
397 | __generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) | 397 | __generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) |
398 | { | 398 | { |
399 | return __copy_user(to,from,n); | 399 | return __copy_user(to, from, n); |
400 | } | 400 | } |
401 | 401 | ||
402 | static inline unsigned long | 402 | static inline unsigned long |
403 | __generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n) | 403 | __generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n) |
404 | { | 404 | { |
405 | return __copy_user(to,from,n); | 405 | return __copy_user(to, from, n); |
406 | } | 406 | } |
407 | 407 | ||
408 | static inline unsigned long | 408 | static inline unsigned long |
@@ -410,7 +410,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n) | |||
410 | { | 410 | { |
411 | prefetch(from); | 411 | prefetch(from); |
412 | if (access_ok(VERIFY_WRITE, to, n)) | 412 | if (access_ok(VERIFY_WRITE, to, n)) |
413 | return __copy_user(to,from,n); | 413 | return __copy_user(to, from, n); |
414 | return n; | 414 | return n; |
415 | } | 415 | } |
416 | 416 | ||
@@ -419,18 +419,18 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n) | |||
419 | { | 419 | { |
420 | prefetchw(to); | 420 | prefetchw(to); |
421 | if (access_ok(VERIFY_READ, from, n)) | 421 | if (access_ok(VERIFY_READ, from, n)) |
422 | return __copy_user(to,from,n); | 422 | return __copy_user(to, from, n); |
423 | else | 423 | else |
424 | memset(to, 0, n); | 424 | memset(to, 0, n); |
425 | return n; | 425 | return n; |
426 | } | 426 | } |
427 | 427 | ||
428 | #define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n)) | 428 | #define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n)) |
429 | #define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n)) | 429 | #define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n)) |
430 | #define __copy_to_user(to,from,n) \ | 430 | #define __copy_to_user(to, from, n) \ |
431 | __generic_copy_to_user_nocheck((to),(from),(n)) | 431 | __generic_copy_to_user_nocheck((to), (from), (n)) |
432 | #define __copy_from_user(to,from,n) \ | 432 | #define __copy_from_user(to, from, n) \ |
433 | __generic_copy_from_user_nocheck((to),(from),(n)) | 433 | __generic_copy_from_user_nocheck((to), (from), (n)) |
434 | #define __copy_to_user_inatomic __copy_to_user | 434 | #define __copy_to_user_inatomic __copy_to_user |
435 | #define __copy_from_user_inatomic __copy_from_user | 435 | #define __copy_from_user_inatomic __copy_from_user |
436 | 436 | ||
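Finally, copy_to_user() and copy_from_user() return the number of bytes that could not be copied (0 on complete success), and the hunk above shows copy_from_user() zeroing the destination when the range fails access_ok(), so a failed copy does not leave uninitialized kernel memory behind. A typical caller-side sketch (hypothetical struct and function names):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical fixed-size request structure passed in from user space. */
struct hypothetical_req {
	u32 flags;
	u64 addr;
};

static int fetch_req(struct hypothetical_req *dst,
		     const struct hypothetical_req __user *src)
{
	/* Nonzero return means some bytes were left uncopied. */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;

	return 0;
}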