author		Stephen Rothwell <sfr@canb.auug.org.au>	2005-10-31 02:39:20 -0500
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2005-10-31 22:34:17 -0500
commit		5015b49448cbe5352b9cc232333ab26f3e608a07 (patch)
tree		77267c0fbc585ee6988a33ffec49030c6c2b5030 /include/asm-powerpc/uaccess.h
parent		2df5e8bcca53e528a78ee0e3b114d0d21dd6d043 (diff)
powerpc: fix __strnlen_user in merge tree
Change USER/KERNEL_DS so that the merged version of
__strnlen_user can be used, which allows us to complete the
removal of arch/ppc64/lib/.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
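
The core of the patch is in the first two hunks below: KERNEL_DS becomes ~0UL on
both 32- and 64-bit, and the 64-bit USER_DS becomes a limit (TASK_SIZE_USER64 - 1)
rather than a high-bit mask, so __access_ok() can be a pair of comparisons instead
of the old "alpha trick" mask test. The following stand-alone C sketch mocks the
before/after checks; the TASK_SIZE_USER64 value (2^46), the old USER_DS mask, the
kernel base address, and the sample addresses are assumptions for illustration,
not taken from this patch.

/* User-space mock of the old and new 64-bit __access_ok() checks. */
#include <stdio.h>

typedef struct { unsigned long seg; } mm_segment_t;

#define MAKE_MM_SEG(s)		((mm_segment_t) { (s) })
#define TASK_SIZE_USER64	(1UL << 46)	/* assumed era value */
#define KERNEL_DS		MAKE_MM_SEG(~0UL)
#define USER_DS			MAKE_MM_SEG(TASK_SIZE_USER64 - 1)

/* Old 64-bit form: reject if any high bit of addr or size is set. */
#define old_access_ok(addr, size, segment) \
	(((segment).seg & ((addr) | (size))) == 0)

/* New merged form: addr and size each within the segment limit.
 * No overflow check on addr + size is needed: both operands are
 * below 2^46, so their sum stays far under the kernel region
 * (assumed here to start at 0xc000000000000000UL). */
#define new_access_ok(addr, size, segment) \
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

int main(void)
{
	mm_segment_t old_user_ds = MAKE_MM_SEG(0xf000000000000000UL);
	unsigned long user = 0x10000000UL;		/* sample user address */
	unsigned long kern = 0xc000000000000000UL;	/* sample kernel address */

	printf("old: user %d  kernel %d\n",
	       old_access_ok(user, 4096UL, old_user_ds),
	       old_access_ok(kern, 4096UL, old_user_ds));
	printf("new: user %d  kernel %d  (KERNEL_DS: %d)\n",
	       new_access_ok(user, 4096UL, USER_DS),
	       new_access_ok(kern, 4096UL, USER_DS),
	       new_access_ok(kern, 4096UL, KERNEL_DS));
	return 0;
}

Both forms accept the user address and reject the kernel address under USER_DS;
the comparison form additionally works with any segment expressed as a limit,
which is what lets KERNEL_DS and the 32-bit USER_DS share one definition scheme.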
Diffstat (limited to 'include/asm-powerpc/uaccess.h')
-rw-r--r--	include/asm-powerpc/uaccess.h | 113
1 file changed, 45 insertions(+), 68 deletions(-)
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
index 2ecc3e16e49e..035338b0c5ee 100644
--- a/include/asm-powerpc/uaccess.h
+++ b/include/asm-powerpc/uaccess.h
@@ -24,11 +24,11 @@
 
 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
 
+#define KERNEL_DS	MAKE_MM_SEG(~0UL)
 #ifdef __powerpc64__
-#define KERNEL_DS	MAKE_MM_SEG(0UL)
-#define USER_DS		MAKE_MM_SEG(0xf000000000000000UL)
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
 #else
-#define KERNEL_DS	MAKE_MM_SEG(~0UL)
 #define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
 #endif
 
@@ -40,22 +40,11 @@
 
 #ifdef __powerpc64__
 /*
- * Use the alpha trick for checking ranges:
- *
- * Is a address valid? This does a straightforward calculation rather
- * than tests.
- *
- * Address valid if:
- *  - "addr" doesn't have any high-bits set
- *  - AND "size" doesn't have any high-bits set
- *  - OR we are in kernel mode.
- *
- * We dont have to check for high bits in (addr+size) because the first
- * two checks force the maximum result to be below the start of the
- * kernel region.
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and the kernel addresses
  */
 #define __access_ok(addr, size, segment) \
-	(((segment).seg & (addr | size )) == 0)
+	(((addr) <= (segment).seg) && ((size) <= (segment).seg))
 
 #else
 
@@ -161,7 +150,10 @@ extern long __put_user_bad(void);
		: "=r" (err) \
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
 
-#ifndef __powerpc64__
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval) \
+	__put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
 #define __put_user_asm2(x, addr, err) \
	__asm__ __volatile__( \
		"1:	stw %1,0(%2)\n" \
@@ -178,9 +170,6 @@ extern long __put_user_bad(void);
		".previous" \
		: "=r" (err) \
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-#else /* __powerpc64__ */
-#define __put_user_asm2(x, ptr, retval) \
-	__put_user_asm(x, ptr, retval, "std")
 #endif /* __powerpc64__ */
 
 #define __put_user_size(x, ptr, size, retval) \
@@ -218,7 +207,7 @@ extern long __get_user_bad(void);
 
 #define __get_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
-		"1:	"op" %1,0(%2)	# get_user\n" \
+		"1:	"op" %1,0(%2)	# get_user\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:	li %0,%3\n" \
@@ -232,8 +221,11 @@ extern long __get_user_bad(void);
		: "=r" (err), "=r" (x) \
		: "b" (addr), "i" (-EFAULT), "0" (err))
 
-#ifndef __powerpc64__
+#ifdef __powerpc64__
 #define __get_user_asm2(x, addr, err) \
+	__get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err) \
	__asm__ __volatile__( \
		"1:	lwz %1,0(%2)\n" \
		"2:	lwz %1+1,4(%2)\n" \
@@ -251,17 +243,14 @@ extern long __get_user_bad(void);
		".previous" \
		: "=r" (err), "=&r" (x) \
		: "b" (addr), "i" (-EFAULT), "0" (err))
-#else
-#define __get_user_asm2(x, addr, err) \
-	__get_user_asm(x, addr, err, "ld")
 #endif /* __powerpc64__ */
 
 #define __get_user_size(x, ptr, size, retval) \
 do { \
	retval = 0; \
	__chk_user_ptr(ptr); \
-	if (size > sizeof(x)) \
-		(x) = __get_user_bad(); \
+	if (size > sizeof(x)) \
+		(x) = __get_user_bad(); \
	switch (size) { \
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
@@ -300,7 +289,7 @@ do { \
	long __gu_err = -EFAULT; \
	unsigned long __gu_val = 0; \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
-	might_sleep(); \
+	might_sleep(); \
	if (access_ok(VERIFY_READ, __gu_addr, (size))) \
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
@@ -313,8 +302,9 @@ extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);
 
 #ifndef __powerpc64__
-extern inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
+
+extern inline unsigned long copy_from_user(void *to,
+		const void __user *from, unsigned long n)
 {
	unsigned long over;
 
@@ -328,8 +318,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
	return n;
 }
 
-extern inline unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
+extern inline unsigned long copy_to_user(void __user *to,
+		const void *from, unsigned long n)
 {
	unsigned long over;
 
@@ -343,10 +333,23 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
	return n;
 }
 
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
 #else /* __powerpc64__ */
 
-static inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+#define __copy_in_user(to, from, size) \
+	__copy_tofrom_user((to), (from), (size))
+
+extern unsigned long copy_from_user(void *to, const void __user *from,
+		unsigned long n);
+extern unsigned long copy_to_user(void __user *to, const void *from,
+		unsigned long n);
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+		unsigned long n);
+
+static inline unsigned long __copy_from_user_inatomic(void *to,
+		const void __user *from, unsigned long n)
 {
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret;
@@ -370,8 +373,8 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
	return __copy_tofrom_user((__force void __user *) to, from, n);
 }
 
-static inline unsigned long
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __copy_to_user_inatomic(void __user *to,
+		const void *from, unsigned long n)
 {
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret;
@@ -397,8 +400,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 
 #endif /* __powerpc64__ */
 
-static inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long size)
+static inline unsigned long __copy_from_user(void *to,
+		const void __user *from, unsigned long size)
 {
	might_sleep();
 #ifndef __powerpc64__
@@ -408,8 +411,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long size)
 #endif /* __powerpc64__ */
 }
 
-static inline unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long size)
+static inline unsigned long __copy_to_user(void __user *to,
+		const void *from, unsigned long size)
 {
	might_sleep();
 #ifndef __powerpc64__
@@ -419,21 +422,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long size)
 #endif /* __powerpc64__ */
 }
 
-#ifndef __powerpc64__
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-#else /* __powerpc64__ */
-#define __copy_in_user(to, from, size) \
-	__copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
-		unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
-		unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
-		unsigned long n);
-#endif /* __powerpc64__ */
-
 extern unsigned long __clear_user(void __user *addr, unsigned long size);
 
 static inline unsigned long clear_user(void __user *addr, unsigned long size)
@@ -466,11 +454,7 @@ static inline long strncpy_from_user(char *dst, const char __user *src,
 *
 * Return 0 for error
 */
-#ifndef __powerpc64__
 extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-#else /* __powerpc64__ */
-extern int __strnlen_user(const char __user *str, long len);
-#endif /* __powerpc64__ */
 
 /*
 * Returns the length of the string at str (including the null byte),
@@ -482,18 +466,11 @@ extern int __strnlen_user(const char __user *str, long len);
 */
 static inline int strnlen_user(const char __user *str, long len)
 {
-#ifndef __powerpc64__
	unsigned long top = current->thread.fs.seg;
 
	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len, top);
-#else /* __powerpc64__ */
-	might_sleep();
-	if (likely(access_ok(VERIFY_READ, str, 1)))
-		return __strnlen_user(str, len);
-	return 0;
-#endif /* __powerpc64__ */
 }
 
 #define strlen_user(str)	strnlen_user((str), 0x7ffffffe)
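
With USER_DS carrying the segment limit on both 32- and 64-bit, strnlen_user()
can pass top = current->thread.fs.seg to a single merged __strnlen_user()
everywhere, and the 64-bit copy in arch/ppc64/lib/ can be dropped. The C sketch
below only illustrates how the top bound clamps the scan; it is a simplified
stand-in, not the real routine, which is fault-tolerant (returning 0 via
exception-table fixups when a user page is inaccessible) and whose result for
an unterminated string may differ from this sketch.

/* Illustrative stand-in for __strnlen_user(str, len, top).
 * strnlen_user() has already rejected str > top, so top - addr
 * is a valid byte budget here. */
static long strnlen_user_sketch(const char *str, long len, unsigned long top)
{
	unsigned long addr = (unsigned long)str;
	long n;

	/* Never scan past the segment limit, whatever 'len' says
	 * (the byte at 'top' itself is left out for simplicity). */
	if (len <= 0 || (unsigned long)len > top - addr)
		len = top - addr;

	for (n = 0; n < len; n++)
		if (str[n] == '\0')
			return n + 1;	/* length includes the NUL */

	return 0;	/* no terminator found within bounds */
}

strlen_user(str) then expands to strnlen_user(str, 0x7ffffffe), so even an
"unbounded" string-length query is capped by the same segment limit.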