author     Stephen Rothwell <sfr@canb.auug.org.au>   2005-10-31 02:39:20 -0500
committer  Stephen Rothwell <sfr@canb.auug.org.au>   2005-10-31 22:34:17 -0500
commit     5015b49448cbe5352b9cc232333ab26f3e608a07 (patch)
tree       77267c0fbc585ee6988a33ffec49030c6c2b5030
parent     2df5e8bcca53e528a78ee0e3b114d0d21dd6d043 (diff)
powerpc: fix __strnlen_user in merge tree
Change USER/KERNEL_DS so that the merged version of __strnlen_user can be
used, which allows us to complete the removal of arch/ppc64/lib/.

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
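As background for the USER_DS change: on 64-bit this patch replaces the old
"alpha trick" bit-mask test in __access_ok() with a plain comparison against
the segment limit, and USER_DS becomes TASK_SIZE_USER64 - 1. Below is a
minimal user-space sketch of that comparison; access_ok_sketch() and
EXAMPLE_TASK_SIZE_USER64 are illustrative names and values only, not part of
this patch or of the kernel API.

#include <stdio.h>

/* Illustrative stand-in for TASK_SIZE_USER64; the real value lives in the
 * kernel headers and is not reproduced here. */
#define EXAMPLE_TASK_SIZE_USER64	0x0000400000000000UL

/* Sketch of the new 64-bit check: both the start address and the length
 * must be at or below the segment limit.  This is enough because of the
 * large gap between the top of the user address range and the first
 * kernel address, so addr + size cannot reach kernel space. */
static int access_ok_sketch(unsigned long addr, unsigned long size,
			    unsigned long seg)
{
	return (addr <= seg) && (size <= seg);
}

int main(void)
{
	unsigned long user_ds = EXAMPLE_TASK_SIZE_USER64 - 1;
	unsigned long kernel_ds = ~0UL;

	/* A small user-range access passes against USER_DS... */
	printf("%d\n", access_ok_sketch(0x10000000UL, 4096, user_ds));
	/* ...a kernel-looking address does not... */
	printf("%d\n", access_ok_sketch(0xc000000000000000UL, 8, user_ds));
	/* ...and everything passes against KERNEL_DS (~0UL). */
	printf("%d\n", access_ok_sketch(0xc000000000000000UL, 8, kernel_ds));
	return 0;
}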
-rw-r--r--  arch/powerpc/lib/Makefile          2
-rw-r--r--  arch/ppc64/Makefile                1
-rw-r--r--  arch/ppc64/lib/Makefile            5
-rw-r--r--  arch/ppc64/lib/string.S          179
-rw-r--r--  include/asm-powerpc/uaccess.h    113
5 files changed, 46 insertions, 254 deletions
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index e6b2be3bcec1..dfb33915ad61 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -9,7 +9,7 @@ endif
 obj-y			+= strcase.o
 obj-$(CONFIG_PPC32)	+= div64.o copy_32.o checksum_32.o
 obj-$(CONFIG_PPC64)	+= checksum_64.o copypage_64.o copyuser_64.o \
-			   memcpy_64.o usercopy_64.o mem_64.o
+			   memcpy_64.o usercopy_64.o mem_64.o string.o
 obj-$(CONFIG_PPC_ISERIES)	+= e2a.o
 obj-$(CONFIG_XMON)	+= sstep.o
 
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index fdbd6f44adc0..a55a82d145d4 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -86,7 +86,6 @@ head-y := arch/ppc64/kernel/head.o
 head-y += arch/powerpc/kernel/fpu.o
 head-y += arch/powerpc/kernel/entry_64.o
 
-libs-y += arch/ppc64/lib/
 core-y += arch/ppc64/kernel/ arch/powerpc/kernel/
 core-y += arch/powerpc/mm/
 core-y += arch/powerpc/sysdev/
diff --git a/arch/ppc64/lib/Makefile b/arch/ppc64/lib/Makefile
deleted file mode 100644
index 42d5295bf345..000000000000
--- a/arch/ppc64/lib/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for ppc64-specific library files..
-#
-
-lib-y := string.o
diff --git a/arch/ppc64/lib/string.S b/arch/ppc64/lib/string.S
deleted file mode 100644
index e21a0038a4d6..000000000000
--- a/arch/ppc64/lib/string.S
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * String handling functions for PowerPC.
- *
- * Copyright (C) 1996 Paul Mackerras.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <asm/processor.h>
-#include <asm/errno.h>
-#include <asm/ppc_asm.h>
-
-_GLOBAL(strcpy)
-	addi	r5,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	stbu	r0,1(r5)
-	bne	1b
-	blr
-
-_GLOBAL(strncpy)
-	cmpwi	0,r5,0
-	beqlr
-	mtctr	r5
-	addi	r6,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	stbu	r0,1(r6)
-	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
-	blr
-
-_GLOBAL(strcat)
-	addi	r5,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r0,1(r5)
-	cmpwi	0,r0,0
-	bne	1b
-	addi	r5,r5,-1
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	stbu	r0,1(r5)
-	bne	1b
-	blr
-
-_GLOBAL(strcmp)
-	addi	r5,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r3,1(r5)
-	cmpwi	1,r3,0
-	lbzu	r0,1(r4)
-	subf.	r3,r0,r3
-	beqlr	1
-	beq	1b
-	blr
-
-_GLOBAL(strlen)
-	addi	r4,r3,-1
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	bne	1b
-	subf	r3,r3,r4
-	blr
-
-_GLOBAL(memcmp)
-	cmpwi	0,r5,0
-	ble-	2f
-	mtctr	r5
-	addi	r6,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r3,1(r6)
-	lbzu	r0,1(r4)
-	subf.	r3,r0,r3
-	bdnzt	2,1b
-	blr
-2:	li	r3,0
-	blr
-
-_GLOBAL(memchr)
-	cmpwi	0,r5,0
-	ble-	2f
-	mtctr	r5
-	addi	r3,r3,-1
-1:	lbzu	r0,1(r3)
-	cmpw	0,r0,r4
-	bdnzf	2,1b
-	beqlr
-2:	li	r3,0
-	blr
-
-_GLOBAL(__clear_user)
-	addi	r6,r3,-4
-	li	r3,0
-	li	r5,0
-	cmplwi	0,r4,4
-	blt	7f
-	/* clear a single word */
-11:	stwu	r5,4(r6)
-	beqlr
-	/* clear word sized chunks */
-	andi.	r0,r6,3
-	add	r4,r0,r4
-	subf	r6,r0,r6
-	srwi	r0,r4,2
-	andi.	r4,r4,3
-	mtctr	r0
-	bdz	7f
-1:	stwu	r5,4(r6)
-	bdnz	1b
-	/* clear byte sized chunks */
-7:	cmpwi	0,r4,0
-	beqlr
-	mtctr	r4
-	addi	r6,r6,3
-8:	stbu	r5,1(r6)
-	bdnz	8b
-	blr
-90:	mr	r3,r4
-	blr
-91:	mfctr	r3
-	slwi	r3,r3,2
-	add	r3,r3,r4
-	blr
-92:	mfctr	r3
-	blr
-
-	.section __ex_table,"a"
-	.align	3
-	.llong	11b,90b
-	.llong	1b,91b
-	.llong	8b,92b
-	.text
-
-/* r3 = dst, r4 = src, r5 = count */
-_GLOBAL(__strncpy_from_user)
-	addi	r6,r3,-1
-	addi	r4,r4,-1
-	cmpwi	0,r5,0
-	beq	2f
-	mtctr	r5
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	stbu	r0,1(r6)
-	bdnzf	2,1b		/* dec ctr, branch if ctr != 0 && !cr0.eq */
-	beq	3f
-2:	addi	r6,r6,1
-3:	subf	r3,r3,r6
-	blr
-99:	li	r3,-EFAULT
-	blr
-
-	.section __ex_table,"a"
-	.align	3
-	.llong	1b,99b
-	.text
-
-/* r3 = str, r4 = len (> 0) */
-_GLOBAL(__strnlen_user)
-	addi	r7,r3,-1
-	mtctr	r4		/* ctr = len */
-1:	lbzu	r0,1(r7)	/* get next byte */
-	cmpwi	0,r0,0
-	bdnzf	2,1b		/* loop if --ctr != 0 && byte != 0 */
-	addi	r7,r7,1
-	subf	r3,r3,r7	/* number of bytes we have looked at */
-	beqlr			/* return if we found a 0 byte */
-	cmpw	0,r3,r4		/* did we look at all len bytes? */
-	blt	99f		/* if not, must have hit top */
-	addi	r3,r4,1		/* return len + 1 to indicate no null found */
-	blr
-99:	li	r3,0		/* bad address, return 0 */
-	blr
-
-	.section __ex_table,"a"
-	.align	3
-	.llong	1b,99b
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
index 2ecc3e16e49e..035338b0c5ee 100644
--- a/include/asm-powerpc/uaccess.h
+++ b/include/asm-powerpc/uaccess.h
@@ -24,11 +24,11 @@
 
 #define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
 
+#define KERNEL_DS	MAKE_MM_SEG(~0UL)
 #ifdef __powerpc64__
-#define KERNEL_DS	MAKE_MM_SEG(0UL)
-#define USER_DS	MAKE_MM_SEG(0xf000000000000000UL)
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS	MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
 #else
-#define KERNEL_DS	MAKE_MM_SEG(~0UL)
 #define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
 #endif
 
@@ -40,22 +40,11 @@
 
 #ifdef __powerpc64__
 /*
- * Use the alpha trick for checking ranges:
- *
- * Is a address valid? This does a straightforward calculation rather
- * than tests.
- *
- * Address valid if:
- *  - "addr" doesn't have any high-bits set
- *  - AND "size" doesn't have any high-bits set
- *  - OR we are in kernel mode.
- *
- * We dont have to check for high bits in (addr+size) because the first
- * two checks force the maximum result to be below the start of the
- * kernel region.
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and the kernel addresses
  */
 #define __access_ok(addr, size, segment)	\
-	(((segment).seg & (addr | size )) == 0)
+	(((addr) <= (segment).seg) && ((size) <= (segment).seg))
 
 #else
 
@@ -161,7 +150,10 @@ extern long __put_user_bad(void);
 		: "=r" (err)	\
 		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
 
-#ifndef __powerpc64__
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval)	\
+	__put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
 #define __put_user_asm2(x, addr, err)	\
 	__asm__ __volatile__(	\
 		"1:	stw %1,0(%2)\n"	\
@@ -178,9 +170,6 @@ extern long __put_user_bad(void);
 		".previous"	\
 		: "=r" (err)	\
 		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-#else /* __powerpc64__ */
-#define __put_user_asm2(x, ptr, retval)	\
-	__put_user_asm(x, ptr, retval, "std")
 #endif /* __powerpc64__ */
 
 #define __put_user_size(x, ptr, size, retval)	\
@@ -218,7 +207,7 @@ extern long __get_user_bad(void);
 
 #define __get_user_asm(x, addr, err, op)	\
 	__asm__ __volatile__(	\
-	"1:	"op" %1,0(%2)	# get_user\n"	\
+	"1:	"op" %1,0(%2)	# get_user\n"	\
 	"2:\n"	\
 	".section .fixup,\"ax\"\n"	\
 	"3:	li %0,%3\n"	\
@@ -232,8 +221,11 @@ extern long __get_user_bad(void);
 	: "=r" (err), "=r" (x)	\
 	: "b" (addr), "i" (-EFAULT), "0" (err))
 
-#ifndef __powerpc64__
+#ifdef __powerpc64__
 #define __get_user_asm2(x, addr, err)	\
+	__get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err)	\
 	__asm__ __volatile__(	\
 		"1:	lwz %1,0(%2)\n"	\
 		"2:	lwz %1+1,4(%2)\n"	\
@@ -251,17 +243,14 @@ extern long __get_user_bad(void);
 		".previous"	\
 		: "=r" (err), "=&r" (x)	\
 		: "b" (addr), "i" (-EFAULT), "0" (err))
-#else
-#define __get_user_asm2(x, addr, err)	\
-	__get_user_asm(x, addr, err, "ld")
 #endif /* __powerpc64__ */
 
 #define __get_user_size(x, ptr, size, retval)	\
 do {	\
 	retval = 0;	\
 	__chk_user_ptr(ptr);	\
 	if (size > sizeof(x))	\
 		(x) = __get_user_bad();	\
 	switch (size) {	\
 	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
 	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
@@ -300,7 +289,7 @@ do { \
 	long __gu_err = -EFAULT;	\
 	unsigned long __gu_val = 0;	\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
-	might_sleep();	\
+	might_sleep();	\
 	if (access_ok(VERIFY_READ, __gu_addr, (size)))	\
 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;	\
@@ -313,8 +302,9 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 		const void __user *from, unsigned long size);
 
 #ifndef __powerpc64__
-extern inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
+
+extern inline unsigned long copy_from_user(void *to,
+		const void __user *from, unsigned long n)
 {
 	unsigned long over;
 
@@ -328,8 +318,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	return n;
 }
 
-extern inline unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
+extern inline unsigned long copy_to_user(void __user *to,
+		const void *from, unsigned long n)
 {
 	unsigned long over;
 
@@ -343,10 +333,23 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return n;
 }
 
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
 #else /* __powerpc64__ */
 
-static inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+#define __copy_in_user(to, from, size) \
+	__copy_tofrom_user((to), (from), (size))
+
+extern unsigned long copy_from_user(void *to, const void __user *from,
+				    unsigned long n);
+extern unsigned long copy_to_user(void __user *to, const void *from,
+				  unsigned long n);
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+				  unsigned long n);
+
+static inline unsigned long __copy_from_user_inatomic(void *to,
+		const void __user *from, unsigned long n)
 {
 	if (__builtin_constant_p(n) && (n <= 8)) {
 		unsigned long ret;
@@ -370,8 +373,8 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 	return __copy_tofrom_user((__force void __user *) to, from, n);
 }
 
-static inline unsigned long
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __copy_to_user_inatomic(void __user *to,
+		const void *from, unsigned long n)
 {
 	if (__builtin_constant_p(n) && (n <= 8)) {
 		unsigned long ret;
@@ -397,8 +400,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 
 #endif /* __powerpc64__ */
 
-static inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long size)
+static inline unsigned long __copy_from_user(void *to,
+		const void __user *from, unsigned long size)
 {
 	might_sleep();
 #ifndef __powerpc64__
@@ -408,8 +411,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long size)
 #endif /* __powerpc64__ */
 }
 
-static inline unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long size)
+static inline unsigned long __copy_to_user(void __user *to,
+		const void *from, unsigned long size)
 {
 	might_sleep();
 #ifndef __powerpc64__
@@ -419,21 +422,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long size)
 #endif /* __powerpc64__ */
 }
 
-#ifndef __powerpc64__
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-#else /* __powerpc64__ */
-#define __copy_in_user(to, from, size) \
-	__copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
-				    unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
-				  unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
-				  unsigned long n);
-#endif /* __powerpc64__ */
-
 extern unsigned long __clear_user(void __user *addr, unsigned long size);
 
 static inline unsigned long clear_user(void __user *addr, unsigned long size)
@@ -466,11 +454,7 @@ static inline long strncpy_from_user(char *dst, const char __user *src,
  *
  * Return 0 for error
  */
-#ifndef __powerpc64__
 extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-#else /* __powerpc64__ */
-extern int __strnlen_user(const char __user *str, long len);
-#endif /* __powerpc64__ */
 
 /*
  * Returns the length of the string at str (including the null byte),
@@ -482,18 +466,11 @@ extern int __strnlen_user(const char __user *str, long len);
  */
 static inline int strnlen_user(const char __user *str, long len)
 {
-#ifndef __powerpc64__
 	unsigned long top = current->thread.fs.seg;
 
 	if ((unsigned long)str > top)
 		return 0;
 	return __strnlen_user(str, len, top);
-#else /* __powerpc64__ */
-	might_sleep();
-	if (likely(access_ok(VERIFY_READ, str, 1)))
-		return __strnlen_user(str, len);
-	return 0;
-#endif /* __powerpc64__ */
 }
 
 #define strlen_user(str)	strnlen_user((str), 0x7ffffffe)