author		Gerald Schaefer <geraldsc@de.ibm.com>		2006-09-20 09:59:42 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-20 09:59:42 -0400
commit		d02765d1af743567398eb6d523dea0ba5e5e7e8e (patch)
tree		9a39c21d9924a8d81ce85254cd3d013dbe50d23e
parent		6837a8c352efcc5efc70424e9bfd94ff9bfa9a47 (diff)
[S390] Make user-copy operations run-time configurable.

Introduces a struct uaccess_ops which allows setting user-copy
operations at run-time.

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
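
The mechanism in outline: instead of binding callers to fixed assembler entry points at link time, every user-copy primitive now dispatches through a table of function pointers that setup_arch() fills in during boot. A minimal, self-contained sketch of the pattern, with simplified stand-in types and names rather than the kernel's actual declarations (those are in the include/asm-s390/uaccess.h hunk below):

#include <stddef.h>
#include <string.h>

/* One entry per user-copy primitive; a backend fills in all of them.
 * Each returns the number of bytes left uncopied, 0 on full success. */
struct copy_ops {
	size_t (*copy_from)(size_t n, const void *src, void *dst);
	size_t (*copy_to)(size_t n, void *dst, const void *src);
};

static size_t std_copy_from(size_t n, const void *src, void *dst)
{
	memcpy(dst, src, n);	/* stands in for the mvcp-based routine */
	return 0;
}

static size_t std_copy_to(size_t n, void *dst, const void *src)
{
	memcpy(dst, src, n);	/* stands in for the mvcs-based routine */
	return 0;
}

static const struct copy_ops ops_std = {
	.copy_from = std_copy_from,
	.copy_to = std_copy_to,
};

static struct copy_ops ops;	/* the live table, populated once at boot */

void select_backend(void)
{
	memcpy(&ops, &ops_std, sizeof(ops));	/* mirrors setup_arch() below */
}

Since callers only ever go through the global table, replacing the implementation later costs one structure copy and no relinking.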
-rw-r--r--	arch/s390/kernel/s390_ksyms.c	  6
-rw-r--r--	arch/s390/kernel/setup.c	  7
-rw-r--r--	arch/s390/lib/Makefile		  3
-rw-r--r--	arch/s390/lib/uaccess.S		211
-rw-r--r--	arch/s390/lib/uaccess64.S	207
-rw-r--r--	arch/s390/lib/uaccess_std.c	340
-rw-r--r--	include/asm-s390/futex.h	 87
-rw-r--r--	include/asm-s390/uaccess.h	171
8 files changed, 411 insertions(+), 621 deletions(-)
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index c73a45467fa4..9f19e833a562 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -25,12 +25,6 @@ EXPORT_SYMBOL(_oi_bitmap);
 EXPORT_SYMBOL(_ni_bitmap);
 EXPORT_SYMBOL(_zb_findmap);
 EXPORT_SYMBOL(_sb_findmap);
-EXPORT_SYMBOL(__copy_from_user_asm);
-EXPORT_SYMBOL(__copy_to_user_asm);
-EXPORT_SYMBOL(__copy_in_user_asm);
-EXPORT_SYMBOL(__clear_user_asm);
-EXPORT_SYMBOL(__strncpy_from_user_asm);
-EXPORT_SYMBOL(__strnlen_user_asm);
 EXPORT_SYMBOL(diag10);
 
 /*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f2a9165ca4f8..e229af59976c 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -51,6 +51,12 @@
 #include <asm/sections.h>
 
 /*
+ * User copy operations.
+ */
+struct uaccess_ops uaccess;
+EXPORT_SYMBOL_GPL(uaccess);
+
+/*
  * Machine setup..
  */
 unsigned int console_mode = 0;
@@ -641,6 +647,7 @@ setup_arch(char **cmdline_p)
 
 	memory_end = memory_size;
 
+	memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
 	parse_early_param();
 
 #ifndef CONFIG_64BIT
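
Note that setup_arch() installs the standard backend before init ever runs, so every later user-space access finds a valid table. A hypothetical follow-on backend (the name uaccess_fast and the feature test are invented for illustration) would be selected the same way:

	/* Sketch only: pick a faster backend when the machine supports it. */
	if (machine_has_fast_copy())
		memcpy(&uaccess, &uaccess_fast, sizeof(uaccess));
	else
		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

Copying the whole structure, rather than keeping a pointer to the chosen ops, means each call is one indirect branch through a directly addressed global instead of two dependent loads.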
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e05d087a6eae..96c82424d88b 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,5 @@
 
 EXTRA_AFLAGS := -traditional
 
-lib-y += delay.o string.o
-lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o)
+lib-y += delay.o string.o uaccess_std.o
 lib-$(CONFIG_SMP) += spinlock.o
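
The kbuild change drops the $(if $(CONFIG_64BIT),...) selection between two assembler files; the single uaccess_std.c below compiles for both word sizes by choosing 31- or 64-bit mnemonics via macros that expand to string literals pasted into the inline asm. The trick in miniature (a sketch assuming an s390 gcc; add_two is an invented example):

/* Adjacent string literals concatenate, so the same asm template
 * assembles as "ahi" on 31-bit and as "aghi" on 64-bit kernels. */
#ifndef __s390x__
#define AHI "ahi"
#else
#define AHI "aghi"
#endif

static inline long add_two(long x)
{
	asm volatile(
		" "AHI" %0,2\n"	/* x += 2 with the width-correct opcode */
		: "+d" (x) : : "cc");
	return x;
}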
diff --git a/arch/s390/lib/uaccess.S b/arch/s390/lib/uaccess.S
deleted file mode 100644
index 837275284d9f..000000000000
--- a/arch/s390/lib/uaccess.S
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * arch/s390/lib/uaccess.S
- * __copy_{from|to}_user functions.
- *
- * s390
- * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * These functions have standard call interface
- */
-
-#include <linux/errno.h>
-#include <asm/lowcore.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align 4
-	.globl __copy_from_user_asm
-	# %r2 = to, %r3 = n, %r4 = from
-__copy_from_user_asm:
-	slr	%r0,%r0
-0:	mvcp	0(%r3,%r2),0(%r4),%r0
-	jnz	1f
-	slr	%r2,%r2
-	br	%r14
-1:	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-	ahi	%r3,-256
-2:	mvcp	0(%r3,%r2),0(%r4),%r0
-	jnz	1b
-3:	slr	%r2,%r2
-	br	%r14
-4:	lhi	%r0,-4096
-	lr	%r5,%r4
-	slr	%r5,%r0
-	nr	%r5,%r0	# %r5 = (%r4 + 4096) & -4096
-	slr	%r5,%r4	# %r5 = #bytes to next user page boundary
-	clr	%r3,%r5	# copy crosses next page boundary ?
-	jnh	6f	# no, the current page faulted
-	# move with the reduced length which is < 256
-5:	mvcp	0(%r5,%r2),0(%r4),%r0
-	slr	%r3,%r5
-6:	lr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.long	0b,4b
-	.long	2b,4b
-	.long	5b,6b
-	.previous
-
-	.align 4
-	.text
-	.globl __copy_to_user_asm
-	# %r2 = from, %r3 = n, %r4 = to
-__copy_to_user_asm:
-	slr	%r0,%r0
-0:	mvcs	0(%r3,%r4),0(%r2),%r0
-	jnz	1f
-	slr	%r2,%r2
-	br	%r14
-1:	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-	ahi	%r3,-256
-2:	mvcs	0(%r3,%r4),0(%r2),%r0
-	jnz	1b
-3:	slr	%r2,%r2
-	br	%r14
-4:	lhi	%r0,-4096
-	lr	%r5,%r4
-	slr	%r5,%r0
-	nr	%r5,%r0	# %r5 = (%r4 + 4096) & -4096
-	slr	%r5,%r4	# %r5 = #bytes to next user page boundary
-	clr	%r3,%r5	# copy crosses next page boundary ?
-	jnh	6f	# no, the current page faulted
-	# move with the reduced length which is < 256
-5:	mvcs	0(%r5,%r4),0(%r2),%r0
-	slr	%r3,%r5
-6:	lr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.long	0b,4b
-	.long	2b,4b
-	.long	5b,6b
-	.previous
-
-	.align 4
-	.text
-	.globl __copy_in_user_asm
-	# %r2 = from, %r3 = n, %r4 = to
-__copy_in_user_asm:
-	ahi	%r3,-1
-	jo	6f
-	sacf	256
-	bras	%r1,4f
-0:	ahi	%r3,257
-1:	mvc	0(1,%r4),0(%r2)
-	la	%r2,1(%r2)
-	la	%r4,1(%r4)
-	ahi	%r3,-1
-	jnz	1b
-2:	lr	%r2,%r3
-	br	%r14
-3:	mvc	0(256,%r4),0(%r2)
-	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-4:	ahi	%r3,-256
-	jnm	3b
-5:	ex	%r3,4(%r1)
-	sacf	0
-6:	slr	%r2,%r2
-	br	%r14
-	.section __ex_table,"a"
-	.long	1b,2b
-	.long	3b,0b
-	.long	5b,0b
-	.previous
-
-	.align 4
-	.text
-	.globl __clear_user_asm
-	# %r2 = to, %r3 = n
-__clear_user_asm:
-	bras	%r5,0f
-	.long	empty_zero_page
-0:	l	%r5,0(%r5)
-	slr	%r0,%r0
-1:	mvcs	0(%r3,%r2),0(%r5),%r0
-	jnz	2f
-	slr	%r2,%r2
-	br	%r14
-2:	la	%r2,256(%r2)
-	ahi	%r3,-256
-3:	mvcs	0(%r3,%r2),0(%r5),%r0
-	jnz	2b
-4:	slr	%r2,%r2
-	br	%r14
-5:	lhi	%r0,-4096
-	lr	%r4,%r2
-	slr	%r4,%r0
-	nr	%r4,%r0	# %r4 = (%r2 + 4096) & -4096
-	slr	%r4,%r2	# %r4 = #bytes to next user page boundary
-	clr	%r3,%r4	# clear crosses next page boundary ?
-	jnh	7f	# no, the current page faulted
-	# clear with the reduced length which is < 256
-6:	mvcs	0(%r4,%r2),0(%r5),%r0
-	slr	%r3,%r4
-7:	lr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.long	1b,5b
-	.long	3b,5b
-	.long	6b,7b
-	.previous
-
-	.align 4
-	.text
-	.globl __strncpy_from_user_asm
-	# %r2 = count, %r3 = dst, %r4 = src
-__strncpy_from_user_asm:
-	lhi	%r0,0
-	lr	%r1,%r4
-	la	%r4,0(%r4)	# clear high order bit from %r4
-	la	%r2,0(%r2,%r4)	# %r2 points to first byte after string
-	sacf	256
-0:	srst	%r2,%r1
-	jo	0b
-	sacf	0
-	lr	%r1,%r2
-	jh	1f	# \0 found in string ?
-	ahi	%r1,1	# include \0 in copy
-1:	slr	%r1,%r4	# %r1 = copy length (without \0)
-	slr	%r2,%r4	# %r2 = return length (including \0)
-2:	mvcp	0(%r1,%r3),0(%r4),%r0
-	jnz	3f
-	br	%r14
-3:	la	%r3,256(%r3)
-	la	%r4,256(%r4)
-	ahi	%r1,-256
-	mvcp	0(%r1,%r3),0(%r4),%r0
-	jnz	3b
-	br	%r14
-4:	sacf	0
-	lhi	%r2,-EFAULT
-	br	%r14
-	.section __ex_table,"a"
-	.long	0b,4b
-	.previous
-
-	.align 4
-	.text
-	.globl __strnlen_user_asm
-	# %r2 = count, %r3 = src
-__strnlen_user_asm:
-	lhi	%r0,0
-	lr	%r1,%r3
-	la	%r3,0(%r3)	# clear high order bit from %r4
-	la	%r2,0(%r2,%r3)	# %r2 points to first byte after string
-	sacf	256
-0:	srst	%r2,%r1
-	jo	0b
-	sacf	0
-	ahi	%r2,1	# strnlen_user result includes the \0
-		# or return count+1 if \0 not found
-	slr	%r2,%r3
-	br	%r14
-2:	sacf	0
-	slr	%r2,%r2	# return 0 on exception
-	br	%r14
-	.section __ex_table,"a"
-	.long	0b,2b
-	.previous
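
For reference, the algorithm the deleted routines implement (and which uaccess_std.c keeps): mvcp/mvcs move at most 256 bytes per execution, so the copy proceeds in 256-byte steps; when a step faults, the __ex_table fixup computes how many bytes remain before the next 4 KB page boundary, retries once with that reduced length, and finally returns the number of bytes that could not be copied. A plain-C model of that control flow, where chunk_copy() is an invented stand-in for one mvcp execution (the real code also runs in the secondary address space, which the model omits):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for one mvcp: copies up to 256 bytes and returns 0, or
 * nonzero if the user page is not accessible. */
static int chunk_copy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/* Returns the number of bytes NOT copied, as the kernel routines do. */
static size_t copy_from_model(void *dst, const void *src, size_t n)
{
	uintptr_t boundary;
	size_t left;

	while (n > 256) {
		if (chunk_copy(dst, src, 256))
			goto fault;
		dst = (char *)dst + 256;
		src = (const char *)src + 256;
		n -= 256;
	}
	if (chunk_copy(dst, src, n))
		goto fault;
	return 0;
fault:
	/* Bytes from src up to the next 4 KB page boundary. */
	boundary = ((uintptr_t)src + 4096) & ~4095UL;
	left = boundary - (uintptr_t)src;
	/* If the copy does not even reach the boundary, the current
	 * page itself faulted and nothing more can be salvaged. */
	if (n > left && chunk_copy(dst, src, left) == 0)
		return n - left;
	return n;
}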
diff --git a/arch/s390/lib/uaccess64.S b/arch/s390/lib/uaccess64.S
deleted file mode 100644
index 1f755be22f92..000000000000
--- a/arch/s390/lib/uaccess64.S
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * arch/s390x/lib/uaccess.S
- * __copy_{from|to}_user functions.
- *
- * s390
- * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * These functions have standard call interface
- */
-
-#include <linux/errno.h>
-#include <asm/lowcore.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align 4
-	.globl __copy_from_user_asm
-	# %r2 = to, %r3 = n, %r4 = from
-__copy_from_user_asm:
-	slgr	%r0,%r0
-0:	mvcp	0(%r3,%r2),0(%r4),%r0
-	jnz	1f
-	slgr	%r2,%r2
-	br	%r14
-1:	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-	aghi	%r3,-256
-2:	mvcp	0(%r3,%r2),0(%r4),%r0
-	jnz	1b
-3:	slgr	%r2,%r2
-	br	%r14
-4:	lghi	%r0,-4096
-	lgr	%r5,%r4
-	slgr	%r5,%r0
-	ngr	%r5,%r0	# %r5 = (%r4 + 4096) & -4096
-	slgr	%r5,%r4	# %r5 = #bytes to next user page boundary
-	clgr	%r3,%r5	# copy crosses next page boundary ?
-	jnh	6f	# no, the current page faulted
-	# move with the reduced length which is < 256
-5:	mvcp	0(%r5,%r2),0(%r4),%r0
-	slgr	%r3,%r5
-6:	lgr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.quad	0b,4b
-	.quad	2b,4b
-	.quad	5b,6b
-	.previous
-
-	.align 4
-	.text
-	.globl __copy_to_user_asm
-	# %r2 = from, %r3 = n, %r4 = to
-__copy_to_user_asm:
-	slgr	%r0,%r0
-0:	mvcs	0(%r3,%r4),0(%r2),%r0
-	jnz	1f
-	slgr	%r2,%r2
-	br	%r14
-1:	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-	aghi	%r3,-256
-2:	mvcs	0(%r3,%r4),0(%r2),%r0
-	jnz	1b
-3:	slgr	%r2,%r2
-	br	%r14
-4:	lghi	%r0,-4096
-	lgr	%r5,%r4
-	slgr	%r5,%r0
-	ngr	%r5,%r0	# %r5 = (%r4 + 4096) & -4096
-	slgr	%r5,%r4	# %r5 = #bytes to next user page boundary
-	clgr	%r3,%r5	# copy crosses next page boundary ?
-	jnh	6f	# no, the current page faulted
-	# move with the reduced length which is < 256
-5:	mvcs	0(%r5,%r4),0(%r2),%r0
-	slgr	%r3,%r5
-6:	lgr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.quad	0b,4b
-	.quad	2b,4b
-	.quad	5b,6b
-	.previous
-
-	.align 4
-	.text
-	.globl __copy_in_user_asm
-	# %r2 = from, %r3 = n, %r4 = to
-__copy_in_user_asm:
-	aghi	%r3,-1
-	jo	6f
-	sacf	256
-	bras	%r1,4f
-0:	aghi	%r3,257
-1:	mvc	0(1,%r4),0(%r2)
-	la	%r2,1(%r2)
-	la	%r4,1(%r4)
-	aghi	%r3,-1
-	jnz	1b
-2:	lgr	%r2,%r3
-	br	%r14
-3:	mvc	0(256,%r4),0(%r2)
-	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-4:	aghi	%r3,-256
-	jnm	3b
-5:	ex	%r3,4(%r1)
-	sacf	0
-6:	slgr	%r2,%r2
-	br	%r14
-	.section __ex_table,"a"
-	.quad	1b,2b
-	.quad	3b,0b
-	.quad	5b,0b
-	.previous
-
-	.align 4
-	.text
-	.globl __clear_user_asm
-	# %r2 = to, %r3 = n
-__clear_user_asm:
-	slgr	%r0,%r0
-	larl	%r5,empty_zero_page
-1:	mvcs	0(%r3,%r2),0(%r5),%r0
-	jnz	2f
-	slgr	%r2,%r2
-	br	%r14
-2:	la	%r2,256(%r2)
-	aghi	%r3,-256
-3:	mvcs	0(%r3,%r2),0(%r5),%r0
-	jnz	2b
-4:	slgr	%r2,%r2
-	br	%r14
-5:	lghi	%r0,-4096
-	lgr	%r4,%r2
-	slgr	%r4,%r0
-	ngr	%r4,%r0	# %r4 = (%r2 + 4096) & -4096
-	slgr	%r4,%r2	# %r4 = #bytes to next user page boundary
-	clgr	%r3,%r4	# clear crosses next page boundary ?
-	jnh	7f	# no, the current page faulted
-	# clear with the reduced length which is < 256
-6:	mvcs	0(%r4,%r2),0(%r5),%r0
-	slgr	%r3,%r4
-7:	lgr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.quad	1b,5b
-	.quad	3b,5b
-	.quad	6b,7b
-	.previous
-
-	.align 4
-	.text
-	.globl __strncpy_from_user_asm
-	# %r2 = count, %r3 = dst, %r4 = src
-__strncpy_from_user_asm:
-	lghi	%r0,0
-	lgr	%r1,%r4
-	la	%r2,0(%r2,%r4)	# %r2 points to first byte after string
-	sacf	256
-0:	srst	%r2,%r1
-	jo	0b
-	sacf	0
-	lgr	%r1,%r2
-	jh	1f	# \0 found in string ?
-	aghi	%r1,1	# include \0 in copy
-1:	slgr	%r1,%r4	# %r1 = copy length (without \0)
-	slgr	%r2,%r4	# %r2 = return length (including \0)
-2:	mvcp	0(%r1,%r3),0(%r4),%r0
-	jnz	3f
-	br	%r14
-3:	la	%r3,256(%r3)
-	la	%r4,256(%r4)
-	aghi	%r1,-256
-	mvcp	0(%r1,%r3),0(%r4),%r0
-	jnz	3b
-	br	%r14
-4:	sacf	0
-	lghi	%r2,-EFAULT
-	br	%r14
-	.section __ex_table,"a"
-	.quad	0b,4b
-	.previous
-
-	.align 4
-	.text
-	.globl __strnlen_user_asm
-	# %r2 = count, %r3 = src
-__strnlen_user_asm:
-	lghi	%r0,0
-	lgr	%r1,%r3
-	la	%r2,0(%r2,%r3)	# %r2 points to first byte after string
-	sacf	256
-0:	srst	%r2,%r1
-	jo	0b
-	sacf	0
-	aghi	%r2,1	# strnlen_user result includes the \0
-		# or return count+1 if \0 not found
-	slgr	%r2,%r3
-	br	%r14
-2:	sacf	0
-	slgr	%r2,%r2	# return 0 on exception
-	br	%r14
-	.section __ex_table,"a"
-	.quad	0b,2b
-	.previous
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
new file mode 100644
index 000000000000..9a4d4a29ea79
--- /dev/null
+++ b/arch/s390/lib/uaccess_std.c
@@ -0,0 +1,340 @@
+/*
+ * arch/s390/lib/uaccess_std.c
+ *
+ * Standard user space access functions based on mvcp/mvcs and doing
+ * interesting things in the secondary space mode.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/futex.h>
+
+#ifndef __s390x__
+#define AHI	"ahi"
+#define ALR	"alr"
+#define CLR	"clr"
+#define LHI	"lhi"
+#define SLR	"slr"
+#else
+#define AHI	"aghi"
+#define ALR	"algr"
+#define CLR	"clgr"
+#define LHI	"lghi"
+#define SLR	"slgr"
+#endif
+
+size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
+{
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -256UL;
+	asm volatile(
+		"0: mvcp 0(%0,%2),0(%1),%3\n"
+		" jz 5f\n"
+		"1:"ALR" %0,%3\n"
+		" la %1,256(%1)\n"
+		" la %2,256(%2)\n"
+		"2: mvcp 0(%0,%2),0(%1),%3\n"
+		" jnz 1b\n"
+		" j 5f\n"
+		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
+		" "LHI" %3,-4096\n"
+		" nr %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 6f\n"
+		"4: mvcp 0(%4,%2),0(%1),%3\n"
+		" "SLR" %0,%4\n"
+		" j 6f\n"
+		"5:"SLR" %0,%0\n"
+		"6: \n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
+{
+	unsigned long tmp1, tmp2;
+
+	tmp1 = 0UL;
+	asm volatile(
+		"0: mvcp 0(%0,%2),0(%1),%3\n"
+		" "SLR" %0,%0\n"
+		" j 3f\n"
+		"1: la %4,255(%1)\n"	/* %4 = ptr + 255 */
+		" "LHI" %3,-4096\n"
+		" nr %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 3f\n"
+		"2: mvcp 0(%4,%2),0(%1),%3\n"
+		" "SLR" %0,%4\n"
+		"3:\n"
+		EX_TABLE(0b,1b) EX_TABLE(2b,3b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
+{
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -256UL;
+	asm volatile(
+		"0: mvcs 0(%0,%1),0(%2),%3\n"
+		" jz 5f\n"
+		"1:"ALR" %0,%3\n"
+		" la %1,256(%1)\n"
+		" la %2,256(%2)\n"
+		"2: mvcs 0(%0,%1),0(%2),%3\n"
+		" jnz 1b\n"
+		" j 5f\n"
+		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
+		" "LHI" %3,-4096\n"
+		" nr %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 6f\n"
+		"4: mvcs 0(%4,%1),0(%2),%3\n"
+		" "SLR" %0,%4\n"
+		" j 6f\n"
+		"5:"SLR" %0,%0\n"
+		"6: \n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x)
+{
+	unsigned long tmp1, tmp2;
+
+	tmp1 = 0UL;
+	asm volatile(
+		"0: mvcs 0(%0,%1),0(%2),%3\n"
+		" "SLR" %0,%0\n"
+		" j 3f\n"
+		"1: la %4,255(%1)\n"	/* ptr + 255 */
+		" "LHI" %3,-4096\n"
+		" nr %4,%3\n"	/* (ptr + 255) & -4096UL */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 3f\n"
+		"2: mvcs 0(%4,%1),0(%2),%3\n"
+		" "SLR" %0,%4\n"
+		"3:\n"
+		EX_TABLE(0b,1b) EX_TABLE(2b,3b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
+{
+	unsigned long tmp1;
+
+	asm volatile(
+		" "AHI" %0,-1\n"
+		" jo 5f\n"
+		" sacf 256\n"
+		" bras %3,3f\n"
+		"0:"AHI" %0,257\n"
+		"1: mvc 0(1,%1),0(%2)\n"
+		" la %1,1(%1)\n"
+		" la %2,1(%2)\n"
+		" "AHI" %0,-1\n"
+		" jnz 1b\n"
+		" j 5f\n"
+		"2: mvc 0(256,%1),0(%2)\n"
+		" la %1,256(%1)\n"
+		" la %2,256(%2)\n"
+		"3:"AHI" %0,-256\n"
+		" jnm 2b\n"
+		"4: ex %0,1b-0b(%3)\n"
+		" sacf 0\n"
+		"5: "SLR" %0,%0\n"
+		"6:\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t clear_user_std(size_t size, void __user *to)
+{
+	unsigned long tmp1, tmp2;
+
+	asm volatile(
+		" "AHI" %0,-1\n"
+		" jo 5f\n"
+		" sacf 256\n"
+		" bras %3,3f\n"
+		" xc 0(1,%1),0(%1)\n"
+		"0:"AHI" %0,257\n"
+		" la %2,255(%1)\n"	/* %2 = ptr + 255 */
+		" srl %2,12\n"
+		" sll %2,12\n"	/* %2 = (ptr + 255) & -4096 */
+		" "SLR" %2,%1\n"
+		" "CLR" %0,%2\n"	/* clear crosses next page boundary? */
+		" jnh 5f\n"
+		" "AHI" %2,-1\n"
+		"1: ex %2,0(%3)\n"
+		" "AHI" %2,1\n"
+		" "SLR" %0,%2\n"
+		" j 5f\n"
+		"2: xc 0(256,%1),0(%1)\n"
+		" la %1,256(%1)\n"
+		"3:"AHI" %0,-256\n"
+		" jnm 2b\n"
+		"4: ex %0,0(%3)\n"
+		" sacf 0\n"
+		"5: "SLR" %0,%0\n"
+		"6:\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t strnlen_user_std(size_t size, const char __user *src)
+{
+	register unsigned long reg0 asm("0") = 0UL;
+	unsigned long tmp1, tmp2;
+
+	asm volatile(
+		" la %2,0(%1)\n"
+		" la %3,0(%0,%1)\n"
+		" "SLR" %0,%0\n"
+		" sacf 256\n"
+		"0: srst %3,%2\n"
+		" jo 0b\n"
+		" la %0,1(%3)\n"	/* strnlen_user results includes \0 */
+		" "SLR" %0,%1\n"
+		"1: sacf 0\n"
+		EX_TABLE(0b,1b)
+		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
+{
+	register unsigned long reg0 asm("0") = 0UL;
+	unsigned long tmp1, tmp2;
+
+	asm volatile(
+		" la %3,0(%1)\n"
+		" la %4,0(%0,%1)\n"
+		" sacf 256\n"
+		"0: srst %4,%3\n"
+		" jo 0b\n"
+		" sacf 0\n"
+		" la %0,0(%4)\n"
+		" jh 1f\n"	/* found \0 in string ? */
+		" "AHI" %4,1\n"	/* include \0 in copy */
+		"1:"SLR" %0,%1\n"	/* %0 = return length (without \0) */
+		" "SLR" %4,%1\n"	/* %4 = copy length (including \0) */
+		"2: mvcp 0(%4,%2),0(%1),%5\n"
+		" jz 9f\n"
+		"3:"AHI" %4,-256\n"
+		" la %1,256(%1)\n"
+		" la %2,256(%2)\n"
+		"4: mvcp 0(%4,%2),0(%1),%5\n"
+		" jnz 3b\n"
+		" j 9f\n"
+		"7: sacf 0\n"
+		"8:"LHI" %0,%6\n"
+		"9:\n"
+		EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
+		: "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
+		: "d" (reg0), "K" (-EFAULT) : "cc", "memory");
+	return size;
+}
+
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
+	asm volatile( \
+		" sacf 256\n" \
+		"0: l %1,0(%6)\n" \
+		"1:"insn \
+		"2: cs %1,%2,0(%6)\n" \
+		"3: jl 1b\n" \
+		" lhi %0,0\n" \
+		"4: sacf 0\n" \
+		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
+		: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
+		  "=m" (*uaddr) \
+		: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
+		  "m" (*uaddr) : "cc");
+
+int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
+{
+	int oldval = 0, newval, ret;
+
+	inc_preempt_count();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+	dec_preempt_count();
+	*old = oldval;
+	return ret;
+}
+
+int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval)
+{
+	int ret;
+
+	asm volatile(
+		" sacf 256\n"
+		" cs %1,%4,0(%5)\n"
+		"0: lr %0,%1\n"
+		"1: sacf 0\n"
+		EX_TABLE(0b,1b)
+		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		: "cc", "memory" );
+	return ret;
+}
+
+struct uaccess_ops uaccess_std = {
+	.copy_from_user = copy_from_user_std,
+	.copy_from_user_small = copy_from_user_std_small,
+	.copy_to_user = copy_to_user_std,
+	.copy_to_user_small = copy_to_user_std_small,
+	.copy_in_user = copy_in_user_std,
+	.clear_user = clear_user_std,
+	.strnlen_user = strnlen_user_std,
+	.strncpy_from_user = strncpy_from_user_std,
+	.futex_atomic_op = futex_atomic_op,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg,
+};
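
The futex helpers moved into this file keep the classic load / modify / compare-and-swap shape: cs stores the new value only if the word still holds what was loaded at label 0, and jl retries when another CPU changed it in between. The same loop in portable C, with a GCC builtin standing in for cs and the sacf address-space switch and fault handling left out:

/* Model of __futex_atomic_op for the FUTEX_OP_ADD case; the comments
 * refer to the labels in the asm above. */
static int futex_add_model(int *uaddr, int oparg, int *old)
{
	int oldval, newval;

	do {
		oldval = *uaddr;		/* 0: l  %1,0(%6)  */
		newval = oldval + oparg;	/* 1: the op insn  */
		/* 2: cs %1,%2,0(%6); 3: jl 1b */
	} while (!__sync_bool_compare_and_swap(uaddr, oldval, newval));

	*old = oldval;
	return 0;
}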
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index ffedf14f89f6..5e261e1de671 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -7,75 +7,21 @@
 #include <asm/errno.h>
 #include <asm/uaccess.h>
 
-#ifndef __s390x__
-#define __futex_atomic_fixup \
-	".section __ex_table,\"a\"\n" \
-	" .align 4\n" \
-	" .long 0b,4b,2b,4b,3b,4b\n" \
-	".previous"
-#else /* __s390x__ */
-#define __futex_atomic_fixup \
-	".section __ex_table,\"a\"\n" \
-	" .align 8\n" \
-	" .quad 0b,4b,2b,4b,3b,4b\n" \
-	".previous"
-#endif /* __s390x__ */
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
-	asm volatile(" sacf 256\n" \
-		"0: l %1,0(%6)\n" \
-		"1: " insn \
-		"2: cs %1,%2,0(%6)\n" \
-		"3: jl 1b\n" \
-		" lhi %0,0\n" \
-		"4: sacf 0\n" \
-		__futex_atomic_fixup \
-		: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
-		  "=m" (*uaddr) \
-		: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
-		  "m" (*uaddr) : "cc" );
-
 static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (encoded_op << 8) >> 20;
 	int cmparg = (encoded_op << 20) >> 20;
-	int oldval = 0, newval, ret;
+	int oldval, ret;
+
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	inc_preempt_count();
-
-	switch (op) {
-	case FUTEX_OP_SET:
-		__futex_atomic_op("lr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_ADD:
-		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_OR:
-		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_ANDN:
-		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_XOR:
-		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	default:
-		ret = -ENOSYS;
-	}
-
-	dec_preempt_count();
+	ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
 
 	if (!ret) {
 		switch (cmp) {
@@ -91,32 +37,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
-static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr,
+						int oldval, int newval)
 {
-	int ret;
-
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
-	asm volatile(" sacf 256\n"
-		" cs %1,%4,0(%5)\n"
-		"0: lr %0,%1\n"
-		"1: sacf 0\n"
-#ifndef __s390x__
-		".section __ex_table,\"a\"\n"
-		" .align 4\n"
-		" .long 0b,1b\n"
-		".previous"
-#else /* __s390x__ */
-		".section __ex_table,\"a\"\n"
-		" .align 8\n"
-		" .quad 0b,1b\n"
-		".previous"
-#endif /* __s390x__ */
-		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
-		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
-		: "cc", "memory" );
-	return oldval;
+
+	return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval);
 }
 
 #endif /* __KERNEL__ */
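
For reference, the bitfield unpacking at the top of futex_atomic_op_inuser: op and cmp are plain masked shifts, while the shift-left-then-shift-right pairs sign-extend the two 12-bit argument fields. A small host-compilable example with an invented encoding value:

#include <stdio.h>

int main(void)
{
	int encoded_op = 0x10000001;	/* op=ADD, cmp=EQ, oparg=0, cmparg=1 */
	int op = (encoded_op >> 28) & 7;	/* bits 31..28 -> 1 */
	int cmp = (encoded_op >> 24) & 15;	/* bits 27..24 -> 0 */
	int oparg = (encoded_op << 8) >> 20;	/* bits 23..12, signed -> 0 */
	int cmparg = (encoded_op << 20) >> 20;	/* bits 11..0, signed -> 1 */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}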
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 0b7c0ca4c3d7..39a2716ae188 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -47,7 +47,7 @@
 	S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
 	asm volatile ("lctlg 7,7,%0" : : "m" (__pto) ); \
 })
-#else
+#else /* __s390x__ */
 #define set_fs(x) \
 ({ \
 	unsigned long __pto; \
@@ -56,7 +56,7 @@
 	S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
 	asm volatile ("lctl 7,7,%0" : : "m" (__pto) ); \
 })
-#endif
+#endif /* __s390x__ */
 
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
@@ -85,76 +85,50 @@ struct exception_table_entry
 	unsigned long insn, fixup;
 };
 
-#ifndef __s390x__
-#define __uaccess_fixup \
-	".section .fixup,\"ax\"\n" \
-	"2: lhi %0,%4\n" \
-	" bras 1,3f\n" \
-	" .long 1b\n" \
-	"3: l 1,0(1)\n" \
-	" br 1\n" \
-	".previous\n" \
-	".section __ex_table,\"a\"\n" \
-	" .align 4\n" \
-	" .long 0b,2b\n" \
-	".previous"
-#define __uaccess_clobber "cc", "1"
-#else /* __s390x__ */
-#define __uaccess_fixup \
-	".section .fixup,\"ax\"\n" \
-	"2: lghi %0,%4\n" \
-	" jg 1b\n" \
-	".previous\n" \
-	".section __ex_table,\"a\"\n" \
-	" .align 8\n" \
-	" .quad 0b,2b\n" \
-	".previous"
-#define __uaccess_clobber "cc"
-#endif /* __s390x__ */
+struct uaccess_ops {
+	size_t (*copy_from_user)(size_t, const void __user *, void *);
+	size_t (*copy_from_user_small)(size_t, const void __user *, void *);
+	size_t (*copy_to_user)(size_t, void __user *, const void *);
+	size_t (*copy_to_user_small)(size_t, void __user *, const void *);
+	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
+	size_t (*clear_user)(size_t, void __user *);
+	size_t (*strnlen_user)(size_t, const char __user *);
+	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
+	int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
+	int (*futex_atomic_cmpxchg)(int __user *, int old, int new);
+};
+
+extern struct uaccess_ops uaccess;
+extern struct uaccess_ops uaccess_std;
+
+static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+{
+	size = uaccess.copy_to_user_small(size, ptr, x);
+	return size ? -EFAULT : size;
+}
+
+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+{
+	size = uaccess.copy_from_user_small(size, ptr, x);
+	return size ? -EFAULT : size;
+}
 
 /*
  * These are the main single-value transfer routines. They automatically
  * use the right size if we just have the right pointer type.
  */
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define __put_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile( \
-		"0: mvcs 0(%1,%2),%3,%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), "Q" (x), \
-		  "K" (-EFAULT) \
-		: __uaccess_clobber ); \
-})
-#else
-#define __put_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile( \
-		"0: mvcs 0(%1,%2),0(%3),%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), "a" (&(x)), \
-		  "K" (-EFAULT), "m" (x) \
-		: __uaccess_clobber ); \
-})
-#endif
-
 #define __put_user(x, ptr) \
 ({ \
 	__typeof__(*(ptr)) __x = (x); \
-	int __pu_err; \
+	int __pu_err = -EFAULT; \
 	__chk_user_ptr(ptr); \
 	switch (sizeof (*(ptr))) { \
 	case 1: \
 	case 2: \
 	case 4: \
 	case 8: \
-		__put_user_asm(__x, ptr, __pu_err); \
+		__pu_err = __put_user_fn(sizeof (*(ptr)), \
+					 ptr, &__x); \
 		break; \
 	default: \
 		__put_user_bad(); \
@@ -172,60 +146,36 @@ struct exception_table_entry
 
 extern int __put_user_bad(void) __attribute__((noreturn));
 
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define __get_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile ( \
-		"0: mvcp %O1(%2,%R1),0(%3),%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err), "=Q" (x) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), \
-		  "K" (-EFAULT) \
-		: __uaccess_clobber ); \
-})
-#else
-#define __get_user_asm(x, ptr, err) \
-({ \
-	err = 0; \
-	asm volatile ( \
-		"0: mvcp 0(%2,%5),0(%3),%0\n" \
-		"1:\n" \
-		__uaccess_fixup \
-		: "+&d" (err), "=m" (x) \
-		: "d" (sizeof(*(ptr))), "a" (ptr), \
-		  "K" (-EFAULT), "a" (&(x)) \
-		: __uaccess_clobber ); \
-})
-#endif
-
 #define __get_user(x, ptr) \
 ({ \
-	int __gu_err; \
+	int __gu_err = -EFAULT; \
 	__chk_user_ptr(ptr); \
 	switch (sizeof(*(ptr))) { \
 	case 1: { \
 		unsigned char __x; \
-		__get_user_asm(__x, ptr, __gu_err); \
+		__gu_err = __get_user_fn(sizeof (*(ptr)), \
+					 ptr, &__x); \
 		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
 		break; \
 	}; \
 	case 2: { \
 		unsigned short __x; \
-		__get_user_asm(__x, ptr, __gu_err); \
+		__gu_err = __get_user_fn(sizeof (*(ptr)), \
+					 ptr, &__x); \
 		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
 		break; \
 	}; \
 	case 4: { \
 		unsigned int __x; \
-		__get_user_asm(__x, ptr, __gu_err); \
+		__gu_err = __get_user_fn(sizeof (*(ptr)), \
+					 ptr, &__x); \
 		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
 		break; \
 	}; \
 	case 8: { \
 		unsigned long long __x; \
-		__get_user_asm(__x, ptr, __gu_err); \
+		__gu_err = __get_user_fn(sizeof (*(ptr)), \
+					 ptr, &__x); \
 		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
 		break; \
 	}; \
@@ -247,8 +197,6 @@ extern int __get_user_bad(void) __attribute__((noreturn));
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
-extern long __copy_to_user_asm(const void *from, long n, void __user *to);
-
 /**
  * __copy_to_user: - Copy a block of data into user space, with less checking.
  * @to: Destination address, in user space.
@@ -266,7 +214,10 @@ extern long __copy_to_user_asm(const void *from, long n, void __user *to);
 static inline unsigned long
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	return __copy_to_user_asm(from, n, to);
+	if (__builtin_constant_p(n) && (n <= 256))
+		return uaccess.copy_to_user_small(n, to, from);
+	else
+		return uaccess.copy_to_user(n, to, from);
 }
 
 #define __copy_to_user_inatomic __copy_to_user
@@ -294,8 +245,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return n;
 }
 
-extern long __copy_from_user_asm(void *to, long n, const void __user *from);
-
 /**
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to: Destination address, in kernel space.
@@ -316,7 +265,10 @@ extern long __copy_from_user_asm(void *to, long n, const void __user *from);
 static inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	return __copy_from_user_asm(to, n, from);
+	if (__builtin_constant_p(n) && (n <= 256))
+		return uaccess.copy_from_user_small(n, from, to);
+	else
+		return uaccess.copy_from_user(n, from, to);
 }
 
 /**
@@ -346,13 +298,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	return n;
 }
 
-extern unsigned long __copy_in_user_asm(const void __user *from, long n,
-					void __user *to);
-
 static inline unsigned long
 __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	return __copy_in_user_asm(from, n, to);
+	return uaccess.copy_in_user(n, to, from);
 }
 
 static inline unsigned long
@@ -360,34 +309,28 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
 	if (__access_ok(from,n) && __access_ok(to,n))
-		n = __copy_in_user_asm(from, n, to);
+		n = __copy_in_user(to, from, n);
 	return n;
 }
 
 /*
  * Copy a null terminated string from userspace.
  */
-extern long __strncpy_from_user_asm(long count, char *dst,
-				    const char __user *src);
-
 static inline long
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	long res = -EFAULT;
 	might_sleep();
 	if (access_ok(VERIFY_READ, src, 1))
-		res = __strncpy_from_user_asm(count, dst, src);
+		res = uaccess.strncpy_from_user(count, src, dst);
 	return res;
 }
 
-
-extern long __strnlen_user_asm(long count, const char __user *src);
-
 static inline unsigned long
 strnlen_user(const char __user * src, unsigned long n)
 {
 	might_sleep();
-	return __strnlen_user_asm(n, src);
+	return uaccess.strnlen_user(n, src);
 }
 
 /**
@@ -410,12 +353,10 @@ strnlen_user(const char __user * src, unsigned long n)
  * Zero Userspace
  */
 
-extern long __clear_user_asm(void __user *to, long n);
-
 static inline unsigned long
 __clear_user(void __user *to, unsigned long n)
 {
-	return __clear_user_asm(to, n);
+	return uaccess.clear_user(n, to);
 }
 
 static inline unsigned long
@@ -423,7 +364,7 @@ clear_user(void __user *to, unsigned long n)
 {
 	might_sleep();
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __clear_user_asm(to, n);
+		n = uaccess.clear_user(n, to);
 	return n;
 }
 
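
A closing note on the small/large split in __copy_from_user and __copy_to_user above: __builtin_constant_p(n) is resolved by the compiler, so a fixed-size copy of at most 256 bytes becomes a direct call to the _small variant with the branch optimized away; only variable-length copies pay for a runtime test. The pattern in isolation (illustrative names, stub bodies):

#include <stddef.h>
#include <string.h>

static size_t copy_small(size_t n, void *dst, const void *src)
{
	memcpy(dst, src, n);	/* stub for the short, single-step path */
	return 0;
}

static size_t copy_large(size_t n, void *dst, const void *src)
{
	memcpy(dst, src, n);	/* stub for the chunked loop */
	return 0;
}

static inline size_t copy(size_t n, void *dst, const void *src)
{
	/* Folded at compile time: a constant n <= 256 compiles to a
	 * direct call to copy_small with no branch emitted. */
	if (__builtin_constant_p(n) && n <= 256)
		return copy_small(n, dst, src);
	return copy_large(n, dst, src);
}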